From 862dbbc1c16a17b33e0e619b0e996b608a1c5069 Mon Sep 17 00:00:00 2001 From: Andronik Date: Tue, 2 Aug 2022 20:49:03 +0200 Subject: [PATCH 001/348] offences: make fn slash_fraction non-static (#11956) * offences: make fn slash_fraction non-static * Bastifmt (inline variable) --- frame/babe/src/equivocation.rs | 4 ++-- frame/grandpa/src/equivocation.rs | 4 ++-- frame/im-online/src/lib.rs | 6 +++--- frame/im-online/src/tests.rs | 10 ++++++---- frame/offences/benchmarking/src/lib.rs | 6 ++---- frame/offences/src/lib.rs | 3 +-- frame/offences/src/mock.rs | 4 ++-- primitives/staking/src/offence.rs | 7 +++---- 8 files changed, 21 insertions(+), 23 deletions(-) diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index df46f3544b389..f55bda751887d 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -284,9 +284,9 @@ impl Offence self.slot } - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { + fn slash_fraction(&self, offenders_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational(3 * offenders_count, validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, self.validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index 804272c20480f..181d22fba545c 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -353,9 +353,9 @@ impl Offence self.time_slot } - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { + fn slash_fraction(&self, offenders_count: u32) -> Perbill { // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational(3 * offenders_count, validator_set_count); + let x = Perbill::from_rational(3 * offenders_count, self.validator_set_count); // _ ^ 2 x.square() } diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index f190f6672f309..34c1c70d79f75 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -958,12 +958,12 @@ impl Offence for UnresponsivenessOffence { self.session_index } - fn slash_fraction(offenders: u32, validator_set_count: u32) -> Perbill { + fn slash_fraction(&self, offenders: u32) -> Perbill { // the formula is min((3 * (k - (n / 10 + 1))) / n, 1) * 0.07 // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% // when 13/30 are offline (around 5% when 1/3 are offline). - if let Some(threshold) = offenders.checked_sub(validator_set_count / 10 + 1) { - let x = Perbill::from_rational(3 * threshold, validator_set_count); + if let Some(threshold) = offenders.checked_sub(self.validator_set_count / 10 + 1) { + let x = Perbill::from_rational(3 * threshold, self.validator_set_count); x.saturating_mul(Perbill::from_percent(7)) } else { Perbill::default() diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 288081556a085..05e1af169dba9 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -36,22 +36,24 @@ use sp_runtime::{ #[test] fn test_unresponsiveness_slash_fraction() { + let dummy_offence = + UnresponsivenessOffence { session_index: 0, validator_set_count: 50, offenders: vec![()] }; // A single case of unresponsiveness is not slashed. 
- assert_eq!(UnresponsivenessOffence::<()>::slash_fraction(1, 50), Perbill::zero()); + assert_eq!(dummy_offence.slash_fraction(1), Perbill::zero()); assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(5, 50), + dummy_offence.slash_fraction(5), Perbill::zero(), // 0% ); assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(7, 50), + dummy_offence.slash_fraction(7), Perbill::from_parts(4200000), // 0.42% ); // One third offline should be punished around 5%. assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(17, 50), + dummy_offence.slash_fraction(17), Perbill::from_parts(46200000), // 4.62% ); } diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 98c6390964d82..b793bd8d2699a 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -288,15 +288,13 @@ benchmarks! { let (offenders, raw_offenders) = make_offenders_im_online::(o, n)?; let keys = ImOnline::::keys(); let validator_set_count = keys.len() as u32; - - let slash_fraction = UnresponsivenessOffence::::slash_fraction( - offenders.len() as u32, validator_set_count, - ); + let offenders_count = offenders.len() as u32; let offence = UnresponsivenessOffence { session_index: 0, validator_set_count, offenders, }; + let slash_fraction = offence.slash_fraction(offenders_count); assert_eq!(System::::event_count(), 0); }: { let _ = ::ReportUnresponsiveness::report_offence( diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index e4b75d9c3c015..ae454d6b06740 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -120,7 +120,6 @@ where fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { let offenders = offence.offenders(); let time_slot = offence.time_slot(); - let validator_set_count = offence.validator_set_count(); // Go through all offenders in the offence report and find all offenders that were spotted // in unique reports. @@ -134,7 +133,7 @@ where let offenders_count = concurrent_offenders.len() as u32; // The amount new offenders are slashed - let new_fraction = O::slash_fraction(offenders_count, validator_set_count); + let new_fraction = offence.slash_fraction(offenders_count); let slash_perbill: Vec<_> = (0..concurrent_offenders.len()).map(|_| new_fraction).collect(); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 6a69b54b3cca0..d9ecf44ad8734 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -168,8 +168,8 @@ impl offence::Offence for Offence { 1 } - fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { - Perbill::from_percent(5 + offenders_count * 100 / validator_set_count) + fn slash_fraction(&self, offenders_count: u32) -> Perbill { + Perbill::from_percent(5 + offenders_count * 100 / self.validator_set_count) } } diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index 4261063993a52..f6517b9e9028b 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -108,11 +108,10 @@ pub trait Offence { } /// A slash fraction of the total exposure that should be slashed for this - /// particular offence kind for the given parameters that happened at a singular `TimeSlot`. + /// particular offence for the `offenders_count` that happened at a singular `TimeSlot`. /// - /// `offenders_count` - the count of unique offending authorities. It is >0. - /// `validator_set_count` - the cardinality of the validator set at the time of offence. 
- fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill; + /// `offenders_count` - the count of unique offending authorities for this `TimeSlot`. It is >0. + fn slash_fraction(&self, offenders_count: u32) -> Perbill; } /// Errors that may happen on offence reports. From b27c470eaff379f512d1dec052aff5d551ed3b03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 3 Aug 2022 10:46:10 +0200 Subject: [PATCH 002/348] Revert non-best block (#11716) * Revert non-best block This makes `revert` also revert non-best blocks. * Update client/db/src/lib.rs * Do not count leaves against the maximum number to revert * Add some explanation * Fix bug * Apply suggestions from code review Co-authored-by: Davide Galassi Co-authored-by: Davide Galassi --- client/api/src/backend.rs | 3 +- client/api/src/leaves.rs | 5 + client/db/src/lib.rs | 149 +++++++++++++++---- client/service/src/chain_ops/revert_chain.rs | 7 + 4 files changed, 135 insertions(+), 29 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index af8552886b72e..54784a2f27b64 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -507,7 +507,8 @@ pub trait Backend: AuxStore + Send + Sync { /// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set it will attempt to /// revert past any finalized block, this is unsafe and can potentially leave the node in an - /// inconsistent state. + /// inconsistent state. All blocks higher than the best block are also reverted and not counting + /// towards `n`. /// /// Returns the number of blocks that were successfully reverted and the list of finalized /// blocks that has been reverted. diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index 2e5d4be3a5462..26eda46e6f76f 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -259,6 +259,11 @@ where removed } + + /// Returns the highest leaf and all hashes associated to it. + pub fn highest_leaf(&self) -> Option<(N, &[H])> { + self.storage.iter().next().map(|(k, v)| (k.0.clone(), &v[..])) + } } /// Helper for undoing operations. 
diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 7dd49f9831f1c..e14d3a26aa557 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -2057,36 +2057,46 @@ impl sc_client_api::backend::Backend for Backend { ) -> ClientResult<(NumberFor, HashSet)> { let mut reverted_finalized = HashSet::new(); - let mut best_number = self.blockchain.info().best_number; - let mut best_hash = self.blockchain.info().best_hash; + let info = self.blockchain.info(); - let finalized = self.blockchain.info().finalized_number; + let highest_leaf = self + .blockchain + .leaves + .read() + .highest_leaf() + .and_then(|(n, h)| h.last().map(|h| (n, *h))); + + let best_number = info.best_number; + let best_hash = info.best_hash; + + let finalized = info.finalized_number; let revertible = best_number - finalized; let n = if !revert_finalized && revertible < n { revertible } else { n }; + let (n, mut number_to_revert, mut hash_to_revert) = match highest_leaf { + Some((l_n, l_h)) => (n + (l_n - best_number), l_n, l_h), + None => (n, best_number, best_hash), + }; + let mut revert_blocks = || -> ClientResult> { for c in 0..n.saturated_into::() { - if best_number.is_zero() { + if number_to_revert.is_zero() { return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); let removed = - self.blockchain.header(BlockId::Number(best_number))?.ok_or_else(|| { + self.blockchain.header(BlockId::Hash(hash_to_revert))?.ok_or_else(|| { sp_blockchain::Error::UnknownBlock(format!( - "Error reverting to {}. Block hash not found.", - best_number + "Error reverting to {}. Block header not found.", + hash_to_revert, )) })?; let removed_hash = removed.hash(); - let prev_number = best_number.saturating_sub(One::one()); - let prev_hash = self.blockchain.hash(prev_number)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!( - "Error reverting to {}. 
Block hash not found.", - best_number - )) - })?; + let prev_number = number_to_revert.saturating_sub(One::one()); + let prev_hash = + if prev_number == best_number { best_hash } else { *removed.parent_hash() }; if !self.have_state_at(&prev_hash, prev_number) { return Ok(c.saturated_into::>()) @@ -2096,12 +2106,15 @@ impl sc_client_api::backend::Backend for Backend { Some(commit) => { apply_state_commit(&mut transaction, commit); - best_number = prev_number; - best_hash = prev_hash; + number_to_revert = prev_number; + hash_to_revert = prev_hash; - let update_finalized = best_number < finalized; + let update_finalized = number_to_revert < finalized; - let key = utils::number_and_hash_to_lookup_key(best_number, &best_hash)?; + let key = utils::number_and_hash_to_lookup_key( + number_to_revert, + &hash_to_revert, + )?; if update_finalized { transaction.set_from_vec( columns::META, @@ -2111,12 +2124,14 @@ impl sc_client_api::backend::Backend for Backend { reverted_finalized.insert(removed_hash); if let Some((hash, _)) = self.blockchain.info().finalized_state { - if hash == best_hash { - if !best_number.is_zero() && - self.have_state_at(&prev_hash, best_number - One::one()) - { + if hash == hash_to_revert { + if !number_to_revert.is_zero() && + self.have_state_at( + &prev_hash, + number_to_revert - One::one(), + ) { let lookup_key = utils::number_and_hash_to_lookup_key( - best_number - One::one(), + number_to_revert - One::one(), prev_hash, )?; transaction.set_from_vec( @@ -2137,13 +2152,16 @@ impl sc_client_api::backend::Backend for Backend { &mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, - best_hash, + hash_to_revert, ); self.storage.db.commit(transaction)?; + + let is_best = number_to_revert < best_number; + self.blockchain.update_meta(MetaUpdate { - hash: best_hash, - number: best_number, - is_best: true, + hash: hash_to_revert, + number: number_to_revert, + is_best, is_finalized: update_finalized, with_state: false, }); @@ -2161,7 +2179,7 @@ impl sc_client_api::backend::Backend for Backend { let mut transaction = Transaction::new(); let mut leaves = self.blockchain.leaves.write(); - leaves.revert(best_hash, best_number); + leaves.revert(hash_to_revert, number_to_revert); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); self.storage.db.commit(transaction)?; @@ -3463,4 +3481,79 @@ pub(crate) mod tests { insert_header_no_head(&backend, 1, block0, [1; 32].into()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); } + + #[test] + fn revert_non_best_blocks() { + let backend = Backend::::new_test(10, 10); + + let genesis = + insert_block(&backend, 0, Default::default(), None, Default::default(), vec![], None) + .unwrap(); + + let block1 = + insert_block(&backend, 1, genesis, None, Default::default(), vec![], None).unwrap(); + + let block2 = + insert_block(&backend, 2, block1, None, Default::default(), vec![], None).unwrap(); + + let block3 = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap(); + let header = Header { + number: 3, + parent_hash: block2, + state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + header.hash() + }; + + let block4 = { + let mut op = backend.begin_operation().unwrap(); + 
backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + let header = Header { + number: 4, + parent_hash: block3, + state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + header.hash() + }; + + let block3_fork = insert_header_no_head(&backend, 3, block2, Default::default()); + + assert!(backend.have_state_at(&block1, 1)); + assert!(backend.have_state_at(&block2, 2)); + assert!(backend.have_state_at(&block3, 3)); + assert!(backend.have_state_at(&block4, 4)); + assert!(backend.have_state_at(&block3_fork, 3)); + + assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]); + assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0); + + assert_eq!(3, backend.revert(1, false).unwrap().0); + + assert!(backend.have_state_at(&block1, 1)); + assert!(!backend.have_state_at(&block2, 2)); + assert!(!backend.have_state_at(&block3, 3)); + assert!(!backend.have_state_at(&block4, 4)); + assert!(!backend.have_state_at(&block3_fork, 3)); + + assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]); + assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0); + } } diff --git a/client/service/src/chain_ops/revert_chain.rs b/client/service/src/chain_ops/revert_chain.rs index 9a3ce6024ed92..3ee4399d063b3 100644 --- a/client/service/src/chain_ops/revert_chain.rs +++ b/client/service/src/chain_ops/revert_chain.rs @@ -40,6 +40,13 @@ where info!("There aren't any non-finalized blocks to revert."); } else { info!("Reverted {} blocks. Best: #{} ({})", reverted.0, info.best_number, info.best_hash); + + if reverted.0 > blocks { + info!( + "Number of reverted blocks is higher than requested \ + because of reverted leaves higher than the best block." + ) + } } Ok(()) } From 28ac0a8591c3b9765206cada54980e9ea3416ea0 Mon Sep 17 00:00:00 2001 From: Sacha Lansky Date: Wed, 3 Aug 2022 12:34:33 +0100 Subject: [PATCH 003/348] Fix docs urls (#11966) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix docs urls * Update bin/node-template/README.md * Update frame/aura/src/lib.rs * Run cargo +nightly fmt Co-authored-by: Bastian Köcher --- README.md | 2 +- bin/node-template/README.md | 32 ++++++++----------- bin/node-template/docs/rust-setup.md | 6 ++-- bin/node-template/pallets/template/src/lib.rs | 10 +++--- client/rpc-api/src/state/mod.rs | 2 +- frame/examples/basic/src/benchmarking.rs | 2 +- frame/examples/basic/src/lib.rs | 2 +- frame/remark/src/tests.rs | 3 ++ frame/sudo/README.md | 2 +- frame/sudo/src/lib.rs | 2 +- .../procedural/src/pallet/expand/error.rs | 2 +- .../procedural/src/pallet/expand/event.rs | 2 +- .../procedural/src/pallet/expand/mod.rs | 2 +- .../src/pallet/expand/pallet_struct.rs | 2 +- frame/support/src/dispatch.rs | 2 +- frame/support/src/traits/misc.rs | 2 +- primitives/runtime/src/bounded/bounded_vec.rs | 2 +- .../runtime/src/bounded/weak_bounded_vec.rs | 2 +- 18 files changed, 39 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index b716794428a00..c609641af7ce2 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ Substrate is a next-generation framework for blockchain innovation 🚀. 
## Trying it out Simply go to [docs.substrate.io](https://docs.substrate.io) and follow the -[installation](https://docs.substrate.io/v3/getting-started/overview) instructions. You can +[installation](https://docs.substrate.io/main-docs/install/) instructions. You can also try out one of the [tutorials](https://docs.substrate.io/tutorials/). ## Contributions & Code of Conduct diff --git a/bin/node-template/README.md b/bin/node-template/README.md index 8defb870fa1b0..0f6fd9450aeee 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -114,7 +114,7 @@ local node template. ### Multi-Node Local Testnet If you want to see the multi-node consensus algorithm in action, refer to our -[Start a Private Network tutorial](https://docs.substrate.io/tutorials/v3/private-network). +[Simulate a network tutorial](https://docs.substrate.io/tutorials/get-started/simulate-network/). ## Template Structure @@ -129,7 +129,7 @@ Substrate-based blockchain nodes expose a number of capabilities: - Networking: Substrate nodes use the [`libp2p`](https://libp2p.io/) networking stack to allow the nodes in the network to communicate with one another. - Consensus: Blockchains must have a way to come to - [consensus](https://docs.substrate.io/v3/advanced/consensus) on the state of the + [consensus](https://docs.substrate.io/main-docs/fundamentals/consensus/) on the state of the network. Substrate makes it possible to supply custom consensus engines and also ships with several consensus mechanisms that have been built on top of [Web3 Foundation research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). @@ -138,22 +138,20 @@ Substrate-based blockchain nodes expose a number of capabilities: There are several files in the `node` directory - take special note of the following: - [`chain_spec.rs`](./node/src/chain_spec.rs): A - [chain specification](https://docs.substrate.io/v3/runtime/chain-specs) is a + [chain specification](https://docs.substrate.io/main-docs/build/chain-spec/) is a source code file that defines a Substrate chain's initial (genesis) state. Chain specifications are useful for development and testing, and critical when architecting the launch of a production chain. Take note of the `development_config` and `testnet_genesis` functions, which are used to define the genesis state for the local development chain configuration. These functions identify some - [well-known accounts](https://docs.substrate.io/v3/tools/subkey#well-known-keys) + [well-known accounts](https://docs.substrate.io/reference/command-line-tools/subkey/) and use them to configure the blockchain's initial state. - [`service.rs`](./node/src/service.rs): This file defines the node implementation. Take note of the libraries that this file imports and the names of the functions it invokes. In particular, there are references to consensus-related topics, such as the - [longest chain rule](https://docs.substrate.io/v3/advanced/consensus#longest-chain-rule), - the [Aura](https://docs.substrate.io/v3/advanced/consensus#aura) block authoring - mechanism and the - [GRANDPA](https://docs.substrate.io/v3/advanced/consensus#grandpa) finality - gadget. + [block finalization and forks](https://docs.substrate.io/main-docs/fundamentals/consensus/#finalization-and-forks) + and other [consensus mechanisms](https://docs.substrate.io/main-docs/fundamentals/consensus/#default-consensus-models) + such as Aura for block authoring and GRANDPA for finality. 
After the node has been [built](#build), refer to the embedded documentation to learn more about the capabilities and configuration parameters that it exposes: @@ -165,16 +163,15 @@ capabilities and configuration parameters that it exposes: ### Runtime In Substrate, the terms -"[runtime](https://docs.substrate.io/v3/getting-started/glossary#runtime)" and -"[state transition function](https://docs.substrate.io/v3/getting-started/glossary#state-transition-function-stf)" +"runtime" and "state transition function" are analogous - they refer to the core logic of the blockchain that is responsible for validating blocks and executing the state changes they define. The Substrate project in this repository uses -the [FRAME](https://docs.substrate.io/v3/runtime/frame) framework to construct a +[FRAME](https://docs.substrate.io/main-docs/fundamentals/runtime-intro/#frame) to construct a blockchain runtime. FRAME allows runtime developers to declare domain-specific logic in modules called "pallets". At the heart of FRAME is a helpful -[macro language](https://docs.substrate.io/v3/runtime/macros) that makes it easy to +[macro language](https://docs.substrate.io/reference/frame-macros/) that makes it easy to create pallets and flexibly compose them to create blockchains that can address -[a variety of needs](https://www.substrate.io/substrate-users/). +[a variety of needs](https://substrate.io/ecosystem/projects/). Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this template and note the following: @@ -184,8 +181,7 @@ the following: - The pallets are composed into a single runtime by way of the [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) macro, which is part of the core - [FRAME Support](https://docs.substrate.io/v3/runtime/frame#support-crate) - library. + FRAME Support [system](https://docs.substrate.io/reference/frame-pallets/#system-pallets) library. ### Pallets @@ -196,12 +192,12 @@ template pallet that is [defined in the `pallets`](./pallets/template/src/lib.rs A FRAME pallet is compromised of a number of blockchain primitives: - Storage: FRAME defines a rich set of powerful - [storage abstractions](https://docs.substrate.io/v3/runtime/storage) that makes + [storage abstractions](https://docs.substrate.io/main-docs/build/runtime-storage/) that makes it easy to use Substrate's efficient key-value database to manage the evolving state of a blockchain. - Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) from outside of the runtime in order to update its state. -- Events: Substrate uses [events and errors](https://docs.substrate.io/v3/runtime/events-and-errors) +- Events: Substrate uses [events and errors](https://docs.substrate.io/main-docs/build/events-errors/) to notify users of important changes in the runtime. - Errors: When a dispatchable fails, it returns an error. - Config: The `Config` configuration interface is used to define the types and parameters upon diff --git a/bin/node-template/docs/rust-setup.md b/bin/node-template/docs/rust-setup.md index ea133ca847af7..2755966e3ae0f 100644 --- a/bin/node-template/docs/rust-setup.md +++ b/bin/node-template/docs/rust-setup.md @@ -3,7 +3,7 @@ title: Installation --- This guide is for reference only, please check the latest information on getting starting with Substrate -[here](https://docs.substrate.io/v3/getting-started/installation/). +[here](https://docs.substrate.io/main-docs/install/). 
This page will guide you through the **2 steps** needed to prepare a computer for **Substrate** development. Since Substrate is built with [the Rust programming language](https://www.rust-lang.org/), the first @@ -73,11 +73,11 @@ brew install openssl ### Windows -**_PLEASE NOTE:_** Native development of Substrate is _not_ very well supported! It is _highly_ +**_PLEASE NOTE:_** Native Windows development of Substrate is _not_ very well supported! It is _highly_ recommend to use [Windows Subsystem Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10) (WSL) and follow the instructions for [Ubuntu/Debian](#ubuntudebian). Please refer to the separate -[guide for native Windows development](https://docs.substrate.io/v3/getting-started/windows-users/). +[guide for native Windows development](https://docs.substrate.io/main-docs/install/windows/). ## Rust developer environment diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 067c7ce2575a0..2c337146859ef 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -2,7 +2,7 @@ /// Edit this file to define custom logic or remove it if it is not needed. /// Learn more about FRAME and the core library of Substrate FRAME pallets: -/// +/// pub use pallet::*; #[cfg(test)] @@ -31,15 +31,15 @@ pub mod pallet { pub struct Pallet(_); // The pallet's runtime storage items. - // https://docs.substrate.io/v3/runtime/storage + // https://docs.substrate.io/main-docs/build/runtime-storage/ #[pallet::storage] #[pallet::getter(fn something)] // Learn more about declaring storage items: - // https://docs.substrate.io/v3/runtime/storage#declaring-storage-items + // https://docs.substrate.io/main-docs/build/runtime-storage/#declaring-storage-items pub type Something = StorageValue<_, u32>; // Pallets use events to inform users when important changes are made. - // https://docs.substrate.io/v3/runtime/events-and-errors + // https://docs.substrate.io/main-docs/build/events-errors/ #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -68,7 +68,7 @@ pub mod pallet { pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. - // https://docs.substrate.io/v3/runtime/origins + // https://docs.substrate.io/main-docs/build/origins/ let who = ensure_signed(origin)?; // Update storage. diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 54bf21674a8bd..40e208c2eba8d 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -265,7 +265,7 @@ pub trait StateApi { /// [substrate storage][1], [transparent keys in substrate][2], /// [querying substrate storage via rpc][3]. 
/// - /// [1]: https://docs.substrate.io/v3/advanced/storage#storage-map-keys + /// [1]: https://docs.substrate.io/main-docs/fundamentals/state-transitions-and-storage/ /// [2]: https://www.shawntabrizi.com/substrate/transparent-keys-in-substrate/ /// [3]: https://www.shawntabrizi.com/substrate/querying-substrate-storage-via-rpc/ /// diff --git a/frame/examples/basic/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs index d7b933577ead5..93e14f358208e 100644 --- a/frame/examples/basic/src/benchmarking.rs +++ b/frame/examples/basic/src/benchmarking.rs @@ -26,7 +26,7 @@ use frame_system::RawOrigin; // To actually run this benchmark on pallet-example-basic, we need to put this pallet into the // runtime and compile it with `runtime-benchmarks` feature. The detail procedures are // documented at: -// https://docs.substrate.io/v3/runtime/benchmarking#how-to-benchmark +// https://docs.substrate.io/reference/how-to-guides/weights/add-benchmarks/ // // The auto-generated weight estimate of this pallet is copied over to the `weights.rs` file. // The exact command of how the estimate generated is printed at the top of the file. diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index f8acc1962388f..03dc2c613c01e 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -318,7 +318,7 @@ const MILLICENTS: u32 = 1_000_000_000; // - assigns a dispatch class `operational` if the argument of the call is more than 1000. // // More information can be read at: -// - https://docs.substrate.io/v3/runtime/weights-and-fees +// - https://docs.substrate.io/main-docs/build/tx-weights-fees/ // // Manually configuring weight is an advanced operation and what you really need may well be // fulfilled by running the benchmarking toolchain. Refer to `benchmarking.rs` file. diff --git a/frame/remark/src/tests.rs b/frame/remark/src/tests.rs index 60a376c5afca5..eac006054c1d5 100644 --- a/frame/remark/src/tests.rs +++ b/frame/remark/src/tests.rs @@ -30,11 +30,14 @@ fn generates_event() { System::set_block_number(System::block_number() + 1); //otherwise event won't be registered. assert_ok!(Remark::::store(RawOrigin::Signed(caller.clone()).into(), data.clone(),)); let events = System::events(); + // this one we create as we expect it let system_event: ::Event = Event::Stored { content_hash: sp_io::hashing::blake2_256(&data).into(), sender: caller, } .into(); + // this one we actually go into the system pallet and get the last event + // because we know its there from block +1 let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); }); diff --git a/frame/sudo/README.md b/frame/sudo/README.md index e8f688091e326..7342832d2d7a7 100644 --- a/frame/sudo/README.md +++ b/frame/sudo/README.md @@ -72,6 +72,6 @@ You need to set an initial superuser account as the sudo `key`. [`Call`]: ./enum.Call.html [`Config`]: ./trait.Config.html -[`Origin`]: https://docs.substrate.io/v3/runtime/origins +[`Origin`]: https://docs.substrate.io/main-docs/build/origins/ License: Apache-2.0 diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index d9e72b37f2970..a47b5a79bd017 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -88,7 +88,7 @@ //! //! * [Democracy](../pallet_democracy/index.html) //! -//! [`Origin`]: https://docs.substrate.io/v3/runtime/origins +//! 
[`Origin`]: https://docs.substrate.io/main-docs/build/origins/ #![cfg_attr(not(feature = "std"), no_std)] diff --git a/frame/support/procedural/src/pallet/expand/error.rs b/frame/support/procedural/src/pallet/expand/error.rs index 124e8b312ce39..5a8487b09de5c 100644 --- a/frame/support/procedural/src/pallet/expand/error.rs +++ b/frame/support/procedural/src/pallet/expand/error.rs @@ -111,7 +111,7 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&error_item.attrs).is_empty() { error_item.attrs.push(syn::parse_quote!( #[doc = r" - Custom [dispatch errors](https://docs.substrate.io/v3/runtime/events-and-errors) + Custom [dispatch errors](https://docs.substrate.io/main-docs/build/events-errors/) of this pallet. "] )); diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index acd60ab959c61..791f930302207 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -98,7 +98,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&event_item.attrs).is_empty() { event_item.attrs.push(syn::parse_quote!( #[doc = r" - The [event](https://docs.substrate.io/v3/runtime/events-and-errors) emitted + The [event](https://docs.substrate.io/main-docs/build/events-errors/) emitted by this pallet. "] )); diff --git a/frame/support/procedural/src/pallet/expand/mod.rs b/frame/support/procedural/src/pallet/expand/mod.rs index 83bef7a97af1f..c33d2386700b2 100644 --- a/frame/support/procedural/src/pallet/expand/mod.rs +++ b/frame/support/procedural/src/pallet/expand/mod.rs @@ -74,7 +74,7 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { def.item.attrs.push(syn::parse_quote!( #[doc = r" The module that hosts all the - [FRAME](https://docs.substrate.io/v3/runtime/frame) + [FRAME](https://docs.substrate.io/main-docs/build/events-errors/) types needed to add this pallet to a runtime. "] diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index 52586a70a521a..a4a8acc10f799 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -62,7 +62,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&pallet_item.attrs).is_empty() { pallet_item.attrs.push(syn::parse_quote!( #[doc = r" - The [pallet](https://docs.substrate.io/v3/runtime/frame#pallets) implementing + The [pallet](https://docs.substrate.io/reference/frame-pallets/#pallets) implementing the on-chain logic. "] )); diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index ae4230efc63f8..2b05478e0b02a 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -306,7 +306,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug + /// /// The following are reserved function signatures: /// -/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.io/v3/runtime/events-and-errors). +/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.io/main-docs/build/events-errors/). /// The default behavior is to call `deposit_event` from the [System /// module](../frame_system/index.html). However, you can write your own implementation for events /// in your runtime. 
To use the default behavior, add `fn deposit_event() = default;` to your diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index ccbb47909d5f4..ddb7f6f41b378 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -763,7 +763,7 @@ impl MaxEncodedLen for WrapperOpaque { fn max_encoded_len() -> usize { let t_max_len = T::max_encoded_len(); - // See scale encoding https://docs.substrate.io/v3/advanced/scale-codec + // See scale encoding: https://docs.substrate.io/reference/scale-codec/ if t_max_len < 64 { t_max_len + 1 } else if t_max_len < 2usize.pow(14) { diff --git a/primitives/runtime/src/bounded/bounded_vec.rs b/primitives/runtime/src/bounded/bounded_vec.rs index 10d9fc608c273..d5f3f2da0d615 100644 --- a/primitives/runtime/src/bounded/bounded_vec.rs +++ b/primitives/runtime/src/bounded/bounded_vec.rs @@ -847,7 +847,7 @@ where fn max_encoded_len() -> usize { // BoundedVec encodes like Vec which encodes like [T], which is a compact u32 // plus each item in the slice: - // https://docs.substrate.io/v3/advanced/scale-codec + // See: https://docs.substrate.io/reference/scale-codec/ codec::Compact(S::get()) .encoded_size() .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) diff --git a/primitives/runtime/src/bounded/weak_bounded_vec.rs b/primitives/runtime/src/bounded/weak_bounded_vec.rs index ed9f4bba62b55..a447e7285f906 100644 --- a/primitives/runtime/src/bounded/weak_bounded_vec.rs +++ b/primitives/runtime/src/bounded/weak_bounded_vec.rs @@ -443,7 +443,7 @@ where fn max_encoded_len() -> usize { // WeakBoundedVec encodes like Vec which encodes like [T], which is a compact u32 // plus each item in the slice: - // https://docs.substrate.io/v3/advanced/scale-codec + // See: https://docs.substrate.io/reference/scale-codec/ codec::Compact(S::get()) .encoded_size() .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) From 744eda47f545e31083ee722abb3a286f91a7f98d Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 3 Aug 2022 12:35:47 +0100 Subject: [PATCH 004/348] nit improvements to pallet template (#11968) --- bin/node-template/pallets/template/src/lib.rs | 8 ++++---- bin/node-template/pallets/template/src/mock.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 2c337146859ef..f1519efe062bd 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -19,6 +19,10 @@ pub mod pallet { use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + /// Configure the pallet by specifying the parameters and types on which it depends. #[pallet::config] pub trait Config: frame_system::Config { @@ -26,10 +30,6 @@ pub mod pallet { type Event: From> + IsType<::Event>; } - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); - // The pallet's runtime storage items. 
// https://docs.substrate.io/main-docs/build/runtime-storage/ #[pallet::storage] diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index 8721fe6c78851..e03f37b2eea69 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -17,8 +17,8 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - TemplateModule: pallet_template::{Pallet, Call, Storage, Event}, + System: frame_system, + TemplateModule: pallet_template, } ); From 614126401211afc95f43b62a80f83f45d38f9279 Mon Sep 17 00:00:00 2001 From: lucasvanmol Date: Thu, 4 Aug 2022 00:16:24 -0700 Subject: [PATCH 005/348] Update node-template's docker-compose.yml (#11802) * Update docker-compose.yml * Use production image Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Co-authored-by: parity-processbot <> --- bin/node-template/docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node-template/docker-compose.yml b/bin/node-template/docker-compose.yml index cfc4437bbae41..bc1922f47d963 100644 --- a/bin/node-template/docker-compose.yml +++ b/bin/node-template/docker-compose.yml @@ -3,7 +3,7 @@ version: "3.2" services: dev: container_name: node-template - image: paritytech/ci-linux:974ba3ac-20201006 + image: paritytech/ci-linux:production working_dir: /var/www/node-template ports: - "9944:9944" From 317808ab8920e261840013cc0e522b47a6c0d57d Mon Sep 17 00:00:00 2001 From: ZhiYong Date: Thu, 4 Aug 2022 15:47:52 +0800 Subject: [PATCH 006/348] Beefy: use VersionedFinalityProof instead of SignedCommitment (#11962) * beefy: use VersionedFinalityProof instead of SignedCommitment. * Change the exposed RPC API to support versioned proofs. Co-authored-by: Adrian Catangiu --- client/beefy/rpc/src/lib.rs | 51 ++++++++++---------- client/beefy/rpc/src/notification.rs | 12 ++--- client/beefy/src/import.rs | 26 +++++------ client/beefy/src/justification.rs | 53 ++++++++++++--------- client/beefy/src/lib.rs | 14 +++--- client/beefy/src/notification.rs | 21 +++++---- client/beefy/src/tests.rs | 50 +++++++++++--------- client/beefy/src/worker.rs | 70 +++++++++++++++------------- 8 files changed, 157 insertions(+), 140 deletions(-) diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 91ff59324bd95..3be182ceb8f39 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -35,7 +35,7 @@ use jsonrpsee::{ }; use log::warn; -use beefy_gadget::notification::{BeefyBestBlockStream, BeefySignedCommitmentStream}; +use beefy_gadget::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}; mod notification; @@ -101,7 +101,7 @@ pub trait BeefyApi { /// Implements the BeefyApi RPC trait for interacting with BEEFY. pub struct Beefy { - signed_commitment_stream: BeefySignedCommitmentStream, + finality_proof_stream: BeefyVersionedFinalityProofStream, beefy_best_block: Arc>>, executor: SubscriptionTaskExecutor, } @@ -112,7 +112,7 @@ where { /// Creates a new Beefy Rpc handler instance. 
pub fn new( - signed_commitment_stream: BeefySignedCommitmentStream, + finality_proof_stream: BeefyVersionedFinalityProofStream, best_block_stream: BeefyBestBlockStream, executor: SubscriptionTaskExecutor, ) -> Result { @@ -126,20 +126,21 @@ where }); executor.spawn("substrate-rpc-subscription", Some("rpc"), future.map(drop).boxed()); - Ok(Self { signed_commitment_stream, beefy_best_block, executor }) + Ok(Self { finality_proof_stream, beefy_best_block, executor }) } } #[async_trait] -impl BeefyApiServer for Beefy +impl BeefyApiServer + for Beefy where Block: BlockT, { fn subscribe_justifications(&self, mut sink: SubscriptionSink) -> SubscriptionResult { let stream = self - .signed_commitment_stream + .finality_proof_stream .subscribe() - .map(|sc| notification::EncodedSignedCommitment::new::(sc)); + .map(|vfp| notification::EncodedVersionedFinalityProof::new::(vfp)); let fut = async move { sink.pipe_from_stream(stream).await; @@ -164,31 +165,31 @@ mod tests { use super::*; use beefy_gadget::{ - justification::BeefySignedCommitment, - notification::{BeefyBestBlockStream, BeefySignedCommitmentSender}, + justification::BeefyVersionedFinalityProof, + notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofSender}, }; - use beefy_primitives::{known_payload_ids, Payload}; + use beefy_primitives::{known_payload_ids, Payload, SignedCommitment}; use codec::{Decode, Encode}; use jsonrpsee::{types::EmptyParams, RpcModule}; use sp_runtime::traits::{BlakeTwo256, Hash}; use substrate_test_runtime_client::runtime::Block; - fn setup_io_handler() -> (RpcModule>, BeefySignedCommitmentSender) { + fn setup_io_handler() -> (RpcModule>, BeefyVersionedFinalityProofSender) { let (_, stream) = BeefyBestBlockStream::::channel(); setup_io_handler_with_best_block_stream(stream) } fn setup_io_handler_with_best_block_stream( best_block_stream: BeefyBestBlockStream, - ) -> (RpcModule>, BeefySignedCommitmentSender) { - let (commitment_sender, commitment_stream) = - BeefySignedCommitmentStream::::channel(); + ) -> (RpcModule>, BeefyVersionedFinalityProofSender) { + let (finality_proof_sender, finality_proof_stream) = + BeefyVersionedFinalityProofStream::::channel(); let handler = - Beefy::new(commitment_stream, best_block_stream, sc_rpc::testing::test_executor()) + Beefy::new(finality_proof_stream, best_block_stream, sc_rpc::testing::test_executor()) .expect("Setting up the BEEFY RPC handler works"); - (handler.into_rpc(), commitment_sender) + (handler.into_rpc(), finality_proof_sender) } #[tokio::test] @@ -262,21 +263,21 @@ mod tests { assert_eq!(response.result, expected); } - fn create_commitment() -> BeefySignedCommitment { + fn create_finality_proof() -> BeefyVersionedFinalityProof { let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); - BeefySignedCommitment:: { + BeefyVersionedFinalityProof::::V1(SignedCommitment { commitment: beefy_primitives::Commitment { payload, block_number: 5, validator_set_id: 0, }, signatures: vec![], - } + }) } #[tokio::test] async fn subscribe_and_listen_to_one_justification() { - let (rpc, commitment_sender) = setup_io_handler(); + let (rpc, finality_proof_sender) = setup_io_handler(); // Subscribe let mut sub = rpc @@ -284,16 +285,16 @@ mod tests { .await .unwrap(); - // Notify with commitment - let commitment = create_commitment(); - let r: Result<(), ()> = commitment_sender.notify(|| Ok(commitment.clone())); + // Notify with finality_proof + let finality_proof = create_finality_proof(); + let r: Result<(), ()> = finality_proof_sender.notify(|| 
Ok(finality_proof.clone())); r.unwrap(); // Inspect what we received let (bytes, recv_sub_id) = sub.next::().await.unwrap().unwrap(); - let recv_commitment: BeefySignedCommitment = + let recv_finality_proof: BeefyVersionedFinalityProof = Decode::decode(&mut &bytes[..]).unwrap(); assert_eq!(&recv_sub_id, sub.subscription_id()); - assert_eq!(recv_commitment, commitment); + assert_eq!(recv_finality_proof, finality_proof); } } diff --git a/client/beefy/rpc/src/notification.rs b/client/beefy/rpc/src/notification.rs index cdda667782dd5..a815425644d52 100644 --- a/client/beefy/rpc/src/notification.rs +++ b/client/beefy/rpc/src/notification.rs @@ -21,19 +21,19 @@ use serde::{Deserialize, Serialize}; use sp_runtime::traits::Block as BlockT; -/// An encoded signed commitment proving that the given header has been finalized. +/// An encoded finality proof proving that the given header has been finalized. /// The given bytes should be the SCALE-encoded representation of a -/// `beefy_primitives::SignedCommitment`. +/// `beefy_primitives::VersionedFinalityProof`. #[derive(Clone, Serialize, Deserialize)] -pub struct EncodedSignedCommitment(sp_core::Bytes); +pub struct EncodedVersionedFinalityProof(sp_core::Bytes); -impl EncodedSignedCommitment { +impl EncodedVersionedFinalityProof { pub fn new( - signed_commitment: beefy_gadget::justification::BeefySignedCommitment, + finality_proof: beefy_gadget::justification::BeefyVersionedFinalityProof, ) -> Self where Block: BlockT, { - EncodedSignedCommitment(signed_commitment.encode().into()) + EncodedVersionedFinalityProof(finality_proof.encode().into()) } } diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs index 7caeb49db5e50..129484199de89 100644 --- a/client/beefy/src/import.rs +++ b/client/beefy/src/import.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use beefy_primitives::{crypto::Signature, BeefyApi, VersionedFinalityProof, BEEFY_ENGINE_ID}; +use beefy_primitives::{BeefyApi, BEEFY_ENGINE_ID}; use codec::Encode; use log::error; use std::{collections::HashMap, sync::Arc}; @@ -34,7 +34,8 @@ use sc_client_api::backend::Backend; use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult}; use crate::{ - justification::decode_and_verify_commitment, notification::BeefySignedCommitmentSender, + justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, + notification::BeefyVersionedFinalityProofSender, }; /// A block-import handler for BEEFY. @@ -47,7 +48,7 @@ pub struct BeefyBlockImport { backend: Arc, runtime: Arc, inner: I, - justification_sender: BeefySignedCommitmentSender, + justification_sender: BeefyVersionedFinalityProofSender, } impl Clone for BeefyBlockImport { @@ -67,7 +68,7 @@ impl BeefyBlockImport { backend: Arc, runtime: Arc, inner: I, - justification_sender: BeefySignedCommitmentSender, + justification_sender: BeefyVersionedFinalityProofSender, ) -> BeefyBlockImport { BeefyBlockImport { backend, runtime, inner, justification_sender } } @@ -85,7 +86,7 @@ where encoded: &EncodedJustification, number: NumberFor, hash: ::Hash, - ) -> Result, Signature>, ConsensusError> { + ) -> Result, ConsensusError> { let block_id = BlockId::hash(hash); let validator_set = self .runtime @@ -94,7 +95,7 @@ where .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
.ok_or_else(|| ConsensusError::ClientImport("Unknown validator set".to_string()))?; - decode_and_verify_commitment::(&encoded[..], number, &validator_set) + decode_and_verify_finality_proof::(&encoded[..], number, &validator_set) } /// Import BEEFY justification: Send it to worker for processing and also append it to backend. @@ -105,7 +106,7 @@ where fn import_beefy_justification_unchecked( &self, number: NumberFor, - justification: VersionedFinalityProof, Signature>, + justification: BeefyVersionedFinalityProof, ) { // Append the justification to the block in the backend. if let Err(e) = self.backend.append_justification( @@ -115,14 +116,9 @@ where error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, justification); } // Send the justification to the BEEFY voter for processing. - match justification { - // TODO #11838: Should not unpack, these channels should also use - // `VersionedFinalityProof`. - VersionedFinalityProof::V1(signed_commitment) => self - .justification_sender - .notify(|| Ok::<_, ()>(signed_commitment)) - .expect("forwards closure result; the closure always returns Ok; qed."), - }; + self.justification_sender + .notify(|| Ok::<_, ()>(justification)) + .expect("forwards closure result; the closure always returns Ok; qed."); } } diff --git a/client/beefy/src/justification.rs b/client/beefy/src/justification.rs index 2a5191daec4b5..d9be18593dac7 100644 --- a/client/beefy/src/justification.rs +++ b/client/beefy/src/justification.rs @@ -25,17 +25,17 @@ use codec::{Decode, Encode}; use sp_consensus::Error as ConsensusError; use sp_runtime::traits::{Block as BlockT, NumberFor}; -/// A commitment with matching BEEFY authorities' signatures. -pub type BeefySignedCommitment = - beefy_primitives::SignedCommitment, beefy_primitives::crypto::Signature>; +/// A finality proof with matching BEEFY authorities' signatures. +pub type BeefyVersionedFinalityProof = + beefy_primitives::VersionedFinalityProof, Signature>; -/// Decode and verify a Beefy SignedCommitment. -pub(crate) fn decode_and_verify_commitment( +/// Decode and verify a Beefy FinalityProof. 
+pub(crate) fn decode_and_verify_finality_proof( encoded: &[u8], target_number: NumberFor, validator_set: &ValidatorSet, -) -> Result, Signature>, ConsensusError> { - let proof = , Signature>>::decode(&mut &*encoded) +) -> Result, ConsensusError> { + let proof = >::decode(&mut &*encoded) .map_err(|_| ConsensusError::InvalidJustification)?; verify_with_validator_set::(target_number, validator_set, &proof).map(|_| proof) } @@ -44,7 +44,7 @@ pub(crate) fn decode_and_verify_commitment( fn verify_with_validator_set( target_number: NumberFor, validator_set: &ValidatorSet, - proof: &VersionedFinalityProof, Signature>, + proof: &BeefyVersionedFinalityProof, ) -> Result<(), ConsensusError> { match proof { VersionedFinalityProof::V1(signed_commitment) => { @@ -80,17 +80,19 @@ fn verify_with_validator_set( #[cfg(test)] pub(crate) mod tests { - use beefy_primitives::{known_payload_ids, Commitment, Payload, SignedCommitment}; + use beefy_primitives::{ + known_payload_ids, Commitment, Payload, SignedCommitment, VersionedFinalityProof, + }; use substrate_test_runtime_client::runtime::Block; use super::*; use crate::{keystore::tests::Keyring, tests::make_beefy_ids}; - pub(crate) fn new_signed_commitment( + pub(crate) fn new_finality_proof( block_num: NumberFor, validator_set: &ValidatorSet, keys: &[Keyring], - ) -> BeefySignedCommitment { + ) -> BeefyVersionedFinalityProof { let commitment = Commitment { payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), block_number: block_num, @@ -98,7 +100,7 @@ pub(crate) mod tests { }; let message = commitment.encode(); let signatures = keys.iter().map(|key| Some(key.sign(&message))).collect(); - SignedCommitment { commitment, signatures } + VersionedFinalityProof::V1(SignedCommitment { commitment, signatures }) } #[test] @@ -108,7 +110,7 @@ pub(crate) mod tests { // build valid justification let block_num = 42; - let proof = new_signed_commitment(block_num, &validator_set, keys); + let proof = new_finality_proof(block_num, &validator_set, keys); let good_proof = proof.clone().into(); // should verify successfully @@ -132,7 +134,10 @@ pub(crate) mod tests { // wrong signatures length -> should fail verification let mut bad_proof = proof.clone(); // change length of signatures - bad_proof.signatures.pop().flatten().unwrap(); + let bad_signed_commitment = match bad_proof { + VersionedFinalityProof::V1(ref mut sc) => sc, + }; + bad_signed_commitment.signatures.pop().flatten().unwrap(); match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { Err(ConsensusError::InvalidJustification) => (), _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), @@ -140,8 +145,11 @@ pub(crate) mod tests { // not enough signatures -> should fail verification let mut bad_proof = proof.clone(); + let bad_signed_commitment = match bad_proof { + VersionedFinalityProof::V1(ref mut sc) => sc, + }; // remove a signature (but same length) - *bad_proof.signatures.first_mut().unwrap() = None; + *bad_signed_commitment.signatures.first_mut().unwrap() = None; match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { Err(ConsensusError::InvalidJustification) => (), _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), @@ -149,9 +157,12 @@ pub(crate) mod tests { // not enough _correct_ signatures -> should fail verification let mut bad_proof = proof.clone(); + let bad_signed_commitment = match bad_proof { + VersionedFinalityProof::V1(ref mut sc) => sc, + }; // change a signature to a different key 
- *bad_proof.signatures.first_mut().unwrap() = - Some(Keyring::Dave.sign(&proof.commitment.encode())); + *bad_signed_commitment.signatures.first_mut().unwrap() = + Some(Keyring::Dave.sign(&bad_signed_commitment.commitment.encode())); match verify_with_validator_set::(block_num + 1, &validator_set, &bad_proof.into()) { Err(ConsensusError::InvalidJustification) => (), _ => assert!(false, "Expected Err(ConsensusError::InvalidJustification)"), @@ -159,19 +170,19 @@ pub(crate) mod tests { } #[test] - fn should_decode_and_verify_commitment() { + fn should_decode_and_verify_finality_proof() { let keys = &[Keyring::Alice, Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let block_num = 1; // build valid justification - let proof = new_signed_commitment(block_num, &validator_set, keys); - let versioned_proof: VersionedFinalityProof, Signature> = proof.into(); + let proof = new_finality_proof(block_num, &validator_set, keys); + let versioned_proof: BeefyVersionedFinalityProof = proof.into(); let encoded = versioned_proof.encode(); // should successfully decode and verify let verified = - decode_and_verify_commitment::(&encoded, block_num, &validator_set).unwrap(); + decode_and_verify_finality_proof::(&encoded, block_num, &validator_set).unwrap(); assert_eq!(verified, versioned_proof); } } diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 81c72dec8cd08..bdf44e056786c 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -46,8 +46,8 @@ mod tests; use crate::{ import::BeefyBlockImport, notification::{ - BeefyBestBlockSender, BeefyBestBlockStream, BeefySignedCommitmentSender, - BeefySignedCommitmentStream, + BeefyBestBlockSender, BeefyBestBlockStream, BeefyVersionedFinalityProofSender, + BeefyVersionedFinalityProofStream, }, }; @@ -121,11 +121,11 @@ where pub struct BeefyVoterLinks { // BlockImport -> Voter links /// Stream of BEEFY signed commitments from block import to voter. - pub from_block_import_justif_stream: BeefySignedCommitmentStream, + pub from_block_import_justif_stream: BeefyVersionedFinalityProofStream, // Voter -> RPC links /// Sends BEEFY signed commitments from voter to RPC. - pub to_rpc_justif_sender: BeefySignedCommitmentSender, + pub to_rpc_justif_sender: BeefyVersionedFinalityProofSender, /// Sends BEEFY best block hashes from voter to RPC. pub to_rpc_best_block_sender: BeefyBestBlockSender, } @@ -134,7 +134,7 @@ pub struct BeefyVoterLinks { #[derive(Clone)] pub struct BeefyRPCLinks { /// Stream of signed commitments coming from the voter. - pub from_voter_justif_stream: BeefySignedCommitmentStream, + pub from_voter_justif_stream: BeefyVersionedFinalityProofStream, /// Stream of BEEFY best block hashes coming from the voter. 
pub from_voter_best_beefy_stream: BeefyBestBlockStream, } @@ -156,13 +156,13 @@ where { // Voter -> RPC links let (to_rpc_justif_sender, from_voter_justif_stream) = - notification::BeefySignedCommitmentStream::::channel(); + notification::BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = notification::BeefyBestBlockStream::::channel(); // BlockImport -> Voter links let (to_voter_justif_sender, from_block_import_justif_stream) = - notification::BeefySignedCommitmentStream::::channel(); + notification::BeefyVersionedFinalityProofStream::::channel(); // BlockImport let import = diff --git a/client/beefy/src/notification.rs b/client/beefy/src/notification.rs index 9479891714234..c673115e487f3 100644 --- a/client/beefy/src/notification.rs +++ b/client/beefy/src/notification.rs @@ -19,7 +19,7 @@ use sc_utils::notification::{NotificationSender, NotificationStream, TracingKeyStr}; use sp_runtime::traits::Block as BlockT; -use crate::justification::BeefySignedCommitment; +use crate::justification::BeefyVersionedFinalityProof; /// The sending half of the notifications channel(s) used to send /// notifications about best BEEFY block from the gadget side. @@ -31,13 +31,14 @@ pub type BeefyBestBlockStream = NotificationStream<::Hash, BeefyBestBlockTracingKey>; /// The sending half of the notifications channel(s) used to send notifications -/// about signed commitments generated at the end of a BEEFY round. -pub type BeefySignedCommitmentSender = NotificationSender>; +/// about versioned finality proof generated at the end of a BEEFY round. +pub type BeefyVersionedFinalityProofSender = + NotificationSender>; /// The receiving half of a notifications channel used to receive notifications -/// about signed commitments generated at the end of a BEEFY round. -pub type BeefySignedCommitmentStream = - NotificationStream, BeefySignedCommitmentTracingKey>; +/// about versioned finality proof generated at the end of a BEEFY round. +pub type BeefyVersionedFinalityProofStream = + NotificationStream, BeefyVersionedFinalityProofTracingKey>; /// Provides tracing key for BEEFY best block stream. #[derive(Clone)] @@ -46,9 +47,9 @@ impl TracingKeyStr for BeefyBestBlockTracingKey { const TRACING_KEY: &'static str = "mpsc_beefy_best_block_notification_stream"; } -/// Provides tracing key for BEEFY signed commitments stream. +/// Provides tracing key for BEEFY versioned finality proof stream. 
#[derive(Clone)] -pub struct BeefySignedCommitmentTracingKey; -impl TracingKeyStr for BeefySignedCommitmentTracingKey { - const TRACING_KEY: &'static str = "mpsc_beefy_signed_commitments_notification_stream"; +pub struct BeefyVersionedFinalityProofTracingKey; +impl TracingKeyStr for BeefyVersionedFinalityProofTracingKey { + const TRACING_KEY: &'static str = "mpsc_beefy_versioned_finality_proof_notification_stream"; } diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 9c8f443dd1f7e..134339009302b 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -397,17 +397,18 @@ fn run_for(duration: Duration, net: &Arc>, runtime: &mut Run pub(crate) fn get_beefy_streams( net: &mut BeefyTestNet, peers: &[BeefyKeyring], -) -> (Vec>, Vec>>) { +) -> (Vec>, Vec>>) +{ let mut best_block_streams = Vec::new(); - let mut signed_commitment_streams = Vec::new(); + let mut versioned_finality_proof_streams = Vec::new(); for peer_id in 0..peers.len() { let beefy_rpc_links = net.peer(peer_id).data.beefy_rpc_links.lock().clone().unwrap(); let BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream } = beefy_rpc_links; best_block_streams.push(from_voter_best_beefy_stream.subscribe()); - signed_commitment_streams.push(from_voter_justif_stream.subscribe()); + versioned_finality_proof_streams.push(from_voter_justif_stream.subscribe()); } - (best_block_streams, signed_commitment_streams) + (best_block_streams, versioned_finality_proof_streams) } fn wait_for_best_beefy_blocks( @@ -437,7 +438,7 @@ fn wait_for_best_beefy_blocks( } fn wait_for_beefy_signed_commitments( - streams: Vec>>, + streams: Vec>>, net: &Arc>, runtime: &mut Runtime, expected_commitment_block_nums: &[u64], @@ -446,9 +447,12 @@ fn wait_for_beefy_signed_commitments( let len = expected_commitment_block_nums.len(); streams.into_iter().for_each(|stream| { let mut expected = expected_commitment_block_nums.iter(); - wait_for.push(Box::pin(stream.take(len).for_each(move |signed_commitment| { + wait_for.push(Box::pin(stream.take(len).for_each(move |versioned_finality_proof| { let expected = expected.next(); async move { + let signed_commitment = match versioned_finality_proof { + beefy_primitives::VersionedFinalityProof::V1(sc) => sc, + }; let commitment_block_num = signed_commitment.commitment.block_number; assert_eq!(expected, Some(commitment_block_num).as_ref()); // TODO: also verify commitment payload, validator set id, and signatures. 
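The tests above now receive a versioned finality proof from the notification stream and reach the inner signed commitment through a match. A minimal, self-contained sketch of that relationship follows; the type shapes are simplified assumptions (the real definitions in beefy-primitives also carry a commitment payload), but it shows why a single-variant enum keeps every call site explicit about the proof version.

// Simplified, self-contained sketch (assumed type shapes; the real definitions
// live in beefy-primitives and also carry a commitment payload).
pub struct Commitment<N> {
    pub block_number: N,
    pub validator_set_id: u64,
}

pub struct SignedCommitment<N, S> {
    pub commitment: Commitment<N>,
    pub signatures: Vec<Option<S>>,
}

pub enum VersionedFinalityProof<N, S> {
    // Currently the only proof format; new formats would become new variants.
    V1(SignedCommitment<N, S>),
}

fn proof_block_number<N: Copy, S>(proof: &VersionedFinalityProof<N, S>) -> N {
    // The single-variant match is exhaustive today; adding a V2 later forces
    // every call site like this one to be revisited explicitly.
    match proof {
        VersionedFinalityProof::V1(sc) => sc.commitment.block_number,
    }
}

fn main() {
    let proof = VersionedFinalityProof::V1(SignedCommitment {
        commitment: Commitment { block_number: 42u64, validator_set_id: 0 },
        signatures: vec![None::<()>],
    });
    assert_eq!(proof_block_number(&proof), 42);
}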
@@ -486,7 +490,7 @@ fn finalize_block_and_wait_for_beefy( finalize_targets: &[u64], expected_beefy: &[u64], ) { - let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); for block in finalize_targets { let finalize = BlockId::number(*block); @@ -499,11 +503,11 @@ fn finalize_block_and_wait_for_beefy( // run for quarter second then verify no new best beefy block available let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, runtime, None); } else { // run until expected beefy blocks are received wait_for_best_beefy_blocks(best_blocks, &net, runtime, expected_beefy); - wait_for_beefy_signed_commitments(signed_commitments, &net, runtime, expected_beefy); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, runtime, expected_beefy); } } @@ -574,19 +578,19 @@ fn lagging_validators() { // Alice finalizes #25, Bob lags behind let finalize = BlockId::number(25); - let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // Bob catches up and also finalizes #25 - let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // expected beefy finalizes block #17 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]); - wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[23, 24, 25]); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]); // Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32 finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[30, 32], &[30, 31, 32]); @@ -596,20 +600,20 @@ fn lagging_validators() { // validator set). 
// Alice finalizes session-boundary mandatory block #60, Bob lags behind - let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); let finalize = BlockId::number(60); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) - let (best_blocks, signed_commitments) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // verify beefy skips intermediary votes, and successfully finalizes mandatory block #40 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); - wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[60]); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[60]); } #[test] @@ -647,7 +651,7 @@ fn correct_beefy_payload() { // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[1, 9]); - let (best_blocks, signed_commitments) = + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); // now 2 good validators and 1 bad one are voting @@ -673,10 +677,10 @@ fn correct_beefy_payload() { // verify consensus is _not_ reached let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); - streams_empty_after_timeout(signed_commitments, &net, &mut runtime, None); + streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // 3rd good validator catches up and votes as well - let (best_blocks, signed_commitments) = + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); net.lock() .peer(2) @@ -687,7 +691,7 @@ fn correct_beefy_payload() { // verify consensus is reached wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[11]); - wait_for_beefy_signed_commitments(signed_commitments, &net, &mut runtime, &[11]); + wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[11]); } #[test] @@ -746,7 +750,7 @@ fn beefy_importing_blocks() { let block_num = 2; let keys = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let proof = crate::justification::tests::new_signed_commitment(block_num, &validator_set, keys); + let proof = crate::justification::tests::new_finality_proof(block_num, &validator_set, keys); let versioned_proof: VersionedFinalityProof, Signature> = proof.into(); let encoded = versioned_proof.encode(); let justif = Some(Justifications::from((BEEFY_ENGINE_ID, encoded))); @@ -781,7 +785,7 @@ fn beefy_importing_blocks() { let block_num = 3; let keys = &[BeefyKeyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); - let proof = crate::justification::tests::new_signed_commitment(block_num, &validator_set, keys); 
+ let proof = crate::justification::tests::new_finality_proof(block_num, &validator_set, keys); let versioned_proof: VersionedFinalityProof, Signature> = proof.into(); let encoded = versioned_proof.encode(); let justif = Some(Justifications::from((BEEFY_ENGINE_ID, encoded))); diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 3bff0822ebdb4..2c4985c0e6966 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -49,7 +49,7 @@ use beefy_primitives::{ use crate::{ error::Error, gossip::{topic, GossipValidator}, - justification::BeefySignedCommitment, + justification::BeefyVersionedFinalityProof, keystore::BeefyKeystore, metric_inc, metric_set, metrics::Metrics, @@ -212,7 +212,7 @@ pub(crate) struct BeefyWorker { /// Buffer holding votes for future processing. pending_votes: BTreeMap, Vec, AuthorityId, Signature>>>, /// Buffer holding justifications for future processing. - pending_justifications: BTreeMap, Vec>>, + pending_justifications: BTreeMap, Vec>>, /// Chooses which incoming votes to accept and which votes to generate. voting_oracle: VoterOracle, } @@ -381,9 +381,12 @@ where /// Expects `justification` to be valid. fn triage_incoming_justif( &mut self, - justification: BeefySignedCommitment, + justification: BeefyVersionedFinalityProof, ) -> Result<(), Error> { - let block_num = justification.commitment.block_number; + let signed_commitment = match justification { + VersionedFinalityProof::V1(ref sc) => sc, + }; + let block_num = signed_commitment.commitment.block_number; let best_grandpa = *self.best_grandpa_block_header.number(); match self.voting_oracle.triage_round(block_num, best_grandpa)? { RoundAction::Process => self.finalize(justification), @@ -417,39 +420,39 @@ where validator_set_id: rounds.validator_set_id(), }; - let signed_commitment = SignedCommitment { commitment, signatures }; + let finality_proof = + VersionedFinalityProof::V1(SignedCommitment { commitment, signatures }); metric_set!(self, beefy_round_concluded, block_num); - info!(target: "beefy", "🥩 Round #{} concluded, committed: {:?}.", round.1, signed_commitment); + info!(target: "beefy", "🥩 Round #{} concluded, finality_proof: {:?}.", round.1, finality_proof); if let Err(e) = self.backend.append_justification( BlockId::Number(block_num), - ( - BEEFY_ENGINE_ID, - VersionedFinalityProof::V1(signed_commitment.clone()).encode(), - ), + (BEEFY_ENGINE_ID, finality_proof.clone().encode()), ) { - debug!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, signed_commitment); + debug!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof); } - // We created the `signed_commitment` and know to be valid. - self.finalize(signed_commitment); + // We created the `finality_proof` and know to be valid. + self.finalize(finality_proof); } } Ok(()) } - /// Provide BEEFY finality for block based on `signed_commitment`: + /// Provide BEEFY finality for block based on `finality_proof`: /// 1. Prune irrelevant past sessions from the oracle, /// 2. Set BEEFY best block, - /// 3. Send best block hash and `signed_commitment` to RPC worker. + /// 3. Send best block hash and `finality_proof` to RPC worker. /// - /// Expects `signed commitment` to be valid. - fn finalize(&mut self, signed_commitment: BeefySignedCommitment) { + /// Expects `finality proof` to be valid. + fn finalize(&mut self, finality_proof: BeefyVersionedFinalityProof) { // Prune any now "finalized" sessions from queue. 
self.voting_oracle.try_prune(); - + let signed_commitment = match finality_proof { + VersionedFinalityProof::V1(ref sc) => sc, + }; let block_num = signed_commitment.commitment.block_number; if Some(block_num) > self.best_beefy_block { // Set new best BEEFY block number. @@ -465,7 +468,7 @@ where self.links .to_rpc_justif_sender - .notify(|| Ok::<_, ()>(signed_commitment)) + .notify(|| Ok::<_, ()>(finality_proof)) .expect("forwards closure result; the closure always returns Ok; qed."); } else { debug!(target: "beefy", "🥩 Can't set best beefy to older: {}", block_num); @@ -832,7 +835,7 @@ pub(crate) mod tests { use super::*; use crate::{ keystore::tests::Keyring, - notification::{BeefyBestBlockStream, BeefySignedCommitmentStream}, + notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, tests::{ create_beefy_keystore, get_beefy_streams, make_beefy_ids, two_validators::TestApi, BeefyPeer, BeefyTestNet, BEEFY_PROTOCOL_NAME, @@ -859,10 +862,11 @@ pub(crate) mod tests { let keystore = create_beefy_keystore(*key); let (to_rpc_justif_sender, from_voter_justif_stream) = - BeefySignedCommitmentStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = BeefyBestBlockStream::::channel(); - let (_, from_block_import_justif_stream) = BeefySignedCommitmentStream::::channel(); + let (_, from_block_import_justif_stream) = + BeefyVersionedFinalityProofStream::::channel(); let beefy_rpc_links = BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream }; @@ -1168,37 +1172,37 @@ pub(crate) mod tests { let mut net = BeefyTestNet::new(1, 0); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - let (mut best_block_streams, mut signed_commitments) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - let mut signed_commitments = signed_commitments.drain(..).next().unwrap(); + let mut finality_proof = finality_proofs.drain(..).next().unwrap(); - let create_signed_commitment = |block_num: NumberFor| { + let create_finality_proof = |block_num: NumberFor| { let commitment = Commitment { payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), block_number: block_num, validator_set_id: validator_set.id(), }; - SignedCommitment { commitment, signatures: vec![None] } + VersionedFinalityProof::V1(SignedCommitment { commitment, signatures: vec![None] }) }; - // no 'best beefy block' or signed commitments + // no 'best beefy block' or finality proofs assert_eq!(worker.best_beefy_block, None); block_on(poll_fn(move |cx| { assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); - assert_eq!(signed_commitments.poll_next_unpin(cx), Poll::Pending); + assert_eq!(finality_proof.poll_next_unpin(cx), Poll::Pending); Poll::Ready(()) })); // unknown hash for block #1 - let (mut best_block_streams, mut signed_commitments) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - let mut signed_commitments = signed_commitments.drain(..).next().unwrap(); - let justif = create_signed_commitment(1); + let mut finality_proof = finality_proofs.drain(..).next().unwrap(); + let justif = create_finality_proof(1); worker.finalize(justif.clone()); assert_eq!(worker.best_beefy_block, Some(1)); block_on(poll_fn(move |cx| { 
assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); - match signed_commitments.poll_next_unpin(cx) { + match finality_proof.poll_next_unpin(cx) { // expect justification Poll::Ready(Some(received)) => assert_eq!(received, justif), v => panic!("unexpected value: {:?}", v), @@ -1211,7 +1215,7 @@ pub(crate) mod tests { let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); net.generate_blocks(2, 10, &validator_set, false); - let justif = create_signed_commitment(2); + let justif = create_finality_proof(2); worker.finalize(justif); assert_eq!(worker.best_beefy_block, Some(2)); block_on(poll_fn(move |cx| { From 9ece574bd1472275eed212acf8c69ae49c38f57a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 4 Aug 2022 11:34:19 +0200 Subject: [PATCH 007/348] Timestamp: `set_timestamp` sets `DidUpdate` (#11960) * Timestamp: `set_timestamp` sets `DidUpdate` There exists the `set_timestamp` in the Timestamp pallet for setting the current timestamp. The problem is that it doesn't set `DidUpdate`. This results in `on_finalize` panicking. There is no real reason why the function doesn't also set `DidUpdate`. * Update frame/timestamp/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * Fix Babe tests Co-authored-by: Oliver Tale-Yazdi --- frame/babe/src/mock.rs | 2 +- frame/babe/src/tests.rs | 2 +- frame/timestamp/src/lib.rs | 2 ++ frame/timestamp/src/tests.rs | 5 ++--- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 5677eb7e28e49..c2ba3c2be06d8 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -429,7 +429,7 @@ pub fn generate_equivocation_proof( System::reset_events(); System::initialize(¤t_block, &parent_hash, &pre_digest); System::set_block_number(current_block); - Timestamp::set_timestamp(current_block); + Timestamp::set_timestamp(*current_slot * Babe::slot_duration()); System::finalize() }; diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 0859bb7a40849..ece0883387709 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -659,7 +659,7 @@ fn report_equivocation_invalid_equivocation_proof() { equivocation_proof.second_header = equivocation_proof.first_header.clone(); assert_invalid_equivocation(equivocation_proof); - // missing preruntime digest from one header + // missing pre-runtime digest from one header let mut equivocation_proof = generate_equivocation_proof( offending_validator_index as u32, &offending_authority_pair, diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 81ed67913c2e6..6a7f849d1329a 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -282,6 +282,8 @@ impl Pallet { #[cfg(any(feature = "runtime-benchmarks", feature = "std"))] pub fn set_timestamp(now: T::Moment) { Now::::put(now); + DidUpdate::::put(true); + >::on_timestamp_set(now); } } diff --git a/frame/timestamp/src/tests.rs b/frame/timestamp/src/tests.rs index f52ba7849c951..ef9fd6e39d4b5 100644 --- a/frame/timestamp/src/tests.rs +++ b/frame/timestamp/src/tests.rs @@ -23,7 +23,7 @@ use frame_support::assert_ok; #[test] fn timestamp_works() { new_test_ext().execute_with(|| { - Timestamp::set_timestamp(42); + crate::Now::::put(46); assert_ok!(Timestamp::set(Origin::none(), 69)); assert_eq!(Timestamp::now(), 69); assert_eq!(Some(69), get_captured_moment()); @@ -36,7 +36,6 @@ fn double_timestamp_should_fail() { new_test_ext().execute_with(|| { Timestamp::set_timestamp(42); assert_ok!(Timestamp::set(Origin::none(), 69)); 
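The commit body above notes that the old `set_timestamp` helper recorded the time but left `DidUpdate` unset, so block finalization panicked. A small standalone simulation of that invariant, using plain fields instead of the pallet's storage items (an assumption for illustration only), shows why recording the flag alongside the value removes the panic.

// Standalone simulation of the invariant behind the fix above; plain fields
// stand in for the pallet's `Now` and `DidUpdate` storage items (assumption).
struct TimestampState {
    now: u64,
    did_update: bool,
}

impl TimestampState {
    // Mirrors the fixed `set_timestamp` helper: record the time AND the flag.
    fn set_timestamp(&mut self, now: u64) {
        self.now = now;
        self.did_update = true;
    }

    // Mirrors `on_finalize`: take (read and clear) the flag, panic if unset.
    fn on_finalize(&mut self) {
        let updated = std::mem::take(&mut self.did_update);
        assert!(updated, "Timestamp must be updated once in the block");
    }
}

fn main() {
    let mut block = TimestampState { now: 0, did_update: false };
    block.set_timestamp(42);
    block.on_finalize(); // passes: the flag was set together with the value
    // Calling `on_finalize` again without another `set_timestamp` would panic,
    // which is what the old helper triggered by never setting the flag.
}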
- let _ = Timestamp::set(Origin::none(), 70); }); } @@ -46,7 +45,7 @@ fn double_timestamp_should_fail() { )] fn block_period_minimum_enforced() { new_test_ext().execute_with(|| { - Timestamp::set_timestamp(42); + crate::Now::::put(44); let _ = Timestamp::set(Origin::none(), 46); }); } From d09f5d92677c460bbb8b9b3e084bd9f3c1ffaf4c Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 4 Aug 2022 22:57:05 +0200 Subject: [PATCH 008/348] Prevent duplicated leaves in the backend (#11941) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Prevent duplicated leaves in the backend * Comments... * Use highest known heaf as a shortcut for not existing header detection * Apply code review suggestion Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- client/db/src/lib.rs | 44 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index e14d3a26aa557..0ce408a39b004 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1345,8 +1345,15 @@ impl Backend { let parent_hash = *pending_block.header.parent_hash(); let number = *pending_block.header.number(); + let highest_leaf = self + .blockchain + .leaves + .read() + .highest_leaf() + .map(|(n, _)| n) + .unwrap_or(Zero::zero()); let existing_header = - number <= best_num && self.blockchain.header(BlockId::hash(hash))?.is_some(); + number <= highest_leaf && self.blockchain.header(BlockId::hash(hash))?.is_some(); // blocks are keyed by number + hash. let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; @@ -3534,7 +3541,24 @@ pub(crate) mod tests { header.hash() }; - let block3_fork = insert_header_no_head(&backend, 3, block2, Default::default()); + let block3_fork = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + let header = Header { + number: 3, + parent_hash: block2, + state_root: BlakeTwo256::trie_root(Vec::new(), StateVersion::V1), + digest: Default::default(), + extrinsics_root: H256::from_low_u64_le(42), + }; + + op.set_block_data(header.clone(), Some(Vec::new()), None, None, NewBlockState::Normal) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + header.hash() + }; assert!(backend.have_state_at(&block1, 1)); assert!(backend.have_state_at(&block2, 2)); @@ -3556,4 +3580,20 @@ pub(crate) mod tests { assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]); assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0); } + + #[test] + fn test_no_duplicated_leaves_allowed() { + let backend: Backend = Backend::new_test(10, 10); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); + // Add block 2 not as the best block + let block2 = insert_header_no_head(&backend, 2, block1, Default::default()); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]); + assert_eq!(backend.blockchain().info().best_hash, block1); + + // Add block 2 as the best block + let block2 = insert_header(&backend, 2, block1, None, Default::default()); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]); + assert_eq!(backend.blockchain().info().best_hash, block2); + } } From 6527f0cc6e14f750d83a37f468e65a17d4088854 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 5 Aug 2022 09:50:57 +0300 Subject: [PATCH 009/348] Change on-the-wire protocol names to include 
genesis hash & fork id (#11938) * Rename transactions protocol to include genesis hash * Add protocol name generation to sc_network::utils * Use utils functions for transactions protocol name generation * Extract protocol name generation into public module * Use sc_network::protocol_name::standard_protocol_name() for BEEFY and GRANDPA * minor: add missing newline at EOF * Change block-announces protocol name to include genesis_hash & fork_id * Change protocol names to include genesis hash and fork id Protocols changed: - sync - state - light - sync/warp * Revert "Use sc_network::protocol_name::standard_protocol_name() for BEEFY and GRANDPA" This reverts commit 29aa556ef947e2cfd48264db2a9fc8bc528d7ddb. * Get rid of `protocol_name` module --- Cargo.lock | 2 + .../network/common/src/request_responses.rs | 3 ++ client/network/light/Cargo.toml | 1 + .../light/src/light_client_requests.rs | 23 +++++++++-- .../src/light_client_requests/handler.rs | 20 ++++++++-- client/network/src/config.rs | 6 ++- client/network/src/protocol.rs | 18 +++++++-- client/network/src/request_responses.rs | 10 ++++- client/network/src/service.rs | 13 +++++- client/network/src/service/tests.rs | 11 +++-- client/network/src/transactions.rs | 21 ++++++++-- client/network/sync/Cargo.toml | 1 + .../network/sync/src/block_request_handler.rs | 34 +++++++++++++--- .../network/sync/src/state_request_handler.rs | 40 +++++++++++++++---- .../network/sync/src/warp_request_handler.rs | 30 +++++++++++--- client/network/test/src/lib.rs | 21 +++++++--- client/service/src/builder.rs | 22 ++++++++-- 17 files changed, 226 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec5c5267142a9..703eeffce54a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8435,6 +8435,7 @@ name = "sc-network-light" version = "0.10.0-dev" dependencies = [ "futures", + "hex", "libp2p", "log", "parity-scale-codec", @@ -8455,6 +8456,7 @@ version = "0.10.0-dev" dependencies = [ "fork-tree", "futures", + "hex", "libp2p", "log", "lru", diff --git a/client/network/common/src/request_responses.rs b/client/network/common/src/request_responses.rs index 71570e6beb864..f409d1ac16d61 100644 --- a/client/network/common/src/request_responses.rs +++ b/client/network/common/src/request_responses.rs @@ -29,6 +29,9 @@ pub struct ProtocolConfig { /// Name of the protocol on the wire. Should be something like `/foo/bar`. pub name: Cow<'static, str>, + /// Fallback on the wire protocol names to support. + pub fallback_names: Vec>, + /// Maximum allowed size, in bytes, of a request. /// /// Any request larger than this value will be declined as a way to avoid allocating too diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml index 0037177fb4046..c1a0fb4759320 100644 --- a/client/network/light/Cargo.toml +++ b/client/network/light/Cargo.toml @@ -21,6 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" +hex = "0.4.0" libp2p = "0.46.1" log = "0.4.16" prost = "0.10" diff --git a/client/network/light/src/light_client_requests.rs b/client/network/light/src/light_client_requests.rs index 9eccef41e833d..b58426cf15992 100644 --- a/client/network/light/src/light_client_requests.rs +++ b/client/network/light/src/light_client_requests.rs @@ -25,16 +25,31 @@ use sc_network_common::{config::ProtocolId, request_responses::ProtocolConfig}; use std::time::Duration; -/// Generate the light client protocol name from chain specific protocol identifier. 
-fn generate_protocol_name(protocol_id: &ProtocolId) -> String { +/// Generate the light client protocol name from the genesis hash and fork id. +fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + if let Some(fork_id) = fork_id { + format!("/{}/{}/light/2", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/light/2", hex::encode(genesis_hash)) + } +} + +/// Generate the legacy light client protocol name from chain specific protocol identifier. +fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String { format!("/{}/light/2", protocol_id.as_ref()) } /// Generates a [`ProtocolConfig`] for the light client request protocol, refusing incoming /// requests. -pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { +pub fn generate_protocol_config>( + protocol_id: &ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, +) -> ProtocolConfig { ProtocolConfig { - name: generate_protocol_name(protocol_id).into(), + name: generate_protocol_name(genesis_hash, fork_id).into(), + fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) + .collect(), max_request_size: 1 * 1024 * 1024, max_response_size: 16 * 1024 * 1024, request_timeout: Duration::from_secs(15), diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 3c87ccfd6ed9f..727a9b0d7e820 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -28,7 +28,7 @@ use futures::{channel::mpsc, prelude::*}; use libp2p::PeerId; use log::{debug, trace}; use prost::Message; -use sc_client_api::{ProofProvider, StorageProof}; +use sc_client_api::{BlockBackend, ProofProvider, StorageProof}; use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, @@ -54,15 +54,27 @@ pub struct LightClientRequestHandler { impl LightClientRequestHandler where B: Block, - Client: ProofProvider + Send + Sync + 'static, + Client: BlockBackend + ProofProvider + Send + Sync + 'static, { /// Create a new [`LightClientRequestHandler`]. - pub fn new(protocol_id: &ProtocolId, client: Arc) -> (Self, ProtocolConfig) { + pub fn new( + protocol_id: &ProtocolId, + fork_id: Option<&str>, + client: Arc, + ) -> (Self, ProtocolConfig) { // For now due to lack of data on light client request handling in production systems, this // value is chosen to match the block request limit. let (tx, request_receiver) = mpsc::channel(20); - let mut protocol_config = super::generate_protocol_config(protocol_id); + let mut protocol_config = super::generate_protocol_config( + protocol_id, + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + fork_id, + ); protocol_config.inbound_queue = Some(tx); (Self { client, request_receiver, _block: PhantomData::default() }, protocol_config) diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 2622762da5fc9..afd61cae6c83b 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -87,9 +87,13 @@ where /// the network. pub transaction_pool: Arc>, - /// Name of the protocol to use on the wire. Should be different for each chain. + /// Legacy name of the protocol to use on the wire. Should be different for each chain. pub protocol_id: ProtocolId, + /// Fork ID to distinguish protocols of different hard forks. Part of the standard protocol + /// name on the wire. 
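Every handler touched by this patch derives its standard on-the-wire name from the hex-encoded genesis hash plus an optional fork id, and keeps the old `ProtocolId`-based spelling as a fallback. A standalone sketch of that convention follows; the generic `suffix` parameter is an assumption for illustration (each handler in the diff hard-codes its own suffix such as `light/2` or `sync/2`), and it uses the same `hex` crate the patch adds as a dependency.

// Standalone sketch of the naming convention; the generic `suffix` parameter
// is illustrative only, the handlers above repeat the format string each with
// their own suffix and version. Requires the `hex` crate (0.4), which the
// patch itself adds as a dependency.
fn standard_protocol_name(genesis_hash: &[u8], fork_id: Option<&str>, suffix: &str) -> String {
    match fork_id {
        Some(fork_id) => format!("/{}/{}/{}", hex::encode(genesis_hash), fork_id, suffix),
        None => format!("/{}/{}", hex::encode(genesis_hash), suffix),
    }
}

fn legacy_protocol_name(protocol_id: &str, suffix: &str) -> String {
    format!("/{}/{}", protocol_id, suffix)
}

fn main() {
    let genesis = [0xde, 0xad, 0xbe, 0xef];
    // Prints "/deadbeef/test-fork-id/light/2": the standard name peers use first.
    println!("{}", standard_protocol_name(&genesis, Some("test-fork-id"), "light/2"));
    // Prints "/dot/light/2": still advertised through `fallback_names`.
    println!("{}", legacy_protocol_name("dot", "light/2"));
}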
+ pub fork_id: Option, + /// Import queue to use. /// /// The import queue is the component that verifies that blocks received from other nodes are diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 1c933fabcbb5d..a06fff2322ce6 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -276,6 +276,7 @@ where roles: Roles, chain: Arc, protocol_id: ProtocolId, + fork_id: &Option, network_config: &config::NetworkConfiguration, notifications_protocols_handshakes: Vec>, metrics_registry: Option<&Registry>, @@ -371,8 +372,17 @@ where sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets }) }; - let block_announces_protocol: Cow<'static, str> = - format!("/{}/block-announces/1", protocol_id.as_ref()).into(); + let block_announces_protocol = { + let genesis_hash = + chain.block_hash(0u32.into()).ok().flatten().expect("Genesis block exists; qed"); + if let Some(fork_id) = fork_id { + format!("/{}/{}/block-announces/1", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/block-announces/1", hex::encode(genesis_hash)) + } + }; + + let legacy_ba_protocol_name = format!("/{}/block-announces/1", protocol_id.as_ref()); let behaviour = { let best_number = info.best_number; @@ -384,8 +394,8 @@ where .encode(); let sync_protocol_config = notifications::ProtocolConfig { - name: block_announces_protocol, - fallback_names: Vec::new(), + name: block_announces_protocol.into(), + fallback_names: iter::once(legacy_ba_protocol_name.into()).collect(), handshake: block_announces_handshake, max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, }; diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index cec4aa2a07fba..0d8c6c33b1c95 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -220,7 +220,9 @@ impl RequestResponsesBehaviour { max_request_size: protocol.max_request_size, max_response_size: protocol.max_response_size, }, - iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), + iter::once(protocol.name.as_bytes().to_vec()) + .chain(protocol.fallback_names.iter().map(|name| name.as_bytes().to_vec())) + .zip(iter::repeat(protocol_support)), cfg, ); @@ -1027,6 +1029,7 @@ mod tests { let protocol_config = ProtocolConfig { name: From::from(protocol_name), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1127,6 +1130,7 @@ mod tests { let protocol_config = ProtocolConfig { name: From::from(protocol_name), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 8, // <-- important for the test request_timeout: Duration::from_secs(30), @@ -1223,6 +1227,7 @@ mod tests { let protocol_configs = vec![ ProtocolConfig { name: From::from(protocol_name_1), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1230,6 +1235,7 @@ mod tests { }, ProtocolConfig { name: From::from(protocol_name_2), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1247,6 +1253,7 @@ mod tests { let protocol_configs = vec![ ProtocolConfig { name: From::from(protocol_name_1), + fallback_names: Vec::new(), max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), @@ -1254,6 +1261,7 @@ mod tests { }, ProtocolConfig { name: From::from(protocol_name_2), + fallback_names: Vec::new(), 
max_request_size: 1024, max_response_size: 1024 * 1024, request_timeout: Duration::from_secs(30), diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 409ed88c75c00..1210b0ca64224 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -224,8 +224,16 @@ where fs::create_dir_all(path)?; } - let transactions_handler_proto = - transactions::TransactionsHandlerPrototype::new(params.protocol_id.clone()); + let transactions_handler_proto = transactions::TransactionsHandlerPrototype::new( + params.protocol_id.clone(), + params + .chain + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + params.fork_id.clone(), + ); params .network_config .extra_sets @@ -243,6 +251,7 @@ where From::from(¶ms.role), params.chain.clone(), params.protocol_id.clone(), + ¶ms.fork_id, ¶ms.network_config, iter::once(Vec::new()) .chain( diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index de474ee8fe4d0..f757bf4891fbc 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -92,21 +92,25 @@ fn build_test_full_node( let protocol_id = ProtocolId::from("/test-protocol-name"); + let fork_id = Some(String::from("test-fork-id")); + let block_request_protocol_config = { - let (handler, protocol_config) = BlockRequestHandler::new(&protocol_id, client.clone(), 50); + let (handler, protocol_config) = + BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); async_std::task::spawn(handler.run().boxed()); protocol_config }; let state_request_protocol_config = { - let (handler, protocol_config) = StateRequestHandler::new(&protocol_id, client.clone(), 50); + let (handler, protocol_config) = + StateRequestHandler::new(&protocol_id, None, client.clone(), 50); async_std::task::spawn(handler.run().boxed()); protocol_config }; let light_client_request_protocol_config = { let (handler, protocol_config) = - LightClientRequestHandler::new(&protocol_id, client.clone()); + LightClientRequestHandler::new(&protocol_id, None, client.clone()); async_std::task::spawn(handler.run().boxed()); protocol_config }; @@ -134,6 +138,7 @@ fn build_test_full_node( chain: client.clone(), transaction_pool: Arc::new(config::EmptyTransactionPool), protocol_id, + fork_id, import_queue, chain_sync: Box::new(chain_sync), metrics_registry: None, diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 043bdeff7ebfc..342b6a0430272 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -127,19 +127,34 @@ impl Future for PendingTransaction { /// Prototype for a [`TransactionsHandler`]. pub struct TransactionsHandlerPrototype { protocol_name: Cow<'static, str>, + fallback_protocol_names: Vec>, } impl TransactionsHandlerPrototype { /// Create a new instance. 
- pub fn new(protocol_id: ProtocolId) -> Self { - Self { protocol_name: format!("/{}/transactions/1", protocol_id.as_ref()).into() } + pub fn new>( + protocol_id: ProtocolId, + genesis_hash: Hash, + fork_id: Option, + ) -> Self { + let protocol_name = if let Some(fork_id) = fork_id { + format!("/{}/{}/transactions/1", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/transactions/1", hex::encode(genesis_hash)) + }; + let legacy_protocol_name = format!("/{}/transactions/1", protocol_id.as_ref()); + + Self { + protocol_name: protocol_name.into(), + fallback_protocol_names: iter::once(legacy_protocol_name.into()).collect(), + } } /// Returns the configuration of the set to put in the network configuration. pub fn set_config(&self) -> config::NonDefaultSetConfig { config::NonDefaultSetConfig { notifications_protocol: self.protocol_name.clone(), - fallback_names: Vec::new(), + fallback_names: self.fallback_protocol_names.clone(), max_notification_size: MAX_TRANSACTIONS_SIZE, set_config: config::SetConfig { in_peers: 0, diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 3e3526146400a..7c8f8adafd214 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -21,6 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" +hex = "0.4.0" libp2p = "0.46.1" log = "0.4.17" lru = "0.7.5" diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 30cbb32289d66..f4f10ac73c8d9 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -62,9 +62,15 @@ mod rep { } /// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. -pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { +pub fn generate_protocol_config>( + protocol_id: &ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, +) -> ProtocolConfig { ProtocolConfig { - name: generate_protocol_name(protocol_id).into(), + name: generate_protocol_name(genesis_hash, fork_id).into(), + fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) + .collect(), max_request_size: 1024 * 1024, max_response_size: 16 * 1024 * 1024, request_timeout: Duration::from_secs(20), @@ -72,8 +78,17 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { } } -/// Generate the block protocol name from chain specific protocol identifier. -fn generate_protocol_name(protocol_id: &ProtocolId) -> String { +/// Generate the block protocol name from the genesis hash and fork id. +fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + if let Some(fork_id) = fork_id { + format!("/{}/{}/sync/2", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/sync/2", hex::encode(genesis_hash)) + } +} + +/// Generate the legacy block protocol name from chain specific protocol identifier. +fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String { format!("/{}/sync/2", protocol_id.as_ref()) } @@ -129,6 +144,7 @@ where /// Create a new [`BlockRequestHandler`]. pub fn new( protocol_id: &ProtocolId, + fork_id: Option<&str>, client: Arc, num_peer_hint: usize, ) -> (Self, ProtocolConfig) { @@ -136,7 +152,15 @@ where // number of peers. 
let (tx, request_receiver) = mpsc::channel(num_peer_hint); - let mut protocol_config = generate_protocol_config(protocol_id); + let mut protocol_config = generate_protocol_config( + protocol_id, + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + fork_id, + ); protocol_config.inbound_queue = Some(tx); let seen_requests = LruCache::new(num_peer_hint * 2); diff --git a/client/network/sync/src/state_request_handler.rs b/client/network/sync/src/state_request_handler.rs index 8e0bae14046da..6cf6482a44f8b 100644 --- a/client/network/sync/src/state_request_handler.rs +++ b/client/network/sync/src/state_request_handler.rs @@ -27,7 +27,7 @@ use libp2p::PeerId; use log::{debug, trace}; use lru::LruCache; use prost::Message; -use sc_client_api::ProofProvider; +use sc_client_api::{BlockBackend, ProofProvider}; use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, @@ -50,10 +50,16 @@ mod rep { pub const SAME_REQUEST: Rep = Rep::new(i32::MIN, "Same state request multiple times"); } -/// Generates a [`ProtocolConfig`] for the block request protocol, refusing incoming requests. -pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { +/// Generates a [`ProtocolConfig`] for the state request protocol, refusing incoming requests. +pub fn generate_protocol_config>( + protocol_id: &ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, +) -> ProtocolConfig { ProtocolConfig { - name: generate_protocol_name(protocol_id).into(), + name: generate_protocol_name(genesis_hash, fork_id).into(), + fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) + .collect(), max_request_size: 1024 * 1024, max_response_size: 16 * 1024 * 1024, request_timeout: Duration::from_secs(40), @@ -61,8 +67,17 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { } } -/// Generate the state protocol name from chain specific protocol identifier. -fn generate_protocol_name(protocol_id: &ProtocolId) -> String { +/// Generate the state protocol name from the genesis hash and fork id. +fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + if let Some(fork_id) = fork_id { + format!("/{}/{}/state/2", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/state/2", hex::encode(genesis_hash)) + } +} + +/// Generate the legacy state protocol name from chain specific protocol identifier. +fn generate_legacy_protocol_name(protocol_id: &ProtocolId) -> String { format!("/{}/state/2", protocol_id.as_ref()) } @@ -104,11 +119,12 @@ pub struct StateRequestHandler { impl StateRequestHandler where B: BlockT, - Client: ProofProvider + Send + Sync + 'static, + Client: BlockBackend + ProofProvider + Send + Sync + 'static, { /// Create a new [`StateRequestHandler`]. pub fn new( protocol_id: &ProtocolId, + fork_id: Option<&str>, client: Arc, num_peer_hint: usize, ) -> (Self, ProtocolConfig) { @@ -116,7 +132,15 @@ where // number of peers. 
let (tx, request_receiver) = mpsc::channel(num_peer_hint); - let mut protocol_config = generate_protocol_config(protocol_id); + let mut protocol_config = generate_protocol_config( + protocol_id, + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + fork_id, + ); protocol_config.inbound_queue = Some(tx); let seen_requests = LruCache::new(num_peer_hint * 2); diff --git a/client/network/sync/src/warp_request_handler.rs b/client/network/sync/src/warp_request_handler.rs index 53ec216a1e668..394bc68449099 100644 --- a/client/network/sync/src/warp_request_handler.rs +++ b/client/network/sync/src/warp_request_handler.rs @@ -36,9 +36,15 @@ const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024; /// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing /// incoming requests. -pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig { +pub fn generate_request_response_config>( + protocol_id: ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, +) -> RequestResponseConfig { RequestResponseConfig { - name: generate_protocol_name(protocol_id).into(), + name: generate_protocol_name(genesis_hash, fork_id).into(), + fallback_names: std::iter::once(generate_legacy_protocol_name(protocol_id).into()) + .collect(), max_request_size: 32, max_response_size: MAX_RESPONSE_SIZE, request_timeout: Duration::from_secs(10), @@ -46,8 +52,17 @@ pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestRespo } } -/// Generate the grandpa warp sync protocol name from chain specific protocol identifier. -fn generate_protocol_name(protocol_id: ProtocolId) -> String { +/// Generate the grandpa warp sync protocol name from the genesi hash and fork id. +fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + if let Some(fork_id) = fork_id { + format!("/{}/{}/sync/warp", hex::encode(genesis_hash), fork_id) + } else { + format!("/{}/sync/warp", hex::encode(genesis_hash)) + } +} + +/// Generate the legacy grandpa warp sync protocol name from chain specific protocol identifier. +fn generate_legacy_protocol_name(protocol_id: ProtocolId) -> String { format!("/{}/sync/warp", protocol_id.as_ref()) } @@ -59,13 +74,16 @@ pub struct RequestHandler { impl RequestHandler { /// Create a new [`RequestHandler`]. 
- pub fn new( + pub fn new>( protocol_id: ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, backend: Arc>, ) -> (Self, RequestResponseConfig) { let (tx, request_receiver) = mpsc::channel(20); - let mut request_response_config = generate_request_response_config(protocol_id); + let mut request_response_config = + generate_request_response_config(protocol_id, genesis_hash, fork_id); request_response_config.inbound_queue = Some(tx); (Self { backend, request_receiver }, request_response_config) diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 4659684987f77..494ff2048f6c4 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -808,23 +808,25 @@ where let protocol_id = ProtocolId::from("test-protocol-name"); + let fork_id = Some(String::from("test-fork-id")); + let block_request_protocol_config = { let (handler, protocol_config) = - BlockRequestHandler::new(&protocol_id, client.clone(), 50); + BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); self.spawn_task(handler.run().boxed()); protocol_config }; let state_request_protocol_config = { let (handler, protocol_config) = - StateRequestHandler::new(&protocol_id, client.clone(), 50); + StateRequestHandler::new(&protocol_id, None, client.clone(), 50); self.spawn_task(handler.run().boxed()); protocol_config }; let light_client_request_protocol_config = { let (handler, protocol_config) = - LightClientRequestHandler::new(&protocol_id, client.clone()); + LightClientRequestHandler::new(&protocol_id, None, client.clone()); self.spawn_task(handler.run().boxed()); protocol_config }; @@ -832,8 +834,16 @@ where let warp_sync = Arc::new(TestWarpSyncProvider(client.clone())); let warp_protocol_config = { - let (handler, protocol_config) = - warp_request_handler::RequestHandler::new(protocol_id.clone(), warp_sync.clone()); + let (handler, protocol_config) = warp_request_handler::RequestHandler::new( + protocol_id.clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + None, + warp_sync.clone(), + ); self.spawn_task(handler.run().boxed()); protocol_config }; @@ -867,6 +877,7 @@ where chain: client.clone(), transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, + fork_id, import_queue, chain_sync: Box::new(chain_sync), metrics_registry: None, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index ec537a33b72d5..998ec12c1d05b 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -741,6 +741,7 @@ where // Allow both outgoing and incoming requests. let (handler, protocol_config) = BlockRequestHandler::new( &protocol_id, + config.chain_spec.fork_id(), client.clone(), config.network.default_peers_set.in_peers as usize + config.network.default_peers_set.out_peers as usize, @@ -753,6 +754,7 @@ where // Allow both outgoing and incoming requests. let (handler, protocol_config) = StateRequestHandler::new( &protocol_id, + config.chain_spec.fork_id(), client.clone(), config.network.default_peers_set_num_full as usize, ); @@ -763,8 +765,16 @@ where let (warp_sync_provider, warp_sync_protocol_config) = warp_sync .map(|provider| { // Allow both outgoing and incoming requests. 
- let (handler, protocol_config) = - WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone()); + let (handler, protocol_config) = WarpSyncRequestHandler::new( + protocol_id.clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + config.chain_spec.fork_id(), + provider.clone(), + ); spawn_handle.spawn("warp-sync-request-handler", Some("networking"), handler.run()); (Some(provider), Some(protocol_config)) }) @@ -772,8 +782,11 @@ where let light_client_request_protocol_config = { // Allow both outgoing and incoming requests. - let (handler, protocol_config) = - LightClientRequestHandler::new(&protocol_id, client.clone()); + let (handler, protocol_config) = LightClientRequestHandler::new( + &protocol_id, + config.chain_spec.fork_id(), + client.clone(), + ); spawn_handle.spawn("light-client-request-handler", Some("networking"), handler.run()); protocol_config }; @@ -808,6 +821,7 @@ where chain: client.clone(), transaction_pool: transaction_pool_adapter as _, protocol_id, + fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), From f64cd4fa987473ad1d05f409afe886d4a620782b Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Fri, 5 Aug 2022 12:48:05 +0300 Subject: [PATCH 010/348] `sync` protocol now can have negotiated fallback name (#11982) --- client/network/src/protocol.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index a06fff2322ce6..c51667017d15c 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1577,8 +1577,6 @@ where } => { // Set number 0 is hardcoded the default set of peers we sync from. if set_id == HARDCODED_PEERSETS_SYNC { - debug_assert!(negotiated_fallback.is_none()); - // `received_handshake` can be either a `Status` message if received from the // legacy substream ,or a `BlockAnnouncesHandshake` if received from the block // announces substream. From c172d0f683fab3792b90d876fd6ca27056af9fe9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 5 Aug 2022 19:53:56 +0200 Subject: [PATCH 011/348] system_syncState: Always return highest block (#11979) Before `highestBlock` was an optional that was omitted when it was `None`. We recently changed the way the `highestBlock` is determined, this resulted in having this value in 99.99% of the time being `None` when the node is syncing blocks at the tip. Now we always return a block for `highestBlock`. If sync doesn't return us any best seen block, we return our own local best block as `highestBlock`. This should mainly reflect the same behavior to before we changed the way the best seen block is determined. --- client/rpc-api/src/system/helpers.rs | 12 ++++++------ client/rpc/src/system/tests.rs | 7 ++----- client/service/src/lib.rs | 6 ++++-- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index 4561fccc1e81b..7ddb3f813c249 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -88,10 +88,10 @@ pub struct SyncState { pub starting_block: Number, /// Height of the current best block of the node. pub current_block: Number, - /// Height of the highest block learned from the network. Missing if no block is known yet. 
- #[serde(default = "Default::default", skip_serializing_if = "Option::is_none")] - pub highest_block: Option, + /// Height of the highest block in the network. + pub highest_block: Number, } + #[cfg(test)] mod tests { use super::*; @@ -129,7 +129,7 @@ mod tests { ::serde_json::to_string(&SyncState { starting_block: 12u32, current_block: 50u32, - highest_block: Some(128u32), + highest_block: 128u32, }) .unwrap(), r#"{"startingBlock":12,"currentBlock":50,"highestBlock":128}"#, @@ -139,10 +139,10 @@ mod tests { ::serde_json::to_string(&SyncState { starting_block: 12u32, current_block: 50u32, - highest_block: None, + highest_block: 50u32, }) .unwrap(), - r#"{"startingBlock":12,"currentBlock":50}"#, + r#"{"startingBlock":12,"currentBlock":50,"highestBlock":50}"#, ); } } diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 77acdf8418ccc..facad7a0b347a 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -123,7 +123,7 @@ fn api>>(sync: T) -> RpcModule> { let _ = sender.send(SyncState { starting_block: 1, current_block: 2, - highest_block: Some(3), + highest_block: 3, }); }, }; @@ -297,10 +297,7 @@ async fn system_node_roles() { async fn system_sync_state() { let sync_state: SyncState = api(None).call("system_syncState", EmptyParams::new()).await.unwrap(); - assert_eq!( - sync_state, - SyncState { starting_block: 1, current_block: 2, highest_block: Some(3) } - ); + assert_eq!(sync_state, SyncState { starting_block: 1, current_block: 2, highest_block: 3 }); } #[tokio::test] diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 98bcb17174157..1de45e5527eec 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -264,10 +264,12 @@ async fn build_network_future< sc_rpc::system::Request::SyncState(sender) => { use sc_rpc::system::SyncState; + let best_number = client.info().best_number; + let _ = sender.send(SyncState { starting_block, - current_block: client.info().best_number, - highest_block: network.best_seen_block(), + current_block: best_number, + highest_block: network.best_seen_block().unwrap_or(best_number), }); } } From 8574647f2a3df620ef32eddb050bfe2adb34aeab Mon Sep 17 00:00:00 2001 From: Nikos Kontakis Date: Mon, 8 Aug 2022 11:31:26 +0200 Subject: [PATCH 012/348] Rename --pruning and --keep-blocks to be more similar to one another (#11934) * rename prunning and keep-blocks flags * Add aliases in keep-blocks and pruning for backward compatibility * Rename in code variables from and to and --- bin/node/cli/benches/block_production.rs | 4 +- bin/node/cli/benches/transaction_pool.rs | 4 +- bin/node/testing/src/bench.rs | 2 +- client/cli/src/commands/chain_info_cmd.rs | 2 +- client/cli/src/config.rs | 12 +++--- client/cli/src/params/pruning_params.rs | 22 +++++------ client/db/src/lib.rs | 26 ++++++------- client/network/test/src/lib.rs | 8 ++-- client/network/test/src/sync.rs | 2 +- client/service/src/builder.rs | 2 +- client/service/src/config.rs | 4 +- client/service/src/lib.rs | 2 +- client/service/test/src/client/mod.rs | 8 ++-- client/service/test/src/lib.rs | 6 +-- client/state-db/src/lib.rs | 46 +++++++++++------------ test-utils/client/src/lib.rs | 8 ++-- 16 files changed, 79 insertions(+), 79 deletions(-) diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 6d269ccaac271..8420a65f8cb80 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -28,7 +28,7 @@ use sc_consensus::{ }; use 
sc_service::{ config::{ - DatabaseSource, KeepBlocks, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, + BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, PruningMode, WasmExecutionMethod, WasmtimeInstantiationStrategy, }, BasePath, Configuration, Role, @@ -75,7 +75,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { state_cache_size: 67108864, state_cache_child_ratio: None, state_pruning: Some(PruningMode::ArchiveAll), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, chain_spec: spec, wasm_method: WasmExecutionMethod::Compiled { instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index 580a10d6a6678..f031f9dae3d21 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -26,7 +26,7 @@ use node_primitives::AccountId; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_service::{ config::{ - DatabaseSource, KeepBlocks, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, + BlocksPruning, DatabaseSource, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, PruningMode, TransactionPoolOptions, WasmExecutionMethod, }, BasePath, Configuration, Role, @@ -69,7 +69,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { state_cache_size: 67108864, state_cache_child_ratio: None, state_pruning: Some(PruningMode::ArchiveAll), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, chain_spec: spec, wasm_method: WasmExecutionMethod::Interpreted, // NOTE: we enforce the use of the native runtime to make the errors more debuggable diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 18e979b95737f..de64910125b86 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -392,7 +392,7 @@ impl BenchDb { state_cache_child_ratio: Some((0, 100)), state_pruning: Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), - keep_blocks: sc_client_db::KeepBlocks::All, + blocks_pruning: sc_client_db::BlocksPruning::All, }; let task_executor = TaskExecutor::new(); diff --git a/client/cli/src/commands/chain_info_cmd.rs b/client/cli/src/commands/chain_info_cmd.rs index 0e57d1677efbb..c65092290cf2d 100644 --- a/client/cli/src/commands/chain_info_cmd.rs +++ b/client/cli/src/commands/chain_info_cmd.rs @@ -77,7 +77,7 @@ impl ChainInfoCmd { state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), state_pruning: config.state_pruning.clone(), source: config.database.clone(), - keep_blocks: config.keep_blocks.clone(), + blocks_pruning: config.blocks_pruning.clone(), }; let backend = sc_service::new_db_backend::(db_config)?; let info: ChainInfo = backend.blockchain().info().into(); diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 4ebbc8c72c19a..f513008b17adc 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -31,7 +31,7 @@ use sc_service::{ NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, Role, RpcMethods, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, }, - ChainSpec, KeepBlocks, TracingReceiver, + BlocksPruning, ChainSpec, TracingReceiver, }; use sc_tracing::logging::LoggerBuilder; use std::{net::SocketAddr, path::PathBuf}; @@ -257,11 +257,11 @@ pub trait CliConfiguration: Sized { /// Get the block pruning mode. 
/// /// By default this is retrieved from `block_pruning` if it is available. Otherwise its - /// `KeepBlocks::All`. - fn keep_blocks(&self) -> Result { + /// `BlocksPruning::All`. + fn blocks_pruning(&self) -> Result { self.pruning_params() - .map(|x| x.keep_blocks()) - .unwrap_or_else(|| Ok(KeepBlocks::All)) + .map(|x| x.blocks_pruning()) + .unwrap_or_else(|| Ok(BlocksPruning::All)) } /// Get the chain ID (string). @@ -536,7 +536,7 @@ pub trait CliConfiguration: Sized { state_cache_size: self.state_cache_size()?, state_cache_child_ratio: self.state_cache_child_ratio()?, state_pruning: self.state_pruning()?, - keep_blocks: self.keep_blocks()?, + blocks_pruning: self.blocks_pruning()?, wasm_method: self.wasm_method()?, wasm_runtime_overrides: self.wasm_runtime_overrides(), execution_strategies: self.execution_strategies(is_dev, is_validator)?, diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 9d471224cc59e..34a0982e63d95 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -18,7 +18,7 @@ use crate::error; use clap::Args; -use sc_service::{KeepBlocks, PruningMode}; +use sc_service::{BlocksPruning, PruningMode}; /// Parameters to define the pruning mode #[derive(Debug, Clone, PartialEq, Args)] @@ -28,37 +28,37 @@ pub struct PruningParams { /// Default is to keep only the last 256 blocks, /// otherwise, the state can be kept for all of the blocks (i.e 'archive'), /// or for all of the canonical blocks (i.e 'archive-canonical'). - #[clap(long, value_name = "PRUNING_MODE")] - pub pruning: Option, + #[clap(alias = "pruning", long, value_name = "PRUNING_MODE")] + pub state_pruning: Option, /// Specify the number of finalized blocks to keep in the database. /// /// Default is to keep all blocks. /// /// NOTE: only finalized blocks are subject for removal! - #[clap(long, value_name = "COUNT")] - pub keep_blocks: Option, + #[clap(alias = "keep-blocks", long, value_name = "COUNT")] + pub blocks_pruning: Option, } impl PruningParams { /// Get the pruning value from the parameters pub fn state_pruning(&self) -> error::Result> { - self.pruning + self.state_pruning .as_ref() .map(|s| match s.as_str() { "archive" => Ok(PruningMode::ArchiveAll), bc => bc .parse() .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string())) - .map(PruningMode::keep_blocks), + .map(PruningMode::blocks_pruning), }) .transpose() } /// Get the block pruning value from the parameters - pub fn keep_blocks(&self) -> error::Result { - Ok(match self.keep_blocks { - Some(n) => KeepBlocks::Some(n), - None => KeepBlocks::All, + pub fn blocks_pruning(&self) -> error::Result { + Ok(match self.blocks_pruning { + Some(n) => BlocksPruning::Some(n), + None => BlocksPruning::All, }) } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 0ce408a39b004..3214ee8f0d738 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -300,12 +300,12 @@ pub struct DatabaseSettings { /// Block pruning mode. /// /// NOTE: only finalized blocks are subject for removal! - pub keep_blocks: KeepBlocks, + pub blocks_pruning: BlocksPruning, } /// Block pruning settings. #[derive(Debug, Clone, Copy)] -pub enum KeepBlocks { +pub enum BlocksPruning { /// Keep full block history. All, /// Keep N recent finalized blocks. 
@@ -1012,7 +1012,7 @@ pub struct Backend { shared_cache: SharedCache, import_lock: Arc>, is_archive: bool, - keep_blocks: KeepBlocks, + blocks_pruning: BlocksPruning, io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, genesis_state: RwLock>>>, @@ -1043,21 +1043,21 @@ impl Backend { /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { - Self::new_test_with_tx_storage(keep_blocks, canonicalization_delay) + pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self { + Self::new_test_with_tx_storage(blocks_pruning, canonicalization_delay) } /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_with_tx_storage(keep_blocks: u32, canonicalization_delay: u64) -> Self { + pub fn new_test_with_tx_storage(blocks_pruning: u32, canonicalization_delay: u64) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); let db_setting = DatabaseSettings { state_cache_size: 16777216, state_cache_child_ratio: Some((50, 100)), - state_pruning: Some(PruningMode::keep_blocks(keep_blocks)), + state_pruning: Some(PruningMode::blocks_pruning(blocks_pruning)), source: DatabaseSource::Custom { db, require_create_flag: true }, - keep_blocks: KeepBlocks::Some(keep_blocks), + blocks_pruning: BlocksPruning::Some(blocks_pruning), }; Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") @@ -1124,7 +1124,7 @@ impl Backend { is_archive: is_archive_pruning, io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), state_usage: Arc::new(StateUsageStats::new()), - keep_blocks: config.keep_blocks, + blocks_pruning: config.blocks_pruning, genesis_state: RwLock::new(None), }; @@ -1698,9 +1698,9 @@ impl Backend { finalized: NumberFor, displaced: &FinalizationDisplaced>, ) -> ClientResult<()> { - if let KeepBlocks::Some(keep_blocks) = self.keep_blocks { + if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning { // Always keep the last finalized block - let keep = std::cmp::max(keep_blocks, 1); + let keep = std::cmp::max(blocks_pruning, 1); if finalized >= keep.into() { let number = finalized.saturating_sub(keep.into()); self.prune_block(transaction, BlockId::::number(number))?; @@ -2465,9 +2465,9 @@ pub(crate) mod tests { DatabaseSettings { state_cache_size: 16777216, state_cache_child_ratio: Some((50, 100)), - state_pruning: Some(PruningMode::keep_blocks(1)), + state_pruning: Some(PruningMode::blocks_pruning(1)), source: DatabaseSource::Custom { db: backing, require_create_flag: false }, - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, }, 0, ) diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 494ff2048f6c4..09b4139c213a6 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -676,7 +676,7 @@ pub struct FullPeerConfig { /// Pruning window size. /// /// NOTE: only finalized blocks are subject for removal! - pub keep_blocks: Option, + pub blocks_pruning: Option, /// Block announce validator. pub block_announce_validator: Option + Send + Sync>>, /// List of notification protocols that the network must support. @@ -742,10 +742,10 @@ where /// Add a full peer. 
fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { - let mut test_client_builder = match (config.keep_blocks, config.storage_chain) { - (Some(keep_blocks), true) => TestClientBuilder::with_tx_storage(keep_blocks), + let mut test_client_builder = match (config.blocks_pruning, config.storage_chain) { + (Some(blocks_pruning), true) => TestClientBuilder::with_tx_storage(blocks_pruning), (None, true) => TestClientBuilder::with_tx_storage(u32::MAX), - (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks), + (Some(blocks_pruning), false) => TestClientBuilder::with_pruning_window(blocks_pruning), (None, false) => TestClientBuilder::with_default_backend(), }; if let Some(storage) = config.extra_storage { diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 84a5c2ca13fa5..c0778767b75af 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -544,7 +544,7 @@ fn syncs_header_only_forks() { sp_tracing::try_init_simple(); let mut net = TestNet::new(0); net.add_full_peer_with_config(Default::default()); - net.add_full_peer_with_config(FullPeerConfig { keep_blocks: Some(3), ..Default::default() }); + net.add_full_peer_with_config(FullPeerConfig { blocks_pruning: Some(3), ..Default::default() }); net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 998ec12c1d05b..55c0006de33fb 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -207,7 +207,7 @@ where state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), state_pruning: config.state_pruning.clone(), source: config.database.clone(), - keep_blocks: config.keep_blocks, + blocks_pruning: config.blocks_pruning, }; let backend = new_db_backend(db_config)?; diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 0eeb6e05cee16..7860ff2281fe4 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -19,7 +19,7 @@ //! Service configuration. pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy}; -pub use sc_client_db::{Database, DatabaseSource, KeepBlocks, PruningMode}; +pub use sc_client_db::{BlocksPruning, Database, DatabaseSource, PruningMode}; pub use sc_executor::WasmExecutionMethod; #[cfg(feature = "wasmtime")] pub use sc_executor::WasmtimeInstantiationStrategy; @@ -79,7 +79,7 @@ pub struct Configuration { /// Number of blocks to keep in the db. /// /// NOTE: only finalized blocks are subject for removal! - pub keep_blocks: KeepBlocks, + pub blocks_pruning: BlocksPruning, /// Chain configuration. pub chain_spec: Box, /// Wasm execution method. 
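[Editor's aside, not part of the patch series] To see how the renamed knobs fit together after this change, here is a minimal sketch of building a DatabaseSettings value with the new names. The field set and constructors mirror the diffs above, but the exact struct layout depends on the Substrate revision in use, so treat this as an illustration under those assumptions rather than the canonical API. The CLI flag --state-pruning (formerly --pruning) and --blocks-pruning (formerly --keep-blocks) feed the two pruning fields shown below.

use sc_client_db::{BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode};

fn example_db_settings(path: std::path::PathBuf) -> DatabaseSettings {
    DatabaseSettings {
        // Cache sizes as used by the in-tree tests; tune for real deployments.
        state_cache_size: 16777216,
        state_cache_child_ratio: None,
        // Formerly driven by `--pruning`; keeps state for the last 256 blocks.
        state_pruning: Some(PruningMode::blocks_pruning(256)),
        // Formerly `KeepBlocks` / `--keep-blocks`; prunes block bodies once more
        // than 256 finalized blocks are behind the finalized head.
        blocks_pruning: BlocksPruning::Some(256),
        source: DatabaseSource::RocksDb { path, cache_size: 1024 },
    }
}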
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 1de45e5527eec..5291d219e8102 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -60,7 +60,7 @@ pub use self::{ error::Error, }; pub use config::{ - BasePath, Configuration, DatabaseSource, KeepBlocks, PruningMode, Role, RpcMethods, TaskType, + BasePath, BlocksPruning, Configuration, DatabaseSource, PruningMode, Role, RpcMethods, TaskType, }; pub use sc_chain_spec::{ ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension, diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 136efad088fae..f363b621d5bcd 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -23,7 +23,7 @@ use sc_block_builder::BlockBuilderProvider; use sc_client_api::{ in_mem, BlockBackend, BlockchainEvents, FinalityNotifications, StorageProvider, }; -use sc_client_db::{Backend, DatabaseSettings, DatabaseSource, KeepBlocks, PruningMode}; +use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; @@ -1200,7 +1200,7 @@ fn doesnt_import_blocks_that_revert_finality() { state_cache_size: 1 << 20, state_cache_child_ratio: None, state_pruning: Some(PruningMode::ArchiveAll), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, @@ -1426,8 +1426,8 @@ fn returns_status_for_pruned_blocks() { DatabaseSettings { state_cache_size: 1 << 20, state_cache_child_ratio: None, - state_pruning: Some(PruningMode::keep_blocks(1)), - keep_blocks: KeepBlocks::All, + state_pruning: Some(PruningMode::blocks_pruning(1)), + blocks_pruning: BlocksPruning::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 749c83c6eeac7..2d63362daffba 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -29,8 +29,8 @@ use sc_network::{ use sc_service::{ client::Client, config::{BasePath, DatabaseSource, KeystoreConfig}, - ChainSpecExtension, Configuration, Error, GenericChainSpec, KeepBlocks, Role, RuntimeGenesis, - SpawnTaskHandle, TaskManager, + BlocksPruning, ChainSpecExtension, Configuration, Error, GenericChainSpec, Role, + RuntimeGenesis, SpawnTaskHandle, TaskManager, }; use sc_transaction_pool_api::TransactionPool; use sp_api::BlockId; @@ -234,7 +234,7 @@ fn node_config< state_cache_size: 16777216, state_cache_child_ratio: None, state_pruning: Default::default(), - keep_blocks: KeepBlocks::All, + blocks_pruning: BlocksPruning::All, chain_spec: Box::new((*spec).clone()), wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, wasm_runtime_overrides: Default::default(), diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index d5cca9a342187..1c7140777e16e 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -227,7 +227,7 @@ pub enum PruningMode { impl PruningMode { /// Create a mode that keeps given number of blocks. 
- pub fn keep_blocks(n: u32) -> PruningMode { + pub fn blocks_pruning(n: u32) -> PruningMode { PruningMode::Constrained(Constraints { max_blocks: Some(n), max_mem: None }) } @@ -835,34 +835,34 @@ mod tests { #[test] fn pruning_mode_compatibility() { for (created, reopened, expected) in [ - (None, None, Ok(PruningMode::keep_blocks(256))), - (None, Some(PruningMode::keep_blocks(256)), Ok(PruningMode::keep_blocks(256))), - (None, Some(PruningMode::keep_blocks(128)), Ok(PruningMode::keep_blocks(128))), - (None, Some(PruningMode::keep_blocks(512)), Ok(PruningMode::keep_blocks(512))), + (None, None, Ok(PruningMode::blocks_pruning(256))), + (None, Some(PruningMode::blocks_pruning(256)), Ok(PruningMode::blocks_pruning(256))), + (None, Some(PruningMode::blocks_pruning(128)), Ok(PruningMode::blocks_pruning(128))), + (None, Some(PruningMode::blocks_pruning(512)), Ok(PruningMode::blocks_pruning(512))), (None, Some(PruningMode::ArchiveAll), Err(())), (None, Some(PruningMode::ArchiveCanonical), Err(())), - (Some(PruningMode::keep_blocks(256)), None, Ok(PruningMode::keep_blocks(256))), + (Some(PruningMode::blocks_pruning(256)), None, Ok(PruningMode::blocks_pruning(256))), ( - Some(PruningMode::keep_blocks(256)), - Some(PruningMode::keep_blocks(256)), - Ok(PruningMode::keep_blocks(256)), + Some(PruningMode::blocks_pruning(256)), + Some(PruningMode::blocks_pruning(256)), + Ok(PruningMode::blocks_pruning(256)), ), ( - Some(PruningMode::keep_blocks(256)), - Some(PruningMode::keep_blocks(128)), - Ok(PruningMode::keep_blocks(128)), + Some(PruningMode::blocks_pruning(256)), + Some(PruningMode::blocks_pruning(128)), + Ok(PruningMode::blocks_pruning(128)), ), ( - Some(PruningMode::keep_blocks(256)), - Some(PruningMode::keep_blocks(512)), - Ok(PruningMode::keep_blocks(512)), + Some(PruningMode::blocks_pruning(256)), + Some(PruningMode::blocks_pruning(512)), + Ok(PruningMode::blocks_pruning(512)), ), - (Some(PruningMode::keep_blocks(256)), Some(PruningMode::ArchiveAll), Err(())), - (Some(PruningMode::keep_blocks(256)), Some(PruningMode::ArchiveCanonical), Err(())), + (Some(PruningMode::blocks_pruning(256)), Some(PruningMode::ArchiveAll), Err(())), + (Some(PruningMode::blocks_pruning(256)), Some(PruningMode::ArchiveCanonical), Err(())), (Some(PruningMode::ArchiveAll), None, Ok(PruningMode::ArchiveAll)), - (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(256)), Err(())), - (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(128)), Err(())), - (Some(PruningMode::ArchiveAll), Some(PruningMode::keep_blocks(512)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(256)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(128)), Err(())), + (Some(PruningMode::ArchiveAll), Some(PruningMode::blocks_pruning(512)), Err(())), ( Some(PruningMode::ArchiveAll), Some(PruningMode::ArchiveAll), @@ -870,9 +870,9 @@ mod tests { ), (Some(PruningMode::ArchiveAll), Some(PruningMode::ArchiveCanonical), Err(())), (Some(PruningMode::ArchiveCanonical), None, Ok(PruningMode::ArchiveCanonical)), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(256)), Err(())), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(128)), Err(())), - (Some(PruningMode::ArchiveCanonical), Some(PruningMode::keep_blocks(512)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(256)), Err(())), + (Some(PruningMode::ArchiveCanonical), Some(PruningMode::blocks_pruning(128)), Err(())), + (Some(PruningMode::ArchiveCanonical), 
Some(PruningMode::blocks_pruning(512)), Err(())), (Some(PruningMode::ArchiveCanonical), Some(PruningMode::ArchiveAll), Err(())), ( Some(PruningMode::ArchiveCanonical), diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 148f34246044d..3115be58425e6 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -95,14 +95,14 @@ impl } /// Create new `TestClientBuilder` with default backend and pruning window size - pub fn with_pruning_window(keep_blocks: u32) -> Self { - let backend = Arc::new(Backend::new_test(keep_blocks, 0)); + pub fn with_pruning_window(blocks_pruning: u32) -> Self { + let backend = Arc::new(Backend::new_test(blocks_pruning, 0)); Self::with_backend(backend) } /// Create new `TestClientBuilder` with default backend and storage chain mode - pub fn with_tx_storage(keep_blocks: u32) -> Self { - let backend = Arc::new(Backend::new_test_with_tx_storage(keep_blocks, 0)); + pub fn with_tx_storage(blocks_pruning: u32) -> Self { + let backend = Arc::new(Backend::new_test_with_tx_storage(blocks_pruning, 0)); Self::with_backend(backend) } } From 4ad36db74762629c65d5c1af1084f7e7683bc7a8 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Mon, 8 Aug 2022 17:46:57 +0800 Subject: [PATCH 013/348] Fix 16bit func_id (#11985) --- frame/contracts/src/chain_extension.rs | 4 ++-- frame/contracts/src/tests.rs | 29 +++++++++++++------------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index 23242a2a542c1..57101668f794d 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -38,7 +38,7 @@ //! //! However, only extensions implementing [`RegisteredChainExtension`] can be put into a tuple. //! This is because the [`RegisteredChainExtension::ID`] is used to decide which of those extensions -//! should should be used when the contract calls a chain extensions. Extensions which are generally +//! should be used when the contract calls a chain extensions. Extensions which are generally //! useful should claim their `ID` with [the registry](https://github.com/paritytech/chainextension-registry) //! so that no collisions with other vendors will occur. //! @@ -215,7 +215,7 @@ where /// It returns the two least significant bytes of the `id` passed by a contract as the other /// two bytes represent the chain extension itself (the code which is calling this function). pub fn func_id(&self) -> u16 { - (self.inner.id & 0x00FF) as u16 + (self.inner.id & 0x0000FFFF) as u16 } /// The chain extension id within the `id` passed by a contract. 
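[Editor's aside, not part of the patch] The `id` a contract supplies is a u32 whose upper two bytes identify the chain extension and whose lower two bytes identify the function inside it; the old mask 0x00FF silently dropped the upper byte of the function id. Below is a self-contained sketch of the split this fix restores, using hypothetical helper names of my own rather than the pallet's actual methods.

fn ext_id(id: u32) -> u16 {
    // Upper 16 bits: which registered chain extension is being called.
    (id >> 16) as u16
}

fn func_id(id: u32) -> u16 {
    // Lower 16 bits: which function within that extension.
    // The pre-fix code masked with 0x00FF, truncating this to 8 bits.
    (id & 0x0000_FFFF) as u16
}

fn main() {
    // 0x8000..=0x8003 are the func_ids used by the updated tests below.
    let id = (1u32 << 16) | 0x8003;
    assert_eq!(ext_id(id), 1);
    assert_eq!(func_id(id), 0x8003);
}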
diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 0febfec929b6e..30417d8544489 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -158,14 +158,14 @@ impl ChainExtension for TestExtension { let func_id = env.func_id(); let id = env.ext_id() as u32 | func_id as u32; match func_id { - 0 => { + 0x8000 => { let mut env = env.buf_in_buf_out(); let input = env.read(8)?; env.write(&input, false, None)?; TEST_EXTENSION.with(|e| e.borrow_mut().last_seen_buffer = input); Ok(RetVal::Converging(id)) }, - 1 => { + 0x8001 => { let env = env.only_in(); TEST_EXTENSION.with(|e| { e.borrow_mut().last_seen_inputs = @@ -173,13 +173,13 @@ impl ChainExtension for TestExtension { }); Ok(RetVal::Converging(id)) }, - 2 => { + 0x8002 => { let mut env = env.buf_in_buf_out(); let weight = env.read(5)?[4].into(); env.charge_weight(weight)?; Ok(RetVal::Converging(id)) }, - 3 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), + 0x8003 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), _ => { panic!("Passed unknown id to test chain extension: {}", func_id); }, @@ -1646,21 +1646,22 @@ fn chain_extension_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - // 0 = read input buffer and pass it through as output - let input: Vec = ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into(); + // 0x8000 = read input buffer and pass it through as output + let input: Vec = + ExtensionInput { extension_id: 0, func_id: 0x8000, extra: &[99] }.into(); let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); assert_eq!(TestExtension::last_seen_buffer(), input); assert_eq!(result.result.unwrap().data, Bytes(input)); - // 1 = treat inputs as integer primitives and store the supplied integers + // 0x8001 = treat inputs as integer primitives and store the supplied integers Contracts::bare_call( ALICE, addr.clone(), 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into(), + ExtensionInput { extension_id: 0, func_id: 0x8001, extra: &[] }.into(), false, ) .result @@ -1668,14 +1669,14 @@ fn chain_extension_works() { // those values passed in the fixture assert_eq!(TestExtension::last_seen_inputs(), (4, 4, 16, 12)); - // 2 = charge some extra weight (amount supplied in the fifth byte) + // 0x8002 = charge some extra weight (amount supplied in the fifth byte) let result = Contracts::bare_call( ALICE, addr.clone(), 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &[0] }.into(), + ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[0] }.into(), false, ); assert_ok!(result.result); @@ -1686,7 +1687,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &[42] }.into(), + ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[42] }.into(), false, ); assert_ok!(result.result); @@ -1697,20 +1698,20 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 2, extra: &[95] }.into(), + ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[95] }.into(), false, ); assert_ok!(result.result); assert_eq!(result.gas_consumed, gas_consumed + 95); - // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer + // 0x8003 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer let result = Contracts::bare_call( ALICE, addr.clone(), 0, GAS_LIMIT, None, - 
ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into(), + ExtensionInput { extension_id: 0, func_id: 0x8003, extra: &[] }.into(), false, ) .result From ef890f5598e0d3fd1b6a5fcc8c3322f7570089d1 Mon Sep 17 00:00:00 2001 From: yjh Date: Mon, 8 Aug 2022 17:49:00 +0800 Subject: [PATCH 014/348] chore: fix typos and sort trait impls (#11989) --- frame/contracts/src/wasm/code_cache.rs | 6 +++--- frame/contracts/src/wasm/env_def/mod.rs | 6 +++--- frame/contracts/src/wasm/prepare.rs | 2 +- frame/contracts/src/wasm/runtime.rs | 18 +++++++++--------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 10de436bfb155..61826c7c323aa 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -215,7 +215,7 @@ impl Token for CodeToken { use self::CodeToken::*; // In case of `Load` we already covered the general costs of // calling the storage but still need to account for the actual size of the - // contract code. This is why we substract `T::*::(0)`. We need to do this at this + // contract code. This is why we subtract `T::*::(0)`. We need to do this at this // point because when charging the general weight for calling the contract we not know the // size of the contract. match *self { @@ -223,8 +223,8 @@ impl Token for CodeToken { Load(len) => { let computation = T::WeightInfo::call_with_code_per_byte(len) .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)); - let bandwith = T::ContractAccessWeight::get().saturating_mul(len.into()); - computation.max(bandwith) + let bandwidth = T::ContractAccessWeight::get().saturating_mul(len.into()); + computation.max(bandwidth) }, } } diff --git a/frame/contracts/src/wasm/env_def/mod.rs b/frame/contracts/src/wasm/env_def/mod.rs index b4c5ffe81e7c1..c904445f8aea8 100644 --- a/frame/contracts/src/wasm/env_def/mod.rs +++ b/frame/contracts/src/wasm/env_def/mod.rs @@ -31,8 +31,8 @@ pub trait ConvertibleToWasm: Sized { fn from_typed_value(_: Value) -> Option; } impl ConvertibleToWasm for i32 { - type NativeType = i32; const VALUE_TYPE: ValueType = ValueType::I32; + type NativeType = i32; fn to_typed_value(self) -> Value { Value::I32(self) } @@ -41,8 +41,8 @@ impl ConvertibleToWasm for i32 { } } impl ConvertibleToWasm for u32 { - type NativeType = u32; const VALUE_TYPE: ValueType = ValueType::I32; + type NativeType = u32; fn to_typed_value(self) -> Value { Value::I32(self as i32) } @@ -54,8 +54,8 @@ impl ConvertibleToWasm for u32 { } } impl ConvertibleToWasm for u64 { - type NativeType = u64; const VALUE_TYPE: ValueType = ValueType::I64; + type NativeType = u64; fn to_typed_value(self) -> Value { Value::I64(self as i64) } diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index f6fff20de6b1a..aec5dc2db611d 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -540,7 +540,7 @@ mod tests { [seal0] nop(_ctx, _unused: u64) => { unreachable!(); }, - // new version of nop with other data type for argumebt + // new version of nop with other data type for argument [seal1] nop(_ctx, _unused: i32) => { unreachable!(); }, ); } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 3d5e62f2c1333..562b29fbaeb1b 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -1209,7 +1209,7 @@ define_env!(Env, , // // # Parameters // - // - flags: See [`CallFlags`] for a documenation of the 
supported flags. + // - flags: See [`CallFlags`] for a documentation of the supported flags. // - callee_ptr: a pointer to the address of the callee contract. // Should be decodable as an `T::AccountId`. Traps otherwise. // - gas: how much gas to devote to the execution. @@ -1753,7 +1753,7 @@ define_env!(Env, , // # Note // // The state rent functionality was removed. This is stub only exists for - // backwards compatiblity + // backwards compatibility [seal0] seal_restore_to( ctx, _dest_ptr: u32, @@ -1774,7 +1774,7 @@ define_env!(Env, , // # Note // // The state rent functionality was removed. This is stub only exists for - // backwards compatiblity + // backwards compatibility [seal1] seal_restore_to( ctx, _dest_ptr: u32, @@ -1857,7 +1857,7 @@ define_env!(Env, , // # Note // // The state rent functionality was removed. This is stub only exists for - // backwards compatiblity. + // backwards compatibility. [seal0] seal_set_rent_allowance(ctx, _value_ptr: u32, _value_len: u32) => { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) @@ -1868,7 +1868,7 @@ define_env!(Env, , // # Note // // The state rent functionality was removed. This is stub only exists for - // backwards compatiblity. + // backwards compatibility. [seal1] seal_set_rent_allowance(ctx, _value_ptr: u32) => { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) @@ -1879,7 +1879,7 @@ define_env!(Env, , // # Note // // The state rent functionality was removed. This is stub only exists for - // backwards compatiblity. + // backwards compatibility. [seal0] seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { ctx.charge_gas(RuntimeCosts::Balance)?; let rent_allowance = >::max_value().encode(); @@ -2085,15 +2085,15 @@ define_env!(Env, , // // # Return Value // - // Returns `ReturnCode::Success` when the dispatchable was succesfully executed and - // returned `Ok`. When the dispatchable was exeuted but returned an error + // Returns `ReturnCode::Success` when the dispatchable was successfully executed and + // returned `Ok`. When the dispatchable was executed but returned an error // `ReturnCode::CallRuntimeReturnedError` is returned. The full error is not // provided because it is not guaranteed to be stable. // // # Comparison with `ChainExtension` // // Just as a chain extension this API allows the runtime to extend the functionality - // of contracts. While making use of this function is generelly easier it cannot be + // of contracts. While making use of this function is generally easier it cannot be // used in call cases. 
Consider writing a chain extension if you need to do perform // one of the following tasks: // From 74a6370e805ebaf88b7939e496818531f762cadf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Mon, 8 Aug 2022 15:32:00 +0100 Subject: [PATCH 015/348] contracts: Apply depth limit when decoding (#11991) --- frame/contracts/src/wasm/runtime.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 562b29fbaeb1b..f21dc7c76e44c 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -26,7 +26,7 @@ use crate::{ }; use bitflags::bitflags; -use codec::{Decode, DecodeAll, Encode, MaxEncodedLen}; +use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use sp_core::{crypto::UncheckedFrom, Bytes}; @@ -36,6 +36,9 @@ use sp_sandbox::SandboxMemory; use sp_std::prelude::*; use wasm_instrument::parity_wasm::elements::ValueType; +/// The maximum nesting depth a contract can use when encoding types. +const MAX_DECODE_NESTING: u32 = 256; + /// Type of a storage key. #[allow(dead_code)] enum KeyType { @@ -575,7 +578,7 @@ where ptr: u32, ) -> Result { let buf = self.read_sandbox_memory(ptr, D::max_encoded_len() as u32)?; - let decoded = D::decode_all(&mut &buf[..]) + let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut &buf[..]) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } @@ -597,7 +600,7 @@ where len: u32, ) -> Result { let buf = self.read_sandbox_memory(ptr, len)?; - let decoded = D::decode_all(&mut &buf[..]) + let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut &buf[..]) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; Ok(decoded) } From 8e600ee0515122667cd091d632104708d343ca34 Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 9 Aug 2022 20:04:28 +0900 Subject: [PATCH 016/348] Restore `wasmtime`'s default stack size limit to 1MB (#11993) * Restore `wasmtime`'s default stack size limit to 1MB * Add extra comments * Enforce different maximum call depth in release mode * Split the call depth limit in two --- client/executor/wasmtime/src/runtime.rs | 20 ++++--- client/executor/wasmtime/src/tests.rs | 79 +++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 7 deletions(-) diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 2feb9d0eea502..871de4e2300d2 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -324,13 +324,19 @@ fn common_config(semantics: &Semantics) -> std::result::Result native_stack_max, + + // In `wasmtime` 0.35 the default stack size limit was changed from 1MB to 512KB. + // + // This broke at least one parachain which depended on the original 1MB limit, + // so here we restore it to what it was originally. 
+ None => 1024 * 1024, + }; + + config + .max_wasm_stack(native_stack_max as usize) + .map_err(|e| WasmError::Other(format!("cannot set max wasm stack: {:#}", e)))?; config.parallel_compilation(semantics.parallel_compilation); diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs index e0fd9fbce0c57..1ca5dde4c2b93 100644 --- a/client/executor/wasmtime/src/tests.rs +++ b/client/executor/wasmtime/src/tests.rs @@ -174,6 +174,85 @@ impl RuntimeBuilder { } } +fn deep_call_stack_wat(depth: usize) -> String { + format!( + r#" + (module + (memory $0 32) + (export "memory" (memory $0)) + (global (export "__heap_base") i32 (i32.const 0)) + (func (export "overflow") call 0) + + (func $overflow (param $0 i32) + (block $label$1 + (br_if $label$1 + (i32.ge_u + (local.get $0) + (i32.const {depth}) + ) + ) + (call $overflow + (i32.add + (local.get $0) + (i32.const 1) + ) + ) + ) + ) + + (func (export "main") + (param i32 i32) (result i64) + (call $overflow (i32.const 0)) + (i64.const 0) + ) + ) + "# + ) +} + +// These two tests ensure that the `wasmtime`'s stack size limit and the amount of +// stack space used by a single stack frame doesn't suddenly change without us noticing. +// +// If they do (e.g. because we've pulled in a new version of `wasmtime`) we want to know +// that it did, regardless of how small the change was. +// +// If these tests starting failing it doesn't necessarily mean that something is broken; +// what it means is that one (or multiple) of the following has to be done: +// a) the tests may need to be updated for the new call depth, +// b) the stack limit may need to be changed to maintain backwards compatibility, +// c) the root cause of the new call depth limit determined, and potentially fixed, +// d) the new call depth limit may need to be validated to ensure it doesn't prevent any +// existing chain from syncing (if it was effectively decreased) + +// We need two limits here since depending on whether the code is compiled in debug +// or in release mode the maximum call depth is slightly different. 
+const CALL_DEPTH_LOWER_LIMIT: usize = 65478; +const CALL_DEPTH_UPPER_LIMIT: usize = 65514; + +test_wasm_execution!(test_consume_under_1mb_of_stack_does_not_trap); +fn test_consume_under_1mb_of_stack_does_not_trap(instantiation_strategy: InstantiationStrategy) { + let wat = deep_call_stack_wat(CALL_DEPTH_LOWER_LIMIT); + let mut builder = RuntimeBuilder::new(instantiation_strategy).use_wat(wat); + let runtime = builder.build(); + let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); + instance.call_export("main", &[]).unwrap(); +} + +test_wasm_execution!(test_consume_over_1mb_of_stack_does_trap); +fn test_consume_over_1mb_of_stack_does_trap(instantiation_strategy: InstantiationStrategy) { + let wat = deep_call_stack_wat(CALL_DEPTH_UPPER_LIMIT + 1); + let mut builder = RuntimeBuilder::new(instantiation_strategy).use_wat(wat); + let runtime = builder.build(); + let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); + match instance.call_export("main", &[]).unwrap_err() { + Error::AbortedDueToTrap(error) => { + let expected = "wasm trap: call stack exhausted"; + assert_eq!(error.message, expected); + }, + error => panic!("unexpected error: {:?}", error), + } +} + test_wasm_execution!(test_nan_canonicalization); fn test_nan_canonicalization(instantiation_strategy: InstantiationStrategy) { let mut builder = RuntimeBuilder::new(instantiation_strategy).canonicalize_nans(true); From fece0657f20e15df94be5833b164dfacd44823eb Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Tue, 9 Aug 2022 21:28:32 +0300 Subject: [PATCH 017/348] Network sync refactoring (part 6) (#11940) * Extract `NetworkKVProvider` trait in `sc-authority-discovery` and remove unnecessary dependency * Extract `NetworkSyncForkRequest` trait in `sc-finality-grandpa` * Relax requirements on `SyncOracle` trait, remove extra native methods from `NetworkService` that are already provided by trait impls * Move `NetworkSigner` trait from `sc-authority-discovery` into `sc-network-common` and de-duplicate methods on `NetworkService` * Move `NetworkKVProvider` trait from `sc-authority-discovery` into `sc-network-common` and de-duplicate methods on `NetworkService` * Minimize `sc-authority-discovery` dependency on `sc-network` * Move `NetworkSyncForkRequest` trait from `sc-finality-grandpa` to `sc-network-common` and de-duplicate methods in `NetworkService` * Extract `NetworkStatusProvider` trait and de-duplicate methods on `NetworkService` * Extract `NetworkPeers` trait and de-duplicate methods on `NetworkService` * Extract `NetworkEventStream` trait and de-duplicate methods on `NetworkService` * Move more methods from `NetworkService` into `NetworkPeers` trait * Move `NetworkStateInfo` trait into `sc-network-common` * Extract `NetworkNotification` trait and de-duplicate methods on `NetworkService` * Extract `NetworkRequest` trait and de-duplicate methods on `NetworkService` * Remove `NetworkService::local_peer_id()`, it is already provided by `NetworkStateInfo` impl * Extract `NetworkTransaction` trait and de-duplicate methods on `NetworkService` * Extract `NetworkBlock` trait and de-duplicate methods on `NetworkService` * Remove dependencies on `NetworkService` from most of the methods of `sc-service` * Address simple review comments --- Cargo.lock | 11 +- bin/node/cli/Cargo.toml | 1 + bin/node/cli/src/service.rs | 3 +- client/authority-discovery/Cargo.toml | 2 +- client/authority-discovery/src/lib.rs | 3 +- client/authority-discovery/src/service.rs | 2 +- 
client/authority-discovery/src/tests.rs | 4 +- client/authority-discovery/src/worker.rs | 76 +- .../src/worker/addr_cache.rs | 7 +- .../src/worker/schema/tests.rs | 5 +- .../authority-discovery/src/worker/tests.rs | 63 +- client/consensus/pow/src/lib.rs | 2 +- client/consensus/slots/src/lib.rs | 2 +- .../src/communication/gossip.rs | 3 +- .../finality-grandpa/src/communication/mod.rs | 45 +- .../src/communication/tests.rs | 143 +++- client/informant/Cargo.toml | 2 +- client/informant/src/display.rs | 8 +- client/informant/src/lib.rs | 7 +- client/network-gossip/Cargo.toml | 1 + client/network-gossip/src/bridge.rs | 138 ++- client/network-gossip/src/lib.rs | 79 +- client/network-gossip/src/state_machine.rs | 126 ++- client/network-gossip/src/validator.rs | 3 +- client/network/common/Cargo.toml | 3 + client/network/common/src/lib.rs | 2 + client/network/common/src/protocol.rs | 19 + .../{ => common}/src/protocol/event.rs | 4 +- .../network/common/src/request_responses.rs | 39 +- client/network/common/src/service.rs | 660 +++++++++++++++ .../{ => common}/src/service/signature.rs | 5 +- client/network/src/behaviour.rs | 12 +- client/network/src/lib.rs | 58 +- client/network/src/protocol.rs | 3 +- client/network/src/request_responses.rs | 41 +- client/network/src/service.rs | 794 +++++++----------- client/network/src/service/out_events.rs | 3 +- client/network/src/service/tests.rs | 59 +- client/network/src/transactions.rs | 10 +- client/network/test/src/lib.rs | 16 +- client/offchain/Cargo.toml | 1 + client/offchain/src/api.rs | 79 +- client/offchain/src/lib.rs | 104 ++- client/service/src/builder.rs | 58 +- client/service/src/lib.rs | 2 + client/service/src/metrics.rs | 8 +- client/service/test/Cargo.toml | 1 + client/service/test/src/lib.rs | 9 +- primitives/consensus/common/src/lib.rs | 18 +- 49 files changed, 1802 insertions(+), 942 deletions(-) create mode 100644 client/network/common/src/protocol.rs rename client/network/{ => common}/src/protocol/event.rs (96%) create mode 100644 client/network/common/src/service.rs rename client/network/{ => common}/src/service/signature.rs (96%) diff --git a/Cargo.lock b/Cargo.lock index 703eeffce54a1..38063e8060835 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4686,6 +4686,7 @@ dependencies = [ "sc-finality-grandpa", "sc-keystore", "sc-network", + "sc-network-common", "sc-rpc", "sc-service", "sc-service-test", @@ -7702,7 +7703,6 @@ dependencies = [ name = "sc-authority-discovery" version = "0.10.0-dev" dependencies = [ - "async-trait", "futures", "futures-timer", "ip_network", @@ -7715,6 +7715,7 @@ dependencies = [ "rand 0.7.3", "sc-client-api", "sc-network", + "sc-network-common", "sp-api", "sp-authority-discovery", "sp-blockchain", @@ -8316,7 +8317,7 @@ dependencies = [ "log", "parity-util-mem", "sc-client-api", - "sc-network", + "sc-network-common", "sc-transaction-pool-api", "sp-blockchain", "sp-runtime", @@ -8398,7 +8399,9 @@ dependencies = [ name = "sc-network-common" version = "0.10.0-dev" dependencies = [ + "async-trait", "bitflags", + "bytes", "futures", "libp2p", "parity-scale-codec", @@ -8409,6 +8412,7 @@ dependencies = [ "sp-consensus", "sp-finality-grandpa", "sp-runtime", + "thiserror", ] [[package]] @@ -8424,6 +8428,7 @@ dependencies = [ "lru", "quickcheck", "sc-network", + "sc-network-common", "sp-runtime", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -8533,6 +8538,7 @@ dependencies = [ "sc-client-api", "sc-client-db", "sc-network", + "sc-network-common", "sc-transaction-pool", "sc-transaction-pool-api", 
"sc-utils", @@ -8741,6 +8747,7 @@ dependencies = [ "sc-consensus", "sc-executor", "sc-network", + "sc-network-common", "sc-service", "sc-transaction-pool-api", "sp-api", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index fe7c5332e2b45..89ca3ebb45576 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -66,6 +66,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/commo sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sc-network = { version = "0.10.0-dev", path = "../../../client/network" } +sc-network-common = { version = "0.10.0-dev", path = "../../../client/network/common" } sc-consensus-slots = { version = "0.10.0-dev", path = "../../../client/consensus/slots" } sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } sc-consensus-uncles = { version = "0.10.0-dev", path = "../../../client/consensus/uncles" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index e0644f462cf20..b20a3ac59a96a 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -29,7 +29,8 @@ use node_primitives::Block; use sc_client_api::{BlockBackend, ExecutorProvider}; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; -use sc_network::{Event, NetworkService}; +use sc_network::NetworkService; +use sc_network_common::{protocol::event::Event, service::NetworkEventStream}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_api::ProvideRuntimeApi; diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 012e6096aab80..544df4c8d4812 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -17,7 +17,6 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] -async-trait = "0.1" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } futures = "0.3.21" futures-timer = "3.0.1" @@ -29,6 +28,7 @@ rand = "0.7.2" thiserror = "1.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network = { version = "0.10.0-dev", path = "../network" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-authority-discovery = { version = "4.0.0-dev", path = "../../primitives/authority-discovery" } diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 8522da9984a6f..f0ef374551617 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -39,8 +39,9 @@ use futures::{ Stream, }; +use libp2p::{Multiaddr, PeerId}; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{DhtEvent, Multiaddr, PeerId}; +use sc_network_common::protocol::event::DhtEvent; use sp_api::ProvideRuntimeApi; use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId}; use sp_runtime::traits::Block as BlockT; diff --git a/client/authority-discovery/src/service.rs b/client/authority-discovery/src/service.rs index c240e5d0c2287..df09b6ea43216 100644 --- a/client/authority-discovery/src/service.rs +++ 
b/client/authority-discovery/src/service.rs @@ -25,7 +25,7 @@ use futures::{ SinkExt, }; -use sc_network::{Multiaddr, PeerId}; +use libp2p::{Multiaddr, PeerId}; use sp_authority_discovery::AuthorityId; /// Service to interact with the [`crate::Worker`]. diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index e9f94b6a186db..334b2638ca58c 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -87,12 +87,12 @@ fn get_addresses_and_authority_id() { fn cryptos_are_compatible() { use sp_core::crypto::Pair; - let libp2p_secret = sc_network::Keypair::generate_ed25519(); + let libp2p_secret = libp2p::identity::Keypair::generate_ed25519(); let libp2p_public = libp2p_secret.public(); let sp_core_secret = { let libp2p_ed_secret = match libp2p_secret.clone() { - sc_network::Keypair::Ed25519(x) => x, + libp2p::identity::Keypair::Ed25519(x) => x, _ => panic!("generate_ed25519 should have generated an Ed25519 key ¯\\_(ツ)_/¯"), }; sp_core::ed25519::Pair::from_seed_slice(&libp2p_ed_secret.secret().as_ref()).unwrap() diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index 87cc72ba7a69c..f13a1cf33581b 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -32,19 +32,22 @@ use std::{ use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt}; use addr_cache::AddrCache; -use async_trait::async_trait; use codec::Decode; use ip_network::IpNetwork; use libp2p::{ core::multiaddr, multihash::{Multihash, MultihashDigest}, + Multiaddr, PeerId, }; use log::{debug, error, log_enabled}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; use prost::Message; use rand::{seq::SliceRandom, thread_rng}; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{DhtEvent, ExHashT, Multiaddr, NetworkStateInfo, PeerId}; +use sc_network_common::{ + protocol::event::DhtEvent, + service::{KademliaKey, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, Signature}, +}; use sp_api::ProvideRuntimeApi; use sp_authority_discovery::{ AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature, @@ -136,7 +139,7 @@ pub struct Worker { /// Queue of throttled lookups pending to be passed to the network. pending_lookups: Vec, /// Set of in-flight lookups. - in_flight_lookups: HashMap, + in_flight_lookups: HashMap, addr_cache: addr_cache::AddrCache, @@ -464,10 +467,7 @@ where } } - fn handle_dht_value_found_event( - &mut self, - values: Vec<(sc_network::KademliaKey, Vec)>, - ) -> Result<()> { + fn handle_dht_value_found_event(&mut self, values: Vec<(KademliaKey, Vec)>) -> Result<()> { // Ensure `values` is not empty and all its keys equal. let remote_key = single(values.iter().map(|(key, _)| key.clone())) .map_err(|_| Error::ReceivingDhtValueFoundEventWithDifferentKeys)? 
@@ -523,11 +523,11 @@ where // properly signed by the owner of the PeerId if let Some(peer_signature) = peer_signature { - let public_key = - sc_network::PublicKey::from_protobuf_encoding(&peer_signature.public_key) - .map_err(Error::ParsingLibp2pIdentity)?; - let signature = - sc_network::Signature { public_key, bytes: peer_signature.signature }; + let public_key = libp2p::identity::PublicKey::from_protobuf_encoding( + &peer_signature.public_key, + ) + .map_err(Error::ParsingLibp2pIdentity)?; + let signature = Signature { public_key, bytes: peer_signature.signature }; if !signature.verify(record, &remote_peer_id) { return Err(Error::VerifyingDhtPayload) @@ -590,55 +590,15 @@ where } } -pub trait NetworkSigner { - /// Sign a message in the name of `self.local_peer_id()` - fn sign_with_local_identity( - &self, - msg: impl AsRef<[u8]>, - ) -> std::result::Result; -} - /// NetworkProvider provides [`Worker`] with all necessary hooks into the /// underlying Substrate networking. Using this trait abstraction instead of -/// [`sc_network::NetworkService`] directly is necessary to unit test [`Worker`]. -#[async_trait] -pub trait NetworkProvider: NetworkStateInfo + NetworkSigner { - /// Start putting a value in the Dht. - fn put_value(&self, key: sc_network::KademliaKey, value: Vec); - - /// Start getting a value from the Dht. - fn get_value(&self, key: &sc_network::KademliaKey); -} +/// `sc_network::NetworkService` directly is necessary to unit test [`Worker`]. +pub trait NetworkProvider: NetworkDHTProvider + NetworkStateInfo + NetworkSigner {} -impl NetworkSigner for sc_network::NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn sign_with_local_identity( - &self, - msg: impl AsRef<[u8]>, - ) -> std::result::Result { - self.sign_with_local_identity(msg) - } -} - -#[async_trait::async_trait] -impl NetworkProvider for sc_network::NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn put_value(&self, key: sc_network::KademliaKey, value: Vec) { - self.put_value(key, value) - } - fn get_value(&self, key: &sc_network::KademliaKey) { - self.get_value(key) - } -} +impl NetworkProvider for T where T: NetworkDHTProvider + NetworkStateInfo + NetworkSigner {} -fn hash_authority_id(id: &[u8]) -> sc_network::KademliaKey { - sc_network::KademliaKey::new(&libp2p::multihash::Code::Sha2_256.digest(id).digest()) +fn hash_authority_id(id: &[u8]) -> KademliaKey { + KademliaKey::new(&libp2p::multihash::Code::Sha2_256.digest(id).digest()) } // Makes sure all values are the same and returns it @@ -685,7 +645,7 @@ async fn sign_record_with_authority_ids( peer_signature: Option, key_store: &dyn CryptoStore, keys: Vec, -) -> Result)>> { +) -> Result)>> { let signatures = key_store .sign_with_all(key_types::AUTHORITY_DISCOVERY, keys.clone(), &serialized_record) .await diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index f768b9c4e66a7..19bbbf0b62e7e 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -16,9 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use libp2p::core::multiaddr::{Multiaddr, Protocol}; - -use sc_network::PeerId; +use libp2p::{ + core::multiaddr::{Multiaddr, Protocol}, + PeerId, +}; use sp_authority_discovery::AuthorityId; use std::collections::{hash_map::Entry, HashMap, HashSet}; diff --git a/client/authority-discovery/src/worker/schema/tests.rs b/client/authority-discovery/src/worker/schema/tests.rs index b85a4ce37447d..60147d6762e50 100644 --- a/client/authority-discovery/src/worker/schema/tests.rs +++ b/client/authority-discovery/src/worker/schema/tests.rs @@ -21,9 +21,8 @@ mod schema_v1 { } use super::*; -use libp2p::multiaddr::Multiaddr; +use libp2p::{multiaddr::Multiaddr, PeerId}; use prost::Message; -use sc_network::PeerId; #[test] fn v2_decodes_v1() { @@ -56,7 +55,7 @@ fn v2_decodes_v1() { #[test] fn v1_decodes_v2() { - let peer_secret = sc_network::Keypair::generate_ed25519(); + let peer_secret = libp2p::identity::Keypair::generate_ed25519(); let peer_public = peer_secret.public(); let peer_id = peer_public.to_peer_id(); let multiaddress: Multiaddr = diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index a1a699bc30dd2..5a60d3353db52 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -22,7 +22,6 @@ use std::{ task::Poll, }; -use async_trait::async_trait; use futures::{ channel::mpsc::{self, channel}, executor::{block_on, LocalPool}, @@ -30,9 +29,10 @@ use futures::{ sink::SinkExt, task::LocalSpawn, }; -use libp2p::{core::multiaddr, PeerId}; +use libp2p::{core::multiaddr, identity::Keypair, PeerId}; use prometheus_endpoint::prometheus::default_registry; +use sc_network_common::service::{KademliaKey, Signature, SigningError}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_keystore::{testing::KeyStore, CryptoStore}; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; @@ -111,18 +111,18 @@ sp_api::mock_impl_runtime_apis! { #[derive(Debug)] pub enum TestNetworkEvent { - GetCalled(sc_network::KademliaKey), - PutCalled(sc_network::KademliaKey, Vec), + GetCalled(KademliaKey), + PutCalled(KademliaKey, Vec), } pub struct TestNetwork { peer_id: PeerId, - identity: sc_network::Keypair, + identity: Keypair, external_addresses: Vec, // Whenever functions on `TestNetwork` are called, the function arguments are added to the // vectors below. 
- pub put_value_call: Arc)>>>, - pub get_value_call: Arc>>, + pub put_value_call: Arc)>>>, + pub get_value_call: Arc>>, event_sender: mpsc::UnboundedSender, event_receiver: Option>, } @@ -136,7 +136,7 @@ impl TestNetwork { impl Default for TestNetwork { fn default() -> Self { let (tx, rx) = mpsc::unbounded(); - let identity = sc_network::Keypair::generate_ed25519(); + let identity = Keypair::generate_ed25519(); TestNetwork { peer_id: identity.public().to_peer_id(), identity, @@ -153,21 +153,20 @@ impl NetworkSigner for TestNetwork { fn sign_with_local_identity( &self, msg: impl AsRef<[u8]>, - ) -> std::result::Result { - sc_network::Signature::sign_message(msg, &self.identity) + ) -> std::result::Result { + Signature::sign_message(msg, &self.identity) } } -#[async_trait] -impl NetworkProvider for TestNetwork { - fn put_value(&self, key: sc_network::KademliaKey, value: Vec) { +impl NetworkDHTProvider for TestNetwork { + fn put_value(&self, key: KademliaKey, value: Vec) { self.put_value_call.lock().unwrap().push((key.clone(), value.clone())); self.event_sender .clone() .unbounded_send(TestNetworkEvent::PutCalled(key, value)) .unwrap(); } - fn get_value(&self, key: &sc_network::KademliaKey) { + fn get_value(&self, key: &KademliaKey) { self.get_value_call.lock().unwrap().push(key.clone()); self.event_sender .clone() @@ -186,12 +185,16 @@ impl NetworkStateInfo for TestNetwork { } } -impl NetworkSigner for sc_network::Keypair { +struct TestSigner<'a> { + keypair: &'a Keypair, +} + +impl<'a> NetworkSigner for TestSigner<'a> { fn sign_with_local_identity( &self, msg: impl AsRef<[u8]>, - ) -> std::result::Result { - sc_network::Signature::sign_message(msg, self) + ) -> std::result::Result { + Signature::sign_message(msg, self.keypair) } } @@ -200,7 +203,7 @@ async fn build_dht_event( public_key: AuthorityId, key_store: &dyn CryptoStore, network: Option<&Signer>, -) -> Vec<(sc_network::KademliaKey, Vec)> { +) -> Vec<(KademliaKey, Vec)> { let serialized_record = serialize_authority_record(serialize_addresses(addresses.into_iter())).unwrap(); @@ -313,7 +316,7 @@ fn publish_discover_cycle() { let dht_event = { let (key, value) = network.put_value_call.lock().unwrap().pop().unwrap(); - sc_network::DhtEvent::ValueFound(vec![(key, value)]) + DhtEvent::ValueFound(vec![(key, value)]) }; // Node B discovering node A's address. 
@@ -469,7 +472,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { None, ) .await; - sc_network::DhtEvent::ValueFound(kv_pairs) + DhtEvent::ValueFound(kv_pairs) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -487,7 +490,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { struct DhtValueFoundTester { pub remote_key_store: KeyStore, pub remote_authority_public: sp_core::sr25519::Public, - pub remote_node_key: sc_network::Keypair, + pub remote_node_key: Keypair, pub local_worker: Option< Worker< TestApi, @@ -496,7 +499,7 @@ struct DhtValueFoundTester { sp_runtime::generic::Header, substrate_test_runtime_client::runtime::Extrinsic, >, - std::pin::Pin>>, + std::pin::Pin>>, >, >, } @@ -508,7 +511,7 @@ impl DhtValueFoundTester { block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) .unwrap(); - let remote_node_key = sc_network::Keypair::generate_ed25519(); + let remote_node_key = Keypair::generate_ed25519(); Self { remote_key_store, remote_authority_public, remote_node_key, local_worker: None } } @@ -523,7 +526,7 @@ impl DhtValueFoundTester { fn process_value_found( &mut self, strict_record_validation: bool, - values: Vec<(sc_network::KademliaKey, Vec)>, + values: Vec<(KademliaKey, Vec)>, ) -> Option<&HashSet> { let (_dht_event_tx, dht_event_rx) = channel(1); let local_test_api = @@ -583,7 +586,7 @@ fn strict_accept_address_with_peer_signature() { vec![addr.clone()], tester.remote_authority_public.clone().into(), &tester.remote_key_store, - Some(&tester.remote_node_key), + Some(&TestSigner { keypair: &tester.remote_node_key }), )); let cached_remote_addresses = tester.process_value_found(true, kv_pairs); @@ -598,12 +601,12 @@ fn strict_accept_address_with_peer_signature() { #[test] fn reject_address_with_rogue_peer_signature() { let mut tester = DhtValueFoundTester::new(); - let rogue_remote_node_key = sc_network::Keypair::generate_ed25519(); + let rogue_remote_node_key = Keypair::generate_ed25519(); let kv_pairs = block_on(build_dht_event( vec![tester.multiaddr_with_peer_id(1)], tester.remote_authority_public.clone().into(), &tester.remote_key_store, - Some(&rogue_remote_node_key), + Some(&TestSigner { keypair: &rogue_remote_node_key }), )); let cached_remote_addresses = tester.process_value_found(false, kv_pairs); @@ -621,7 +624,7 @@ fn reject_address_with_invalid_peer_signature() { vec![tester.multiaddr_with_peer_id(1)], tester.remote_authority_public.clone().into(), &tester.remote_key_store, - Some(&tester.remote_node_key), + Some(&TestSigner { keypair: &tester.remote_node_key }), )); // tamper with the signature let mut record = schema::SignedAuthorityRecord::decode(kv_pairs[0].1.as_slice()).unwrap(); @@ -808,7 +811,7 @@ fn lookup_throttling() { None, ) .await; - sc_network::DhtEvent::ValueFound(kv_pairs) + DhtEvent::ValueFound(kv_pairs) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -822,7 +825,7 @@ fn lookup_throttling() { // Make second one fail. let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); - let dht_event = sc_network::DhtEvent::ValueNotFound(remote_hash); + let dht_event = DhtEvent::ValueNotFound(remote_hash); dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); // Assert worker to trigger another lookup. 
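[Editor's aside, not part of the patch] The recurring move in this refactoring is to depend on small capability traits (NetworkDHTProvider, NetworkStateInfo, NetworkSigner and friends) combined through a blanket impl, instead of on the concrete NetworkService, which is what lets the TestNetwork mock above implement only the pieces the worker needs. A simplified, self-contained sketch of that pattern follows; the trait and method names here are stand-ins, not the real sc-network-common API.

trait DhtProvider {
    fn get_value(&self, key: &[u8]);
}

trait StateInfo {
    fn local_peer_id(&self) -> String;
}

// The capability bundle a worker asks for instead of a concrete service type.
trait Provider: DhtProvider + StateInfo {}

// Blanket impl: anything providing the individual capabilities satisfies the
// bundle, so the production service and lightweight test mocks both qualify.
impl<T> Provider for T where T: DhtProvider + StateInfo {}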
diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 6f9ee6f864ad8..f63e453a48026 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -518,7 +518,7 @@ pub fn start_mining_worker( select_chain: S, algorithm: Algorithm, mut env: E, - mut sync_oracle: SO, + sync_oracle: SO, justification_sync_link: L, pre_runtime: Option>, create_inherent_data_providers: CIDP, diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 7c3de13444c1a..39b40a32f18ca 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -490,7 +490,7 @@ pub async fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, - mut sync_oracle: SO, + sync_oracle: SO, create_inherent_data_providers: CIDP, can_author_with: CAW, ) where diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 65d7dfb783aa3..9e9f2fb98b0d1 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -89,7 +89,8 @@ use log::{debug, trace}; use parity_scale_codec::{Decode, Encode}; use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; use rand::seq::SliceRandom; -use sc_network::{ObservedRole, PeerId, ReputationChange}; +use sc_network::{PeerId, ReputationChange}; +use sc_network_common::protocol::event::ObservedRole; use sc_network_gossip::{MessageIntent, ValidatorContext}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 378501cffdd62..7a47ebe214950 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -45,7 +45,7 @@ use finality_grandpa::{ Message::{Precommit, Prevote, PrimaryPropose}, }; use parity_scale_codec::{Decode, Encode}; -use sc_network::{NetworkService, ReputationChange}; +use sc_network::ReputationChange; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_keystore::SyncCryptoStorePtr; @@ -58,6 +58,7 @@ use crate::{ use gossip::{ FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage, }; +use sc_network_common::service::{NetworkBlock, NetworkSyncForkRequest}; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_finality_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId as SetIdNumber}; @@ -156,34 +157,26 @@ const TELEMETRY_VOTERS_LIMIT: usize = 10; /// /// Something that provides both the capabilities needed for the `gossip_network::Network` trait as /// well as the ability to set a fork sync request for a particular block. -pub trait Network: GossipNetwork + Clone + Send + 'static { - /// Notifies the sync service to try and sync the given block from the given - /// peers. - /// - /// If the given vector of peers is empty then the underlying implementation - /// should make a best effort to fetch the block from any peers it is - /// connected to (NOTE: this assumption will change in the future #3629). 
- fn set_sync_fork_request( - &self, - peers: Vec, - hash: Block::Hash, - number: NumberFor, - ); +pub trait Network: + NetworkSyncForkRequest> + + NetworkBlock> + + GossipNetwork + + Clone + + Send + + 'static +{ } -impl Network for Arc> +impl Network for T where - B: BlockT, - H: sc_network::ExHashT, + Block: BlockT, + T: NetworkSyncForkRequest> + + NetworkBlock> + + GossipNetwork + + Clone + + Send + + 'static, { - fn set_sync_fork_request( - &self, - peers: Vec, - hash: B::Hash, - number: NumberFor, - ) { - NetworkService::set_sync_fork_request(self, peers, hash, number) - } } /// Create a unique topic for a round and set-id combo. @@ -467,7 +460,7 @@ impl> NetworkBridge { hash: B::Hash, number: NumberFor, ) { - Network::set_sync_fork_request(&self.service, peers, hash, number) + self.service.set_sync_fork_request(peers, hash, number) } } diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 0ec5092a2a047..59935cef6a095 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -25,7 +25,14 @@ use super::{ use crate::{communication::grandpa_protocol_name, environment::SharedVoterSetState}; use futures::prelude::*; use parity_scale_codec::Encode; -use sc_network::{config::Role, Event as NetworkEvent, ObservedRole, PeerId}; +use sc_network::{config::Role, Multiaddr, PeerId, ReputationChange}; +use sc_network_common::{ + protocol::event::{Event as NetworkEvent, ObservedRole}, + service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NetworkSyncForkRequest, NotificationSender, NotificationSenderError, + }, +}; use sc_network_gossip::Validator; use sc_network_test::{Block, Hash}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -34,6 +41,7 @@ use sp_keyring::Ed25519Keyring; use sp_runtime::traits::NumberFor; use std::{ borrow::Cow, + collections::HashSet, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -42,8 +50,8 @@ use std::{ #[derive(Debug)] pub(crate) enum Event { EventStream(TracingUnboundedSender), - WriteNotification(sc_network::PeerId, Vec), - Report(sc_network::PeerId, sc_network::ReputationChange), + WriteNotification(PeerId, Vec), + Report(PeerId, ReputationChange), Announce(Hash), } @@ -52,49 +60,122 @@ pub(crate) struct TestNetwork { sender: TracingUnboundedSender, } -impl sc_network_gossip::Network for TestNetwork { - fn event_stream(&self) -> Pin + Send>> { - let (tx, rx) = tracing_unbounded("test"); - let _ = self.sender.unbounded_send(Event::EventStream(tx)); - Box::pin(rx) +impl NetworkPeers for TestNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!(); + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); } - fn report_peer(&self, who: sc_network::PeerId, cost_benefit: sc_network::ReputationChange) { + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) {} + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + unimplemented!(); + } + + fn 
remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set(&self, _protocol: Cow<'static, str>, _peers: Vec) {} - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } - fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) {} + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } - fn write_notification(&self, who: PeerId, _: Cow<'static, str>, message: Vec) { - let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); + fn sync_num_connected(&self) -> usize { + unimplemented!(); } +} - fn announce(&self, block: Hash, _associated_data: Option>) { - let _ = self.sender.unbounded_send(Event::Announce(block)); +impl NetworkEventStream for TestNetwork { + fn event_stream( + &self, + _name: &'static str, + ) -> Pin + Send>> { + let (tx, rx) = tracing_unbounded("test"); + let _ = self.sender.unbounded_send(Event::EventStream(tx)); + Box::pin(rx) } } -impl super::Network for TestNetwork { - fn set_sync_fork_request( +impl NetworkNotification for TestNetwork { + fn write_notification(&self, target: PeerId, _protocol: Cow<'static, str>, message: Vec) { + let _ = self.sender.unbounded_send(Event::WriteNotification(target, message)); + } + + fn notification_sender( &self, - _peers: Vec, - _hash: Hash, - _number: NumberFor, - ) { + _target: PeerId, + _protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + unimplemented!(); + } +} + +impl NetworkBlock> for TestNetwork { + fn announce_block(&self, hash: Hash, _data: Option>) { + let _ = self.sender.unbounded_send(Event::Announce(hash)); + } + + fn new_best_block_imported(&self, _hash: Hash, _number: NumberFor) { + unimplemented!(); } } +impl NetworkSyncForkRequest> for TestNetwork { + fn set_sync_fork_request(&self, _peers: Vec, _hash: Hash, _number: NumberFor) {} +} + impl sc_network_gossip::ValidatorContext for TestNetwork { fn broadcast_topic(&mut self, _: Hash, _: bool) {} fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} - fn send_message(&mut self, who: &sc_network::PeerId, data: Vec) { - >::write_notification( + fn send_message(&mut self, who: &PeerId, data: Vec) { + ::write_notification( self, who.clone(), grandpa_protocol_name::NAME.into(), @@ -102,7 +183,7 @@ impl sc_network_gossip::ValidatorContext for TestNetwork { ); } - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} + fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} } pub(crate) struct Tester { @@ -207,8 +288,8 @@ struct NoopContext; impl sc_network_gossip::ValidatorContext for NoopContext { fn broadcast_topic(&mut self, _: Hash, _: bool) {} fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} - fn send_message(&mut self, _: &sc_network::PeerId, _: Vec) {} - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} + fn send_message(&mut self, _: &PeerId, _: Vec) {} + fn send_topic(&mut self, _: &PeerId, _: Hash, _: bool) {} } #[test] @@ -252,7 +333,7 @@ fn good_commit_leads_to_relay() { }) .encode(); - let id = sc_network::PeerId::random(); + let id = 
PeerId::random(); let global_topic = super::global_topic::(set_id); let test = make_test_network() @@ -301,7 +382,7 @@ fn good_commit_leads_to_relay() { }); // Add a random peer which will be the recipient of this message - let receiver_id = sc_network::PeerId::random(); + let receiver_id = PeerId::random(); let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: receiver_id.clone(), protocol: grandpa_protocol_name::NAME.into(), @@ -403,7 +484,7 @@ fn bad_commit_leads_to_report() { }) .encode(); - let id = sc_network::PeerId::random(); + let id = PeerId::random(); let global_topic = super::global_topic::(set_id); let test = make_test_network() @@ -484,7 +565,7 @@ fn bad_commit_leads_to_report() { #[test] fn peer_with_higher_view_leads_to_catch_up_request() { - let id = sc_network::PeerId::random(); + let id = PeerId::random(); let (tester, mut net) = make_test_network(); let test = tester diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 528365d62c18b..b3ac5d892fd58 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -19,7 +19,7 @@ futures-timer = "3.0.1" log = "0.4.17" parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 446ddf47b4cab..0441011b0ec42 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -20,7 +20,13 @@ use crate::OutputFormat; use ansi_term::Colour; use log::info; use sc_client_api::ClientInfo; -use sc_network::{NetworkStatus, SyncState, WarpSyncPhase, WarpSyncProgress}; +use sc_network_common::{ + service::NetworkStatus, + sync::{ + warp::{WarpSyncPhase, WarpSyncProgress}, + SyncState, + }, +}; use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Saturating, Zero}; use std::{fmt, time::Instant}; diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 5dca77f1a7433..52f1c95fe0198 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -24,7 +24,7 @@ use futures_timer::Delay; use log::{debug, info, trace}; use parity_util_mem::MallocSizeOf; use sc_client_api::{BlockchainEvents, UsageProvider}; -use sc_network::NetworkService; +use sc_network_common::service::NetworkStatusProvider; use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; @@ -53,12 +53,13 @@ impl Default for OutputFormat { } /// Builds the informant and returns a `Future` that drives the informant. -pub async fn build( +pub async fn build( client: Arc, - network: Arc::Hash>>, + network: N, pool: Arc
, format: OutputFormat, ) where + N: NetworkStatusProvider, C: UsageProvider + HeaderMetadata + BlockchainEvents, >::Error: Display, P: TransactionPool + MallocSizeOf, diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 144c5ad168996..185c37985b585 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -23,6 +23,7 @@ lru = "0.7.5" tracing = "0.1.29" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 2d086e89b4a10..8a6c3358e4409 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -21,7 +21,8 @@ use crate::{ Network, Validator, }; -use sc_network::{Event, ReputationChange}; +use sc_network::ReputationChange; +use sc_network_common::protocol::event::Event; use futures::{ channel::mpsc::{channel, Receiver, Sender}, @@ -85,7 +86,7 @@ impl GossipEngine { B: 'static, { let protocol = protocol.into(); - let network_event_stream = network.event_stream(); + let network_event_stream = network.event_stream("network-gossip"); GossipEngine { state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry), @@ -162,7 +163,7 @@ impl GossipEngine { /// Note: this method isn't strictly related to gossiping and should eventually be moved /// somewhere else. pub fn announce(&self, block: B::Hash, associated_data: Option>) { - self.network.announce(block, associated_data); + self.network.announce_block(block, associated_data); } } @@ -181,7 +182,10 @@ impl Future for GossipEngine { this.network.add_set_reserved(remote, this.protocol.clone()); }, Event::SyncDisconnected { remote } => { - this.network.remove_set_reserved(remote, this.protocol.clone()); + this.network.remove_peers_from_reserved_set( + this.protocol.clone(), + vec![remote], + ); }, Event::NotificationStreamOpened { remote, protocol, role, .. 
} => { if protocol != this.protocol { @@ -304,7 +308,7 @@ impl futures::future::FusedFuture for GossipEngine { #[cfg(test)] mod tests { use super::*; - use crate::{ValidationResult, ValidatorContext}; + use crate::{multiaddr::Multiaddr, ValidationResult, ValidatorContext}; use async_std::task::spawn; use futures::{ channel::mpsc::{unbounded, UnboundedSender}, @@ -312,10 +316,20 @@ mod tests { future::poll_fn, }; use quickcheck::{Arbitrary, Gen, QuickCheck}; - use sc_network::ObservedRole; - use sp_runtime::{testing::H256, traits::Block as BlockT}; + use sc_network_common::{ + protocol::event::ObservedRole, + service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NotificationSender, NotificationSenderError, + }, + }; + use sp_runtime::{ + testing::H256, + traits::{Block as BlockT, NumberFor}, + }; use std::{ borrow::Cow, + collections::HashSet, sync::{Arc, Mutex}, }; use substrate_test_runtime_client::runtime::Block; @@ -330,29 +344,119 @@ mod tests { event_senders: Vec>, } - impl Network for TestNetwork { - fn event_stream(&self) -> Pin + Send>> { + impl NetworkPeers for TestNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!(); + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) {} + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: Vec, + ) { + } + + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } + + fn sync_num_connected(&self) -> usize { + unimplemented!(); + } + } + + impl NetworkEventStream for TestNetwork { + fn event_stream(&self, _name: &'static str) -> Pin + Send>> { let (tx, rx) = unbounded(); self.inner.lock().unwrap().event_senders.push(tx); Box::pin(rx) } + } - fn report_peer(&self, _: PeerId, _: ReputationChange) {} - - fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { + impl NetworkNotification for TestNetwork { + fn write_notification( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + _message: Vec, + ) { unimplemented!(); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn notification_sender( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + unimplemented!(); + } + } - fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { + impl NetworkBlock<::Hash, NumberFor> for TestNetwork { + fn announce_block(&self, _hash: ::Hash, _data: Option>) { 
unimplemented!(); } - fn announce(&self, _: B::Hash, _: Option>) { + fn new_best_block_imported( + &self, + _hash: ::Hash, + _number: NumberFor, + ) { unimplemented!(); } } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 4b83708702466..b9dff0bcd4d00 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -41,9 +41,9 @@ //! messages. //! //! The [`GossipEngine`] will automatically use [`Network::add_set_reserved`] and -//! [`Network::remove_set_reserved`] to maintain a set of peers equal to the set of peers the -//! node is syncing from. See the documentation of `sc-network` for more explanations about the -//! concepts of peer sets. +//! [`NetworkPeers::remove_peers_from_reserved_set`] to maintain a set of peers equal to the set of +//! peers the node is syncing from. See the documentation of `sc-network` for more explanations +//! about the concepts of peer sets. //! //! # What is a validator? //! @@ -67,74 +67,35 @@ pub use self::{ validator::{DiscardAll, MessageIntent, ValidationResult, Validator, ValidatorContext}, }; -use futures::prelude::*; -use sc_network::{multiaddr, Event, ExHashT, NetworkService, PeerId, ReputationChange}; -use sp_runtime::traits::Block as BlockT; -use std::{borrow::Cow, iter, pin::Pin, sync::Arc}; +use sc_network::{multiaddr, PeerId}; +use sc_network_common::service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, +}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{borrow::Cow, iter}; mod bridge; mod state_machine; mod validator; /// Abstraction over a network. -pub trait Network { - /// Returns a stream of events representing what happens on the network. - fn event_stream(&self) -> Pin + Send>>; - - /// Adjust the reputation of a node. - fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange); - - /// Adds the peer to the set of peers to be connected to with this protocol. - fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); - - /// Removes the peer from the set of peers to be connected to with this protocol. - fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>); - - /// Force-disconnect a peer. - fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>); - - /// Send a notification to a peer. - fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec); - - /// Notify everyone we're connected to that we have the given block. - /// - /// Note: this method isn't strictly related to gossiping and should eventually be moved - /// somewhere else. 
- fn announce(&self, block: B::Hash, associated_data: Option>); -} - -impl Network for Arc> { - fn event_stream(&self) -> Pin + Send>> { - Box::pin(NetworkService::event_stream(self, "network-gossip")) - } - - fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange) { - NetworkService::report_peer(self, peer_id, reputation); - } - +pub trait Network: + NetworkPeers + NetworkEventStream + NetworkNotification + NetworkBlock> +{ fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { let addr = iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); - let result = - NetworkService::add_peers_to_reserved_set(self, protocol, iter::once(addr).collect()); + let result = self.add_peers_to_reserved_set(protocol, iter::once(addr).collect()); if let Err(err) = result { log::error!(target: "gossip", "add_set_reserved failed: {}", err); } } +} - fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { - NetworkService::remove_peers_from_reserved_set(self, protocol, iter::once(who).collect()); - } - - fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { - NetworkService::disconnect_peer(self, who, protocol) - } - - fn write_notification(&self, who: PeerId, protocol: Cow<'static, str>, message: Vec) { - NetworkService::write_notification(self, who, protocol, message) - } - - fn announce(&self, block: B::Hash, associated_data: Option>) { - NetworkService::announce_block(self, block, associated_data) - } +impl Network for T where + T: NetworkPeers + + NetworkEventStream + + NetworkNotification + + NetworkBlock> +{ } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 8a016cbaab3da..4cc4e25529af4 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -22,7 +22,7 @@ use ahash::AHashSet; use libp2p::PeerId; use lru::LruCache; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network::ObservedRole; +use sc_network_common::protocol::event::ObservedRole; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; use std::{borrow::Cow, collections::HashMap, iter, sync::Arc, time, time::Instant}; @@ -511,11 +511,23 @@ impl Metrics { #[cfg(test)] mod tests { use super::*; + use crate::multiaddr::Multiaddr; use futures::prelude::*; - use sc_network::{Event, ReputationChange}; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256}; + use sc_network::ReputationChange; + use sc_network_common::{ + protocol::event::Event, + service::{ + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NotificationSender, NotificationSenderError, + }, + }; + use sp_runtime::{ + testing::{Block as RawBlock, ExtrinsicWrapper, H256}, + traits::NumberFor, + }; use std::{ borrow::Cow, + collections::HashSet, pin::Pin, sync::{Arc, Mutex}, }; @@ -569,28 +581,118 @@ mod tests { peer_reports: Vec<(PeerId, ReputationChange)>, } - impl Network for NoOpNetwork { - fn event_stream(&self) -> Pin + Send>> { + impl NetworkPeers for NoOpNetwork { + fn set_authorized_peers(&self, _peers: HashSet) { + unimplemented!(); + } + + fn set_authorized_only(&self, _reserved_only: bool) { + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + self.inner.lock().unwrap().peer_reports.push((who, cost_benefit)); + } + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { 
+ unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { unimplemented!(); } - fn report_peer(&self, peer_id: PeerId, reputation_change: ReputationChange) { - self.inner.lock().unwrap().peer_reports.push((peer_id, reputation_change)); + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: Vec, + ) { } - fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { unimplemented!(); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} + fn sync_num_connected(&self) -> usize { + unimplemented!(); + } + } - fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { + impl NetworkEventStream for NoOpNetwork { + fn event_stream(&self, _name: &'static str) -> Pin + Send>> { unimplemented!(); } + } - fn announce(&self, _: B::Hash, _: Option>) { + impl NetworkNotification for NoOpNetwork { + fn write_notification( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + _message: Vec, + ) { + unimplemented!(); + } + + fn notification_sender( + &self, + _target: PeerId, + _protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + unimplemented!(); + } + } + + impl NetworkBlock<::Hash, NumberFor> for NoOpNetwork { + fn announce_block(&self, _hash: ::Hash, _data: Option>) { + unimplemented!(); + } + + fn new_best_block_imported( + &self, + _hash: ::Hash, + _number: NumberFor, + ) { unimplemented!(); } } diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index 7d60f7b31397f..a98c62ab5a9eb 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -16,7 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_network::{ObservedRole, PeerId}; +use sc_network::PeerId; +use sc_network_common::protocol::event::ObservedRole; use sp_runtime::traits::Block as BlockT; /// Validates consensus messages. 
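A note on the shape of the change above: `sc_network_gossip::Network` (like the GRANDPA `Network` trait earlier in this patch) is reduced to a thin trait, keeping only a default-method helper, with a blanket impl over the capability traits from `sc-network-common`. This is why the test mocks now implement each capability trait separately instead of one large trait. A stripped-down sketch of that pattern, with a hypothetical `Capabilities` marker standing in for the real traits:

```rust
use sc_network_common::service::{NetworkEventStream, NetworkNotification, NetworkPeers};

/// Hypothetical marker trait, for illustration only; the real `Network` traits in
/// this patch follow the same shape (with additional `NetworkBlock`/gossip bounds).
pub trait Capabilities: NetworkPeers + NetworkEventStream + NetworkNotification {}

// Blanket impl: anything providing the capabilities gets the marker for free,
// so `NetworkService` needs no hand-written impl per consumer crate.
impl<T> Capabilities for T where T: NetworkPeers + NetworkEventStream + NetworkNotification {}
```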
diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index b0e3a8fe42a83..1b10bd248292c 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -17,7 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] +async-trait = "0.1.50" bitflags = "1.3.2" +bytes = "1" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } @@ -29,3 +31,4 @@ sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } +thiserror = "1.0" diff --git a/client/network/common/src/lib.rs b/client/network/common/src/lib.rs index 9fbedc542c123..3a30d24900199 100644 --- a/client/network/common/src/lib.rs +++ b/client/network/common/src/lib.rs @@ -20,5 +20,7 @@ pub mod config; pub mod message; +pub mod protocol; pub mod request_responses; +pub mod service; pub mod sync; diff --git a/client/network/common/src/protocol.rs b/client/network/common/src/protocol.rs new file mode 100644 index 0000000000000..0fd36cb511112 --- /dev/null +++ b/client/network/common/src/protocol.rs @@ -0,0 +1,19 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +pub mod event; diff --git a/client/network/src/protocol/event.rs b/client/network/common/src/protocol/event.rs similarity index 96% rename from client/network/src/protocol/event.rs rename to client/network/common/src/protocol/event.rs index 26c9544960605..c6fb4a2faf9f9 100644 --- a/client/network/src/protocol/event.rs +++ b/client/network/common/src/protocol/event.rs @@ -67,14 +67,14 @@ pub enum Event { remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. /// This is always equal to the value of - /// [`crate::config::NonDefaultSetConfig::notifications_protocol`] of one of the + /// `sc_network::config::NonDefaultSetConfig::notifications_protocol` of one of the /// configured sets. protocol: Cow<'static, str>, /// If the negotiation didn't use the main name of the protocol (the one in /// `notifications_protocol`), then this field contains which name has actually been /// used. /// Always contains a value equal to the value in - /// [`crate::config::NonDefaultSetConfig::fallback_names`]. + /// `sc_network::config::NonDefaultSetConfig::fallback_names`. negotiated_fallback: Option>, /// Role of the remote. 
role: ObservedRole, diff --git a/client/network/common/src/request_responses.rs b/client/network/common/src/request_responses.rs index f409d1ac16d61..a216c9c2d254d 100644 --- a/client/network/common/src/request_responses.rs +++ b/client/network/common/src/request_responses.rs @@ -19,7 +19,7 @@ //! Collection of generic data structures for request-response protocols. use futures::channel::{mpsc, oneshot}; -use libp2p::PeerId; +use libp2p::{request_response::OutboundFailure, PeerId}; use sc_peerset::ReputationChange; use std::{borrow::Cow, time::Duration}; @@ -115,3 +115,40 @@ pub struct OutgoingResponse { /// > written to the buffer managed by the operating system. pub sent_feedback: Option>, } + +/// When sending a request, what to do on a disconnected recipient. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum IfDisconnected { + /// Try to connect to the peer. + TryConnect, + /// Just fail if the destination is not yet connected. + ImmediateError, +} + +/// Convenience functions for `IfDisconnected`. +impl IfDisconnected { + /// Shall we connect to a disconnected peer? + pub fn should_connect(self) -> bool { + match self { + Self::TryConnect => true, + Self::ImmediateError => false, + } + } +} + +/// Error in a request. +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum RequestFailure { + #[error("We are not currently connected to the requested peer.")] + NotConnected, + #[error("Given protocol hasn't been registered.")] + UnknownProtocol, + #[error("Remote has closed the substream before answering, thereby signaling that it considers the request as valid, but refused to answer it.")] + Refused, + #[error("The remote replied, but the local node is no longer interested in the response.")] + Obsolete, + /// Problem on the network. + #[error("Problem on the network: {0}")] + Network(OutboundFailure), +} diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs new file mode 100644 index 0000000000000..0fff32b12d66c --- /dev/null +++ b/client/network/common/src/service.rs @@ -0,0 +1,660 @@ +// This file is part of Substrate. +// +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +// +// If you read this, you are very thorough, congratulations. 
+ +use crate::{ + protocol::event::Event, + request_responses::{IfDisconnected, RequestFailure}, + sync::{warp::WarpSyncProgress, StateDownloadProgress, SyncState}, +}; +use futures::{channel::oneshot, Stream}; +pub use libp2p::{identity::error::SigningError, kad::record::Key as KademliaKey}; +use libp2p::{Multiaddr, PeerId}; +use sc_peerset::ReputationChange; +pub use signature::Signature; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{borrow::Cow, collections::HashSet, future::Future, pin::Pin, sync::Arc}; + +mod signature; + +/// Signer with network identity +pub trait NetworkSigner { + /// Signs the message with the `KeyPair` that defines the local [`PeerId`]. + fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result; +} + +impl NetworkSigner for Arc +where + T: ?Sized, + T: NetworkSigner, +{ + fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result { + T::sign_with_local_identity(self, msg) + } +} + +/// Provides access to the networking DHT. +pub trait NetworkDHTProvider { + /// Start getting a value from the DHT. + fn get_value(&self, key: &KademliaKey); + + /// Start putting a value in the DHT. + fn put_value(&self, key: KademliaKey, value: Vec); +} + +impl NetworkDHTProvider for Arc +where + T: ?Sized, + T: NetworkDHTProvider, +{ + fn get_value(&self, key: &KademliaKey) { + T::get_value(self, key) + } + + fn put_value(&self, key: KademliaKey, value: Vec) { + T::put_value(self, key, value) + } +} + +/// Provides an ability to set a fork sync request for a particular block. +pub trait NetworkSyncForkRequest { + /// Notifies the sync service to try and sync the given block from the given + /// peers. + /// + /// If the given vector of peers is empty then the underlying implementation + /// should make a best effort to fetch the block from any peers it is + /// connected to (NOTE: this assumption will change in the future #3629). + fn set_sync_fork_request(&self, peers: Vec, hash: BlockHash, number: BlockNumber); +} + +impl NetworkSyncForkRequest for Arc +where + T: ?Sized, + T: NetworkSyncForkRequest, +{ + fn set_sync_fork_request(&self, peers: Vec, hash: BlockHash, number: BlockNumber) { + T::set_sync_fork_request(self, peers, hash, number) + } +} + +/// Overview status of the network. +#[derive(Clone)] +pub struct NetworkStatus { + /// Current global sync state. + pub sync_state: SyncState, + /// Target sync block number. + pub best_seen_block: Option>, + /// Number of peers participating in syncing. + pub num_sync_peers: u32, + /// Total number of connected peers + pub num_connected_peers: usize, + /// Total number of active peers. + pub num_active_peers: usize, + /// The total number of bytes received. + pub total_bytes_inbound: u64, + /// The total number of bytes sent. + pub total_bytes_outbound: u64, + /// State sync in progress. + pub state_sync: Option, + /// Warp sync in progress. + pub warp_sync: Option>, +} + +/// Provides high-level status information about network. +#[async_trait::async_trait] +pub trait NetworkStatusProvider { + /// High-level network status information. + /// + /// Returns an error if the `NetworkWorker` is no longer running. 
+ async fn status(&self) -> Result, ()>; +} + +// Manual implementation to avoid extra boxing here +impl NetworkStatusProvider for Arc +where + T: ?Sized, + T: NetworkStatusProvider, +{ + fn status<'life0, 'async_trait>( + &'life0 self, + ) -> Pin, ()>> + Send + 'async_trait>> + where + 'life0: 'async_trait, + Self: 'async_trait, + { + T::status(self) + } +} + +/// Provides low-level API for manipulating network peers. +pub trait NetworkPeers { + /// Set authorized peers. + /// + /// Need a better solution to manage authorized peers, but now just use reserved peers for + /// prototyping. + fn set_authorized_peers(&self, peers: HashSet); + + /// Set authorized_only flag. + /// + /// Need a better solution to decide authorized_only, but now just use reserved_only flag for + /// prototyping. + fn set_authorized_only(&self, reserved_only: bool); + + /// Adds an address known to a node. + fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr); + + /// Report a given peer as either beneficial (+) or costly (-) according to the + /// given scalar. + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange); + + /// Disconnect from a node as soon as possible. + /// + /// This triggers the same effects as if the connection had closed itself spontaneously. + /// + /// See also [`NetworkPeers::remove_from_peers_set`], which has the same effect but also + /// prevents the local node from re-establishing an outgoing substream to this peer until it + /// is added again. + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>); + + /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. + fn accept_unreserved_peers(&self); + + /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing + /// purposes. + fn deny_unreserved_peers(&self); + + /// Adds a `PeerId` and its address as reserved. The string should encode the address + /// and peer ID of the remote node. + /// + /// Returns an `Err` if the given string is not a valid multiaddress + /// or contains an invalid peer ID (which includes the local peer ID). + fn add_reserved_peer(&self, peer: String) -> Result<(), String>; + + /// Removes a `PeerId` from the list of reserved peers. + fn remove_reserved_peer(&self, peer_id: PeerId); + + /// Sets the reserved set of a protocol to the given set of peers. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// The node will start establishing/accepting connections and substreams to/from peers in this + /// set, if it doesn't have any substream open with them yet. + /// + /// Note however, if a call to this function results in less peers on the reserved set, they + /// will not necessarily get disconnected (depending on available free slots in the peer set). + /// If you want to also disconnect those removed peers, you will have to call + /// `remove_from_peers_set` on those in addition to updating the reserved set. You can omit + /// this step if the peer set is in reserved only mode. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn set_reserved_peers( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String>; + + /// Add peers to a peer set. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. 
+ /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn add_peers_to_reserved_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String>; + + /// Remove peers from a peer set. + fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec); + + /// Add a peer to a set of peers. + /// + /// If the set has slots available, it will try to open a substream with this peer. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn add_to_peers_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String>; + + /// Remove peers from a peer set. + /// + /// If we currently have an open substream with this peer, it will soon be closed. + fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec); + + /// Returns the number of peers in the sync peer set we're connected to. + fn sync_num_connected(&self) -> usize; +} + +// Manual implementation to avoid extra boxing here +impl NetworkPeers for Arc +where + T: ?Sized, + T: NetworkPeers, +{ + fn set_authorized_peers(&self, peers: HashSet) { + T::set_authorized_peers(self, peers) + } + + fn set_authorized_only(&self, reserved_only: bool) { + T::set_authorized_only(self, reserved_only) + } + + fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + T::add_known_address(self, peer_id, addr) + } + + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + T::report_peer(self, who, cost_benefit) + } + + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + T::disconnect_peer(self, who, protocol) + } + + fn accept_unreserved_peers(&self) { + T::accept_unreserved_peers(self) + } + + fn deny_unreserved_peers(&self) { + T::deny_unreserved_peers(self) + } + + fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + T::add_reserved_peer(self, peer) + } + + fn remove_reserved_peer(&self, peer_id: PeerId) { + T::remove_reserved_peer(self, peer_id) + } + + fn set_reserved_peers( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { + T::set_reserved_peers(self, protocol, peers) + } + + fn add_peers_to_reserved_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { + T::add_peers_to_reserved_set(self, protocol, peers) + } + + fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { + T::remove_peers_from_reserved_set(self, protocol, peers) + } + + fn add_to_peers_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { + T::add_to_peers_set(self, protocol, peers) + } + + fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { + T::remove_from_peers_set(self, protocol, peers) + } + + fn sync_num_connected(&self) -> usize { + T::sync_num_connected(self) + } +} + +/// Provides access to network-level event stream. +pub trait NetworkEventStream { + /// Returns a stream containing the events that happen on the network. + /// + /// If this method is called multiple times, the events are duplicated. + /// + /// The stream never ends (unless the `NetworkWorker` gets shut down). + /// + /// The name passed is used to identify the channel in the Prometheus metrics. 
Note that the + /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having + /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory + fn event_stream(&self, name: &'static str) -> Pin + Send>>; +} + +impl NetworkEventStream for Arc +where + T: ?Sized, + T: NetworkEventStream, +{ + fn event_stream(&self, name: &'static str) -> Pin + Send>> { + T::event_stream(self, name) + } +} + +/// Trait for providing information about the local network state +pub trait NetworkStateInfo { + /// Returns the local external addresses. + fn external_addresses(&self) -> Vec; + + /// Returns the local Peer ID. + fn local_peer_id(&self) -> PeerId; +} + +impl NetworkStateInfo for Arc +where + T: ?Sized, + T: NetworkStateInfo, +{ + fn external_addresses(&self) -> Vec { + T::external_addresses(self) + } + + fn local_peer_id(&self) -> PeerId { + T::local_peer_id(self) + } +} + +/// Reserved slot in the notifications buffer, ready to accept data. +pub trait NotificationSenderReady { + /// Consumes this slots reservation and actually queues the notification. + /// + /// NOTE: Traits can't consume itself, but calling this method second time will return an error. + fn send(&mut self, notification: Vec) -> Result<(), NotificationSenderError>; +} + +/// A `NotificationSender` allows for sending notifications to a peer with a chosen protocol. +#[async_trait::async_trait] +pub trait NotificationSender { + /// Returns a future that resolves when the `NotificationSender` is ready to send a + /// notification. + async fn ready(&self) + -> Result, NotificationSenderError>; +} + +/// Error returned by [`NetworkNotification::notification_sender`]. +#[derive(Debug, thiserror::Error)] +pub enum NotificationSenderError { + /// The notification receiver has been closed, usually because the underlying connection + /// closed. + /// + /// Some of the notifications most recently sent may not have been received. However, + /// the peer may still be connected and a new `NotificationSender` for the same + /// protocol obtained from [`NetworkNotification::notification_sender`]. + #[error("The notification receiver has been closed")] + Closed, + /// Protocol name hasn't been registered. + #[error("Protocol name hasn't been registered")] + BadProtocol, +} + +/// Provides ability to send network notifications. +pub trait NetworkNotification { + /// Appends a notification to the buffer of pending outgoing notifications with the given peer. + /// Has no effect if the notifications channel with this protocol name is not open. + /// + /// If the buffer of pending outgoing notifications with that peer is full, the notification + /// is silently dropped and the connection to the remote will start being shut down. This + /// happens if you call this method at a higher rate than the rate at which the peer processes + /// these notifications, or if the available network bandwidth is too low. + /// + /// For this reason, this method is considered soft-deprecated. You are encouraged to use + /// [`NetworkNotification::notification_sender`] instead. + /// + /// > **Note**: The reason why this is a no-op in the situation where we have no channel is + /// > that we don't guarantee message delivery anyway. Networking issues can cause + /// > connections to drop at any time, and higher-level logic shouldn't differentiate + /// > between the remote voluntarily closing a substream or a network error + /// > preventing the message from being delivered. 
+ /// + /// The protocol must have been registered with + /// `crate::config::NetworkConfiguration::notifications_protocols`. + fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec); + + /// Obtains a [`NotificationSender`] for a connected peer, if it exists. + /// + /// A `NotificationSender` is scoped to a particular connection to the peer that holds + /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two + /// steps: + /// + /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready + /// for another notification, yielding a [`NotificationSenderReady`] token. + /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation + /// can only fail if the underlying notification substream or connection has suddenly closed. + /// + /// An error is returned by [`NotificationSenderReady::send`] if there exists no open + /// notifications substream with that combination of peer and protocol, or if the remote + /// has asked to close the notifications substream. If that happens, it is guaranteed that an + /// [`Event::NotificationStreamClosed`] has been generated on the stream returned by + /// [`NetworkEventStream::event_stream`]. + /// + /// If the remote requests to close the notifications substream, all notifications successfully + /// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the + /// substream actually gets closed, but attempting to enqueue more notifications will now + /// return an error. It is however possible for the entire connection to be abruptly closed, + /// in which case enqueued notifications will be lost. + /// + /// The protocol must have been registered with + /// `crate::config::NetworkConfiguration::notifications_protocols`. + /// + /// # Usage + /// + /// This method returns a struct that allows waiting until there is space available in the + /// buffer of messages towards the given peer. If the peer processes notifications at a slower + /// rate than we send them, this buffer will quickly fill up. + /// + /// As such, you should never do something like this: + /// + /// ```ignore + /// // Do NOT do this + /// for peer in peers { + /// if let Ok(n) = network.notification_sender(peer, ...) { + /// if let Ok(s) = n.ready().await { + /// let _ = s.send(...); + /// } + /// } + /// } + /// ``` + /// + /// Doing so would slow down all peers to the rate of the slowest one. A malicious or + /// malfunctioning peer could intentionally process notifications at a very slow rate. + /// + /// Instead, you are encouraged to maintain your own buffer of notifications on top of the one + /// maintained by `sc-network`, and use `notification_sender` to progressively send out + /// elements from your buffer. If this additional buffer is full (which will happen at some + /// point if the peer is too slow to process notifications), appropriate measures can be taken, + /// such as removing non-critical notifications from the buffer or disconnecting the peer + /// using [`NetworkPeers::disconnect_peer`]. + /// + /// + /// Notifications Per-peer buffer + /// broadcast +-------> of notifications +--> `notification_sender` +--> Internet + /// ^ (not covered by + /// | sc-network) + /// + + /// Notifications should be dropped + /// if buffer is full + /// + /// + /// See also the `sc-network-gossip` crate for a higher-level way to send notifications. 
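The two-step flow described in the comment above, written out as a minimal sketch; `send_one` and the protocol string are placeholders, and the trait signatures are assumed to match the upstream definitions introduced by this patch.

```rust
use sc_network::PeerId;
use sc_network_common::service::{
    NetworkNotification, NotificationSender, NotificationSenderError, NotificationSenderReady,
};

/// Illustrative helper only: send a single notification with backpressure.
async fn send_one<N: NetworkNotification>(
    network: &N,
    peer: PeerId,
    payload: Vec<u8>,
) -> Result<(), NotificationSenderError> {
    // 1. Obtain a sender scoped to this peer/protocol pair.
    let sender = network.notification_sender(peer, "/example/notifications/1".into())?;
    // 2. Wait for buffer space, then enqueue the notification.
    let mut ready = sender.ready().await?;
    ready.send(payload)?;
    Ok(())
}
```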
+ fn notification_sender( + &self, + target: PeerId, + protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError>; +} + +impl NetworkNotification for Arc +where + T: ?Sized, + T: NetworkNotification, +{ + fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec) { + T::write_notification(self, target, protocol, message) + } + + fn notification_sender( + &self, + target: PeerId, + protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + T::notification_sender(self, target, protocol) + } +} + +/// Provides ability to send network requests. +#[async_trait::async_trait] +pub trait NetworkRequest { + /// Sends a single targeted request to a specific peer. On success, returns the response of + /// the peer. + /// + /// Request-response protocols are a way to complement notifications protocols, but + /// notifications should remain the default ways of communicating information. For example, a + /// peer can announce something through a notification, after which the recipient can obtain + /// more information by performing a request. + /// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way + /// you will get an error immediately for disconnected peers, instead of waiting for a + /// potentially very long connection attempt, which would suggest that something is wrong + /// anyway, as you are supposed to be connected because of the notification protocol. + /// + /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. + /// Such restrictions, if desired, need to be enforced at the call site(s). + /// + /// The protocol must have been registered through + /// `NetworkConfiguration::request_response_protocols`. + async fn request( + &self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + connect: IfDisconnected, + ) -> Result, RequestFailure>; + + /// Variation of `request` which starts a request whose response is delivered on a provided + /// channel. + /// + /// Instead of blocking and waiting for a reply, this function returns immediately, sending + /// responses via the passed in sender. This alternative API exists to make it easier to + /// integrate with message passing APIs. + /// + /// Keep in mind that the connected receiver might receive a `Canceled` event in case of a + /// closing connection. This is expected behaviour. With `request` you would get a + /// `RequestFailure::Network(OutboundFailure::ConnectionClosed)` in that case. + fn start_request( + &self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + tx: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, + ); +} + +// Manual implementation to avoid extra boxing here +impl NetworkRequest for Arc +where + T: ?Sized, + T: NetworkRequest, +{ + fn request<'life0, 'async_trait>( + &'life0 self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + connect: IfDisconnected, + ) -> Pin, RequestFailure>> + Send + 'async_trait>> + where + 'life0: 'async_trait, + Self: 'async_trait, + { + T::request(self, target, protocol, request, connect) + } + + fn start_request( + &self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + tx: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, + ) { + T::start_request(self, target, protocol, request, tx, connect) + } +} + +/// Provides ability to propagate transactions over the network. 
+pub trait NetworkTransaction { + /// You may call this when new transactions are imported by the transaction pool. + /// + /// All transactions will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + fn trigger_repropagate(&self); + + /// You must call when new transaction is imported by the transaction pool. + /// + /// This transaction will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + fn propagate_transaction(&self, hash: H); +} + +impl NetworkTransaction for Arc +where + T: ?Sized, + T: NetworkTransaction, +{ + fn trigger_repropagate(&self) { + T::trigger_repropagate(self) + } + + fn propagate_transaction(&self, hash: H) { + T::propagate_transaction(self, hash) + } +} + +/// Provides ability to announce blocks to the network. +pub trait NetworkBlock { + /// Make sure an important block is propagated to peers. + /// + /// In chain-based consensus, we often need to make sure non-best forks are + /// at least temporarily synced. This function forces such an announcement. + fn announce_block(&self, hash: BlockHash, data: Option>); + + /// Inform the network service about new best imported block. + fn new_best_block_imported(&self, hash: BlockHash, number: BlockNumber); +} + +impl NetworkBlock for Arc +where + T: ?Sized, + T: NetworkBlock, +{ + fn announce_block(&self, hash: BlockHash, data: Option>) { + T::announce_block(self, hash, data) + } + + fn new_best_block_imported(&self, hash: BlockHash, number: BlockNumber) { + T::new_best_block_imported(self, hash, number) + } +} diff --git a/client/network/src/service/signature.rs b/client/network/common/src/service/signature.rs similarity index 96% rename from client/network/src/service/signature.rs rename to client/network/common/src/service/signature.rs index d21d28a3007b5..602ef3d82979a 100644 --- a/client/network/src/service/signature.rs +++ b/client/network/common/src/service/signature.rs @@ -18,7 +18,10 @@ // // If you read this, you are very thorough, congratulations. -use super::*; +use libp2p::{ + identity::{error::SigningError, Keypair, PublicKey}, + PeerId, +}; /// A result of signing a message with a network identity. 
Since `PeerId` is potentially a hash of a /// `PublicKey`, you need to reveal the `PublicKey` next to the signature, so the verifier can check diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 515608df13d0f..4ff415788f4ea 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -21,7 +21,7 @@ use crate::{ discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, peer_info, protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, - request_responses, DhtEvent, ObservedRole, + request_responses, }; use bytes::Bytes; @@ -41,7 +41,11 @@ use log::debug; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::import_queue::{IncomingBlock, Origin}; -use sc_network_common::{config::ProtocolId, request_responses::ProtocolConfig}; +use sc_network_common::{ + config::ProtocolId, + protocol::event::{DhtEvent, ObservedRole}, + request_responses::{IfDisconnected, ProtocolConfig, RequestFailure}, +}; use sc_peerset::PeersetHandle; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus::BlockOrigin; @@ -57,9 +61,7 @@ use std::{ time::Duration, }; -pub use crate::request_responses::{ - IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, RequestId, ResponseFailure, -}; +pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, ResponseFailure}; /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 83bc1075b8bad..b1d70c28289bd 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -262,22 +262,26 @@ pub mod transactions; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; -pub use protocol::{ - event::{DhtEvent, Event, ObservedRole}, - PeerInfo, -}; -pub use sc_network_common::sync::{ - warp::{WarpSyncPhase, WarpSyncProgress}, - StateDownloadProgress, SyncState, +pub use protocol::PeerInfo; +pub use sc_network_common::{ + protocol::event::{DhtEvent, Event, ObservedRole}, + request_responses::{IfDisconnected, RequestFailure}, + service::{ + KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkRequest, NetworkSigner, + NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, + NetworkTransaction, Signature, SigningError, + }, + sync::{ + warp::{WarpSyncPhase, WarpSyncProgress}, + StateDownloadProgress, SyncState, + }, }; pub use service::{ - DecodingError, IfDisconnected, KademliaKey, Keypair, NetworkService, NetworkWorker, - NotificationSender, NotificationSenderReady, OutboundFailure, PublicKey, RequestFailure, - Signature, SigningError, + DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, + NotificationSenderReady, OutboundFailure, PublicKey, }; pub use sc_peerset::ReputationChange; -use sp_runtime::traits::{Block as BlockT, NumberFor}; /// The maximum allowed number of established connections per peer. /// @@ -296,35 +300,3 @@ pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} - -/// Trait for providing information about the local network state -pub trait NetworkStateInfo { - /// Returns the local external addresses. - fn external_addresses(&self) -> Vec; - - /// Returns the local Peer ID. - fn local_peer_id(&self) -> PeerId; -} - -/// Overview status of the network. 
-#[derive(Clone)] -pub struct NetworkStatus { - /// Current global sync state. - pub sync_state: SyncState, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_sync_peers: u32, - /// Total number of connected peers - pub num_connected_peers: usize, - /// Total number of active peers. - pub num_active_peers: usize, - /// The total number of bytes received. - pub total_bytes_inbound: u64, - /// The total number of bytes sent. - pub total_bytes_outbound: u64, - /// State sync in progress. - pub state_sync: Option, - /// Warp sync in progress. - pub warp_sync: Option>, -} diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c51667017d15c..351e7d207ad1e 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -18,7 +18,6 @@ use crate::{ config, error, - request_responses::RequestFailure, utils::{interval, LruHashSet}, }; @@ -45,6 +44,7 @@ use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; use sc_consensus::import_queue::{BlockImportError, BlockImportStatus, IncomingBlock, Origin}; use sc_network_common::{ config::ProtocolId, + request_responses::RequestFailure, sync::{ message::{ BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, BlockState, @@ -76,7 +76,6 @@ use std::{ mod notifications; -pub mod event; pub mod message; pub use notifications::{NotificationsSink, NotifsHandlerError, Ready}; diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 0d8c6c33b1c95..9eab85a4c1ce1 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -50,7 +50,9 @@ use libp2p::{ NetworkBehaviourAction, PollParameters, }, }; -use sc_network_common::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; +use sc_network_common::request_responses::{ + IfDisconnected, IncomingRequest, OutgoingResponse, ProtocolConfig, RequestFailure, +}; use std::{ borrow::Cow, collections::{hash_map::Entry, HashMap}, @@ -118,26 +120,6 @@ impl From<(Cow<'static, str>, RequestId)> for ProtocolRequestId { } } -/// When sending a request, what to do on a disconnected recipient. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum IfDisconnected { - /// Try to connect to the peer. - TryConnect, - /// Just fail if the destination is not yet connected. - ImmediateError, -} - -/// Convenience functions for `IfDisconnected`. -impl IfDisconnected { - /// Shall we connect to a disconnected peer? - pub fn should_connect(self) -> bool { - match self { - Self::TryConnect => true, - Self::ImmediateError => false, - } - } -} - /// Implementation of `NetworkBehaviour` that provides support for request-response protocols. pub struct RequestResponsesBehaviour { /// The multiple sub-protocols, by name. @@ -787,23 +769,6 @@ pub enum RegisterError { DuplicateProtocol(Cow<'static, str>), } -/// Error in a request. -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum RequestFailure { - #[error("We are not currently connected to the requested peer.")] - NotConnected, - #[error("Given protocol hasn't been registered.")] - UnknownProtocol, - #[error("Remote has closed the substream before answering, thereby signaling that it considers the request as valid, but refused to answer it.")] - Refused, - #[error("The remote replied, but the local node is no longer interested in the response.")] - Obsolete, - /// Problem on the network. 
- #[error("Problem on the network: {0}")] - Network(OutboundFailure), -} - /// Error when processing a request sent by a remote. #[derive(Debug, thiserror::Error)] pub enum ResponseFailure { diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 1210b0ca64224..bf15a9250d82c 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -37,16 +37,17 @@ use crate::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, protocol::{ - self, event::Event, message::generic::Roles, NotificationsSink, NotifsHandlerError, - PeerInfo, Protocol, Ready, + self, message::generic::Roles, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, + Ready, }, - transactions, transport, DhtEvent, ExHashT, NetworkStateInfo, NetworkStatus, ReputationChange, + transactions, transport, ExHashT, ReputationChange, }; use codec::Encode as _; use futures::{channel::oneshot, prelude::*}; use libp2p::{ core::{either::EitherError, upgrade, ConnectedPoint, Executor}, + kad::record::Key as KademliaKey, multiaddr, ping::Failure as PingFailure, swarm::{ @@ -60,7 +61,17 @@ use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; -use sc_network_common::sync::{SyncState, SyncStatus}; +use sc_network_common::{ + protocol::event::{DhtEvent, Event}, + request_responses::{IfDisconnected, RequestFailure}, + service::{ + NetworkDHTProvider, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSigner, + NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, + NotificationSender as NotificationSenderT, NotificationSenderError, + NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, + }, + sync::{SyncState, SyncStatus}, +}; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; @@ -81,24 +92,15 @@ use std::{ task::Poll, }; -pub use behaviour::{ - IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, ResponseFailure, -}; +pub use behaviour::{InboundFailure, OutboundFailure, ResponseFailure}; mod metrics; mod out_events; -mod signature; #[cfg(test)] mod tests; -pub use libp2p::{ - identity::{ - error::{DecodingError, SigningError}, - Keypair, PublicKey, - }, - kad::record::Key as KademliaKey, -}; -pub use signature::Signature; +pub use libp2p::identity::{error::DecodingError, Keypair, PublicKey}; +use sc_network_common::service::{NetworkBlock, NetworkRequest, NetworkTransaction}; /// Substrate network service. Handles network IO and manages connectivity. pub struct NetworkService { @@ -723,289 +725,6 @@ where } impl NetworkService { - /// Returns the local `PeerId`. - pub fn local_peer_id(&self) -> &PeerId { - &self.local_peer_id - } - - /// Signs the message with the `KeyPair` that defined the local `PeerId`. - pub fn sign_with_local_identity( - &self, - msg: impl AsRef<[u8]>, - ) -> Result { - Signature::sign_message(msg.as_ref(), &self.local_identity) - } - - /// Set authorized peers. - /// - /// Need a better solution to manage authorized peers, but now just use reserved peers for - /// prototyping. - pub fn set_authorized_peers(&self, peers: HashSet) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); - } - - /// Set authorized_only flag. 
- /// - /// Need a better solution to decide authorized_only, but now just use reserved_only flag for - /// prototyping. - pub fn set_authorized_only(&self, reserved_only: bool) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); - } - - /// Adds an address known to a node. - pub fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); - } - - /// Appends a notification to the buffer of pending outgoing notifications with the given peer. - /// Has no effect if the notifications channel with this protocol name is not open. - /// - /// If the buffer of pending outgoing notifications with that peer is full, the notification - /// is silently dropped and the connection to the remote will start being shut down. This - /// happens if you call this method at a higher rate than the rate at which the peer processes - /// these notifications, or if the available network bandwidth is too low. - /// - /// For this reason, this method is considered soft-deprecated. You are encouraged to use - /// [`NetworkService::notification_sender`] instead. - /// - /// > **Note**: The reason why this is a no-op in the situation where we have no channel is - /// > that we don't guarantee message delivery anyway. Networking issues can cause - /// > connections to drop at any time, and higher-level logic shouldn't differentiate - /// > between the remote voluntarily closing a substream or a network error - /// > preventing the message from being delivered. - /// - /// The protocol must have been registered with - /// `crate::config::NetworkConfiguration::notifications_protocols`. - pub fn write_notification( - &self, - target: PeerId, - protocol: Cow<'static, str>, - message: Vec, - ) { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. - let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - // Notification silently discarded, as documented. - debug!( - target: "sub-libp2p", - "Attempted to send notification on missing or closed substream: {}, {:?}", - target, protocol, - ); - return - } - }; - - if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { - notifications_sizes_metric - .with_label_values(&["out", &protocol]) - .observe(message.len() as f64); - } - - // Sending is communicated to the `NotificationsSink`. - trace!( - target: "sub-libp2p", - "External API => Notification({:?}, {:?}, {} bytes)", - target, protocol, message.len() - ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); - sink.send_sync_notification(message); - } - - /// Obtains a [`NotificationSender`] for a connected peer, if it exists. - /// - /// A `NotificationSender` is scoped to a particular connection to the peer that holds - /// a receiver. With a `NotificationSender` at hand, sending a notification is done in two - /// steps: - /// - /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready - /// for another notification, yielding a [`NotificationSenderReady`] token. - /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation - /// can only fail if the underlying notification substream or connection has suddenly closed. 
- /// - /// An error is returned by [`NotificationSenderReady::send`] if there exists no open - /// notifications substream with that combination of peer and protocol, or if the remote - /// has asked to close the notifications substream. If that happens, it is guaranteed that an - /// [`Event::NotificationStreamClosed`] has been generated on the stream returned by - /// [`NetworkService::event_stream`]. - /// - /// If the remote requests to close the notifications substream, all notifications successfully - /// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the - /// substream actually gets closed, but attempting to enqueue more notifications will now - /// return an error. It is however possible for the entire connection to be abruptly closed, - /// in which case enqueued notifications will be lost. - /// - /// The protocol must have been registered with - /// `crate::config::NetworkConfiguration::notifications_protocols`. - /// - /// # Usage - /// - /// This method returns a struct that allows waiting until there is space available in the - /// buffer of messages towards the given peer. If the peer processes notifications at a slower - /// rate than we send them, this buffer will quickly fill up. - /// - /// As such, you should never do something like this: - /// - /// ```ignore - /// // Do NOT do this - /// for peer in peers { - /// if let Ok(n) = network.notification_sender(peer, ...) { - /// if let Ok(s) = n.ready().await { - /// let _ = s.send(...); - /// } - /// } - /// } - /// ``` - /// - /// Doing so would slow down all peers to the rate of the slowest one. A malicious or - /// malfunctioning peer could intentionally process notifications at a very slow rate. - /// - /// Instead, you are encouraged to maintain your own buffer of notifications on top of the one - /// maintained by `sc-network`, and use `notification_sender` to progressively send out - /// elements from your buffer. If this additional buffer is full (which will happen at some - /// point if the peer is too slow to process notifications), appropriate measures can be taken, - /// such as removing non-critical notifications from the buffer or disconnecting the peer - /// using [`NetworkService::disconnect_peer`]. - /// - /// - /// Notifications Per-peer buffer - /// broadcast +-------> of notifications +--> `notification_sender` +--> Internet - /// ^ (not covered by - /// | sc-network) - /// + - /// Notifications should be dropped - /// if buffer is full - /// - /// - /// See also the `sc-network-gossip` crate for a higher-level way to send notifications. - pub fn notification_sender( - &self, - target: PeerId, - protocol: Cow<'static, str>, - ) -> Result { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. - let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - return Err(NotificationSenderError::Closed) - } - }; - - let notification_size_metric = self - .notifications_sizes_metric - .as_ref() - .map(|histogram| histogram.with_label_values(&["out", &protocol])); - - Ok(NotificationSender { sink, protocol_name: protocol, notification_size_metric }) - } - - /// Returns a stream containing the events that happen on the network. - /// - /// If this method is called multiple times, the events are duplicated. 
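Editorial aside, not part of the patch: the buffering advice in the doc comment above (carried over to the relocated trait) is worth a concrete sketch. The helper below is a minimal illustration under assumptions; the queue layout and function name are invented, and it drives the two-step `ready()`/`send()` flow through the boxed sender returned by the trait-based `notification_sender`.

```rust
use std::{borrow::Cow, collections::VecDeque};

use libp2p::PeerId;
use sc_network_common::service::{
	NetworkNotification, NotificationSender as _, NotificationSenderReady as _,
};

/// Drain an application-side queue of notifications for one peer, so a slow
/// peer only ever stalls its own queue.
async fn drain_peer_queue<N: NetworkNotification + ?Sized>(
	network: &N,
	peer: PeerId,
	protocol: Cow<'static, str>,
	queue: &mut VecDeque<Vec<u8>>,
) {
	while let Some(payload) = queue.pop_front() {
		// The sender is scoped to the current substream; if it is gone, drop the backlog.
		let sender = match network.notification_sender(peer, protocol.clone()) {
			Ok(sender) => sender,
			Err(_) => return,
		};
		// Step 1: wait for room in the per-peer `sc-network` buffer.
		let mut ready = match sender.ready().await {
			Ok(ready) => ready,
			Err(_) => return,
		};
		// Step 2: enqueue the notification for sending.
		if ready.send(payload).is_err() {
			return
		}
	}
}
```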
- /// - /// The stream never ends (unless the `NetworkWorker` gets shut down). - /// - /// The name passed is used to identify the channel in the Prometheus metrics. Note that the - /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having - /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory - pub fn event_stream(&self, name: &'static str) -> impl Stream { - let (tx, rx) = out_events::channel(name); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); - rx - } - - /// Sends a single targeted request to a specific peer. On success, returns the response of - /// the peer. - /// - /// Request-response protocols are a way to complement notifications protocols, but - /// notifications should remain the default ways of communicating information. For example, a - /// peer can announce something through a notification, after which the recipient can obtain - /// more information by performing a request. - /// As such, call this function with `IfDisconnected::ImmediateError` for `connect`. This way - /// you will get an error immediately for disconnected peers, instead of waiting for a - /// potentially very long connection attempt, which would suggest that something is wrong - /// anyway, as you are supposed to be connected because of the notification protocol. - /// - /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. - /// Such restrictions, if desired, need to be enforced at the call site(s). - /// - /// The protocol must have been registered through - /// [`NetworkConfiguration::request_response_protocols`]( - /// crate::config::NetworkConfiguration::request_response_protocols). - pub async fn request( - &self, - target: PeerId, - protocol: impl Into>, - request: Vec, - connect: IfDisconnected, - ) -> Result, RequestFailure> { - let (tx, rx) = oneshot::channel(); - - self.start_request(target, protocol, request, tx, connect); - - match rx.await { - Ok(v) => v, - // The channel can only be closed if the network worker no longer exists. If the - // network worker no longer exists, then all connections to `target` are necessarily - // closed, and we legitimately report this situation as a "ConnectionClosed". - Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)), - } - } - - /// Variation of `request` which starts a request whose response is delivered on a provided - /// channel. - /// - /// Instead of blocking and waiting for a reply, this function returns immediately, sending - /// responses via the passed in sender. This alternative API exists to make it easier to - /// integrate with message passing APIs. - /// - /// Keep in mind that the connected receiver might receive a `Canceled` event in case of a - /// closing connection. This is expected behaviour. With `request` you would get a - /// `RequestFailure::Network(OutboundFailure::ConnectionClosed)` in that case. - pub fn start_request( - &self, - target: PeerId, - protocol: impl Into>, - request: Vec, - tx: oneshot::Sender, RequestFailure>>, - connect: IfDisconnected, - ) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { - target, - protocol: protocol.into(), - request, - pending_response: tx, - connect, - }); - } - - /// High-level network status information. - /// - /// Returns an error if the `NetworkWorker` is no longer running. 
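Editorial aside, not part of the patch: for the channel-based variant described above, a caller wires up a `oneshot` pair and hands the sender to `start_request`; the response or failure arrives later on the receiver. A minimal sketch, with an assumed protocol name and helper name:

```rust
use std::borrow::Cow;

use futures::channel::oneshot;
use libp2p::PeerId;
use sc_network_common::{
	request_responses::{IfDisconnected, RequestFailure},
	service::NetworkRequest,
};

/// Fire off a request and hand back the receiver; the response (or error)
/// is delivered on it once the network worker has processed the request.
fn start_proof_request<N: NetworkRequest>(
	network: &N,
	peer: PeerId,
) -> oneshot::Receiver<Result<Vec<u8>, RequestFailure>> {
	let (tx, rx) = oneshot::channel();
	network.start_request(
		peer,
		Cow::Borrowed("/mychain/proof/1"), // assumed protocol name
		b"proof-request".to_vec(),
		tx,
		IfDisconnected::ImmediateError,
	);
	// If the worker shuts down, the receiver resolves to `Canceled`, as documented.
	rx
}
```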
- pub async fn status(&self) -> Result, ()> { - let (tx, rx) = oneshot::channel(); - - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::NetworkStatus { pending_response: tx }); - - match rx.await { - Ok(v) => v.map_err(|_| ()), - // The channel can only be closed if the network worker no longer exists. - Err(_) => Err(()), - } - } - /// Get network state. /// /// **Note**: Use this only for debugging. This API is unstable. There are warnings literally @@ -1026,74 +745,97 @@ impl NetworkService { } } - /// You may call this when new transactions are imported by the transaction pool. - /// - /// All transactions will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - pub fn trigger_repropagate(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); - } - - /// You must call when new transaction is imported by the transaction pool. + /// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates. /// - /// This transaction will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - pub fn propagate_transaction(&self, hash: H) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); - } + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + fn split_multiaddr_and_peer_id( + &self, + peers: HashSet, + ) -> Result, String> { + peers + .into_iter() + .map(|mut addr| { + let peer = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) + .map_err(|_| "Invalid PeerId format".to_string())?, + _ => return Err("Missing PeerId from address".to_string()), + }; - /// Make sure an important block is propagated to peers. - /// - /// In chain-based consensus, we often need to make sure non-best forks are - /// at least temporarily synced. This function forces such an announcement. - pub fn announce_block(&self, hash: B::Hash, data: Option>) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); + // Make sure the local peer ID is never added to the PSM + // or added as a "known address", even if given. + if peer == self.local_peer_id { + Err("Local peer ID in peer set.".to_string()) + } else { + Ok((peer, addr)) + } + }) + .collect::, String>>() } +} - /// Report a given peer as either beneficial (+) or costly (-) according to the - /// given scalar. - pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - self.peerset.report_peer(who, cost_benefit); +impl sp_consensus::SyncOracle for NetworkService { + fn is_major_syncing(&self) -> bool { + self.is_major_syncing.load(Ordering::Relaxed) } - /// Disconnect from a node as soon as possible. - /// - /// This triggers the same effects as if the connection had closed itself spontaneously. - /// - /// See also [`NetworkService::remove_from_peers_set`], which has the same effect but also - /// prevents the local node from re-establishing an outgoing substream to this peer until it - /// is added again. 
- pub fn disconnect_peer(&self, who: PeerId, protocol: impl Into>) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol.into())); + fn is_offline(&self) -> bool { + self.num_connected.load(Ordering::Relaxed) == 0 } +} +impl sc_consensus::JustificationSyncLink for NetworkService { /// Request a justification for the given block from the network. /// /// On success, the justification will be passed to the import queue that was part at /// initialization as part of the configuration. - pub fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + fn request_justification(&self, hash: &B::Hash, number: NumberFor) { let _ = self .to_worker .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); } - /// Clear all pending justification requests. - pub fn clear_justification_requests(&self) { + fn clear_justification_requests(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); } +} - /// Are we in the process of downloading the chain? - pub fn is_major_syncing(&self) -> bool { - self.is_major_syncing.load(Ordering::Relaxed) +impl NetworkStateInfo for NetworkService +where + B: sp_runtime::traits::Block, + H: ExHashT, +{ + /// Returns the local external addresses. + fn external_addresses(&self) -> Vec { + self.external_addresses.lock().clone() + } + + /// Returns the local Peer ID. + fn local_peer_id(&self) -> PeerId { + self.local_peer_id } +} +impl NetworkSigner for NetworkService +where + B: sp_runtime::traits::Block, + H: ExHashT, +{ + fn sign_with_local_identity(&self, msg: impl AsRef<[u8]>) -> Result { + Signature::sign_message(msg.as_ref(), &self.local_identity) + } +} + +impl NetworkDHTProvider for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ /// Start getting a value from the DHT. /// /// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an /// item on the [`NetworkWorker`] stream. - pub fn get_value(&self, key: &KademliaKey) { + fn get_value(&self, key: &KademliaKey) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); } @@ -1101,27 +843,86 @@ impl NetworkService { /// /// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an /// item on the [`NetworkWorker`] stream. - pub fn put_value(&self, key: KademliaKey, value: Vec) { + fn put_value(&self, key: KademliaKey, value: Vec) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); } +} + +impl NetworkSyncForkRequest> for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + /// Configure an explicit fork sync request. + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// `set_sync_fork_request` should only be used if external code detects that there's + /// a stale fork missing. + /// Passing empty `peers` set effectively removes the sync request. + fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + } +} - /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. 
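Editorial aside, not part of the patch: the DHT methods above only enqueue work; confirmation arrives as `DhtEvent`s on the worker's event stream. The sketch below combines `NetworkDHTProvider` with `NetworkEventStream` to await the outcome of a `put_value`. The helper name and stream label are assumptions; the traits and event types are the ones this patch moves into `sc-network-common`.

```rust
use futures::StreamExt;
use sc_network_common::{
	protocol::event::{DhtEvent, Event},
	service::{KademliaKey, NetworkDHTProvider, NetworkEventStream},
};

/// Store a record in the DHT and wait until the worker reports the outcome.
async fn put_and_confirm<N>(network: &N, key: KademliaKey, value: Vec<u8>) -> bool
where
	N: NetworkDHTProvider + NetworkEventStream,
{
	// Subscribe before issuing the request so the confirmation cannot be missed.
	let mut events = network.event_stream("dht-example");
	network.put_value(key.clone(), value);

	while let Some(event) = events.next().await {
		match event {
			Event::Dht(DhtEvent::ValuePut(k)) if k == key => return true,
			Event::Dht(DhtEvent::ValuePutFailed(k)) if k == key => return false,
			_ => {},
		}
	}
	false
}
```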
- pub fn accept_unreserved_peers(&self) { +#[async_trait::async_trait] +impl NetworkStatusProvider for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + async fn status(&self) -> Result, ()> { + let (tx, rx) = oneshot::channel(); + + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NetworkStatus { pending_response: tx }); + + match rx.await { + Ok(v) => v.map_err(|_| ()), + // The channel can only be closed if the network worker no longer exists. + Err(_) => Err(()), + } + } +} + +impl NetworkPeers for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn set_authorized_peers(&self, peers: HashSet) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); + } + + fn set_authorized_only(&self, reserved_only: bool) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); + } + + fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + } + + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + self.peerset.report_peer(who, cost_benefit); + } + + fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol)); + } + + fn accept_unreserved_peers(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(false)); } - /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing - /// purposes. - pub fn deny_unreserved_peers(&self) { + fn deny_unreserved_peers(&self) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true)); } - /// Adds a `PeerId` and its address as reserved. The string should encode the address - /// and peer ID of the remote node. - /// - /// Returns an `Err` if the given string is not a valid multiaddress - /// or contains an invalid peer ID (which includes the local peer ID). - pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + fn add_reserved_peer(&self, peer: String) -> Result<(), String> { let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?; // Make sure the local peer ID is never added to the PSM. if peer_id == self.local_peer_id { @@ -1135,28 +936,11 @@ impl NetworkService { Ok(()) } - /// Removes a `PeerId` from the list of reserved peers. - pub fn remove_reserved_peer(&self, peer_id: PeerId) { + fn remove_reserved_peer(&self, peer_id: PeerId) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id)); } - /// Sets the reserved set of a protocol to the given set of peers. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// The node will start establishing/accepting connections and substreams to/from peers in this - /// set, if it doesn't have any substream open with them yet. - /// - /// Note however, if a call to this function results in less peers on the reserved set, they - /// will not necessarily get disconnected (depending on available free slots in the peer set). - /// If you want to also disconnect those removed peers, you will have to call - /// `remove_from_peers_set` on those in addition to updating the reserved set. You can omit - /// this step if the peer set is in reserved only mode. 
- /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - pub fn set_reserved_peers( + fn set_reserved_peers( &self, protocol: Cow<'static, str>, peers: HashSet, @@ -1187,14 +971,7 @@ impl NetworkService { Ok(()) } - /// Add peers to a peer set. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - pub fn add_peers_to_reserved_set( + fn add_peers_to_reserved_set( &self, protocol: Cow<'static, str>, peers: HashSet, @@ -1220,8 +997,7 @@ impl NetworkService { Ok(()) } - /// Remove peers from a peer set. - pub fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { + fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { for peer_id in peers.into_iter() { let _ = self .to_worker @@ -1229,26 +1005,7 @@ impl NetworkService { } } - /// Configure an explicit fork sync request. - /// Note that this function should not be used for recent blocks. - /// Sync should be able to download all the recent forks normally. - /// `set_sync_fork_request` should only be used if external code detects that there's - /// a stale fork missing. - /// Passing empty `peers` set effectively removes the sync request. - pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); - } - - /// Add a peer to a set of peers. - /// - /// If the set has slots available, it will try to open a substream with this peer. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also - /// consist of only `/p2p/`. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - pub fn add_to_peers_set( + fn add_to_peers_set( &self, protocol: Cow<'static, str>, peers: HashSet, @@ -1274,10 +1031,7 @@ impl NetworkService { Ok(()) } - /// Remove peers from a peer set. - /// - /// If we currently have an open substream with this peer, it will soon be closed. - pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { + fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { for peer_id in peers.into_iter() { let _ = self .to_worker @@ -1285,90 +1039,158 @@ impl NetworkService { } } - /// Returns the number of peers we're connected to. - pub fn num_connected(&self) -> usize { + fn sync_num_connected(&self) -> usize { self.num_connected.load(Ordering::Relaxed) } +} - /// Inform the network service about new best imported block. - pub fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number)); +impl NetworkEventStream for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn event_stream(&self, name: &'static str) -> Pin + Send>> { + let (tx, rx) = out_events::channel(name); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); + Box::pin(rx) } +} - /// Utility function to extract `PeerId` from each `Multiaddr` for peer set updates. 
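Editorial aside, not part of the patch: a quick reminder of the address format the reserved-peer methods above expect. Each `Multiaddr` has to end in a `/p2p/<peer id>` component, and `add_reserved_peer` additionally accepts the printable string form. The helper below is illustrative only; the caller supplies a real address.

```rust
use std::{borrow::Cow, collections::HashSet};

use sc_network::Multiaddr;
use sc_network_common::service::NetworkPeers;

/// Reserve one peer, given its full address including the trailing `/p2p/<peer id>`.
fn reserve_peer<N: NetworkPeers>(
	network: &N,
	protocol: Cow<'static, str>,
	addr_with_peer_id: &str, // e.g. "/ip4/.../tcp/30333/p2p/<peer id>"
) -> Result<(), String> {
	// The printable form is parsed by `add_reserved_peer` itself.
	network.add_reserved_peer(addr_with_peer_id.to_string())?;

	// `set_reserved_peers` takes parsed multiaddrs and replaces the whole
	// reserved set of the given protocol.
	let addr: Multiaddr = addr_with_peer_id.parse().map_err(|e| format!("{:?}", e))?;
	let mut peers = HashSet::new();
	peers.insert(addr);
	network.set_reserved_peers(protocol, peers)
}
```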
- /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - fn split_multiaddr_and_peer_id( - &self, - peers: HashSet, - ) -> Result, String> { - peers - .into_iter() - .map(|mut addr| { - let peer = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) - .map_err(|_| "Invalid PeerId format".to_string())?, - _ => return Err("Missing PeerId from address".to_string()), - }; +impl NetworkNotification for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec) { + // We clone the `NotificationsSink` in order to be able to unlock the network-wide + // `peers_notifications_sinks` mutex as soon as possible. + let sink = { + let peers_notifications_sinks = self.peers_notifications_sinks.lock(); + if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { + sink.clone() + } else { + // Notification silently discarded, as documented. + debug!( + target: "sub-libp2p", + "Attempted to send notification on missing or closed substream: {}, {:?}", + target, protocol, + ); + return + } + }; - // Make sure the local peer ID is never added to the PSM - // or added as a "known address", even if given. - if peer == self.local_peer_id { - Err("Local peer ID in peer set.".to_string()) - } else { - Ok((peer, addr)) - } - }) - .collect::, String>>() - } -} + if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { + notifications_sizes_metric + .with_label_values(&["out", &protocol]) + .observe(message.len() as f64); + } -impl sp_consensus::SyncOracle for NetworkService { - fn is_major_syncing(&mut self) -> bool { - Self::is_major_syncing(self) + // Sending is communicated to the `NotificationsSink`. + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {:?}, {} bytes)", + target, protocol, message.len() + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); + sink.send_sync_notification(message); } - fn is_offline(&mut self) -> bool { - self.num_connected.load(Ordering::Relaxed) == 0 + fn notification_sender( + &self, + target: PeerId, + protocol: Cow<'static, str>, + ) -> Result, NotificationSenderError> { + // We clone the `NotificationsSink` in order to be able to unlock the network-wide + // `peers_notifications_sinks` mutex as soon as possible. 
+ let sink = { + let peers_notifications_sinks = self.peers_notifications_sinks.lock(); + if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { + sink.clone() + } else { + return Err(NotificationSenderError::Closed) + } + }; + + let notification_size_metric = self + .notifications_sizes_metric + .as_ref() + .map(|histogram| histogram.with_label_values(&["out", &protocol])); + + Ok(Box::new(NotificationSender { sink, protocol_name: protocol, notification_size_metric })) } } -impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle for &'a NetworkService { - fn is_major_syncing(&mut self) -> bool { - NetworkService::is_major_syncing(self) +#[async_trait::async_trait] +impl NetworkRequest for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + async fn request( + &self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + connect: IfDisconnected, + ) -> Result, RequestFailure> { + let (tx, rx) = oneshot::channel(); + + self.start_request(target, protocol, request, tx, connect); + + match rx.await { + Ok(v) => v, + // The channel can only be closed if the network worker no longer exists. If the + // network worker no longer exists, then all connections to `target` are necessarily + // closed, and we legitimately report this situation as a "ConnectionClosed". + Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)), + } } - fn is_offline(&mut self) -> bool { - self.num_connected.load(Ordering::Relaxed) == 0 + fn start_request( + &self, + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + tx: oneshot::Sender, RequestFailure>>, + connect: IfDisconnected, + ) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { + target, + protocol: protocol.into(), + request, + pending_response: tx, + connect, + }); } } -impl sc_consensus::JustificationSyncLink for NetworkService { - fn request_justification(&self, hash: &B::Hash, number: NumberFor) { - Self::request_justification(self, hash, number); +impl NetworkTransaction for NetworkService +where + B: BlockT + 'static, + H: ExHashT, +{ + fn trigger_repropagate(&self) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); } - fn clear_justification_requests(&self) { - Self::clear_justification_requests(self); + fn propagate_transaction(&self, hash: H) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); } } -impl NetworkStateInfo for NetworkService +impl NetworkBlock> for NetworkService where - B: sp_runtime::traits::Block, + B: BlockT + 'static, H: ExHashT, { - /// Returns the local external addresses. - fn external_addresses(&self) -> Vec { - self.external_addresses.lock().clone() + fn announce_block(&self, hash: B::Hash, data: Option>) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); } - /// Returns the local Peer ID. - fn local_peer_id(&self) -> PeerId { - self.local_peer_id + fn new_best_block_imported(&self, hash: B::Hash, number: NumberFor) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NewBestBlockImported(hash, number)); } } @@ -1385,26 +1207,27 @@ pub struct NotificationSender { notification_size_metric: Option, } -impl NotificationSender { - /// Returns a future that resolves when the `NotificationSender` is ready to send a - /// notification. 
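Editorial aside, not part of the patch: the `NetworkBlock` impl above is normally driven from consensus or block-import code. A small illustrative helper, with an assumed name and `is_new_best` flag, showing the intended split between announcing a fork block and advancing the best block:

```rust
use sc_network_common::service::NetworkBlock;

/// Tell the network about a freshly imported block: always announce it (so
/// peers can sync a non-best fork), and only move the best block forward if
/// it actually is the new best.
fn notify_block_import<Hash: Clone, Number, Net>(
	network: &Net,
	hash: Hash,
	number: Number,
	is_new_best: bool,
) where
	Net: NetworkBlock<Hash, Number>,
{
	// `None`: no extra data attached to the announcement.
	network.announce_block(hash.clone(), None);
	if is_new_best {
		network.new_best_block_imported(hash, number);
	}
}
```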
- pub async fn ready(&self) -> Result, NotificationSenderError> { - Ok(NotificationSenderReady { +#[async_trait::async_trait] +impl NotificationSenderT for NotificationSender { + async fn ready( + &self, + ) -> Result, NotificationSenderError> { + Ok(Box::new(NotificationSenderReady { ready: match self.sink.reserve_notification().await { - Ok(r) => r, + Ok(r) => Some(r), Err(()) => return Err(NotificationSenderError::Closed), }, peer_id: self.sink.peer_id(), protocol_name: &self.protocol_name, notification_size_metric: self.notification_size_metric.clone(), - }) + })) } } /// Reserved slot in the notifications buffer, ready to accept data. #[must_use] pub struct NotificationSenderReady<'a> { - ready: Ready<'a>, + ready: Option>, /// Target of the notification. peer_id: &'a PeerId, @@ -1417,11 +1240,8 @@ pub struct NotificationSenderReady<'a> { notification_size_metric: Option, } -impl<'a> NotificationSenderReady<'a> { - /// Consumes this slots reservation and actually queues the notification. - pub fn send(self, notification: impl Into>) -> Result<(), NotificationSenderError> { - let notification = notification.into(); - +impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { + fn send(&mut self, notification: Vec) -> Result<(), NotificationSenderError> { if let Some(notification_size_metric) = &self.notification_size_metric { notification_size_metric.observe(notification.len() as f64); } @@ -1433,26 +1253,14 @@ impl<'a> NotificationSenderReady<'a> { ); trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id); - self.ready.send(notification).map_err(|()| NotificationSenderError::Closed) + self.ready + .take() + .ok_or(NotificationSenderError::Closed)? + .send(notification) + .map_err(|()| NotificationSenderError::Closed) } } -/// Error returned by [`NetworkService::send_notification`]. -#[derive(Debug, thiserror::Error)] -pub enum NotificationSenderError { - /// The notification receiver has been closed, usually because the underlying connection - /// closed. - /// - /// Some of the notifications most recently sent may not have been received. However, - /// the peer may still be connected and a new `NotificationSender` for the same - /// protocol obtained from [`NetworkService::notification_sender`]. - #[error("The notification receiver has been closed")] - Closed, - /// Protocol name hasn't been registered. - #[error("Protocol name hasn't been registered")] - BadProtocol, -} - /// Messages sent from the `NetworkService` to the `NetworkWorker`. /// /// Each entry corresponds to a method of `NetworkService`. diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index c95b46af4cefa..4144d7f19551e 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -31,11 +31,10 @@ //! - Send events by calling [`OutChannels::send`]. Events are cloned for each sender in the //! collection. 
-use crate::Event; - use futures::{channel::mpsc, prelude::*, ready, stream::FusedStream}; use parking_lot::Mutex; use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64}; +use sc_network_common::protocol::event::Event; use std::{ cell::RefCell, fmt, diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index f757bf4891fbc..6ccca17650b67 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -16,11 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{config, Event, NetworkService, NetworkWorker}; +use crate::{config, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; -use sc_network_common::config::ProtocolId; +use sc_network_common::{ + config::ProtocolId, + protocol::event::Event, + service::{NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo}, +}; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, @@ -192,7 +196,7 @@ fn build_nodes_one_proto() -> ( set_config: config::SetConfig { reserved_nodes: vec![config::MultiaddrWithPeerId { multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), + peer_id: node1.local_peer_id(), }], ..Default::default() }, @@ -214,18 +218,10 @@ fn notifications_state_consistent() { // Write some initial notifications that shouldn't get through. for _ in 0..(rand::random::() % 5) { - node1.write_notification( - node2.local_peer_id().clone(), - PROTOCOL_NAME, - b"hello world".to_vec(), - ); + node1.write_notification(node2.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec()); } for _ in 0..(rand::random::() % 5) { - node2.write_notification( - node1.local_peer_id().clone(), - PROTOCOL_NAME, - b"hello world".to_vec(), - ); + node2.write_notification(node1.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec()); } async_std::task::block_on(async move { @@ -249,14 +245,14 @@ fn notifications_state_consistent() { // test consists in ensuring that notifications get ignored if the stream isn't open. if rand::random::() % 5 >= 3 { node1.write_notification( - node2.local_peer_id().clone(), + node2.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec(), ); } if rand::random::() % 5 >= 3 { node2.write_notification( - node1.local_peer_id().clone(), + node1.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec(), ); @@ -264,10 +260,10 @@ fn notifications_state_consistent() { // Also randomly disconnect the two nodes from time to time. if rand::random::() % 20 == 0 { - node1.disconnect_peer(node2.local_peer_id().clone(), PROTOCOL_NAME); + node1.disconnect_peer(node2.local_peer_id(), PROTOCOL_NAME); } if rand::random::() % 20 == 0 { - node2.disconnect_peer(node1.local_peer_id().clone(), PROTOCOL_NAME); + node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME); } // Grab next event from either `events_stream1` or `events_stream2`. @@ -295,7 +291,7 @@ fn notifications_state_consistent() { something_happened = true; assert!(!node1_to_node2_open); node1_to_node2_open = true; - assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(remote, node2.local_peer_id()); }, future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. 
@@ -304,7 +300,7 @@ fn notifications_state_consistent() { something_happened = true; assert!(!node2_to_node1_open); node2_to_node1_open = true; - assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(remote, node1.local_peer_id()); }, future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. @@ -312,7 +308,7 @@ fn notifications_state_consistent() { if protocol == PROTOCOL_NAME { assert!(node1_to_node2_open); node1_to_node2_open = false; - assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(remote, node2.local_peer_id()); }, future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. @@ -320,14 +316,14 @@ fn notifications_state_consistent() { if protocol == PROTOCOL_NAME { assert!(node2_to_node1_open); node2_to_node1_open = false; - assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(remote, node1.local_peer_id()); }, future::Either::Left(Event::NotificationsReceived { remote, .. }) => { assert!(node1_to_node2_open); - assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(remote, node2.local_peer_id()); if rand::random::() % 5 >= 4 { node1.write_notification( - node2.local_peer_id().clone(), + node2.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec(), ); @@ -335,10 +331,10 @@ fn notifications_state_consistent() { }, future::Either::Right(Event::NotificationsReceived { remote, .. }) => { assert!(node2_to_node1_open); - assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(remote, node1.local_peer_id()); if rand::random::() % 5 >= 4 { node2.write_notification( - node1.local_peer_id().clone(), + node1.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec(), ); @@ -373,7 +369,7 @@ fn lots_of_incoming_peers_works() { ..config::NetworkConfiguration::new_local() }); - let main_node_peer_id = *main_node.local_peer_id(); + let main_node_peer_id = main_node.local_peer_id(); // We spawn background tasks and push them in this `Vec`. They will all be waited upon before // this test ends. @@ -476,8 +472,13 @@ fn notifications_back_pressure() { // Sending! 
for num in 0..TOTAL_NOTIFS { - let notif = node1.notification_sender(node2_id.clone(), PROTOCOL_NAME).unwrap(); - notif.ready().await.unwrap().send(format!("hello #{}", num)).unwrap(); + let notif = node1.notification_sender(node2_id, PROTOCOL_NAME).unwrap(); + notif + .ready() + .await + .unwrap() + .send(format!("hello #{}", num).into_bytes()) + .unwrap(); } receiver.await; @@ -514,7 +515,7 @@ fn fallback_name_working() { set_config: config::SetConfig { reserved_nodes: vec![config::MultiaddrWithPeerId { multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), + peer_id: node1.local_peer_id(), }], ..Default::default() }, diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 342b6a0430272..f7e4d774ca812 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -32,7 +32,7 @@ use crate::{ protocol::message, service::NetworkService, utils::{interval, LruHashSet}, - Event, ExHashT, ObservedRole, + ExHashT, }; use codec::{Decode, Encode}; @@ -40,7 +40,11 @@ use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network_common::config::ProtocolId; +use sc_network_common::{ + config::ProtocolId, + protocol::event::{Event, ObservedRole}, + service::{NetworkEventStream, NetworkNotification, NetworkPeers}, +}; use sp_runtime::traits::Block as BlockT; use std::{ borrow::Cow, @@ -176,7 +180,7 @@ impl TransactionsHandlerPrototype { transaction_pool: Arc>, metrics_registry: Option<&Registry>, ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { - let event_stream = service.event_stream("transactions-handler").boxed(); + let event_stream = service.event_stream("transactions-handler"); let (to_handler, from_controller) = mpsc::unbounded(); let gossip_enabled = Arc::new(AtomicBool::new(false)); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 09b4139c213a6..3ba663aba2267 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -45,7 +45,8 @@ use sc_client_api::{ }; use sc_consensus::{ BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxJustificationImport, - ForkChoiceStrategy, ImportResult, JustificationImport, LongestChain, Verifier, + ForkChoiceStrategy, ImportResult, JustificationImport, JustificationSyncLink, LongestChain, + Verifier, }; pub use sc_network::config::EmptyTransactionPool; use sc_network::{ @@ -56,8 +57,9 @@ use sc_network::{ Multiaddr, NetworkService, NetworkWorker, }; pub use sc_network_common::config::ProtocolId; -use sc_network_common::sync::warp::{ - AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider, +use sc_network_common::{ + service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, + sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ @@ -71,7 +73,7 @@ use sp_blockchain::{ }; use sp_consensus::{ block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator}, - BlockOrigin, Error as ConsensusError, + BlockOrigin, Error as ConsensusError, SyncOracle, }; use sp_core::H256; use sp_runtime::{ @@ -243,7 +245,7 @@ where { /// Get this peer ID. 
pub fn id(&self) -> PeerId { - *self.network.service().local_peer_id() + self.network.service().local_peer_id() } /// Returns true if we're major syncing. @@ -797,7 +799,7 @@ where let addrs = connect_to .iter() .map(|v| { - let peer_id = *self.peer(*v).network_service().local_peer_id(); + let peer_id = self.peer(*v).network_service().local_peer_id(); let multiaddr = self.peer(*v).listen_addr.clone(); MultiaddrWithPeerId { peer_id, multiaddr } }) @@ -893,7 +895,7 @@ where self.mut_peers(move |peers| { for peer in peers.iter_mut() { peer.network - .add_known_address(*network.service().local_peer_id(), listen_addr.clone()); + .add_known_address(network.service().local_peer_id(), listen_addr.clone()); } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 8da2d4be3adde..ab26c0c38596c 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -29,6 +29,7 @@ threadpool = "1.7" tracing = "0.1.29" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index c80b511c84d17..f379ebad17bbf 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -325,19 +325,88 @@ impl AsyncApi { mod tests { use super::*; use sc_client_db::offchain::LocalStorage; - use sc_network::{NetworkStateInfo, PeerId}; + use sc_network::{PeerId, ReputationChange}; + use sc_network_common::service::{NetworkPeers, NetworkStateInfo}; use sp_core::offchain::{DbExternalities, Externalities}; - use std::time::SystemTime; + use std::{borrow::Cow, time::SystemTime}; pub(super) struct TestNetwork(); - impl NetworkProvider for TestNetwork { + impl NetworkPeers for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!() + unimplemented!(); } fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!() + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) { + unimplemented!(); + } + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: Vec, + ) { + unimplemented!(); + } + + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } + + fn 
sync_num_connected(&self) -> usize { + unimplemented!(); } } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index d54d491b04c43..14a2408e61a70 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -35,14 +35,14 @@ #![warn(missing_docs)] -use std::{collections::HashSet, fmt, marker::PhantomData, sync::Arc}; +use std::{fmt, marker::PhantomData, sync::Arc}; use futures::{ future::{ready, Future}, prelude::*, }; use parking_lot::Mutex; -use sc_network::{ExHashT, NetworkService, NetworkStateInfo, PeerId}; +use sc_network_common::service::{NetworkPeers, NetworkStateInfo}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::{offchain, traits::SpawnNamed, ExecutionContext}; use sp_runtime::{ @@ -60,27 +60,9 @@ const LOG_TARGET: &str = "offchain-worker"; /// NetworkProvider provides [`OffchainWorkers`] with all necessary hooks into the /// underlying Substrate networking. -pub trait NetworkProvider: NetworkStateInfo { - /// Set the authorized peers. - fn set_authorized_peers(&self, peers: HashSet); +pub trait NetworkProvider: NetworkStateInfo + NetworkPeers {} - /// Set the authorized only flag. - fn set_authorized_only(&self, reserved_only: bool); -} - -impl NetworkProvider for NetworkService -where - B: traits::Block + 'static, - H: ExHashT, -{ - fn set_authorized_peers(&self, peers: HashSet) { - NetworkService::set_authorized_peers(self, peers) - } - - fn set_authorized_only(&self, reserved_only: bool) { - NetworkService::set_authorized_only(self, reserved_only) - } -} +impl NetworkProvider for T where T: NetworkStateInfo + NetworkPeers {} /// Options for [`OffchainWorkers`] pub struct OffchainWorkerOptions { @@ -266,11 +248,11 @@ mod tests { use futures::executor::block_on; use sc_block_builder::BlockBuilderProvider as _; use sc_client_api::Backend as _; - use sc_network::{Multiaddr, PeerId}; + use sc_network::{Multiaddr, PeerId, ReputationChange}; use sc_transaction_pool::{BasicPool, FullChainApi}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; - use std::sync::Arc; + use std::{borrow::Cow, collections::HashSet, sync::Arc}; use substrate_test_runtime_client::{ runtime::Block, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClient, TestClientBuilderExt, @@ -288,13 +270,81 @@ mod tests { } } - impl NetworkProvider for TestNetwork { + impl NetworkPeers for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { - unimplemented!() + unimplemented!(); } fn set_authorized_only(&self, _reserved_only: bool) { - unimplemented!() + unimplemented!(); + } + + fn add_known_address(&self, _peer_id: PeerId, _addr: Multiaddr) { + unimplemented!(); + } + + fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) { + unimplemented!(); + } + + fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + unimplemented!(); + } + + fn accept_unreserved_peers(&self) { + unimplemented!(); + } + + fn deny_unreserved_peers(&self) { + unimplemented!(); + } + + fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + unimplemented!(); + } + + fn remove_reserved_peer(&self, _peer_id: PeerId) { + unimplemented!(); + } + + fn set_reserved_peers( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn add_peers_to_reserved_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_peers_from_reserved_set( + &self, + _protocol: Cow<'static, str>, 
+ _peers: Vec, + ) { + unimplemented!(); + } + + fn add_to_peers_set( + &self, + _protocol: Cow<'static, str>, + _peers: HashSet, + ) -> Result<(), String> { + unimplemented!(); + } + + fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + unimplemented!(); + } + + fn sync_num_connected(&self) -> usize { + unimplemented!(); } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 55c0006de33fb..f81143583eacf 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -38,7 +38,10 @@ use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; use sc_network::{config::SyncMode, NetworkService}; -use sc_network_common::sync::warp::WarpSyncProvider; +use sc_network_common::{ + service::{NetworkStateInfo, NetworkStatusProvider, NetworkTransaction}, + sync::warp::WarpSyncProvider, +}; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, @@ -319,6 +322,31 @@ where ) } +/// Shared network instance implementing a set of mandatory traits. +pub trait SpawnTaskNetwork: + sc_offchain::NetworkProvider + + NetworkStateInfo + + NetworkTransaction + + NetworkStatusProvider + + Send + + Sync + + 'static +{ +} + +impl SpawnTaskNetwork for T +where + Block: BlockT, + T: sc_offchain::NetworkProvider + + NetworkStateInfo + + NetworkTransaction + + NetworkStatusProvider + + Send + + Sync + + 'static, +{ +} + /// Parameters to pass into `build`. pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { /// The service configuration. @@ -337,7 +365,7 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub rpc_builder: Box Result, Error>>, /// A shared network instance. - pub network: Arc::Hash>>, + pub network: Arc>, /// A Sender for RPC requests. pub system_rpc_tx: TracingUnboundedSender>, /// Telemetry instance for this node. 
@@ -349,7 +377,7 @@ pub fn build_offchain_workers( config: &Configuration, spawn_handle: SpawnTaskHandle, client: Arc, - network: Arc::Hash>>, + network: Arc, ) -> Option>> where TBl: BlockT, @@ -516,13 +544,14 @@ where Ok(rpc_handlers) } -async fn transaction_notifications( - transaction_pool: Arc, - network: Arc::Hash>>, +async fn transaction_notifications( + transaction_pool: Arc, + network: Network, telemetry: Option, ) where - TBl: BlockT, - TExPool: MaintainedTransactionPool::Hash>, + Block: BlockT, + ExPool: MaintainedTransactionPool::Hash>, + Network: NetworkTransaction<::Hash> + Send + Sync, { // transaction notifications transaction_pool @@ -542,13 +571,18 @@ async fn transaction_notifications( .await; } -fn init_telemetry>( +fn init_telemetry( config: &mut Configuration, - network: Arc::Hash>>, - client: Arc, + network: Network, + client: Arc, telemetry: &mut Telemetry, sysinfo: Option, -) -> sc_telemetry::Result { +) -> sc_telemetry::Result +where + Block: BlockT, + Client: BlockBackend, + Network: NetworkStateInfo, +{ let genesis_hash = client.block_hash(Zero::zero()).ok().flatten().unwrap_or_default(); let connection_message = ConnectionMessage { name: config.network.node_name.to_owned(), diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 5291d219e8102..2f35a04451e79 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -42,9 +42,11 @@ use jsonrpsee::{core::Error as JsonRpseeError, RpcModule}; use log::{debug, error, warn}; use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider}; use sc_network::PeerId; +use sc_network_common::service::NetworkBlock; use sc_rpc_server::WsConfig; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; +use sp_consensus::SyncOracle; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index ef3132f61ab99..13b249a7b9563 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -22,7 +22,8 @@ use crate::config::Configuration; use futures_timer::Delay; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::{ClientInfo, UsageProvider}; -use sc_network::{config::Role, NetworkService, NetworkStatus}; +use sc_network::config::Role; +use sc_network_common::service::{NetworkStatus, NetworkStatusProvider}; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::{MaintainedTransactionPool, PoolStatus}; use sc_utils::metrics::register_globals; @@ -182,15 +183,16 @@ impl MetricsService { /// Returns a never-ending `Future` that performs the /// metric and telemetry updates with information from /// the given sources. - pub async fn run( + pub async fn run( mut self, client: Arc, transactions: Arc, - network: Arc::Hash>>, + network: TNet, ) where TBl: Block, TCl: ProvideRuntimeApi + UsageProvider, TExPool: MaintainedTransactionPool::Hash>, + TNet: NetworkStatusProvider, { let mut timer = Delay::new(Duration::from_secs(0)); let timer_interval = Duration::from_secs(5); diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index d003db57eb7ac..01c3ee2348ef5 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -27,6 +27,7 @@ sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../.. 
sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-executor = { version = "0.10.0-dev", path = "../../executor" } sc-network = { version = "0.10.0-dev", path = "../../network" } +sc-network-common = { version = "0.10.0-dev", path = "../../network/common" } sc-service = { version = "0.10.0-dev", features = ["test-helpers"], path = "../../service" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 2d63362daffba..9c720c6fedea0 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -26,6 +26,7 @@ use sc_network::{ config::{NetworkConfiguration, TransportConfig}, multiaddr, Multiaddr, }; +use sc_network_common::service::{NetworkBlock, NetworkPeers, NetworkStateInfo}; use sc_service::{ client::Client, config::{BasePath, DatabaseSource, KeystoreConfig}, @@ -320,7 +321,7 @@ where handle.spawn(service.clone().map_err(|_| ())); let addr = - addr.with(multiaddr::Protocol::P2p((*service.network().local_peer_id()).into())); + addr.with(multiaddr::Protocol::P2p((service.network().local_peer_id()).into())); self.authority_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -340,7 +341,7 @@ where handle.spawn(service.clone().map_err(|_| ())); let addr = - addr.with(multiaddr::Protocol::P2p((*service.network().local_peer_id()).into())); + addr.with(multiaddr::Protocol::P2p((service.network().local_peer_id()).into())); self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -387,7 +388,7 @@ where } network.run_until_all_full(move |_index, service| { - let connected = service.network().num_connected(); + let connected = service.network().sync_num_connected(); debug!("Got {}/{} full connections...", connected, expected_full_connections); connected == expected_full_connections }); @@ -422,7 +423,7 @@ where } network.run_until_all_full(move |_index, service| { - let connected = service.network().num_connected(); + let connected = service.network().sync_num_connected(); debug!("Got {}/{} full connections...", connected, expected_full_connections); connected == expected_full_connections }); diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 4539cec2c8e0a..043533cbf2258 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -236,10 +236,10 @@ pub trait Proposer { pub trait SyncOracle { /// Whether the synchronization service is undergoing major sync. /// Returns true if so. - fn is_major_syncing(&mut self) -> bool; + fn is_major_syncing(&self) -> bool; /// Whether the synchronization service is offline. /// Returns true if so. - fn is_offline(&mut self) -> bool; + fn is_offline(&self) -> bool; } /// A synchronization oracle for when there is no network. 
@@ -247,10 +247,10 @@ pub trait SyncOracle { pub struct NoNetwork; impl SyncOracle for NoNetwork { - fn is_major_syncing(&mut self) -> bool { + fn is_major_syncing(&self) -> bool { false } - fn is_offline(&mut self) -> bool { + fn is_offline(&self) -> bool { false } } @@ -258,14 +258,14 @@ impl SyncOracle for NoNetwork { impl SyncOracle for Arc where T: ?Sized, - for<'r> &'r T: SyncOracle, + T: SyncOracle, { - fn is_major_syncing(&mut self) -> bool { - <&T>::is_major_syncing(&mut &**self) + fn is_major_syncing(&self) -> bool { + T::is_major_syncing(self) } - fn is_offline(&mut self) -> bool { - <&T>::is_offline(&mut &**self) + fn is_offline(&self) -> bool { + T::is_offline(self) } } From ef433a8c31947c73e9bad212a9672d1f841a9c0d Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 10 Aug 2022 10:39:24 +0200 Subject: [PATCH 018/348] Add BoundedVec::sort_by_key (#11998) Signed-off-by: Oliver Tale-Yazdi --- primitives/runtime/src/bounded/bounded_vec.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/primitives/runtime/src/bounded/bounded_vec.rs b/primitives/runtime/src/bounded/bounded_vec.rs index d5f3f2da0d615..aed1a156ad699 100644 --- a/primitives/runtime/src/bounded/bounded_vec.rs +++ b/primitives/runtime/src/bounded/bounded_vec.rs @@ -319,6 +319,17 @@ impl BoundedVec { self.0.sort_by(compare) } + /// Exactly the same semantics as [`slice::sort_by_key`]. + /// + /// This is safe since sorting cannot change the number of elements in the vector. + pub fn sort_by_key(&mut self, f: F) + where + F: FnMut(&T) -> K, + K: sp_std::cmp::Ord, + { + self.0.sort_by_key(f) + } + /// Exactly the same semantics as [`slice::sort`]. /// /// This is safe since sorting cannot change the number of elements in the vector. @@ -1189,4 +1200,12 @@ pub mod test { b1.iter().map(|x| x + 1).rev().take(2).try_collect(); assert!(b2.is_err()); } + + #[test] + fn bounded_vec_sort_by_key_works() { + let mut v: BoundedVec> = bounded_vec![-5, 4, 1, -3, 2]; + // Sort by absolute value. + v.sort_by_key(|k| k.abs()); + assert_eq!(v, vec![1, 2, -3, 4, -5]); + } } From edeba3f760fe54600ea38c8765a0d099e58d4799 Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Wed, 10 Aug 2022 12:36:16 +0200 Subject: [PATCH 019/348] Transaction payment runtime api: query call info and fee details (#11819) * Transaction payment RPC calls: query call info * transaction payment pallet - runtime api - add query_call info and fee_details * remove unused deps * separate call runtime api * undo fmt for unchanged code * system config call bounded to GetDispatchInfo, drop Call generic for query call info/fee * impl GetDispatchInfo for Extrinsics within runtime test-utils * introduced runtime api methods accept encoded Call instead of Call type * replace Bytes by Vec, docs for for new api, drop len argument, drop GetDispatchInfo bound from system_Config::Call * clean up toml and extra impl for dropped bound * panic if Call can not be decoded * revert to 6d0ca79 * fmt and docs * rustfmt --- bin/node-template/runtime/src/lib.rs | 17 +++++ bin/node/runtime/src/lib.rs | 11 ++++ .../rpc/runtime-api/src/lib.rs | 12 ++++ frame/transaction-payment/src/lib.rs | 63 +++++++++++++++++++ 4 files changed, 103 insertions(+) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 88fc86db02ef9..b43fbde52dcdc 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -467,6 +467,23 @@ impl_runtime_apis! 
{ } } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: Call, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: Call, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn benchmark_metadata(extra: bool) -> ( diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2ac1e6444f119..6bfbe3413058a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1965,6 +1965,17 @@ impl_runtime_apis! { } } + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info(call: Call, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details(call: Call, len: u32) -> FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + } + impl pallet_mmr::primitives::MmrApi< Block, mmr::Hash, diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index 5a0c70138db24..6944593daa57a 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -31,4 +31,16 @@ sp_api::decl_runtime_apis! { fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; } + + pub trait TransactionPaymentCallApi + where + Balance: Codec + MaybeDisplay, + Call: Codec, + { + /// Query information of a dispatch class, weight, and fee of a given encoded `Call`. + fn query_call_info(call: Call, len: u32) -> RuntimeDispatchInfo; + + /// Query fee details of a given encoded `Call`. + fn query_call_fee_details(call: Call, len: u32) -> FeeDetails; + } } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index fe37acb214452..ff65c0d2735fd 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -445,6 +445,32 @@ where } } + /// Query information of a dispatch class, weight, and fee of a given encoded `Call`. + pub fn query_call_info(call: T::Call, len: u32) -> RuntimeDispatchInfo> + where + T::Call: Dispatchable + GetDispatchInfo, + { + let dispatch_info = ::get_dispatch_info(&call); + let DispatchInfo { weight, class, .. } = dispatch_info; + + RuntimeDispatchInfo { + weight, + class, + partial_fee: Self::compute_fee(len, &dispatch_info, 0u32.into()), + } + } + + /// Query fee details of a given encoded `Call`. + pub fn query_call_fee_details(call: T::Call, len: u32) -> FeeDetails> + where + T::Call: Dispatchable + GetDispatchInfo, + { + let dispatch_info = ::get_dispatch_info(&call); + let tip = 0u32.into(); + + Self::compute_fee_details(len, &dispatch_info, tip) + } + /// Compute the final fee value for a particular transaction. 
pub fn compute_fee(len: u32, info: &DispatchInfoOf, tip: BalanceOf) -> BalanceOf where @@ -1206,6 +1232,43 @@ mod tests { }); } + #[test] + fn query_call_info_and_fee_details_works() { + let call = Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + let info = call.get_dispatch_info(); + let encoded_call = call.encode(); + let len = encoded_call.len() as u32; + + ExtBuilder::default().base_weight(5).weight_fee(2).build().execute_with(|| { + // all fees should be x1.5 + >::put(Multiplier::saturating_from_rational(3, 2)); + + assert_eq!( + TransactionPayment::query_call_info(call.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + + len as u64 /* len * 1 */ + + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ + }, + ); + + assert_eq!( + TransactionPayment::query_call_fee_details(call, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, /* base * weight_fee */ + len_fee: len as u64, /* len * 1 */ + adjusted_weight_fee: info.weight.min(BlockWeights::get().max_block) as u64 * + 2 * 3 / 2 /* weight * weight_fee * multipler */ + }), + tip: 0, + }, + ); + }); + } + #[test] fn compute_fee_works_without_multiplier() { ExtBuilder::default() From 1fd71c7845d6c28c532795ec79106d959dd1fe30 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 10 Aug 2022 14:58:52 +0100 Subject: [PATCH 020/348] Use `#[pallet::unbounded]` tag in FRAME System (#11946) * use unbounded in system * update ui tests --- frame/sudo/src/mock.rs | 12 ++++++------ frame/support/src/weights.rs | 4 ++-- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- frame/system/src/lib.rs | 15 ++++++++++----- 5 files changed, 20 insertions(+), 15 deletions(-) diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 2e2a4abafcd98..71f0f26b1a1d5 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -44,7 +44,6 @@ pub mod logger { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] pub struct Pallet(PhantomData); #[pallet::call] @@ -57,7 +56,7 @@ pub mod logger { ) -> DispatchResultWithPostInfo { // Ensure that the `origin` is `Root`. ensure_root(origin)?; - >::append(i); + >::try_append(i).map_err(|_| "could not append")?; Self::deposit_event(Event::AppendI32 { value: i, weight }); Ok(().into()) } @@ -70,8 +69,8 @@ pub mod logger { ) -> DispatchResultWithPostInfo { // Ensure that the `origin` is some signed account. 
let sender = ensure_signed(origin)?; - >::append(i); - >::append(sender.clone()); + >::try_append(i).map_err(|_| "could not append")?; + >::try_append(sender.clone()).map_err(|_| "could not append")?; Self::deposit_event(Event::AppendI32AndAccount { sender, value: i, weight }); Ok(().into()) } @@ -86,11 +85,12 @@ pub mod logger { #[pallet::storage] #[pallet::getter(fn account_log)] - pub(super) type AccountLog = StorageValue<_, Vec, ValueQuery>; + pub(super) type AccountLog = + StorageValue<_, BoundedVec>, ValueQuery>; #[pallet::storage] #[pallet::getter(fn i32_log)] - pub(super) type I32Log = StorageValue<_, Vec, ValueQuery>; + pub(super) type I32Log = StorageValue<_, BoundedVec>, ValueQuery>; } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index c37a72536bddf..fb34484416063 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -128,7 +128,7 @@ use crate::{ dispatch::{DispatchError, DispatchErrorWithPostInfo, DispatchResultWithPostInfo}, traits::Get, }; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -676,7 +676,7 @@ where } /// A struct holding value for each `DispatchClass`. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct PerDispatchClass { /// Value for `Normal` extrinsics. normal: T, diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index c2ec8cf7f4d05..6c49e1220a3a5 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 72 others + and 75 others = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index dbbc426de2906..9a0731683a953 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 72 others + and 75 others = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for 
`frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 94605c2da59bd..12d415e4e2b42 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -233,7 +233,8 @@ pub mod pallet { + Default + MaybeDisplay + AtLeast32Bit - + Copy; + + Copy + + MaxEncodedLen; /// The block number type used by the runtime. type BlockNumber: Parameter @@ -320,7 +321,7 @@ pub mod pallet { /// Data to be associated with an account (other than nonce/transaction counter, which this /// pallet does regardless). - type AccountData: Member + FullCodec + Clone + Default + TypeInfo; + type AccountData: Member + FullCodec + Clone + Default + TypeInfo + MaxEncodedLen; /// Handler for when a new account has just been created. type OnNewAccount: OnNewAccount; @@ -355,7 +356,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub (super) trait Store)] - #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::hooks] @@ -578,6 +578,7 @@ pub mod pallet { /// Extrinsics data for the current block (maps an extrinsic's index to its data). #[pallet::storage] #[pallet::getter(fn extrinsic_data)] + #[pallet::unbounded] pub(super) type ExtrinsicData = StorageMap<_, Twox64Concat, u32, Vec, ValueQuery>; @@ -593,6 +594,7 @@ pub mod pallet { /// Digest of the current block, also part of the block header. #[pallet::storage] + #[pallet::unbounded] #[pallet::getter(fn digest)] pub(super) type Digest = StorageValue<_, generic::Digest, ValueQuery>; @@ -604,6 +606,7 @@ pub mod pallet { /// Events have a large in-memory size. Box the events to not go out-of-memory /// just in case someone still reads them from within the runtime. #[pallet::storage] + #[pallet::unbounded] pub(super) type Events = StorageValue<_, Vec>>, ValueQuery>; @@ -623,12 +626,14 @@ pub mod pallet { /// the `EventIndex` then in case if the topic has the same contents on the next block /// no notification will be triggered thus the event might be lost. #[pallet::storage] + #[pallet::unbounded] #[pallet::getter(fn event_topics)] pub(super) type EventTopics = StorageMap<_, Blake2_128Concat, T::Hash, Vec<(T::BlockNumber, EventIndex)>, ValueQuery>; /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. #[pallet::storage] + #[pallet::unbounded] pub type LastRuntimeUpgrade = StorageValue<_, LastRuntimeUpgradeInfo>; /// True if we have upgraded so that `type RefCount` is `u32`. False (default) if not. @@ -690,7 +695,7 @@ pub type Key = Vec; pub type KeyValue = (Vec, Vec); /// A phase of a block's execution. -#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] #[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] pub enum Phase { /// Applying an extrinsic. @@ -738,7 +743,7 @@ type EventIndex = u32; pub type RefCount = u32; /// Information of an account. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub struct AccountInfo { /// The number of transactions this account has sent. 
pub nonce: Index, From a1a9b475c354d873f91135ed5fa028ac9ef6a2c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 10 Aug 2022 22:27:01 +0200 Subject: [PATCH 021/348] transactional: Wrap `pallet::calls` directly in storage layers (#11927) * transactional: Wrap `pallet::calls` directly in storage layers Before this pr we only wrapped `pallet::calls` into storage layers when executing the calls with `dispatch`. This pr is solving that by wrapping each call function inside a storage layer. * Teach `BasicExternalities` transactions support * Fix crates * FMT * Fix benchmarking tests * Use correct span * Support old decl macros * Fix test * Apply suggestions from code review Co-authored-by: Oliver Tale-Yazdi * Update frame/state-trie-migration/src/lib.rs * Update frame/state-trie-migration/src/lib.rs * Update frame/state-trie-migration/src/lib.rs * Feedback * Apply suggestions from code review Co-authored-by: cheme Co-authored-by: Oliver Tale-Yazdi Co-authored-by: cheme --- frame/state-trie-migration/src/lib.rs | 128 ++++++------ .../procedural/src/pallet/expand/call.rs | 26 ++- .../procedural/src/pallet/parse/call.rs | 20 +- frame/support/src/dispatch.rs | 14 +- frame/support/test/tests/construct_runtime.rs | 178 ++++++++-------- frame/support/test/tests/storage_layers.rs | 2 + frame/transaction-storage/src/lib.rs | 4 +- primitives/state-machine/src/basic.rs | 192 +++++++----------- .../src/overlayed_changes/changeset.rs | 31 ++- .../src/overlayed_changes/mod.rs | 15 ++ 10 files changed, 316 insertions(+), 294 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 94f6c1f223b9c..353bc93e5ab94 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -235,7 +235,10 @@ pub mod pallet { /// reading a key, we simply cannot know how many bytes it is. In other words, this should /// not be used in any environment where resources are strictly bounded (e.g. a parachain), /// but it is acceptable otherwise (relay chain, offchain workers). - pub fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) -> DispatchResult { + pub fn migrate_until_exhaustion( + &mut self, + limits: MigrationLimits, + ) -> Result<(), Error> { log!(debug, "running migrations on top of {:?} until {:?}", self, limits); if limits.item.is_zero() || limits.size.is_zero() { @@ -262,7 +265,7 @@ pub mod pallet { /// Migrate AT MOST ONE KEY. This can be either a top or a child key. /// /// This function is *the* core of this entire pallet. - fn migrate_tick(&mut self) -> DispatchResult { + fn migrate_tick(&mut self) -> Result<(), Error> { match (&self.progress_top, &self.progress_child) { (Progress::ToStart, _) => self.migrate_top(), (Progress::LastKey(_), Progress::LastKey(_)) => { @@ -301,7 +304,7 @@ pub mod pallet { /// Migrate the current child key, setting it to its new value, if one exists. /// /// It updates the dynamic counters. - fn migrate_child(&mut self) -> DispatchResult { + fn migrate_child(&mut self) -> Result<(), Error> { use sp_io::default_child_storage as child_io; let (maybe_current_child, child_root) = match (&self.progress_child, &self.progress_top) { @@ -350,7 +353,7 @@ pub mod pallet { /// Migrate the current top key, setting it to its new value, if one exists. /// /// It updates the dynamic counters. 
- fn migrate_top(&mut self) -> DispatchResult { + fn migrate_top(&mut self) -> Result<(), Error> { let maybe_current_top = match &self.progress_top { Progress::LastKey(last_top) => { let maybe_top: Option> = @@ -431,7 +434,7 @@ pub mod pallet { /// The auto migration task finished. AutoMigrationFinished, /// Migration got halted due to an error or miss-configuration. - Halted, + Halted { error: Error }, } /// The outer Pallet struct. @@ -516,8 +519,9 @@ pub mod pallet { pub type SignedMigrationMaxLimits = StorageValue<_, MigrationLimits, OptionQuery>; #[pallet::error] + #[derive(Clone, PartialEq)] pub enum Error { - /// max signed limits not respected. + /// Max signed limits not respected. MaxSignedLimits, /// A key was longer than the configured maximum. /// @@ -529,12 +533,12 @@ pub mod pallet { KeyTooLong, /// submitter does not have enough funds. NotEnoughFunds, - /// bad witness data provided. + /// Bad witness data provided. BadWitness, - /// upper bound of size is exceeded, - SizeUpperBoundExceeded, /// Signed migration is not allowed because the maximum limit is not set yet. SignedMigrationNotAllowed, + /// Bad child root provided. + BadChildRoot, } #[pallet::call] @@ -617,7 +621,7 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - return Err(Error::::SizeUpperBoundExceeded.into()) + return Ok(().into()) } Self::deposit_event(Event::::Migrated { @@ -634,13 +638,10 @@ pub mod pallet { MigrationProcess::::put(task); let post_info = PostDispatchInfo { actual_weight, pays_fee: Pays::No }; - match migration { - Ok(_) => Ok(post_info), - Err(error) => { - Self::halt(&error); - Err(DispatchErrorWithPostInfo { post_info, error }) - }, + if let Err(error) = migration { + Self::halt(error); } + Ok(post_info) } /// Migrate the list of top keys by iterating each of them one by one. @@ -679,7 +680,7 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - Err("wrong witness data".into()) + Ok(().into()) } else { Self::deposit_event(Event::::Migrated { top: keys.len() as u32, @@ -741,12 +742,9 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); debug_assert!(_remainder.is_zero()); Self::deposit_event(Event::::Slashed { who, amount: deposit }); - Err(DispatchErrorWithPostInfo { - error: "bad witness".into(), - post_info: PostDispatchInfo { - actual_weight: Some(T::WeightInfo::migrate_custom_child_fail()), - pays_fee: Pays::Yes, - }, + Ok(PostDispatchInfo { + actual_weight: Some(T::WeightInfo::migrate_custom_child_fail()), + pays_fee: Pays::Yes, }) } else { Self::deposit_event(Event::::Migrated { @@ -806,7 +804,7 @@ pub mod pallet { if let Some(limits) = Self::auto_limits() { let mut task = Self::migration_process(); if let Err(e) = task.migrate_until_exhaustion(limits) { - Self::halt(&e); + Self::halt(e); } let weight = Self::dynamic_weight(task.dyn_total_items(), task.dyn_size); @@ -849,10 +847,10 @@ pub mod pallet { } /// Put a stop to all ongoing migrations and logs an error. - fn halt(msg: &E) { - log!(error, "migration halted due to: {:?}", msg); + fn halt(error: Error) { + log!(error, "migration halted due to: {:?}", error); AutoLimits::::kill(); - Self::deposit_event(Event::::Halted); + Self::deposit_event(Event::::Halted { error }); } /// Convert a child root key, aka. 
"Child-bearing top key" into the proper format. @@ -871,7 +869,7 @@ pub mod pallet { fn transform_child_key_or_halt(root: &Vec) -> &[u8] { let key = Self::transform_child_key(root); if key.is_none() { - Self::halt("bad child root key"); + Self::halt(Error::::BadChildRoot); } key.unwrap_or_default() } @@ -961,8 +959,16 @@ mod benchmarks { frame_system::RawOrigin::Signed(caller.clone()).into(), vec![b"foo".to_vec()], 1, - ).is_err() - ) + ).is_ok() + ); + + frame_system::Pallet::::assert_last_event( + ::Event::from(crate::Event::Slashed { + who: caller.clone(), + amount: T::SignedDepositBase::get() + .saturating_add(T::SignedDepositPerItem::get().saturating_mul(1u32.into())), + }).into(), + ); } verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); @@ -1005,7 +1011,7 @@ mod benchmarks { StateTrieMigration::::childify("top"), vec![b"foo".to_vec()], 1, - ).is_err() + ).is_ok() ) } verify { @@ -1285,18 +1291,16 @@ mod test { SignedMigrationMaxLimits::::put(MigrationLimits { size: 1 << 20, item: 50 }); // fails if the top key is too long. - frame_support::assert_err_with_weight!( - StateTrieMigration::continue_migrate( - Origin::signed(1), - MigrationLimits { item: 50, size: 1 << 20 }, - Bounded::max_value(), - MigrationProcess::::get() - ), - Error::::KeyTooLong, - Some(2000000), - ); + frame_support::assert_ok!(StateTrieMigration::continue_migrate( + Origin::signed(1), + MigrationLimits { item: 50, size: 1 << 20 }, + Bounded::max_value(), + MigrationProcess::::get() + ),); // The auto migration halted. - System::assert_last_event(crate::Event::Halted {}.into()); + System::assert_last_event( + crate::Event::Halted { error: Error::::KeyTooLong }.into(), + ); // Limits are killed. assert!(AutoLimits::::get().is_none()); @@ -1322,18 +1326,16 @@ mod test { SignedMigrationMaxLimits::::put(MigrationLimits { size: 1 << 20, item: 50 }); // fails if the top key is too long. - frame_support::assert_err_with_weight!( - StateTrieMigration::continue_migrate( - Origin::signed(1), - MigrationLimits { item: 50, size: 1 << 20 }, - Bounded::max_value(), - MigrationProcess::::get() - ), - Error::::KeyTooLong, - Some(2000000), - ); + frame_support::assert_ok!(StateTrieMigration::continue_migrate( + Origin::signed(1), + MigrationLimits { item: 50, size: 1 << 20 }, + Bounded::max_value(), + MigrationProcess::::get() + )); // The auto migration halted. - System::assert_last_event(crate::Event::Halted {}.into()); + System::assert_last_event( + crate::Event::Halted { error: Error::::KeyTooLong }.into(), + ); // Limits are killed. assert!(AutoLimits::::get().is_none()); @@ -1484,7 +1486,7 @@ mod test { ..Default::default() } ), - Error::::BadWitness + Error::::BadWitness, ); // migrate all keys in a series of submissions @@ -1547,14 +1549,11 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); // note that we don't expect this to be a noop -- we do slash. - frame_support::assert_err!( - StateTrieMigration::migrate_custom_top( - Origin::signed(1), - vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], - correct_witness - 1, - ), - "wrong witness data" - ); + frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( + Origin::signed(1), + vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], + correct_witness - 1, + ),); // no funds should remain reserved. assert_eq!(Balances::reserved_balance(&1), 0); @@ -1584,13 +1583,12 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); // note that we don't expect this to be a noop -- we do slash. 
- assert!(StateTrieMigration::migrate_custom_child( + frame_support::assert_ok!(StateTrieMigration::migrate_custom_child( Origin::signed(1), StateTrieMigration::childify("chk1"), vec![b"key1".to_vec(), b"key2".to_vec()], 999999, // wrong witness - ) - .is_err()); + )); // no funds should remain reserved. assert_eq!(Balances::reserved_balance(&1), 0); diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index fe7589a8275d2..a9468451ad1d4 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -140,6 +140,24 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" }; + // Wrap all calls inside of storage layers + if let Some(syn::Item::Impl(item_impl)) = def + .call + .as_ref() + .map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index]) + { + item_impl.items.iter_mut().for_each(|i| { + if let syn::ImplItem::Method(method) = i { + let block = &method.block; + method.block = syn::parse_quote! {{ + // We execute all dispatchable in a new storage layer, allowing them + // to return an error at any point, and undoing any storage changes. + #frame_support::storage::with_storage_layer(|| #block) + }}; + } + }); + } + quote::quote_spanned!(span => #[doc(hidden)] pub mod __substrate_call_check { @@ -267,12 +285,8 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::sp_tracing::enter_span!( #frame_support::sp_tracing::trace_span!(stringify!(#fn_name)) ); - // We execute all dispatchable in a new storage layer, allowing them - // to return an error at any point, and undoing any storage changes. - #frame_support::storage::with_storage_layer(|| { - <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) - .map(Into::into).map_err(Into::into) - }) + <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) + .map(Into::into).map_err(Into::into) }, )* Self::__Ignore(_, _) => { diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index d8a81d699b8c2..336e08c3d39b7 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -48,8 +48,8 @@ pub struct CallDef { pub docs: Vec, } -#[derive(Clone)] /// Definition of dispatchable typically: `#[weight...] fn foo(origin .., param1: ...) -> ..` +#[derive(Clone)] pub struct CallVariantDef { /// Function name. pub name: syn::Ident, @@ -143,18 +143,18 @@ impl CallDef { index: usize, item: &mut syn::Item, ) -> syn::Result { - let item = if let syn::Item::Impl(item) = item { + let item_impl = if let syn::Item::Impl(item) = item { item } else { return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) }; let instances = vec![ - helper::check_impl_gen(&item.generics, item.impl_token.span())?, - helper::check_pallet_struct_usage(&item.self_ty)?, + helper::check_impl_gen(&item_impl.generics, item_impl.impl_token.span())?, + helper::check_pallet_struct_usage(&item_impl.self_ty)?, ]; - if let Some((_, _, for_)) = item.trait_ { + if let Some((_, _, for_)) = item_impl.trait_ { let msg = "Invalid pallet::call, expected no trait ident as in \ `impl<..> Pallet<..> { .. 
}`"; return Err(syn::Error::new(for_.span(), msg)) @@ -163,8 +163,8 @@ impl CallDef { let mut methods = vec![]; let mut indices = HashMap::new(); let mut last_index: Option = None; - for impl_item in &mut item.items { - if let syn::ImplItem::Method(method) = impl_item { + for item in &mut item_impl.items { + if let syn::ImplItem::Method(method) = item { if !matches!(method.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::call, dispatchable function must be public: \ `pub fn`"; @@ -290,7 +290,7 @@ impl CallDef { }); } else { let msg = "Invalid pallet::call, only method accepted"; - return Err(syn::Error::new(impl_item.span(), msg)) + return Err(syn::Error::new(item.span(), msg)) } } @@ -299,8 +299,8 @@ impl CallDef { attr_span, instances, methods, - where_clause: item.generics.where_clause.clone(), - docs: get_doc_literals(&item.attrs), + where_clause: item_impl.generics.where_clause.clone(), + docs: get_doc_literals(&item_impl.attrs), }) } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 2b05478e0b02a..fb0b658cd9303 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -1787,9 +1787,11 @@ macro_rules! decl_module { $vis fn $name( $origin: $origin_ty $(, $param: $param_ty )* ) -> $crate::dispatch::DispatchResult { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); - { $( $impl )* } - Ok(()) + $crate::storage::with_storage_layer(|| { + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); + { $( $impl )* } + Ok(()) + }) } }; @@ -1805,8 +1807,10 @@ macro_rules! decl_module { ) => { $(#[$fn_attr])* $vis fn $name($origin: $origin_ty $(, $param: $param_ty )* ) -> $result { - $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); - $( $impl )* + $crate::storage::with_storage_layer(|| { + $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!(stringify!($name))); + $( $impl )* + }) } }; diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 63747a9d560dc..d388127f29abd 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -282,94 +282,96 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 31, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module2::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 32, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_2::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 33, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - NestedModule3::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 34, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_3::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 6, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_4::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 3, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_5::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 4, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - 
Module1_6::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 1, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_7::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 2, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_8::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 12, - error: [0; 4], - message: Some("Something") - })), - ); - assert_eq!( - Module1_9::fail(system::Origin::::Root.into()), - Err(DispatchError::Module(ModuleError { - index: 13, - error: [0; 4], - message: Some("Something") - })), - ); + sp_io::TestExternalities::default().execute_with(|| { + assert_eq!( + Module1_1::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 31, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 32, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 33, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + NestedModule3::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 34, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_3::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 6, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_4::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 3, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_5::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 4, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_6::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 1, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_7::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 2, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_8::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 12, + error: [0; 4], + message: Some("Something") + })), + ); + assert_eq!( + Module1_9::fail(system::Origin::::Root.into()), + Err(DispatchError::Module(ModuleError { + index: 13, + error: [0; 4], + message: Some("Something") + })), + ); + }); } #[test] diff --git a/frame/support/test/tests/storage_layers.rs b/frame/support/test/tests/storage_layers.rs index 05ed60fe90196..7404ccace2c09 100644 --- a/frame/support/test/tests/storage_layers.rs +++ b/frame/support/test/tests/storage_layers.rs @@ -276,5 +276,7 @@ fn storage_layer_in_decl_pallet_call() { let call2 = Call::DeclPallet(decl_pallet::Call::set_value { value: 1 }); assert_noop!(call2.dispatch(Origin::signed(0)), "Revert!"); + // Calling the function directly also works with storage layers. 
+ assert_noop!(decl_pallet::Module::::set_value(Origin::signed(1), 1), "Revert!"); }); } diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index f16b8f029662b..681cd29af8222 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -245,9 +245,11 @@ pub mod pallet { let sender = ensure_signed(origin)?; let transactions = >::get(block).ok_or(Error::::RenewedNotFound)?; let info = transactions.get(index as usize).ok_or(Error::::RenewedNotFound)?; + let extrinsic_index = + >::extrinsic_index().ok_or(Error::::BadContext)?; + Self::apply_fee(sender, info.size)?; - let extrinsic_index = >::extrinsic_index().unwrap(); sp_io::transaction_index::renew(extrinsic_index, info.content_hash.into()); let mut index = 0; diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 6efc847bfbdb7..236a515a2412d 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -17,14 +17,13 @@ //! Basic implementation for Externalities. -use crate::{Backend, StorageKey, StorageValue}; +use crate::{Backend, OverlayedChanges, StorageKey, StorageValue}; use codec::Encode; use hash_db::Hasher; use log::warn; use sp_core::{ storage::{ - well_known_keys::is_child_storage_key, ChildInfo, StateVersion, Storage, StorageChild, - TrackedStorageKey, + well_known_keys::is_child_storage_key, ChildInfo, StateVersion, Storage, TrackedStorageKey, }, traits::Externalities, Blake2Hasher, @@ -35,20 +34,19 @@ use std::{ any::{Any, TypeId}, collections::BTreeMap, iter::FromIterator, - ops::Bound, }; /// Simple Map-based Externalities impl. #[derive(Debug)] pub struct BasicExternalities { - inner: Storage, + overlay: OverlayedChanges, extensions: Extensions, } impl BasicExternalities { /// Create a new instance of `BasicExternalities` pub fn new(inner: Storage) -> Self { - BasicExternalities { inner, extensions: Default::default() } + BasicExternalities { overlay: inner.into(), extensions: Default::default() } } /// New basic externalities with empty storage. @@ -57,13 +55,34 @@ impl BasicExternalities { } /// Insert key/value - pub fn insert(&mut self, k: StorageKey, v: StorageValue) -> Option { - self.inner.top.insert(k, v) + pub fn insert(&mut self, k: StorageKey, v: StorageValue) { + self.overlay.set_storage(k, Some(v)); } /// Consume self and returns inner storages pub fn into_storages(self) -> Storage { - self.inner + Storage { + top: self + .overlay + .changes() + .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) + .collect(), + children_default: self + .overlay + .children() + .map(|(iter, i)| { + ( + i.storage_key().to_vec(), + sp_core::storage::StorageChild { + data: iter + .filter_map(|(k, v)| v.value().map(|v| (k.to_vec(), v.to_vec()))) + .collect(), + child_info: i.clone(), + }, + ) + }) + .collect(), + } } /// Execute the given closure `f` with the externalities set and initialized with `storage`. 
@@ -73,13 +92,7 @@ impl BasicExternalities { storage: &mut sp_core::storage::Storage, f: impl FnOnce() -> R, ) -> R { - let mut ext = Self { - inner: Storage { - top: std::mem::take(&mut storage.top), - children_default: std::mem::take(&mut storage.children_default), - }, - extensions: Default::default(), - }; + let mut ext = Self::new(std::mem::take(storage)); let r = ext.execute_with(f); @@ -108,15 +121,26 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.inner.top.eq(&other.inner.top) && - self.inner.children_default.eq(&other.inner.children_default) + self.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() == + other.overlay.changes().map(|(k, v)| (k, v.value())).collect::>() && + self.overlay + .children() + .map(|(iter, i)| (i, iter.map(|(k, v)| (k, v.value())).collect::>())) + .collect::>() == + other + .overlay + .children() + .map(|(iter, i)| { + (i, iter.map(|(k, v)| (k, v.value())).collect::>()) + }) + .collect::>() } } impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { fn from_iter>(iter: I) -> Self { let mut t = Self::default(); - t.inner.top.extend(iter); + iter.into_iter().for_each(|(k, v)| t.insert(k, v)); t } } @@ -128,11 +152,8 @@ impl Default for BasicExternalities { } impl From> for BasicExternalities { - fn from(hashmap: BTreeMap) -> Self { - BasicExternalities { - inner: Storage { top: hashmap, children_default: Default::default() }, - extensions: Default::default(), - } + fn from(map: BTreeMap) -> Self { + Self::from_iter(map.into_iter()) } } @@ -140,7 +161,7 @@ impl Externalities for BasicExternalities { fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) {} fn storage(&self, key: &[u8]) -> Option { - self.inner.top.get(key).cloned() + self.overlay.storage(key).and_then(|v| v.map(|v| v.to_vec())) } fn storage_hash(&self, key: &[u8]) -> Option> { @@ -148,11 +169,7 @@ impl Externalities for BasicExternalities { } fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { - self.inner - .children_default - .get(child_info.storage_key()) - .and_then(|child| child.data.get(key)) - .cloned() + self.overlay.child_storage(child_info, key).and_then(|v| v.map(|v| v.to_vec())) } fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { @@ -160,16 +177,13 @@ impl Externalities for BasicExternalities { } fn next_storage_key(&self, key: &[u8]) -> Option { - let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.top.range::<[u8], _>(range).next().map(|(k, _)| k).cloned() + self.overlay.iter_after(key).find_map(|(k, v)| v.value().map(|_| k.to_vec())) } fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { - let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner - .children_default - .get(child_info.storage_key()) - .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) + self.overlay + .child_iter_after(child_info.storage_key(), key) + .find_map(|(k, v)| v.value().map(|_| k.to_vec())) } fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { @@ -178,14 +192,7 @@ impl Externalities for BasicExternalities { return } - match maybe_value { - Some(value) => { - self.inner.top.insert(key, value); - }, - None => { - self.inner.top.remove(&key); - }, - } + self.overlay.set_storage(key, maybe_value) } fn place_child_storage( @@ -194,19 +201,7 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - let child_map = 
self - .inner - .children_default - .entry(child_info.storage_key().to_vec()) - .or_insert_with(|| StorageChild { - data: Default::default(), - child_info: child_info.to_owned(), - }); - if let Some(value) = value { - child_map.data.insert(key, value); - } else { - child_map.data.remove(&key); - } + self.overlay.set_child_storage(child_info, key, value); } fn kill_child_storage( @@ -215,12 +210,7 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - let count = self - .inner - .children_default - .remove(child_info.storage_key()) - .map(|c| c.data.len()) - .unwrap_or(0) as u32; + let count = self.overlay.clear_child_storage(child_info); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -239,19 +229,7 @@ impl Externalities for BasicExternalities { return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 } } - let to_remove = self - .inner - .top - .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) - .map(|(k, _)| k) - .take_while(|k| k.starts_with(prefix)) - .cloned() - .collect::>(); - - let count = to_remove.len() as u32; - for key in to_remove { - self.inner.top.remove(&key); - } + let count = self.overlay.clear_prefix(prefix); MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } @@ -262,56 +240,37 @@ impl Externalities for BasicExternalities { _maybe_limit: Option, _maybe_cursor: Option<&[u8]>, ) -> MultiRemovalResults { - if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { - let to_remove = child - .data - .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) - .map(|(k, _)| k) - .take_while(|k| k.starts_with(prefix)) - .cloned() - .collect::>(); - - let count = to_remove.len() as u32; - for key in to_remove { - child.data.remove(&key); - } - MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } - } else { - MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 } - } + let count = self.overlay.clear_child_prefix(child_info, prefix); + MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count } } fn storage_append(&mut self, key: Vec, value: Vec) { - let current = self.inner.top.entry(key).or_default(); - crate::ext::StorageAppend::new(current).append(value); + let current_value = self.overlay.value_mut_or_insert_with(&key, || Default::default()); + crate::ext::StorageAppend::new(current_value).append(value); } fn storage_root(&mut self, state_version: StateVersion) -> Vec { - let mut top = self.inner.top.clone(); - let prefixed_keys: Vec<_> = self - .inner - .children_default - .iter() - .map(|(_k, v)| (v.child_info.prefixed_storage_key(), v.child_info.clone())) - .collect(); + let mut top = self + .overlay + .changes() + .filter_map(|(k, v)| v.value().map(|v| (k.clone(), v.clone()))) + .collect::>(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. let empty_hash = empty_child_trie_root::>(); - for (prefixed_storage_key, child_info) in prefixed_keys { + for child_info in self.overlay.children().map(|d| d.1.clone()).collect::>() { let child_root = self.child_storage_root(&child_info, state_version); if empty_hash[..] == child_root[..] 
{ - top.remove(prefixed_storage_key.as_slice()); + top.remove(child_info.prefixed_storage_key().as_slice()); } else { - top.insert(prefixed_storage_key.into_inner(), child_root); + top.insert(child_info.prefixed_storage_key().into_inner(), child_root); } } match state_version { - StateVersion::V0 => - LayoutV0::::trie_root(self.inner.top.clone()).as_ref().into(), - StateVersion::V1 => - LayoutV1::::trie_root(self.inner.top.clone()).as_ref().into(), + StateVersion::V0 => LayoutV0::::trie_root(top).as_ref().into(), + StateVersion::V1 => LayoutV1::::trie_root(top).as_ref().into(), } } @@ -320,10 +279,11 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, state_version: StateVersion, ) -> Vec { - if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { - let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); + if let Some((data, child_info)) = self.overlay.child_changes(child_info.storage_key()) { + let delta = + data.into_iter().map(|(k, v)| (k.as_ref(), v.value().map(|v| v.as_slice()))); crate::in_memory_backend::new_in_mem::>() - .child_storage_root(&child.child_info, delta, state_version) + .child_storage_root(&child_info, delta, state_version) .0 } else { empty_child_trie_root::>() @@ -332,15 +292,15 @@ impl Externalities for BasicExternalities { } fn storage_start_transaction(&mut self) { - unimplemented!("Transactions are not supported by BasicExternalities"); + self.overlay.start_transaction() } fn storage_rollback_transaction(&mut self) -> Result<(), ()> { - unimplemented!("Transactions are not supported by BasicExternalities"); + self.overlay.rollback_transaction().map_err(drop) } fn storage_commit_transaction(&mut self) -> Result<(), ()> { - unimplemented!("Transactions are not supported by BasicExternalities"); + self.overlay.commit_transaction().map_err(drop) } fn wipe(&mut self) {} diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index ae5d47f6bb943..e5dad7157c731 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -57,7 +57,7 @@ pub struct NotInRuntime; /// Describes in which mode the node is currently executing. #[derive(Debug, Clone, Copy)] pub enum ExecutionMode { - /// Exeuting in client mode: Removal of all transactions possible. + /// Executing in client mode: Removal of all transactions possible. Client, /// Executing in runtime mode: Transactions started by the client are protected. Runtime, @@ -95,7 +95,7 @@ pub type OverlayedChangeSet = OverlayedMap>; /// Holds a set of changes with the ability modify them using nested transactions. #[derive(Debug, Clone)] -pub struct OverlayedMap { +pub struct OverlayedMap { /// Stores the changes that this overlay constitutes. changes: BTreeMap>, /// Stores which keys are dirty per transaction. 
Needed in order to determine which @@ -110,7 +110,7 @@ pub struct OverlayedMap { execution_mode: ExecutionMode, } -impl Default for OverlayedMap { +impl Default for OverlayedMap { fn default() -> Self { Self { changes: BTreeMap::new(), @@ -121,6 +121,31 @@ impl Default for OverlayedMap { } } +#[cfg(feature = "std")] +impl From for OverlayedMap> { + fn from(storage: sp_core::storage::StorageMap) -> Self { + Self { + changes: storage + .into_iter() + .map(|(k, v)| { + ( + k, + OverlayedEntry { + transactions: SmallVec::from_iter([InnerValue { + value: Some(v), + extrinsics: Default::default(), + }]), + }, + ) + }) + .collect(), + dirty_keys: Default::default(), + num_client_transactions: 0, + execution_mode: ExecutionMode::Client, + } + } +} + impl Default for ExecutionMode { fn default() -> Self { Self::Client diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 746599a6768e6..001b4b656c34e 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -638,6 +638,21 @@ impl OverlayedChanges { } } +#[cfg(feature = "std")] +impl From for OverlayedChanges { + fn from(storage: sp_core::storage::Storage) -> Self { + Self { + top: storage.top.into(), + children: storage + .children_default + .into_iter() + .map(|(k, v)| (k, (v.data.into(), v.child_info))) + .collect(), + ..Default::default() + } + } +} + #[cfg(feature = "std")] fn retain_map(map: &mut Map, f: F) where From 389cc11d8d836a28cd4462bf8f63a36ff794a976 Mon Sep 17 00:00:00 2001 From: yjh Date: Thu, 11 Aug 2022 17:59:38 +0800 Subject: [PATCH 022/348] chore: improve runtime docs and remove unused error (#12000) --- Cargo.lock | 1 - client/executor/common/Cargo.toml | 1 - client/executor/common/src/error.rs | 4 ---- client/executor/src/native_executor.rs | 11 +++++++---- primitives/runtime-interface/src/lib.rs | 4 ++-- 5 files changed, 9 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38063e8060835..a5cd455e6029b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8188,7 +8188,6 @@ dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", "sp-sandbox", - "sp-serializer", "sp-wasm-interface", "thiserror", "wasm-instrument", diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 9ffdfb788474d..b16ed7e8552ce 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -23,7 +23,6 @@ wasmi = "0.9.1" sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../../primitives/maybe-compressed-blob" } sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } -sp-serializer = { version = "4.0.0-dev", path = "../../../primitives/serializer" } sp-wasm-interface = { version = "6.0.0", path = "../../../primitives/wasm-interface" } [features] diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index 09e070bb3bae5..376ac190bd7b7 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -18,7 +18,6 @@ //! Rust executor possible errors. -use sp_serializer; use wasmi; /// Result type alias. 
@@ -28,9 +27,6 @@ pub type Result = std::result::Result; #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum Error { - #[error("Unserializable data encountered")] - InvalidData(#[from] sp_serializer::Error), - #[error(transparent)] Wasmi(#[from] wasmi::Error), diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index d805949d26fd9..4d8f559d700b5 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -130,14 +130,13 @@ where /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. /// - /// `host_functions` - The set of host functions to be available for import provided by this - /// executor. - /// /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. /// /// `cache_path` - A path to a directory where the executor can place its files for purposes of /// caching. This may be important in cases when there are many different modules with the /// compiled execution method is used. + /// + /// `runtime_cache_size` - The capacity of runtime cache. pub fn new( method: WasmExecutionMethod, default_heap_pages: Option, @@ -209,7 +208,7 @@ where /// The runtime is passed as a [`RuntimeBlob`]. The runtime will be instantiated with the /// parameters this `WasmExecutor` was initialized with. /// - /// In case of problems with during creation of the runtime or instantation, a `Err` is + /// In case of problems with during creation of the runtime or instantiation, a `Err` is /// returned. that describes the message. #[doc(hidden)] // We use this function for tests across multiple crates. pub fn uncached_call( @@ -405,6 +404,10 @@ impl NativeElseWasmExecutor { /// /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. + /// + /// `max_runtime_instances` - The number of runtime instances to keep in memory ready for reuse. + /// + /// `runtime_cache_size` - The capacity of runtime cache. pub fn new( fallback_method: WasmExecutionMethod, default_heap_pages: Option, diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index f9bf8825f9486..6ebcb7482a779 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -308,10 +308,10 @@ pub use sp_std; /// /// 1. The generated functions are not callable from the native side. /// 2. The trait as shown above is not implemented for [`Externalities`] and is instead -/// implemented for `FunctionExecutor` (from `sp-wasm-interface`). +/// implemented for `FunctionContext` (from `sp-wasm-interface`). /// /// # Disable tracing -/// By addding `no_tracing` to the list of options you can prevent the wasm-side interface from +/// By adding `no_tracing` to the list of options you can prevent the wasm-side interface from /// generating the default `sp-tracing`-calls. Note that this is rarely needed but only meant /// for the case when that would create a circular dependency. 
You usually _do not_ want to add /// this flag, as tracing doesn't cost you anything by default anyways (it is added as a no-op) From 6cd7ac7bd1e9da914adb7c8066d759f0a2d3849f Mon Sep 17 00:00:00 2001 From: lucasvanmol Date: Thu, 11 Aug 2022 06:13:49 -0700 Subject: [PATCH 023/348] Fix broken links in `polkadot --help` (#12010) https://docs.substrate.io/v3/runtime/custom-rpcs/#public-rpcs > https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs --- client/cli/src/commands/run_cmd.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 3a74fdd9700f2..fd236aa0211dd 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -54,7 +54,7 @@ pub struct RunCmd { /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: - /// . + /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[clap(long)] pub rpc_external: bool, @@ -85,7 +85,7 @@ pub struct RunCmd { /// /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC /// proxy server to filter out dangerous methods. More details: - /// . + /// . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. #[clap(long)] pub ws_external: bool, From e3512480d5e6ff527b1f82c2b47618aaa824c9bb Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Thu, 11 Aug 2022 15:39:56 +0100 Subject: [PATCH 024/348] add decode with depth limit to opaque types (#11947) Co-authored-by: parity-processbot <> --- Cargo.lock | 1 + frame/support/Cargo.toml | 2 ++ frame/support/src/traits/misc.rs | 33 +++++++++++++++++++++++++++++--- 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a5cd455e6029b..2db192dcbf1a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2250,6 +2250,7 @@ dependencies = [ "serde", "serde_json", "smallvec", + "sp-api", "sp-arithmetic", "sp-core", "sp-core-hashing-proc-macro", diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index ca26d3a5e32f2..a69133961e970 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -17,6 +17,7 @@ serde = { version = "1.0.136", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-metadata = { version = "15.0.0", default-features = false, features = ["v14"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } @@ -49,6 +50,7 @@ default = ["std"] std = [ "once_cell", "serde", + "sp-api/std", "sp-io/std", "codec/std", "scale-info/std", diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index ddb7f6f41b378..fa59fa92f920d 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -18,7 +18,7 @@ //! Smaller traits used in FRAME which don't need their own file. 
use crate::dispatch::Parameter; -use codec::{CompactLen, Decode, DecodeAll, Encode, EncodeLike, Input, MaxEncodedLen}; +use codec::{CompactLen, Decode, DecodeLimit, Encode, EncodeLike, Input, MaxEncodedLen}; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, Saturating}; #[doc(hidden)] @@ -745,7 +745,10 @@ impl Encode for WrapperOpaque { impl Decode for WrapperOpaque { fn decode(input: &mut I) -> Result { - Ok(Self(T::decode_all(&mut &>::decode(input)?[..])?)) + Ok(Self(T::decode_all_with_depth_limit( + sp_api::MAX_EXTRINSIC_DEPTH, + &mut &>::decode(input)?[..], + )?)) } fn skip(input: &mut I) -> Result<(), codec::Error> { @@ -807,7 +810,7 @@ impl WrapperKeepOpaque { /// /// Returns `None` if the decoding failed. pub fn try_decode(&self) -> Option { - T::decode_all(&mut &self.data[..]).ok() + T::decode_all_with_depth_limit(sp_api::MAX_EXTRINSIC_DEPTH, &mut &self.data[..]).ok() } /// Returns the length of the encoded `T`. @@ -939,6 +942,30 @@ impl PreimageRecipient for () { #[cfg(test)] mod test { use super::*; + use sp_std::marker::PhantomData; + + #[derive(Encode, Decode)] + enum NestedType { + Nested(Box), + Done, + } + + #[test] + fn test_opaque_wrapper_decode_limit() { + let limit = sp_api::MAX_EXTRINSIC_DEPTH as usize; + let mut ok_bytes = vec![0u8; limit]; + ok_bytes.push(1u8); + let mut err_bytes = vec![0u8; limit + 1]; + err_bytes.push(1u8); + assert!(>::decode(&mut &ok_bytes.encode()[..]).is_ok()); + assert!(>::decode(&mut &err_bytes.encode()[..]).is_err()); + + let ok_keep_opaque = WrapperKeepOpaque { data: ok_bytes, _phantom: PhantomData }; + let err_keep_opaque = WrapperKeepOpaque { data: err_bytes, _phantom: PhantomData }; + + assert!(>::try_decode(&ok_keep_opaque).is_some()); + assert!(>::try_decode(&err_keep_opaque).is_none()); + } #[test] fn test_opaque_wrapper() { From dc45cc29167ee6358b527f3a0bcc0617dc9e4c99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 11 Aug 2022 16:53:40 +0200 Subject: [PATCH 025/348] Use correct length for Pallet and Storage prefix when calculating `KeyLenOf` (#11994) * Use correct length for Pallet and Storage prefix when calculating `KeyLenOf` * Add tests Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi --- frame/support/src/storage/types/double_map.rs | 19 ++++++++++--- frame/support/src/storage/types/map.rs | 27 +++++++++++++++++-- 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs index 2e090d30119aa..9ba4cf052e82a 100644 --- a/frame/support/src/storage/types/double_map.rs +++ b/frame/support/src/storage/types/double_map.rs @@ -25,6 +25,7 @@ use crate::{ KeyLenOf, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, + StorageHasher, Twox128, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::SaturatedConversion; @@ -91,10 +92,10 @@ impl Key2: MaxEncodedLen, { fn get() -> u32 { - let z = Hasher1::max_len::() + - Hasher2::max_len::() + - Prefix::pallet_prefix().len() + - Prefix::STORAGE_PREFIX.len(); + // The `max_len` of both key hashes plus the pallet prefix and storage prefix (which both + // are hashed with `Twox128`). 
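+ // Concretely, the final key is `twox128(pallet_prefix) ++ twox128(storage_prefix) ++
+ // hasher1(key1) ++ hasher2(key2)`, and each `Twox128` hash is 16 bytes long.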
+ let z = + Hasher1::max_len::() + Hasher2::max_len::() + Twox128::max_len::<()>() * 2; z as u32 } } @@ -755,6 +756,16 @@ mod test { } } + #[test] + fn keylenof_works() { + // Works with Blake2_128Concat and Twox64Concat. + type A = StorageDoubleMap; + let size = 16 * 2 // Two Twox128 + + 16 + 8 // Blake2_128Concat = hash + key + + 8 + 4; // Twox64Concat = hash + key + assert_eq!(KeyLenOf::::get(), size); + } + #[test] fn test() { type A = diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index f4ac83c22663b..0f89e2378a55d 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -25,6 +25,7 @@ use crate::{ KeyLenOf, StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, traits::{Get, GetDefault, StorageInfo, StorageInstance}, + StorageHasher, Twox128, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::SaturatedConversion; @@ -61,8 +62,9 @@ where Key: FullCodec + MaxEncodedLen, { fn get() -> u32 { - let z = - Hasher::max_len::() + Prefix::pallet_prefix().len() + Prefix::STORAGE_PREFIX.len(); + // The `max_len` of the key hash plus the pallet prefix and storage prefix (which both are + // hashed with `Twox128`). + let z = Hasher::max_len::() + Twox128::max_len::<()>() * 2; z as u32 } } @@ -501,6 +503,27 @@ mod test { } } + #[test] + fn keylenof_works() { + // Works with Blake2_128Concat. + type A = StorageMap; + let size = 16 * 2 // Two Twox128 + + 16 + 4; // Blake2_128Concat = hash + key + assert_eq!(KeyLenOf::::get(), size); + + // Works with Blake2_256. + type B = StorageMap; + let size = 16 * 2 // Two Twox128 + + 32; // Blake2_256 + assert_eq!(KeyLenOf::::get(), size); + + // Works with Twox64Concat. + type C = StorageMap; + let size = 16 * 2 // Two Twox128 + + 8 + 4; // Twox64Concat = hash + key + assert_eq!(KeyLenOf::::get(), size); + } + #[test] fn test() { type A = StorageMap; From 469189bfe16b61b238f7786c5d2cff82060ec8f8 Mon Sep 17 00:00:00 2001 From: benluelo <57334811+benluelo@users.noreply.github.com> Date: Thu, 11 Aug 2022 14:55:30 -0400 Subject: [PATCH 026/348] Add map and try_map methods to BoundedBTreeMap (#11869) * Add map and try_map methods to BoundedBTreeMap * Undo changes to basic_authorship.rs * Remove unwrap and use unchecked_from instead * Add iter_mut() method * Remove map functions and add docs to iter_mut * fmt Co-authored-by: Shawn Tabrizi --- .../runtime/src/bounded/bounded_btree_map.rs | 22 ++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/primitives/runtime/src/bounded/bounded_btree_map.rs b/primitives/runtime/src/bounded/bounded_btree_map.rs index aefd168632a1e..725864b462694 100644 --- a/primitives/runtime/src/bounded/bounded_btree_map.rs +++ b/primitives/runtime/src/bounded/bounded_btree_map.rs @@ -161,6 +161,13 @@ where { self.0.remove_entry(key) } + + /// Gets a mutable iterator over the entries of the map, sorted by key. + /// + /// See [`BTreeMap::iter_mut`] for more information. 
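+ /// Note that only the values can be mutated through this iterator; entries cannot be
+ /// added or removed, so the length bound of the map is preserved.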
+ pub fn iter_mut(&mut self) -> sp_std::collections::btree_map::IterMut { + self.0.iter_mut() + } } impl Default for BoundedBTreeMap @@ -508,7 +515,7 @@ pub mod test { b1.iter().map(|(k, v)| (k + 1, *v)).take(2).try_collect().unwrap(); assert_eq!(b2.into_iter().map(|(k, _)| k).collect::>(), vec![2, 3]); - // but these worn't work + // but these won't work let b2: Result>, _> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect(); assert!(b2.is_err()); @@ -517,4 +524,17 @@ pub mod test { b1.iter().map(|(k, v)| (k + 1, *v)).skip(2).try_collect(); assert!(b2.is_err()); } + + #[test] + fn test_iter_mut() { + let mut b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + + b1.iter_mut().for_each(|(_, v)| *v *= 2); + + assert_eq!(b1, b2); + } } From 6b8553511112afd5ae7e8e6877dc2f467850f155 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 12 Aug 2022 11:03:27 +0200 Subject: [PATCH 027/348] [ci] Improve cancel-pipeline job (#12008) * [WIP][ci] Improve cancel-pipeline job * fix job name * test that fail works * debug cancel-pipeline * remove artifacts-false from cancel-pipeline jobs * split cancel pipeline jobs * fail test-linux-stable 2/3 * fail test-linux-stable 3/3 * fail cargo-check-benches 1/2 * fail cargo-check-benches 2/2 * fail test-linux-stable-int * fail cargo-check-subkey * fail check-tracing * fail check-tracing * fix pipeline --- .gitlab-ci.yml | 60 ++++++++++++++++++++++------- scripts/ci/gitlab/pipeline/test.yml | 6 +++ 2 files changed, 52 insertions(+), 14 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9608d88e9554d..765112bfa9e62 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -82,18 +82,31 @@ default: tags: - kubernetes-parity-build -.rust-info-script: &rust-info-script +.rust-info-script: script: - rustup show - cargo --version - rustup +nightly show - cargo +nightly --version +.pipeline-stopper-vars: + script: + - echo "Collecting env variables for the cancel-pipeline job" + - echo "FAILED_JOB_URL=${CI_JOB_URL}" > pipeline-stopper.env + - echo "FAILED_JOB_NAME=${CI_JOB_NAME}" >> pipeline-stopper.env + - echo "PR_NUM=${CI_COMMIT_REF_NAME}" >> pipeline-stopper.env + +.pipeline-stopper-artifacts: + artifacts: + reports: + dotenv: pipeline-stopper.env + .docker-env: image: "${CI_IMAGE}" before_script: - !reference [.rust-info-script, script] - !reference [.rusty-cachier, before_script] + - !reference [.pipeline-stopper-vars, script] after_script: - !reference [.rusty-cachier, after_script] tags: @@ -246,35 +259,54 @@ rusty-cachier-notify: when: on_failure variables: PROJECT_ID: "${CI_PROJECT_ID}" + PROJECT_NAME: "${CI_PROJECT_NAME}" PIPELINE_ID: "${CI_PIPELINE_ID}" - trigger: "parity/infrastructure/ci_cd/pipeline-stopper" + FAILED_JOB_URL: "${FAILED_JOB_URL}" + FAILED_JOB_NAME: "${FAILED_JOB_NAME}" + PR_NUM: "${PR_NUM}" + trigger: + project: "parity/infrastructure/ci_cd/pipeline-stopper" + # remove branch, when pipeline-stopper for polakdot is updated to the same branch + branch: "as-improve" -cancel-pipeline-test-linux-stable: +# need to copy jobs this way because otherwise gitlab will wait +# for all 3 jobs to finish instead of cancelling if one fails +cancel-pipeline-test-linux-stable1: extends: .cancel-pipeline-template needs: - - job: test-linux-stable - artifacts: false + - job: "test-linux-stable 1/3" + +cancel-pipeline-test-linux-stable2: + extends: 
.cancel-pipeline-template + needs: + - job: "test-linux-stable 2/3" + +cancel-pipeline-test-linux-stable3: + extends: .cancel-pipeline-template + needs: + - job: "test-linux-stable 3/3" + +cancel-pipeline-cargo-check-benches1: + extends: .cancel-pipeline-template + needs: + - job: "cargo-check-benches 1/2" + +cancel-pipeline-cargo-check-benches2: + extends: .cancel-pipeline-template + needs: + - job: "cargo-check-benches 2/2" cancel-pipeline-test-linux-stable-int: extends: .cancel-pipeline-template needs: - job: test-linux-stable-int - artifacts: false cancel-pipeline-cargo-check-subkey: extends: .cancel-pipeline-template needs: - job: cargo-check-subkey - artifacts: false - -cancel-pipeline-cargo-check-benches: - extends: .cancel-pipeline-template - needs: - - job: cargo-check-benches - artifacts: false cancel-pipeline-check-tracing: extends: .cancel-pipeline-template needs: - job: check-tracing - artifacts: false diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index ccf8338236d0a..a58015c6e22d6 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -61,10 +61,12 @@ cargo-check-benches: - .docker-env - .test-refs - .collect-artifacts + - .pipeline-stopper-artifacts before_script: # perform rusty-cachier operations before any further modifications to the git repo to make cargo feel cheated not so much - !reference [.rust-info-script, script] - !reference [.rusty-cachier, before_script] + - !reference [.pipeline-stopper-vars, script] # merges in the master branch on PRs - if [ $CI_COMMIT_REF_NAME != "master" ]; then git fetch origin +master:master; @@ -131,6 +133,7 @@ cargo-check-subkey: extends: - .docker-env - .test-refs + - .pipeline-stopper-artifacts script: - rusty-cachier snapshot create - cd ./bin/utils/subkey @@ -199,6 +202,7 @@ test-linux-stable: extends: - .docker-env - .test-refs + - .pipeline-stopper-artifacts variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. @@ -299,6 +303,7 @@ test-linux-stable-int: extends: - .docker-env - .test-refs + - .pipeline-stopper-artifacts variables: # Enable debug assertions since we are running optimized builds for testing # but still want to have debug assertions. @@ -328,6 +333,7 @@ check-tracing: extends: - .docker-env - .test-refs + - .pipeline-stopper-artifacts script: - rusty-cachier snapshot create # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases From 241fc1f8a7e367420ee8c697a9a85b7ceb78713d Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Fri, 12 Aug 2022 16:34:00 +0100 Subject: [PATCH 028/348] better benchmark log (#12016) * better benchmark log * Update lib.rs --- frame/benchmarking/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index afd53915cc397..a1399dc388b09 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -1054,7 +1054,9 @@ macro_rules! impl_benchmark { // Time the extrinsic logic. 
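+ // Log which extrinsic and which component values are about to be measured,
+ // so individual benchmark runs can be told apart in the trace output.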
$crate::log::trace!( target: "benchmark", - "Start Benchmark: {:?}", c + "Start Benchmark: {} ({:?})", + extrinsic, + c ); let start_pov = $crate::benchmarking::proof_size(); From bfbbf4c7fb1be8d2304b8da102c89fe286e70804 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Fri, 12 Aug 2022 17:39:05 +0200 Subject: [PATCH 029/348] feat(rpc middleware): use custom time buckets (#11950) * feat(rpc middleware): use custom time buckets * cargo fmt * make it compile again * fix bad comment --- client/rpc-servers/src/middleware.rs | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs index 1c0660fc3528d..0d77442323241 100644 --- a/client/rpc-servers/src/middleware.rs +++ b/client/rpc-servers/src/middleware.rs @@ -25,6 +25,21 @@ use prometheus_endpoint::{ }; use std::net::SocketAddr; +/// Histogram time buckets in microseconds. +const HISTOGRAM_BUCKETS: [f64; 11] = [ + 5.0, + 25.0, + 100.0, + 500.0, + 1_000.0, + 2_500.0, + 10_000.0, + 25_000.0, + 100_000.0, + 1_000_000.0, + 10_000_000.0, +]; + /// Metrics for RPC middleware storing information about the number of requests started/completed, /// calls started/completed and their timings. #[derive(Debug, Clone)] @@ -75,7 +90,8 @@ impl RpcMetrics { HistogramOpts::new( "substrate_rpc_calls_time", "Total time [μs] of processed RPC calls", - ), + ) + .buckets(HISTOGRAM_BUCKETS.to_vec()), &["protocol", "method"], )?, metrics_registry, From 95a0c6a9d41628c3cc37f59dc42f5f45ca3f3977 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 12 Aug 2022 19:18:27 +0200 Subject: [PATCH 030/348] Add `defer` (#12013) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add defer Signed-off-by: Oliver Tale-Yazdi * Convert to macro + review fixes Signed-off-by: Oliver Tale-Yazdi * Apply suggestions from code review Co-authored-by: Bastian Köcher * Move into new file and add panic unwind tests Signed-off-by: Oliver Tale-Yazdi * Fix doc and Clippy Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher --- primitives/core/src/defer.rs | 140 +++++++++++++++++++++++++++++++++++ primitives/core/src/lib.rs | 1 + 2 files changed, 141 insertions(+) create mode 100644 primitives/core/src/defer.rs diff --git a/primitives/core/src/defer.rs b/primitives/core/src/defer.rs new file mode 100644 index 0000000000000..d14b26d59e4dd --- /dev/null +++ b/primitives/core/src/defer.rs @@ -0,0 +1,140 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Contains the [`crate::defer!`] macro for *deferring* the execution +//! of code until the current scope is dropped. +//! This helps with *always* executing cleanup code. + +/// Executes the wrapped closure on drop. +/// +/// Should be used together with the [`crate::defer!`] macro. 
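+/// The closure is stored as an `Option` so that the `Drop` implementation can take it
+/// out and run it exactly once.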
+#[must_use] +pub struct DeferGuard(pub Option); + +impl Drop for DeferGuard { + fn drop(&mut self) { + self.0.take().map(|f| f()); + } +} + +/// Executes the given code when the current scope is dropped. +/// +/// Multiple calls to [`crate::defer!`] will execute the passed codes in reverse order. +/// This also applies to panic stack unwinding. +/// +/// # Example +/// +/// ```rust +/// use sp_core::defer; +/// +/// let message = std::cell::RefCell::new("".to_string()); +/// { +/// defer!( +/// message.borrow_mut().push_str("world!"); +/// ); +/// defer!( +/// message.borrow_mut().push_str("Hello "); +/// ); +/// } +/// assert_eq!(*message.borrow(), "Hello world!"); +/// ``` +#[macro_export] +macro_rules! defer( + ( $( $code:tt )* ) => { + let _guard = $crate::defer::DeferGuard(Some(|| { $( $code )* })); + }; +); + +#[cfg(test)] +mod test { + #[test] + fn defer_guard_works() { + let mut called = false; + { + defer!( + called = true; + ); + } + assert!(called, "DeferGuard should have executed the closure"); + } + + #[test] + /// `defer` executes the code in reverse order of being called. + fn defer_guard_order_works() { + let called = std::cell::RefCell::new(1); + + defer!( + assert_eq!(*called.borrow(), 3); + ); + defer!( + assert_eq!(*called.borrow(), 2); + *called.borrow_mut() = 3; + ); + defer!({ + assert_eq!(*called.borrow(), 1); + *called.borrow_mut() = 2; + }); + } + + #[test] + #[allow(unused_braces)] + #[allow(clippy::unnecessary_operation)] + fn defer_guard_syntax_works() { + let called = std::cell::RefCell::new(0); + { + defer!(*called.borrow_mut() += 1); + defer!(*called.borrow_mut() += 1;); // With ; + defer!({ *called.borrow_mut() += 1 }); + defer!({ *called.borrow_mut() += 1 };); // With ; + } + assert_eq!(*called.borrow(), 4); + } + + #[test] + /// `defer` executes the code even in case of a panic. + fn defer_guard_panic_unwind_works() { + use std::panic::{catch_unwind, AssertUnwindSafe}; + let mut called = false; + + let should_panic = catch_unwind(AssertUnwindSafe(|| { + defer!(called = true); + panic!(); + })); + + assert!(should_panic.is_err(), "DeferGuard should have panicked"); + assert!(called, "DeferGuard should have executed the closure"); + } + + #[test] + /// `defer` executes the code even in case another `defer` panics. + fn defer_guard_defer_panics_unwind_works() { + use std::panic::{catch_unwind, AssertUnwindSafe}; + let counter = std::cell::RefCell::new(0); + + let should_panic = catch_unwind(AssertUnwindSafe(|| { + defer!(*counter.borrow_mut() += 1); + defer!( + *counter.borrow_mut() += 1; + panic!(); + ); + defer!(*counter.borrow_mut() += 1); + })); + + assert!(should_panic.is_err(), "DeferGuard should have panicked"); + assert_eq!(*counter.borrow(), 3, "DeferGuard should have executed the closure"); + } +} diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index ab3334f9e4f5a..f48adc274f524 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -56,6 +56,7 @@ pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_6 pub mod crypto; pub mod hexdisplay; +pub mod defer; pub mod ecdsa; pub mod ed25519; pub mod hash; From c1293d4adec3aac82de9620842ae7f3cc0c97915 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Sat, 13 Aug 2022 14:56:40 +0300 Subject: [PATCH 031/348] Runtime API versioning (#11779) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Runtime API versioning Related to issue #11577 Add support for multiple versions of a Runtime API. 
The purpose is to have one main version of the API, which is considered stable, and multiple unstable (aka staging) ones.

How it works
===========

Some methods of the API trait can be tagged with the `#[api_version(N)]` attribute, where N is a version number bigger than the main one. Let's call them **staging methods** for brevity. The implementor of the API decides which version to implement.

Example (from https://github.com/paritytech/substrate/issues/11577#issuecomment-1145347025):
```
decl_runtime_apis! {
    #[api_version(10)]
    trait Test {
        fn something() -> Vec;

        #[api_version(11)]
        fn new_cool_function() -> u32;
    }
}
```
```
impl_runtime_apis! {
    #[api_version(11)]
    impl Test for Runtime {
        fn something() -> Vec {
            vec![1, 2, 3]
        }

        fn new_cool_function() -> u32 {
            10
        }
    }
}
```

Version safety checks (currently not implemented)
=================================================

By default, all staging methods in the API trait have a default implementation that calls `unimplemented!()`. This is a problem because, if the developer wants to implement version 11 in the example above and forgets to add `fn new_cool_function()` in `impl_runtime_apis!`, the runtime will crash when the function is executed. Ideally a compilation error should be generated in such cases.

TODOs
=====

Things not working well at the moment:
[ ] Version safety check
[ ] Integration tests of `primitives/api` are messed up a bit. More specifically `primitives/api/test/tests/decl_and_impl.rs`
[ ] Integration test covering the new functionality.
[ ] Some duplicated code

* Update primitives/api/proc-macro/src/impl_runtime_apis.rs Code review feedback and formatting Co-authored-by: asynchronous rob
* Code review feedback Applying suggestions from @bkchr
* fmt
* Apply suggestions from code review Co-authored-by: Bastian Köcher
* Code review feedback
* dummy trait -> versioned trait
* Implement only versioned traits (not compiling)
* Remove native API calls (still not compiling)
* fmt
* Fix compilation
* Comments
* Remove unused code
* Remove native runtime tests
* Remove unused code
* Fix UI tests
* Code review feedback
* Code review feedback
* attribute_names -> common
* Rework `append_api_version`
* Code review feedback
* Apply suggestions from code review Co-authored-by: Bastian Köcher
* Code review feedback
* Code review feedback
* Code review feedback
* Use type alias for the default trait - doesn't compile
* Fixes
* Better error for `method_api_ver < trait_api_version`
* fmt
* Rework how we call runtime functions
* Update UI tests
* Fix warnings
* Fix doctests
* Apply suggestions from code review Co-authored-by: Bastian Köcher
* Apply suggestions from code review Co-authored-by: Bastian Köcher
* Fix formatting and small compilation errors
* Update primitives/api/proc-macro/src/impl_runtime_apis.rs

Co-authored-by: Bastian Köcher
Co-authored-by: asynchronous rob
Co-authored-by: Bastian Köcher
Co-authored-by: Bastian Köcher
---
 primitives/api/proc-macro/src/common.rs | 41 +
 .../api/proc-macro/src/decl_runtime_apis.rs | 699 +++++++-----------
 .../api/proc-macro/src/impl_runtime_apis.rs | 319 ++++----
 primitives/api/proc-macro/src/lib.rs | 1 +
 .../proc-macro/src/mock_impl_runtime_apis.rs | 134 +++-
 primitives/api/proc-macro/src/utils.rs | 43 +-
 primitives/api/src/lib.rs | 82 +-
 primitives/api/test/tests/decl_and_impl.rs | 39 +-
 primitives/api/test/tests/runtime_calls.rs | 24 +-
 primitives/api/test/tests/trybuild.rs | 1 +
 .../ui/impl_incorrect_method_signature.stderr | 43 --
 .../api/test/tests/ui/impl_missing_version.rs | 40 +
.../test/tests/ui/impl_missing_version.stderr | 14 + ...pi_version.rs => invalid_api_version_1.rs} | 0 ...on.stderr => invalid_api_version_1.stderr} | 4 +- .../tests/ui/invalid_api_version_2.stderr | 4 +- .../tests/ui/invalid_api_version_3.stderr | 4 +- .../test/tests/ui/invalid_api_version_4.rs | 8 + .../tests/ui/invalid_api_version_4.stderr | 5 + .../ui/method_ver_lower_than_trait_ver.rs | 9 + .../ui/method_ver_lower_than_trait_ver.stderr | 11 + .../test/tests/ui/missing_versioned_method.rs | 39 + .../tests/ui/missing_versioned_method.stderr | 8 + .../missing_versioned_method_multiple_vers.rs | 42 ++ ...sing_versioned_method_multiple_vers.stderr | 8 + .../ui/mock_advanced_block_id_by_value.rs | 2 +- .../ui/mock_advanced_block_id_by_value.stderr | 4 +- .../tests/ui/mock_advanced_missing_blockid.rs | 2 +- .../ui/mock_advanced_missing_blockid.stderr | 4 +- .../tests/ui/mock_only_self_reference.stderr | 74 +- .../tests/ui/positive_cases/default_impls.rs | 41 + ...reference_in_impl_runtime_apis_call.stderr | 43 -- 32 files changed, 937 insertions(+), 855 deletions(-) create mode 100644 primitives/api/proc-macro/src/common.rs create mode 100644 primitives/api/test/tests/ui/impl_missing_version.rs create mode 100644 primitives/api/test/tests/ui/impl_missing_version.stderr rename primitives/api/test/tests/ui/{invalid_api_version.rs => invalid_api_version_1.rs} (100%) rename primitives/api/test/tests/ui/{invalid_api_version.stderr => invalid_api_version_1.stderr} (65%) create mode 100644 primitives/api/test/tests/ui/invalid_api_version_4.rs create mode 100644 primitives/api/test/tests/ui/invalid_api_version_4.stderr create mode 100644 primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs create mode 100644 primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr create mode 100644 primitives/api/test/tests/ui/missing_versioned_method.rs create mode 100644 primitives/api/test/tests/ui/missing_versioned_method.stderr create mode 100644 primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs create mode 100644 primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr create mode 100644 primitives/api/test/tests/ui/positive_cases/default_impls.rs diff --git a/primitives/api/proc-macro/src/common.rs b/primitives/api/proc-macro/src/common.rs new file mode 100644 index 0000000000000..10887be613278 --- /dev/null +++ b/primitives/api/proc-macro/src/common.rs @@ -0,0 +1,41 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2024 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// The ident used for the block generic parameter. +pub const BLOCK_GENERIC_IDENT: &str = "Block"; + +/// Unique identifier used to make the hidden includes unique for this macro. +pub const HIDDEN_INCLUDES_ID: &str = "DECL_RUNTIME_APIS"; + +/// The `core_trait` attribute. +pub const CORE_TRAIT_ATTRIBUTE: &str = "core_trait"; +/// The `api_version` attribute. 
+/// +/// Is used to set the current version of the trait. +pub const API_VERSION_ATTRIBUTE: &str = "api_version"; +/// The `changed_in` attribute. +/// +/// Is used when the function signature changed between different versions of a trait. +/// This attribute should be placed on the old signature of the function. +pub const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; +/// The `renamed` attribute. +/// +/// Is used when a trait method was renamed. +pub const RENAMED_ATTRIBUTE: &str = "renamed"; +/// All attributes that we support in the declaration of a runtime api trait. +pub const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = + &[CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE]; diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index b031c0f8bb1cc..aac4491720c34 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -16,11 +16,15 @@ // limitations under the License. use crate::utils::{ - extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, - generate_call_api_at_fn_name, generate_crate_access, generate_hidden_includes, - generate_method_runtime_api_impl_name, generate_native_call_generator_fn_name, - generate_runtime_mod_name_for_trait, prefix_function_with_trait, - replace_wild_card_parameter_names, return_type_extract_type, AllowSelfRefInParameters, + extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, generate_crate_access, + generate_hidden_includes, generate_runtime_mod_name_for_trait, parse_runtime_api_version, + prefix_function_with_trait, replace_wild_card_parameter_names, return_type_extract_type, + versioned_trait_name, AllowSelfRefInParameters, +}; + +use crate::common::{ + API_VERSION_ATTRIBUTE, BLOCK_GENERIC_IDENT, CHANGED_IN_ATTRIBUTE, CORE_TRAIT_ATTRIBUTE, + HIDDEN_INCLUDES_ID, RENAMED_ATTRIBUTE, SUPPORTED_ATTRIBUTE_NAMES, }; use proc_macro2::{Span, TokenStream}; @@ -33,36 +37,11 @@ use syn::{ parse_macro_input, parse_quote, spanned::Spanned, visit::{self, Visit}, - Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, ReturnType, - TraitBound, TraitItem, TraitItemMethod, Type, + Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, TraitBound, + TraitItem, TraitItemMethod, }; -use std::collections::HashMap; - -/// The ident used for the block generic parameter. -const BLOCK_GENERIC_IDENT: &str = "Block"; - -/// Unique identifier used to make the hidden includes unique for this macro. -const HIDDEN_INCLUDES_ID: &str = "DECL_RUNTIME_APIS"; - -/// The `core_trait` attribute. -const CORE_TRAIT_ATTRIBUTE: &str = "core_trait"; -/// The `api_version` attribute. -/// -/// Is used to set the current version of the trait. -const API_VERSION_ATTRIBUTE: &str = "api_version"; -/// The `changed_in` attribute. -/// -/// Is used when the function signature changed between different versions of a trait. -/// This attribute should be placed on the old signature of the function. -const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; -/// The `renamed` attribute. -/// -/// Is used when a trait method was renamed. -const RENAMED_ATTRIBUTE: &str = "renamed"; -/// All attributes that we support in the declaration of a runtime api trait. 
-const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = - &[CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE]; +use std::collections::{BTreeMap, HashMap}; /// The structure used for parsing the runtime api declarations. struct RuntimeApiDecls { @@ -119,20 +98,6 @@ impl<'ast> Visit<'ast> for IsUsingBlock { } } -/// Visits the ast and checks if `Block` ident is used somewhere. -fn type_is_using_block(ty: &Type) -> bool { - let mut visitor = IsUsingBlock { result: false }; - visitor.visit_type(ty); - visitor.result -} - -/// Visits the ast and checks if `Block` ident is used somewhere. -fn return_type_is_using_block(ty: &ReturnType) -> bool { - let mut visitor = IsUsingBlock { result: false }; - visitor.visit_return_type(ty); - visitor.result -} - /// Replace all occurrences of `Block` with `NodeBlock` struct ReplaceBlockWithNodeBlock {} @@ -146,146 +111,69 @@ impl Fold for ReplaceBlockWithNodeBlock { } } -/// Replace all occurrences of `Block` with `NodeBlock` -fn fn_arg_replace_block_with_node_block(fn_arg: FnArg) -> FnArg { - let mut replace = ReplaceBlockWithNodeBlock {}; - fold::fold_fn_arg(&mut replace, fn_arg) -} - -/// Replace all occurrences of `Block` with `NodeBlock` -fn return_type_replace_block_with_node_block(return_type: ReturnType) -> ReturnType { - let mut replace = ReplaceBlockWithNodeBlock {}; - fold::fold_return_type(&mut replace, return_type) -} - -/// Generate the functions that generate the native call closure for each trait method. -fn generate_native_call_generators(decl: &ItemTrait) -> Result { - let fns = decl.items.iter().filter_map(|i| match i { - TraitItem::Method(ref m) => Some(&m.sig), - _ => None, - }); - - let mut result = Vec::new(); - let trait_ = &decl.ident; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Auxiliary function that is used to convert between types that use different block types. - // The function expects that both are convertible by encoding the one and decoding the other. - result.push(quote!( - #[cfg(any(feature = "std", test))] - fn convert_between_block_types - #crate_::ApiError>( - input: &I, - map_error: F, - ) -> std::result::Result - { - ::decode_with_depth_limit( - #crate_::MAX_EXTRINSIC_DEPTH, - &mut &#crate_::Encode::encode(input)[..], - ).map_err(map_error) +/// Versioned API traits are used to catch missing methods when implementing a specific version of a +/// versioned API. They contain all non-versioned methods (aka stable methods) from the main trait +/// and all versioned methods for the specific version. This means that there is one trait for each +/// version mentioned in the trait definition. For example: +/// ```ignore +/// // The trait version implicitly is 1 +/// decl_runtime_apis!( +/// trait SomeApi { +/// fn method1(); // this is a 'stable method' +/// +/// #[api_version(2)] +/// fn method2(); +/// +/// #[api_version(2)] +/// fn method3(); +/// +/// #[api_version(3)] +/// fn method4(); +/// } +/// ); +/// ``` +/// This trait has got three different versions. The function below will generate the following +/// code: +/// ``` +/// trait SomeApiV1 { +/// // in V1 only the stable methods are required. The rest has got default implementations. +/// fn method1(); +/// } +/// +/// trait SomeApiV2 { +/// // V2 contains all methods from V1 and V2. V3 not required so they are skipped. +/// fn method1(); +/// fn method2(); +/// fn method3(); +/// } +/// +/// trait SomeApiV3 { +/// // And V3 contains all methods from the trait. 
+/// fn method1(); +/// fn method2(); +/// fn method3(); +/// fn method4(); +/// } +/// ``` +fn generate_versioned_api_traits( + api: ItemTrait, + methods: BTreeMap>, +) -> Vec { + let mut result = Vec::::new(); + for (version, _) in &methods { + let mut versioned_trait = api.clone(); + versioned_trait.ident = versioned_trait_name(&versioned_trait.ident, *version); + versioned_trait.items = Vec::new(); + // Add the methods from the current version and all previous one. Versions are sorted so + // it's safe to stop early. + for (_, m) in methods.iter().take_while(|(v, _)| v <= &version) { + versioned_trait.items.extend(m.iter().cloned().map(|m| TraitItem::Method(m))); } - )); - - // Generate a native call generator for each function of the given trait. - for fn_ in fns { - let params = extract_parameter_names_types_and_borrows(fn_, AllowSelfRefInParameters::No)?; - let trait_fn_name = &fn_.ident; - let function_name_str = fn_.ident.to_string(); - let fn_name = generate_native_call_generator_fn_name(&fn_.ident); - let output = return_type_replace_block_with_node_block(fn_.output.clone()); - let output_ty = return_type_extract_type(&output); - let output = quote!( std::result::Result<#output_ty, #crate_::ApiError> ); - - // Every type that is using the `Block` generic parameter, we need to encode/decode, - // to make it compatible between the runtime/node. - let conversions = params.iter().filter(|v| type_is_using_block(&v.1)).map(|(n, t, _)| { - let param_name = quote!(#n).to_string(); - - quote!( - let #n: #t = convert_between_block_types( - &#n, - |e| #crate_::ApiError::FailedToConvertParameter { - function: #function_name_str, - parameter: #param_name, - error: e, - }, - )?; - ) - }); - // Same as for the input types, we need to check if we also need to convert the output, - // before returning it. - let output_conversion = if return_type_is_using_block(&fn_.output) { - quote!( - convert_between_block_types( - &res, - |e| #crate_::ApiError::FailedToConvertReturnValue { - function: #function_name_str, - error: e, - }, - ) - ) - } else { - quote!(Ok(res)) - }; - - let input_names = params.iter().map(|v| &v.0); - // If the type is using the block generic type, we will encode/decode it to make it - // compatible. To ensure that we forward it by ref/value, we use the value given by the - // the user. Otherwise if it is not using the block, we don't need to add anything. - let input_borrows = - params.iter().map(|v| if type_is_using_block(&v.1) { v.2 } else { None }); - - // Replace all `Block` with `NodeBlock`, add `'a` lifetime to references and collect - // all the function inputs. - let fn_inputs = fn_ - .inputs - .iter() - .map(|v| fn_arg_replace_block_with_node_block(v.clone())) - .map(|v| match v { - FnArg::Typed(ref arg) => { - let mut arg = arg.clone(); - if let Type::Reference(ref mut r) = *arg.ty { - r.lifetime = Some(parse_quote!( 'a )); - } - FnArg::Typed(arg) - }, - r => r, - }); - - let (impl_generics, ty_generics, where_clause) = decl.generics.split_for_impl(); - // We need to parse them again, to get an easy access to the actual parameters. - let impl_generics: Generics = parse_quote!( #impl_generics ); - let impl_generics_params = impl_generics.params.iter().map(|p| { - match p { - GenericParam::Type(ref ty) => { - let mut ty = ty.clone(); - ty.bounds.push(parse_quote!( 'a )); - GenericParam::Type(ty) - }, - // We should not see anything different than type params here. 
- r => r.clone(), - } - }); - // Generate the generator function - result.push(quote!( - #[cfg(any(feature = "std", test))] - pub fn #fn_name< - 'a, ApiImpl: #trait_ #ty_generics, NodeBlock: #crate_::BlockT - #(, #impl_generics_params)* - >( - #( #fn_inputs ),* - ) -> impl FnOnce() -> #output + 'a #where_clause { - move || { - #( #conversions )* - let res = ApiImpl::#trait_fn_name(#( #input_borrows #input_names ),*); - #output_conversion - } - } - )); + result.push(versioned_trait); } - Ok(quote!( #( #result )* )) + result } /// Try to parse the given `Attribute` as `renamed` attribute. @@ -323,126 +211,13 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { } } -/// Generate the functions that call the api at a given block for a given trait method. -fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { - let fns = decl.items.iter().filter_map(|i| match i { - TraitItem::Method(ref m) => Some((&m.attrs, &m.sig)), - _ => None, - }); - - let mut result = Vec::new(); - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Generate a native call generator for each function of the given trait. - for (attrs, fn_) in fns { - let trait_name = &decl.ident; - let trait_fn_name = prefix_function_with_trait(trait_name, &fn_.ident); - let fn_name = generate_call_api_at_fn_name(&fn_.ident); - - let attrs = remove_supported_attributes(&mut attrs.clone()); - - if attrs.contains_key(RENAMED_ATTRIBUTE) && attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - return Err(Error::new( - fn_.span(), - format!( - "`{}` and `{}` are not supported at once.", - RENAMED_ATTRIBUTE, CHANGED_IN_ATTRIBUTE - ), - )) - } - - // We do not need to generate this function for a method that signature was changed. - if attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - continue - } - - // Parse the renamed attributes. - let mut renames = Vec::new(); - if let Some((_, a)) = attrs.iter().find(|a| a.0 == &RENAMED_ATTRIBUTE) { - let (old_name, version) = parse_renamed_attribute(a)?; - renames.push((version, prefix_function_with_trait(trait_name, &old_name))); - } - - renames.sort_by(|l, r| r.cmp(l)); - let (versions, old_names) = renames.into_iter().fold( - (Vec::new(), Vec::new()), - |(mut versions, mut old_names), (version, old_name)| { - versions.push(version); - old_names.push(old_name); - (versions, old_names) - }, - ); - - // Generate the generator function - result.push(quote!( - #[cfg(any(feature = "std", test))] - #[allow(clippy::too_many_arguments)] - pub fn #fn_name< - R: #crate_::Encode + #crate_::Decode + std::cmp::PartialEq, - NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, - Block: #crate_::BlockT, - T: #crate_::CallApiAt, - >( - call_runtime_at: &T, - at: &#crate_::BlockId, - args: std::vec::Vec, - changes: &std::cell::RefCell<#crate_::OverlayedChanges>, - storage_transaction_cache: &std::cell::RefCell< - #crate_::StorageTransactionCache - >, - native_call: std::option::Option, - context: #crate_::ExecutionContext, - recorder: &std::option::Option<#crate_::ProofRecorder>, - ) -> std::result::Result<#crate_::NativeOrEncoded, #crate_::ApiError> { - let version = call_runtime_at.runtime_version_at(at)?; - - #( - // Check if we need to call the function by an old name. 
- if version.apis.iter().any(|(s, v)| { - s == &ID && *v < #versions - }) { - let params = #crate_::CallApiAtParams::<_, fn() -> _, _> { - at, - function: #old_names, - native_call: None, - arguments: args, - overlayed_changes: changes, - storage_transaction_cache, - context, - recorder, - }; - - let ret = #crate_::CallApiAt::::call_api_at(call_runtime_at, params)?; - - return Ok(ret) - } - )* - - let params = #crate_::CallApiAtParams { - at, - function: #trait_fn_name, - native_call, - arguments: args, - overlayed_changes: changes, - storage_transaction_cache, - context, - recorder, - }; - - #crate_::CallApiAt::::call_api_at(call_runtime_at, params) - } - )); - } - - Ok(quote!( #( #result )* )) -} - /// Generate the declaration of the trait for the runtime. fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { let mut result = Vec::new(); for decl in decls { let mut decl = decl.clone(); + let decl_span = decl.span(); extend_generics_with_block(&mut decl.generics); let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); let found_attributes = remove_supported_attributes(&mut decl.attrs); @@ -450,30 +225,73 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32))?; let id = generate_runtime_api_id(&decl.ident.to_string()); - let call_api_at_calls = generate_call_api_at_calls(&decl)?; - - // Remove methods that have the `changed_in` attribute as they are not required for the - // runtime anymore. - decl.items = decl - .items - .iter_mut() - .filter_map(|i| match i { - TraitItem::Method(ref mut method) => { - if remove_supported_attributes(&mut method.attrs) - .contains_key(CHANGED_IN_ATTRIBUTE) - { - None - } else { - // Make sure we replace all the wild card parameter names. - replace_wild_card_parameter_names(&mut method.sig); - Some(TraitItem::Method(method.clone())) - } - }, - r => Some(r.clone()), - }) - .collect(); + let trait_api_version = get_api_version(&found_attributes)?; + + let mut methods_by_version: BTreeMap> = BTreeMap::new(); + + // Process the items in the declaration. 
The filter_map function below does a lot of stuff + // because the method attributes are stripped at this point + decl.items.iter_mut().for_each(|i| match i { + TraitItem::Method(ref mut method) => { + let method_attrs = remove_supported_attributes(&mut method.attrs); + let mut method_version = trait_api_version; + // validate the api version for the method (if any) and generate default + // implementation for versioned methods + if let Some(version_attribute) = method_attrs.get(API_VERSION_ATTRIBUTE) { + method_version = match parse_runtime_api_version(version_attribute) { + Ok(method_api_ver) if method_api_ver < trait_api_version => { + let method_ver = method_api_ver.to_string(); + let trait_ver = trait_api_version.to_string(); + let mut err1 = Error::new( + version_attribute.span(), + format!( + "Method version `{}` is older than (or equal to) trait version `{}`.\ + Methods can't define versions older than the trait version.", + method_ver, + trait_ver, + ), + ); + + let err2 = match found_attributes.get(&API_VERSION_ATTRIBUTE) { + Some(attr) => Error::new(attr.span(), "Trait version is set here."), + None => Error::new( + decl_span, + "Trait version is not set so it is implicitly equal to 1.", + ), + }; + err1.combine(err2); + result.push(err1.to_compile_error()); + + trait_api_version + }, + Ok(method_api_ver) => method_api_ver, + Err(e) => { + result.push(e.to_compile_error()); + trait_api_version + }, + }; + } - let native_call_generators = generate_native_call_generators(&decl)?; + // Any method with the `changed_in` attribute isn't required for the runtime + // anymore. + if !method_attrs.contains_key(CHANGED_IN_ATTRIBUTE) { + // Make sure we replace all the wild card parameter names. + replace_wild_card_parameter_names(&mut method.sig); + + // partition methods by api version + methods_by_version.entry(method_version).or_default().push(method.clone()); + } + }, + _ => (), + }); + + let versioned_api_traits = generate_versioned_api_traits(decl.clone(), methods_by_version); + + let main_api_ident = decl.ident.clone(); + let versioned_ident = &versioned_api_traits + .first() + .expect("There should always be at least one version.") + .ident; result.push(quote!( #[doc(hidden)] @@ -482,15 +300,13 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { pub mod #mod_name { use super::*; - #decl + #( #versioned_api_traits )* + + pub use #versioned_ident as #main_api_ident; pub #api_version pub #id - - #native_call_generators - - #call_api_at_calls } )); } @@ -509,18 +325,45 @@ struct ToClientSideDecl<'a> { } impl<'a> ToClientSideDecl<'a> { - fn fold_item_trait_items(&mut self, items: Vec) -> Vec { + /// Process the given [`ItemTrait`]. + fn process(mut self, decl: ItemTrait) -> ItemTrait { + let mut decl = self.fold_item_trait(decl); + + let block_id = self.block_id; + let crate_ = self.crate_; + + // Add the special method that will be implemented by the `impl_runtime_apis!` macro + // to enable functions to call into the runtime. + decl.items.push(parse_quote! { + /// !!INTERNAL USE ONLY!! 
+ #[doc(hidden)] + fn __runtime_api_internal_call_api_at( + &self, + at: &#block_id, + context: #crate_::ExecutionContext, + params: std::vec::Vec, + fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError>; + }); + + decl + } +} + +impl<'a> ToClientSideDecl<'a> { + fn fold_item_trait_items( + &mut self, + items: Vec, + trait_generics_num: usize, + ) -> Vec { let mut result = Vec::new(); items.into_iter().for_each(|i| match i { TraitItem::Method(method) => { - let (fn_decl, fn_impl, fn_decl_ctx) = self.fold_trait_item_method(method); + let (fn_decl, fn_decl_ctx) = + self.fold_trait_item_method(method, trait_generics_num); result.push(fn_decl.into()); result.push(fn_decl_ctx.into()); - - if let Some(fn_impl) = fn_impl { - result.push(fn_impl.into()); - } }, r => result.push(r), }); @@ -531,20 +374,24 @@ impl<'a> ToClientSideDecl<'a> { fn fold_trait_item_method( &mut self, method: TraitItemMethod, - ) -> (TraitItemMethod, Option, TraitItemMethod) { + trait_generics_num: usize, + ) -> (TraitItemMethod, TraitItemMethod) { let crate_ = self.crate_; let context = quote!( #crate_::ExecutionContext::OffchainCall(None) ); - let fn_impl = self.create_method_runtime_api_impl(method.clone()); - let fn_decl = self.create_method_decl(method.clone(), context); - let fn_decl_ctx = self.create_method_decl_with_context(method); + let fn_decl = self.create_method_decl(method.clone(), context, trait_generics_num); + let fn_decl_ctx = self.create_method_decl_with_context(method, trait_generics_num); - (fn_decl, fn_impl, fn_decl_ctx) + (fn_decl, fn_decl_ctx) } - fn create_method_decl_with_context(&mut self, method: TraitItemMethod) -> TraitItemMethod { + fn create_method_decl_with_context( + &mut self, + method: TraitItemMethod, + trait_generics_num: usize, + ) -> TraitItemMethod { let crate_ = self.crate_; let context_arg: syn::FnArg = parse_quote!( context: #crate_::ExecutionContext ); - let mut fn_decl_ctx = self.create_method_decl(method, quote!(context)); + let mut fn_decl_ctx = self.create_method_decl(method, quote!(context), trait_generics_num); fn_decl_ctx.sig.ident = Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); fn_decl_ctx.sig.inputs.insert(2, context_arg); @@ -552,52 +399,6 @@ impl<'a> ToClientSideDecl<'a> { fn_decl_ctx } - /// Takes the given method and creates a `method_runtime_api_impl` method that will be - /// implemented in the runtime for the client side. - fn create_method_runtime_api_impl( - &mut self, - mut method: TraitItemMethod, - ) -> Option { - if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - return None - } - - let fn_sig = &method.sig; - let ret_type = return_type_extract_type(&fn_sig.output); - - // Get types and if the value is borrowed from all parameters. - // If there is an error, we push it as the block to the user. - let param_types = - match extract_parameter_names_types_and_borrows(fn_sig, AllowSelfRefInParameters::No) { - Ok(res) => res - .into_iter() - .map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }) - .collect::>(), - Err(e) => { - self.errors.push(e.to_compile_error()); - Vec::new() - }, - }; - let name = generate_method_runtime_api_impl_name(self.trait_, &method.sig.ident); - let block_id = self.block_id; - let crate_ = self.crate_; - - Some(parse_quote! 
{ - #[doc(hidden)] - fn #name( - &self, - at: &#block_id, - context: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError>; - }) - } - /// Takes the method declared by the user and creates the declaration we require for the runtime /// api client side. This method will call by default the `method_runtime_api_impl` for doing /// the actual call into the runtime. @@ -605,6 +406,7 @@ impl<'a> ToClientSideDecl<'a> { &mut self, mut method: TraitItemMethod, context: TokenStream, + trait_generics_num: usize, ) -> TraitItemMethod { let params = match extract_parameter_names_types_and_borrows( &method.sig, @@ -616,18 +418,42 @@ impl<'a> ToClientSideDecl<'a> { Vec::new() }, }; - let params2 = params.clone(); let ret_type = return_type_extract_type(&method.sig.output); fold_fn_decl_for_client_side(&mut method.sig, self.block_id, self.crate_); - let name_impl = generate_method_runtime_api_impl_name(self.trait_, &method.sig.ident); let crate_ = self.crate_; let found_attributes = remove_supported_attributes(&mut method.attrs); + + // Parse the renamed attributes. + let mut renames = Vec::new(); + for (_, a) in found_attributes.iter().filter(|a| a.0 == &RENAMED_ATTRIBUTE) { + match parse_renamed_attribute(a) { + Ok((old_name, version)) => { + renames.push((version, prefix_function_with_trait(&self.trait_, &old_name))); + }, + Err(e) => self.errors.push(e.to_compile_error()), + } + } + + renames.sort_by(|l, r| r.cmp(l)); + let (versions, old_names) = renames.into_iter().fold( + (Vec::new(), Vec::new()), + |(mut versions, mut old_names), (version, old_name)| { + versions.push(version); + old_names.push(old_name); + (versions, old_names) + }, + ); + + // Generate the function name before we may rename it below to + // `function_name_before_version_{}`. + let function_name = prefix_function_with_trait(&self.trait_, &method.sig.ident); + // If the method has a `changed_in` attribute, we need to alter the method name to // `method_before_version_VERSION`. - let (native_handling, param_tuple) = match get_changed_in(&found_attributes) { + match get_changed_in(&found_attributes) { Ok(Some(version)) => { // Make sure that the `changed_in` version is at least the current `api_version`. if get_api_version(self.found_attributes).ok() < Some(version) { @@ -646,47 +472,51 @@ impl<'a> ToClientSideDecl<'a> { ); method.sig.ident = ident; method.attrs.push(parse_quote!( #[deprecated] )); - - let panic = - format!("Calling `{}` should not return a native value!", method.sig.ident); - (quote!(panic!(#panic)), quote!(None)) }, - Ok(None) => (quote!(Ok(n)), quote!( Some(( #( #params2 ),* )) )), + Ok(None) => {}, Err(e) => { self.errors.push(e.to_compile_error()); - (quote!(unimplemented!()), quote!(None)) }, }; - let function_name = method.sig.ident.to_string(); + // The module where the runtime relevant stuff is declared. + let trait_name = &self.trait_; + let runtime_mod = generate_runtime_mod_name_for_trait(trait_name); + let underscores = (0..trait_generics_num).map(|_| quote!(_)); // Generate the default implementation that calls the `method_runtime_api_impl` method. method.default = Some(parse_quote! 
{ { - let runtime_api_impl_params_encoded = + let __runtime_api_impl_params_encoded__ = #crate_::Encode::encode(&( #( &#params ),* )); - self.#name_impl( + >::__runtime_api_internal_call_api_at( + self, __runtime_api_at_param__, #context, - #param_tuple, - runtime_api_impl_params_encoded, - ).and_then(|r| - match r { - #crate_::NativeOrEncoded::Native(n) => { - #native_handling - }, - #crate_::NativeOrEncoded::Encoded(r) => { - std::result::Result::map_err( - <#ret_type as #crate_::Decode>::decode(&mut &r[..]), - |err| #crate_::ApiError::FailedToDecodeReturnValue { - function: #function_name, - error: err, - } - ) - } + __runtime_api_impl_params_encoded__, + &|version| { + #( + // Check if we need to call the function by an old name. + if version.apis.iter().any(|(s, v)| { + s == &#runtime_mod::ID && *v < #versions + }) { + return #old_names + } + )* + + #function_name } ) + .and_then(|r| + std::result::Result::map_err( + <#ret_type as #crate_::Decode>::decode(&mut &r[..]), + |err| #crate_::ApiError::FailedToDecodeReturnValue { + function: #function_name, + error: err, + } + ) + ) } }); @@ -714,37 +544,12 @@ impl<'a> Fold for ToClientSideDecl<'a> { // The client side trait is only required when compiling with the feature `std` or `test`. input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] )); - input.items = self.fold_item_trait_items(input.items); + input.items = self.fold_item_trait_items(input.items, input.generics.params.len()); fold::fold_item_trait(self, input) } } -/// Parse the given attribute as `API_VERSION_ATTRIBUTE`. -fn parse_runtime_api_version(version: &Attribute) -> Result { - let meta = version.parse_meta()?; - - let err = Err(Error::new( - meta.span(), - &format!( - "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", - api_version = API_VERSION_ATTRIBUTE - ), - )); - - match meta { - Meta::List(list) => - if list.nested.len() != 1 { - err - } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { - i.base10_parse() - } else { - err - }, - _ => err, - } -} - /// Generates the identifier as const variable for the given `trait_name` /// by hashing the `trait_name`. 
fn generate_runtime_api_id(trait_name: &str) -> TokenStream { @@ -821,16 +626,14 @@ fn generate_client_side_decls(decls: &[ItemTrait]) -> Result { let mut errors = Vec::new(); let trait_ = decl.ident.clone(); - let decl = { - let mut to_client_side = ToClientSideDecl { - crate_: &crate_, - block_id: &block_id, - found_attributes: &mut found_attributes, - errors: &mut errors, - trait_: &trait_, - }; - to_client_side.fold_item_trait(decl) - }; + let decl = ToClientSideDecl { + crate_: &crate_, + block_id: &block_id, + found_attributes: &mut found_attributes, + errors: &mut errors, + trait_: &trait_, + } + .process(decl); let api_version = get_api_version(&found_attributes); diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 02ef37370ffeb..0087a1ad37ab9 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -17,13 +17,13 @@ use crate::utils::{ extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, - extract_parameter_names_types_and_borrows, generate_call_api_at_fn_name, generate_crate_access, - generate_hidden_includes, generate_method_runtime_api_impl_name, - generate_native_call_generator_fn_name, generate_runtime_mod_name_for_trait, - prefix_function_with_trait, return_type_extract_type, AllowSelfRefInParameters, - RequireQualifiedTraitPath, + extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, + generate_runtime_mod_name_for_trait, parse_runtime_api_version, prefix_function_with_trait, + versioned_trait_name, AllowSelfRefInParameters, RequireQualifiedTraitPath, }; +use crate::common::API_VERSION_ATTRIBUTE; + use proc_macro2::{Span, TokenStream}; use quote::quote; @@ -33,8 +33,7 @@ use syn::{ parse::{Error, Parse, ParseStream, Result}, parse_macro_input, parse_quote, spanned::Spanned, - Attribute, GenericArgument, Ident, ImplItem, ItemImpl, Path, PathArguments, Signature, Type, - TypePath, + Attribute, Ident, ImplItem, ItemImpl, Path, Signature, Type, TypePath, }; use std::collections::HashSet; @@ -105,8 +104,10 @@ fn generate_impl_calls( let mut impl_calls = Vec::new(); for impl_ in impls { + let trait_api_ver = extract_api_version(&impl_.attrs, impl_.span())?; let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; let impl_trait = extend_with_runtime_decl_path(impl_trait_path.clone()); + let impl_trait = extend_with_api_version(impl_trait, trait_api_ver); let impl_trait_ident = &impl_trait_path .segments .last() @@ -326,35 +327,6 @@ fn generate_runtime_api_base_structures() -> Result { #[cfg(any(feature = "std", test))] impl> RuntimeApiImpl { - fn call_api_at< - R: #crate_::Encode + #crate_::Decode + std::cmp::PartialEq, - F: FnOnce( - &C, - &std::cell::RefCell<#crate_::OverlayedChanges>, - &std::cell::RefCell<#crate_::StorageTransactionCache>, - &std::option::Option<#crate_::ProofRecorder>, - ) -> std::result::Result<#crate_::NativeOrEncoded, E>, - E, - >( - &self, - call_api_at: F, - ) -> std::result::Result<#crate_::NativeOrEncoded, E> { - if *std::cell::RefCell::borrow(&self.commit_on_success) { - #crate_::OverlayedChanges::start_transaction( - &mut std::cell::RefCell::borrow_mut(&self.changes) - ); - } - let res = call_api_at( - &self.call, - &self.changes, - &self.storage_transaction_cache, - &self.recorder, - ); - - self.commit_or_rollback(std::result::Result::is_ok(&res)); - res - } - fn commit_or_rollback(&self, commit: bool) { let proof = "\ 
We only close a transaction when we opened one ourself. @@ -398,6 +370,24 @@ fn extend_with_runtime_decl_path(mut trait_: Path) -> Path { trait_ } +fn extend_with_api_version(mut trait_: Path, version: Option) -> Path { + let version = if let Some(v) = version { + v + } else { + // nothing to do + return trait_ + }; + + let trait_name = &mut trait_ + .segments + .last_mut() + .expect("Trait path should always contain at least one item; qed") + .ident; + *trait_name = versioned_trait_name(trait_name, version); + + trait_ +} + /// Generates the implementations of the apis for the runtime. fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { let mut impls_prepared = Vec::new(); @@ -405,9 +395,12 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { // We put `runtime` before each trait to get the trait that is intended for the runtime and // we put the `RuntimeBlock` as first argument for the trait generics. for impl_ in impls.iter() { + let trait_api_ver = extract_api_version(&impl_.attrs, impl_.span())?; + let mut impl_ = impl_.clone(); let trait_ = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(); let trait_ = extend_with_runtime_decl_path(trait_); + let trait_ = extend_with_api_version(trait_, trait_api_ver); impl_.trait_.as_mut().unwrap().1 = trait_; impl_.attrs = filter_cfg_attrs(&impl_.attrs); @@ -424,121 +417,70 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { /// with code that calls into the runtime. struct ApiRuntimeImplToApiRuntimeApiImpl<'a> { runtime_block: &'a TypePath, - runtime_mod_path: &'a Path, - runtime_type: &'a Type, - trait_generic_arguments: &'a [GenericArgument], - impl_trait: &'a Ident, } -impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { - fn fold_type_path(&mut self, input: TypePath) -> TypePath { - let new_ty_path = - if input == *self.runtime_block { parse_quote!(__SR_API_BLOCK__) } else { input }; - - fold::fold_type_path(self, new_ty_path) - } +impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { + /// Process the given item implementation. + fn process(mut self, input: ItemImpl) -> ItemImpl { + let mut input = self.fold_item_impl(input); - fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { - let block = { - let runtime_mod_path = self.runtime_mod_path; - let runtime = self.runtime_type; - let native_call_generator_ident = - generate_native_call_generator_fn_name(&input.sig.ident); - let call_api_at_call = generate_call_api_at_fn_name(&input.sig.ident); - let trait_generic_arguments = self.trait_generic_arguments; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Generate the access to the native parameters - let param_tuple_access = if input.sig.inputs.len() == 1 { - vec![quote!(p)] - } else { - input - .sig - .inputs - .iter() - .enumerate() - .map(|(i, _)| { - let i = syn::Index::from(i); - quote!( p.#i ) - }) - .collect::>() - }; - - let (param_types, error) = match extract_parameter_names_types_and_borrows( - &input.sig, - AllowSelfRefInParameters::No, - ) { - Ok(res) => ( - res.into_iter() - .map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }) - .collect::>(), - None, - ), - Err(e) => (Vec::new(), Some(e.to_compile_error())), - }; + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - // Rewrite the input parameters. - input.sig.inputs = parse_quote! { + // Delete all functions, because all of them are default implemented by + // `decl_runtime_apis!`. 
We only need to implement the `__runtime_api_internal_call_api_at` + // function. + input.items.clear(); + input.items.push(parse_quote! { + fn __runtime_api_internal_call_api_at( &self, at: &#crate_::BlockId<__SR_API_BLOCK__>, context: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - }; - - input.sig.ident = - generate_method_runtime_api_impl_name(self.impl_trait, &input.sig.ident); - let ret_type = return_type_extract_type(&input.sig.output); - - // Generate the correct return type. - input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> - ); - - // Generate the new method implementation that calls into the runtime. - parse_quote!( - { - // Get the error to the user (if we have one). - #error - - self.call_api_at( - | - call_runtime_at, - changes, - storage_transaction_cache, - recorder - | { - #runtime_mod_path #call_api_at_call( - call_runtime_at, - at, - params_encoded, - changes, - storage_transaction_cache, - params.map(|p| { - #runtime_mod_path #native_call_generator_ident :: - <#runtime, __SR_API_BLOCK__ #(, #trait_generic_arguments )*> ( - #( #param_tuple_access ),* - ) - }), - context, - recorder, - ) - } - ) + params: std::vec::Vec, + fn_name: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError> { + if *std::cell::RefCell::borrow(&self.commit_on_success) { + #crate_::OverlayedChanges::start_transaction( + &mut std::cell::RefCell::borrow_mut(&self.changes) + ); } - ) - }; - let mut input = fold::fold_impl_item_method(self, input); - // We need to set the block, after we modified the rest of the ast, otherwise we would - // modify our generated block as well. - input.block = block; + let res = (|| { + let version = #crate_::CallApiAt::<__SR_API_BLOCK__>::runtime_version_at(self.call, at)?; + + let params = #crate_::CallApiAtParams::<_, fn() -> _, _> { + at, + function: (*fn_name)(version), + native_call: None, + arguments: params, + overlayed_changes: &self.changes, + storage_transaction_cache: &self.storage_transaction_cache, + context, + recorder: &self.recorder, + }; + + #crate_::CallApiAt::<__SR_API_BLOCK__>::call_api_at::<#crate_::NeverNativeValue, _>( + self.call, + params, + ) + })(); + + self.commit_or_rollback(std::result::Result::is_ok(&res)); + + res.map(#crate_::NativeOrEncoded::into_encoded) + } + }); + input } +} + +impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { + fn fold_type_path(&mut self, input: TypePath) -> TypePath { + let new_ty_path = + if input == *self.runtime_block { parse_quote!(__SR_API_BLOCK__) } else { input }; + + fold::fold_type_path(self, new_ty_path) + } fn fold_item_impl(&mut self, mut input: ItemImpl) -> ItemImpl { // All this `UnwindSafe` magic below here is required for this rust bug: @@ -594,45 +536,55 @@ fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result for impl_ in impls { let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; - let impl_trait = &impl_trait_path - .segments - .last() - .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? 
- .clone(); let runtime_block = extract_block_type_from_trait_path(impl_trait_path)?; - let runtime_type = &impl_.self_ty; let mut runtime_mod_path = extend_with_runtime_decl_path(impl_trait_path.clone()); // remove the trait to get just the module path runtime_mod_path.segments.pop(); - let trait_generic_arguments = match impl_trait.arguments { - PathArguments::Parenthesized(_) | PathArguments::None => vec![], - PathArguments::AngleBracketed(ref b) => b.args.iter().cloned().collect(), - }; - - let mut visitor = ApiRuntimeImplToApiRuntimeApiImpl { - runtime_block, - runtime_mod_path: &runtime_mod_path, - runtime_type, - trait_generic_arguments: &trait_generic_arguments, - impl_trait: &impl_trait.ident, - }; + let processed_impl = + ApiRuntimeImplToApiRuntimeApiImpl { runtime_block }.process(impl_.clone()); - result.push(visitor.fold_item_impl(impl_.clone())); + result.push(processed_impl); } Ok(quote!( #( #result )* )) } +fn populate_runtime_api_versions( + result: &mut Vec, + sections: &mut Vec, + attrs: Vec, + id: Path, + version: TokenStream, + crate_access: &TokenStream, +) { + result.push(quote!( + #( #attrs )* + (#id, #version) + )); + + sections.push(quote!( + #( #attrs )* + const _: () = { + // All sections with the same name are going to be merged by concatenation. + #[cfg(not(feature = "std"))] + #[link_section = "runtime_apis"] + static SECTION_CONTENTS: [u8; 12] = #crate_access::serialize_runtime_api_info(#id, #version); + }; + )); +} + /// Generates `RUNTIME_API_VERSIONS` that holds all version information about the implemented /// runtime apis. fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { - let mut result = Vec::with_capacity(impls.len()); - let mut sections = Vec::with_capacity(impls.len()); + let mut result = Vec::::with_capacity(impls.len()); + let mut sections = Vec::::with_capacity(impls.len()); let mut processed_traits = HashSet::new(); let c = generate_crate_access(HIDDEN_INCLUDES_ID); for impl_ in impls { + let api_ver = extract_api_version(&impl_.attrs, impl_.span())?.map(|a| a as u32); + let mut path = extend_with_runtime_decl_path( extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?.clone(), ); @@ -655,23 +607,11 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { } let id: Path = parse_quote!( #path ID ); - let version: Path = parse_quote!( #path VERSION ); + let version = quote!( #path VERSION ); let attrs = filter_cfg_attrs(&impl_.attrs); - result.push(quote!( - #( #attrs )* - (#id, #version) - )); - - sections.push(quote!( - #( #attrs )* - const _: () = { - // All sections with the same name are going to be merged by concatenation. - #[cfg(not(feature = "std"))] - #[link_section = "runtime_apis"] - static SECTION_CONTENTS: [u8; 12] = #c::serialize_runtime_api_info(#id, #version); - }; - )); + let api_ver = api_ver.map(|a| quote!( #a )).unwrap_or_else(|| version); + populate_runtime_api_versions(&mut result, &mut sections, attrs, id, api_ver, &c) } Ok(quote!( @@ -726,6 +666,33 @@ fn filter_cfg_attrs(attrs: &[Attribute]) -> Vec { attrs.iter().filter(|a| a.path.is_ident("cfg")).cloned().collect() } +// Extracts the value of `API_VERSION_ATTRIBUTE` and handles errors. +// Returns: +// - Err if the version is malformed +// - Some(u64) if the version is set +// - None if the version is not set (this is valid). 
+fn extract_api_version(attrs: &Vec, span: Span) -> Result> { + // First fetch all `API_VERSION_ATTRIBUTE` values (should be only one) + let api_ver = attrs + .iter() + .filter(|a| a.path.is_ident(API_VERSION_ATTRIBUTE)) + .collect::>(); + + if api_ver.len() > 1 { + return Err(Error::new( + span, + format!( + "Found multiple #[{}] attributes for an API implementation. \ + Each runtime API can have only one version.", + API_VERSION_ATTRIBUTE + ), + )) + } + + // Parse the runtime version if there exists one. + api_ver.first().map(|v| parse_runtime_api_version(v)).transpose() +} + #[cfg(test)] mod tests { use super::*; diff --git a/primitives/api/proc-macro/src/lib.rs b/primitives/api/proc-macro/src/lib.rs index 20a2f76f2c83d..31636b8e2d545 100644 --- a/primitives/api/proc-macro/src/lib.rs +++ b/primitives/api/proc-macro/src/lib.rs @@ -21,6 +21,7 @@ use proc_macro::TokenStream; +mod common; mod decl_runtime_apis; mod impl_runtime_apis; mod mock_impl_runtime_apis; diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 6098f8d6bd741..4de0f6b9e571b 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -18,8 +18,7 @@ use crate::utils::{ extract_block_type_from_trait_path, extract_impl_trait, extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, - generate_method_runtime_api_impl_name, return_type_extract_type, AllowSelfRefInParameters, - RequireQualifiedTraitPath, + return_type_extract_type, AllowSelfRefInParameters, RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; @@ -31,7 +30,7 @@ use syn::{ parse::{Error, Parse, ParseStream, Result}, parse_macro_input, parse_quote, spanned::Spanned, - Attribute, Ident, ItemImpl, Pat, Type, TypePath, + Attribute, ItemImpl, Pat, Type, TypePath, }; /// Unique identifier used to make the hidden includes unique for this macro. @@ -40,7 +39,7 @@ const HIDDEN_INCLUDES_ID: &str = "MOCK_IMPL_RUNTIME_APIS"; /// The `advanced` attribute. /// /// If this attribute is given to a function, the function gets access to the `BlockId` as first -/// parameter and needs to return a `Result` with the appropiate error type. +/// parameter and needs to return a `Result` with the appropriate error type. const ADVANCED_ATTRIBUTE: &str = "advanced"; /// The structure used for parsing the runtime api implementations. 
@@ -126,34 +125,63 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result for #self_ty { - fn Core_version_runtime_api_impl( + fn __runtime_api_internal_call_api_at( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: Option<()>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #crate_::ApiError> { - unimplemented!("Not required for testing!") + _: std::vec::Vec, + _: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError> { + unimplemented!("`__runtime_api_internal_call_api_at` not implemented for runtime api mocks") } - fn Core_execute_block_runtime_api_impl( + fn version( + &self, + _: &#crate_::BlockId<#block_type>, + ) -> std::result::Result<#crate_::RuntimeVersion, #crate_::ApiError> { + unimplemented!("`Core::version` not implemented for runtime api mocks") + } + + fn version_with_context( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + ) -> std::result::Result<#crate_::RuntimeVersion, #crate_::ApiError> { + unimplemented!("`Core::version` not implemented for runtime api mocks") + } + + fn execute_block( + &self, + _: &#crate_::BlockId<#block_type>, + _: #block_type, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::execute_block` not implemented for runtime api mocks") + } + + fn execute_block_with_context( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: Option<#block_type>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { - unimplemented!("Not required for testing!") + _: #block_type, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::execute_block` not implemented for runtime api mocks") } - fn Core_initialize_block_runtime_api_impl( + fn initialize_block( + &self, + _: &#crate_::BlockId<#block_type>, + _: &<#block_type as #crate_::BlockT>::Header, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") + } + + fn initialize_block_with_context( &self, _: &#crate_::BlockId<#block_type>, _: #crate_::ExecutionContext, - _: Option<&<#block_type as #crate_::BlockT>::Header>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { - unimplemented!("Not required for testing!") + _: &<#block_type as #crate_::BlockT>::Header, + ) -> std::result::Result<(), #crate_::ApiError> { + unimplemented!("`Core::initialize_block` not implemented for runtime api mocks") } } )) @@ -216,14 +244,53 @@ fn get_at_param_name( } } -/// Auxialiry structure to fold a runtime api trait implementation into the expected format. +/// Auxiliary structure to fold a runtime api trait implementation into the expected format. /// /// This renames the methods, changes the method parameters and extracts the error type. struct FoldRuntimeApiImpl<'a> { /// The block type that is being used. block_type: &'a TypePath, - /// The identifier of the trait being implemented. - impl_trait: &'a Ident, +} + +impl<'a> FoldRuntimeApiImpl<'a> { + /// Process the given [`syn::ItemImpl`]. + fn process(mut self, impl_item: syn::ItemImpl) -> syn::ItemImpl { + let mut impl_item = self.fold_item_impl(impl_item); + + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + // We also need to overwrite all the `_with_context` methods. To do this, + // we clone all methods and add them again with the new name plus one more argument. 
+ impl_item.items.extend(impl_item.items.clone().into_iter().filter_map(|i| { + if let syn::ImplItem::Method(mut m) = i { + m.sig.ident = quote::format_ident!("{}_with_context", m.sig.ident); + m.sig.inputs.insert(2, parse_quote!( _: #crate_::ExecutionContext )); + + Some(m.into()) + } else { + None + } + })); + + let block_type = self.block_type; + + impl_item.items.push(parse_quote! { + fn __runtime_api_internal_call_api_at( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + _: std::vec::Vec, + _: &dyn Fn(#crate_::RuntimeVersion) -> &'static str, + ) -> std::result::Result, #crate_::ApiError> { + unimplemented!( + "`__runtime_api_internal_call_api_at` not implemented for runtime api mocks. \ + Calling deprecated methods is not supported by mocked runtime api." + ) + } + }); + + impl_item + } } impl<'a> Fold for FoldRuntimeApiImpl<'a> { @@ -277,14 +344,9 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { input.sig.inputs = parse_quote! { &self, #at_param_name: #block_id_type, - _: #crate_::ExecutionContext, - ___params___sp___api___: Option<( #( #param_types ),* )>, - _: Vec, + #( #param_names: #param_types ),* }; - input.sig.ident = - generate_method_runtime_api_impl_name(self.impl_trait, &input.sig.ident); - // When using advanced, the user needs to declare the correct return type on its own, // otherwise do it for the user. if !is_advanced { @@ -292,7 +354,7 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { // Generate the correct return type. input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> + -> std::result::Result<#ret_type, #crate_::ApiError> ); } @@ -304,7 +366,7 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { quote! { let __fn_implementation__ = move || #orig_block; - Ok(#crate_::NativeOrEncoded::Native(__fn_implementation__())) + Ok(__fn_implementation__()) } }; @@ -314,9 +376,6 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { // Get the error to the user (if we have one). 
#( #errors )* - let (#( #param_names ),*) = ___params___sp___api___ - .expect("Mocked runtime apis don't support calling deprecated api versions"); - #construct_return_value } ) @@ -351,11 +410,6 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result Result Some(block_type.clone()), }; - let mut visitor = FoldRuntimeApiImpl { block_type, impl_trait: &impl_trait.ident }; - - result.push(visitor.fold_item_impl(impl_.clone())); + result.push(FoldRuntimeApiImpl { block_type }.process(impl_.clone())); } Ok(GeneratedRuntimeApiImpls { diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 97b456b62dfa6..2ccd050cfb151 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -18,16 +18,18 @@ use proc_macro2::{Span, TokenStream}; use syn::{ - parse_quote, spanned::Spanned, token::And, Error, FnArg, GenericArgument, Ident, ImplItem, - ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, + parse_quote, spanned::Spanned, token::And, Attribute, Error, FnArg, GenericArgument, Ident, + ImplItem, ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, }; -use quote::quote; +use quote::{format_ident, quote}; use std::env; use proc_macro_crate::{crate_name, FoundCrate}; +use crate::common::API_VERSION_ATTRIBUTE; + fn generate_hidden_includes_mod_name(unique_id: &'static str) -> Ident { Ident::new(&format!("sp_api_hidden_includes_{}", unique_id), Span::call_site()) } @@ -68,11 +70,6 @@ pub fn generate_runtime_mod_name_for_trait(trait_: &Ident) -> Ident { Ident::new(&format!("runtime_decl_for_{}", trait_), Span::call_site()) } -/// Generates a name for a method that needs to be implemented in the runtime for the client side. -pub fn generate_method_runtime_api_impl_name(trait_: &Ident, method: &Ident) -> Ident { - Ident::new(&format!("{}_{}_runtime_api_impl", trait_, method), Span::call_site()) -} - /// Get the type of a `syn::ReturnType`. pub fn return_type_extract_type(rt: &ReturnType) -> Type { match rt { @@ -166,16 +163,6 @@ pub fn extract_parameter_names_types_and_borrows( Ok(result) } -/// Generates the name for the native call generator function. -pub fn generate_native_call_generator_fn_name(fn_name: &Ident) -> Ident { - Ident::new(&format!("{}_native_call_generator", fn_name), Span::call_site()) -} - -/// Generates the name for the call api at function. -pub fn generate_call_api_at_fn_name(fn_name: &Ident) -> Ident { - Ident::new(&format!("{}_call_api_at", fn_name), Span::call_site()) -} - /// Prefix the given function with the trait name. pub fn prefix_function_with_trait(trait_: &Ident, function: &F) -> String { format!("{}_{}", trait_, function.to_string()) @@ -267,3 +254,23 @@ pub fn extract_impl_trait(impl_: &ItemImpl, require: RequireQualifiedTraitPath) } }) } + +/// Parse the given attribute as `API_VERSION_ATTRIBUTE`. +pub fn parse_runtime_api_version(version: &Attribute) -> Result { + let version = version.parse_args::().map_err(|_| { + Error::new( + version.span(), + &format!( + "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", + api_version = API_VERSION_ATTRIBUTE + ), + ) + })?; + + version.base10_parse() +} + +// Each versioned trait is named 'ApiNameVN' where N is the specific version. E.g. 
ParachainHostV2 +pub fn versioned_trait_name(trait_ident: &Ident, version: u64) -> Ident { + format_ident!("{}V{}", trait_ident, version) +} diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 2635c81948ff3..a5230cfc5f116 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -78,12 +78,12 @@ pub use hash_db::Hasher; #[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; -#[doc(hidden)] -#[cfg(feature = "std")] -pub use sp_core::NativeOrEncoded; use sp_core::OpaqueMetadata; #[doc(hidden)] pub use sp_core::{offchain, ExecutionContext}; +#[doc(hidden)] +#[cfg(feature = "std")] +pub use sp_core::{NativeOrEncoded, NeverNativeValue}; #[cfg(feature = "std")] pub use sp_runtime::StateVersion; #[doc(hidden)] @@ -187,6 +187,56 @@ pub const MAX_EXTRINSIC_DEPTH: u32 = 256; /// To check if a given runtime implements a runtime api trait, the `RuntimeVersion` has the /// function `has_api()`. Also the `ApiExt` provides a function `has_api(at: &BlockId)` /// to check if the runtime at the given block id implements the requested runtime api trait. +/// +/// # Declaring multiple api versions +/// +/// Optionally multiple versions of the same api can be declared. This is useful for +/// development purposes. For example you want to have a testing version of the api which is +/// available only on a testnet. You can define one stable and one development version. This +/// can be done like this: +/// ```rust +/// sp_api::decl_runtime_apis! { +/// /// Declare the api trait. +/// #[api_version(2)] +/// pub trait Balance { +/// /// Get the balance. +/// fn get_balance() -> u64; +/// /// Set the balance. +/// fn set_balance(val: u64); +/// /// Transfer the balance to another user id +/// #[api_version(3)] +/// fn transfer_balance(uid: u64); +/// } +/// } +/// +/// # fn main() {} +/// ``` +/// The example above defines two api versions - 2 and 3. Version 2 contains `get_balance` and +/// `set_balance`. Version 3 additionally contains `transfer_balance`, which is not available +/// in version 2. Version 2 in this case is considered the default/base version of the api. +/// More than two versions can be defined this way. For example: +/// ```rust +/// sp_api::decl_runtime_apis! { +/// /// Declare the api trait. +/// #[api_version(2)] +/// pub trait Balance { +/// /// Get the balance. +/// fn get_balance() -> u64; +/// /// Set the balance. +/// fn set_balance(val: u64); +/// /// Transfer the balance to another user id +/// #[api_version(3)] +/// fn transfer_balance(uid: u64); +/// /// Clears the balance +/// #[api_version(4)] +/// fn clear_balance(); +/// } +/// } +/// +/// # fn main() {} +/// ``` +/// Note that the latest version (4 in our example above) always contains all methods from all +/// the versions before. pub use sp_api_proc_macro::decl_runtime_apis; /// Tags given trait implementations as runtime apis. @@ -276,6 +326,22 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// /// # fn main() {} /// ``` +/// +/// # Implementing specific api version +/// +/// If `decl_runtime_apis!` declares multiple versions for an api `impl_runtime_apis!` +/// should specify which version it implements by adding `api_version` attribute to the +/// `impl` block. If omitted - the base/default version is implemented. Here is an example: +/// ```ignore +/// sp_api::impl_runtime_apis! 
{ +/// #[api_version(3)] +/// impl self::Balance for Runtime { +/// // implementation +/// } +/// } +/// ``` +/// In this case `Balance` api version 3 is being implemented for `Runtime`. The `impl` block +/// must contain all methods declared in version 3 and below. pub use sp_api_proc_macro::impl_runtime_apis; /// Mocks given trait implementations as runtime apis. @@ -341,15 +407,13 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// using the `advanced` attribute, the macro expects that the first parameter of the function /// is this `at` parameter. Besides that the macro also doesn't do the automatic return value /// rewrite, which means that full return value must be specified. The full return value is -/// constructed like [`Result`]`<`[`NativeOrEncoded`](sp_api::NativeOrEncoded)`, -/// Error>` while `ReturnValue` being the return value that is specified in the trait -/// declaration. +/// constructed like [`Result`]`<, Error>` while `ReturnValue` being the return +/// value that is specified in the trait declaration. /// /// ## Example /// ```rust /// # use sp_runtime::{traits::Block as BlockT, generic::BlockId}; /// # use sp_test_primitives::Block; -/// # use sp_core::NativeOrEncoded; /// # use codec; /// # /// # sp_api::decl_runtime_apis! { @@ -368,13 +432,13 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// sp_api::mock_impl_runtime_apis! { /// impl Balance for MockApi { /// #[advanced] -/// fn get_balance(&self, at: &BlockId) -> Result, sp_api::ApiError> { +/// fn get_balance(&self, at: &BlockId) -> Result { /// println!("Being called at: {}", at); /// /// Ok(self.balance.into()) /// } /// #[advanced] -/// fn set_balance(at: &BlockId, val: u64) -> Result, sp_api::ApiError> { +/// fn set_balance(at: &BlockId, val: u64) -> Result<(), sp_api::ApiError> { /// if let BlockId::Number(1) = at { /// println!("Being called to set balance to: {}", val); /// } diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs index 1db416a1d3db6..42628830cc7fa 100644 --- a/primitives/api/test/tests/decl_and_impl.rs +++ b/primitives/api/test/tests/decl_and_impl.rs @@ -18,7 +18,6 @@ use sp_api::{ decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, ApiError, ApiExt, RuntimeApiInfo, }; -use sp_core::NativeOrEncoded; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, GetNodeBlockType}, @@ -47,6 +46,15 @@ decl_runtime_apis! { #[changed_in(2)] fn same_name() -> String; } + + #[api_version(2)] + pub trait ApiWithMultipleVersions { + fn stable_one(data: u64); + #[api_version(3)] + fn new_one(); + #[api_version(4)] + fn glory_one(); + } } impl_runtime_apis! { @@ -72,6 +80,13 @@ impl_runtime_apis! { fn same_name() {} } + #[api_version(3)] + impl self::ApiWithMultipleVersions for Runtime { + fn stable_one(_: u64) {} + + fn new_one() {} + } + impl sp_api::Core for Runtime { fn version() -> sp_version::RuntimeVersion { unimplemented!() @@ -104,22 +119,12 @@ mock_impl_runtime_apis! 
{ } #[advanced] - fn same_name(_: &BlockId) -> - Result< - NativeOrEncoded<()>, - ApiError - > - { + fn same_name(_: &BlockId) -> Result<(), ApiError> { Ok(().into()) } #[advanced] - fn wild_card(at: &BlockId, _: u32) -> - Result< - NativeOrEncoded<()>, - ApiError - > - { + fn wild_card(at: &BlockId, _: u32) -> Result<(), ApiError> { if let BlockId::Number(1337) = at { // yeah Ok(().into()) @@ -176,6 +181,9 @@ fn check_runtime_api_info() { &runtime_decl_for_ApiWithCustomVersion::ID, ); assert_eq!(>::VERSION, 2); + + // The stable version of the API + assert_eq!(>::VERSION, 2); } fn check_runtime_api_versions_contains() { @@ -186,6 +194,9 @@ fn check_runtime_api_versions_contains() { fn check_runtime_api_versions() { check_runtime_api_versions_contains::>(); check_runtime_api_versions_contains::>(); + assert!(RUNTIME_API_VERSIONS + .iter() + .any(|v| v == &(>::ID, 3))); check_runtime_api_versions_contains::>(); } @@ -198,7 +209,7 @@ fn mock_runtime_api_has_api() { } #[test] -#[should_panic(expected = "Mocked runtime apis don't support calling deprecated api versions")] +#[should_panic(expected = "Calling deprecated methods is not supported by mocked runtime api.")] fn mock_runtime_api_panics_on_calling_old_version() { let mock = MockApi { block: None }; diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index ba42b342377c7..968bda6885d0d 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -25,7 +25,7 @@ use sp_state_machine::{ }; use substrate_test_runtime_client::{ prelude::*, - runtime::{Block, DecodeFails, Header, TestAPI, Transfer}, + runtime::{Block, Header, TestAPI, Transfer}, DefaultTestClientBuilderExt, TestClientBuilder, }; @@ -51,28 +51,6 @@ fn calling_wasm_runtime_function() { calling_function_with_strat(ExecutionStrategy::AlwaysWasm); } -#[test] -#[should_panic(expected = "FailedToConvertParameter { function: \"fail_convert_parameter\"")] -fn calling_native_runtime_function_with_non_decodable_parameter() { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) - .build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - runtime_api.fail_convert_parameter(&block_id, DecodeFails::default()).unwrap(); -} - -#[test] -#[should_panic(expected = "FailedToConvertReturnValue { function: \"fail_convert_return_value\"")] -fn calling_native_runtime_function_with_non_decodable_return_value() { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) - .build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - runtime_api.fail_convert_return_value(&block_id).unwrap(); -} - #[test] fn calling_native_runtime_signature_changed_function() { let client = TestClientBuilder::new() diff --git a/primitives/api/test/tests/trybuild.rs b/primitives/api/test/tests/trybuild.rs index f3d6aa59a0336..13af1ded7dc6b 100644 --- a/primitives/api/test/tests/trybuild.rs +++ b/primitives/api/test/tests/trybuild.rs @@ -30,4 +30,5 @@ fn ui() { let t = trybuild::TestCases::new(); t.compile_fail("tests/ui/*.rs"); + t.pass("tests/ui/positive_cases/*.rs"); } diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index b1478e2f53344..2c47c2f480add 100644 --- 
a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -15,49 +15,6 @@ note: type in trait = note: expected fn pointer `fn(u64)` found fn pointer `fn(std::string::String)` -error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait - --> tests/ui/impl_incorrect_method_signature.rs:17:1 - | -17 | sp_api::impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found struct `std::string::String` - | | -18 | | impl self::Api for Runtime { -19 | | fn test(data: String) {} -20 | | } -... | -32 | | } -33 | | } - | |_- help: change the parameter type to match the trait: `std::option::Option` - | -note: type in trait - --> tests/ui/impl_incorrect_method_signature.rs:11:1 - | -11 | / sp_api::decl_runtime_apis! { -12 | | pub trait Api { -13 | | fn test(data: u64); -14 | | } -15 | | } - | |_^ - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0308]: mismatched types - --> tests/ui/impl_incorrect_method_signature.rs:17:1 - | -17 | / sp_api::impl_runtime_apis! { -18 | | impl self::Api for Runtime { -19 | | fn test(data: String) {} -20 | | } -... | -32 | | } -33 | | } - | |_^ expected `u64`, found struct `std::string::String` - | - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - error[E0308]: mismatched types --> tests/ui/impl_incorrect_method_signature.rs:19:11 | diff --git a/primitives/api/test/tests/ui/impl_missing_version.rs b/primitives/api/test/tests/ui/impl_missing_version.rs new file mode 100644 index 0000000000000..63e0599622ac9 --- /dev/null +++ b/primitives/api/test/tests/ui/impl_missing_version.rs @@ -0,0 +1,40 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + } +} + +sp_api::impl_runtime_apis! 
{ + #[api_version(4)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + fn test3() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/impl_missing_version.stderr b/primitives/api/test/tests/ui/impl_missing_version.stderr new file mode 100644 index 0000000000000..c0abeffe0cccf --- /dev/null +++ b/primitives/api/test/tests/ui/impl_missing_version.stderr @@ -0,0 +1,14 @@ +error[E0433]: failed to resolve: could not find `ApiV4` in `runtime_decl_for_Api` + --> tests/ui/impl_missing_version.rs:21:13 + | +21 | impl self::Api for Runtime { + | ^^^ could not find `ApiV4` in `runtime_decl_for_Api` + +error[E0405]: cannot find trait `ApiV4` in module `self::runtime_decl_for_Api` + --> tests/ui/impl_missing_version.rs:21:13 + | +11 | pub trait Api { + | ------------- similarly named trait `ApiV2` defined here +... +21 | impl self::Api for Runtime { + | ^^^ help: a trait with a similar name exists: `ApiV2` diff --git a/primitives/api/test/tests/ui/invalid_api_version.rs b/primitives/api/test/tests/ui/invalid_api_version_1.rs similarity index 100% rename from primitives/api/test/tests/ui/invalid_api_version.rs rename to primitives/api/test/tests/ui/invalid_api_version_1.rs diff --git a/primitives/api/test/tests/ui/invalid_api_version.stderr b/primitives/api/test/tests/ui/invalid_api_version_1.stderr similarity index 65% rename from primitives/api/test/tests/ui/invalid_api_version.stderr rename to primitives/api/test/tests/ui/invalid_api_version_1.stderr index 7770bc70e72d6..53ffce959bb66 100644 --- a/primitives/api/test/tests/ui/invalid_api_version.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_1.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> $DIR/invalid_api_version.rs:2:4 + --> tests/ui/invalid_api_version_1.rs:2:2 | 2 | #[api_version] - | ^^^^^^^^^^^ + | ^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_2.stderr b/primitives/api/test/tests/ui/invalid_api_version_2.stderr index 7ca6a7eebe49c..0c5274d4680ff 100644 --- a/primitives/api/test/tests/ui/invalid_api_version_2.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_2.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> $DIR/invalid_api_version_2.rs:2:4 + --> tests/ui/invalid_api_version_2.rs:2:2 | 2 | #[api_version("1")] - | ^^^^^^^^^^^ + | ^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_3.stderr b/primitives/api/test/tests/ui/invalid_api_version_3.stderr index cef4763a6de96..4a34a7aa9b47a 100644 --- a/primitives/api/test/tests/ui/invalid_api_version_3.stderr +++ b/primitives/api/test/tests/ui/invalid_api_version_3.stderr @@ -1,5 +1,5 @@ error: Unexpected `api_version` attribute. The supported format is `api_version(1)` - --> $DIR/invalid_api_version_3.rs:2:4 + --> tests/ui/invalid_api_version_3.rs:2:2 | 2 | #[api_version()] - | ^^^^^^^^^^^ + | ^ diff --git a/primitives/api/test/tests/ui/invalid_api_version_4.rs b/primitives/api/test/tests/ui/invalid_api_version_4.rs new file mode 100644 index 0000000000000..37b5b6ffa25d1 --- /dev/null +++ b/primitives/api/test/tests/ui/invalid_api_version_4.rs @@ -0,0 +1,8 @@ +sp_api::decl_runtime_apis! 
{ + pub trait Api { + #[api_version("1")] + fn test(data: u64); + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/invalid_api_version_4.stderr b/primitives/api/test/tests/ui/invalid_api_version_4.stderr new file mode 100644 index 0000000000000..57541a97f6c91 --- /dev/null +++ b/primitives/api/test/tests/ui/invalid_api_version_4.stderr @@ -0,0 +1,5 @@ +error: Unexpected `api_version` attribute. The supported format is `api_version(1)` + --> tests/ui/invalid_api_version_4.rs:3:3 + | +3 | #[api_version("1")] + | ^ diff --git a/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs new file mode 100644 index 0000000000000..b4f43cd401bba --- /dev/null +++ b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.rs @@ -0,0 +1,9 @@ +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + #[api_version(1)] + fn test(data: u64); + } +} + +fn main() {} \ No newline at end of file diff --git a/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr new file mode 100644 index 0000000000000..ec4b594023a05 --- /dev/null +++ b/primitives/api/test/tests/ui/method_ver_lower_than_trait_ver.stderr @@ -0,0 +1,11 @@ +error: Method version `1` is older than (or equal to) trait version `2`.Methods can't define versions older than the trait version. + --> tests/ui/method_ver_lower_than_trait_ver.rs:4:3 + | +4 | #[api_version(1)] + | ^ + +error: Trait version is set here. + --> tests/ui/method_ver_lower_than_trait_ver.rs:2:2 + | +2 | #[api_version(2)] + | ^ diff --git a/primitives/api/test/tests/ui/missing_versioned_method.rs b/primitives/api/test/tests/ui/missing_versioned_method.rs new file mode 100644 index 0000000000000..d973a94c2101d --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method.rs @@ -0,0 +1,39 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + } +} + +sp_api::impl_runtime_apis! { + #[api_version(3)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/missing_versioned_method.stderr b/primitives/api/test/tests/ui/missing_versioned_method.stderr new file mode 100644 index 0000000000000..e3ace7979c27e --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method.stderr @@ -0,0 +1,8 @@ +error[E0046]: not all trait items implemented, missing: `test3` + --> tests/ui/missing_versioned_method.rs:21:2 + | +15 | fn test3(); + | ----------- `test3` from trait +... 
+21 | impl self::Api for Runtime { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `test3` in implementation diff --git a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs new file mode 100644 index 0000000000000..72358b99164d5 --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.rs @@ -0,0 +1,42 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + #[api_version(4)] + fn test4(); + } +} + +sp_api::impl_runtime_apis! { + #[api_version(4)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + fn test4() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr new file mode 100644 index 0000000000000..7354fbd537fd7 --- /dev/null +++ b/primitives/api/test/tests/ui/missing_versioned_method_multiple_vers.stderr @@ -0,0 +1,8 @@ +error[E0046]: not all trait items implemented, missing: `test3` + --> tests/ui/missing_versioned_method_multiple_vers.rs:23:2 + | +15 | fn test3(); + | ----------- `test3` from trait +... +23 | impl self::Api for Runtime { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `test3` in implementation diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs index fd654ffdc63d6..aeef40f4c77d6 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.rs @@ -12,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! { impl Api for MockApi { #[advanced] - fn test(&self, _: BlockId) -> Result, ApiError> { + fn test(&self, _: BlockId) -> Result<(), ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr index befe67c1d0b4a..3b3c4e94c3121 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr @@ -1,10 +1,10 @@ error: `BlockId` needs to be taken by reference and not by value! - --> $DIR/mock_advanced_block_id_by_value.rs:12:1 + --> tests/ui/mock_advanced_block_id_by_value.rs:12:1 | 12 | / sp_api::mock_impl_runtime_apis! { 13 | | impl Api for MockApi { 14 | | #[advanced] -15 | | fn test(&self, _: BlockId) -> Result, ApiError> { +15 | | fn test(&self, _: BlockId) -> Result<(), ApiError> { ... | 18 | | } 19 | | } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs index a15ef133fa6c4..76bf5f1aa7459 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.rs @@ -12,7 +12,7 @@ struct MockApi; sp_api::mock_impl_runtime_apis! 
{ impl Api for MockApi { #[advanced] - fn test(&self) -> Result, ApiError> { + fn test(&self) -> Result<(), ApiError> { Ok(().into()) } } diff --git a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr index 87d3660316b1e..b9ce7324b5caa 100644 --- a/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_missing_blockid.stderr @@ -1,5 +1,5 @@ error: If using the `advanced` attribute, it is required that the function takes at least one argument, the `BlockId`. - --> $DIR/mock_advanced_missing_blockid.rs:15:3 + --> tests/ui/mock_advanced_missing_blockid.rs:15:3 | -15 | fn test(&self) -> Result, ApiError> { +15 | fn test(&self) -> Result<(), ApiError> { | ^^ diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index c67de70b9c140..430f63eee1660 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -10,62 +10,80 @@ error: Only `&self` is supported! 16 | fn test2(&mut self, data: u64) {} | ^ -error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait +error[E0050]: method `test` has 2 parameters but the declaration in trait `Api::test` has 3 --> tests/ui/mock_only_self_reference.rs:12:1 | -12 | sp_api::mock_impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found `()` - | | +3 | / sp_api::decl_runtime_apis! { +4 | | pub trait Api { +5 | | fn test(data: u64); + | |_________________________- trait requires 3 parameters +... +12 | / sp_api::mock_impl_runtime_apis! { 13 | | impl Api for MockApi { 14 | | fn test(self, data: u64) {} 15 | | 16 | | fn test2(&mut self, data: u64) {} 17 | | } 18 | | } - | |_- help: change the parameter type to match the trait: `Option` + | |_^ expected 3 parameters, found 2 | -note: type in trait - --> tests/ui/mock_only_self_reference.rs:3:1 + = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0050]: method `test2` has 2 parameters but the declaration in trait `Api::test2` has 3 + --> tests/ui/mock_only_self_reference.rs:12:1 | 3 | / sp_api::decl_runtime_apis! { 4 | | pub trait Api { 5 | | fn test(data: u64); 6 | | fn test2(data: u64); -7 | | } -8 | | } - | |_^ - = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` + | |__________________________- trait requires 3 parameters +... +12 | / sp_api::mock_impl_runtime_apis! { +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_^ expected 3 parameters, found 2 + | = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait +error[E0050]: method `test_with_context` has 3 parameters but the declaration in trait `Api::test_with_context` has 4 --> tests/ui/mock_only_self_reference.rs:12:1 | -12 | sp_api::mock_impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found `()` - | | +3 | / sp_api::decl_runtime_apis! 
{ +4 | | pub trait Api { +5 | | fn test(data: u64); + | |_________________________- trait requires 4 parameters +... +12 | / sp_api::mock_impl_runtime_apis! { 13 | | impl Api for MockApi { 14 | | fn test(self, data: u64) {} 15 | | 16 | | fn test2(&mut self, data: u64) {} 17 | | } 18 | | } - | |_- help: change the parameter type to match the trait: `Option` + | |_^ expected 4 parameters, found 3 | -note: type in trait - --> tests/ui/mock_only_self_reference.rs:3:1 + = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) + +error[E0050]: method `test2_with_context` has 3 parameters but the declaration in trait `Api::test2_with_context` has 4 + --> tests/ui/mock_only_self_reference.rs:12:1 | 3 | / sp_api::decl_runtime_apis! { 4 | | pub trait Api { 5 | | fn test(data: u64); 6 | | fn test2(data: u64); -7 | | } -8 | | } - | |_^ - = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` + | |__________________________- trait requires 4 parameters +... +12 | / sp_api::mock_impl_runtime_apis! { +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_^ expected 4 parameters, found 3 + | = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/positive_cases/default_impls.rs b/primitives/api/test/tests/ui/positive_cases/default_impls.rs new file mode 100644 index 0000000000000..3434db1089f05 --- /dev/null +++ b/primitives/api/test/tests/ui/positive_cases/default_impls.rs @@ -0,0 +1,41 @@ +use sp_runtime::traits::{Block as BlockT, GetNodeBlockType}; +use substrate_test_runtime_client::runtime::Block; + +struct Runtime {} +impl GetNodeBlockType for Runtime { + type NodeBlock = Block; +} + +sp_api::decl_runtime_apis! { + #[api_version(2)] + pub trait Api { + fn test1(); + fn test2(); + #[api_version(3)] + fn test3(); + #[api_version(4)] + fn test4(); + } +} + +sp_api::impl_runtime_apis! { + #[api_version(2)] + impl self::Api for Runtime { + fn test1() {} + fn test2() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index dbc0f6def3aa5..479e1cf05a9d1 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -15,49 +15,6 @@ note: type in trait = note: expected fn pointer `fn(u64)` found fn pointer `fn(&u64)` -error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:17:1 - | -17 | sp_api::impl_runtime_apis! { - | -^^^^^^^^^^^^^^^^^^^^^^^^^ - | | - | _expected `u64`, found `&u64` - | | -18 | | impl self::Api for Runtime { -19 | | fn test(data: &u64) { -20 | | unimplemented!() -... 
| -34 | | } -35 | | } - | |_- help: change the parameter type to match the trait: `std::option::Option` - | -note: type in trait - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:11:1 - | -11 | / sp_api::decl_runtime_apis! { -12 | | pub trait Api { -13 | | fn test(data: u64); -14 | | } -15 | | } - | |_^ - = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> Result<_, _>` - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0308]: mismatched types - --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:17:1 - | -17 | / sp_api::impl_runtime_apis! { -18 | | impl self::Api for Runtime { -19 | | fn test(data: &u64) { -20 | | unimplemented!() -... | -34 | | } -35 | | } - | |_^ expected `u64`, found `&u64` - | - = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) - error[E0308]: mismatched types --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:19:11 | From d558a21dda1a6f9df919c442527b84975bcfee47 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sat, 13 Aug 2022 13:05:39 +0100 Subject: [PATCH 032/348] Fix Gov V2 Benchmarks (#12022) * better benchmark log * Update lib.rs * fix gov2 benchmarks * saturating math + fmt * add comment --- frame/referenda/src/benchmarking.rs | 12 ++++++++++-- frame/referenda/src/lib.rs | 6 ++++-- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index 9abd3768f780c..dfca7cdf465dd 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -101,8 +101,16 @@ fn info(index: ReferendumIndex) -> &'static TrackInfoOf { } fn make_passing_after(index: ReferendumIndex, period_portion: Perbill) { - let support = info::(index).min_support.threshold(period_portion); - let approval = info::(index).min_approval.threshold(period_portion); + // We add an extra 1 percent to handle any perbill rounding errors which may cause + // a proposal to not actually pass. 
+ let support = info::(index) + .min_support + .threshold(period_portion) + .saturating_add(Perbill::from_percent(1)); + let approval = info::(index) + .min_approval + .threshold(period_portion) + .saturating_add(Perbill::from_percent(1)); Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { *tally = T::Tally::from_requirements(support, approval, class); diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index 15c5562d64c84..e9e14e1d4a96a 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -699,7 +699,8 @@ impl, I: 'static> Pallet { when: T::BlockNumber, ) -> Option<(T::BlockNumber, ScheduleAddressOf)> { let alarm_interval = T::AlarmInterval::get().max(One::one()); - let when = (when + alarm_interval - One::one()) / alarm_interval * alarm_interval; + let when = when.saturating_add(alarm_interval).saturating_sub(One::one()) / + (alarm_interval.saturating_mul(alarm_interval)).max(One::one()); let maybe_result = T::Scheduler::schedule( DispatchTime::At(when), None, @@ -752,7 +753,8 @@ impl, I: 'static> Pallet { None }; let deciding_status = DecidingStatus { since: now, confirming }; - let alarm = Self::decision_time(&deciding_status, &status.tally, status.track, track); + let alarm = Self::decision_time(&deciding_status, &status.tally, status.track, track) + .max(now.saturating_add(One::one())); status.deciding = Some(deciding_status); let branch = if is_passing { BeginDecidingBranch::Passing } else { BeginDecidingBranch::Failing }; From 055453ebdf00db9e59ef5af0c59c1b4e39267813 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sat, 13 Aug 2022 16:07:10 +0100 Subject: [PATCH 033/348] Add Benchmarking Instance to Pallets (#12026) * benchmark instance * add benchmark instance to conviction voting * instance on bags list * fmt --- frame/bags-list/src/benchmarks.rs | 6 +- frame/conviction-voting/src/benchmarking.rs | 117 +++--- frame/referenda/src/benchmarking.rs | 408 ++++++++++---------- 3 files changed, 266 insertions(+), 265 deletions(-) diff --git a/frame/bags-list/src/benchmarks.rs b/frame/bags-list/src/benchmarks.rs index dba0c9ee1e623..bb45da076f751 100644 --- a/frame/bags-list/src/benchmarks.rs +++ b/frame/bags-list/src/benchmarks.rs @@ -25,7 +25,7 @@ use frame_support::{assert_ok, traits::Get}; use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::One; -frame_benchmarking::benchmarks! { +frame_benchmarking::benchmarks_instance_pallet! { rebag_non_terminal { // An expensive case for rebag-ing (rebag a non-terminal node): // @@ -97,7 +97,7 @@ frame_benchmarking::benchmarks! { // clear any pre-existing storage. // NOTE: safe to call outside block production - List::::unsafe_clear(); + List::::unsafe_clear(); // define our origin and destination thresholds. let origin_bag_thresh = T::BagThresholds::get()[0]; @@ -146,7 +146,7 @@ frame_benchmarking::benchmarks! { // clear any pre-existing storage. 
// NOTE: safe to call outside block production - List::::unsafe_clear(); + List::::unsafe_clear(); let bag_thresh = T::BagThresholds::get()[0]; diff --git a/frame/conviction-voting/src/benchmarking.rs b/frame/conviction-voting/src/benchmarking.rs index 53ac7a07302f9..d472770832497 100644 --- a/frame/conviction-voting/src/benchmarking.rs +++ b/frame/conviction-voting/src/benchmarking.rs @@ -20,7 +20,7 @@ use super::*; use assert_matches::assert_matches; -use frame_benchmarking::{account, benchmarks, whitelist_account}; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account}; use frame_support::{ dispatch::RawOrigin, traits::{fungible, Currency, Get}, @@ -34,8 +34,9 @@ const SEED: u32 = 0; /// Fill all classes as much as possible up to `MaxVotes` and return the Class with the most votes /// ongoing. -fn fill_voting() -> (ClassOf, BTreeMap, Vec>>) { - let mut r = BTreeMap::, Vec>>::new(); +fn fill_voting, I: 'static>( +) -> (ClassOf, BTreeMap, Vec>>) { + let mut r = BTreeMap::, Vec>>::new(); for class in T::Polls::classes().into_iter() { for _ in 0..T::MaxVotes::get() { match T::Polls::create_ongoing(class.clone()) { @@ -48,34 +49,34 @@ fn fill_voting() -> (ClassOf, BTreeMap, Vec> (c, r) } -fn funded_account(name: &'static str, index: u32) -> T::AccountId { +fn funded_account, I: 'static>(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); caller } -fn account_vote(b: BalanceOf) -> AccountVote> { +fn account_vote, I: 'static>(b: BalanceOf) -> AccountVote> { let v = Vote { aye: true, conviction: Conviction::Locked1x }; AccountVote::Standard { vote: v, balance: b } } -benchmarks! { +benchmarks_instance_pallet! { where_clause { where T::MaxVotes: core::fmt::Debug } vote_new { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let account_vote = account_vote::(100u32.into()); + let account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len() - 1; // We need to create existing votes for i in polls.iter().skip(1) { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; @@ -85,52 +86,52 @@ benchmarks! { }: vote(RawOrigin::Signed(caller.clone()), index, account_vote) verify { assert_matches!( - VotingFor::::get(&caller, &class), + VotingFor::::get(&caller, &class), Voting::Casting(Casting { votes, .. 
}) if votes.len() == (r + 1) as usize ); } vote_existing { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let old_account_vote = account_vote::(100u32.into()); + let old_account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len(); // We need to create existing votes for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; assert_eq!(votes.len(), r, "Votes were not recorded."); - let new_account_vote = account_vote::(200u32.into()); + let new_account_vote = account_vote::(200u32.into()); let index = polls[0]; }: vote(RawOrigin::Signed(caller.clone()), index, new_account_vote) verify { assert_matches!( - VotingFor::::get(&caller, &class), + VotingFor::::get(&caller, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == r as usize ); } remove_vote { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let old_account_vote = account_vote::(100u32.into()); + let old_account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len(); // We need to create existing votes for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, old_account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; @@ -140,25 +141,25 @@ benchmarks! { }: _(RawOrigin::Signed(caller.clone()), Some(class.clone()), index) verify { assert_matches!( - VotingFor::::get(&caller, &class), + VotingFor::::get(&caller, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == (r - 1) as usize ); } remove_other_vote { - let caller = funded_account::("caller", 0); - let voter = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); + let voter = funded_account::("caller", 0); whitelist_account!(caller); - let old_account_vote = account_vote::(100u32.into()); + let old_account_vote = account_vote::(100u32.into()); - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); let polls = &all_polls[&class]; let r = polls.len(); // We need to create existing votes for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, old_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, old_account_vote)?; } - let votes = match VotingFor::::get(&caller, &class) { + let votes = match VotingFor::::get(&caller, &class) { Voting::Casting(Casting { votes, .. }) => votes, _ => return Err("Votes are not direct".into()), }; @@ -169,7 +170,7 @@ benchmarks! 
{ }: _(RawOrigin::Signed(caller.clone()), voter.clone(), class.clone(), index) verify { assert_matches!( - VotingFor::::get(&voter, &class), + VotingFor::::get(&voter, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == (r - 1) as usize ); } @@ -177,44 +178,44 @@ benchmarks! { delegate { let r in 0 .. T::MaxVotes::get().min(T::Polls::max_ongoing().1); - let all_polls = fill_voting::().1; + let all_polls = fill_voting::().1; let class = T::Polls::max_ongoing().0; let polls = &all_polls[&class]; - let voter = funded_account::("voter", 0); - let caller = funded_account::("caller", 0); + let voter = funded_account::("voter", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let delegated_balance: BalanceOf = 1000u32.into(); - let delegate_vote = account_vote::(delegated_balance); + let delegated_balance: BalanceOf = 1000u32.into(); + let delegate_vote = account_vote::(delegated_balance); // We need to create existing delegations for i in polls.iter().take(r as usize) { - ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; } assert_matches!( - VotingFor::::get(&voter, &class), + VotingFor::::get(&voter, &class), Voting::Casting(Casting { votes, .. }) if votes.len() == r as usize ); }: _(RawOrigin::Signed(caller.clone()), class.clone(), voter, Conviction::Locked1x, delegated_balance) verify { - assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); } undelegate { let r in 0 .. T::MaxVotes::get().min(T::Polls::max_ongoing().1); - let all_polls = fill_voting::().1; + let all_polls = fill_voting::().1; let class = T::Polls::max_ongoing().0; let polls = &all_polls[&class]; - let voter = funded_account::("voter", 0); - let caller = funded_account::("caller", 0); + let voter = funded_account::("voter", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let delegated_balance: BalanceOf = 1000u32.into(); - let delegate_vote = account_vote::(delegated_balance); + let delegated_balance: BalanceOf = 1000u32.into(); + let delegate_vote = account_vote::(delegated_balance); - ConvictionVoting::::delegate( + ConvictionVoting::::delegate( RawOrigin::Signed(caller.clone()).into(), class.clone(), voter.clone(), @@ -224,31 +225,31 @@ benchmarks! { // We need to create delegations for i in polls.iter().take(r as usize) { - ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(voter.clone()).into(), *i, delegate_vote)?; } assert_matches!( - VotingFor::::get(&voter, &class), + VotingFor::::get(&voter, &class), Voting::Casting(Casting { votes, .. 
}) if votes.len() == r as usize ); - assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); }: _(RawOrigin::Signed(caller.clone()), class.clone()) verify { - assert_matches!(VotingFor::::get(&caller, &class), Voting::Casting(_)); + assert_matches!(VotingFor::::get(&caller, &class), Voting::Casting(_)); } unlock { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); - let normal_account_vote = account_vote::(T::Currency::free_balance(&caller) - 100u32.into()); - let big_account_vote = account_vote::(T::Currency::free_balance(&caller)); + let normal_account_vote = account_vote::(T::Currency::free_balance(&caller) - 100u32.into()); + let big_account_vote = account_vote::(T::Currency::free_balance(&caller)); // Fill everything up to the max by filling all classes with votes and voting on them all. - let (class, all_polls) = fill_voting::(); + let (class, all_polls) = fill_voting::(); assert!(all_polls.len() > 0); for (class, polls) in all_polls.iter() { assert!(polls.len() > 0); for i in polls.iter() { - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, normal_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), *i, normal_account_vote)?; } } @@ -257,12 +258,12 @@ benchmarks! { // Vote big on the class with the most ongoing votes of them to bump the lock and make it // hard to recompute when removed. - ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?; + ConvictionVoting::::vote(RawOrigin::Signed(caller.clone()).into(), polls[0], big_account_vote)?; let now_usable = >::reducible_balance(&caller, false); assert_eq!(orig_usable - now_usable, 100u32.into()); // Remove the vote - ConvictionVoting::::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?; + ConvictionVoting::::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?; // We can now unlock on `class` from 200 to 100... 
}: _(RawOrigin::Signed(caller.clone()), class, caller.clone()) diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index dfca7cdf465dd..b9c64a61f510d 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -20,7 +20,7 @@ use super::*; use crate::Pallet as Referenda; use assert_matches::assert_matches; -use frame_benchmarking::{account, benchmarks, whitelist_account}; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account}; use frame_support::{ assert_ok, traits::{Currency, EnsureOrigin}, @@ -31,157 +31,157 @@ use sp_runtime::traits::{Bounded, Hash}; const SEED: u32 = 0; #[allow(dead_code)] -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -fn funded_account(name: &'static str, index: u32) -> T::AccountId { +fn funded_account, I: 'static>(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); caller } -fn create_referendum() -> (T::AccountId, ReferendumIndex) { - let caller = funded_account::("caller", 0); +fn create_referendum, I: 'static>() -> (T::AccountId, ReferendumIndex) { + let caller = funded_account::("caller", 0); whitelist_account!(caller); - assert_ok!(Referenda::::submit( + assert_ok!(Referenda::::submit( RawOrigin::Signed(caller.clone()).into(), Box::new(RawOrigin::Root.into()), T::Hashing::hash_of(&0), DispatchTime::After(0u32.into()) )); - let index = ReferendumCount::::get() - 1; + let index = ReferendumCount::::get() - 1; (caller, index) } -fn place_deposit(index: ReferendumIndex) { - let caller = funded_account::("caller", 0); +fn place_deposit, I: 'static>(index: ReferendumIndex) { + let caller = funded_account::("caller", 0); whitelist_account!(caller); - assert_ok!(Referenda::::place_decision_deposit(RawOrigin::Signed(caller).into(), index)); + assert_ok!(Referenda::::place_decision_deposit(RawOrigin::Signed(caller).into(), index)); } -fn nudge(index: ReferendumIndex) { - assert_ok!(Referenda::::nudge_referendum(RawOrigin::Root.into(), index)); +fn nudge, I: 'static>(index: ReferendumIndex) { + assert_ok!(Referenda::::nudge_referendum(RawOrigin::Root.into(), index)); } -fn fill_queue( +fn fill_queue, I: 'static>( index: ReferendumIndex, spaces: u32, pass_after: u32, ) -> Vec { // First, create enough other referendums to fill the track. let mut others = vec![]; - for _ in 0..info::(index).max_deciding { - let (_caller, index) = create_referendum::(); - place_deposit::(index); + for _ in 0..info::(index).max_deciding { + let (_caller, index) = create_referendum::(); + place_deposit::(index); others.push(index); } // We will also need enough referenda which are queued and passing, we want `MaxQueued - 1` // in order to force the maximum amount of work to insert ours into the queue. for _ in spaces..T::MaxQueued::get() { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - make_passing_after::(index, Perbill::from_percent(pass_after)); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + make_passing_after::(index, Perbill::from_percent(pass_after)); others.push(index); } // Skip to when they can start being decided. 
- skip_prepare_period::(index); + skip_prepare_period::(index); // Manually nudge the other referenda first to ensure that they begin. - others.iter().for_each(|&i| nudge::(i)); + others.iter().for_each(|&i| nudge::(i)); others } -fn info(index: ReferendumIndex) -> &'static TrackInfoOf { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn info, I: 'static>(index: ReferendumIndex) -> &'static TrackInfoOf { + let status = Referenda::::ensure_ongoing(index).unwrap(); T::Tracks::info(status.track).expect("Id value returned from T::Tracks") } -fn make_passing_after(index: ReferendumIndex, period_portion: Perbill) { +fn make_passing_after, I: 'static>(index: ReferendumIndex, period_portion: Perbill) { // We add an extra 1 percent to handle any perbill rounding errors which may cause // a proposal to not actually pass. - let support = info::(index) + let support = info::(index) .min_support .threshold(period_portion) .saturating_add(Perbill::from_percent(1)); - let approval = info::(index) + let approval = info::(index) .min_approval .threshold(period_portion) .saturating_add(Perbill::from_percent(1)); - Referenda::::access_poll(index, |status| { + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { *tally = T::Tally::from_requirements(support, approval, class); } }); } -fn make_passing(index: ReferendumIndex) { - Referenda::::access_poll(index, |status| { +fn make_passing, I: 'static>(index: ReferendumIndex) { + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { *tally = T::Tally::unanimity(class); } }); } -fn make_failing(index: ReferendumIndex) { - Referenda::::access_poll(index, |status| { +fn make_failing, I: 'static>(index: ReferendumIndex) { + Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { *tally = T::Tally::rejection(class); } }); } -fn skip_prepare_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); - let prepare_period_over = status.submitted + info::(index).prepare_period; +fn skip_prepare_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); + let prepare_period_over = status.submitted + info::(index).prepare_period; frame_system::Pallet::::set_block_number(prepare_period_over); } -fn skip_decision_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); - let decision_period_over = status.deciding.unwrap().since + info::(index).decision_period; +fn skip_decision_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); + let decision_period_over = status.deciding.unwrap().since + info::(index).decision_period; frame_system::Pallet::::set_block_number(decision_period_over); } -fn skip_confirm_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn skip_confirm_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); let confirm_period_over = status.deciding.unwrap().confirming.unwrap(); frame_system::Pallet::::set_block_number(confirm_period_over); } -fn skip_timeout_period(index: ReferendumIndex) { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn skip_timeout_period, I: 'static>(index: ReferendumIndex) { + let status = Referenda::::ensure_ongoing(index).unwrap(); let timeout_period_over = status.submitted + T::UndecidingTimeout::get(); 
frame_system::Pallet::::set_block_number(timeout_period_over); } -fn alarm_time(index: ReferendumIndex) -> T::BlockNumber { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn alarm_time, I: 'static>(index: ReferendumIndex) -> T::BlockNumber { + let status = Referenda::::ensure_ongoing(index).unwrap(); status.alarm.unwrap().0 } -fn is_confirming(index: ReferendumIndex) -> bool { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn is_confirming, I: 'static>(index: ReferendumIndex) -> bool { + let status = Referenda::::ensure_ongoing(index).unwrap(); matches!( status, ReferendumStatus { deciding: Some(DecidingStatus { confirming: Some(_), .. }), .. } ) } -fn is_not_confirming(index: ReferendumIndex) -> bool { - let status = Referenda::::ensure_ongoing(index).unwrap(); +fn is_not_confirming, I: 'static>(index: ReferendumIndex) -> bool { + let status = Referenda::::ensure_ongoing(index).unwrap(); matches!( status, ReferendumStatus { deciding: Some(DecidingStatus { confirming: None, .. }), .. } ) } -benchmarks! { +benchmarks_instance_pallet! { submit { - let caller = funded_account::("caller", 0); + let caller = funded_account::("caller", 0); whitelist_account!(caller); }: _( RawOrigin::Signed(caller), @@ -189,105 +189,105 @@ benchmarks! { T::Hashing::hash_of(&0), DispatchTime::After(0u32.into()) ) verify { - let index = ReferendumCount::::get().checked_sub(1).unwrap(); - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Ongoing(_))); + let index = ReferendumCount::::get().checked_sub(1).unwrap(); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Ongoing(_))); } place_decision_deposit_preparing { - let (caller, index) = create_referendum::(); + let (caller, index) = create_referendum::(); }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - assert!(Referenda::::ensure_ongoing(index).unwrap().decision_deposit.is_some()); + assert!(Referenda::::ensure_ongoing(index).unwrap().decision_deposit.is_some()); } place_decision_deposit_queued { - let (caller, index) = create_referendum::(); - fill_queue::(index, 1, 90); + let (caller, index) = create_referendum::(); + fill_queue::(index, 1, 90); }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); } place_decision_deposit_not_queued { - let (caller, index) = create_referendum::(); - fill_queue::(index, 0, 90); + let (caller, index) = create_referendum::(); + fill_queue::(index, 0, 90); }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); } place_decision_deposit_passing { - let (caller, index) = create_referendum::(); - skip_prepare_period::(index); - make_passing::(index); + let (caller, index) = 
create_referendum::(); + skip_prepare_period::(index); + make_passing::(index); }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } place_decision_deposit_failing { - let (caller, index) = create_referendum::(); - skip_prepare_period::(index); + let (caller, index) = create_referendum::(); + skip_prepare_period::(index); }: place_decision_deposit(RawOrigin::Signed(caller), index) verify { - assert!(is_not_confirming::(index)); + assert!(is_not_confirming::(index)); } refund_decision_deposit { - let (caller, index) = create_referendum::(); - place_deposit::(index); - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); + let (caller, index) = create_referendum::(); + place_deposit::(index); + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); }: _(RawOrigin::Signed(caller), index) verify { - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(_, _, None))); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(_, _, None))); } cancel { - let (_caller, index) = create_referendum::(); - place_deposit::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); }: _(T::CancelOrigin::successful_origin(), index) verify { - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(..))); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(..))); } kill { - let (_caller, index) = create_referendum::(); - place_deposit::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); }: _(T::KillOrigin::successful_origin(), index) verify { - assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Killed(..))); + assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Killed(..))); } one_fewer_deciding_queue_empty { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - nudge::(index); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); - assert_eq!(DecidingCount::::get(&track), 1); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + nudge::(index); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); + assert_eq!(DecidingCount::::get(&track), 1); }: one_fewer_deciding(RawOrigin::Root, track) verify { - assert_eq!(DecidingCount::::get(&track), 0); + assert_eq!(DecidingCount::::get(&track), 0); } one_fewer_deciding_failing { - let (_caller, index) = create_referendum::(); + let (_caller, index) = create_referendum::(); // No spaces free in the queue. 
- let queued = fill_queue::(index, 0, 90); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - let deciding_count = DecidingCount::::get(&track); + let queued = fill_queue::(index, 0, 90); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + let deciding_count = DecidingCount::::get(&track); }: one_fewer_deciding(RawOrigin::Root, track) verify { - assert_eq!(DecidingCount::::get(&track), deciding_count); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) + assert_eq!(DecidingCount::::get(&track), deciding_count); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); + assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) .unwrap() .deciding .map_or(true, |d| d.confirming.is_none()) @@ -295,18 +295,18 @@ benchmarks! { } one_fewer_deciding_passing { - let (_caller, index) = create_referendum::(); + let (_caller, index) = create_referendum::(); // No spaces free in the queue. - let queued = fill_queue::(index, 0, 0); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - let deciding_count = DecidingCount::::get(&track); + let queued = fill_queue::(index, 0, 0); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), queued[0])); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + let deciding_count = DecidingCount::::get(&track); }: one_fewer_deciding(RawOrigin::Root, track) verify { - assert_eq!(DecidingCount::::get(&track), deciding_count); - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) + assert_eq!(DecidingCount::::get(&track), deciding_count); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); + assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) .unwrap() .deciding .map_or(true, |d| d.confirming.is_some()) @@ -315,43 +315,43 @@ benchmarks! { nudge_referendum_requeued_insertion { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 0, 90); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 0, 90); // Now nudge ours, with the track now full and the queue full of referenda with votes, // ours will not be in the queue. - nudge::(index); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + nudge::(index); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); // Now alter the voting, so that ours goes into pole-position and shifts others down. 
- make_passing::(index); + make_passing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let t = TrackQueue::::get(&track); + let t = TrackQueue::::get(&track); assert_eq!(t.len() as u32, T::MaxQueued::get()); assert_eq!(t[t.len() - 1].0, index); } nudge_referendum_requeued_slide { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 1, 90); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 1, 90); // Now nudge ours, with the track now full, ours will be queued, but with no votes, it // will have the worst position. - nudge::(index); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); + nudge::(index); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); // Now alter the voting, so that ours leap-frogs all into the best position. - make_passing::(index); + make_passing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let t = TrackQueue::::get(&track); + let t = TrackQueue::::get(&track); assert_eq!(t.len() as u32, T::MaxQueued::get()); assert_eq!(t[t.len() - 1].0, index); } @@ -362,159 +362,159 @@ benchmarks! { // insertion at the beginning. // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 1, 0); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 1, 0); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); + assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); // Then nudge ours, with the track now full, ours will be queued. }: nudge_referendum(RawOrigin::Root, index) verify { - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); } nudge_referendum_not_queued { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); - place_deposit::(index); - fill_queue::(index, 0, 0); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + fill_queue::(index, 0, 0); - let track = Referenda::::ensure_ongoing(index).unwrap().track; - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); + let track = Referenda::::ensure_ongoing(index).unwrap().track; + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(_, v)| v > 0u32.into())); // Then nudge ours, with the track now full, ours will be queued. 
}: nudge_referendum(RawOrigin::Root, index) verify { - assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); } nudge_referendum_no_deposit { - let (_caller, index) = create_referendum::(); - skip_prepare_period::(index); + let (_caller, index) = create_referendum::(); + skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let status = Referenda::::ensure_ongoing(index).unwrap(); + let status = Referenda::::ensure_ongoing(index).unwrap(); assert_matches!(status, ReferendumStatus { deciding: None, .. }); } nudge_referendum_preparing { - let (_caller, index) = create_referendum::(); - place_deposit::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let status = Referenda::::ensure_ongoing(index).unwrap(); + let status = Referenda::::ensure_ongoing(index).unwrap(); assert_matches!(status, ReferendumStatus { deciding: None, .. }); } nudge_referendum_timed_out { - let (_caller, index) = create_referendum::(); - skip_timeout_period::(index); + let (_caller, index) = create_referendum::(); + skip_timeout_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let info = ReferendumInfoFor::::get(index).unwrap(); + let info = ReferendumInfoFor::::get(index).unwrap(); assert_matches!(info, ReferendumInfo::TimedOut(..)); } nudge_referendum_begin_deciding_failing { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_not_confirming::(index)); + assert!(is_not_confirming::(index)); } nudge_referendum_begin_deciding_passing { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - make_passing::(index); - skip_prepare_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + make_passing::(index); + skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } nudge_referendum_begin_confirming { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - nudge::(index); - assert!(!is_confirming::(index)); - make_passing::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + nudge::(index); + assert!(!is_confirming::(index)); + make_passing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } nudge_referendum_end_confirming { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - make_passing::(index); - nudge::(index); - assert!(is_confirming::(index)); - make_failing::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + make_passing::(index); + nudge::(index); + assert!(is_confirming::(index)); + make_failing::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(!is_confirming::(index)); + assert!(!is_confirming::(index)); } 
nudge_referendum_continue_not_confirming { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - nudge::(index); - assert!(!is_confirming::(index)); - let old_alarm = alarm_time::(index); - make_passing_after::(index, Perbill::from_percent(50)); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + nudge::(index); + assert!(!is_confirming::(index)); + let old_alarm = alarm_time::(index); + make_passing_after::(index, Perbill::from_percent(50)); }: nudge_referendum(RawOrigin::Root, index) verify { - assert_ne!(old_alarm, alarm_time::(index)); - assert!(!is_confirming::(index)); + assert_ne!(old_alarm, alarm_time::(index)); + assert!(!is_confirming::(index)); } nudge_referendum_continue_confirming { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - make_passing::(index); - skip_prepare_period::(index); - nudge::(index); - assert!(is_confirming::(index)); - let old_alarm = alarm_time::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + make_passing::(index); + skip_prepare_period::(index); + nudge::(index); + assert!(is_confirming::(index)); + let old_alarm = alarm_time::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - assert!(is_confirming::(index)); + assert!(is_confirming::(index)); } nudge_referendum_approved { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - make_passing::(index); - nudge::(index); - skip_confirm_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + make_passing::(index); + nudge::(index); + skip_confirm_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let info = ReferendumInfoFor::::get(index).unwrap(); + let info = ReferendumInfoFor::::get(index).unwrap(); assert_matches!(info, ReferendumInfo::Approved(..)); } nudge_referendum_rejected { - let (_caller, index) = create_referendum::(); - place_deposit::(index); - skip_prepare_period::(index); - make_failing::(index); - nudge::(index); - skip_decision_period::(index); + let (_caller, index) = create_referendum::(); + place_deposit::(index); + skip_prepare_period::(index); + make_failing::(index); + nudge::(index); + skip_decision_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { - let info = ReferendumInfoFor::::get(index).unwrap(); + let info = ReferendumInfoFor::::get(index).unwrap(); assert_matches!(info, ReferendumInfo::Rejected(..)); } From 34a0621761c4a333cb2074ff720f7acbfb92dbb8 Mon Sep 17 00:00:00 2001 From: Sudip Ghimire <39845901+sudipghimire533@users.noreply.github.com> Date: Sun, 14 Aug 2022 23:50:06 +0545 Subject: [PATCH 034/348] make MAX_VOTERS and MAX_CANDIDATES in elections-phragmen configurable. 
Fix: #11092 (#11908) * make MAX_VOTERS and MAX_CANDIDATES in elections-phragmen configurable * Configure election-phragmen in node bin configuring max candidates & voters * Add document comment for added Config parameter * Incorporate suggestion * fix benchmarks * Update frame/elections-phragmen/src/lib.rs * Update frame/elections-phragmen/src/lib.rs * fix wrong values * fix typo * docs * more detailed docs * fmt * ".git/.scripts/bench-bot.sh" pallet dev pallet_elections_phragmen Co-authored-by: Szegoo Co-authored-by: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Co-authored-by: command-bot <> --- bin/node/runtime/src/lib.rs | 4 + frame/elections-phragmen/src/benchmarking.rs | 26 ++--- frame/elections-phragmen/src/lib.rs | 40 +++++--- frame/elections-phragmen/src/weights.rs | 102 +++++++++---------- 4 files changed, 94 insertions(+), 78 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 6bfbe3413058a..ff793a49b5ce6 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -966,6 +966,8 @@ parameter_types! { pub const TermDuration: BlockNumber = 7 * DAYS; pub const DesiredMembers: u32 = 13; pub const DesiredRunnersUp: u32 = 7; + pub const MaxVoters: u32 = 10 * 1000; + pub const MaxCandidates: u32 = 1000; pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; } @@ -989,6 +991,8 @@ impl pallet_elections_phragmen::Config for Runtime { type DesiredMembers = DesiredMembers; type DesiredRunnersUp = DesiredRunnersUp; type TermDuration = TermDuration; + type MaxVoters = MaxVoters; + type MaxCandidates = MaxCandidates; type WeightInfo = pallet_elections_phragmen::weights::SubstrateWeight; } diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 9a9d448449eca..2d1b698940a9d 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -36,7 +36,7 @@ fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); // Fund each account with at-least his stake but still a sane amount as to not mess up // the vote calculation. - let amount = default_stake::(MAX_VOTERS) * BalanceOf::::from(BALANCE_FACTOR); + let amount = default_stake::(T::MaxVoters::get()) * BalanceOf::::from(BALANCE_FACTOR); let _ = T::Currency::make_free_balance_be(&account, amount); // important to increase the total issuance since T::CurrencyToVote will need it to be sane for // phragmen to work. @@ -230,7 +230,7 @@ benchmarks! { submit_candidacy { // number of already existing candidates. - let c in 1 .. MAX_CANDIDATES; + let c in 1 .. T::MaxCandidates::get(); // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -261,7 +261,7 @@ benchmarks! { // this will check members, runners-up and candidate for removal. Members and runners-up are // limited by the runtime bound, nonetheless we fill them by `m`. // number of already existing candidates. - let c in 1 .. MAX_CANDIDATES; + let c in 1 .. T::MaxCandidates::get(); // we fix the number of members to the number of desired members and runners-up. We'll be in // this state almost always. let m = T::DesiredMembers::get() + T::DesiredRunnersUp::get(); @@ -362,14 +362,14 @@ benchmarks! { clean_defunct_voters { // total number of voters. - let v in (MAX_VOTERS / 2) .. MAX_VOTERS; + let v in (T::MaxVoters::get() / 2) .. 
T::MaxVoters::get(); // those that are defunct and need removal. - let d in 1 .. (MAX_VOTERS / 2); + let d in 1 .. (T::MaxVoters::get() / 2); // remove any previous stuff. clean::(); - let all_candidates = submit_candidates::(MAX_CANDIDATES, "candidates")?; + let all_candidates = submit_candidates::(T::MaxCandidates::get(), "candidates")?; distribute_voters::(all_candidates, v, MAXIMUM_VOTE)?; // all candidates leave. @@ -389,9 +389,9 @@ benchmarks! { // members, this is hard-coded in the runtime and cannot be trivially changed at this stage. // Yet, change the number of voters, candidates and edge per voter to see the impact. Note // that we give all candidates a self vote to make sure they are all considered. - let c in 1 .. MAX_CANDIDATES; - let v in 1 .. MAX_VOTERS; - let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; + let c in 1 .. T::MaxCandidates::get(); + let v in 1 .. T::MaxVoters::get(); + let e in (T::MaxVoters::get()) .. T::MaxVoters::get() as u32 * MAXIMUM_VOTE as u32; clean::(); // so we have a situation with v and e. we want e to basically always be in the range of `e @@ -425,9 +425,9 @@ benchmarks! { #[extra] election_phragmen_c_e { - let c in 1 .. MAX_CANDIDATES; - let e in MAX_VOTERS .. MAX_VOTERS * MAXIMUM_VOTE as u32; - let fixed_v = MAX_VOTERS; + let c in 1 .. T::MaxCandidates::get(); + let e in (T::MaxVoters::get()) .. T::MaxVoters::get() * MAXIMUM_VOTE as u32; + let fixed_v = T::MaxVoters::get(); clean::(); let votes_per_voter = e / fixed_v; @@ -459,7 +459,7 @@ benchmarks! { #[extra] election_phragmen_v { let v in 4 .. 16; - let fixed_c = MAX_CANDIDATES / 10; + let fixed_c = T::MaxCandidates::get() / 10; let fixed_e = 64; clean::(); diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 28fed28f18e5c..ffd1c6ab4de9c 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -125,17 +125,6 @@ pub mod migrations; /// The maximum votes allowed per voter. pub const MAXIMUM_VOTE: usize = 16; -// Some safe temp values to make the wasm execution sane while we still use this pallet. -#[cfg(test)] -pub(crate) const MAX_CANDIDATES: u32 = 100; -#[cfg(not(test))] -pub(crate) const MAX_CANDIDATES: u32 = 1000; - -#[cfg(test)] -pub(crate) const MAX_VOTERS: u32 = 1000; -#[cfg(not(test))] -pub(crate) const MAX_VOTERS: u32 = 10 * 1000; - type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = <::Currency as Currency< @@ -259,6 +248,21 @@ pub mod pallet { #[pallet::constant] type TermDuration: Get; + /// The maximum number of candidates in a phragmen election. + /// + /// Warning: The election happens onchain, and this value will determine + /// the size of the election. When this limit is reached no more + /// candidates are accepted in the election. + #[pallet::constant] + type MaxCandidates: Get; + + /// The maximum number of voters to allow in a phragmen election. + /// + /// Warning: This impacts the size of the election which is run onchain. + /// When the limit is reached the new voters are ignored. + #[pallet::constant] + type MaxVoters: Get; + /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; } @@ -397,7 +401,10 @@ pub mod pallet { let actual_count = >::decode_len().unwrap_or(0) as u32; ensure!(actual_count <= candidate_count, Error::::InvalidWitnessData); - ensure!(actual_count <= MAX_CANDIDATES, Error::::TooManyCandidates); + ensure!( + actual_count <= ::MaxCandidates::get(), + Error::::TooManyCandidates + ); let index = Self::is_candidate(&who).err().ok_or(Error::::DuplicatedCandidate)?; @@ -913,10 +920,11 @@ impl Pallet { let mut num_edges: u32 = 0; + let max_voters = ::MaxVoters::get() as usize; // used for prime election. let mut voters_and_stakes = Vec::new(); match Voting::::iter().try_for_each(|(voter, Voter { stake, votes, .. })| { - if voters_and_stakes.len() < MAX_VOTERS as usize { + if voters_and_stakes.len() < max_voters { voters_and_stakes.push((voter, stake, votes)); Ok(()) } else { @@ -930,7 +938,7 @@ impl Pallet { "Failed to run election. Number of voters exceeded", ); Self::deposit_event(Event::ElectionError); - return T::DbWeight::get().reads(3 + MAX_VOTERS as u64) + return T::DbWeight::get().reads(3 + max_voters as u64) }, } @@ -1266,6 +1274,8 @@ mod tests { parameter_types! { pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; + pub const PhragmenMaxVoters: u32 = 1000; + pub const PhragmenMaxCandidates: u32 = 100; } impl Config for Test { @@ -1284,6 +1294,8 @@ mod tests { type LoserCandidate = (); type KickedMember = (); type WeightInfo = (); + type MaxVoters = PhragmenMaxVoters; + type MaxCandidates = PhragmenMaxCandidates; } pub type Block = sp_runtime::generic::Block; diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index 07ee7aa2012ad..0e067699e5fac 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_elections_phragmen //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-08-01, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-08-08, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -70,9 +70,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - (26_143_000 as Weight) - // Standard Error: 23_000 - .saturating_add((297_000 as Weight).saturating_mul(v as Weight)) + (27_011_000 as Weight) + // Standard Error: 3_000 + .saturating_add((214_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -83,9 +83,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - (40_431_000 as Weight) - // Standard Error: 6_000 - .saturating_add((205_000 as Weight).saturating_mul(v as Weight)) + (40_240_000 as Weight) + // Standard Error: 5_000 + .saturating_add((244_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -96,16 +96,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. 
fn vote_less(v: u32, ) -> Weight { - (40_188_000 as Weight) - // Standard Error: 6_000 - .saturating_add((225_000 as Weight).saturating_mul(v as Weight)) + (40_394_000 as Weight) + // Standard Error: 5_000 + .saturating_add((217_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - (38_031_000 as Weight) + (37_651_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -114,18 +114,18 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - (43_715_000 as Weight) + (42_217_000 as Weight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((50_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (47_882_000 as Weight) - // Standard Error: 1_000 - .saturating_add((25_000 as Weight).saturating_mul(c as Weight)) + (46_459_000 as Weight) + // Standard Error: 0 + .saturating_add((26_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -135,13 +135,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - (45_600_000 as Weight) + (45_189_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - (34_959_000 as Weight) + (34_516_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -156,7 +156,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - (52_684_000 as Weight) + (51_838_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } @@ -170,8 +170,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `d` is `[1, 5000]`. fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 65_000 - .saturating_add((64_009_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 76_000 + .saturating_add((63_721_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) @@ -184,17 +184,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections ElectionRounds (r:1 w:1) // Storage: Council Members (r:0 w:1) // Storage: Council Prime (r:0 w:1) - // Storage: System Account (r:19 w:19) + // Storage: System Account (r:1 w:1) /// The range of component `c` is `[1, 1000]`. /// The range of component `v` is `[1, 10000]`. 
/// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 778_000 - .saturating_add((81_049_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 773_000 + .saturating_add((81_534_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 51_000 - .saturating_add((4_420_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(T::DbWeight::get().reads(279 as Weight)) + .saturating_add((4_453_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(T::DbWeight::get().reads(280 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(c as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) @@ -210,9 +210,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - (26_143_000 as Weight) - // Standard Error: 23_000 - .saturating_add((297_000 as Weight).saturating_mul(v as Weight)) + (27_011_000 as Weight) + // Standard Error: 3_000 + .saturating_add((214_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -223,9 +223,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - (40_431_000 as Weight) - // Standard Error: 6_000 - .saturating_add((205_000 as Weight).saturating_mul(v as Weight)) + (40_240_000 as Weight) + // Standard Error: 5_000 + .saturating_add((244_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -236,16 +236,16 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - (40_188_000 as Weight) - // Standard Error: 6_000 - .saturating_add((225_000 as Weight).saturating_mul(v as Weight)) + (40_394_000 as Weight) + // Standard Error: 5_000 + .saturating_add((217_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - (38_031_000 as Weight) + (37_651_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -254,18 +254,18 @@ impl WeightInfo for () { // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - (43_715_000 as Weight) + (42_217_000 as Weight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((50_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (47_882_000 as Weight) - // Standard Error: 1_000 - .saturating_add((25_000 as Weight).saturating_mul(c as Weight)) + (46_459_000 as Weight) + // Standard Error: 0 + .saturating_add((26_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -275,13 +275,13 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - (45_600_000 as Weight) + (45_189_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - (34_959_000 as Weight) + (34_516_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -296,7 +296,7 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - (52_684_000 as Weight) + (51_838_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } @@ -310,8 +310,8 @@ impl WeightInfo for () { /// The range of component `d` is `[1, 5000]`. fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 65_000 - .saturating_add((64_009_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 76_000 + .saturating_add((63_721_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) @@ -324,17 +324,17 @@ impl WeightInfo for () { // Storage: Elections ElectionRounds (r:1 w:1) // Storage: Council Members (r:0 w:1) // Storage: Council Prime (r:0 w:1) - // Storage: System Account (r:19 w:19) + // Storage: System Account (r:1 w:1) /// The range of component `c` is `[1, 1000]`. /// The range of component `v` is `[1, 10000]`. /// The range of component `e` is `[10000, 160000]`. 
fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 778_000 - .saturating_add((81_049_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 773_000 + .saturating_add((81_534_000 as Weight).saturating_mul(v as Weight)) // Standard Error: 51_000 - .saturating_add((4_420_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(RocksDbWeight::get().reads(279 as Weight)) + .saturating_add((4_453_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(RocksDbWeight::get().reads(280 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(c as Weight))) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) From bfc2d085f392ccfb369251709bccb4bfa0edebe7 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sun, 14 Aug 2022 19:36:19 +0100 Subject: [PATCH 035/348] Further Improve Gov V2 Stuff (#12028) * refactor and improve successful origin * use appropriate origin for creating referendum * fmt * add setup tally feature for benchmarks * feedback updates * fmt * Update frame/referenda/src/benchmarking.rs Co-authored-by: Gavin Wood * feedback on `setup` trait * fix Co-authored-by: Gavin Wood --- frame/conviction-voting/src/types.rs | 3 + frame/ranked-collective/src/lib.rs | 137 +++++++++++++++++++++------ frame/ranked-collective/src/tests.rs | 17 ++++ frame/referenda/src/benchmarking.rs | 118 ++++++++++++----------- frame/referenda/src/mock.rs | 3 + frame/support/src/traits/voting.rs | 13 +++ 6 files changed, 210 insertions(+), 81 deletions(-) diff --git a/frame/conviction-voting/src/types.rs b/frame/conviction-voting/src/types.rs index 8f4f3697e9766..d6051dff62569 100644 --- a/frame/conviction-voting/src/types.rs +++ b/frame/conviction-voting/src/types.rs @@ -93,6 +93,9 @@ impl< let ayes = approval.mul_ceil(support); Self { ayes, nays: support - ayes, support, dummy: PhantomData } } + + #[cfg(feature = "runtime-benchmarks")] + fn setup(_: Class, _: Perbill) {} } impl< diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 7ea43a9017445..79910c259a7cd 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -85,16 +85,16 @@ pub type Votes = u32; Decode, MaxEncodedLen, )] -#[scale_info(skip_type_params(M))] +#[scale_info(skip_type_params(T, I, M))] #[codec(mel_bound())] -pub struct Tally { +pub struct Tally { bare_ayes: MemberIndex, ayes: Votes, nays: Votes, - dummy: PhantomData, + dummy: PhantomData<(T, I, M)>, } -impl Tally { +impl, I: 'static, M: GetMaxVoters> Tally { pub fn from_parts(bare_ayes: MemberIndex, ayes: Votes, nays: Votes) -> Self { Tally { bare_ayes, ayes, nays, dummy: PhantomData } } @@ -107,10 +107,10 @@ impl Tally { // All functions of VoteTally now include the class as a param. 
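As a rough sketch of what that means at a call site (a hypothetical snippet, assuming a runtime `T: pallet_referenda::Config<I>`; it simply mirrors the `make_passing` benchmarking helper later in this patch rather than introducing new API), the poll's class, which for this pallet is the member `Rank`, now travels with the poll status into every tally constructor:

    Referenda::<T, I>::access_poll(index, |status| {
        if let PollStatus::Ongoing(tally, class) = status {
            // before this change the constructor took no class: `T::Tally::unanimity()`
            *tally = T::Tally::unanimity(class);
        }
    });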
-pub type TallyOf = Tally>; +pub type TallyOf = Tally>; pub type PollIndexOf = <>::Polls as Polling>>::Index; -impl VoteTally for Tally { +impl, I: 'static, M: GetMaxVoters> VoteTally for Tally { fn new(_: Rank) -> Self { Self { bare_ayes: 0, ayes: 0, nays: 0, dummy: PhantomData } } @@ -143,6 +143,20 @@ impl VoteTally for Tally { let nays = ((ayes as u64) * 1_000_000_000u64 / approval.deconstruct() as u64) as u32 - ayes; Self { bare_ayes: ayes, ayes, nays, dummy: PhantomData } } + + #[cfg(feature = "runtime-benchmarks")] + fn setup(class: Rank, granularity: Perbill) { + if M::get_max_voters(class) == 0 { + let max_voters = granularity.saturating_reciprocal_mul(1u32); + for i in 0..max_voters { + let who: T::AccountId = + frame_benchmarking::account("ranked_collective_benchmarking", i, 0); + crate::Pallet::::do_add_member_to_rank(who, class) + .expect("could not add members for benchmarks"); + } + assert_eq!(M::get_max_voters(class), max_voters); + } + } } /// Record needed for every member. @@ -241,6 +255,19 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + match Self::try_successful_origin() { + Ok(o) => o, + Err(()) => { + let who: T::AccountId = frame_benchmarking::whitelisted_caller(); + crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) + .expect("failed to add ranked member"); + frame_system::RawOrigin::Signed(who).into() + }, + } + } } /// Guard to ensure that the given origin is a member of the collective. The account ID of the @@ -264,6 +291,19 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + match Self::try_successful_origin() { + Ok(o) => o, + Err(()) => { + let who: T::AccountId = frame_benchmarking::whitelisted_caller(); + crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) + .expect("failed to add ranked member"); + frame_system::RawOrigin::Signed(who).into() + }, + } + } } /// Guard to ensure that the given origin is a member of the collective. 
The pair of including both @@ -287,6 +327,19 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + match Self::try_successful_origin() { + Ok(o) => o, + Err(()) => { + let who: T::AccountId = frame_benchmarking::whitelisted_caller(); + crate::Pallet::::do_add_member_to_rank(who.clone(), MIN_RANK) + .expect("failed to add ranked member"); + frame_system::RawOrigin::Signed(who).into() + }, + } + } } #[frame_support::pallet] @@ -415,17 +468,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_member())] pub fn add_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { let _ = T::PromoteOrigin::ensure_origin(origin)?; - ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); - let index = MemberCount::::get(0); - let count = index.checked_add(1).ok_or(Overflow)?; - - Members::::insert(&who, MemberRecord { rank: 0 }); - IdToIndex::::insert(0, &who, index); - IndexToId::::insert(0, index, &who); - MemberCount::::insert(0, count); - Self::deposit_event(Event::MemberAdded { who }); - - Ok(()) + Self::do_add_member(who) } /// Increment the rank of an existing member by one. @@ -437,17 +480,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::promote_member(0))] pub fn promote_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { let max_rank = T::PromoteOrigin::ensure_origin(origin)?; - let record = Self::ensure_member(&who)?; - let rank = record.rank.checked_add(1).ok_or(Overflow)?; - ensure!(max_rank >= rank, Error::::NoPermission); - let index = MemberCount::::get(rank); - MemberCount::::insert(rank, index.checked_add(1).ok_or(Overflow)?); - IdToIndex::::insert(rank, &who, index); - IndexToId::::insert(rank, index, &who); - Members::::insert(&who, MemberRecord { rank }); - Self::deposit_event(Event::RankChanged { who, rank }); - - Ok(()) + Self::do_promote_member(who, Some(max_rank)) } /// Decrement the rank of an existing member by one. If the member is already at rank zero, @@ -626,5 +659,53 @@ pub mod pallet { MemberCount::::mutate(rank, |r| r.saturating_dec()); Ok(()) } + + /// Adds a member into the ranked collective at level 0. + /// + /// No origin checks are executed. + pub fn do_add_member(who: T::AccountId) -> DispatchResult { + ensure!(!Members::::contains_key(&who), Error::::AlreadyMember); + let index = MemberCount::::get(0); + let count = index.checked_add(1).ok_or(Overflow)?; + + Members::::insert(&who, MemberRecord { rank: 0 }); + IdToIndex::::insert(0, &who, index); + IndexToId::::insert(0, index, &who); + MemberCount::::insert(0, count); + Self::deposit_event(Event::MemberAdded { who }); + Ok(()) + } + + /// Promotes a member in the ranked collective into the next role. + /// + /// A `maybe_max_rank` may be provided to check that the member does not get promoted beyond + /// a certain rank. Is `None` is provided, then the rank will be incremented without checks. 
+ pub fn do_promote_member( + who: T::AccountId, + maybe_max_rank: Option, + ) -> DispatchResult { + let record = Self::ensure_member(&who)?; + let rank = record.rank.checked_add(1).ok_or(Overflow)?; + if let Some(max_rank) = maybe_max_rank { + ensure!(max_rank >= rank, Error::::NoPermission); + } + let index = MemberCount::::get(rank); + MemberCount::::insert(rank, index.checked_add(1).ok_or(Overflow)?); + IdToIndex::::insert(rank, &who, index); + IndexToId::::insert(rank, index, &who); + Members::::insert(&who, MemberRecord { rank }); + Self::deposit_event(Event::RankChanged { who, rank }); + Ok(()) + } + + /// Add a member to the rank collective, and continue to promote them until a certain rank + /// is reached. + pub fn do_add_member_to_rank(who: T::AccountId, rank: Rank) -> DispatchResult { + Self::do_add_member(who.clone())?; + for _ in 0..rank { + Self::do_promote_member(who.clone(), None)?; + } + Ok(()) + } } } diff --git a/frame/ranked-collective/src/tests.rs b/frame/ranked-collective/src/tests.rs index 4344a1be730fb..530388d83f3c4 100644 --- a/frame/ranked-collective/src/tests.rs +++ b/frame/ranked-collective/src/tests.rs @@ -455,3 +455,20 @@ fn ensure_ranked_works() { assert_eq!(Rank4::try_origin(Origin::signed(3)).unwrap_err().as_signed().unwrap(), 3); }); } + +#[test] +fn do_add_member_to_rank_works() { + new_test_ext().execute_with(|| { + let max_rank = 9u16; + assert_ok!(Club::do_add_member_to_rank(69, max_rank / 2)); + assert_ok!(Club::do_add_member_to_rank(1337, max_rank)); + for i in 0..=max_rank { + if i <= max_rank / 2 { + assert_eq!(member_count(i), 2); + } else { + assert_eq!(member_count(i), 1); + } + } + assert_eq!(member_count(max_rank + 1), 0); + }) +} diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index b9c64a61f510d..4a64a9aa03b77 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -23,6 +23,7 @@ use assert_matches::assert_matches; use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account}; use frame_support::{ assert_ok, + dispatch::UnfilteredDispatchable, traits::{Currency, EnsureOrigin}, }; use frame_system::RawOrigin; @@ -41,17 +42,20 @@ fn funded_account, I: 'static>(name: &'static str, index: u32) -> T caller } -fn create_referendum, I: 'static>() -> (T::AccountId, ReferendumIndex) { - let caller = funded_account::("caller", 0); - whitelist_account!(caller); - assert_ok!(Referenda::::submit( - RawOrigin::Signed(caller.clone()).into(), - Box::new(RawOrigin::Root.into()), - T::Hashing::hash_of(&0), - DispatchTime::After(0u32.into()) - )); +fn create_referendum, I: 'static>() -> (T::Origin, ReferendumIndex) { + let origin: T::Origin = T::SubmitOrigin::successful_origin(); + if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + whitelist_account!(caller); + } + + let proposal_origin = Box::new(RawOrigin::Root.into()); + let proposal_hash = T::Hashing::hash_of(&0); + let enactment_moment = DispatchTime::After(0u32.into()); + let call = Call::::submit { proposal_origin, proposal_hash, enactment_moment }; + assert_ok!(call.dispatch_bypass_filter(origin.clone())); let index = ReferendumCount::::get() - 1; - (caller, index) + (origin, index) } fn place_deposit, I: 'static>(index: ReferendumIndex) { @@ -72,7 +76,7 @@ fn fill_queue, I: 'static>( // First, create enough other referendums to fill the track. 
let mut others = vec![]; for _ in 0..info::(index).max_deciding { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); others.push(index); } @@ -80,7 +84,7 @@ fn fill_queue, I: 'static>( // We will also need enough referenda which are queued and passing, we want `MaxQueued - 1` // in order to force the maximum amount of work to insert ours into the queue. for _ in spaces..T::MaxQueued::get() { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); make_passing_after::(index, Perbill::from_percent(pass_after)); others.push(index); @@ -113,6 +117,7 @@ fn make_passing_after, I: 'static>(index: ReferendumIndex, period_p .saturating_add(Perbill::from_percent(1)); Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { + T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::from_requirements(support, approval, class); } }); @@ -121,6 +126,7 @@ fn make_passing_after, I: 'static>(index: ReferendumIndex, period_p fn make_passing, I: 'static>(index: ReferendumIndex) { Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { + T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::unanimity(class); } }); @@ -129,6 +135,7 @@ fn make_passing, I: 'static>(index: ReferendumIndex) { fn make_failing, I: 'static>(index: ReferendumIndex) { Referenda::::access_poll(index, |status| { if let PollStatus::Ongoing(tally, class) = status { + T::Tally::setup(class, Perbill::from_rational(1u32, 1000u32)); *tally = T::Tally::rejection(class); } }); @@ -181,10 +188,13 @@ fn is_not_confirming, I: 'static>(index: ReferendumIndex) -> bool { benchmarks_instance_pallet! { submit { - let caller = funded_account::("caller", 0); - whitelist_account!(caller); - }: _( - RawOrigin::Signed(caller), + let origin: T::Origin = T::SubmitOrigin::successful_origin(); + if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + whitelist_account!(caller); + } + }: _( + origin, Box::new(RawOrigin::Root.into()), T::Hashing::hash_of(&0), DispatchTime::After(0u32.into()) @@ -194,60 +204,62 @@ benchmarks_instance_pallet! 
{ } place_decision_deposit_preparing { - let (caller, index) = create_referendum::(); - }: place_decision_deposit(RawOrigin::Signed(caller), index) + let (origin, index) = create_referendum::(); + }: place_decision_deposit(origin, index) verify { assert!(Referenda::::ensure_ongoing(index).unwrap().decision_deposit.is_some()); } place_decision_deposit_queued { - let (caller, index) = create_referendum::(); + let (origin, index) = create_referendum::(); fill_queue::(index, 1, 90); - }: place_decision_deposit(RawOrigin::Signed(caller), index) + }: place_decision_deposit(origin, index) verify { let track = Referenda::::ensure_ongoing(index).unwrap().track; assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); - assert_eq!(TrackQueue::::get(&track)[0], (index, 0u32.into())); + assert!(TrackQueue::::get(&track).contains(&(index, 0u32.into()))); } place_decision_deposit_not_queued { - let (caller, index) = create_referendum::(); + let (origin, index) = create_referendum::(); fill_queue::(index, 0, 90); - }: place_decision_deposit(RawOrigin::Signed(caller), index) - verify { let track = Referenda::::ensure_ongoing(index).unwrap().track; assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); + }: place_decision_deposit(origin, index) + verify { + assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); + assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); } place_decision_deposit_passing { - let (caller, index) = create_referendum::(); + let (origin, index) = create_referendum::(); skip_prepare_period::(index); make_passing::(index); - }: place_decision_deposit(RawOrigin::Signed(caller), index) + }: place_decision_deposit(origin, index) verify { assert!(is_confirming::(index)); } place_decision_deposit_failing { - let (caller, index) = create_referendum::(); + let (origin, index) = create_referendum::(); skip_prepare_period::(index); - }: place_decision_deposit(RawOrigin::Signed(caller), index) + }: place_decision_deposit(origin, index) verify { assert!(is_not_confirming::(index)); } refund_decision_deposit { - let (caller, index) = create_referendum::(); + let (origin, index) = create_referendum::(); place_deposit::(index); assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); - }: _(RawOrigin::Signed(caller), index) + }: _(origin, index) verify { assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(_, _, None))); } cancel { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); }: _(T::CancelOrigin::successful_origin(), index) verify { @@ -255,7 +267,7 @@ benchmarks_instance_pallet! { } kill { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); }: _(T::KillOrigin::successful_origin(), index) verify { @@ -263,7 +275,7 @@ benchmarks_instance_pallet! { } one_fewer_deciding_queue_empty { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); nudge::(index); @@ -276,7 +288,7 @@ benchmarks_instance_pallet! { } one_fewer_deciding_failing { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); // No spaces free in the queue. 
let queued = fill_queue::(index, 0, 90); let track = Referenda::::ensure_ongoing(index).unwrap().track; @@ -295,7 +307,7 @@ benchmarks_instance_pallet! { } one_fewer_deciding_passing { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); // No spaces free in the queue. let queued = fill_queue::(index, 0, 0); let track = Referenda::::ensure_ongoing(index).unwrap().track; @@ -306,16 +318,16 @@ benchmarks_instance_pallet! { verify { assert_eq!(DecidingCount::::get(&track), deciding_count); assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get() - 1); - assert!(queued.into_iter().skip(1).all(|i| Referenda::::ensure_ongoing(i) + assert!(queued.into_iter().skip(1).filter(|i| Referenda::::ensure_ongoing(*i) .unwrap() .deciding - .map_or(true, |d| d.confirming.is_some()) - )); + .map_or(false, |d| d.confirming.is_some()) + ).count() == 1); } nudge_referendum_requeued_insertion { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); fill_queue::(index, 0, 90); @@ -336,7 +348,7 @@ benchmarks_instance_pallet! { nudge_referendum_requeued_slide { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); fill_queue::(index, 1, 90); @@ -362,7 +374,7 @@ benchmarks_instance_pallet! { // insertion at the beginning. // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); fill_queue::(index, 1, 0); @@ -379,7 +391,7 @@ benchmarks_instance_pallet! { nudge_referendum_not_queued { // First create our referendum and place the deposit. It will be failing. - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); fill_queue::(index, 0, 0); @@ -395,7 +407,7 @@ benchmarks_instance_pallet! { } nudge_referendum_no_deposit { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { @@ -404,7 +416,7 @@ benchmarks_instance_pallet! { } nudge_referendum_preparing { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); }: nudge_referendum(RawOrigin::Root, index) verify { @@ -413,7 +425,7 @@ benchmarks_instance_pallet! { } nudge_referendum_timed_out { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); skip_timeout_period::(index); }: nudge_referendum(RawOrigin::Root, index) verify { @@ -422,7 +434,7 @@ benchmarks_instance_pallet! { } nudge_referendum_begin_deciding_failing { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); }: nudge_referendum(RawOrigin::Root, index) @@ -431,7 +443,7 @@ benchmarks_instance_pallet! { } nudge_referendum_begin_deciding_passing { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); make_passing::(index); skip_prepare_period::(index); @@ -441,7 +453,7 @@ benchmarks_instance_pallet! 
{ } nudge_referendum_begin_confirming { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); nudge::(index); @@ -453,7 +465,7 @@ benchmarks_instance_pallet! { } nudge_referendum_end_confirming { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); make_passing::(index); @@ -466,7 +478,7 @@ benchmarks_instance_pallet! { } nudge_referendum_continue_not_confirming { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); nudge::(index); @@ -480,7 +492,7 @@ benchmarks_instance_pallet! { } nudge_referendum_continue_confirming { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); make_passing::(index); skip_prepare_period::(index); @@ -493,7 +505,7 @@ benchmarks_instance_pallet! { } nudge_referendum_approved { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); make_passing::(index); @@ -506,7 +518,7 @@ benchmarks_instance_pallet! { } nudge_referendum_rejected { - let (_caller, index) = create_referendum::(); + let (_origin, index) = create_referendum::(); place_deposit::(index); skip_prepare_period::(index); make_failing::(index); diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs index 1a24911603990..dbe22cd562349 100644 --- a/frame/referenda/src/mock.rs +++ b/frame/referenda/src/mock.rs @@ -289,6 +289,9 @@ impl VoteTally for Tally { let nays = ((ayes as u64) * 1_000_000_000u64 / approval.deconstruct() as u64) as u32 - ayes; Self { ayes, nays } } + + #[cfg(feature = "runtime-benchmarks")] + fn setup(_: Class, _: Perbill) {} } pub fn set_balance_proposal(value: u64) -> Vec { diff --git a/frame/support/src/traits/voting.rs b/frame/support/src/traits/voting.rs index b5e7d27073895..49ae3163d0cd1 100644 --- a/frame/support/src/traits/voting.rs +++ b/frame/support/src/traits/voting.rs @@ -106,6 +106,19 @@ pub trait VoteTally { fn rejection(class: Class) -> Self; #[cfg(feature = "runtime-benchmarks")] fn from_requirements(support: Perbill, approval: Perbill, class: Class) -> Self; + #[cfg(feature = "runtime-benchmarks")] + /// A function that should be called before any use of the `runtime-benchmarks` gated functions + /// of the `VoteTally` trait. + /// + /// Should be used to set up any needed state in a Pallet which implements `VoteTally` so that + /// benchmarks that execute will complete successfully. `class` can be used to set up a + /// particular class of voters, and `granularity` is used to determine the weight of one vote + /// relative to total unanimity. + /// + /// For example, in the case where there are a number of unique voters, and each voter has equal + /// voting weight, a granularity of `Perbill::from_rational(1, 1000)` should create `1_000` + /// users. 
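As a quick sanity check of that granularity convention (a minimal sketch reusing the same arithmetic as the ranked-collective `setup` implementation earlier in this patch; it is illustrative only and assumes `sp_runtime` is in the dependency tree):

    use sp_runtime::{PerThing, Perbill};

    // one vote worth 1/1000 of unanimity implies one thousand equally-weighted voters
    let granularity = Perbill::from_rational(1u32, 1000u32);
    let voters = granularity.saturating_reciprocal_mul(1u32);
    assert_eq!(voters, 1_000u32);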
+ fn setup(class: Class, granularity: Perbill); } pub enum PollStatus { None, From 669ed362f922c679ab1416fc8b9e9f4f68a21483 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Sun, 14 Aug 2022 20:06:02 +0100 Subject: [PATCH 036/348] Proposal: Flatten `AllPallets` and similar types (#11813) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * flratten AllPallets types * feature flag it * fix * fix * fmt * remove todo * Update frame/support/src/traits/metadata.rs Co-authored-by: Bastian Köcher * Update frame/support/src/migrations.rs Co-authored-by: Bastian Köcher * fix * mark as deprecated * add docs * fix ui test? * fmt Co-authored-by: parity-processbot <> Co-authored-by: Bastian Köcher --- frame/support/Cargo.toml | 4 ++ .../procedural/src/construct_runtime/mod.rs | 60 +++++++------------ .../src/pallet/expand/pallet_struct.rs | 6 +- frame/support/src/dispatch.rs | 4 +- frame/support/src/lib.rs | 10 ++++ frame/support/src/migrations.rs | 5 +- frame/support/src/traits.rs | 5 +- frame/support/src/traits/filter.rs | 11 ---- frame/support/src/traits/hooks.rs | 37 ++++++++++-- frame/support/src/traits/members.rs | 9 ++- frame/support/src/traits/metadata.rs | 41 ++++--------- frame/support/src/traits/misc.rs | 13 +++- frame/support/src/traits/storage.rs | 5 +- frame/support/test/tests/pallet.rs | 13 ++-- .../tests/pallet_ui/hooks_invalid_item.stderr | 6 +- 15 files changed, 121 insertions(+), 108 deletions(-) diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index a69133961e970..1b6488bbd4712 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -73,3 +73,7 @@ no-metadata-docs = ["frame-support-procedural/no-metadata-docs"] # By default some types have documentation, `full-metadata-docs` allows to add documentation to # more types in the metadata. full-metadata-docs = ["scale-info/docs"] +# Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of +# pallets in a runtime grows. Does increase the compile time! +tuples-96 = [] +tuples-128 = [] diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 7b4156a94db58..042af31be6bf4 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -308,47 +308,26 @@ fn decl_all_pallets<'a>( names.push(&pallet_declaration.name); } - // Make nested tuple structure like: - // `((FirstPallet, (SecondPallet, ( ... , LastPallet) ... ))))` - // But ignore the system pallet. - let all_pallets_without_system = names - .iter() - .filter(|n| **n != SYSTEM_PALLET_NAME) - .rev() - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - - // Make nested tuple structure like: - // `((FirstPallet, (SecondPallet, ( ... , LastPallet) ... ))))` - let all_pallets_with_system = names - .iter() - .rev() - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - - // Make nested tuple structure like: - // `((LastPallet, (SecondLastPallet, ( ... , FirstPallet) ... ))))` - // But ignore the system pallet. - let all_pallets_without_system_reversed = names - .iter() - .filter(|n| **n != SYSTEM_PALLET_NAME) - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - - // Make nested tuple structure like: - // `((LastPallet, (SecondLastPallet, ( ... , FirstPallet) ... 
))))` - let all_pallets_with_system_reversed = names - .iter() - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - let system_pallet = match names.iter().find(|n| **n == SYSTEM_PALLET_NAME) { Some(name) => name, None => return syn::Error::new( proc_macro2::Span::call_site(), "`System` pallet declaration is missing. \ - Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", + Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", ) .into_compile_error(), }; + let names_without_system = + names.iter().filter(|n| **n != SYSTEM_PALLET_NAME).collect::>(); + let names_reversed = names.clone().into_iter().rev().collect::>(); + let names_without_system_reverse = + names_without_system.clone().into_iter().rev().collect::>(); + let names_reversed_with_system_first = std::iter::once(system_pallet) + .chain(names_without_system_reverse.clone().into_iter()) + .collect::>(); + quote!( #types @@ -364,25 +343,28 @@ fn decl_all_pallets<'a>( pub type AllPallets = AllPalletsWithSystem; /// All pallets included in the runtime as a nested tuple of types. - pub type AllPalletsWithSystem = ( #all_pallets_with_system ); + pub type AllPalletsWithSystem = ( #(#names),* ); /// All pallets included in the runtime as a nested tuple of types. /// Excludes the System pallet. - pub type AllPalletsWithoutSystem = ( #all_pallets_without_system ); + pub type AllPalletsWithoutSystem = ( #(#names_without_system),* ); /// All pallets included in the runtime as a nested tuple of types in reversed order. /// Excludes the System pallet. - pub type AllPalletsWithoutSystemReversed = ( #all_pallets_without_system_reversed ); + #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsWithoutSystemReversed =( #(#names_without_system_reverse),* ); /// All pallets included in the runtime as a nested tuple of types in reversed order. - pub type AllPalletsWithSystemReversed = ( #all_pallets_with_system_reversed ); + #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsWithSystemReversed = ( #(#names_reversed),* ); /// All pallets included in the runtime as a nested tuple of types in reversed order. /// With the system pallet first. - pub type AllPalletsReversedWithSystemFirst = ( - #system_pallet, - AllPalletsWithoutSystemReversed - ); + #[deprecated(note = "Using reverse pallet orders is deprecated. 
use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsReversedWithSystemFirst = ( #(#names_reversed_with_system_first),* ); ) } diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index a4a8acc10f799..f0fb6bacedffb 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -240,9 +240,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { #config_where_clause { fn count() -> usize { 1 } - fn accumulate( - acc: &mut #frame_support::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> - ) { + fn infos() -> #frame_support::sp_std::vec::Vec<#frame_support::traits::PalletInfoData> { use #frame_support::traits::PalletInfoAccess; let item = #frame_support::traits::PalletInfoData { index: Self::index(), @@ -250,7 +248,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { module_name: Self::module_name(), crate_version: Self::crate_version(), }; - acc.push(item); + #frame_support::sp_std::vec![item] } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index fb0b658cd9303..a658f01fa6854 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -2207,7 +2207,7 @@ macro_rules! decl_module { for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { fn count() -> usize { 1 } - fn accumulate(acc: &mut $crate::sp_std::vec::Vec<$crate::traits::PalletInfoData>) { + fn infos() -> $crate::sp_std::vec::Vec<$crate::traits::PalletInfoData> { use $crate::traits::PalletInfoAccess; let item = $crate::traits::PalletInfoData { index: Self::index(), @@ -2215,7 +2215,7 @@ macro_rules! decl_module { module_name: Self::module_name(), crate_version: Self::crate_version(), }; - acc.push(item); + vec![item] } } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 8e43df82a284c..7e4c944330fe3 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -16,6 +16,16 @@ // limitations under the License. //! Support code for the runtime. +//! +//! ## Note on Tuple Traits +//! +//! Many of the traits defined in [`traits`] have auto-implementations on tuples as well. Usually, +//! the tuple is a function of number of pallets in the runtime. By default, the traits are +//! implemented for tuples of up to 64 items. +// +// If you have more pallets in your runtime, or for any other reason need more, enabled `tuples-96` +// or the `tuples-128` complication flag. Note that these features *will increase* the compilation +// of this crate. #![cfg_attr(not(feature = "std"), no_std)] diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs index 05833e0515c07..f594b98ede4ff 100644 --- a/frame/support/src/migrations.rs +++ b/frame/support/src/migrations.rs @@ -19,6 +19,7 @@ use crate::{ traits::{GetStorageVersion, PalletInfoAccess}, weights::{RuntimeDbWeight, Weight}, }; +use impl_trait_for_tuples::impl_for_tuples; /// Trait used by [`migrate_from_pallet_version_to_storage_version`] to do the actual migration. 
pub trait PalletVersionToStorageVersionHelper { @@ -42,7 +43,9 @@ impl PalletVersionToStorageVersionHelpe } } -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl PalletVersionToStorageVersionHelper for T { fn migrate(db_weight: &RuntimeDbWeight) -> Weight { let mut weight: Weight = 0; diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 72d6d6682f14a..16504beb16907 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -50,7 +50,7 @@ mod error; pub use error::PalletError; mod filter; -pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter, IntegrityTest}; +pub use filter::{ClearFilterGuard, FilterStack, FilterStackGuard, InstanceFilter}; mod misc; pub use misc::{ @@ -81,7 +81,8 @@ mod hooks; #[cfg(feature = "std")] pub use hooks::GenesisBuild; pub use hooks::{ - Hooks, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet, + Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, + OnTimestampSet, }; #[cfg(feature = "try-runtime")] pub use hooks::{OnRuntimeUpgradeHelpersExt, ON_RUNTIME_UPGRADE_PREFIX}; diff --git a/frame/support/src/traits/filter.rs b/frame/support/src/traits/filter.rs index 95e5954184b4b..cdd82a3124e63 100644 --- a/frame/support/src/traits/filter.rs +++ b/frame/support/src/traits/filter.rs @@ -180,17 +180,6 @@ macro_rules! impl_filter_stack { } } -/// Type that provide some integrity tests. -/// -/// This implemented for modules by `decl_module`. -#[impl_trait_for_tuples::impl_for_tuples(30)] -pub trait IntegrityTest { - /// Run integrity test. - /// - /// The test is not executed in a externalities provided environment. - fn integrity_test() {} -} - #[cfg(test)] pub mod test_impl_filter_stack { use super::*; diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index 385db4e4d1ad9..2b7234006e0ff 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -38,7 +38,9 @@ pub trait OnInitialize { } } -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnInitialize for Tuple { fn on_initialize(n: BlockNumber) -> crate::weights::Weight { let mut weight = 0; @@ -50,7 +52,9 @@ impl OnInitialize for Tuple { /// The block finalization trait. /// /// Implementing this lets you express what should happen for your pallet when the block is ending. -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnFinalize { /// The block is being finalized. Implement to have something happen. 
/// @@ -79,7 +83,9 @@ pub trait OnIdle { } } -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnIdle for Tuple { fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { let on_idle_functions: &[fn( @@ -105,7 +111,9 @@ impl OnIdle for Tuple { /// Implementing this trait for a pallet let's you express operations that should /// happen at genesis. It will be called in an externalities provided environment and /// will see the genesis state after all pallets have written their genesis state. -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnGenesis { /// Something that should happen at genesis. fn on_genesis() {} @@ -187,7 +195,9 @@ pub trait OnRuntimeUpgrade { } } -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnRuntimeUpgrade for Tuple { fn on_runtime_upgrade() -> crate::weights::Weight { let mut weight = 0; @@ -210,6 +220,19 @@ impl OnRuntimeUpgrade for Tuple { } } +/// Type that provide some integrity tests. +/// +/// This implemented for modules by `decl_module`. +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +pub trait IntegrityTest { + /// Run integrity test. + /// + /// The test is not executed in a externalities provided environment. + fn integrity_test() {} +} + /// The pallet hooks trait. Implementing this lets you express some logic to execute. pub trait Hooks { /// The block is being finalized. Implement to have something happen. @@ -321,7 +344,9 @@ pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeD } /// A trait which is called when the timestamp is set in the runtime. -#[impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnTimestampSet { /// Called when the timestamp is set. fn on_timestamp_set(moment: Moment); diff --git a/frame/support/src/traits/members.rs b/frame/support/src/traits/members.rs index 8c69a2aaccb33..daf2d3aa6517d 100644 --- a/frame/support/src/traits/members.rs +++ b/frame/support/src/traits/members.rs @@ -17,6 +17,7 @@ //! Traits for dealing with the idea of membership. +use impl_trait_for_tuples::impl_for_tuples; use sp_std::{marker::PhantomData, prelude::*}; /// A trait for querying whether a type can be said to "contain" a value. 
@@ -25,7 +26,9 @@ pub trait Contains { fn contains(t: &T) -> bool; } -#[impl_trait_for_tuples::impl_for_tuples(1, 30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl Contains for Tuple { fn contains(t: &T) -> bool { for_tuples!( #( @@ -41,7 +44,9 @@ pub trait ContainsPair { fn contains(a: &A, b: &B) -> bool; } -#[impl_trait_for_tuples::impl_for_tuples(0, 30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl ContainsPair for Tuple { fn contains(a: &A, b: &B) -> bool { for_tuples!( #( diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index d3dc57e1ee52d..b0dd5bd5160b4 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -18,6 +18,7 @@ //! Traits for managing information attached to pallets and their constituents. use codec::{Decode, Encode}; +use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::RuntimeDebug; use sp_std::prelude::*; @@ -70,40 +71,22 @@ pub trait PalletsInfoAccess { /// /// You probably don't want this function but `infos()` instead. fn count() -> usize { - 0 + // for backwards compatibility with XCM-3, Mark is deprecated. + Self::infos().len() } - /// Extend the given vector by all of the pallets' information that this type represents. - /// - /// You probably don't want this function but `infos()` instead. - fn accumulate(_accumulator: &mut Vec) {} - /// All of the pallets' information that this type represents. - fn infos() -> Vec { - let mut result = Vec::with_capacity(Self::count()); - Self::accumulate(&mut result); - result - } + fn infos() -> Vec; } -impl PalletsInfoAccess for () {} -impl PalletsInfoAccess for (T,) { - fn count() -> usize { - T::count() - } - fn accumulate(acc: &mut Vec) { - T::accumulate(acc) - } -} - -impl PalletsInfoAccess for (T1, T2) { - fn count() -> usize { - T1::count() + T2::count() - } - fn accumulate(acc: &mut Vec) { - // The AllPallets type tuplises the pallets in reverse order, so we unreverse them here. - T2::accumulate(acc); - T1::accumulate(acc); +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +impl PalletsInfoAccess for Tuple { + fn infos() -> Vec { + let mut res = vec![]; + for_tuples!( #( res.extend(Tuple::infos()); )* ); + res } } diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index fa59fa92f920d..0f781d0bbd4cf 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -19,6 +19,7 @@ use crate::dispatch::Parameter; use codec::{CompactLen, Decode, DecodeLimit, Encode, EncodeLike, Input, MaxEncodedLen}; +use impl_trait_for_tuples::impl_for_tuples; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, Saturating}; #[doc(hidden)] @@ -467,14 +468,18 @@ impl SameOrOther { } /// Handler for when a new account has been created. 
-#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnNewAccount { /// A new account `who` has been registered. fn on_new_account(who: &AccountId); } /// The account with the given id was reaped. -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OnKilledAccount { /// The account with the given id was reaped. fn on_killed_account(who: &AccountId); @@ -632,7 +637,9 @@ impl PrivilegeCmp for EqualPrivilegeOnly { /// but cannot preform any alterations. More specifically alterations are /// not forbidden, but they are not persisted in any way after the worker /// has finished. -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] pub trait OffchainWorker { /// This function is being called after every block import (when fully synced). /// diff --git a/frame/support/src/traits/storage.rs b/frame/support/src/traits/storage.rs index e484140cc2fd9..d40d82c28e87e 100644 --- a/frame/support/src/traits/storage.rs +++ b/frame/support/src/traits/storage.rs @@ -17,6 +17,7 @@ //! Traits for encoding data related to pallet's storage items. +use impl_trait_for_tuples::impl_for_tuples; use sp_std::prelude::*; /// An instance of a pallet in the storage. @@ -71,7 +72,9 @@ pub trait StorageInfoTrait { fn storage_info() -> Vec; } -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl StorageInfoTrait for Tuple { fn storage_info() -> Vec { let mut res = vec![]; diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 6b72327eb4989..c4bbc59c70373 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -1597,8 +1597,9 @@ fn test_storage_info() { #[test] fn assert_type_all_pallets_reversed_with_system_first_is_correct() { // Just ensure the 2 types are same. + #[allow(deprecated)] fn _a(_t: AllPalletsReversedWithSystemFirst) {} - fn _b(t: (System, (Example4, (Example2, (Example,))))) { + fn _b(t: (System, Example4, Example2, Example)) { _a(t) } } @@ -1607,7 +1608,7 @@ fn assert_type_all_pallets_reversed_with_system_first_is_correct() { fn assert_type_all_pallets_with_system_is_correct() { // Just ensure the 2 types are same. fn _a(_t: AllPalletsWithSystem) {} - fn _b(t: (System, (Example, (Example2, (Example4,))))) { + fn _b(t: (System, Example, Example2, Example4)) { _a(t) } } @@ -1616,7 +1617,7 @@ fn assert_type_all_pallets_with_system_is_correct() { fn assert_type_all_pallets_without_system_is_correct() { // Just ensure the 2 types are same. 
fn _a(_t: AllPalletsWithoutSystem) {} - fn _b(t: (Example, (Example2, (Example4,)))) { + fn _b(t: (Example, Example2, Example4)) { _a(t) } } @@ -1624,8 +1625,9 @@ fn assert_type_all_pallets_without_system_is_correct() { #[test] fn assert_type_all_pallets_with_system_reversed_is_correct() { // Just ensure the 2 types are same. + #[allow(deprecated)] fn _a(_t: AllPalletsWithSystemReversed) {} - fn _b(t: (Example4, (Example2, (Example, (System,))))) { + fn _b(t: (Example4, Example2, Example, System)) { _a(t) } } @@ -1633,8 +1635,9 @@ fn assert_type_all_pallets_with_system_reversed_is_correct() { #[test] fn assert_type_all_pallets_without_system_reversed_is_correct() { // Just ensure the 2 types are same. + #[allow(deprecated)] fn _a(_t: AllPalletsWithoutSystemReversed) {} - fn _b(t: (Example4, (Example2, (Example,)))) { + fn _b(t: (Example4, Example2, Example)) { _a(t) } } diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index d1a89fbb850e9..ff52a094d6f8d 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -1,13 +1,13 @@ error[E0107]: missing generics for trait `Hooks` - --> $DIR/hooks_invalid_item.rs:12:18 + --> tests/pallet_ui/hooks_invalid_item.rs:12:18 | 12 | impl Hooks for Pallet {} | ^^^^^ expected 1 generic argument | note: trait defined here, with 1 generic parameter: `BlockNumber` - --> $DIR/hooks.rs:214:11 + --> $WORKSPACE/frame/support/src/traits/hooks.rs | -214 | pub trait Hooks { + | pub trait Hooks { | ^^^^^ ----------- help: add missing generic argument | From cf36d2f7eaaee27516c601141a2ea0acd3db641c Mon Sep 17 00:00:00 2001 From: Jake Hemmerle Date: Mon, 15 Aug 2022 03:53:39 -0400 Subject: [PATCH 037/348] swap ed25519-dalek for ed25519-zebra (#11781) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * swap ed25519-dalek for ed25519-zebra; no batch verificaiton fixed batch verificaiton tests removed additional zero verificaiton tests removed comments, fixed test bug, added #[derive(Clone)] Update primitives/core/src/ed25519.rs Co-authored-by: Squirrel * modified assertion to allow default ed25519-zebra zero key behavior * cargo clippy * Update primitives/core/Cargo.toml Co-authored-by: Bastian Köcher * Update primitives/core/src/ed25519.rs Co-authored-by: Davide Galassi * Update primitives/core/src/ed25519.rs Co-authored-by: Davide Galassi * Update primitives/core/src/ed25519.rs Co-authored-by: Davide Galassi * Update primitives/core/src/ed25519.rs Co-authored-by: Davide Galassi * updated Cargo.lock for sp-core * fix inaccurate comment Co-authored-by: Squirrel Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher Co-authored-by: Davide Galassi --- Cargo.lock | 16 ++++++++++- Cargo.toml | 2 +- primitives/core/Cargo.toml | 6 ++-- primitives/core/src/ed25519.rs | 51 ++++++++++++++-------------------- primitives/io/src/lib.rs | 27 +++++++++++------- 5 files changed, 57 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2db192dcbf1a1..b5506b1259436 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1788,6 +1788,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ed25519-zebra" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "403ef3e961ab98f0ba902771d29f842058578bb1ce7e3c59dad5a6a93e784c69" +dependencies = [ + "curve25519-dalek 3.0.2", + "hex", + "rand_core 0.6.2", + "sha2 0.9.8", 
+ "thiserror", + "zeroize", +] + [[package]] name = "either" version = "1.6.1" @@ -9624,7 +9638,7 @@ dependencies = [ "byteorder", "criterion", "dyn-clonable", - "ed25519-dalek", + "ed25519-zebra", "futures", "hash-db", "hash256-std-hasher", diff --git a/Cargo.toml b/Cargo.toml index 1f22343c002a8..e2907716ca9f2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -258,7 +258,7 @@ crc32fast = { opt-level = 3 } crossbeam-deque = { opt-level = 3 } crypto-mac = { opt-level = 3 } curve25519-dalek = { opt-level = 3 } -ed25519-dalek = { opt-level = 3 } +ed25519-zebra = { opt-level = 3 } flate2 = { opt-level = 3 } futures-channel = { opt-level = 3 } hashbrown = { opt-level = 3 } diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index fced678293140..2cdfd04e942d7 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -47,7 +47,7 @@ thiserror = { version = "1.0.30", optional = true } bitflags = "1.3" # full crypto -ed25519-dalek = { version = "1.0.1", default-features = false, features = ["u64_backend", "alloc"], optional = true } +ed25519-zebra = { version = "3.0.0", default-features = false, optional = true} blake2-rfc = { version = "0.2.18", default-features = false, optional = true } schnorrkel = { version = "0.9.1", features = [ "preaudit_deprecated", @@ -97,7 +97,7 @@ std = [ "sp-std/std", "serde", "blake2-rfc/std", - "ed25519-dalek/std", + "ed25519-zebra", "hex/std", "base58", "substrate-bip39", @@ -127,7 +127,7 @@ std = [ # or Intel SGX. # For the regular wasm runtime builds this should not be used. full_crypto = [ - "ed25519-dalek", + "ed25519-zebra", "blake2-rfc", "schnorrkel", "hex", diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index 177af0651c0ef..0553cf4843df5 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -39,7 +39,9 @@ use crate::crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}; #[cfg(feature = "std")] use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "full_crypto")] -use ed25519_dalek::{Signer as _, Verifier as _}; +use core::convert::TryFrom; +#[cfg(feature = "full_crypto")] +use ed25519_zebra::{SigningKey, VerificationKey}; #[cfg(feature = "std")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; @@ -75,17 +77,10 @@ pub struct Public(pub [u8; 32]); /// A key pair. #[cfg(feature = "full_crypto")] -pub struct Pair(ed25519_dalek::Keypair); - -#[cfg(feature = "full_crypto")] -impl Clone for Pair { - fn clone(&self) -> Self { - Pair(ed25519_dalek::Keypair { - public: self.0.public, - secret: ed25519_dalek::SecretKey::from_bytes(self.0.secret.as_bytes()) - .expect("key is always the correct size; qed"), - }) - } +#[derive(Copy, Clone)] +pub struct Pair { + public: VerificationKey, + secret: SigningKey, } impl AsRef<[u8; 32]> for Public { @@ -456,10 +451,10 @@ impl TraitPair for Pair { /// /// You should never need to use this; generate(), generate_with_phrase fn from_seed_slice(seed_slice: &[u8]) -> Result { - let secret = ed25519_dalek::SecretKey::from_bytes(seed_slice) - .map_err(|_| SecretStringError::InvalidSeedLength)?; - let public = ed25519_dalek::PublicKey::from(&secret); - Ok(Pair(ed25519_dalek::Keypair { secret, public })) + let secret = + SigningKey::try_from(seed_slice).map_err(|_| SecretStringError::InvalidSeedLength)?; + let public = VerificationKey::from(&secret); + Ok(Pair { secret, public }) } /// Derive a child key from a series of given junctions. 
@@ -468,7 +463,7 @@ impl TraitPair for Pair { path: Iter, _seed: Option, ) -> Result<(Pair, Option), DeriveError> { - let mut acc = self.0.secret.to_bytes(); + let mut acc = self.secret.into(); for j in path { match j { DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), @@ -480,16 +475,12 @@ impl TraitPair for Pair { /// Get the public key. fn public(&self) -> Public { - let mut r = [0u8; 32]; - let pk = self.0.public.as_bytes(); - r.copy_from_slice(pk); - Public(r) + Public(self.public.into()) } /// Sign a message. fn sign(&self, message: &[u8]) -> Signature { - let r = self.0.sign(message).to_bytes(); - Signature::from_raw(r) + Signature::from_raw(self.secret.sign(message).into()) } /// Verify a signature on a message. Returns true if the signature is good. @@ -502,17 +493,17 @@ impl TraitPair for Pair { /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct /// size. Use it only if you're coming from byte buffers and need the speed. fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let public_key = match ed25519_dalek::PublicKey::from_bytes(pubkey.as_ref()) { + let public_key = match VerificationKey::try_from(pubkey.as_ref()) { Ok(pk) => pk, Err(_) => return false, }; - let sig = match ed25519_dalek::Signature::try_from(sig) { + let sig = match ed25519_zebra::Signature::try_from(sig) { Ok(s) => s, Err(_) => return false, }; - public_key.verify(message.as_ref(), &sig).is_ok() + public_key.verify(&sig, message.as_ref()).is_ok() } /// Return a vec filled with raw data. @@ -524,8 +515,8 @@ impl TraitPair for Pair { #[cfg(feature = "full_crypto")] impl Pair { /// Get the seed for this key. - pub fn seed(&self) -> &Seed { - self.0.secret.as_bytes() + pub fn seed(&self) -> Seed { + self.secret.into() } /// Exactly as `from_string` except that if no matches are found then, the the first 32 @@ -577,12 +568,12 @@ mod test { fn seed_and_derive_should_work() { let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); let pair = Pair::from_seed(&seed); - assert_eq!(pair.seed(), &seed); + assert_eq!(pair.seed(), seed); let path = vec![DeriveJunction::Hard([0u8; 32])]; let derived = pair.derive(path.into_iter(), None).ok().unwrap().0; assert_eq!( derived.seed(), - &hex!("ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c") + hex!("ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c") ); } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 9bf9345e594c3..7942bafcc2a1b 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1895,6 +1895,7 @@ mod tests { ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { let pair = sr25519::Pair::generate_with_phrase(None).0; + let pair_unused = sr25519::Pair::generate_with_phrase(None).0; crypto::start_batch_verify(); for it in 0..70 { let msg = format!("Schnorrkel {}!", it); @@ -1902,8 +1903,10 @@ mod tests { crypto::sr25519_batch_verify(&signature, msg.as_bytes(), &pair.public()); } - // push invlaid - crypto::sr25519_batch_verify(&zero_sr_sig(), &Vec::new(), &zero_sr_pub()); + // push invalid + let msg = b"asdf!"; + let signature = pair.sign(msg); + crypto::sr25519_batch_verify(&signature, msg, &pair_unused.public()); assert!(!crypto::finish_batch_verify()); crypto::start_batch_verify(); @@ -1938,10 +1941,10 @@ mod tests { ext.register_extension(TaskExecutorExt::new(TaskExecutor::new())); ext.execute_with(|| { - // invalid ed25519 signature + // valid ed25519 
signature crypto::start_batch_verify(); crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub()); - assert!(!crypto::finish_batch_verify()); + assert!(crypto::finish_batch_verify()); // 2 valid ed25519 signatures crypto::start_batch_verify(); @@ -1961,12 +1964,14 @@ mod tests { // 1 valid, 1 invalid ed25519 signature crypto::start_batch_verify(); - let pair = ed25519::Pair::generate_with_phrase(None).0; + let pair1 = ed25519::Pair::generate_with_phrase(None).0; + let pair2 = ed25519::Pair::generate_with_phrase(None).0; let msg = b"Important message"; - let signature = pair.sign(msg); - crypto::ed25519_batch_verify(&signature, msg, &pair.public()); + let signature = pair1.sign(msg); crypto::ed25519_batch_verify(&zero_ed_sig(), &Vec::new(), &zero_ed_pub()); + crypto::ed25519_batch_verify(&signature, msg, &pair1.public()); + crypto::ed25519_batch_verify(&signature, msg, &pair2.public()); assert!(!crypto::finish_batch_verify()); @@ -1993,11 +1998,13 @@ mod tests { // 1 valid sr25519, 1 invalid sr25519 crypto::start_batch_verify(); - let pair = sr25519::Pair::generate_with_phrase(None).0; + let pair1 = sr25519::Pair::generate_with_phrase(None).0; + let pair2 = sr25519::Pair::generate_with_phrase(None).0; let msg = b"Schnorrkcel!"; - let signature = pair.sign(msg); - crypto::sr25519_batch_verify(&signature, msg, &pair.public()); + let signature = pair1.sign(msg); + crypto::sr25519_batch_verify(&signature, msg, &pair1.public()); + crypto::sr25519_batch_verify(&signature, msg, &pair2.public()); crypto::sr25519_batch_verify(&zero_sr_sig(), &Vec::new(), &zero_sr_pub()); assert!(!crypto::finish_batch_verify()); From 90c8ac3410966ef538fb1444412b60077d073a05 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Aug 2022 07:57:15 +0000 Subject: [PATCH 038/348] Bump rpassword from 5.0.1 to 7.0.0 (#11826) * Bump rpassword from 5.0.1 to 7.0.0 Bumps [rpassword](https://github.com/conradkleinespel/rpassword) from 5.0.1 to 7.0.0. - [Release notes](https://github.com/conradkleinespel/rpassword/releases) - [Commits](https://github.com/conradkleinespel/rpassword/compare/v5.0.1...v7.0.0) --- updated-dependencies: - dependency-name: rpassword dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Use new API Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi Signed-off-by: dependabot[bot] Signed-off-by: Oliver Tale-Yazdi Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Oliver Tale-Yazdi --- Cargo.lock | 4 ++-- client/cli/Cargo.toml | 2 +- client/cli/src/commands/utils.rs | 2 +- client/cli/src/params/keystore_params.rs | 5 ++--- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5506b1259436..da047d4cba6d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7534,9 +7534,9 @@ dependencies = [ [[package]] name = "rpassword" -version = "5.0.1" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" +checksum = "26b763cb66df1c928432cc35053f8bd4cec3335d8559fc16010017d16b3c1680" dependencies = [ "libc", "winapi", diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index ea60e4c9f87e5..5f4c00d06aea3 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -24,7 +24,7 @@ names = { version = "0.13.0", default-features = false } parity-scale-codec = "3.0.0" rand = "0.7.3" regex = "1.5.5" -rpassword = "5.0.0" +rpassword = "7.0.0" serde = "1.0.136" serde_json = "1.0.79" thiserror = "1.0.30" diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs index 32556f0ea728d..95849065471b4 100644 --- a/client/cli/src/commands/utils.rs +++ b/client/cli/src/commands/utils.rs @@ -48,7 +48,7 @@ pub fn read_uri(uri: Option<&String>) -> error::Result { uri.into() } } else { - rpassword::read_password_from_tty(Some("URI: "))? + rpassword::prompt_password("URI: ")? }; Ok(uri) diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index 46403f95fbc4b..386d1791dd805 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -94,7 +94,7 @@ impl KeystoreParams { let (password_interactive, password) = (self.password_interactive, self.password.clone()); let pass = if password_interactive { - let password = rpassword::read_password_from_tty(Some("Key password: "))?; + let password = rpassword::prompt_password("Key password: ")?; Some(SecretString::new(password)) } else { password @@ -105,6 +105,5 @@ impl KeystoreParams { } fn input_keystore_password() -> Result { - rpassword::read_password_from_tty(Some("Keystore password: ")) - .map_err(|e| format!("{:?}", e).into()) + rpassword::prompt_password("Keystore password: ").map_err(|e| format!("{:?}", e).into()) } From 20b5aac7e09e8b25abaa48c6781e653b36ba3ca0 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Mon, 15 Aug 2022 20:38:36 +0100 Subject: [PATCH 039/348] declone and close the door (#12035) * declone and close the door * cargo fmt * remove brackets --- .cargo/config.toml | 2 - bin/node/executor/tests/basic.rs | 2 +- bin/node/executor/tests/fees.rs | 2 +- bin/node/runtime/src/impls.rs | 12 +- client/api/src/in_mem.rs | 2 +- .../authority-discovery/src/worker/tests.rs | 19 ++- client/beefy/src/round.rs | 2 +- client/beefy/src/tests.rs | 2 +- client/beefy/src/worker.rs | 2 +- client/cli/src/commands/chain_info_cmd.rs | 2 +- client/consensus/aura/src/lib.rs | 4 +- client/consensus/babe/src/authorship.rs | 2 +- client/consensus/babe/src/tests.rs | 2 +- .../common/src/import_queue/basic_queue.rs | 2 +- client/consensus/epochs/src/lib.rs | 16 +-- client/consensus/manual-seal/src/lib.rs | 6 +- 
client/db/src/lib.rs | 2 +- client/db/src/storage_cache.rs | 6 +- client/executor/wasmtime/src/host.rs | 3 +- .../executor/wasmtime/src/instance_wrapper.rs | 26 ++-- client/executor/wasmtime/src/tests.rs | 2 +- .../src/communication/gossip.rs | 36 ++--- .../src/communication/tests.rs | 24 ++-- client/finality-grandpa/src/justification.rs | 4 +- client/finality-grandpa/src/tests.rs | 6 +- client/finality-grandpa/src/until_imported.rs | 4 +- client/finality-grandpa/src/voting_rule.rs | 2 +- client/network-gossip/src/bridge.rs | 18 +-- client/network-gossip/src/state_machine.rs | 2 +- client/network/src/discovery.rs | 2 +- client/network/sync/src/blocks.rs | 54 +++---- client/network/sync/src/extra_requests.rs | 8 +- client/network/sync/src/lib.rs | 40 +++--- client/rpc/src/chain/tests.rs | 8 +- client/service/test/src/client/mod.rs | 36 ++--- client/transaction-pool/benches/basics.rs | 4 +- client/transaction-pool/src/graph/pool.rs | 10 +- client/transaction-pool/src/graph/rotator.rs | 2 +- client/transaction-pool/tests/pool.rs | 32 ++--- frame/alliance/src/tests.rs | 46 +++--- frame/assets/src/mock.rs | 4 +- frame/atomic-swap/src/lib.rs | 2 +- frame/atomic-swap/src/tests.rs | 4 +- frame/contracts/src/exec.rs | 9 +- frame/contracts/src/tests.rs | 6 +- frame/contracts/src/wasm/mod.rs | 4 +- .../democracy/src/tests/external_proposing.rs | 9 +- frame/democracy/src/tests/public_proposals.rs | 2 +- frame/elections-phragmen/src/lib.rs | 7 +- frame/examples/offchain-worker/src/tests.rs | 10 +- frame/multisig/src/tests.rs | 134 +++--------------- .../nomination-pools/benchmarking/src/lib.rs | 10 +- frame/nomination-pools/src/lib.rs | 7 +- frame/nomination-pools/src/migration.rs | 2 +- frame/proxy/src/tests.rs | 2 +- frame/remark/src/tests.rs | 4 +- frame/scheduler/src/tests.rs | 4 +- frame/staking/src/mock.rs | 2 +- frame/staking/src/tests.rs | 5 +- frame/state-trie-migration/src/lib.rs | 2 +- frame/timestamp/src/mock.rs | 2 +- frame/tips/src/tests.rs | 66 ++++----- frame/transaction-payment/src/lib.rs | 8 +- frame/transaction-storage/src/tests.rs | 12 +- frame/uniques/src/functions.rs | 8 +- primitives/api/test/tests/runtime_calls.rs | 6 +- .../npos-elections/fuzzer/src/reduce.rs | 2 +- primitives/npos-elections/src/mock.rs | 16 +-- primitives/npos-elections/src/pjr.rs | 2 +- primitives/npos-elections/src/tests.rs | 45 ++---- primitives/runtime/src/curve.rs | 2 +- primitives/runtime/src/lib.rs | 2 +- 72 files changed, 344 insertions(+), 512 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index de299a90971e4..5355758f7a4fa 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -12,8 +12,6 @@ rustflags = [ "-Aclippy::if-same-then-else", "-Aclippy::clone-double-ref", "-Dclippy::complexity", - "-Aclippy::clone_on_copy", # Too common - "-Aclippy::needless_lifetimes", # Backward compat? 
"-Aclippy::zero-prefixed-literal", # 00_1000_000 "-Aclippy::type_complexity", # raison d'etre "-Aclippy::nonminimal-bool", # maybe diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 27e848a281097..468b9c8ac4c1b 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -127,7 +127,7 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { let block2 = construct_block( &mut t, 2, - block1.1.clone(), + block1.1, vec![ CheckedExtrinsic { signed: None, diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index 008ed5f53927b..296d1d8e28798 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -73,7 +73,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { let block2 = construct_block( &mut tt, 2, - block1.1.clone(), + block1.1, vec![ CheckedExtrinsic { signed: None, diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 68c780094208f..eaf872d6f3fec 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -197,12 +197,12 @@ mod multiplier_tests { fn truth_value_update_poc_works() { let fm = Multiplier::saturating_from_rational(1, 2); let test_set = vec![ - (0, fm.clone()), - (100, fm.clone()), - (1000, fm.clone()), - (target(), fm.clone()), - (max_normal() / 2, fm.clone()), - (max_normal(), fm.clone()), + (0, fm), + (100, fm), + (1000, fm), + (target(), fm), + (max_normal() / 2, fm), + (max_normal(), fm), ]; test_set.into_iter().for_each(|(w, fm)| { run_with_system_weight(w, || { diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index a8a7442a8ef9f..9000f62aa6cc3 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -173,7 +173,7 @@ impl Blockchain { { let mut storage = self.storage.write(); - storage.leaves.import(hash, number, header.parent_hash().clone()); + storage.leaves.import(hash, number, *header.parent_hash()); storage.blocks.insert(hash, StoredBlock::new(header, body, justifications)); if let NewBlockState::Final = new_state { diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 5a60d3353db52..7b0ee45833e19 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -48,7 +48,7 @@ pub(crate) struct TestApi { impl ProvideRuntimeApi for TestApi { type Api = RuntimeApi; - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + fn runtime_api(&self) -> ApiRef<'_, Self::Api> { RuntimeApi { authorities: self.authorities.clone() }.into() } } @@ -530,7 +530,7 @@ impl DhtValueFoundTester { ) -> Option<&HashSet> { let (_dht_event_tx, dht_event_rx) = channel(1); let local_test_api = - Arc::new(TestApi { authorities: vec![self.remote_authority_public.clone().into()] }); + Arc::new(TestApi { authorities: vec![self.remote_authority_public.into()] }); let local_network: Arc = Arc::new(Default::default()); let local_key_store = KeyStore::new(); @@ -555,8 +555,7 @@ impl DhtValueFoundTester { self.local_worker .as_ref() .map(|w| { - w.addr_cache - .get_addresses_by_authority_id(&self.remote_authority_public.clone().into()) + w.addr_cache.get_addresses_by_authority_id(&self.remote_authority_public.into()) }) .unwrap() } @@ -569,7 +568,7 @@ fn limit_number_of_addresses_added_to_cache_per_authority() { let addresses = (1..100).map(|i| tester.multiaddr_with_peer_id(i)).collect(); let kv_pairs = block_on(build_dht_event::( addresses, - tester.remote_authority_public.clone().into(), + 
tester.remote_authority_public.into(), &tester.remote_key_store, None, )); @@ -584,7 +583,7 @@ fn strict_accept_address_with_peer_signature() { let addr = tester.multiaddr_with_peer_id(1); let kv_pairs = block_on(build_dht_event( vec![addr.clone()], - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, Some(&TestSigner { keypair: &tester.remote_node_key }), )); @@ -604,7 +603,7 @@ fn reject_address_with_rogue_peer_signature() { let rogue_remote_node_key = Keypair::generate_ed25519(); let kv_pairs = block_on(build_dht_event( vec![tester.multiaddr_with_peer_id(1)], - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, Some(&TestSigner { keypair: &rogue_remote_node_key }), )); @@ -622,7 +621,7 @@ fn reject_address_with_invalid_peer_signature() { let mut tester = DhtValueFoundTester::new(); let mut kv_pairs = block_on(build_dht_event( vec![tester.multiaddr_with_peer_id(1)], - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, Some(&TestSigner { keypair: &tester.remote_node_key }), )); @@ -644,7 +643,7 @@ fn reject_address_without_peer_signature() { let mut tester = DhtValueFoundTester::new(); let kv_pairs = block_on(build_dht_event::( vec![tester.multiaddr_with_peer_id(1)], - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, None, )); @@ -662,7 +661,7 @@ fn do_not_cache_addresses_without_peer_id() { "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse().unwrap(); let kv_pairs = block_on(build_dht_event::( vec![multiaddr_with_peer_id.clone(), multiaddr_without_peer_id], - tester.remote_authority_public.clone().into(), + tester.remote_authority_public.into(), &tester.remote_key_store, None, )); diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index ebd85c8dea05d..762a8f7e5d544 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -109,7 +109,7 @@ where } pub(crate) fn should_self_vote(&self, round: &(P, NumberFor)) -> bool { - Some(round.1.clone()) > self.best_done && + Some(round.1) > self.best_done && self.rounds.get(round).map(|tracker| !tracker.has_self_vote()).unwrap_or(true) } diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 134339009302b..f0257d179cb33 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -326,7 +326,7 @@ fn add_auth_change_digest(header: &mut Header, new_auth_set: BeefyValidatorSet) } pub(crate) fn make_beefy_ids(keys: &[BeefyKeyring]) -> Vec { - keys.iter().map(|key| key.clone().public().into()).collect() + keys.iter().map(|&key| key.public().into()).collect() } pub(crate) fn create_beefy_keystore(authority: BeefyKeyring) -> SyncCryptoStorePtr { diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 2c4985c0e6966..9f1938fa91c33 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -1134,7 +1134,7 @@ pub(crate) mod tests { let mmr_root_hash = H256::random(); header.digest_mut().push(DigestItem::Consensus( BEEFY_ENGINE_ID, - ConsensusLog::::MmrRoot(mmr_root_hash.clone()).encode(), + ConsensusLog::::MmrRoot(mmr_root_hash).encode(), )); // verify validator set is correctly extracted from digest diff --git a/client/cli/src/commands/chain_info_cmd.rs b/client/cli/src/commands/chain_info_cmd.rs index c65092290cf2d..8fea0d7058e50 100644 --- a/client/cli/src/commands/chain_info_cmd.rs +++ 
b/client/cli/src/commands/chain_info_cmd.rs @@ -77,7 +77,7 @@ impl ChainInfoCmd { state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), state_pruning: config.state_pruning.clone(), source: config.database.clone(), - blocks_pruning: config.blocks_pruning.clone(), + blocks_pruning: config.blocks_pruning, }; let backend = sc_service::new_db_backend::(db_config)?; let info: ChainInfo = backend.blockchain().info().into(); diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index ee8be727dcdac..92fe1fa3cf29d 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -821,7 +821,7 @@ mod tests { block_import: client, env: environ, keystore: keystore.into(), - sync_oracle: DummyOracle.clone(), + sync_oracle: DummyOracle, justification_sync_link: (), force_authoring: false, backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), @@ -873,7 +873,7 @@ mod tests { block_import: client.clone(), env: environ, keystore: keystore.into(), - sync_oracle: DummyOracle.clone(), + sync_oracle: DummyOracle, justification_sync_link: (), force_authoring: false, backoff_authoring_blocks: Option::<()>::None, diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 43df26a9a29ae..896bfaeda1dc9 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -310,7 +310,7 @@ mod tests { assert!(claim_slot(10.into(), &epoch, &keystore).is_none()); - epoch.authorities.push((valid_public_key.clone().into(), 10)); + epoch.authorities.push((valid_public_key.into(), 10)); assert_eq!(claim_slot(10.into(), &epoch, &keystore).unwrap().1, valid_public_key.into()); } } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index c0a7a8c6c013a..5ecdb42f7f177 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -152,7 +152,7 @@ impl DummyProposer { // that will re-check the randomness logic off-chain. 
let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { authorities: epoch.authorities.clone(), - randomness: epoch.randomness.clone(), + randomness: epoch.randomness, }) .encode(); let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index 9fe293142050b..84ccba990e599 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -535,7 +535,7 @@ mod tests { _number: BlockNumber, _success: bool, ) { - self.events.push(Event::JustificationImported(hash.clone())) + self.events.push(Event::JustificationImported(*hash)) } } diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index fee69613debf0..2e0186495db5e 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -1063,7 +1063,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) @@ -1080,7 +1080,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"X", 1, *b"0", incremented_epoch) @@ -1145,7 +1145,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) @@ -1162,7 +1162,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"X", 1, *b"0", incremented_epoch) @@ -1220,7 +1220,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"1", 1, *b"0", incremented_epoch) @@ -1330,7 +1330,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) @@ -1347,7 +1347,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&epoch_b_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"B", 201, *b"A", incremented_epoch) @@ -1364,7 +1364,7 @@ mod tests { let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() - .increment(next_descriptor.clone()); + .increment(next_descriptor); epoch_changes .import(&is_descendent_of, *b"C", 1, *b"0", incremented_epoch) diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index ba63666f3e46c..c5dd169e281f2 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -355,7 +355,7 @@ mod tests { assert_eq!( created_block, CreatedBlock { - 
hash: created_block.hash.clone(), + hash: created_block.hash, aux: ImportedAux { header_only: false, clear_justification_requests: false, @@ -422,7 +422,7 @@ mod tests { assert_eq!( created_block, CreatedBlock { - hash: created_block.hash.clone(), + hash: created_block.hash, aux: ImportedAux { header_only: false, clear_justification_requests: false, @@ -502,7 +502,7 @@ mod tests { assert_eq!( created_block, CreatedBlock { - hash: created_block.hash.clone(), + hash: created_block.hash, aux: ImportedAux { header_only: false, clear_justification_requests: false, diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 3214ee8f0d738..6b644ad53d52b 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -3390,7 +3390,7 @@ pub(crate) mod tests { assert!(backend.remove_leaf_block(&best_hash).is_err()); assert!(backend.have_state_at(&prev_hash, 1)); backend.remove_leaf_block(&prev_hash).unwrap(); - assert_eq!(None, backend.blockchain().header(BlockId::hash(prev_hash.clone())).unwrap()); + assert_eq!(None, backend.blockchain().header(BlockId::hash(prev_hash)).unwrap()); assert!(!backend.have_state_at(&prev_hash, 1)); } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 8326946999946..d9253fe09eb50 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -1223,7 +1223,7 @@ mod tests { let mut s = CachingState::new( InMemoryBackend::::default(), shared.clone(), - Some(root_parent.clone()), + Some(root_parent), ); let key = H256::random()[..].to_vec(); @@ -1307,14 +1307,14 @@ mod tests { let mut s = CachingState::new( InMemoryBackend::::default(), shared.clone(), - Some(root_parent.clone()), + Some(root_parent), ); s.cache.sync_cache( &[], &[], vec![(key.clone(), Some(vec![2]))], vec![], - Some(h0.clone()), + Some(h0), Some(0), true, ); diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index a54254810b68b..768a6e36e2390 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -276,12 +276,11 @@ impl<'a> Sandbox for HostContext<'a> { .ok_or("Runtime doesn't have a table; sandbox is unavailable")?; let table_item = table.get(&mut self.caller, dispatch_thunk_id); - table_item + *table_item .ok_or("dispatch_thunk_id is out of bounds")? .funcref() .ok_or("dispatch_thunk_idx should be a funcref")? .ok_or("dispatch_thunk_idx should point to actual func")? - .clone() }; let guest_env = match sandbox::GuestEnvironment::decode(self.sandbox_store(), raw_env_def) { diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 5d272accd3524..feded4008068d 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -107,8 +107,7 @@ impl EntryPoint { ) -> std::result::Result { let entrypoint = func .typed::<(u32, u32), u64, _>(ctx) - .map_err(|_| "Invalid signature for direct entry point")? - .clone(); + .map_err(|_| "Invalid signature for direct entry point")?; Ok(Self { call_type: EntryPointType::Direct { entrypoint } }) } @@ -119,8 +118,7 @@ impl EntryPoint { ) -> std::result::Result { let dispatcher = dispatcher .typed::<(u32, u32, u32), u64, _>(ctx) - .map_err(|_| "Invalid signature for wrapped entry point")? 
- .clone(); + .map_err(|_| "Invalid signature for wrapped entry point")?; Ok(Self { call_type: EntryPointType::Wrapped { func, dispatcher } }) } } @@ -214,9 +212,8 @@ impl InstanceWrapper { Error::from(format!("Exported method {} is not found", method)) })?; let func = extern_func(&export) - .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))? - .clone(); - EntryPoint::direct(func, &self.store).map_err(|_| { + .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))?; + EntryPoint::direct(*func, &self.store).map_err(|_| { Error::from(format!("Exported function '{}' has invalid signature.", method)) })? }, @@ -231,10 +228,9 @@ impl InstanceWrapper { let func = val .funcref() .ok_or(Error::TableElementIsNotAFunction(func_ref))? - .ok_or(Error::FunctionRefIsNull(func_ref))? - .clone(); + .ok_or(Error::FunctionRefIsNull(func_ref))?; - EntryPoint::direct(func, &self.store).map_err(|_| { + EntryPoint::direct(*func, &self.store).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for direct call.", func_ref, @@ -252,10 +248,9 @@ impl InstanceWrapper { let dispatcher = val .funcref() .ok_or(Error::TableElementIsNotAFunction(dispatcher_ref))? - .ok_or(Error::FunctionRefIsNull(dispatcher_ref))? - .clone(); + .ok_or(Error::FunctionRefIsNull(dispatcher_ref))?; - EntryPoint::wrapped(dispatcher, func, &self.store).map_err(|_| { + EntryPoint::wrapped(*dispatcher, func, &self.store).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for wrapped call.", dispatcher_ref, @@ -315,9 +310,8 @@ fn get_linear_memory(instance: &Instance, ctx: impl AsContextMut) -> Result(&'a mut self) -> impl WasmModule + 'a { + fn build(&mut self) -> impl WasmModule + '_ { let blob = { let wasm: Vec; diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 9e9f2fb98b0d1..5f94a4d1b65be 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -1729,7 +1729,7 @@ mod tests { assert!(res.unwrap().is_none()); // connect & disconnect. 
- peers.new_peer(id.clone(), ObservedRole::Authority); + peers.new_peer(id, ObservedRole::Authority); peers.peer_disconnected(&id); let res = peers.update_peer_state(&id, update.clone()); @@ -1753,7 +1753,7 @@ mod tests { let mut peers = Peers::default(); let id = PeerId::random(); - peers.new_peer(id.clone(), ObservedRole::Authority); + peers.new_peer(id, ObservedRole::Authority); let mut check_update = move |update: NeighborPacket<_>| { let view = peers.update_peer_state(&id, update.clone()).unwrap().unwrap(); @@ -1773,7 +1773,7 @@ mod tests { let mut peers = Peers::default(); let id = PeerId::random(); - peers.new_peer(id.clone(), ObservedRole::Authority); + peers.new_peer(id, ObservedRole::Authority); peers .update_peer_state( @@ -1964,7 +1964,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded let mut inner = val.inner.write(); - inner.peers.new_peer(peer.clone(), ObservedRole::Authority); + inner.peers.new_peer(peer, ObservedRole::Authority); let res = inner.handle_catch_up_request( &peer, @@ -2005,7 +2005,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer, ObservedRole::Authority); let send_request = |set_id, round| { let mut inner = val.inner.write(); @@ -2060,7 +2060,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded. let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer, ObservedRole::Authority); let import_neighbor_message = |set_id, round| { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( @@ -2130,7 +2130,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded. let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer, ObservedRole::Authority); // importing a neighbor message from a peer in the same set in a later // round should lead to a catch up request but since they're disabled @@ -2158,11 +2158,8 @@ mod tests { let peer_authority = PeerId::random(); let peer_full = PeerId::random(); - val.inner - .write() - .peers - .new_peer(peer_authority.clone(), ObservedRole::Authority); - val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); + val.inner.write().peers.new_peer(peer_authority, ObservedRole::Authority); + val.inner.write().peers.new_peer(peer_full, ObservedRole::Full); let import_neighbor_message = |peer| { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( @@ -2211,7 +2208,7 @@ mod tests { // add the peer making the requests to the validator, otherwise it is // discarded. 
let peer_full = PeerId::random(); - val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); + val.inner.write().peers.new_peer(peer_full, ObservedRole::Full); let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( &peer_full, @@ -2271,12 +2268,9 @@ mod tests { full_nodes.resize_with(30, || PeerId::random()); for i in 0..30 { - val.inner - .write() - .peers - .new_peer(authorities[i].clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(authorities[i], ObservedRole::Authority); - val.inner.write().peers.new_peer(full_nodes[i].clone(), ObservedRole::Full); + val.inner.write().peers.new_peer(full_nodes[i], ObservedRole::Full); } let test = |rounds_elapsed, peers| { @@ -2355,7 +2349,7 @@ mod tests { // add a new light client as peer let light_peer = PeerId::random(); - val.inner.write().peers.new_peer(light_peer.clone(), ObservedRole::Light); + val.inner.write().peers.new_peer(light_peer, ObservedRole::Light); assert!(!val.message_allowed()( &light_peer, @@ -2427,7 +2421,7 @@ mod tests { // add a new peer at set id 1 let peer1 = PeerId::random(); - val.inner.write().peers.new_peer(peer1.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer1, ObservedRole::Authority); val.inner .write() @@ -2440,7 +2434,7 @@ mod tests { // peer2 will default to set id 0 let peer2 = PeerId::random(); - val.inner.write().peers.new_peer(peer2.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer2, ObservedRole::Authority); // create a commit for round 1 of set id 1 // targeting a block at height 2 diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 59935cef6a095..5b2436b23351e 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -177,7 +177,7 @@ impl sc_network_gossip::ValidatorContext for TestNetwork { fn send_message(&mut self, who: &PeerId, data: Vec) { ::write_notification( self, - who.clone(), + *who, grandpa_protocol_name::NAME.into(), data, ); @@ -280,7 +280,7 @@ pub(crate) fn make_test_network() -> (impl Future, TestNetwork) } fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { - keys.iter().map(|key| key.clone().public().into()).map(|id| (id, 1)).collect() + keys.iter().map(|&key| key.public().into()).map(|id| (id, 1)).collect() } struct NoopContext; @@ -305,8 +305,7 @@ fn good_commit_leads_to_relay() { let target_hash: Hash = [1; 32].into(); let target_number = 500; - let precommit = - finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; + let precommit = finality_grandpa::Precommit { target_hash, target_number }; let payload = sp_finality_grandpa::localized_payload( round, set_id, @@ -362,19 +361,19 @@ fn good_commit_leads_to_relay() { // asking for global communication will cause the test network // to send us an event asking us for a stream. use it to // send a message. 
- let sender_id = id.clone(); + let sender_id = id; let send_message = tester.filter_network_events(move |event| match event { Event::EventStream(sender) => { // Add the sending peer and send the commit let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id.clone(), + remote: sender_id, protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id.clone(), + remote: sender_id, messages: vec![( grandpa_protocol_name::NAME.into(), commit_to_send.clone().into(), @@ -384,7 +383,7 @@ fn good_commit_leads_to_relay() { // Add a random peer which will be the recipient of this message let receiver_id = PeerId::random(); let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: receiver_id.clone(), + remote: receiver_id, protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, @@ -456,8 +455,7 @@ fn bad_commit_leads_to_report() { let target_hash: Hash = [1; 32].into(); let target_number = 500; - let precommit = - finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; + let precommit = finality_grandpa::Precommit { target_hash, target_number }; let payload = sp_finality_grandpa::localized_payload( round, set_id, @@ -513,17 +511,17 @@ fn bad_commit_leads_to_report() { // asking for global communication will cause the test network // to send us an event asking us for a stream. use it to // send a message. - let sender_id = id.clone(); + let sender_id = id; let send_message = tester.filter_network_events(move |event| match event { Event::EventStream(sender) => { let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id.clone(), + remote: sender_id, protocol: grandpa_protocol_name::NAME.into(), negotiated_fallback: None, role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id.clone(), + remote: sender_id, messages: vec![( grandpa_protocol_name::NAME.into(), commit_to_send.clone().into(), diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index 44abb4b95beba..6c3b6aa826ded 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -74,7 +74,7 @@ impl GrandpaJustification { .iter() .map(|signed| &signed.precommit) .min_by_key(|precommit| precommit.target_number) - .map(|precommit| (precommit.target_hash.clone(), precommit.target_number)) + .map(|precommit| (precommit.target_hash, precommit.target_number)) { None => return error(), Some(base) => base, @@ -176,7 +176,7 @@ impl GrandpaJustification { .iter() .map(|signed| &signed.precommit) .min_by_key(|precommit| precommit.target_number) - .map(|precommit| precommit.target_hash.clone()) + .map(|precommit| precommit.target_hash) .expect( "can only fail if precommits is empty; \ commit has been validated above; \ diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 623ac577c5579..3dd21d51b6a2d 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -170,7 +170,7 @@ pub(crate) struct RuntimeApi { impl ProvideRuntimeApi for TestApi { type Api = RuntimeApi; - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + fn runtime_api(&self) -> ApiRef<'_, Self::Api> { RuntimeApi { inner: self.clone() }.into() } } @@ -210,7 +210,7 @@ impl 
GenesisAuthoritySetProvider for TestApi { const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { - keys.iter().map(|key| key.clone().public().into()).map(|id| (id, 1)).collect() + keys.iter().map(|&key| key.public().into()).map(|id| (id, 1)).collect() } fn create_keystore(authority: Ed25519Keyring) -> (SyncCryptoStorePtr, tempfile::TempDir) { @@ -533,7 +533,7 @@ fn transition_3_voters_twice_1_full_observer() { { let net = net.clone(); let client = net.lock().peers[0].client().clone(); - let peers_c = peers_c.clone(); + let peers_c = *peers_c; // wait for blocks to be finalized before generating new ones let block_production = client diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index 6adce0d920209..fe7caf74422db 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -587,7 +587,7 @@ mod tests { fn import_header(&self, header: Header) { let hash = header.hash(); - let number = header.number().clone(); + let number = *header.number(); self.known_blocks.lock().insert(hash, number); self.sender @@ -608,7 +608,7 @@ mod tests { impl BlockStatusT for TestBlockStatus { fn block_number(&self, hash: Hash) -> Result, Error> { - Ok(self.inner.lock().get(&hash).map(|x| x.clone())) + Ok(self.inner.lock().get(&hash).map(|x| *x)) } } diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 051c7f2c03658..fb7754fc0169a 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -419,7 +419,7 @@ mod tests { } let best = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap(); - let best_number = best.number().clone(); + let best_number = *best.number(); for i in 0u32..5 { let base = client.header(&BlockId::Number(i.into())).unwrap().unwrap(); diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 8a6c3358e4409..b5c957024701d 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -520,7 +520,7 @@ mod tests { // Register the remote peer. event_sender .start_send(Event::NotificationStreamOpened { - remote: remote_peer.clone(), + remote: remote_peer, protocol: protocol.clone(), negotiated_fallback: None, role: ObservedRole::Authority, @@ -532,7 +532,7 @@ mod tests { .iter() .cloned() .map(|m| Event::NotificationsReceived { - remote: remote_peer.clone(), + remote: remote_peer, messages: vec![(protocol.clone(), m.into())], }) .collect::>(); @@ -562,10 +562,7 @@ mod tests { for subscriber in subscribers.iter_mut() { assert_eq!( subscriber.next(), - Some(TopicNotification { - message: message.clone(), - sender: Some(remote_peer.clone()), - }), + Some(TopicNotification { message: message.clone(), sender: Some(remote_peer) }), ); } } @@ -661,7 +658,7 @@ mod tests { // Create channels. let (txs, mut rxs) = channels .iter() - .map(|ChannelLengthAndTopic { length, topic }| (topic.clone(), channel(*length))) + .map(|ChannelLengthAndTopic { length, topic }| (*topic, channel(*length))) .fold((vec![], vec![]), |mut acc, (topic, (tx, rx))| { acc.0.push((topic, tx)); acc.1.push((topic, rx)); @@ -683,7 +680,7 @@ mod tests { // Register the remote peer. 
event_sender .start_send(Event::NotificationStreamOpened { - remote: remote_peer.clone(), + remote: remote_peer, protocol: protocol.clone(), negotiated_fallback: None, role: ObservedRole::Authority, @@ -710,10 +707,7 @@ mod tests { .collect(); event_sender - .start_send(Event::NotificationsReceived { - remote: remote_peer.clone(), - messages, - }) + .start_send(Event::NotificationsReceived { remote: remote_peer, messages }) .expect("Event stream is unbounded; qed."); } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 4cc4e25529af4..3cb8d5bd9bab9 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -810,7 +810,7 @@ mod tests { .on_incoming( &mut network, // Unregistered peer. - remote.clone(), + remote, vec![vec![1, 2, 3]], ); diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index f3d1588c0280e..ab93662968dc2 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -1069,7 +1069,7 @@ mod tests { // Skip the first swarm as all other swarms already know it. .skip(1) .filter(|p| *p != n) - .map(|p| Swarm::local_peer_id(&swarms[p].0).clone()) + .map(|p| *Swarm::local_peer_id(&swarms[p].0)) .collect::>() }) .collect::>(); diff --git a/client/network/sync/src/blocks.rs b/client/network/sync/src/blocks.rs index 5fb1484675071..b8acd61a2009f 100644 --- a/client/network/sync/src/blocks.rs +++ b/client/network/sync/src/blocks.rs @@ -290,51 +290,51 @@ mod test { let peer2 = PeerId::random(); let blocks = generate_blocks(150); - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1..41)); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41..81)); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81..121)); + assert_eq!(bc.needed_blocks(peer0, 40, 150, 0, 1, 200), Some(1..41)); + assert_eq!(bc.needed_blocks(peer1, 40, 150, 0, 1, 200), Some(41..81)); + assert_eq!(bc.needed_blocks(peer2, 40, 150, 0, 1, 200), Some(81..121)); bc.clear_peer_download(&peer1); - bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); + bc.insert(41, blocks[41..81].to_vec(), peer1); assert_eq!(bc.ready_blocks(1), vec![]); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121..151)); + assert_eq!(bc.needed_blocks(peer1, 40, 150, 0, 1, 200), Some(121..151)); bc.clear_peer_download(&peer0); - bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); + bc.insert(1, blocks[1..11].to_vec(), peer0); - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11..41)); + assert_eq!(bc.needed_blocks(peer0, 40, 150, 0, 1, 200), Some(11..41)); assert_eq!( bc.ready_blocks(1), blocks[1..11] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer0) }) .collect::>() ); bc.clear_peer_download(&peer0); - bc.insert(11, blocks[11..41].to_vec(), peer0.clone()); + bc.insert(11, blocks[11..41].to_vec(), peer0); let ready = bc.ready_blocks(12); assert_eq!( ready[..30], blocks[11..41] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer0) }) .collect::>()[..] ); assert_eq!( ready[30..], blocks[41..81] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer1) }) .collect::>()[..] 
); bc.clear_peer_download(&peer2); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81..121)); + assert_eq!(bc.needed_blocks(peer2, 40, 150, 80, 1, 200), Some(81..121)); bc.clear_peer_download(&peer2); - bc.insert(81, blocks[81..121].to_vec(), peer2.clone()); + bc.insert(81, blocks[81..121].to_vec(), peer2); bc.clear_peer_download(&peer1); - bc.insert(121, blocks[121..150].to_vec(), peer1.clone()); + bc.insert(121, blocks[121..150].to_vec(), peer1); assert_eq!(bc.ready_blocks(80), vec![]); let ready = bc.ready_blocks(81); @@ -342,14 +342,14 @@ mod test { ready[..40], blocks[81..121] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer2) }) .collect::>()[..] ); assert_eq!( ready[40..], blocks[121..150] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer1) }) .collect::>()[..] ); } @@ -365,10 +365,10 @@ mod test { bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); let peer0 = PeerId::random(); - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1..100)); - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200), None); // too far ahead + assert_eq!(bc.needed_blocks(peer0, 128, 10000, 000, 1, 200), Some(1..100)); + assert_eq!(bc.needed_blocks(peer0, 128, 10000, 600, 1, 200), None); // too far ahead assert_eq!( - bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000), + bc.needed_blocks(peer0, 128, 10000, 600, 1, 200000), Some(100 + 128..100 + 128 + 128) ); } @@ -382,11 +382,11 @@ mod test { let blocks = generate_blocks(10); // count = 5, peer_best = 50, common = 39, max_parallel = 0, max_ahead = 200 - assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(40..45)); + assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(40..45)); // got a response on the request for `40..45` bc.clear_peer_download(&peer); - bc.insert(40, blocks[..5].to_vec(), peer.clone()); + bc.insert(40, blocks[..5].to_vec(), peer); // our "node" started on a fork, with its current best = 47, which is > common let ready = bc.ready_blocks(48); @@ -394,11 +394,11 @@ mod test { ready, blocks[..5] .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer) }) .collect::>() ); - assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(45..50)); + assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(45..50)); } #[test] @@ -410,12 +410,12 @@ mod test { let blocks = generate_blocks(10); // Request 2 ranges - assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(40..45)); - assert_eq!(bc.needed_blocks(peer.clone(), 5, 50, 39, 0, 200), Some(45..50)); + assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(40..45)); + assert_eq!(bc.needed_blocks(peer, 5, 50, 39, 0, 200), Some(45..50)); // got a response on the request for `40..50` bc.clear_peer_download(&peer); - bc.insert(40, blocks.to_vec(), peer.clone()); + bc.insert(40, blocks.to_vec(), peer); // request any blocks starting from 1000 or lower. 
let ready = bc.ready_blocks(1000); @@ -423,7 +423,7 @@ mod test { ready, blocks .iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer.clone()) }) + .map(|b| BlockData { block: b.clone(), origin: Some(peer) }) .collect::>() ); diff --git a/client/network/sync/src/extra_requests.rs b/client/network/sync/src/extra_requests.rs index 6206f8a61bcf4..0506bd542ff3b 100644 --- a/client/network/sync/src/extra_requests.rs +++ b/client/network/sync/src/extra_requests.rs @@ -446,16 +446,12 @@ mod tests { PeerSyncState::DownloadingJustification(r.0); } - let active = requests - .active_requests - .iter() - .map(|(p, &r)| (p.clone(), r)) - .collect::>(); + let active = requests.active_requests.iter().map(|(&p, &r)| (p, r)).collect::>(); for (peer, req) in &active { assert!(requests.failed_requests.get(req).is_none()); assert!(!requests.pending_requests.contains(req)); - assert!(requests.on_response::<()>(peer.clone(), None).is_none()); + assert!(requests.on_response::<()>(*peer, None).is_none()); assert!(requests.pending_requests.contains(req)); assert_eq!( 1, diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 2f837cd6c4f51..aae5f4de353fe 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -2614,15 +2614,15 @@ mod test { let (b1_hash, b1_number) = new_blocks(50); // add 2 peers at blocks that we don't have locally - sync.new_peer(peer_id1.clone(), Hash::random(), 42).unwrap(); - sync.new_peer(peer_id2.clone(), Hash::random(), 10).unwrap(); + sync.new_peer(peer_id1, Hash::random(), 42).unwrap(); + sync.new_peer(peer_id2, Hash::random(), 10).unwrap(); // we wil send block requests to these peers // for these blocks we don't know about assert!(sync.block_requests().all(|(p, _)| { *p == peer_id1 || *p == peer_id2 })); // add a new peer at a known block - sync.new_peer(peer_id3.clone(), b1_hash, b1_number).unwrap(); + sync.new_peer(peer_id3, b1_hash, b1_number).unwrap(); // we request a justification for a block we have locally sync.request_justification(&b1_hash, b1_number); @@ -2673,7 +2673,7 @@ mod test { data: Some(Vec::new()), }; - sync.push_block_announce_validation(peer_id.clone(), header.hash(), block_annnounce, true); + sync.push_block_announce_validation(*peer_id, header.hash(), block_annnounce, true); // Poll until we have procssed the block announcement block_on(poll_fn(|cx| loop { @@ -2790,8 +2790,8 @@ mod test { let block3_fork = build_block_at(block2.hash(), false); // Add two peers which are on block 1. - sync.new_peer(peer_id1.clone(), block1.hash(), 1).unwrap(); - sync.new_peer(peer_id2.clone(), block1.hash(), 1).unwrap(); + sync.new_peer(peer_id1, block1.hash(), 1).unwrap(); + sync.new_peer(peer_id2, block1.hash(), 1).unwrap(); // Tell sync that our best block is 3. 
sync.update_chain_info(&block3.hash(), 3); @@ -2885,9 +2885,9 @@ mod test { let best_block = blocks.last().unwrap().clone(); // Connect the node we will sync from - sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number()) + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) .unwrap(); - sync.new_peer(peer_id2.clone(), info.best_hash, 0).unwrap(); + sync.new_peer(peer_id2, info.best_hash, 0).unwrap(); let mut best_block_num = 0; while best_block_num < MAX_DOWNLOAD_AHEAD { @@ -2922,9 +2922,9 @@ mod test { .map(|b| { ( Ok(BlockImportStatus::ImportedUnknown( - b.header().number().clone(), + *b.header().number(), Default::default(), - Some(peer_id1.clone()), + Some(peer_id1), )), b.hash(), ) @@ -3034,7 +3034,7 @@ mod test { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) .unwrap(); send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); @@ -3059,7 +3059,7 @@ mod test { } // Now request and import the fork. - let mut best_block_num = finalized_block.header().number().clone() as u32; + let mut best_block_num = *finalized_block.header().number() as u32; while best_block_num < *fork_blocks.last().unwrap().header().number() as u32 - 1 { let request = get_block_request( &mut sync, @@ -3092,9 +3092,9 @@ mod test { .map(|b| { ( Ok(BlockImportStatus::ImportedUnknown( - b.header().number().clone(), + *b.header().number(), Default::default(), - Some(peer_id1.clone()), + Some(peer_id1), )), b.hash(), ) @@ -3165,7 +3165,7 @@ mod test { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) .unwrap(); send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); @@ -3190,7 +3190,7 @@ mod test { } // Now request and import the fork. 
- let mut best_block_num = finalized_block.header().number().clone() as u32; + let mut best_block_num = *finalized_block.header().number() as u32; let mut request = get_block_request( &mut sync, FromBlock::Number(MAX_BLOCKS_TO_REQUEST as u64 + best_block_num as u64), @@ -3231,9 +3231,9 @@ mod test { .map(|b| { ( Ok(BlockImportStatus::ImportedUnknown( - b.header().number().clone(), + *b.header().number(), Default::default(), - Some(peer_id1.clone()), + Some(peer_id1), )), b.hash(), ) @@ -3288,7 +3288,7 @@ mod test { let peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) .unwrap(); // Create a "new" header and announce it @@ -3320,7 +3320,7 @@ mod test { let peer_id1 = PeerId::random(); let best_block = blocks[3].clone(); - sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number()) + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) .unwrap(); sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index f09da200ff587..7d12458511cfd 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -40,7 +40,7 @@ async fn should_return_header() { Header { parent_hash: H256::from_low_u64_be(0), number: 0, - state_root: res.state_root.clone(), + state_root: res.state_root, extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), @@ -54,7 +54,7 @@ async fn should_return_header() { Header { parent_hash: H256::from_low_u64_be(0), number: 0, - state_root: res.state_root.clone(), + state_root: res.state_root, extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), @@ -93,7 +93,7 @@ async fn should_return_a_block() { header: Header { parent_hash: client.genesis_hash(), number: 1, - state_root: res.block.header.state_root.clone(), + state_root: res.block.header.state_root, extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), @@ -110,7 +110,7 @@ async fn should_return_a_block() { header: Header { parent_hash: client.genesis_hash(), number: 1, - state_root: res.block.header.state_root.clone(), + state_root: res.block.header.state_root, extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" .parse() .unwrap(), diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index f363b621d5bcd..9c4fa84c144c7 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -410,7 +410,7 @@ fn best_containing_with_genesis_block() { assert_eq!( genesis_hash.clone(), - block_on(longest_chain_select.finality_target(genesis_hash.clone(), None)).unwrap(), + block_on(longest_chain_select.finality_target(genesis_hash, None)).unwrap(), ); } @@ -1333,9 +1333,9 @@ fn respects_block_rules() { .block; let params = BlockCheckParams { - hash: block_ok.hash().clone(), + hash: block_ok.hash(), number: 0, - parent_hash: block_ok.header().parent_hash().clone(), + parent_hash: *block_ok.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1349,9 +1349,9 @@ fn respects_block_rules() { let block_not_ok = block_not_ok.build().unwrap().block; let params = BlockCheckParams { 
- hash: block_not_ok.hash().clone(), + hash: block_not_ok.hash(), number: 0, - parent_hash: block_not_ok.header().parent_hash().clone(), + parent_hash: *block_not_ok.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1372,15 +1372,15 @@ fn respects_block_rules() { let block_ok = block_ok.build().unwrap().block; let params = BlockCheckParams { - hash: block_ok.hash().clone(), + hash: block_ok.hash(), number: 1, - parent_hash: block_ok.header().parent_hash().clone(), + parent_hash: *block_ok.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, }; if record_only { - fork_rules.push((1, block_ok.hash().clone())); + fork_rules.push((1, block_ok.hash())); } assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); @@ -1391,9 +1391,9 @@ fn respects_block_rules() { let block_not_ok = block_not_ok.build().unwrap().block; let params = BlockCheckParams { - hash: block_not_ok.hash().clone(), + hash: block_not_ok.hash(), number: 1, - parent_hash: block_not_ok.header().parent_hash().clone(), + parent_hash: *block_not_ok.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1457,9 +1457,9 @@ fn returns_status_for_pruned_blocks() { let b1 = b1.build().unwrap().block; let check_block_a1 = BlockCheckParams { - hash: a1.hash().clone(), + hash: a1.hash(), number: 0, - parent_hash: a1.header().parent_hash().clone(), + parent_hash: *a1.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1494,9 +1494,9 @@ fn returns_status_for_pruned_blocks() { block_on(client.import_as_final(BlockOrigin::Own, a2.clone())).unwrap(); let check_block_a2 = BlockCheckParams { - hash: a2.hash().clone(), + hash: a2.hash(), number: 1, - parent_hash: a1.header().parent_hash().clone(), + parent_hash: *a1.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1528,9 +1528,9 @@ fn returns_status_for_pruned_blocks() { block_on(client.import_as_final(BlockOrigin::Own, a3.clone())).unwrap(); let check_block_a3 = BlockCheckParams { - hash: a3.hash().clone(), + hash: a3.hash(), number: 2, - parent_hash: a2.header().parent_hash().clone(), + parent_hash: *a2.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, @@ -1563,9 +1563,9 @@ fn returns_status_for_pruned_blocks() { ); let mut check_block_b1 = BlockCheckParams { - hash: b1.hash().clone(), + hash: b1.hash(), number: 0, - parent_hash: b1.header().parent_hash().clone(), + parent_hash: *b1.header().parent_hash(), allow_missing_state: false, allow_missing_parent: false, import_existing: false, diff --git a/client/transaction-pool/benches/basics.rs b/client/transaction-pool/benches/basics.rs index c3577a45faf07..a7991269439ce 100644 --- a/client/transaction-pool/benches/basics.rs +++ b/client/transaction-pool/benches/basics.rs @@ -66,7 +66,7 @@ impl ChainApi for TestApi { uxt: ::Extrinsic, ) -> Self::ValidationFuture { let nonce = uxt.transfer().nonce; - let from = uxt.transfer().from.clone(); + let from = uxt.transfer().from; match self.block_id_to_number(at) { Ok(Some(num)) if num > 5 => return ready(Ok(Err(InvalidTransaction::Stale.into()))), @@ -76,7 +76,7 @@ impl ChainApi for TestApi { ready(Ok(Ok(ValidTransaction { priority: 4, requires: if nonce > 1 && self.nonce_dependant { - vec![to_tag(nonce - 1, from.clone())] + vec![to_tag(nonce - 
1, from)] } else { vec![] }, diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 4ce7954f8d479..19acbddbe7843 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -641,7 +641,7 @@ mod tests { .unwrap(); // when - block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1.clone()])).unwrap(); + block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1])).unwrap(); // then assert!(pool.validated_pool.is_banned(&hash1)); @@ -793,12 +793,8 @@ mod tests { assert_eq!(pool.validated_pool().status().future, 0); // when - block_on(pool.prune_tags( - &BlockId::Number(2), - vec![vec![0u8]], - vec![watcher.hash().clone()], - )) - .unwrap(); + block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![*watcher.hash()])) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); diff --git a/client/transaction-pool/src/graph/rotator.rs b/client/transaction-pool/src/graph/rotator.rs index c91c8e407bc0f..47e00a1292155 100644 --- a/client/transaction-pool/src/graph/rotator.rs +++ b/client/transaction-pool/src/graph/rotator.rs @@ -120,7 +120,7 @@ mod tests { let tx = Transaction { data: (), bytes: 1, - hash: hash.clone(), + hash, priority: 5, valid_till: 1, requires: vec![], diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index d6ea5ab8c0625..17c2cfa8a1e06 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -387,7 +387,7 @@ fn should_push_watchers_during_maintenance() { let header_hash = header.hash(); block_on(pool.maintain(block_event(header))); - let event = ChainEvent::Finalized { hash: header_hash.clone(), tree_route: Arc::from(vec![]) }; + let event = ChainEvent::Finalized { hash: header_hash, tree_route: Arc::from(vec![]) }; block_on(pool.maintain(event)); // then @@ -398,24 +398,24 @@ fn should_push_watchers_during_maintenance() { futures::executor::block_on_stream(watcher0).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone()) + TransactionStatus::InBlock(header_hash), + TransactionStatus::Finalized(header_hash) ], ); assert_eq!( futures::executor::block_on_stream(watcher1).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone()) + TransactionStatus::InBlock(header_hash), + TransactionStatus::Finalized(header_hash) ], ); assert_eq!( futures::executor::block_on_stream(watcher2).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone()) + TransactionStatus::InBlock(header_hash), + TransactionStatus::Finalized(header_hash) ], ); } @@ -573,7 +573,7 @@ fn fork_aware_finalization() { for (canon_watcher, h) in canon_watchers { let mut stream = futures::executor::block_on_stream(canon_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(h.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(h))); assert_eq!(stream.next(), Some(TransactionStatus::Finalized(h))); assert_eq!(stream.next(), None); } @@ -581,22 +581,22 @@ fn fork_aware_finalization() { { let mut stream = futures::executor::block_on_stream(from_dave_watcher); assert_eq!(stream.next(), 
Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c2.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c2))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(c2))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d2.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d2))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(d2))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1))); assert_eq!(stream.next(), None); } } @@ -646,9 +646,9 @@ fn prune_and_retract_tx_at_same_time() { { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2))); assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2))); assert_eq!(stream.next(), None); } @@ -781,7 +781,7 @@ fn resubmit_from_retracted_fork() { let e1 = { let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx4.clone())) .expect("1. Imported"); - let header = pool.api().push_block_with_parent(d1.clone(), vec![tx4.clone()], true); + let header = pool.api().push_block_with_parent(d1, vec![tx4.clone()], true); assert_eq!(pool.status().ready, 2); header.hash() }; @@ -790,7 +790,7 @@ fn resubmit_from_retracted_fork() { let f1_header = { let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx5.clone())) .expect("1. Imported"); - let header = pool.api().push_block_with_parent(e1.clone(), vec![tx5.clone()], true); + let header = pool.api().push_block_with_parent(e1, vec![tx5.clone()], true); // Don't announce the block event to the pool directly, because we will // re-org to this block. 
assert_eq!(pool.status().ready, 3); diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs index 85c91b451d351..14265cad5aa76 100644 --- a/frame/alliance/src/tests.rs +++ b/frame/alliance/src/tests.rs @@ -76,7 +76,7 @@ fn vote_works() { Box::new(proposal.clone()), proposal_len )); - assert_ok!(Alliance::vote(Origin::signed(2), hash.clone(), 0, true)); + assert_ok!(Alliance::vote(Origin::signed(2), hash, 0, true)); let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!( @@ -85,12 +85,12 @@ fn vote_works() { record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 0, - proposal_hash: hash.clone(), + proposal_hash: hash, threshold: 3 })), record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { account: 2, - proposal_hash: hash.clone(), + proposal_hash: hash, voted: true, yes: 1, no: 0, @@ -114,7 +114,7 @@ fn veto_works() { )); // only set_rule/elevate_ally can be veto assert_noop!( - Alliance::veto(Origin::signed(1), hash.clone()), + Alliance::veto(Origin::signed(1), hash), Error::::NotVetoableProposal ); @@ -131,11 +131,11 @@ fn veto_works() { // only founder have veto rights, 3 is fellow assert_noop!( - Alliance::veto(Origin::signed(3), vetoable_hash.clone()), + Alliance::veto(Origin::signed(3), vetoable_hash), Error::::NotFounder ); - assert_ok!(Alliance::veto(Origin::signed(2), vetoable_hash.clone())); + assert_ok!(Alliance::veto(Origin::signed(2), vetoable_hash)); let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!( System::events(), @@ -143,17 +143,17 @@ fn veto_works() { record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 0, - proposal_hash: hash.clone(), + proposal_hash: hash, threshold: 3 })), record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 1, - proposal_hash: vetoable_hash.clone(), + proposal_hash: vetoable_hash, threshold: 3 })), record(mock::Event::AllianceMotion(AllianceMotionEvent::Disapproved { - proposal_hash: vetoable_hash.clone() + proposal_hash: vetoable_hash })), ] ); @@ -173,16 +173,10 @@ fn close_works() { Box::new(proposal.clone()), proposal_len )); - assert_ok!(Alliance::vote(Origin::signed(1), hash.clone(), 0, true)); - assert_ok!(Alliance::vote(Origin::signed(2), hash.clone(), 0, true)); - assert_ok!(Alliance::vote(Origin::signed(3), hash.clone(), 0, true)); - assert_ok!(Alliance::close( - Origin::signed(1), - hash.clone(), - 0, - proposal_weight, - proposal_len - )); + assert_ok!(Alliance::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Alliance::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Alliance::vote(Origin::signed(3), hash, 0, true)); + assert_ok!(Alliance::close(Origin::signed(1), hash, 0, proposal_weight, proposal_len)); let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!( @@ -191,40 +185,40 @@ fn close_works() { record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 0, - proposal_hash: hash.clone(), + proposal_hash: hash, threshold: 3 })), record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { account: 1, - proposal_hash: hash.clone(), + proposal_hash: hash, voted: true, yes: 1, no: 0, })), record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { account: 2, - proposal_hash: hash.clone(), + proposal_hash: hash, voted: true, yes: 2, no: 0, })), 
record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { account: 3, - proposal_hash: hash.clone(), + proposal_hash: hash, voted: true, yes: 3, no: 0, })), record(mock::Event::AllianceMotion(AllianceMotionEvent::Closed { - proposal_hash: hash.clone(), + proposal_hash: hash, yes: 3, no: 0, })), record(mock::Event::AllianceMotion(AllianceMotionEvent::Approved { - proposal_hash: hash.clone() + proposal_hash: hash })), record(mock::Event::AllianceMotion(AllianceMotionEvent::Executed { - proposal_hash: hash.clone(), + proposal_hash: hash, result: Err(DispatchError::BadOrigin), })) ] diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 67690e2b28ec1..0fd4dd3281516 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -115,11 +115,11 @@ thread_local! { pub struct TestFreezer; impl FrozenBalance for TestFreezer { fn frozen_balance(asset: u32, who: &u64) -> Option { - FROZEN.with(|f| f.borrow().get(&(asset, who.clone())).cloned()) + FROZEN.with(|f| f.borrow().get(&(asset, *who)).cloned()) } fn died(asset: u32, who: &u64) { - HOOKS.with(|h| h.borrow_mut().push(Hook::Died(asset, who.clone()))); + HOOKS.with(|h| h.borrow_mut().push(Hook::Died(asset, *who))); // Sanity check: dead accounts have no balance. assert!(Assets::balance(asset, *who).is_zero()); } diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 1ddf3888d3c96..54643001ad6fc 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -333,7 +333,7 @@ pub mod pallet { ); swap.action.cancel(&swap.source); - PendingSwaps::::remove(&target, hashed_proof.clone()); + PendingSwaps::::remove(&target, hashed_proof); Self::deposit_event(Event::SwapCancelled { account: target, proof: hashed_proof }); diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 2352e7852d090..688d3ff17f786 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -102,7 +102,7 @@ fn two_party_successful_swap() { AtomicSwap::create_swap( Origin::signed(A), B, - hashed_proof.clone(), + hashed_proof, BalanceSwapAction::new(50), 1000, ) @@ -117,7 +117,7 @@ fn two_party_successful_swap() { AtomicSwap::create_swap( Origin::signed(B), A, - hashed_proof.clone(), + hashed_proof, BalanceSwapAction::new(75), 1000, ) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 5ca74e681e5dd..3b186236dd0fa 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1416,12 +1416,7 @@ mod tests { loader.counter += 1; loader.map.insert( hash, - MockExecutable { - func: Rc::new(f), - func_type, - code_hash: hash.clone(), - refcount: 1, - }, + MockExecutable { func: Rc::new(f), func_type, code_hash: hash, refcount: 1 }, ); hash }) @@ -2189,7 +2184,6 @@ mod tests { let dummy_ch = MockLoader::insert(Call, |_, _| exec_success()); let instantiated_contract_address = Rc::new(RefCell::new(None::>)); let instantiator_ch = MockLoader::insert(Call, { - let dummy_ch = dummy_ch.clone(); let instantiated_contract_address = Rc::clone(&instantiated_contract_address); move |ctx, _| { // Instantiate a contract and save it's address in `instantiated_contract_address`. @@ -2252,7 +2246,6 @@ mod tests { fn instantiation_traps() { let dummy_ch = MockLoader::insert(Constructor, |_, _| Err("It's a trap!".into())); let instantiator_ch = MockLoader::insert(Call, { - let dummy_ch = dummy_ch.clone(); move |ctx, _| { // Instantiate a contract and save it's address in `instantiated_contract_address`. 
assert_matches!( diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 30417d8544489..204908cc4a989 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -139,7 +139,7 @@ impl TestExtension { } fn last_seen_inputs() -> (u32, u32, u32, u32) { - TEST_EXTENSION.with(|e| e.borrow().last_seen_inputs.clone()) + TEST_EXTENSION.with(|e| e.borrow().last_seen_inputs) } } @@ -3478,8 +3478,8 @@ fn set_code_hash() { phase: Phase::Initialization, event: Event::Contracts(crate::Event::ContractCodeUpdated { contract: contract_addr.clone(), - new_code_hash: new_code_hash.clone(), - old_code_hash: code_hash.clone(), + new_code_hash, + old_code_hash: code_hash, }), topics: vec![], }, diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 3dd5da187b258..02a360fe86b45 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -403,7 +403,7 @@ mod tests { salt: &[u8], ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { self.instantiates.push(InstantiateEntry { - code_hash: code_hash.clone(), + code_hash, value, data: data.to_vec(), gas_left: gas_limit, @@ -541,7 +541,7 @@ mod tests { signature: &[u8; 65], message_hash: &[u8; 32], ) -> Result<[u8; 33], ()> { - self.ecdsa_recover.borrow_mut().push((signature.clone(), message_hash.clone())); + self.ecdsa_recover.borrow_mut().push((*signature, *message_hash)); Ok([3; 33]) } fn contract_info(&mut self) -> &mut crate::ContractInfo { diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs index 5d4a9f2a7cbfc..6ac34a38723ce 100644 --- a/frame/democracy/src/tests/external_proposing.rs +++ b/frame/democracy/src/tests/external_proposing.rs @@ -30,7 +30,7 @@ fn veto_external_works() { assert!(>::exists()); let h = set_balance_proposal_hash_and_note(2); - assert_ok!(Democracy::veto_external(Origin::signed(3), h.clone())); + assert_ok!(Democracy::veto_external(Origin::signed(3), h)); // cancelled. assert!(!>::exists()); // fails - same proposal can't be resubmitted. @@ -55,13 +55,10 @@ fn veto_external_works() { assert!(>::exists()); // 3 can't veto the same thing twice. - assert_noop!( - Democracy::veto_external(Origin::signed(3), h.clone()), - Error::::AlreadyVetoed - ); + assert_noop!(Democracy::veto_external(Origin::signed(3), h), Error::::AlreadyVetoed); // 4 vetoes. - assert_ok!(Democracy::veto_external(Origin::signed(4), h.clone())); + assert_ok!(Democracy::veto_external(Origin::signed(4), h)); // cancelled again. 
assert!(!>::exists()); diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs index db06696ca5c95..65e5466857dc1 100644 --- a/frame/democracy/src/tests/public_proposals.rs +++ b/frame/democracy/src/tests/public_proposals.rs @@ -115,7 +115,7 @@ fn blacklisting_should_work() { assert_ok!(propose_set_balance_and_note(1, 2, 2)); assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_noop!(Democracy::blacklist(Origin::signed(1), hash.clone(), None), BadOrigin); + assert_noop!(Democracy::blacklist(Origin::signed(1), hash, None), BadOrigin); assert_ok!(Democracy::blacklist(Origin::root(), hash, None)); assert_eq!(Democracy::backing_for(0), None); diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index ffd1c6ab4de9c..03d41fa2af40f 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1342,9 +1342,7 @@ mod tests { self } pub fn genesis_members(mut self, members: Vec<(u64, u64)>) -> Self { - MEMBERS.with(|m| { - *m.borrow_mut() = members.iter().map(|(m, _)| m.clone()).collect::>() - }); + MEMBERS.with(|m| *m.borrow_mut() = members.iter().map(|(m, _)| *m).collect::>()); self.genesis_members = members; self } @@ -1359,8 +1357,7 @@ mod tests { pub fn build_and_execute(self, test: impl FnOnce() -> ()) { sp_tracing::try_init_simple(); MEMBERS.with(|m| { - *m.borrow_mut() = - self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>() + *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| *m).collect::>() }); let mut ext: sp_io::TestExternalities = GenesisConfig { balances: pallet_balances::GenesisConfig:: { diff --git a/frame/examples/offchain-worker/src/tests.rs b/frame/examples/offchain-worker/src/tests.rs index e5bd9fabc629b..703220e64fa8a 100644 --- a/frame/examples/offchain-worker/src/tests.rs +++ b/frame/examples/offchain-worker/src/tests.rs @@ -253,10 +253,9 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { ) .unwrap(); - let public_key = SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) + let public_key = *SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) .get(0) - .unwrap() - .clone(); + .unwrap(); let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); @@ -313,10 +312,9 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { ) .unwrap(); - let public_key = SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) + let public_key = *SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) .get(0) - .unwrap() - .clone(); + .unwrap(); let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index d67d06e1bce05..5d37e32e4d8a4 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -221,14 +221,7 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), - 3, - vec![2, 3], - None, - hash.clone(), - 0 - )); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); assert_eq!(Balances::free_balance(1), 1); assert_eq!(Balances::reserved_balance(1), 4); @@ -266,31 +259,18 @@ fn cancel_multisig_returns_deposit() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); let hash 
= blake2_256(&call); - assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), - 3, - vec![2, 3], - None, - hash.clone(), - 0 - )); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); assert_ok!(Multisig::approve_as_multi( Origin::signed(2), 3, vec![1, 3], Some(now()), - hash.clone(), + hash, 0 )); assert_eq!(Balances::free_balance(1), 6); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Multisig::cancel_as_multi( - Origin::signed(1), - 3, - vec![2, 3], - now(), - hash.clone() - ),); + assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash),); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -308,14 +288,7 @@ fn timepoint_checking_works() { let hash = blake2_256(&call); assert_noop!( - Multisig::approve_as_multi( - Origin::signed(2), - 2, - vec![1, 3], - Some(now()), - hash.clone(), - 0 - ), + Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash, 0), Error::::UnexpectedTimepoint, ); @@ -424,20 +397,13 @@ fn multisig_3_of_3_works() { let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), - 3, - vec![2, 3], - None, - hash.clone(), - 0 - )); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); assert_ok!(Multisig::approve_as_multi( Origin::signed(2), 3, vec![1, 3], Some(now()), - hash.clone(), + hash, 0 )); assert_eq!(Balances::free_balance(6), 0); @@ -460,33 +426,20 @@ fn cancel_multisig_works() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), - 3, - vec![2, 3], - None, - hash.clone(), - 0 - )); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); assert_ok!(Multisig::approve_as_multi( Origin::signed(2), 3, vec![1, 3], Some(now()), - hash.clone(), + hash, 0 )); assert_noop!( - Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), + Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash), Error::::NotOwner, ); - assert_ok!(Multisig::cancel_as_multi( - Origin::signed(1), - 3, - vec![2, 3], - now(), - hash.clone() - ),); + assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash),); }); } @@ -510,20 +463,14 @@ fn cancel_multisig_with_call_storage_works() { 3, vec![1, 3], Some(now()), - hash.clone(), + hash, 0 )); assert_noop!( - Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), + Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash), Error::::NotOwner, ); - assert_ok!(Multisig::cancel_as_multi( - Origin::signed(1), - 3, - vec![2, 3], - now(), - hash.clone() - ),); + assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash),); assert_eq!(Balances::free_balance(1), 10); }); } @@ -533,14 +480,7 @@ fn cancel_multisig_with_alt_call_storage_works() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), - 3, - vec![2, 3], - None, - hash.clone(), - 0 - )); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); assert_eq!(Balances::free_balance(1), 6); assert_ok!(Multisig::as_multi( Origin::signed(2), @@ -770,23 +710,9 @@ fn 
duplicate_approvals_are_ignored() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), - 2, - vec![2, 3], - None, - hash.clone(), - 0 - )); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); assert_noop!( - Multisig::approve_as_multi( - Origin::signed(1), - 2, - vec![2, 3], - Some(now()), - hash.clone(), - 0 - ), + Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], Some(now()), hash, 0), Error::::AlreadyApproved, ); assert_ok!(Multisig::approve_as_multi( @@ -794,18 +720,11 @@ fn duplicate_approvals_are_ignored() { 2, vec![1, 3], Some(now()), - hash.clone(), + hash, 0 )); assert_noop!( - Multisig::approve_as_multi( - Origin::signed(3), - 2, - vec![1, 2], - Some(now()), - hash.clone(), - 0 - ), + Multisig::approve_as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), hash, 0), Error::::AlreadyApproved, ); }); @@ -822,7 +741,7 @@ fn multisig_1_of_3_works() { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_noop!( - Multisig::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash.clone(), 0), + Multisig::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash, 0), Error::::MinimumThreshold, ); assert_noop!( @@ -906,20 +825,13 @@ fn multisig_handles_no_preimage_after_all_approve() { let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), - 3, - vec![2, 3], - None, - hash.clone(), - 0 - )); + assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); assert_ok!(Multisig::approve_as_multi( Origin::signed(2), 3, vec![1, 3], Some(now()), - hash.clone(), + hash, 0 )); assert_ok!(Multisig::approve_as_multi( @@ -927,7 +839,7 @@ fn multisig_handles_no_preimage_after_all_approve() { 3, vec![1, 2], Some(now()), - hash.clone(), + hash, 0 )); assert_eq!(Balances::free_balance(6), 0); diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index 4c2c902846a85..6cc589ceb08bd 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs @@ -224,7 +224,7 @@ frame_benchmarking::benchmarks! { origin_weight ); - let max_additional = scenario.dest_weight.clone() - origin_weight; + let max_additional = scenario.dest_weight - origin_weight; let joiner_free = CurrencyOf::::minimum_balance() + max_additional; let joiner: T::AccountId @@ -243,7 +243,7 @@ frame_benchmarking::benchmarks! { bond_extra_transfer { let origin_weight = min_create_bond::() * 2u32.into(); let scenario = ListScenario::::new(origin_weight, true)?; - let extra = scenario.dest_weight.clone() - origin_weight; + let extra = scenario.dest_weight - origin_weight; // creator of the src pool will bond-extra, bumping itself to dest bag. @@ -258,7 +258,7 @@ frame_benchmarking::benchmarks! { bond_extra_reward { let origin_weight = min_create_bond::() * 2u32.into(); let scenario = ListScenario::::new(origin_weight, true)?; - let extra = (scenario.dest_weight.clone() - origin_weight).max(CurrencyOf::::minimum_balance()); + let extra = (scenario.dest_weight - origin_weight).max(CurrencyOf::::minimum_balance()); // transfer exactly `extra` to the depositor of the src pool (1), let reward_account1 = Pools::::create_reward_account(1); @@ -306,7 +306,7 @@ frame_benchmarking::benchmarks! 
{ // significantly higher than the first position in a list (e.g. the first bag threshold). let origin_weight = min_create_bond::() * 200u32.into(); let scenario = ListScenario::::new(origin_weight, false)?; - let amount = origin_weight - scenario.dest_weight.clone(); + let amount = origin_weight - scenario.dest_weight; let scenario = scenario.add_joiner(amount); let member_id = scenario.origin1_member.unwrap().clone(); @@ -316,7 +316,7 @@ frame_benchmarking::benchmarks! { verify { let bonded_after = T::StakingInterface::active_stake(&scenario.origin1).unwrap(); // We at least went down to the destination bag - assert!(bonded_after <= scenario.dest_weight.clone()); + assert!(bonded_after <= scenario.dest_weight); let member = PoolMembers::::get( &member_id ) diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 09f1746b8e5ba..4a31048fb8d8b 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -491,7 +491,7 @@ impl PoolMember { true } else { removed_points - .try_insert(*e, p.clone()) + .try_insert(*e, *p) .expect("source map is bounded, this is a subset, will be bounded; qed"); false } @@ -1920,10 +1920,7 @@ pub mod pallet { ); ReversePoolIdLookup::::insert(bonded_pool.bonded_account(), pool_id); - Self::deposit_event(Event::::Created { - depositor: who.clone(), - pool_id: pool_id.clone(), - }); + Self::deposit_event(Event::::Created { depositor: who.clone(), pool_id }); Self::deposit_event(Event::::Bonded { member: who, diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs index f2abfe29dfbf7..243e5489b5445 100644 --- a/frame/nomination-pools/src/migration.rs +++ b/frame/nomination-pools/src/migration.rs @@ -244,7 +244,7 @@ pub mod v2 { }, }; - total_value_locked += bonded_pool.points_to_balance(points.clone()); + total_value_locked += bonded_pool.points_to_balance(*points); let portion = Perbill::from_rational(*points, bonded_pool.points); let last_claim = portion * accumulated_reward; diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index a0807f1d3d0b6..72acee1e0347e 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -543,7 +543,7 @@ fn anonymous_works() { let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None); System::assert_last_event( ProxyEvent::AnonymousCreated { - anonymous: anon.clone(), + anonymous: anon, who: 1, proxy_type: ProxyType::Any, disambiguation_index: 0, diff --git a/frame/remark/src/tests.rs b/frame/remark/src/tests.rs index eac006054c1d5..580b64cc64997 100644 --- a/frame/remark/src/tests.rs +++ b/frame/remark/src/tests.rs @@ -28,7 +28,7 @@ fn generates_event() { let caller = 1; let data = vec![0u8; 100]; System::set_block_number(System::block_number() + 1); //otherwise event won't be registered. - assert_ok!(Remark::::store(RawOrigin::Signed(caller.clone()).into(), data.clone(),)); + assert_ok!(Remark::::store(RawOrigin::Signed(caller).into(), data.clone(),)); let events = System::events(); // this one we create as we expect it let system_event: ::Event = Event::Stored { @@ -50,7 +50,7 @@ fn does_not_store_empty() { let data = vec![]; System::set_block_number(System::block_number() + 1); //otherwise event won't be registered. 
assert_noop!( - Remark::::store(RawOrigin::Signed(caller.clone()).into(), data.clone(),), + Remark::::store(RawOrigin::Signed(caller).into(), data.clone(),), Error::::Empty ); assert!(System::events().is_empty()); diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index d2a795cb19fa4..d03e13da4747a 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -47,7 +47,7 @@ fn scheduling_with_preimages_works() { new_test_ext().execute_with(|| { let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); let hash = ::Hashing::hash_of(&call); - let hashed = MaybeHashed::Hash(hash.clone()); + let hashed = MaybeHashed::Hash(hash); assert_ok!(Preimage::note_preimage(Origin::signed(0), call.encode())); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); assert!(Preimage::preimage_requested(&hash)); @@ -67,7 +67,7 @@ fn scheduling_with_preimage_postpones_correctly() { new_test_ext().execute_with(|| { let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); let hash = ::Hashing::hash_of(&call); - let hashed = MaybeHashed::Hash(hash.clone()); + let hashed = MaybeHashed::Hash(hash); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); assert!(Preimage::preimage_requested(&hash)); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index d9dc97f9c1127..7911428b3337c 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -826,7 +826,7 @@ pub(crate) fn on_offence_now( pub(crate) fn add_slash(who: &AccountId) { on_offence_now( &[OffenceDetails { - offender: (who.clone(), Staking::eras_stakers(active_era(), who.clone())), + offender: (*who, Staking::eras_stakers(active_era(), *who)), reporters: vec![], }], &[Perbill::from_percent(10)], diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index d14d8c4a75f2e..485a9dc3ae66a 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -1082,10 +1082,7 @@ fn validator_payment_prefs_work() { // This test will focus on validator payment. ExtBuilder::default().build_and_execute(|| { let commission = Perbill::from_percent(40); - >::insert( - &11, - ValidatorPrefs { commission: commission.clone(), ..Default::default() }, - ); + >::insert(&11, ValidatorPrefs { commission, ..Default::default() }); // Reward controller so staked ratio doesn't change. 
>::insert(&11, RewardDestination::Controller); diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 353bc93e5ab94..9b0172bf97aa2 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1253,7 +1253,7 @@ mod mock { weight_sum += StateTrieMigration::on_initialize(System::block_number()); - root = System::finalize().state_root().clone(); + root = *System::finalize().state_root(); System::on_finalize(System::block_number()); } (root, weight_sum) diff --git a/frame/timestamp/src/mock.rs b/frame/timestamp/src/mock.rs index 9536414c54db6..0da94deb7112a 100644 --- a/frame/timestamp/src/mock.rs +++ b/frame/timestamp/src/mock.rs @@ -101,7 +101,7 @@ pub(crate) fn clear_captured_moment() { } pub(crate) fn get_captured_moment() -> Option { - CAPTURED_MOMENT.with(|x| x.borrow().clone()) + CAPTURED_MOMENT.with(|x| *x.borrow()) } pub(crate) fn new_test_ext() -> TestExternalities { diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index 194ecc60d9890..bcaa99285d2e7 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -256,10 +256,10 @@ fn report_awesome_and_tip_works() { ); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Tips::tip(Origin::signed(9), h.clone(), 10), BadOrigin); + assert_ok!(Tips::tip(Origin::signed(10), h, 10)); + assert_ok!(Tips::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips::tip(Origin::signed(12), h, 10)); + assert_noop!(Tips::tip(Origin::signed(9), h, 10), BadOrigin); System::set_block_number(2); assert_ok!(Tips::close_tip(Origin::signed(100), h.into())); assert_eq!(Balances::reserved_balance(0), 0); @@ -276,9 +276,9 @@ fn report_awesome_from_beneficiary_and_tip_works() { assert_eq!(Balances::reserved_balance(0), 12); assert_eq!(Balances::free_balance(0), 88); let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128)); - assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(10), h, 10)); + assert_ok!(Tips::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips::tip(Origin::signed(12), h, 10)); System::set_block_number(2); assert_ok!(Tips::close_tip(Origin::signed(100), h.into())); assert_eq!(Balances::reserved_balance(0), 0); @@ -300,11 +300,11 @@ fn close_tip_works() { assert_eq!(last_event(), TipEvent::NewTip { tip_hash: h }); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(11), h, 10)); assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(12), h, 10)); assert_eq!(last_event(), TipEvent::TipClosing { tip_hash: h }); @@ -340,10 +340,10 @@ fn slash_tip_works() { assert_eq!(last_event(), TipEvent::NewTip { tip_hash: h }); // can't remove from any origin - assert_noop!(Tips::slash_tip(Origin::signed(0), h.clone()), BadOrigin); + assert_noop!(Tips::slash_tip(Origin::signed(0), h), BadOrigin); // can remove from root. 
- assert_ok!(Tips::slash_tip(Origin::root(), h.clone())); + assert_ok!(Tips::slash_tip(Origin::root(), h)); assert_eq!(last_event(), TipEvent::TipSlashed { tip_hash: h, finder: 0, deposit: 12 }); // tipper slashed @@ -359,11 +359,11 @@ fn retract_tip_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Tips::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); - assert_ok!(Tips::retract_tip(Origin::signed(0), h.clone())); + assert_ok!(Tips::tip(Origin::signed(10), h, 10)); + assert_ok!(Tips::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips::tip(Origin::signed(12), h, 10)); + assert_noop!(Tips::retract_tip(Origin::signed(10), h), Error::::NotFinder); + assert_ok!(Tips::retract_tip(Origin::signed(0), h)); System::set_block_number(2); assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); @@ -371,10 +371,10 @@ fn retract_tip_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Tips::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); - assert_ok!(Tips::retract_tip(Origin::signed(10), h.clone())); + assert_ok!(Tips::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips::tip(Origin::signed(12), h, 10)); + assert_noop!(Tips::retract_tip(Origin::signed(0), h), Error::::NotFinder); + assert_ok!(Tips::retract_tip(Origin::signed(10), h)); System::set_block_number(2); assert_noop!(Tips::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); }); @@ -386,8 +386,8 @@ fn tip_median_calculation_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 1000000)); + assert_ok!(Tips::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips::tip(Origin::signed(12), h, 1000000)); System::set_block_number(2); assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); @@ -400,13 +400,13 @@ fn tip_changing_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 10000)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 10000)); - assert_ok!(Tips::tip(Origin::signed(13), h.clone(), 0)); - assert_ok!(Tips::tip(Origin::signed(14), h.clone(), 0)); - assert_ok!(Tips::tip(Origin::signed(12), h.clone(), 1000)); - assert_ok!(Tips::tip(Origin::signed(11), h.clone(), 100)); - assert_ok!(Tips::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Tips::tip(Origin::signed(11), h, 10000)); + assert_ok!(Tips::tip(Origin::signed(12), h, 10000)); + assert_ok!(Tips::tip(Origin::signed(13), h, 0)); + assert_ok!(Tips::tip(Origin::signed(14), h, 0)); + assert_ok!(Tips::tip(Origin::signed(12), h, 1000)); + assert_ok!(Tips::tip(Origin::signed(11), h, 100)); + 
assert_ok!(Tips::tip(Origin::signed(10), h, 10)); System::set_block_number(2); assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); @@ -595,10 +595,10 @@ fn report_awesome_and_tip_works_second_instance() { assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); let h = tip_hash(); - assert_ok!(Tips1::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Tips1::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Tips1::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Tips1::tip(Origin::signed(9), h.clone(), 10), BadOrigin); + assert_ok!(Tips1::tip(Origin::signed(10), h, 10)); + assert_ok!(Tips1::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips1::tip(Origin::signed(12), h, 10)); + assert_noop!(Tips1::tip(Origin::signed(9), h, 10), BadOrigin); System::set_block_number(2); diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index ff65c0d2735fd..707f7d8a22065 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -1042,8 +1042,8 @@ mod tests { &Ok(()) )); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 5 + 10); - assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 0); + assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 5 + 5 + 10); + assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 0); FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() = 0); @@ -1060,8 +1060,8 @@ mod tests { &Ok(()) )); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); - assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 10 + 50); - assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5); + assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 5 + 10 + 50); + assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 5); }); } diff --git a/frame/transaction-storage/src/tests.rs b/frame/transaction-storage/src/tests.rs index 8825890ae67a2..4f5ce1c4b654d 100644 --- a/frame/transaction-storage/src/tests.rs +++ b/frame/transaction-storage/src/tests.rs @@ -31,11 +31,11 @@ fn discards_data() { run_to_block(1, || None); let caller = 1; assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), vec![0u8; 2000 as usize] )); assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), vec![0u8; 2000 as usize] )); let proof_provider = || { @@ -74,7 +74,7 @@ fn burns_fee() { Error::::InsufficientFunds, ); assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), vec![0u8; 2000 as usize] )); assert_eq!(Balances::free_balance(1), 1_000_000_000 - 2000 * 2 - 200); @@ -87,7 +87,7 @@ fn checks_proof() { run_to_block(1, || None); let caller = 1; assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), vec![0u8; MAX_DATA_SIZE as usize] )); run_to_block(10, || None); @@ -119,13 +119,13 @@ fn renews_data() { run_to_block(1, || None); let caller = 1; assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), vec![0u8; 2000] )); let info = BlockTransactions::::get().last().unwrap().clone(); run_to_block(6, || None); assert_ok!(TransactionStorage::::renew( - RawOrigin::Signed(caller.clone()).into(), + RawOrigin::Signed(caller).into(), 1, // block 0, // 
transaction )); diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index dcbb1f27973d7..e9169bdd963b4 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -234,15 +234,11 @@ impl, I: 'static> Pallet { ensure!(details.owner == sender, Error::::NoPermission); if let Some(ref price) = price { - ItemPriceOf::::insert( - &collection, - &item, - (price.clone(), whitelisted_buyer.clone()), - ); + ItemPriceOf::::insert(&collection, &item, (price, whitelisted_buyer.clone())); Self::deposit_event(Event::ItemPriceSet { collection, item, - price: price.clone(), + price: *price, whitelisted_buyer, }); } else { diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index 968bda6885d0d..2ac88c7e6c04f 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -148,10 +148,8 @@ fn record_proof_works() { .build_with_longest_chain(); let block_id = BlockId::Number(client.chain_info().best_number); - let storage_root = futures::executor::block_on(longest_chain.best_chain()) - .unwrap() - .state_root() - .clone(); + let storage_root = + *futures::executor::block_on(longest_chain.best_chain()).unwrap().state_root(); let runtime_code = sp_core::traits::RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode( diff --git a/primitives/npos-elections/fuzzer/src/reduce.rs b/primitives/npos-elections/fuzzer/src/reduce.rs index 605f2d6081a6f..602467a343884 100644 --- a/primitives/npos-elections/fuzzer/src/reduce.rs +++ b/primitives/npos-elections/fuzzer/src/reduce.rs @@ -90,7 +90,7 @@ fn generate_random_phragmen_assignment( let target = targets_to_chose_from.remove(rng.gen_range(0..targets_to_chose_from.len())); if winners.iter().all(|w| *w != target) { - winners.push(target.clone()); + winners.push(target); } (target, rng.gen_range(1 * KSM..100 * KSM)) }) diff --git a/primitives/npos-elections/src/mock.rs b/primitives/npos-elections/src/mock.rs index dd85ce9b6dfae..5a06e3f3c88ca 100644 --- a/primitives/npos-elections/src/mock.rs +++ b/primitives/npos-elections/src/mock.rs @@ -92,7 +92,7 @@ where .into_iter() .enumerate() .map(|(idx, who)| { - c_idx_cache.insert(who.clone(), idx); + c_idx_cache.insert(who, idx); _Candidate { who, ..Default::default() } }) .collect::>>(); @@ -103,7 +103,7 @@ where for v in votes { if let Some(idx) = c_idx_cache.get(&v) { candidates[*idx].approval_stake = candidates[*idx].approval_stake + voter_stake; - edges.push(_Edge { who: v.clone(), candidate_index: *idx, ..Default::default() }); + edges.push(_Edge { who: v, candidate_index: *idx, ..Default::default() }); } } _Voter { who, edges, budget: voter_stake, load: 0f64 } @@ -143,21 +143,21 @@ where } } - elected_candidates.push((winner.who.clone(), winner.approval_stake as ExtendedBalance)); + elected_candidates.push((winner.who, winner.approval_stake as ExtendedBalance)); } else { break } } for n in &mut voters { - let mut assignment = (n.who.clone(), vec![]); + let mut assignment = (n.who, vec![]); for e in &mut n.edges { if let Some(c) = elected_candidates.iter().cloned().map(|(c, _)| c).find(|c| *c == e.who) { if c != n.who { let ratio = e.load / n.load; - assignment.1.push((e.who.clone(), ratio)); + assignment.1.push((e.who, ratio)); } } } @@ -321,7 +321,7 @@ pub(crate) fn run_and_compare( candidates.clone(), voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -368,7 
+368,7 @@ pub(crate) fn build_support_map_float( let mut supports = <_SupportMap>::new(); result.winners.iter().map(|(e, _)| (e, stake_of(e) as f64)).for_each(|(e, s)| { let item = _Support { own: s, total: s, ..Default::default() }; - supports.insert(e.clone(), item); + supports.insert(*e, item); }); for (n, assignment) in result.assignments.iter_mut() { @@ -377,7 +377,7 @@ pub(crate) fn build_support_map_float( let other_stake = nominator_stake * *r; if let Some(support) = supports.get_mut(c) { support.total = support.total + other_stake; - support.others.push((n.clone(), other_stake)); + support.others.push((*n, other_stake)); } *r = other_stake; } diff --git a/primitives/npos-elections/src/pjr.rs b/primitives/npos-elections/src/pjr.rs index 914834fbb2aef..fd7c8ef539241 100644 --- a/primitives/npos-elections/src/pjr.rs +++ b/primitives/npos-elections/src/pjr.rs @@ -481,7 +481,7 @@ mod tests { assert_eq!( candidates .iter() - .map(|c| (c.borrow().who.clone(), c.borrow().elected, c.borrow().backed_stake)) + .map(|c| (c.borrow().who, c.borrow().elected, c.borrow().backed_stake)) .collect::>(), vec![(10, false, 0), (20, true, 15), (30, false, 0), (40, true, 15)], ); diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index 5b88889201b31..6f2e4fca77115 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -233,7 +233,7 @@ fn phragmen_poc_works() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -289,7 +289,7 @@ fn phragmen_poc_works_with_balancing() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), Some(config), ) @@ -376,7 +376,7 @@ fn phragmen_accuracy_on_large_scale_only_candidates() { candidates.clone(), auto_generate_self_voters(&candidates) .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -407,7 +407,7 @@ fn phragmen_accuracy_on_large_scale_voters_and_candidates() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -439,7 +439,7 @@ fn phragmen_accuracy_on_small_scale_self_vote() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -469,7 +469,7 @@ fn phragmen_accuracy_on_small_scale_no_self_vote() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -505,7 +505,7 @@ fn phragmen_large_scale_test() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -532,7 +532,7 @@ fn phragmen_large_scale_test_2() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -601,7 +601,7 @@ fn elect_has_no_entry_barrier() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ 
-622,7 +622,7 @@ fn phragmen_self_votes_should_be_kept() { candidates, voters .iter() - .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .map(|(ref v, ref vs)| (*v, stake_of(v), vs.clone())) .collect::>(), None, ) @@ -872,30 +872,15 @@ mod score { let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; - assert_eq!( - is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(1u32, 10_000),), - true, - ); + assert_eq!(is_score_better(claim, initial, Perbill::from_rational(1u32, 10_000),), true,); - assert_eq!( - is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(2u32, 10_000),), - true, - ); + assert_eq!(is_score_better(claim, initial, Perbill::from_rational(2u32, 10_000),), true,); - assert_eq!( - is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(3u32, 10_000),), - true, - ); + assert_eq!(is_score_better(claim, initial, Perbill::from_rational(3u32, 10_000),), true,); - assert_eq!( - is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(4u32, 10_000),), - true, - ); + assert_eq!(is_score_better(claim, initial, Perbill::from_rational(4u32, 10_000),), true,); - assert_eq!( - is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(5u32, 10_000),), - false, - ); + assert_eq!(is_score_better(claim, initial, Perbill::from_rational(5u32, 10_000),), false,); } #[test] diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index 99733dbbe9a55..c040b7cf517e0 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -170,7 +170,7 @@ fn test_calculate_for_fraction_times_denominator() { }; pub fn formal_calculate_for_fraction_times_denominator(n: u64, d: u64) -> u64 { - if n <= Perbill::from_parts(0_500_000_000) * d.clone() { + if n <= Perbill::from_parts(0_500_000_000) * d { n + d / 2 } else { (d as u128 * 2 - n as u128 * 2).try_into().unwrap() diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index bf77c08b76906..cd43038522914 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -1118,7 +1118,7 @@ mod tests { ext.insert(b"c".to_vec(), vec![3u8; 33]); ext.insert(b"d".to_vec(), vec![4u8; 33]); - let pre_root = ext.backend.root().clone(); + let pre_root = *ext.backend.root(); let (_, proof) = ext.execute_and_prove(|| { sp_io::storage::get(b"a"); sp_io::storage::get(b"b"); From e7ae42bcc1e5bfac971d7bfd73307cb738623e85 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Tue, 16 Aug 2022 00:31:28 +0200 Subject: [PATCH 040/348] Always wipe DB state after benchmark (#12025) * Add test Signed-off-by: Oliver Tale-Yazdi * Always wipe the DB state Signed-off-by: Oliver Tale-Yazdi * Silence Clippy Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- Cargo.lock | 1 + frame/benchmarking/Cargo.toml | 2 ++ frame/benchmarking/src/lib.rs | 18 +++++++------- frame/benchmarking/src/tests.rs | 44 +++++++++++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da047d4cba6d8..6a114065ee56c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2102,6 +2102,7 @@ dependencies = [ "serde", "sp-api", "sp-application-crypto", + "sp-core", "sp-io", "sp-keystore", "sp-runtime", diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 4205274b5dbc3..d3a2704724f12 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -23,6 +23,7 @@ frame-support = { version = 
"4.0.0-dev", default-features = false, path = "../su frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../../primitives/application-crypto" } +sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../../primitives/runtime-interface" } @@ -45,6 +46,7 @@ std = [ "serde", "sp-api/std", "sp-application-crypto/std", + "sp-core/std", "sp-io/std", "sp-runtime-interface/std", "sp-runtime/std", diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index a1399dc388b09..926d7985e8ba7 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -38,6 +38,8 @@ pub use log; #[doc(hidden)] pub use paste; #[doc(hidden)] +pub use sp_core::defer; +#[doc(hidden)] pub use sp_io::storage::root as storage_root; #[doc(hidden)] pub use sp_runtime::traits::Zero; @@ -1033,6 +1035,9 @@ macro_rules! impl_benchmark { // Always do at least one internal repeat... for _ in 0 .. internal_repeats.max(1) { + // Always reset the state after the benchmark. + $crate::defer!($crate::benchmarking::wipe_db()); + // Set up the externalities environment for the setup we want to // benchmark. let closure_to_benchmark = < @@ -1110,9 +1115,6 @@ macro_rules! impl_benchmark { proof_size: diff_pov, keys: read_and_written_keys, }); - - // Wipe the DB back to the genesis state. - $crate::benchmarking::wipe_db(); } return Ok(results); @@ -1175,6 +1177,9 @@ macro_rules! impl_benchmark_test { let execute_benchmark = | c: $crate::Vec<($crate::BenchmarkParameter, u32)> | -> Result<(), $crate::BenchmarkError> { + // Always reset the state after the benchmark. + $crate::defer!($crate::benchmarking::wipe_db()); + // Set up the benchmark, return execution + verification function. let closure_to_verify = < SelectedBenchmark as $crate::BenchmarkingSetup @@ -1186,12 +1191,7 @@ macro_rules! impl_benchmark_test { } // Run execution + verification - closure_to_verify()?; - - // Reset the state - $crate::benchmarking::wipe_db(); - - Ok(()) + closure_to_verify() }; if components.is_empty() { diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 06f2b5bdc4916..9a97250aeb965 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -125,6 +125,8 @@ fn new_test_ext() -> sp_io::TestExternalities { GenesisConfig::default().build_storage().unwrap().into() } +// NOTE: This attribute is only needed for the `modify_in_` functions. +#[allow(unreachable_code)] mod benchmarks { use super::{new_test_ext, pallet_test::Value, Test}; use crate::{account, BenchmarkError, BenchmarkParameter, BenchmarkResult, BenchmarkingSetup}; @@ -227,6 +229,24 @@ mod benchmarks { // This should never be reached. 
assert!(value > 100); } + + modify_in_setup_then_error { + Value::::set(Some(123)); + return Err(BenchmarkError::Stop("Should error")); + }: { } + + modify_in_call_then_error { + }: { + Value::::set(Some(123)); + return Err(BenchmarkError::Stop("Should error")); + } + + modify_in_verify_then_error { + }: { + } verify { + Value::::set(Some(123)); + return Err(BenchmarkError::Stop("Should error")); + } } #[test] @@ -350,4 +370,28 @@ mod benchmarks { assert_eq!(Pallet::::test_benchmark_skip_benchmark(), Err(BenchmarkError::Skip),); }); } + + /// An error return of a benchmark test function still causes the db to be wiped. + #[test] + fn benchmark_error_wipes_storage() { + new_test_ext().execute_with(|| { + // It resets when the error happens in the setup: + assert_err!( + Pallet::::test_benchmark_modify_in_setup_then_error(), + "Should error" + ); + assert_eq!(Value::::get(), None); + + // It resets when the error happens in the call: + assert_err!(Pallet::::test_benchmark_modify_in_call_then_error(), "Should error"); + assert_eq!(Value::::get(), None); + + // It resets when the error happens in the verify: + assert_err!( + Pallet::::test_benchmark_modify_in_verify_then_error(), + "Should error" + ); + assert_eq!(Value::::get(), None); + }); + } } From 97ae6be11b0132224a05634c508417f048894670 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Tue, 16 Aug 2022 11:36:34 +0100 Subject: [PATCH 041/348] Fix Origins Used in Runtime Benchmarks (#12037) * fix identity benchmark origin * fix benchmarking macro * bounties * democracy * scheduler * tips and treasury * unused * dunno, this is causing issues --- frame/benchmarking/src/lib.rs | 14 ++++++------ frame/bounties/src/benchmarking.rs | 33 ++++++++++++++++------------- frame/democracy/src/benchmarking.rs | 4 +++- frame/identity/src/benchmarking.rs | 26 ++++++++++++++++------- frame/scheduler/src/benchmarking.rs | 15 ++++++++----- frame/staking/Cargo.toml | 2 +- frame/tips/src/benchmarking.rs | 3 ++- frame/treasury/src/benchmarking.rs | 9 +++++--- 8 files changed, 65 insertions(+), 41 deletions(-) diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 926d7985e8ba7..0a0f8eecdcc3d 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -547,7 +547,7 @@ macro_rules! benchmarks_iter { ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) + $name:ident { $( $code:tt )* }: _ $(<$origin_type:ty>)? ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { $crate::benchmarks_iter! { @@ -557,7 +557,7 @@ macro_rules! benchmarks_iter { ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: _ ( $origin $( , $arg )* ) + $name { $( $code )* }: _ $(<$origin_type>)? ( $origin $( , $arg )* ) verify { } $( $rest )* } @@ -570,7 +570,7 @@ macro_rules! benchmarks_iter { ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) + $name:ident { $( $code:tt )* }: $dispatch:ident $(<$origin_type:ty>)? ( $origin:expr $( , $arg:expr )* ) $( $rest:tt )* ) => { $crate::benchmarks_iter! { @@ -580,7 +580,7 @@ macro_rules! benchmarks_iter { ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: $dispatch ( $origin $( , $arg )* ) + $name { $( $code )* }: $dispatch $(<$origin_type>)? ( $origin $( , $arg )* ) verify { } $( $rest )* } @@ -593,7 +593,7 @@ macro_rules! 
benchmarks_iter { ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: $eval:block + $name:ident { $( $code:tt )* }: $(<$origin_type:ty>)? $eval:block $( $rest:tt )* ) => { $crate::benchmarks_iter!( @@ -603,7 +603,7 @@ macro_rules! benchmarks_iter { ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: $eval + $name { $( $code )* }: $(<$origin_type>)? $eval verify { } $( $rest )* ); @@ -617,7 +617,7 @@ macro_rules! to_origin { $origin.into() }; ($origin:expr, $origin_type:ty) => { - >::from($origin) + <::Origin as From<$origin_type>>::from($origin) }; } diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 7566c32f6e9a1..bc559e1e8b358 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -37,7 +37,8 @@ fn create_approved_bounties, I: 'static>(n: u32) -> Result<(), &'st setup_bounty::(i, T::MaximumReasonLength::get()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + let approve_origin = T::ApproveOrigin::successful_origin(); + Bounties::::approve_bounty(approve_origin, bounty_id)?; } ensure!(BountyApprovals::::get().len() == n as usize, "Not all bounty approved"); Ok(()) @@ -67,14 +68,10 @@ fn create_bounty, I: 'static>( let curator_lookup = T::Lookup::unlookup(curator.clone()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + let approve_origin = T::ApproveOrigin::successful_origin(); + Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); - Bounties::::propose_curator( - RawOrigin::Root.into(), - bounty_id, - curator_lookup.clone(), - fee, - )?; + Bounties::::propose_curator(approve_origin, bounty_id, curator_lookup.clone(), fee)?; Bounties::::accept_curator(RawOrigin::Signed(curator).into(), bounty_id)?; Ok((curator_lookup, bounty_id)) } @@ -100,7 +97,8 @@ benchmarks_instance_pallet! { let (caller, curator, fee, value, reason) = setup_bounty::(0, T::MaximumReasonLength::get()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - }: _(RawOrigin::Root, bounty_id) + let approve_origin = T::ApproveOrigin::successful_origin(); + }: _(approve_origin, bounty_id) propose_curator { setup_pot_account::(); @@ -108,9 +106,11 @@ benchmarks_instance_pallet! { let curator_lookup = T::Lookup::unlookup(curator); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + let approve_origin = T::ApproveOrigin::successful_origin(); + Bounties::::approve_bounty(approve_origin, bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); - }: _(RawOrigin::Root, bounty_id, curator_lookup, fee) + let approve_origin = T::ApproveOrigin::successful_origin(); + }: _(approve_origin, bounty_id, curator_lookup, fee) // Worst case when curator is inactive and any sender unassigns the curator. unassign_curator { @@ -128,9 +128,10 @@ benchmarks_instance_pallet! 
{ let curator_lookup = T::Lookup::unlookup(curator.clone()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - Bounties::::approve_bounty(RawOrigin::Root.into(), bounty_id)?; + let approve_origin = T::ApproveOrigin::successful_origin(); + Bounties::::approve_bounty(approve_origin.clone(), bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); - Bounties::::propose_curator(RawOrigin::Root.into(), bounty_id, curator_lookup, fee)?; + Bounties::::propose_curator(approve_origin, bounty_id, curator_lookup, fee)?; }: _(RawOrigin::Signed(curator), bounty_id) award_bounty { @@ -169,14 +170,16 @@ benchmarks_instance_pallet! { let (caller, curator, fee, value, reason) = setup_bounty::(0, 0); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; - }: close_bounty(RawOrigin::Root, bounty_id) + let approve_origin = T::ApproveOrigin::successful_origin(); + }: close_bounty(approve_origin, bounty_id) close_bounty_active { setup_pot_account::(); let (curator_lookup, bounty_id) = create_bounty::()?; Treasury::::on_initialize(T::BlockNumber::zero()); let bounty_id = BountyCount::::get() - 1; - }: close_bounty(RawOrigin::Root, bounty_id) + let approve_origin = T::ApproveOrigin::successful_origin(); + }: close_bounty(approve_origin, bounty_id) verify { assert_last_event::(Event::BountyCanceled { index: bounty_id }.into()) } diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index c51fde8a3de8b..0544ee1731484 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -318,7 +318,9 @@ benchmarks! { for i in 0 .. p { add_proposal::(i)?; } - }: _(RawOrigin::Root, 0) + + let cancel_origin = T::CancelProposalOrigin::successful_origin(); + }: _(cancel_origin, 0) cancel_referendum { let referendum_index = add_referendum::(0)?; diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index b225db4edfa91..4891302695186 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -23,7 +23,10 @@ use super::*; use crate::Pallet as Identity; use frame_benchmarking::{account, benchmarks, whitelisted_caller}; -use frame_support::{ensure, traits::Get}; +use frame_support::{ + ensure, + traits::{EnsureOrigin, Get}, +}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -38,7 +41,8 @@ fn add_registrars(r: u32) -> Result<(), &'static str> { for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); let _ = T::Currency::make_free_balance_be(®istrar, BalanceOf::::max_value()); - Identity::::add_registrar(RawOrigin::Root.into(), registrar.clone())?; + let registrar_origin = T::RegistrarOrigin::successful_origin(); + Identity::::add_registrar(registrar_origin, registrar.clone())?; Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), i, 10u32.into())?; let fields = IdentityFields( @@ -114,7 +118,8 @@ benchmarks! { add_registrar { let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; ensure!(Registrars::::get().len() as u32 == r, "Registrars not set up correctly."); - }: _(RawOrigin::Root, account("registrar", r + 1, SEED)) + let origin = T::RegistrarOrigin::successful_origin(); + }: _(origin, account("registrar", r + 1, SEED)) verify { ensure!(Registrars::::get().len() as u32 == r + 1, "Registrars not added."); } @@ -259,7 +264,8 @@ benchmarks! { let r in 1 .. 
T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + let registrar_origin = T::RegistrarOrigin::successful_origin(); + Identity::::add_registrar(registrar_origin, caller.clone())?; let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().fee == 0u32.into(), "Fee already set."); }: _(RawOrigin::Signed(caller), r, 100u32.into()) @@ -274,7 +280,8 @@ benchmarks! { let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + let registrar_origin = T::RegistrarOrigin::successful_origin(); + Identity::::add_registrar(registrar_origin, caller.clone())?; let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().account == caller, "id not set."); }: _(RawOrigin::Signed(caller), r, account("new", 0, SEED)) @@ -289,7 +296,8 @@ benchmarks! { let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + let registrar_origin = T::RegistrarOrigin::successful_origin(); + Identity::::add_registrar(registrar_origin, caller.clone())?; let fields = IdentityFields( IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter @@ -318,7 +326,8 @@ benchmarks! { Identity::::set_identity(user_origin.clone(), Box::new(info))?; }; - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + let registrar_origin = T::RegistrarOrigin::successful_origin(); + Identity::::add_registrar(registrar_origin, caller.clone())?; Identity::::request_judgement(user_origin, r, 10u32.into())?; }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) verify { @@ -350,7 +359,8 @@ benchmarks! { )?; } ensure!(IdentityOf::::contains_key(&target), "Identity not set"); - }: _(RawOrigin::Root, target_lookup) + let origin = T::ForceOrigin::successful_origin(); + }: _(origin, target_lookup) verify { ensure!(!IdentityOf::::contains_key(&target), "Identity not removed"); } diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 9c97bc5973384..a8e36b72814b7 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -23,7 +23,6 @@ use frame_support::{ ensure, traits::{OnInitialize, PreimageProvider, PreimageRecipient}, }; -use frame_system::RawOrigin; use sp_runtime::traits::Hash; use sp_std::{prelude::*, vec}; @@ -32,6 +31,8 @@ use frame_system::Pallet as System; const BLOCK_NUMBER: u32 = 2; +type SystemOrigin = ::Origin; + /// Add `n` named items to the schedule. /// /// For `resolved`: @@ -210,7 +211,8 @@ benchmarks! { let call = Box::new(CallOrHashOf::::Value(inner_call)); fill_schedule::(when, s, true, true, Some(false))?; - }: _(RawOrigin::Root, when, periodic, priority, call) + let schedule_origin = T::ScheduleOrigin::successful_origin(); + }: _>(schedule_origin, when, periodic, priority, call) verify { ensure!( Agenda::::get(when).len() == (s + 1) as usize, @@ -224,7 +226,8 @@ benchmarks! { fill_schedule::(when, s, true, true, Some(false))?; assert_eq!(Agenda::::get(when).len(), s as usize); - }: _(RawOrigin::Root, when, 0) + let schedule_origin = T::ScheduleOrigin::successful_origin(); + }: _>(schedule_origin, when, 0) verify { ensure!( Lookup::::get(0.encode()).is_none(), @@ -248,7 +251,8 @@ benchmarks! 
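The change applied across these benchmarks is to stop hard-coding `RawOrigin::Root` and instead ask the pallet's configured `EnsureOrigin` for an origin that is guaranteed to pass its own check, so the benchmarks keep working on runtimes where the privileged origin is not Root. A self-contained sketch of that idea, using toy types rather than the FRAME traits:

    /// Stand-in for `EnsureOrigin`: a policy that can check an origin and can also
    /// produce one that is guaranteed to satisfy the check (useful for benchmarks).
    trait EnsureOrigin {
        type Origin;
        fn check(origin: &Self::Origin) -> bool;
        fn successful_origin() -> Self::Origin;
    }

    #[derive(Debug, PartialEq)]
    enum Origin {
        Root,
        Signed(u64),
    }

    /// A policy that only lets account 42 through; Root is rejected.
    struct EnsureAccount42;

    impl EnsureOrigin for EnsureAccount42 {
        type Origin = Origin;
        fn check(origin: &Origin) -> bool {
            matches!(origin, Origin::Signed(42))
        }
        fn successful_origin() -> Origin {
            Origin::Signed(42)
        }
    }

    fn main() {
        // A benchmark that hard-coded `Origin::Root` would fail under this policy;
        // asking the policy itself always yields a passing origin.
        let origin = EnsureAccount42::successful_origin();
        assert!(EnsureAccount42::check(&origin));
        assert!(!EnsureAccount42::check(&Origin::Root));
        println!("benchmark origin: {origin:?}");
    }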
{ let call = Box::new(CallOrHashOf::::Value(inner_call)); fill_schedule::(when, s, true, true, Some(false))?; - }: _(RawOrigin::Root, id, when, periodic, priority, call) + let schedule_origin = T::ScheduleOrigin::successful_origin(); + }: _>(schedule_origin, id, when, periodic, priority, call) verify { ensure!( Agenda::::get(when).len() == (s + 1) as usize, @@ -261,7 +265,8 @@ benchmarks! { let when = BLOCK_NUMBER.into(); fill_schedule::(when, s, true, true, Some(false))?; - }: _(RawOrigin::Root, 0.encode()) + let schedule_origin = T::ScheduleOrigin::successful_origin(); + }: _>(schedule_origin, 0.encode()) verify { ensure!( Lookup::::get(0.encode()).is_none(), diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 0950478fba089..37d13a54ba5ed 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -71,6 +71,6 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-election-provider-support/runtime-benchmarks", "rand_chacha", - "sp-staking/runtime-benchmarks" + "sp-staking/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs index 33e455bd3b9fd..4c14cb4de94e1 100644 --- a/frame/tips/src/benchmarking.rs +++ b/frame/tips/src/benchmarking.rs @@ -190,7 +190,8 @@ benchmarks_instance_pallet! { let reason_hash = T::Hashing::hash(&reason[..]); let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); ensure!(Tips::::contains_key(hash), "tip does not exist"); - }: _(RawOrigin::Root, hash) + let reject_origin = T::RejectOrigin::successful_origin(); + }: _(reject_origin, hash) impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test); } diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index ddb952383370d..ed30b2f7358e5 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -99,7 +99,8 @@ benchmarks_instance_pallet! { beneficiary_lookup )?; let proposal_id = Treasury::::proposal_count() - 1; - }: _(RawOrigin::Root, proposal_id) + let reject_origin = T::RejectOrigin::successful_origin(); + }: _(reject_origin, proposal_id) approve_proposal { let p in 0 .. T::MaxApprovals::get() - 1; @@ -111,7 +112,8 @@ benchmarks_instance_pallet! { beneficiary_lookup )?; let proposal_id = Treasury::::proposal_count() - 1; - }: _(RawOrigin::Root, proposal_id) + let approve_origin = T::ApproveOrigin::successful_origin(); + }: _(approve_origin, proposal_id) remove_approval { let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); @@ -122,7 +124,8 @@ benchmarks_instance_pallet! { )?; let proposal_id = Treasury::::proposal_count() - 1; Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; - }: _(RawOrigin::Root, proposal_id) + let reject_origin = T::RejectOrigin::successful_origin(); + }: _(reject_origin, proposal_id) on_initialize_proposals { let p in 0 .. 
T::MaxApprovals::get(); From 0fd19982a6a94288c81e31bb6f75f499d4d71b46 Mon Sep 17 00:00:00 2001 From: Aramik Date: Wed, 17 Aug 2022 05:35:56 -0700 Subject: [PATCH 042/348] benchmark-cli: add child tree support (#12021) * benchmark-cli: add child tree support * removed extra comments * addressed pr comments * clean up * addressed pr comments --- bin/node/cli/tests/benchmark_storage_works.rs | 1 + .../frame/benchmarking-cli/src/storage/cmd.rs | 18 +- .../benchmarking-cli/src/storage/read.rs | 41 +++- .../benchmarking-cli/src/storage/write.rs | 189 ++++++++++++++---- 4 files changed, 201 insertions(+), 48 deletions(-) diff --git a/bin/node/cli/tests/benchmark_storage_works.rs b/bin/node/cli/tests/benchmark_storage_works.rs index 30f860e48459f..82d1c943ae7aa 100644 --- a/bin/node/cli/tests/benchmark_storage_works.rs +++ b/bin/node/cli/tests/benchmark_storage_works.rs @@ -47,6 +47,7 @@ fn benchmark_storage(db: &str, base_path: &Path) -> ExitStatus { .args(["--state-version", "1"]) .args(["--warmups", "0"]) .args(["--add", "100", "--mul", "1.2", "--metric", "p75"]) + .arg("--include-child-trees") .status() .unwrap() } diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index b8264dc009232..0a92636b151a2 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -24,7 +24,7 @@ use sp_core::storage::StorageKey; use sp_database::{ColumnId, Database}; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_state_machine::Storage; -use sp_storage::StateVersion; +use sp_storage::{ChildInfo, ChildType, PrefixedStorageKey, StateVersion}; use clap::{Args, Parser}; use log::info; @@ -99,6 +99,10 @@ pub struct StorageParams { /// State cache size. #[clap(long, default_value = "0")] pub state_cache_size: usize, + + /// Include child trees in benchmark. + #[clap(long)] + pub include_child_trees: bool, } impl StorageCmd { @@ -155,6 +159,16 @@ impl StorageCmd { } } + /// Returns Some if child node and None if regular + pub(crate) fn is_child_key(&self, key: Vec) -> Option { + if let Some((ChildType::ParentKeyId, storage_key)) = + ChildType::from_prefixed_key(&PrefixedStorageKey::new(key)) + { + return Some(ChildInfo::new_default(storage_key)) + } + None + } + /// Run some rounds of the (read) benchmark as warmup. /// See `frame_benchmarking_cli::storage::read::bench_read` for detailed comments. fn bench_warmup(&self, client: &Arc) -> Result<()> @@ -171,7 +185,7 @@ impl StorageCmd { for i in 0..self.params.warmups { info!("Warmup round {}/{}", i + 1, self.params.warmups); - for key in keys.clone() { + for key in keys.as_slice() { let _ = client .storage(&block, &key) .expect("Checked above to exist") diff --git a/utils/frame/benchmarking-cli/src/storage/read.rs b/utils/frame/benchmarking-cli/src/storage/read.rs index c1dc6daba0953..cba318f87ea98 100644 --- a/utils/frame/benchmarking-cli/src/storage/read.rs +++ b/utils/frame/benchmarking-cli/src/storage/read.rs @@ -50,16 +50,43 @@ impl StorageCmd { let (mut rng, _) = new_rng(None); keys.shuffle(&mut rng); + let mut child_nodes = Vec::new(); // Interesting part here: // Read all the keys in the database and measure the time it takes to access each. 
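The `is_child_key` helper added above relies on default child tries being addressed through a prefixed key in the top trie, which `ChildType::from_prefixed_key` recognises and strips. A rough standalone illustration of that prefix check; the prefix constant is an assumption for this sketch, and the real code goes through the `sp_storage` types instead:

    /// Prefix assumed for default child trie keys in this sketch.
    const DEFAULT_CHILD_PREFIX: &[u8] = b":child_storage:default:";

    /// Returns the unprefixed child storage key if `key` addresses a default child trie.
    fn default_child_storage_key(key: &[u8]) -> Option<&[u8]> {
        key.strip_prefix(DEFAULT_CHILD_PREFIX)
    }

    fn main() {
        let child_key = b":child_storage:default:my_trie".to_vec();
        let top_key = b"regular_key".to_vec();
        assert_eq!(default_child_storage_key(&child_key), Some(&b"my_trie"[..]));
        assert_eq!(default_child_storage_key(&top_key), None);
        println!("prefix check ok");
    }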
info!("Reading {} keys", keys.len()); - for key in keys.clone() { - let start = Instant::now(); - let v = client - .storage(&block, &key) - .expect("Checked above to exist") - .ok_or("Value unexpectedly empty")?; - record.append(v.0.len(), start.elapsed())?; + for key in keys.as_slice() { + match (self.params.include_child_trees, self.is_child_key(key.clone().0)) { + (true, Some(info)) => { + // child tree key + let child_keys = client.child_storage_keys(&block, &info, &empty_prefix)?; + for ck in child_keys { + child_nodes.push((ck.clone(), info.clone())); + } + }, + _ => { + // regular key + let start = Instant::now(); + let v = client + .storage(&block, &key) + .expect("Checked above to exist") + .ok_or("Value unexpectedly empty")?; + record.append(v.0.len(), start.elapsed())?; + }, + } + } + + if self.params.include_child_trees { + child_nodes.shuffle(&mut rng); + + info!("Reading {} child keys", child_nodes.len()); + for (key, info) in child_nodes.as_slice() { + let start = Instant::now(); + let v = client + .child_storage(&block, info, key) + .expect("Checked above to exist") + .ok_or("Value unexpectedly empty")?; + record.append(v.0.len(), start.elapsed())?; + } } Ok(record) } diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index ab25109a35d49..0d10ef1b7414d 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -16,7 +16,7 @@ // limitations under the License. use sc_cli::Result; -use sc_client_api::UsageProvider; +use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider}; use sc_client_db::{DbHash, DbState}; use sp_api::StateBackend; use sp_blockchain::HeaderBackend; @@ -29,7 +29,12 @@ use sp_trie::PrefixedMemoryDB; use log::{info, trace}; use rand::prelude::*; -use std::{fmt::Debug, sync::Arc, time::Instant}; +use sp_storage::{ChildInfo, StateVersion}; +use std::{ + fmt::Debug, + sync::Arc, + time::{Duration, Instant}, +}; use super::cmd::StorageCmd; use crate::shared::{new_rng, BenchRecord}; @@ -37,7 +42,7 @@ use crate::shared::{new_rng, BenchRecord}; impl StorageCmd { /// Benchmarks the time it takes to write a single Storage item. /// Uses the latest state that is available for the given client. - pub(crate) fn bench_write( + pub(crate) fn bench_write( &self, client: Arc, (db, state_col): (Arc>, ColumnId), @@ -46,7 +51,8 @@ impl StorageCmd { where Block: BlockT

+ Debug, H: HeaderT, - C: UsageProvider + HeaderBackend, + BA: ClientBackend, + C: UsageProvider + HeaderBackend + StorageProvider, { // Store the time that it took to write each value. let mut record = BenchRecord::default(); @@ -61,50 +67,96 @@ impl StorageCmd { let mut kvs = trie.pairs(); let (mut rng, _) = new_rng(None); kvs.shuffle(&mut rng); + info!("Writing {} keys", kvs.len()); + + let mut child_nodes = Vec::new(); // Generate all random values first; Make sure there are no collisions with existing // db entries, so we can rollback all additions without corrupting existing entries. - for (k, original_v) in kvs.iter_mut() { - 'retry: loop { - let mut new_v = vec![0; original_v.len()]; - // Create a random value to overwrite with. - // NOTE: We use a possibly higher entropy than the original value, - // could be improved but acts as an over-estimation which is fine for now. - rng.fill_bytes(&mut new_v[..]); - let new_kv = vec![(k.as_ref(), Some(new_v.as_ref()))]; - let (_, mut stx) = trie.storage_root(new_kv.iter().cloned(), self.state_version()); - for (mut k, (_, rc)) in stx.drain().into_iter() { - if rc > 0 { - db.sanitize_key(&mut k); - if db.get(state_col, &k).is_some() { - trace!("Benchmark-store key creation: Key collision detected, retry"); - continue 'retry + for (k, original_v) in kvs { + match (self.params.include_child_trees, self.is_child_key(k.to_vec())) { + (true, Some(info)) => { + let child_keys = + client.child_storage_keys_iter(&block, info.clone(), None, None)?; + for ck in child_keys { + child_nodes.push((ck.clone(), info.clone())); + } + }, + _ => { + // regular key + let mut new_v = vec![0; original_v.len()]; + loop { + // Create a random value to overwrite with. + // NOTE: We use a possibly higher entropy than the original value, + // could be improved but acts as an over-estimation which is fine for now. + rng.fill_bytes(&mut new_v[..]); + if check_new_value::( + db.clone(), + &trie, + &k.to_vec(), + &new_v, + self.state_version(), + state_col, + None, + ) { + break } } - } - *original_v = new_v; - break + + // Write each value in one commit. + let (size, duration) = measure_write::( + db.clone(), + &trie, + k.to_vec(), + new_v.to_vec(), + self.state_version(), + state_col, + None, + )?; + record.append(size, duration)?; + }, } } - info!("Writing {} keys", kvs.len()); - // Write each value in one commit. - for (k, new_v) in kvs.iter() { - // Interesting part here: - let start = Instant::now(); - // Create a TX that will modify the Trie in the DB and - // calculate the root hash of the Trie after the modification. - let replace = vec![(k.as_ref(), Some(new_v.as_ref()))]; - let (_, stx) = trie.storage_root(replace.iter().cloned(), self.state_version()); - // Only the keep the insertions, since we do not want to benchmark pruning. - let tx = convert_tx::(db.clone(), stx.clone(), false, state_col); - db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; - record.append(new_v.len(), start.elapsed())?; - - // Now undo the changes by removing what was added. 
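Each sample above commits the modified trie nodes, records the elapsed time, and then reverts the insertion so that every key is measured against the same starting state. Stripped of the trie and database machinery, the measure-then-undo pattern looks roughly like this toy stand-in (a `HashMap` instead of the real backend; names are illustrative only):

    use std::collections::HashMap;
    use std::time::{Duration, Instant};

    /// Overwrites `key`, measures the write, then restores the previous value so the
    /// map is unchanged once the measurement has been taken.
    fn measure_write(
        db: &mut HashMap<Vec<u8>, Vec<u8>>,
        key: Vec<u8>,
        new_v: Vec<u8>,
    ) -> (usize, Duration) {
        let written = new_v.len();
        let start = Instant::now();
        let old = db.insert(key.clone(), new_v);
        let took = start.elapsed();

        // Undo the change so later samples start from the original state.
        match old {
            Some(previous) => {
                db.insert(key, previous);
            },
            None => {
                db.remove(&key);
            },
        }
        (written, took)
    }

    fn main() {
        let mut db = HashMap::from([(b"a".to_vec(), vec![1u8, 2, 3])]);
        let (size, took) = measure_write(&mut db, b"a".to_vec(), vec![9u8; 32]);
        println!("wrote {size} bytes in {took:?}");
        assert_eq!(db[&b"a".to_vec()], vec![1u8, 2, 3]); // state restored
    }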
- let tx = convert_tx::(db.clone(), stx.clone(), true, state_col); - db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; + if self.params.include_child_trees { + child_nodes.shuffle(&mut rng); + info!("Writing {} child keys", child_nodes.len()); + + for (key, info) in child_nodes { + if let Some(original_v) = client + .child_storage(&block, &info.clone(), &key) + .expect("Checked above to exist") + { + let mut new_v = vec![0; original_v.0.len()]; + loop { + rng.fill_bytes(&mut new_v[..]); + if check_new_value::( + db.clone(), + &trie, + &key.0, + &new_v, + self.state_version(), + state_col, + Some(&info), + ) { + break + } + } + + let (size, duration) = measure_write::( + db.clone(), + &trie, + key.0, + new_v.to_vec(), + self.state_version(), + state_col, + Some(&info), + )?; + record.append(size, duration)?; + } + } } + Ok(record) } } @@ -134,3 +186,62 @@ fn convert_tx( } ret } + +/// Measures write benchmark +/// if `child_info` exist then it means this is a child tree key +fn measure_write( + db: Arc>, + trie: &DbState, + key: Vec, + new_v: Vec, + version: StateVersion, + col: ColumnId, + child_info: Option<&ChildInfo>, +) -> Result<(usize, Duration)> { + let start = Instant::now(); + // Create a TX that will modify the Trie in the DB and + // calculate the root hash of the Trie after the modification. + let replace = vec![(key.as_ref(), Some(new_v.as_ref()))]; + let stx = match child_info { + Some(info) => trie.child_storage_root(info, replace.iter().cloned(), version).2, + None => trie.storage_root(replace.iter().cloned(), version).1, + }; + // Only the keep the insertions, since we do not want to benchmark pruning. + let tx = convert_tx::(db.clone(), stx.clone(), false, col); + db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; + let result = (new_v.len(), start.elapsed()); + + // Now undo the changes by removing what was added. + let tx = convert_tx::(db.clone(), stx.clone(), true, col); + db.commit(tx).map_err(|e| format!("Writing to the Database: {}", e))?; + Ok(result) +} + +/// Checks if a new value causes any collision in tree updates +/// returns true if there is no collision +/// if `child_info` exist then it means this is a child tree key +fn check_new_value( + db: Arc>, + trie: &DbState, + key: &Vec, + new_v: &Vec, + version: StateVersion, + col: ColumnId, + child_info: Option<&ChildInfo>, +) -> bool { + let new_kv = vec![(key.as_ref(), Some(new_v.as_ref()))]; + let mut stx = match child_info { + Some(info) => trie.child_storage_root(info, new_kv.iter().cloned(), version).2, + None => trie.storage_root(new_kv.iter().cloned(), version).1, + }; + for (mut k, (_, rc)) in stx.drain().into_iter() { + if rc > 0 { + db.sanitize_key(&mut k); + if db.get(col, &k).is_some() { + trace!("Benchmark-store key creation: Key collision detected, retry"); + return false + } + } + } + true +} From 2cb89be0a20a119e4bc1a7b255a9dd75b1fc8925 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 17 Aug 2022 17:36:18 +0200 Subject: [PATCH 043/348] Fix leaf block removal in the backend (#12005) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix leaf block removal in the backend The fix introduced the new 'removal' method for the backend leaves set and the improvement of the undo features. * Update docs * Apply suggestions from code review Co-authored-by: Bastian Köcher * Fix docs typo * On block block removal the new children list should be persisted. 
* Align leaves set removal tests to the new interface Co-authored-by: Bastian Köcher --- client/api/src/leaves.rs | 255 ++++++++++++++++++++++++++------------- client/db/src/lib.rs | 92 ++++++++++++-- 2 files changed, 252 insertions(+), 95 deletions(-) diff --git a/client/api/src/leaves.rs b/client/api/src/leaves.rs index 26eda46e6f76f..cdcb80a110b74 100644 --- a/client/api/src/leaves.rs +++ b/client/api/src/leaves.rs @@ -32,32 +32,36 @@ struct LeafSetItem { number: Reverse, } -/// A displaced leaf after import. -#[must_use = "Displaced items from the leaf set must be handled."] -pub struct ImportDisplaced { - new_hash: H, - displaced: LeafSetItem, +/// Inserted and removed leaves after an import action. +pub struct ImportOutcome { + inserted: LeafSetItem, + removed: Option, } -/// Displaced leaves after finalization. -#[must_use = "Displaced items from the leaf set must be handled."] -pub struct FinalizationDisplaced { - leaves: BTreeMap, Vec>, +/// Inserted and removed leaves after a remove action. +pub struct RemoveOutcome { + inserted: Option, + removed: LeafSetItem, } -impl FinalizationDisplaced { +/// Removed leaves after a finalization action. +pub struct FinalizationOutcome { + removed: BTreeMap, Vec>, +} + +impl FinalizationOutcome { /// Merge with another. This should only be used for displaced items that /// are produced within one transaction of each other. pub fn merge(&mut self, mut other: Self) { // this will ignore keys that are in duplicate, however // if these are actually produced correctly via the leaf-set within // one transaction, then there will be no overlap in the keys. - self.leaves.append(&mut other.leaves); + self.removed.append(&mut other.removed); } /// Iterate over all displaced leaves. pub fn leaves(&self) -> impl Iterator { - self.leaves.values().flatten() + self.removed.values().flatten() } } @@ -99,27 +103,52 @@ where } /// Update the leaf list on import. - /// Returns a displaced leaf if there was one. - pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option> { - // avoid underflow for genesis. - let displaced = if number != N::zero() { - let parent_number = Reverse(number.clone() - N::one()); - let was_displaced = self.remove_leaf(&parent_number, &parent_hash); - - if was_displaced { - Some(ImportDisplaced { - new_hash: hash.clone(), - displaced: LeafSetItem { hash: parent_hash, number: parent_number }, - }) - } else { - None - } + pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> ImportOutcome { + let number = Reverse(number); + + let removed = if number.0 != N::zero() { + let parent_number = Reverse(number.0.clone() - N::one()); + self.remove_leaf(&parent_number, &parent_hash).then(|| parent_hash) } else { None }; - self.insert_leaf(Reverse(number.clone()), hash.clone()); - displaced + self.insert_leaf(number.clone(), hash.clone()); + + ImportOutcome { inserted: LeafSetItem { hash, number }, removed } + } + + /// Update the leaf list on removal. + /// + /// Note that the leaves set structure doesn't have the information to decide if the + /// leaf we're removing is the last children of the parent. Follows that this method requires + /// the caller to check this condition and optionally pass the `parent_hash` if `hash` is + /// its last child. + /// + /// Returns `None` if no modifications are applied. 
+ pub fn remove( + &mut self, + hash: H, + number: N, + parent_hash: Option, + ) -> Option> { + let number = Reverse(number); + + if !self.remove_leaf(&number, &hash) { + return None + } + + let inserted = parent_hash.and_then(|parent_hash| { + if number.0 != N::zero() { + let parent_number = Reverse(number.0.clone() - N::one()); + self.insert_leaf(parent_number, parent_hash.clone()); + Some(parent_hash) + } else { + None + } + }); + + Some(RemoveOutcome { inserted, removed: LeafSetItem { hash, number } }) } /// Note a block height finalized, displacing all leaves with number less than the finalized @@ -129,15 +158,15 @@ where /// same number as the finalized block, but with different hashes, the current behavior /// is simpler and our assumptions about how finalization works means that those leaves /// will be pruned soon afterwards anyway. - pub fn finalize_height(&mut self, number: N) -> FinalizationDisplaced { + pub fn finalize_height(&mut self, number: N) -> FinalizationOutcome { let boundary = if number == N::zero() { - return FinalizationDisplaced { leaves: BTreeMap::new() } + return FinalizationOutcome { removed: BTreeMap::new() } } else { number - N::one() }; let below_boundary = self.storage.split_off(&Reverse(boundary)); - FinalizationDisplaced { leaves: below_boundary } + FinalizationOutcome { removed: below_boundary } } /// The same as [`Self::finalize_height`], but it only simulates the operation. @@ -145,16 +174,16 @@ where /// This means that no changes are done. /// /// Returns the leaves that would be displaced by finalizing the given block. - pub fn displaced_by_finalize_height(&self, number: N) -> FinalizationDisplaced { + pub fn displaced_by_finalize_height(&self, number: N) -> FinalizationOutcome { let boundary = if number == N::zero() { - return FinalizationDisplaced { leaves: BTreeMap::new() } + return FinalizationOutcome { removed: BTreeMap::new() } } else { number - N::one() }; let below_boundary = self.storage.range(&Reverse(boundary)..); - FinalizationDisplaced { - leaves: below_boundary.map(|(k, v)| (k.clone(), v.clone())).collect(), + FinalizationOutcome { + removed: below_boundary.map(|(k, v)| (k.clone(), v.clone())).collect(), } } @@ -276,16 +305,30 @@ where H: Clone + PartialEq + Decode + Encode, N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, { - /// Undo an imported block by providing the displaced leaf. - pub fn undo_import(&mut self, displaced: ImportDisplaced) { - let new_number = Reverse(displaced.displaced.number.0.clone() + N::one()); - self.inner.remove_leaf(&new_number, &displaced.new_hash); - self.inner.insert_leaf(displaced.displaced.number, displaced.displaced.hash); + /// Undo an imported block by providing the import operation outcome. + /// No additional operations should be performed between import and undo. + pub fn undo_import(&mut self, outcome: ImportOutcome) { + if let Some(removed_hash) = outcome.removed { + let removed_number = Reverse(outcome.inserted.number.0.clone() - N::one()); + self.inner.insert_leaf(removed_number, removed_hash); + } + self.inner.remove_leaf(&outcome.inserted.number, &outcome.inserted.hash); + } + + /// Undo a removed block by providing the displaced leaf. + /// No additional operations should be performed between remove and undo. 
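The invariant maintained by these methods is that the set always holds exactly the blocks with no known children: importing a block displaces its parent from the set, while removing a leaf re-inserts the parent only when the caller knows it was the parent's last child. A much-reduced sketch of that bookkeeping, with `u32` standing in for the hash type and no block numbers or undo outcomes:

    use std::collections::BTreeSet;

    /// Toy leaf set: hashes of blocks with no known children.
    #[derive(Default)]
    struct Leaves {
        inner: BTreeSet<u32>,
    }

    impl Leaves {
        /// Importing a block makes it a leaf and displaces its parent, if the parent was one.
        fn import(&mut self, hash: u32, parent: u32) {
            self.inner.remove(&parent);
            self.inner.insert(hash);
        }

        /// Removing a leaf restores the parent only if the caller says this was its last child.
        fn remove(&mut self, hash: u32, parent_if_last_child: Option<u32>) {
            if self.inner.remove(&hash) {
                if let Some(parent) = parent_if_last_child {
                    self.inner.insert(parent);
                }
            }
        }
    }

    fn main() {
        let mut leaves = Leaves::default();
        leaves.import(1, 0);
        leaves.import(2, 1); // block 1 is displaced; block 2 is now the only leaf
        leaves.remove(2, Some(1)); // block 2 goes away; block 1 becomes a leaf again
        assert!(leaves.inner.contains(&1));
        assert!(!leaves.inner.contains(&2));
    }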
+ pub fn undo_remove(&mut self, outcome: RemoveOutcome) { + if let Some(inserted_hash) = outcome.inserted { + let inserted_number = Reverse(outcome.removed.number.0.clone() - N::one()); + self.inner.remove_leaf(&inserted_number, &inserted_hash); + } + self.inner.insert_leaf(outcome.removed.number, outcome.removed.hash); } /// Undo a finalization operation by providing the displaced leaves. - pub fn undo_finalization(&mut self, mut displaced: FinalizationDisplaced) { - self.inner.storage.append(&mut displaced.leaves); + /// No additional operations should be performed between finalization and undo. + pub fn undo_finalization(&mut self, mut outcome: FinalizationOutcome) { + self.inner.storage.append(&mut outcome.removed); } } @@ -295,7 +338,7 @@ mod tests { use std::sync::Arc; #[test] - fn it_works() { + fn import_works() { let mut set = LeafSet::new(); set.import(0u32, 0u32, 0u32); @@ -317,6 +360,90 @@ mod tests { assert!(set.contains(3, 3_1)); assert!(set.contains(2, 2_2)); assert!(set.contains(2, 2_3)); + + // Finally test the undo feature + + let outcome = set.import(2_4, 2, 1_1); + assert_eq!(outcome.inserted.hash, 2_4); + assert_eq!(outcome.removed, None); + assert_eq!(set.count(), 4); + assert!(set.contains(2, 2_4)); + + set.undo().undo_import(outcome); + assert_eq!(set.count(), 3); + assert!(set.contains(3, 3_1)); + assert!(set.contains(2, 2_2)); + assert!(set.contains(2, 2_3)); + + let outcome = set.import(3_2, 3, 2_3); + assert_eq!(outcome.inserted.hash, 3_2); + assert_eq!(outcome.removed, Some(2_3)); + assert_eq!(set.count(), 3); + assert!(set.contains(3, 3_2)); + + set.undo().undo_import(outcome); + assert_eq!(set.count(), 3); + assert!(set.contains(3, 3_1)); + assert!(set.contains(2, 2_2)); + assert!(set.contains(2, 2_3)); + } + + #[test] + fn removal_works() { + let mut set = LeafSet::new(); + set.import(10_1u32, 10u32, 0u32); + set.import(11_1, 11, 10_1); + set.import(11_2, 11, 10_1); + set.import(12_1, 12, 11_1); + + let outcome = set.remove(12_1, 12, Some(11_1)).unwrap(); + assert_eq!(outcome.removed.hash, 12_1); + assert_eq!(outcome.inserted, Some(11_1)); + assert_eq!(set.count(), 2); + assert!(set.contains(11, 11_1)); + assert!(set.contains(11, 11_2)); + + let outcome = set.remove(11_1, 11, None).unwrap(); + assert_eq!(outcome.removed.hash, 11_1); + assert_eq!(outcome.inserted, None); + assert_eq!(set.count(), 1); + assert!(set.contains(11, 11_2)); + + let outcome = set.remove(11_2, 11, Some(10_1)).unwrap(); + assert_eq!(outcome.removed.hash, 11_2); + assert_eq!(outcome.inserted, Some(10_1)); + assert_eq!(set.count(), 1); + assert!(set.contains(10, 10_1)); + + set.undo().undo_remove(outcome); + assert_eq!(set.count(), 1); + assert!(set.contains(11, 11_2)); + } + + #[test] + fn finalization_works() { + let mut set = LeafSet::new(); + set.import(9_1u32, 9u32, 0u32); + set.import(10_1, 10, 9_1); + set.import(10_2, 10, 9_1); + set.import(11_1, 11, 10_1); + set.import(11_2, 11, 10_1); + set.import(12_1, 12, 11_2); + + let outcome = set.finalize_height(11); + assert_eq!(set.count(), 2); + assert!(set.contains(11, 11_1)); + assert!(set.contains(12, 12_1)); + assert_eq!( + outcome.removed, + [(Reverse(10), vec![10_2])].into_iter().collect::>(), + ); + + set.undo().undo_finalization(outcome); + assert_eq!(set.count(), 3); + assert!(set.contains(11, 11_1)); + assert!(set.contains(12, 12_1)); + assert!(set.contains(10, 10_2)); } #[test] @@ -383,44 +510,4 @@ mod tests { let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); assert_eq!(set, set2); } - - #[test] - fn undo_import() 
{ - let mut set = LeafSet::new(); - set.import(10_1u32, 10u32, 0u32); - set.import(11_1, 11, 10_1); - set.import(11_2, 11, 10_1); - - let displaced = set.import(12_1, 12, 11_1).unwrap(); - assert_eq!(set.count(), 2); - assert!(set.contains(11, 11_2)); - assert!(set.contains(12, 12_1)); - - set.undo().undo_import(displaced); - assert_eq!(set.count(), 2); - assert!(set.contains(11, 11_1)); - assert!(set.contains(11, 11_2)); - } - - #[test] - fn undo_finalization() { - let mut set = LeafSet::new(); - set.import(9_1u32, 9u32, 0u32); - set.import(10_1, 10, 9_1); - set.import(10_2, 10, 9_1); - set.import(11_1, 11, 10_1); - set.import(11_2, 11, 10_1); - set.import(12_1, 12, 11_2); - - let displaced = set.finalize_height(11); - assert_eq!(set.count(), 2); - assert!(set.contains(11, 11_1)); - assert!(set.contains(12, 12_1)); - - set.undo().undo_finalization(displaced); - assert_eq!(set.count(), 3); - assert!(set.contains(11, 11_1)); - assert!(set.contains(12, 12_1)); - assert!(set.contains(10, 10_2)); - } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 6b644ad53d52b..71f291e66e793 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -59,7 +59,7 @@ use codec::{Decode, Encode}; use hash_db::Prefix; use sc_client_api::{ backend::NewBlockState, - leaves::{FinalizationDisplaced, LeafSet}, + leaves::{FinalizationOutcome, LeafSet}, utils::is_descendent_of, IoInfo, MemoryInfo, MemorySize, UsageInfo, }; @@ -1251,7 +1251,7 @@ impl Backend { header: &Block::Header, last_finalized: Option, justification: Option, - finalization_displaced: &mut Option>>, + finalization_displaced: &mut Option>>, ) -> ClientResult> { // TODO: ensure best chain contains this block. let number = *header.number(); @@ -1657,7 +1657,7 @@ impl Backend { transaction: &mut Transaction, f_header: &Block::Header, f_hash: Block::Hash, - displaced: &mut Option>>, + displaced: &mut Option>>, with_state: bool, ) -> ClientResult<()> { let f_num = *f_header.number(); @@ -1696,7 +1696,7 @@ impl Backend { &self, transaction: &mut Transaction, finalized: NumberFor, - displaced: &FinalizationDisplaced>, + displaced: &FinalizationOutcome>, ) -> ClientResult<()> { if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning { // Always keep the last finalized block @@ -2226,9 +2226,40 @@ impl sc_client_api::backend::Backend for Backend { apply_state_commit(&mut transaction, commit); } transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); - leaves.revert(*hash, hdr.number); + + let children: Vec<_> = self + .blockchain() + .children(hdr.parent)? 
+ .into_iter() + .filter(|child_hash| child_hash != hash) + .collect(); + let parent_leaf = if children.is_empty() { + children::remove_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + hdr.parent, + ); + Some(hdr.parent) + } else { + children::write_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + hdr.parent, + children, + ); + None + }; + + let remove_outcome = leaves.remove(*hash, hdr.number, parent_leaf); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - self.storage.db.commit(transaction)?; + if let Err(e) = self.storage.db.commit(transaction) { + if let Some(outcome) = remove_outcome { + leaves.undo().undo_remove(outcome); + } + return Err(e.into()) + } self.blockchain().remove_header_metadata(*hash); Ok(()) } @@ -3376,7 +3407,21 @@ pub(crate) mod tests { prev_hash = hash; } - // insert a fork at block 2, which becomes best block + for i in 0..2 { + let hash = insert_block( + &backend, + 2, + blocks[1], + None, + sp_core::H256::random(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + } + + // insert a fork at block 1, which becomes best block let best_hash = insert_block( &backend, 1, @@ -3387,11 +3432,36 @@ pub(crate) mod tests { None, ) .unwrap(); + + assert_eq!(backend.blockchain().info().best_hash, best_hash); assert!(backend.remove_leaf_block(&best_hash).is_err()); - assert!(backend.have_state_at(&prev_hash, 1)); - backend.remove_leaf_block(&prev_hash).unwrap(); - assert_eq!(None, backend.blockchain().header(BlockId::hash(prev_hash)).unwrap()); - assert!(!backend.have_state_at(&prev_hash, 1)); + + assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]); + assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]); + + assert!(backend.have_state_at(&blocks[3], 2)); + assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_some()); + backend.remove_leaf_block(&blocks[3]).unwrap(); + assert!(!backend.have_state_at(&blocks[3], 2)); + assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_none()); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]); + assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]); + + assert!(backend.have_state_at(&blocks[2], 2)); + assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_some()); + backend.remove_leaf_block(&blocks[2]).unwrap(); + assert!(!backend.have_state_at(&blocks[2], 2)); + assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_none()); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]); + assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]); + + assert!(backend.have_state_at(&blocks[1], 1)); + assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_some()); + backend.remove_leaf_block(&blocks[1]).unwrap(); + assert!(!backend.have_state_at(&blocks[1], 1)); + assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_none()); + assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]); + assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]); } #[test] From e7aa858767c2f19a1f084b98f7a43db52b53b969 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 17 Aug 2022 20:19:32 +0200 Subject: [PATCH 044/348] Make state types public (#12032) --- frame/contracts/src/chain_extension.rs | 98 
++++++++++++++++---------- 1 file changed, 60 insertions(+), 38 deletions(-) diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index 57101668f794d..29f8ad44a56ac 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -84,7 +84,6 @@ pub use crate::{exec::Ext, Config}; pub use frame_system::Config as SysConfig; pub use pallet_contracts_primitives::ReturnFlags; pub use sp_core::crypto::UncheckedFrom; -pub use state::Init as InitState; /// Result that returns a [`DispatchError`] on error. pub type Result = sp_std::result::Result; @@ -198,7 +197,7 @@ pub enum RetVal { /// /// It uses [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html) /// to enforce the correct usage of the parameters passed to the chain extension. -pub struct Environment<'a, 'b, E: Ext, S: state::State> { +pub struct Environment<'a, 'b, E: Ext, S: State> { /// The actual data of this type. inner: Inner<'a, 'b, E>, /// `S` is only used in the type system but never as value. @@ -206,7 +205,7 @@ pub struct Environment<'a, 'b, E: Ext, S: state::State> { } /// Functions that are available in every state of this type. -impl<'a, 'b, E: Ext, S: state::State> Environment<'a, 'b, E, S> +impl<'a, 'b, E: Ext, S: State> Environment<'a, 'b, E, S> where ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { @@ -264,7 +263,7 @@ where /// /// Those are the functions that determine how the arguments to the chain extensions /// should be consumed. -impl<'a, 'b, E: Ext> Environment<'a, 'b, E, state::Init> { +impl<'a, 'b, E: Ext> Environment<'a, 'b, E, InitState> { /// Creates a new environment for consumption by a chain extension. /// /// It is only available to this crate because only the wasm runtime module needs to @@ -284,23 +283,23 @@ impl<'a, 'b, E: Ext> Environment<'a, 'b, E, state::Init> { } /// Use all arguments as integer values. - pub fn only_in(self) -> Environment<'a, 'b, E, state::OnlyIn> { + pub fn only_in(self) -> Environment<'a, 'b, E, OnlyInState> { Environment { inner: self.inner, phantom: PhantomData } } /// Use input arguments as integer and output arguments as pointer to a buffer. - pub fn prim_in_buf_out(self) -> Environment<'a, 'b, E, state::PrimInBufOut> { + pub fn prim_in_buf_out(self) -> Environment<'a, 'b, E, PrimInBufOutState> { Environment { inner: self.inner, phantom: PhantomData } } /// Use input and output arguments as pointers to a buffer. - pub fn buf_in_buf_out(self) -> Environment<'a, 'b, E, state::BufInBufOut> { + pub fn buf_in_buf_out(self) -> Environment<'a, 'b, E, BufInBufOutState> { Environment { inner: self.inner, phantom: PhantomData } } } /// Functions to use the input arguments as integers. -impl<'a, 'b, E: Ext, S: state::PrimIn> Environment<'a, 'b, E, S> { +impl<'a, 'b, E: Ext, S: PrimIn> Environment<'a, 'b, E, S> { /// The `input_ptr` argument. pub fn val0(&self) -> u32 { self.inner.input_ptr @@ -313,7 +312,7 @@ impl<'a, 'b, E: Ext, S: state::PrimIn> Environment<'a, 'b, E, S> { } /// Functions to use the output arguments as integers. -impl<'a, 'b, E: Ext, S: state::PrimOut> Environment<'a, 'b, E, S> { +impl<'a, 'b, E: Ext, S: PrimOut> Environment<'a, 'b, E, S> { /// The `output_ptr` argument. pub fn val2(&self) -> u32 { self.inner.output_ptr @@ -326,7 +325,7 @@ impl<'a, 'b, E: Ext, S: state::PrimOut> Environment<'a, 'b, E, S> { } /// Functions to use the input arguments as pointer to a buffer. 
-impl<'a, 'b, E: Ext, S: state::BufIn> Environment<'a, 'b, E, S> +impl<'a, 'b, E: Ext, S: BufIn> Environment<'a, 'b, E, S> where ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { @@ -389,7 +388,7 @@ where } /// Functions to use the output arguments as pointer to a buffer. -impl<'a, 'b, E: Ext, S: state::BufOut> Environment<'a, 'b, E, S> +impl<'a, 'b, E: Ext, S: BufOut> Environment<'a, 'b, E, S> where ::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { @@ -438,31 +437,54 @@ struct Inner<'a, 'b, E: Ext> { output_len_ptr: u32, } -/// Private submodule with public types to prevent other modules from naming them. -mod state { - pub trait State {} - - pub trait PrimIn: State {} - pub trait PrimOut: State {} - pub trait BufIn: State {} - pub trait BufOut: State {} - - /// The initial state of an [`Environment`](`super::Environment`). - /// See [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html). - pub enum Init {} - pub enum OnlyIn {} - pub enum PrimInBufOut {} - pub enum BufInBufOut {} - - impl State for Init {} - impl State for OnlyIn {} - impl State for PrimInBufOut {} - impl State for BufInBufOut {} - - impl PrimIn for OnlyIn {} - impl PrimOut for OnlyIn {} - impl PrimIn for PrimInBufOut {} - impl BufOut for PrimInBufOut {} - impl BufIn for BufInBufOut {} - impl BufOut for BufInBufOut {} +/// Any state of an [`Environment`] implements this trait. +/// See [typestate programming](https://docs.rust-embedded.org/book/static-guarantees/typestate-programming.html). +pub trait State: sealed::Sealed {} + +/// A state that uses primitive inputs. +pub trait PrimIn: State {} + +/// A state that uses primitive outputs. +pub trait PrimOut: State {} + +/// A state that uses a buffer as input. +pub trait BufIn: State {} + +/// A state that uses a buffer as output. +pub trait BufOut: State {} + +/// The initial state of an [`Environment`]. +pub enum InitState {} + +/// A state that uses all arguments as primitive inputs. +pub enum OnlyInState {} + +/// A state that uses two arguments as primitive inputs and the other two as buffer output. +pub enum PrimInBufOutState {} + +/// Uses a buffer for input and a buffer for output. +pub enum BufInBufOutState {} + +mod sealed { + use super::*; + + /// Trait to prevent users from implementing `State` for anything else. 
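Making the state markers public while gating them behind a private `Sealed` trait is the usual way to let downstream code name the typestates without being able to add new ones. A small self-contained example of the same pattern, unrelated to contracts and purely illustrative:

    use std::marker::PhantomData;

    /// Type-level states; never instantiated, only used as type parameters.
    pub enum Draft {}
    pub enum Published {}

    /// Private module, so outside code cannot implement `Sealed` (and therefore not `State`).
    mod sealed {
        pub trait Sealed {}
        impl Sealed for super::Draft {}
        impl Sealed for super::Published {}
    }

    pub trait State: sealed::Sealed {}
    impl State for Draft {}
    impl State for Published {}

    pub struct Post<S: State> {
        body: String,
        _state: PhantomData<S>,
    }

    impl Post<Draft> {
        pub fn new(body: &str) -> Self {
            Self { body: body.into(), _state: PhantomData }
        }
        /// The state transition is encoded in the return type.
        pub fn publish(self) -> Post<Published> {
            Post { body: self.body, _state: PhantomData }
        }
    }

    impl Post<Published> {
        pub fn read(&self) -> &str {
            &self.body
        }
    }

    fn main() {
        let post = Post::new("hello").publish();
        println!("{}", post.read());
        // Post::new("draft").read(); // does not compile: `read` only exists for `Post<Published>`
    }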
+ pub trait Sealed {} + + impl Sealed for InitState {} + impl Sealed for OnlyInState {} + impl Sealed for PrimInBufOutState {} + impl Sealed for BufInBufOutState {} + + impl State for InitState {} + impl State for OnlyInState {} + impl State for PrimInBufOutState {} + impl State for BufInBufOutState {} + + impl PrimIn for OnlyInState {} + impl PrimOut for OnlyInState {} + impl PrimIn for PrimInBufOutState {} + impl BufOut for PrimInBufOutState {} + impl BufIn for BufInBufOutState {} + impl BufOut for BufInBufOutState {} } From 22b678f7284537b28c0983d6301faea3c607278c Mon Sep 17 00:00:00 2001 From: Doordashcon Date: Thu, 18 Aug 2022 10:30:46 +0100 Subject: [PATCH 045/348] Replace `T::AccountId` with `::Source` (#11670) * initial * update * update * update * cargo fmt * update * update benchmarks * AccountIdLookupOf * cargo fmt * fix conflits * cargo fmt * update Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi --- frame/alliance/src/benchmarking.rs | 6 +-- frame/alliance/src/lib.rs | 17 ++----- frame/assets/src/benchmarking.rs | 4 +- frame/assets/src/lib.rs | 50 ++++++++++--------- frame/bags-list/src/benchmarks.rs | 12 +++-- frame/bags-list/src/lib.rs | 13 +++-- frame/balances/src/benchmarking.rs | 20 ++++---- frame/balances/src/lib.rs | 16 +++--- frame/bounties/src/benchmarking.rs | 2 +- frame/bounties/src/lib.rs | 6 ++- frame/child-bounties/src/lib.rs | 5 +- frame/contracts/src/benchmarking/mod.rs | 2 +- frame/contracts/src/lib.rs | 5 +- frame/conviction-voting/src/benchmarking.rs | 12 +++-- frame/conviction-voting/src/lib.rs | 12 +++-- frame/democracy/src/benchmarking.rs | 18 ++++--- frame/democracy/src/lib.rs | 12 +++-- frame/elections-phragmen/src/benchmarking.rs | 4 +- frame/elections-phragmen/src/lib.rs | 4 +- frame/identity/src/benchmarking.rs | 19 ++++--- frame/identity/src/lib.rs | 20 ++++---- frame/indices/src/benchmarking.rs | 6 ++- frame/indices/src/lib.rs | 7 ++- frame/indices/src/tests.rs | 13 ++--- frame/membership/src/lib.rs | 42 +++++++++++----- frame/nicks/src/lib.rs | 8 ++- frame/node-authorization/src/lib.rs | 9 +++- .../nomination-pools/benchmarking/src/lib.rs | 25 ++++++---- frame/nomination-pools/src/lib.rs | 25 +++++++--- frame/proxy/src/benchmarking.rs | 32 ++++++++---- frame/proxy/src/lib.rs | 31 ++++++++---- frame/ranked-collective/src/benchmarking.rs | 25 +++++++--- frame/ranked-collective/src/lib.rs | 19 +++++-- frame/recovery/src/benchmarking.rs | 28 +++++++---- frame/recovery/src/lib.rs | 42 ++++++++++++---- frame/scored-pool/src/lib.rs | 5 +- frame/society/src/lib.rs | 15 ++++-- frame/staking/src/benchmarking.rs | 7 ++- frame/staking/src/lib.rs | 4 +- frame/staking/src/pallet/mod.rs | 19 +++---- frame/staking/src/testing_utils.rs | 27 ++++------ frame/sudo/src/lib.rs | 6 ++- frame/tips/src/benchmarking.rs | 18 ++++--- frame/tips/src/lib.rs | 9 ++-- frame/treasury/src/benchmarking.rs | 2 +- frame/treasury/src/lib.rs | 5 +- frame/uniques/src/benchmarking.rs | 11 ++-- frame/uniques/src/lib.rs | 37 +++++++------- frame/vesting/src/benchmarking.rs | 22 ++++---- frame/vesting/src/lib.rs | 16 +++--- 50 files changed, 465 insertions(+), 309 deletions(-) diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index 527c35b58a5d8..c23ded1640b58 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -662,7 +662,7 @@ benchmarks_instance_pallet! 
{ assert!(!Alliance::::is_member(&outsider)); assert_eq!(DepositOf::::get(&outsider), None); - let outsider_lookup: ::Source = T::Lookup::unlookup(outsider.clone()); + let outsider_lookup = T::Lookup::unlookup(outsider.clone()); }: _(SystemOrigin::Signed(founder1.clone()), outsider_lookup) verify { assert!(Alliance::::is_member_of(&outsider, MemberRole::Ally)); // outsider is now an ally @@ -681,7 +681,7 @@ benchmarks_instance_pallet! { let ally1 = ally::(1); assert!(Alliance::::is_ally(&ally1)); - let ally1_lookup: ::Source = T::Lookup::unlookup(ally1.clone()); + let ally1_lookup = T::Lookup::unlookup(ally1.clone()); let call = Call::::elevate_ally { ally: ally1_lookup }; let origin = T::MembershipManager::successful_origin(); }: { call.dispatch_bypass_filter(origin)? } @@ -720,7 +720,7 @@ benchmarks_instance_pallet! { assert_eq!(DepositOf::::get(&fellow2), Some(T::AllyDeposit::get())); - let fellow2_lookup: ::Source = T::Lookup::unlookup(fellow2.clone()); + let fellow2_lookup = T::Lookup::unlookup(fellow2.clone()); let call = Call::::kick_member { who: fellow2_lookup }; let origin = T::MembershipManager::successful_origin(); }: { call.dispatch_bypass_filter(origin)? } diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 111ea5dc6e507..0f4d43505e3f9 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -210,6 +210,8 @@ pub enum UnscrupulousItem { type UnscrupulousItemOf = UnscrupulousItem<::AccountId, UrlOf>; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -744,10 +746,7 @@ pub mod pallet { /// A founder or fellow can nominate someone to join the alliance as an Ally. /// There is no deposit required to the nominator or nominee. #[pallet::weight(T::WeightInfo::nominate_ally())] - pub fn nominate_ally( - origin: OriginFor, - who: ::Source, - ) -> DispatchResult { + pub fn nominate_ally(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let nominator = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&nominator), Error::::NoVotingRights); let who = T::Lookup::lookup(who)?; @@ -771,10 +770,7 @@ pub mod pallet { /// Elevate an ally to fellow. #[pallet::weight(T::WeightInfo::elevate_ally())] - pub fn elevate_ally( - origin: OriginFor, - ally: ::Source, - ) -> DispatchResult { + pub fn elevate_ally(origin: OriginFor, ally: AccountIdLookupOf) -> DispatchResult { T::MembershipManager::ensure_origin(origin)?; let ally = T::Lookup::lookup(ally)?; ensure!(Self::is_ally(&ally), Error::::NotAlly); @@ -807,10 +803,7 @@ pub mod pallet { /// Kick a member from the alliance and slash its deposit. 
#[pallet::weight(T::WeightInfo::kick_member())] - pub fn kick_member( - origin: OriginFor, - who: ::Source, - ) -> DispatchResult { + pub fn kick_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::MembershipManager::ensure_origin(origin)?; let member = T::Lookup::lookup(who)?; diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index ca88899edf842..043d3f1a1aef4 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -37,7 +37,7 @@ const SEED: u32 = 0; fn create_default_asset, I: 'static>( is_sufficient: bool, -) -> (T::AccountId, ::Source) { +) -> (T::AccountId, AccountIdLookupOf) { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let root = SystemOrigin::Root.into(); @@ -55,7 +55,7 @@ fn create_default_asset, I: 'static>( fn create_default_minted_asset, I: 'static>( is_sufficient: bool, amount: T::Balance, -) -> (T::AccountId, ::Source) { +) -> (T::AccountId, AccountIdLookupOf) { let (caller, caller_lookup) = create_default_asset::(is_sufficient); if !is_sufficient { T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index e0b00c5642c81..d7ca83c8a84e3 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -164,6 +164,8 @@ use frame_system::Config as SystemConfig; pub use pallet::*; pub use weights::WeightInfo; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -501,7 +503,7 @@ pub mod pallet { pub fn create( origin: OriginFor, #[pallet::compact] id: T::AssetId, - admin: ::Source, + admin: AccountIdLookupOf, min_balance: T::Balance, ) -> DispatchResult { let owner = ensure_signed(origin)?; @@ -557,7 +559,7 @@ pub mod pallet { pub fn force_create( origin: OriginFor, #[pallet::compact] id: T::AssetId, - owner: ::Source, + owner: AccountIdLookupOf, is_sufficient: bool, #[pallet::compact] min_balance: T::Balance, ) -> DispatchResult { @@ -623,7 +625,7 @@ pub mod pallet { pub fn mint( origin: OriginFor, #[pallet::compact] id: T::AssetId, - beneficiary: ::Source, + beneficiary: AccountIdLookupOf, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -651,7 +653,7 @@ pub mod pallet { pub fn burn( origin: OriginFor, #[pallet::compact] id: T::AssetId, - who: ::Source, + who: AccountIdLookupOf, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -684,7 +686,7 @@ pub mod pallet { pub fn transfer( origin: OriginFor, #[pallet::compact] id: T::AssetId, - target: ::Source, + target: AccountIdLookupOf, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -716,7 +718,7 @@ pub mod pallet { pub fn transfer_keep_alive( origin: OriginFor, #[pallet::compact] id: T::AssetId, - target: ::Source, + target: AccountIdLookupOf, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let source = ensure_signed(origin)?; @@ -749,8 +751,8 @@ pub mod pallet { pub fn force_transfer( origin: OriginFor, #[pallet::compact] id: T::AssetId, - source: ::Source, - dest: ::Source, + source: AccountIdLookupOf, + dest: AccountIdLookupOf, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -775,7 +777,7 @@ pub mod pallet { pub fn freeze( origin: OriginFor, #[pallet::compact] id: T::AssetId, - who: ::Source, + who: 
AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -806,7 +808,7 @@ pub mod pallet { pub fn thaw( origin: OriginFor, #[pallet::compact] id: T::AssetId, - who: ::Source, + who: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -891,7 +893,7 @@ pub mod pallet { pub fn transfer_ownership( origin: OriginFor, #[pallet::compact] id: T::AssetId, - owner: ::Source, + owner: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; @@ -932,9 +934,9 @@ pub mod pallet { pub fn set_team( origin: OriginFor, #[pallet::compact] id: T::AssetId, - issuer: ::Source, - admin: ::Source, - freezer: ::Source, + issuer: AccountIdLookupOf, + admin: AccountIdLookupOf, + freezer: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; let issuer = T::Lookup::lookup(issuer)?; @@ -1117,10 +1119,10 @@ pub mod pallet { pub fn force_asset_status( origin: OriginFor, #[pallet::compact] id: T::AssetId, - owner: ::Source, - issuer: ::Source, - admin: ::Source, - freezer: ::Source, + owner: AccountIdLookupOf, + issuer: AccountIdLookupOf, + admin: AccountIdLookupOf, + freezer: AccountIdLookupOf, #[pallet::compact] min_balance: T::Balance, is_sufficient: bool, is_frozen: bool, @@ -1167,7 +1169,7 @@ pub mod pallet { pub fn approve_transfer( origin: OriginFor, #[pallet::compact] id: T::AssetId, - delegate: ::Source, + delegate: AccountIdLookupOf, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let owner = ensure_signed(origin)?; @@ -1192,7 +1194,7 @@ pub mod pallet { pub fn cancel_approval( origin: OriginFor, #[pallet::compact] id: T::AssetId, - delegate: ::Source, + delegate: AccountIdLookupOf, ) -> DispatchResult { let owner = ensure_signed(origin)?; let delegate = T::Lookup::lookup(delegate)?; @@ -1225,8 +1227,8 @@ pub mod pallet { pub fn force_cancel_approval( origin: OriginFor, #[pallet::compact] id: T::AssetId, - owner: ::Source, - delegate: ::Source, + owner: AccountIdLookupOf, + delegate: AccountIdLookupOf, ) -> DispatchResult { let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; T::ForceOrigin::try_origin(origin) @@ -1272,8 +1274,8 @@ pub mod pallet { pub fn transfer_approved( origin: OriginFor, #[pallet::compact] id: T::AssetId, - owner: ::Source, - destination: ::Source, + owner: AccountIdLookupOf, + destination: AccountIdLookupOf, #[pallet::compact] amount: T::Balance, ) -> DispatchResult { let delegate = ensure_signed(origin)?; diff --git a/frame/bags-list/src/benchmarks.rs b/frame/bags-list/src/benchmarks.rs index bb45da076f751..1f66697cb6765 100644 --- a/frame/bags-list/src/benchmarks.rs +++ b/frame/bags-list/src/benchmarks.rs @@ -57,6 +57,8 @@ frame_benchmarking::benchmarks_instance_pallet! { let dest_head: T::AccountId = account("dest_head", 0, 0); assert_ok!(List::::insert(dest_head.clone(), dest_bag_thresh)); + let origin_middle_lookup = T::Lookup::unlookup(origin_middle.clone()); + // the bags are in the expected state after initial setup. assert_eq!( List::::get_bags(), @@ -69,7 +71,7 @@ frame_benchmarking::benchmarks_instance_pallet! { let caller = whitelisted_caller(); // update the weight of `origin_middle` to guarantee it will be rebagged into the destination. T::ScoreProvider::set_score_of(&origin_middle, dest_bag_thresh); - }: rebag(SystemOrigin::Signed(caller), origin_middle.clone()) + }: rebag(SystemOrigin::Signed(caller), origin_middle_lookup.clone()) verify { // check the bags have updated as expected. 
assert_eq!( @@ -114,6 +116,8 @@ frame_benchmarking::benchmarks_instance_pallet! { let dest_head: T::AccountId = account("dest_head", 0, 0); assert_ok!(List::::insert(dest_head.clone(), dest_bag_thresh)); + let origin_tail_lookup = T::Lookup::unlookup(origin_tail.clone()); + // the bags are in the expected state after initial setup. assert_eq!( List::::get_bags(), @@ -126,7 +130,7 @@ frame_benchmarking::benchmarks_instance_pallet! { let caller = whitelisted_caller(); // update the weight of `origin_tail` to guarantee it will be rebagged into the destination. T::ScoreProvider::set_score_of(&origin_tail, dest_bag_thresh); - }: rebag(SystemOrigin::Signed(caller), origin_tail.clone()) + }: rebag(SystemOrigin::Signed(caller), origin_tail_lookup.clone()) verify { // check the bags have updated as expected. assert_eq!( @@ -166,13 +170,15 @@ frame_benchmarking::benchmarks_instance_pallet! { T::ScoreProvider::set_score_of(&lighter, bag_thresh - One::one()); T::ScoreProvider::set_score_of(&heavier, bag_thresh); + let lighter_lookup = T::Lookup::unlookup(lighter.clone()); + assert_eq!( List::::iter().map(|n| n.id().clone()).collect::>(), vec![lighter.clone(), heavier_prev.clone(), heavier.clone(), heavier_next.clone()] ); whitelist_account!(heavier); - }: _(SystemOrigin::Signed(heavier.clone()), lighter.clone()) + }: _(SystemOrigin::Signed(heavier.clone()), lighter_lookup.clone()) verify { assert_eq!( List::::iter().map(|n| n.id().clone()).collect::>(), diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 7eee8fdfa23d8..5163a579c6f43 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -56,7 +56,7 @@ use codec::FullCodec; use frame_election_provider_support::{ScoreProvider, SortedListProvider}; use frame_system::ensure_signed; -use sp_runtime::traits::{AtLeast32BitUnsigned, Bounded}; +use sp_runtime::traits::{AtLeast32BitUnsigned, Bounded, StaticLookup}; use sp_std::prelude::*; #[cfg(any(feature = "runtime-benchmarks", test))] @@ -90,6 +90,8 @@ macro_rules! log { }; } +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -222,8 +224,9 @@ pub mod pallet { /// /// If `dislocated` does not exists, it returns an error. #[pallet::weight(T::WeightInfo::rebag_non_terminal().max(T::WeightInfo::rebag_terminal()))] - pub fn rebag(origin: OriginFor, dislocated: T::AccountId) -> DispatchResult { + pub fn rebag(origin: OriginFor, dislocated: AccountIdLookupOf) -> DispatchResult { ensure_signed(origin)?; + let dislocated = T::Lookup::lookup(dislocated)?; let current_score = T::ScoreProvider::score(&dislocated); let _ = Pallet::::do_rebag(&dislocated, current_score) .map_err::, _>(Into::into)?; @@ -239,8 +242,12 @@ pub mod pallet { /// - both nodes are within the same bag, /// - and `origin` has a greater `Score` than `lighter`. #[pallet::weight(T::WeightInfo::put_in_front_of())] - pub fn put_in_front_of(origin: OriginFor, lighter: T::AccountId) -> DispatchResult { + pub fn put_in_front_of( + origin: OriginFor, + lighter: AccountIdLookupOf, + ) -> DispatchResult { let heavier = ensure_signed(origin)?; + let lighter = T::Lookup::lookup(lighter)?; List::::put_in_front_of(&lighter, &heavier) .map_err::, _>(Into::into) .map_err::(Into::into) diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 4a874e4ffa1d5..206adba0f044b 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -46,7 +46,7 @@ benchmarks_instance_pallet! 
{ // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, // and reap this user. let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) verify { @@ -60,7 +60,7 @@ benchmarks_instance_pallet! { transfer_best_case { let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds for transfer (their account will never reasonably be killed). let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); @@ -80,7 +80,7 @@ benchmarks_instance_pallet! { transfer_keep_alive { let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds, thus a transfer will not kill account. let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); @@ -95,7 +95,7 @@ benchmarks_instance_pallet! { // Benchmark `set_balance` coming from ROOT account. This always creates an account. set_balance_creating { let user: T::AccountId = account("user", 0, SEED); - let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + let user_lookup = T::Lookup::unlookup(user.clone()); // Give the user some initial balance. let existential_deposit = T::ExistentialDeposit::get(); @@ -110,7 +110,7 @@ benchmarks_instance_pallet! { // Benchmark `set_balance` coming from ROOT account. This always kills an account. set_balance_killing { let user: T::AccountId = account("user", 0, SEED); - let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + let user_lookup = T::Lookup::unlookup(user.clone()); // Give the user some initial balance. let existential_deposit = T::ExistentialDeposit::get(); @@ -127,7 +127,7 @@ benchmarks_instance_pallet! { force_transfer { let existential_deposit = T::ExistentialDeposit::get(); let source: T::AccountId = account("source", 0, SEED); - let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); + let source_lookup = T::Lookup::unlookup(source.clone()); // Give some multiple of the existential deposit let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); @@ -135,7 +135,7 @@ benchmarks_instance_pallet! { // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); }: force_transfer(RawOrigin::Root, source_lookup, recipient_lookup, transfer_amount) verify { @@ -160,7 +160,7 @@ benchmarks_instance_pallet! { // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, // and reap this user. 
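As an aside for readers following the benchmark changes above: they all rely on the `StaticLookup` round-trip, where `unlookup` turns an account id into the address form a call expects and `lookup` resolves it back inside the dispatchable. A minimal, self-contained sketch of that round-trip, not taken from the patch and assuming only the sp-runtime crate, using `IdentityLookup` (the simplest implementation, whose `Source` and `Target` are the same type):

use sp_runtime::traits::{IdentityLookup, StaticLookup};

fn main() {
    // With `IdentityLookup`, both conversions are identity maps.
    type Lookup = IdentityLookup<u64>;
    let account: u64 = 42;
    // Benchmarks call `unlookup` to build the address a call now expects...
    let source = <Lookup as StaticLookup>::unlookup(account);
    // ...and the dispatchable calls `lookup` to resolve it back to an account id.
    let resolved = <Lookup as StaticLookup>::lookup(source).expect("identity lookup never fails");
    assert_eq!(resolved, account);
}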
let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); // Create a bunch of users in storage. @@ -182,7 +182,7 @@ benchmarks_instance_pallet! { transfer_all { let caller = whitelisted_caller(); let recipient: T::AccountId = account("recipient", 0, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); // Give some multiple of the existential deposit let existential_deposit = T::ExistentialDeposit::get(); @@ -196,7 +196,7 @@ benchmarks_instance_pallet! { force_unreserve { let user: T::AccountId = account("user", 0, SEED); - let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + let user_lookup = T::Lookup::unlookup(user.clone()); // Give some multiple of the existential deposit let existential_deposit = T::ExistentialDeposit::get(); diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 683ebce2b1693..0cb32a4e3ecd6 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -192,6 +192,8 @@ pub use weights::WeightInfo; pub use pallet::*; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -275,7 +277,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, #[pallet::compact] value: T::Balance, ) -> DispatchResultWithPostInfo { let transactor = ensure_signed(origin)?; @@ -303,7 +305,7 @@ pub mod pallet { )] pub fn set_balance( origin: OriginFor, - who: ::Source, + who: AccountIdLookupOf, #[pallet::compact] new_free: T::Balance, #[pallet::compact] new_reserved: T::Balance, ) -> DispatchResultWithPostInfo { @@ -353,8 +355,8 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_transfer())] pub fn force_transfer( origin: OriginFor, - source: ::Source, - dest: ::Source, + source: AccountIdLookupOf, + dest: AccountIdLookupOf, #[pallet::compact] value: T::Balance, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; @@ -378,7 +380,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer_keep_alive())] pub fn transfer_keep_alive( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, #[pallet::compact] value: T::Balance, ) -> DispatchResultWithPostInfo { let transactor = ensure_signed(origin)?; @@ -407,7 +409,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer_all())] pub fn transfer_all( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, keep_alive: bool, ) -> DispatchResult { use fungible::Inspect; @@ -425,7 +427,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_unreserve())] pub fn force_unreserve( origin: OriginFor, - who: ::Source, + who: AccountIdLookupOf, amount: T::Balance, ) -> DispatchResult { ensure_root(origin)?; diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index bc559e1e8b358..0c8c875dc611c 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -62,7 +62,7 @@ fn setup_bounty, I: 'static>( } fn create_bounty, I: 'static>( -) -> Result<(::Source, BountyIndex), &'static str> { +) -> Result<(AccountIdLookupOf, BountyIndex), &'static str> { let (caller, curator, fee, value, reason) = setup_bounty::(0, 
T::MaximumReasonLength::get()); let curator_lookup = T::Lookup::unlookup(curator.clone()); diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index fca758fd96b8e..74e67b7ea5b92 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -114,6 +114,8 @@ type PositiveImbalanceOf = pallet_treasury::PositiveImbalanceOf /// An index of a bounty. Just a `u32`. pub type BountyIndex = u32; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + /// A bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct Bounty { @@ -381,7 +383,7 @@ pub mod pallet { pub fn propose_curator( origin: OriginFor, #[pallet::compact] bounty_id: BountyIndex, - curator: ::Source, + curator: AccountIdLookupOf, #[pallet::compact] fee: BalanceOf, ) -> DispatchResult { T::ApproveOrigin::ensure_origin(origin)?; @@ -553,7 +555,7 @@ pub mod pallet { pub fn award_bounty( origin: OriginFor, #[pallet::compact] bounty_id: BountyIndex, - beneficiary: ::Source, + beneficiary: AccountIdLookupOf, ) -> DispatchResult { let signer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; diff --git a/frame/child-bounties/src/lib.rs b/frame/child-bounties/src/lib.rs index 4f25fdcf8903a..5f396cd2d4567 100644 --- a/frame/child-bounties/src/lib.rs +++ b/frame/child-bounties/src/lib.rs @@ -80,6 +80,7 @@ pub use pallet::*; type BalanceOf = pallet_treasury::BalanceOf; type BountiesError = pallet_bounties::Error; type BountyIndex = pallet_bounties::BountyIndex; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// A child bounty proposal. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -315,7 +316,7 @@ pub mod pallet { origin: OriginFor, #[pallet::compact] parent_bounty_id: BountyIndex, #[pallet::compact] child_bounty_id: BountyIndex, - curator: ::Source, + curator: AccountIdLookupOf, #[pallet::compact] fee: BalanceOf, ) -> DispatchResult { let signer = ensure_signed(origin)?; @@ -574,7 +575,7 @@ pub mod pallet { origin: OriginFor, #[pallet::compact] parent_bounty_id: BountyIndex, #[pallet::compact] child_bounty_id: BountyIndex, - beneficiary: ::Source, + beneficiary: AccountIdLookupOf, ) -> DispatchResult { let signer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index bea469bd0f5a9..1a0b735ac76ce 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -57,7 +57,7 @@ const INSTR_BENCHMARK_BATCHES: u32 = 50; struct Contract { caller: T::AccountId, account_id: T::AccountId, - addr: ::Source, + addr: AccountIdLookupOf, value: BalanceOf, } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 319bacaab7789..06fd419d88bf3 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -136,6 +136,7 @@ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type CodeVec = BoundedVec::MaxCodeLen>; type RelaxedCodeVec = BoundedVec::RelaxedMaxCodeLen>; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// Used as a sentinel value when reading and writing contract memory. 
/// @@ -435,7 +436,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] pub fn call( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, #[pallet::compact] value: BalanceOf, #[pallet::compact] gas_limit: Weight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, @@ -617,7 +618,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_code())] pub fn set_code( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, code_hash: CodeHash, ) -> DispatchResult { ensure_root(origin)?; diff --git a/frame/conviction-voting/src/benchmarking.rs b/frame/conviction-voting/src/benchmarking.rs index d472770832497..117bb7fe22989 100644 --- a/frame/conviction-voting/src/benchmarking.rs +++ b/frame/conviction-voting/src/benchmarking.rs @@ -149,6 +149,7 @@ benchmarks_instance_pallet! { remove_other_vote { let caller = funded_account::("caller", 0); let voter = funded_account::("caller", 0); + let voter_lookup = T::Lookup::unlookup(voter.clone()); whitelist_account!(caller); let old_account_vote = account_vote::(100u32.into()); @@ -167,7 +168,7 @@ benchmarks_instance_pallet! { let index = polls[0]; assert!(T::Polls::end_ongoing(index, false).is_ok()); - }: _(RawOrigin::Signed(caller.clone()), voter.clone(), class.clone(), index) + }: _(RawOrigin::Signed(caller.clone()), voter_lookup, class.clone(), index) verify { assert_matches!( VotingFor::::get(&voter, &class), @@ -182,6 +183,7 @@ benchmarks_instance_pallet! { let class = T::Polls::max_ongoing().0; let polls = &all_polls[&class]; let voter = funded_account::("voter", 0); + let voter_lookup = T::Lookup::unlookup(voter.clone()); let caller = funded_account::("caller", 0); whitelist_account!(caller); @@ -197,7 +199,7 @@ benchmarks_instance_pallet! { Voting::Casting(Casting { votes, .. }) if votes.len() == r as usize ); - }: _(RawOrigin::Signed(caller.clone()), class.clone(), voter, Conviction::Locked1x, delegated_balance) + }: _(RawOrigin::Signed(caller.clone()), class.clone(), voter_lookup, Conviction::Locked1x, delegated_balance) verify { assert_matches!(VotingFor::::get(&caller, &class), Voting::Delegating(_)); } @@ -209,6 +211,7 @@ benchmarks_instance_pallet! { let class = T::Polls::max_ongoing().0; let polls = &all_polls[&class]; let voter = funded_account::("voter", 0); + let voter_lookup = T::Lookup::unlookup(voter.clone()); let caller = funded_account::("caller", 0); whitelist_account!(caller); @@ -218,7 +221,7 @@ benchmarks_instance_pallet! { ConvictionVoting::::delegate( RawOrigin::Signed(caller.clone()).into(), class.clone(), - voter.clone(), + voter_lookup, Conviction::Locked1x, delegated_balance, )?; @@ -239,6 +242,7 @@ benchmarks_instance_pallet! { unlock { let caller = funded_account::("caller", 0); + let caller_lookup = T::Lookup::unlookup(caller.clone()); whitelist_account!(caller); let normal_account_vote = account_vote::(T::Currency::free_balance(&caller) - 100u32.into()); let big_account_vote = account_vote::(T::Currency::free_balance(&caller)); @@ -266,7 +270,7 @@ benchmarks_instance_pallet! { ConvictionVoting::::remove_vote(RawOrigin::Signed(caller.clone()).into(), Some(class.clone()), polls[0])?; // We can now unlock on `class` from 200 to 100... 
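The pallet-side half of the change is the same everywhere: the call now receives the runtime's lookup `Source` and resolves it before doing anything else. A generic sketch of that single step, not taken from the patch; the helper name is illustrative only:

use sp_runtime::traits::{LookupError, StaticLookup};

// Mirrors the `let who = T::Lookup::lookup(who)?;` line added to the dispatchables above:
// resolve a caller-supplied address into a concrete account id, or fail with `LookupError`.
fn resolve<L: StaticLookup>(who: L::Source) -> Result<L::Target, LookupError> {
    L::lookup(who)
}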
- }: _(RawOrigin::Signed(caller.clone()), class, caller.clone()) + }: _(RawOrigin::Signed(caller.clone()), class, caller_lookup) verify { assert_eq!(orig_usable, >::reducible_balance(&caller, false)); } diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index 54fc1156d1f47..cd16fccd6661d 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -36,7 +36,7 @@ use frame_support::{ }, }; use sp_runtime::{ - traits::{AtLeast32BitUnsigned, Saturating, Zero}, + traits::{AtLeast32BitUnsigned, Saturating, StaticLookup, Zero}, ArithmeticError, Perbill, }; use sp_std::prelude::*; @@ -62,6 +62,7 @@ pub mod benchmarking; const CONVICTION_VOTING_ID: LockIdentifier = *b"pyconvot"; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; type VotingOf = Voting< @@ -245,11 +246,12 @@ pub mod pallet { pub fn delegate( origin: OriginFor, class: ClassOf, - to: T::AccountId, + to: AccountIdLookupOf, conviction: Conviction, balance: BalanceOf, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; + let to = T::Lookup::lookup(to)?; let votes = Self::try_delegate(who, class, to, conviction, balance)?; Ok(Some(T::WeightInfo::delegate(votes)).into()) @@ -294,9 +296,10 @@ pub mod pallet { pub fn unlock( origin: OriginFor, class: ClassOf, - target: T::AccountId, + target: AccountIdLookupOf, ) -> DispatchResult { ensure_signed(origin)?; + let target = T::Lookup::lookup(target)?; Self::update_lock(&class, &target); Ok(()) } @@ -359,11 +362,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_other_vote())] pub fn remove_other_vote( origin: OriginFor, - target: T::AccountId, + target: AccountIdLookupOf, class: ClassOf, index: PollIndexOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let target = T::Lookup::lookup(target)?; let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired }; Self::try_remove_vote(&target, index, Some(class), scope)?; Ok(()) diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 0544ee1731484..9b275043df43e 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -483,9 +483,10 @@ benchmarks! { let caller = funded_account::("caller", 0); // Caller will initially delegate to `old_delegate` let old_delegate: T::AccountId = funded_account::("old_delegate", r); + let old_delegate_lookup = T::Lookup::unlookup(old_delegate.clone()); Democracy::::delegate( RawOrigin::Signed(caller.clone()).into(), - old_delegate.clone(), + old_delegate_lookup, Conviction::Locked1x, delegated_balance, )?; @@ -497,6 +498,7 @@ benchmarks! { assert_eq!(balance, delegated_balance, "delegation balance didn't work"); // Caller will now switch to `new_delegate` let new_delegate: T::AccountId = funded_account::("new_delegate", r); + let new_delegate_lookup = T::Lookup::unlookup(new_delegate.clone()); let account_vote = account_vote::(initial_balance); // We need to create existing direct votes for the `new_delegate` for i in 0..r { @@ -509,7 +511,7 @@ benchmarks! 
{ }; assert_eq!(votes.len(), r as usize, "Votes were not recorded."); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), new_delegate.clone(), Conviction::Locked1x, delegated_balance) + }: _(RawOrigin::Signed(caller.clone()), new_delegate_lookup, Conviction::Locked1x, delegated_balance) verify { let (target, balance) = match VotingOf::::get(&caller) { Voting::Delegating { target, balance, .. } => (target, balance), @@ -533,9 +535,10 @@ benchmarks! { let caller = funded_account::("caller", 0); // Caller will delegate let the_delegate: T::AccountId = funded_account::("delegate", r); + let the_delegate_lookup = T::Lookup::unlookup(the_delegate.clone()); Democracy::::delegate( RawOrigin::Signed(caller.clone()).into(), - the_delegate.clone(), + the_delegate_lookup, Conviction::Locked1x, delegated_balance, )?; @@ -642,6 +645,7 @@ benchmarks! { let r in 1 .. MAX_REFERENDUMS; let locker = funded_account::("locker", 0); + let locker_lookup = T::Lookup::unlookup(locker.clone()); // Populate votes so things are locked let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); @@ -654,7 +658,7 @@ benchmarks! { let caller = funded_account::("caller", 0); whitelist_account!(caller); - }: unlock(RawOrigin::Signed(caller), locker.clone()) + }: unlock(RawOrigin::Signed(caller), locker_lookup) verify { // Note that we may want to add a `get_lock` api to actually verify let voting = VotingOf::::get(&locker); @@ -666,6 +670,7 @@ benchmarks! { let r in 1 .. MAX_REFERENDUMS; let locker = funded_account::("locker", 0); + let locker_lookup = T::Lookup::unlookup(locker.clone()); // Populate votes so things are locked let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); @@ -692,7 +697,7 @@ benchmarks! { let caller = funded_account::("caller", 0); whitelist_account!(caller); - }: unlock(RawOrigin::Signed(caller), locker.clone()) + }: unlock(RawOrigin::Signed(caller), locker_lookup) verify { let votes = match VotingOf::::get(&locker) { Voting::Direct { votes, .. } => votes, @@ -738,6 +743,7 @@ benchmarks! { let r in 1 .. MAX_REFERENDUMS; let caller = funded_account::("caller", r); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let account_vote = account_vote::(100u32.into()); for i in 0 .. r { @@ -753,7 +759,7 @@ benchmarks! { let referendum_index = r - 1; whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), caller.clone(), referendum_index) + }: _(RawOrigin::Signed(caller.clone()), caller_lookup, referendum_index) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. 
} => votes, diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 443b8579116d0..a347f47efe121 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -165,7 +165,7 @@ use frame_support::{ }; use scale_info::TypeInfo; use sp_runtime::{ - traits::{Bounded, Dispatchable, Hash, Saturating, Zero}, + traits::{Bounded, Dispatchable, Hash, Saturating, StaticLookup, Zero}, ArithmeticError, DispatchError, DispatchResult, RuntimeDebug, }; use sp_std::prelude::*; @@ -206,6 +206,7 @@ type BalanceOf = type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum PreimageStatus { @@ -944,11 +945,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::delegate(T::MaxVotes::get()))] pub fn delegate( origin: OriginFor, - to: T::AccountId, + to: AccountIdLookupOf, conviction: Conviction, balance: BalanceOf, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; + let to = T::Lookup::lookup(to)?; let votes = Self::try_delegate(who, to, conviction, balance)?; Ok(Some(T::WeightInfo::delegate(votes)).into()) @@ -1127,8 +1129,9 @@ pub mod pallet { T::WeightInfo::unlock_set(T::MaxVotes::get()) .max(T::WeightInfo::unlock_remove(T::MaxVotes::get())) )] - pub fn unlock(origin: OriginFor, target: T::AccountId) -> DispatchResult { + pub fn unlock(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { ensure_signed(origin)?; + let target = T::Lookup::lookup(target)?; Self::update_lock(&target); Ok(()) } @@ -1184,10 +1187,11 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_other_vote(T::MaxVotes::get()))] pub fn remove_other_vote( origin: OriginFor, - target: T::AccountId, + target: AccountIdLookupOf, index: ReferendumIndex, ) -> DispatchResult { let who = ensure_signed(origin)?; + let target = T::Lookup::lookup(target)?; let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired }; Self::try_remove_vote(&target, index, scope)?; Ok(()) diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 2d1b698940a9d..22d00a912a4f7 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -29,8 +29,6 @@ use crate::Pallet as Elections; const BALANCE_FACTOR: u32 = 250; -type Lookup = <::Lookup as StaticLookup>::Source; - /// grab new account with infinite balance. fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); @@ -46,7 +44,7 @@ fn endowed_account(name: &'static str, index: u32) -> T::AccountId { } /// Account to lookup type of system trait. 
-fn as_lookup(account: T::AccountId) -> Lookup { +fn as_lookup(account: T::AccountId) -> AccountIdLookupOf { T::Lookup::unlookup(account) } diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 03d41fa2af40f..1076ae77fbdda 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -176,6 +176,8 @@ pub struct SeatHolder { pub use pallet::*; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -508,7 +510,7 @@ pub mod pallet { })] pub fn remove_member( origin: OriginFor, - who: ::Source, + who: AccountIdLookupOf, slash_bond: bool, rerun_election: bool, ) -> DispatchResultWithPostInfo { diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 4891302695186..20af41d392089 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -40,9 +40,10 @@ fn assert_last_event(generic_event: ::Event) { fn add_registrars(r: u32) -> Result<(), &'static str> { for i in 0..r { let registrar: T::AccountId = account("registrar", i, SEED); + let registrar_lookup = T::Lookup::unlookup(registrar.clone()); let _ = T::Currency::make_free_balance_be(®istrar, BalanceOf::::max_value()); let registrar_origin = T::RegistrarOrigin::successful_origin(); - Identity::::add_registrar(registrar_origin, registrar.clone())?; + Identity::::add_registrar(registrar_origin, registrar_lookup)?; Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), i, 10u32.into())?; let fields = IdentityFields( @@ -130,7 +131,7 @@ benchmarks! { let caller = { // The target user let caller: T::AccountId = whitelisted_caller(); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -261,11 +262,12 @@ benchmarks! { set_fee { let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; let registrar_origin = T::RegistrarOrigin::successful_origin(); - Identity::::add_registrar(registrar_origin, caller.clone())?; + Identity::::add_registrar(registrar_origin, caller_lookup)?; let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().fee == 0u32.into(), "Fee already set."); }: _(RawOrigin::Signed(caller), r, 100u32.into()) @@ -276,12 +278,13 @@ benchmarks! { set_account_id { let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; let registrar_origin = T::RegistrarOrigin::successful_origin(); - Identity::::add_registrar(registrar_origin, caller.clone())?; + Identity::::add_registrar(registrar_origin, caller_lookup)?; let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().account == caller, "id not set."); }: _(RawOrigin::Signed(caller), r, account("new", 0, SEED)) @@ -292,12 +295,13 @@ benchmarks! { set_fields { let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. 
T::MaxRegistrars::get() - 1 => add_registrars::(r)?; let registrar_origin = T::RegistrarOrigin::successful_origin(); - Identity::::add_registrar(registrar_origin, caller.clone())?; + Identity::::add_registrar(registrar_origin, caller_lookup)?; let fields = IdentityFields( IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter @@ -318,6 +322,7 @@ benchmarks! { let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; @@ -327,7 +332,7 @@ benchmarks! { }; let registrar_origin = T::RegistrarOrigin::successful_origin(); - Identity::::add_registrar(registrar_origin, caller.clone())?; + Identity::::add_registrar(registrar_origin, caller_lookup)?; Identity::::request_judgement(user_origin, r, 10u32.into())?; }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) verify { @@ -341,7 +346,7 @@ benchmarks! { let target: T::AccountId = account("target", 0, SEED); let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); - let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); + let target_lookup = T::Lookup::unlookup(target.clone()); let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); let info = create_identity_info::(x); diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 46f847606903d..0f80acceb949c 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -94,6 +94,7 @@ type BalanceOf = type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[frame_support::pallet] pub mod pallet { @@ -282,9 +283,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_registrar(T::MaxRegistrars::get()))] pub fn add_registrar( origin: OriginFor, - account: T::AccountId, + account: AccountIdLookupOf, ) -> DispatchResultWithPostInfo { T::RegistrarOrigin::ensure_origin(origin)?; + let account = T::Lookup::lookup(account)?; let (i, registrar_count) = >::try_mutate( |registrars| -> Result<(RegistrarIndex, usize), DispatchError> { @@ -672,9 +674,10 @@ pub mod pallet { pub fn set_account_id( origin: OriginFor, #[pallet::compact] index: RegistrarIndex, - new: T::AccountId, + new: AccountIdLookupOf, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; + let new = T::Lookup::lookup(new)?; let registrars = >::mutate(|rs| -> Result { rs.get_mut(index as usize) @@ -760,7 +763,7 @@ pub mod pallet { pub fn provide_judgement( origin: OriginFor, #[pallet::compact] reg_index: RegistrarIndex, - target: ::Source, + target: AccountIdLookupOf, judgement: Judgement>, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; @@ -827,7 +830,7 @@ pub mod pallet { ))] pub fn kill_identity( origin: OriginFor, - target: ::Source, + target: AccountIdLookupOf, ) -> DispatchResultWithPostInfo { T::ForceOrigin::ensure_origin(origin)?; @@ -863,7 +866,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_sub(T::MaxSubAccounts::get()))] pub fn add_sub( origin: OriginFor, - sub: ::Source, + sub: AccountIdLookupOf, data: Data, ) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -898,7 +901,7 @@ pub mod pallet { 
#[pallet::weight(T::WeightInfo::rename_sub(T::MaxSubAccounts::get()))] pub fn rename_sub( origin: OriginFor, - sub: ::Source, + sub: AccountIdLookupOf, data: Data, ) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -917,10 +920,7 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. #[pallet::weight(T::WeightInfo::remove_sub(T::MaxSubAccounts::get()))] - pub fn remove_sub( - origin: OriginFor, - sub: ::Source, - ) -> DispatchResult { + pub fn remove_sub(origin: OriginFor, sub: AccountIdLookupOf) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); let sub = T::Lookup::lookup(sub)?; diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs index cb06cd809f542..f462f22284d40 100644 --- a/frame/indices/src/benchmarking.rs +++ b/frame/indices/src/benchmarking.rs @@ -44,10 +44,11 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let recipient: T::AccountId = account("recipient", 0, SEED); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); // Claim the index Indices::::claim(RawOrigin::Signed(caller.clone()).into(), account_index)?; - }: _(RawOrigin::Signed(caller.clone()), recipient.clone(), account_index) + }: _(RawOrigin::Signed(caller.clone()), recipient_lookup, account_index) verify { assert_eq!(Accounts::::get(account_index).unwrap().0, recipient); } @@ -70,10 +71,11 @@ benchmarks! { let original: T::AccountId = account("original", 0, SEED); T::Currency::make_free_balance_be(&original, BalanceOf::::max_value()); let recipient: T::AccountId = account("recipient", 0, SEED); + let recipient_lookup = T::Lookup::unlookup(recipient.clone()); T::Currency::make_free_balance_be(&recipient, BalanceOf::::max_value()); // Claim the index Indices::::claim(RawOrigin::Signed(original).into(), account_index)?; - }: _(RawOrigin::Root, recipient.clone(), account_index, false) + }: _(RawOrigin::Root, recipient_lookup, account_index, false) verify { assert_eq!(Accounts::::get(account_index).unwrap().0, recipient); } diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index ddc03c94b1233..49380f18e24db 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -36,6 +36,7 @@ pub use weights::WeightInfo; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; pub use pallet::*; @@ -133,10 +134,11 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::transfer())] pub fn transfer( origin: OriginFor, - new: T::AccountId, + new: AccountIdLookupOf, index: T::AccountIndex, ) -> DispatchResult { let who = ensure_signed(origin)?; + let new = T::Lookup::lookup(new)?; ensure!(who != new, Error::::NotTransfer); Accounts::::try_mutate(index, |maybe_value| -> DispatchResult { @@ -208,11 +210,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_transfer())] pub fn force_transfer( origin: OriginFor, - new: T::AccountId, + new: AccountIdLookupOf, index: T::AccountIndex, freeze: bool, ) -> DispatchResult { ensure_root(origin)?; + let new = T::Lookup::lookup(new)?; Accounts::::mutate(index, |maybe_value| { if let Some((account, amount, _)) = maybe_value.take() { diff --git a/frame/indices/src/tests.rs b/frame/indices/src/tests.rs index 73d591c38bb2f..4e6c59703ca36 
100644 --- a/frame/indices/src/tests.rs +++ b/frame/indices/src/tests.rs @@ -22,6 +22,7 @@ use super::{mock::*, *}; use frame_support::{assert_noop, assert_ok}; use pallet_balances::Error as BalancesError; +use sp_runtime::MultiAddress::Id; #[test] fn claiming_should_work() { @@ -60,7 +61,7 @@ fn freezing_should_work() { assert_noop!(Indices::freeze(Some(1).into(), 0), Error::::Permanent); assert_noop!(Indices::free(Some(1).into(), 0), Error::::Permanent); - assert_noop!(Indices::transfer(Some(1).into(), 2, 0), Error::::Permanent); + assert_noop!(Indices::transfer(Some(1).into(), Id(2), 0), Error::::Permanent); }); } @@ -90,9 +91,9 @@ fn reclaim_index_on_accounts_should_work() { fn transfer_index_on_accounts_should_work() { new_test_ext().execute_with(|| { assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_noop!(Indices::transfer(Some(1).into(), 2, 1), Error::::NotAssigned); - assert_noop!(Indices::transfer(Some(2).into(), 3, 0), Error::::NotOwner); - assert_ok!(Indices::transfer(Some(1).into(), 3, 0)); + assert_noop!(Indices::transfer(Some(1).into(), Id(2), 1), Error::::NotAssigned); + assert_noop!(Indices::transfer(Some(2).into(), Id(3), 0), Error::::NotOwner); + assert_ok!(Indices::transfer(Some(1).into(), Id(3), 0)); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::reserved_balance(3), 1); assert_eq!(Indices::lookup_index(0), Some(3)); @@ -103,7 +104,7 @@ fn transfer_index_on_accounts_should_work() { fn force_transfer_index_on_preowned_should_work() { new_test_ext().execute_with(|| { assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_ok!(Indices::force_transfer(Origin::root(), 3, 0, false)); + assert_ok!(Indices::force_transfer(Origin::root(), Id(3), 0, false)); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::reserved_balance(3), 0); assert_eq!(Indices::lookup_index(0), Some(3)); @@ -113,7 +114,7 @@ fn force_transfer_index_on_preowned_should_work() { #[test] fn force_transfer_index_on_free_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Indices::force_transfer(Origin::root(), 3, 0, false)); + assert_ok!(Indices::force_transfer(Origin::root(), Id(3), 0, false)); assert_eq!(Balances::reserved_balance(3), 0); assert_eq!(Indices::lookup_index(0), Some(3)); }); diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 24ecfd5333c66..32e1130f3d944 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -27,6 +27,7 @@ use frame_support::{ traits::{ChangeMembers, Contains, Get, InitializeMembers, SortedMembers}, BoundedVec, }; +use sp_runtime::traits::StaticLookup; use sp_std::prelude::*; pub mod migrations; @@ -35,6 +36,8 @@ pub mod weights; pub use pallet::*; pub use weights::WeightInfo; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -163,8 +166,9 @@ pub mod pallet { /// /// May only be called from `T::AddOrigin`. #[pallet::weight(50_000_000)] - pub fn add_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { + pub fn add_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::AddOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; let mut members = >::get(); let location = members.binary_search(&who).err().ok_or(Error::::AlreadyMember)?; @@ -184,8 +188,9 @@ pub mod pallet { /// /// May only be called from `T::RemoveOrigin`. 
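The indices test changes above show why call sites now pass `Id(2)` instead of a bare `2`: once a call takes the lookup `Source`, and the runtime's `Lookup` is `AccountIdLookup`, that source type is `MultiAddress`, whose `Id` variant wraps a plain account id. A small self-contained sketch, not taken from the patch and assuming only the sp-runtime crate:

use sp_runtime::{
    traits::{AccountIdLookup, StaticLookup},
    MultiAddress,
};

fn main() {
    // `AccountIdLookup<AccountId, AccountIndex>` uses `MultiAddress` as its `Source`.
    type Lookup = AccountIdLookup<u64, ()>;
    // `unlookup` wraps the account id in `MultiAddress::Id`, which is what the updated
    // tests construct by hand as `Id(2)`, `Id(3)`, ...
    assert_eq!(<Lookup as StaticLookup>::unlookup(3), MultiAddress::Id(3));
    // `lookup` succeeds for the `Id` variant and returns the inner account id.
    assert_eq!(<Lookup as StaticLookup>::lookup(MultiAddress::Id(3)).unwrap(), 3);
}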
#[pallet::weight(50_000_000)] - pub fn remove_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { + pub fn remove_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::RemoveOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; let mut members = >::get(); let location = members.binary_search(&who).ok().ok_or(Error::::NotMember)?; @@ -208,10 +213,12 @@ pub mod pallet { #[pallet::weight(50_000_000)] pub fn swap_member( origin: OriginFor, - remove: T::AccountId, - add: T::AccountId, + remove: AccountIdLookupOf, + add: AccountIdLookupOf, ) -> DispatchResult { T::SwapOrigin::ensure_origin(origin)?; + let remove = T::Lookup::lookup(remove)?; + let add = T::Lookup::lookup(add)?; if remove == add { return Ok(()) @@ -259,8 +266,9 @@ pub mod pallet { /// /// Prime membership is passed from the origin account to `new`, if extant. #[pallet::weight(50_000_000)] - pub fn change_key(origin: OriginFor, new: T::AccountId) -> DispatchResult { + pub fn change_key(origin: OriginFor, new: AccountIdLookupOf) -> DispatchResult { let remove = ensure_signed(origin)?; + let new = T::Lookup::lookup(new)?; if remove != new { let mut members = >::get(); @@ -292,8 +300,9 @@ pub mod pallet { /// /// May only be called from `T::PrimeOrigin`. #[pallet::weight(50_000_000)] - pub fn set_prime(origin: OriginFor, who: T::AccountId) -> DispatchResult { + pub fn set_prime(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { T::PrimeOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; Self::members().binary_search(&who).ok().ok_or(Error::::NotMember)?; Prime::::put(&who); T::MembershipChanged::set_prime(Some(who)); @@ -355,7 +364,8 @@ mod benchmark { assert_ok!(>::reset_members(reset_origin, members.clone())); if let Some(prime) = prime.map(|i| members[i].clone()) { - assert_ok!(>::set_prime(prime_origin, prime)); + let prime_lookup = T::Lookup::unlookup(prime); + assert_ok!(>::set_prime(prime_origin, prime_lookup)); } else { assert_ok!(>::clear_prime(prime_origin)); } @@ -368,8 +378,9 @@ mod benchmark { let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); set_members::(members, None); let new_member = account::("add", m, SEED); + let new_member_lookup = T::Lookup::unlookup(new_member.clone()); }: { - assert_ok!(>::add_member(T::AddOrigin::successful_origin(), new_member.clone())); + assert_ok!(>::add_member(T::AddOrigin::successful_origin(), new_member_lookup)); } verify { assert!(>::get().contains(&new_member)); @@ -385,8 +396,9 @@ mod benchmark { set_members::(members.clone(), Some(members.len() - 1)); let to_remove = members.first().cloned().unwrap(); + let to_remove_lookup = T::Lookup::unlookup(to_remove.clone()); }: { - assert_ok!(>::remove_member(T::RemoveOrigin::successful_origin(), to_remove.clone())); + assert_ok!(>::remove_member(T::RemoveOrigin::successful_origin(), to_remove_lookup)); } verify { assert!(!>::get().contains(&to_remove)); // prime is rejigged @@ -401,12 +413,14 @@ mod benchmark { let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); set_members::(members.clone(), Some(members.len() - 1)); let add = account::("member", m, SEED); + let add_lookup = T::Lookup::unlookup(add.clone()); let remove = members.first().cloned().unwrap(); + let remove_lookup = T::Lookup::unlookup(remove.clone()); }: { assert_ok!(>::swap_member( T::SwapOrigin::successful_origin(), - remove.clone(), - add.clone(), + remove_lookup, + add_lookup, )); } verify { assert!(!>::get().contains(&remove)); @@ -442,9 +456,10 @@ mod 
benchmark { set_members::(members.clone(), Some(members.len() - 1)); let add = account::("member", m, SEED); + let add_lookup = T::Lookup::unlookup(add.clone()); whitelist!(prime); }: { - assert_ok!(>::change_key(RawOrigin::Signed(prime.clone()).into(), add.clone())); + assert_ok!(>::change_key(RawOrigin::Signed(prime.clone()).into(), add_lookup)); } verify { assert!(!>::get().contains(&prime)); assert!(>::get().contains(&add)); @@ -457,9 +472,10 @@ mod benchmark { let m in 1 .. T::MaxMembers::get(); let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); let prime = members.last().cloned().unwrap(); + let prime_lookup = T::Lookup::unlookup(prime.clone()); set_members::(members, None); }: { - assert_ok!(>::set_prime(T::PrimeOrigin::successful_origin(), prime)); + assert_ok!(>::set_prime(T::PrimeOrigin::successful_origin(), prime_lookup)); } verify { assert!(>::get().is_some()); assert!(::get_prime().is_some()); diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 5da06a24df3e5..8eb6936ec0450 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -47,6 +47,7 @@ type AccountIdOf = ::AccountId; type BalanceOf = <::Currency as Currency>>::Balance; type NegativeImbalanceOf = <::Currency as Currency>>::NegativeImbalance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; #[frame_support::pallet] pub mod pallet { @@ -193,10 +194,7 @@ pub mod pallet { /// - One event. /// # #[pallet::weight(70_000_000)] - pub fn kill_name( - origin: OriginFor, - target: ::Source, - ) -> DispatchResult { + pub fn kill_name(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; // Figure out who we're meant to be clearing. @@ -225,7 +223,7 @@ pub mod pallet { #[pallet::weight(70_000_000)] pub fn force_name( origin: OriginFor, - target: ::Source, + target: AccountIdLookupOf, name: Vec, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 07f2e9de37dde..427e71af6e8c4 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -46,9 +46,12 @@ pub mod weights; pub use pallet::*; use sp_core::OpaquePeerId as PeerId; +use sp_runtime::traits::StaticLookup; use sp_std::{collections::btree_set::BTreeSet, iter::FromIterator, prelude::*}; pub use weights::WeightInfo; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -211,9 +214,10 @@ pub mod pallet { pub fn add_well_known_node( origin: OriginFor, node: PeerId, - owner: T::AccountId, + owner: AccountIdLookupOf, ) -> DispatchResult { T::AddOrigin::ensure_origin(origin)?; + let owner = T::Lookup::lookup(owner)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); let mut nodes = WellKnownNodes::::get(); @@ -355,9 +359,10 @@ pub mod pallet { pub fn transfer_node( origin: OriginFor, node: PeerId, - owner: T::AccountId, + owner: AccountIdLookupOf, ) -> DispatchResult { let sender = ensure_signed(origin)?; + let owner = T::Lookup::lookup(owner)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); let pre_owner = Owners::::get(&node).ok_or(Error::::NotClaimed)?; diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index 6cc589ceb08bd..f4ecb4a9b0ff1 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs 
@@ -31,7 +31,7 @@ use pallet_nomination_pools::{ MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, }; -use sp_runtime::traits::{Bounded, Zero}; +use sp_runtime::traits::{Bounded, StaticLookup, Zero}; use sp_staking::{EraIndex, StakingInterface}; // `frame_benchmarking::benchmarks!` macro needs this use pallet_nomination_pools::Call; @@ -73,13 +73,14 @@ fn create_pool_account( let ed = CurrencyOf::::minimum_balance(); let pool_creator: T::AccountId = create_funded_user_with_balance::("pool_creator", n, ed + balance * 2u32.into()); + let pool_creator_lookup = T::Lookup::unlookup(pool_creator.clone()); Pools::::create( Origin::Signed(pool_creator.clone()).into(), balance, - pool_creator.clone(), - pool_creator.clone(), - pool_creator.clone(), + pool_creator_lookup.clone(), + pool_creator_lookup.clone(), + pool_creator_lookup, ) .unwrap(); @@ -310,9 +311,10 @@ frame_benchmarking::benchmarks! { let scenario = scenario.add_joiner(amount); let member_id = scenario.origin1_member.unwrap().clone(); + let member_id_lookup = T::Lookup::unlookup(member_id.clone()); let all_points = PoolMembers::::get(&member_id).unwrap().points; whitelist_account!(member_id); - }: _(Origin::Signed(member_id.clone()), member_id.clone(), all_points) + }: _(Origin::Signed(member_id.clone()), member_id_lookup, all_points) verify { let bonded_after = T::StakingInterface::active_stake(&scenario.origin1).unwrap(); // We at least went down to the destination bag @@ -382,6 +384,7 @@ frame_benchmarking::benchmarks! { // Add a new member let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); + let joiner_lookup = T::Lookup::unlookup(joiner.clone()); Pools::::join(Origin::Signed(joiner.clone()).into(), min_join_bond, 1) .unwrap(); @@ -408,7 +411,7 @@ frame_benchmarking::benchmarks! { pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); whitelist_account!(joiner); - }: withdraw_unbonded(Origin::Signed(joiner.clone()), joiner.clone(), s) + }: withdraw_unbonded(Origin::Signed(joiner.clone()), joiner_lookup, s) verify { assert_eq!( CurrencyOf::::free_balance(&joiner), @@ -423,6 +426,7 @@ frame_benchmarking::benchmarks! { let min_create_bond = min_create_bond::(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); + let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // We set the pool to the destroying state so the depositor can leave BondedPools::::try_mutate(&1, |maybe_bonded_pool| { @@ -465,7 +469,7 @@ frame_benchmarking::benchmarks! { assert!(frame_system::Account::::contains_key(&reward_account)); whitelist_account!(depositor); - }: withdraw_unbonded(Origin::Signed(depositor.clone()), depositor.clone(), s) + }: withdraw_unbonded(Origin::Signed(depositor.clone()), depositor_lookup, s) verify { // Pool removal worked assert!(!pallet_staking::Ledger::::contains_key(&pool_account)); @@ -487,6 +491,7 @@ frame_benchmarking::benchmarks! { create { let min_create_bond = min_create_bond::(); let depositor: T::AccountId = account("depositor", USER_SEED, 0); + let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // Give the depositor some balance to bond CurrencyOf::::make_free_balance_be(&depositor, min_create_bond * 2u32.into()); @@ -499,9 +504,9 @@ frame_benchmarking::benchmarks! 
{ }: _( Origin::Signed(depositor.clone()), min_create_bond, - depositor.clone(), - depositor.clone(), - depositor.clone() + depositor_lookup.clone(), + depositor_lookup.clone(), + depositor_lookup ) verify { assert_eq!(RewardPools::::count(), 1); diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 4a31048fb8d8b..62d0b3ddd55cb 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -282,7 +282,10 @@ use frame_support::{ use scale_info::TypeInfo; use sp_core::U256; use sp_runtime::{ - traits::{AccountIdConversion, Bounded, CheckedAdd, CheckedSub, Convert, Saturating, Zero}, + traits::{ + AccountIdConversion, Bounded, CheckedAdd, CheckedSub, Convert, Saturating, StaticLookup, + Zero, + }, FixedPointNumber, FixedPointOperand, }; use sp_staking::{EraIndex, OnStakerSlash, StakingInterface}; @@ -321,6 +324,8 @@ pub type PoolId = u32; type UnbondingPoolsWithEra = BoundedBTreeMap, TotalUnbondingPools>; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + pub const POINTS_TO_BALANCE_INIT_RATIO: u32 = 1; /// Possible operations on the configuration values of this pallet. @@ -1629,10 +1634,11 @@ pub mod pallet { #[transactional] pub fn unbond( origin: OriginFor, - member_account: T::AccountId, + member_account: AccountIdLookupOf, #[pallet::compact] unbonding_points: BalanceOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let member_account = T::Lookup::lookup(member_account)?; let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&member_account)?; @@ -1741,10 +1747,11 @@ pub mod pallet { #[transactional] pub fn withdraw_unbonded( origin: OriginFor, - member_account: T::AccountId, + member_account: AccountIdLookupOf, num_slashing_spans: u32, ) -> DispatchResultWithPostInfo { let caller = ensure_signed(origin)?; + let member_account = T::Lookup::lookup(member_account)?; let mut member = PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?; let current_era = T::StakingInterface::current_era(); @@ -1863,11 +1870,14 @@ pub mod pallet { pub fn create( origin: OriginFor, #[pallet::compact] amount: BalanceOf, - root: T::AccountId, - nominator: T::AccountId, - state_toggler: T::AccountId, + root: AccountIdLookupOf, + nominator: AccountIdLookupOf, + state_toggler: AccountIdLookupOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let root = T::Lookup::lookup(root)?; + let nominator = T::Lookup::lookup(nominator)?; + let state_toggler = T::Lookup::lookup(state_toggler)?; ensure!(amount >= Pallet::::depositor_min_bond(), Error::::MinimumBondNotMet); ensure!( @@ -2461,7 +2471,8 @@ impl Pallet { member: T::AccountId, ) -> DispatchResult { let points = PoolMembers::::get(&member).map(|d| d.active_points()).unwrap_or_default(); - Self::unbond(origin, member, points) + let member_lookup = T::Lookup::unlookup(member); + Self::unbond(origin, member_lookup, points) } } diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 87017290a3ab9..adaaebb0adc98 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -51,6 +51,7 @@ fn add_announcements( maybe_real: Option, ) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); + let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); let real = if let Some(real) = maybe_real { real @@ -59,16 +60,17 @@ fn add_announcements( 
T::Currency::make_free_balance_be(&real, BalanceOf::::max_value() / 2u32.into()); Proxy::::add_proxy( RawOrigin::Signed(real.clone()).into(), - caller.clone(), + caller_lookup, T::ProxyType::default(), T::BlockNumber::zero(), )?; real }; + let real_lookup = T::Lookup::unlookup(real); for _ in 0..n { Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), - real.clone(), + real_lookup.clone(), T::CallHasher::hash_of(&("add_announcement", n)), )?; } @@ -83,8 +85,9 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real); let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); - }: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call)) + }: _(RawOrigin::Signed(caller), real_lookup, Some(T::ProxyType::default()), Box::new(call)) verify { assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) } @@ -95,17 +98,19 @@ benchmarks! { // In this case the caller is the "target" proxy let caller: T::AccountId = account("anonymous", 0, SEED); let delegate: T::AccountId = account("target", p - 1, SEED); + let delegate_lookup = T::Lookup::unlookup(delegate.clone()); T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real); let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(delegate.clone()).into(), - real.clone(), + real_lookup.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(delegate.clone()), None)?; - }: _(RawOrigin::Signed(caller), delegate, real, Some(T::ProxyType::default()), Box::new(call)) + }: _(RawOrigin::Signed(caller), delegate_lookup, real_lookup, Some(T::ProxyType::default()), Box::new(call)) verify { assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) } @@ -118,14 +123,15 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real); let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), - real.clone(), + real_lookup.clone(), T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(caller.clone()), real, T::CallHasher::hash_of(&call)) + }: _(RawOrigin::Signed(caller.clone()), real_lookup, T::CallHasher::hash_of(&call)) verify { let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); @@ -136,17 +142,19 @@ benchmarks! { let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; // In this case the caller is the "target" proxy let caller: T::AccountId = account("target", p - 1, SEED); + let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. 
let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real.clone()); let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), - real.clone(), + real_lookup, T::CallHasher::hash_of(&call), )?; add_announcements::(a, Some(caller.clone()), None)?; - }: _(RawOrigin::Signed(real), caller.clone(), T::CallHasher::hash_of(&call)) + }: _(RawOrigin::Signed(real), caller_lookup, T::CallHasher::hash_of(&call)) verify { let (announcements, _) = Announcements::::get(&caller); assert_eq!(announcements.len() as u32, a); @@ -160,10 +168,11 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); + let real_lookup = T::Lookup::unlookup(real.clone()); add_announcements::(a, Some(caller.clone()), None)?; let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); let call_hash = T::CallHasher::hash_of(&call); - }: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash) + }: _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash) verify { assert_last_event::(Event::Announced { real, proxy: caller, call_hash }.into()); } @@ -228,6 +237,7 @@ benchmarks! { let p in 0 .. (T::MaxProxies::get() - 2); let caller: T::AccountId = whitelisted_caller(); + let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); Pallet::::anonymous( RawOrigin::Signed(whitelisted_caller()).into(), @@ -241,7 +251,7 @@ benchmarks! { add_proxies::(p, Some(anon.clone()))?; ensure!(Proxies::::contains_key(&anon), "anon proxy not created"); - }: _(RawOrigin::Signed(anon.clone()), caller.clone(), T::ProxyType::default(), 0, height, ext_index) + }: _(RawOrigin::Signed(anon.clone()), caller_lookup, T::ProxyType::default(), 0, height, ext_index) verify { assert!(!Proxies::::contains_key(&anon)); } diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 9945626efbeb1..761d530954100 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -45,7 +45,7 @@ use frame_system::{self as system}; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::{ - traits::{Dispatchable, Hash, Saturating, TrailingZeroInput, Zero}, + traits::{Dispatchable, Hash, Saturating, StaticLookup, TrailingZeroInput, Zero}, DispatchResult, }; use sp_std::prelude::*; @@ -58,6 +58,8 @@ type CallHashOf = <::CallHasher as Hash>::Output; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + /// The parameters under which a particular account has a proxy relationship with some other /// account. 
#[derive( @@ -204,11 +206,12 @@ pub mod pallet { })] pub fn proxy( origin: OriginFor, - real: T::AccountId, + real: AccountIdLookupOf, force_proxy_type: Option, call: Box<::Call>, ) -> DispatchResult { let who = ensure_signed(origin)?; + let real = T::Lookup::lookup(real)?; let def = Self::find_proxy(&real, &who, force_proxy_type)?; ensure!(def.delay.is_zero(), Error::::Unannounced); @@ -233,11 +236,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::add_proxy(T::MaxProxies::get()))] pub fn add_proxy( origin: OriginFor, - delegate: T::AccountId, + delegate: AccountIdLookupOf, proxy_type: T::ProxyType, delay: T::BlockNumber, ) -> DispatchResult { let who = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; Self::add_proxy_delegate(&who, delegate, proxy_type, delay) } @@ -255,11 +259,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_proxy(T::MaxProxies::get()))] pub fn remove_proxy( origin: OriginFor, - delegate: T::AccountId, + delegate: AccountIdLookupOf, proxy_type: T::ProxyType, delay: T::BlockNumber, ) -> DispatchResult { let who = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; Self::remove_proxy_delegate(&who, delegate, proxy_type, delay) } @@ -359,13 +364,14 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::kill_anonymous(T::MaxProxies::get()))] pub fn kill_anonymous( origin: OriginFor, - spawner: T::AccountId, + spawner: AccountIdLookupOf, proxy_type: T::ProxyType, index: u16, #[pallet::compact] height: T::BlockNumber, #[pallet::compact] ext_index: u32, ) -> DispatchResult { let who = ensure_signed(origin)?; + let spawner = T::Lookup::lookup(spawner)?; let when = (height, ext_index); let proxy = Self::anonymous_account(&spawner, &proxy_type, index, Some(when)); @@ -401,10 +407,11 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get()))] pub fn announce( origin: OriginFor, - real: T::AccountId, + real: AccountIdLookupOf, call_hash: CallHashOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let real = T::Lookup::lookup(real)?; Proxies::::get(&real) .0 .into_iter() @@ -458,10 +465,11 @@ pub mod pallet { ))] pub fn remove_announcement( origin: OriginFor, - real: T::AccountId, + real: AccountIdLookupOf, call_hash: CallHashOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let real = T::Lookup::lookup(real)?; Self::edit_announcements(&who, |ann| ann.real != real || ann.call_hash != call_hash)?; Ok(()) @@ -489,10 +497,11 @@ pub mod pallet { ))] pub fn reject_announcement( origin: OriginFor, - delegate: T::AccountId, + delegate: AccountIdLookupOf, call_hash: CallHashOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; Self::edit_announcements(&delegate, |ann| { ann.real != who || ann.call_hash != call_hash })?; @@ -527,12 +536,14 @@ pub mod pallet { })] pub fn proxy_announced( origin: OriginFor, - delegate: T::AccountId, - real: T::AccountId, + delegate: AccountIdLookupOf, + real: AccountIdLookupOf, force_proxy_type: Option, call: Box<::Call>, ) -> DispatchResult { ensure_signed(origin)?; + let delegate = T::Lookup::lookup(delegate)?; + let real = T::Lookup::lookup(real)?; let def = Self::find_proxy(&real, &delegate, force_proxy_type)?; let call_hash = T::CallHasher::hash_of(&call); diff --git a/frame/ranked-collective/src/benchmarking.rs b/frame/ranked-collective/src/benchmarking.rs index ab1a5dc283ca5..611a4ce7334a9 100644 --- a/frame/ranked-collective/src/benchmarking.rs +++ 
b/frame/ranked-collective/src/benchmarking.rs @@ -33,11 +33,15 @@ fn assert_last_event, I: 'static>(generic_event: >:: fn make_member, I: 'static>(rank: Rank) -> T::AccountId { let who = account::("member", MemberCount::::get(0), SEED); - assert_ok!(Pallet::::add_member(T::PromoteOrigin::successful_origin(), who.clone())); + let who_lookup = T::Lookup::unlookup(who.clone()); + assert_ok!(Pallet::::add_member( + T::PromoteOrigin::successful_origin(), + who_lookup.clone() + )); for _ in 0..rank { assert_ok!(Pallet::::promote_member( T::PromoteOrigin::successful_origin(), - who.clone() + who_lookup.clone() )); } who @@ -46,8 +50,9 @@ fn make_member, I: 'static>(rank: Rank) -> T::AccountId { benchmarks_instance_pallet! { add_member { let who = account::("member", 0, SEED); + let who_lookup = T::Lookup::unlookup(who.clone()); let origin = T::PromoteOrigin::successful_origin(); - let call = Call::::add_member { who: who.clone() }; + let call = Call::::add_member { who: who_lookup }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_eq!(MemberCount::::get(0), 1); @@ -59,10 +64,11 @@ benchmarks_instance_pallet! { let rank = r as u16; let first = make_member::(rank); let who = make_member::(rank); + let who_lookup = T::Lookup::unlookup(who.clone()); let last = make_member::(rank); let last_index = (0..=rank).map(|r| IdToIndex::::get(r, &last).unwrap()).collect::>(); let origin = T::DemoteOrigin::successful_origin(); - let call = Call::::remove_member { who: who.clone(), min_rank: rank }; + let call = Call::::remove_member { who: who_lookup, min_rank: rank }; }: { call.dispatch_bypass_filter(origin)? } verify { for r in 0..=rank { @@ -76,8 +82,9 @@ benchmarks_instance_pallet! { let r in 0 .. 10; let rank = r as u16; let who = make_member::(rank); + let who_lookup = T::Lookup::unlookup(who.clone()); let origin = T::PromoteOrigin::successful_origin(); - let call = Call::::promote_member { who: who.clone() }; + let call = Call::::promote_member { who: who_lookup }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_eq!(Members::::get(&who).unwrap().rank, rank + 1); @@ -89,10 +96,11 @@ benchmarks_instance_pallet! { let rank = r as u16; let first = make_member::(rank); let who = make_member::(rank); + let who_lookup = T::Lookup::unlookup(who.clone()); let last = make_member::(rank); let last_index = IdToIndex::::get(rank, &last).unwrap(); let origin = T::DemoteOrigin::successful_origin(); - let call = Call::::demote_member { who: who.clone() }; + let call = Call::::demote_member { who: who_lookup }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_eq!(Members::::get(&who).map(|x| x.rank), rank.checked_sub(1)); @@ -106,14 +114,15 @@ benchmarks_instance_pallet! 
{ vote { let caller: T::AccountId = whitelisted_caller(); - assert_ok!(Pallet::::add_member(T::PromoteOrigin::successful_origin(), caller.clone())); + let caller_lookup = T::Lookup::unlookup(caller.clone()); + assert_ok!(Pallet::::add_member(T::PromoteOrigin::successful_origin(), caller_lookup.clone())); // Create a poll let class = T::Polls::classes().into_iter().next().unwrap(); let rank = T::MinRankOfClass::convert(class.clone()); for _ in 0..rank { assert_ok!(Pallet::::promote_member( T::PromoteOrigin::successful_origin(), - caller.clone() + caller_lookup.clone() )); } diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 79910c259a7cd..b8eaac9823634 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -43,7 +43,11 @@ use scale_info::TypeInfo; use sp_arithmetic::traits::Saturating; -use sp_runtime::{traits::Convert, ArithmeticError::Overflow, Perbill, RuntimeDebug}; +use sp_runtime::{ + traits::{Convert, StaticLookup}, + ArithmeticError::Overflow, + Perbill, RuntimeDebug, +}; use sp_std::{marker::PhantomData, prelude::*}; use frame_support::{ @@ -109,6 +113,7 @@ impl, I: 'static, M: GetMaxVoters> Tally { pub type TallyOf = Tally>; pub type PollIndexOf = <>::Polls as Polling>>::Index; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; impl, I: 'static, M: GetMaxVoters> VoteTally for Tally { fn new(_: Rank) -> Self { @@ -466,8 +471,9 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::add_member())] - pub fn add_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { + pub fn add_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let _ = T::PromoteOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; Self::do_add_member(who) } @@ -478,8 +484,9 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::promote_member(0))] - pub fn promote_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { + pub fn promote_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let max_rank = T::PromoteOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; Self::do_promote_member(who, Some(max_rank)) } @@ -491,8 +498,9 @@ pub mod pallet { /// /// Weight: `O(1)`, less if the member's index is highest in its rank. #[pallet::weight(T::WeightInfo::demote_member(0))] - pub fn demote_member(origin: OriginFor, who: T::AccountId) -> DispatchResult { + pub fn demote_member(origin: OriginFor, who: AccountIdLookupOf) -> DispatchResult { let max_rank = T::DemoteOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; let mut record = Self::ensure_member(&who)?; let rank = record.rank; ensure!(max_rank >= rank, Error::::NoPermission); @@ -523,10 +531,11 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::remove_member(*min_rank as u32))] pub fn remove_member( origin: OriginFor, - who: T::AccountId, + who: AccountIdLookupOf, min_rank: Rank, ) -> DispatchResultWithPostInfo { let max_rank = T::DemoteOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; let MemberRecord { rank, .. } = Self::ensure_member(&who)?; ensure!(min_rank >= rank, Error::::InvalidWitness); ensure!(max_rank >= rank, Error::::NoPermission); diff --git a/frame/recovery/src/benchmarking.rs b/frame/recovery/src/benchmarking.rs index 5354de6d10b51..56d5df22d49c5 100644 --- a/frame/recovery/src/benchmarking.rs +++ b/frame/recovery/src/benchmarking.rs @@ -106,22 +106,25 @@ benchmarks! 
{ as_recovered { let caller: T::AccountId = whitelisted_caller(); let recovered_account: T::AccountId = account("recovered_account", 0, SEED); + let recovered_account_lookup = T::Lookup::unlookup(recovered_account.clone()); let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::insert(&caller, &recovered_account); }: _( RawOrigin::Signed(caller), - recovered_account, + recovered_account_lookup, Box::new(call) ) set_recovered { let lost: T::AccountId = whitelisted_caller(); + let lost_lookup = T::Lookup::unlookup(lost.clone()); let rescuer: T::AccountId = whitelisted_caller(); + let rescuer_lookup = T::Lookup::unlookup(rescuer.clone()); }: _( RawOrigin::Root, - lost.clone(), - rescuer.clone() + lost_lookup, + rescuer_lookup ) verify { assert_last_event::( Event::AccountRecovered { @@ -153,11 +156,12 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let lost_account: T::AccountId = account("lost_account", 0, SEED); + let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); insert_recovery_account::(&caller, &lost_account); }: _( RawOrigin::Signed(caller.clone()), - lost_account.clone() + lost_account_lookup ) verify { assert_last_event::( Event::RecoveryInitiated { @@ -172,7 +176,10 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let lost_account: T::AccountId = account("lost_account", 0, SEED); + let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); let rescuer_account: T::AccountId = account("rescuer_account", 0, SEED); + let rescuer_account_lookup = T::Lookup::unlookup(rescuer_account.clone()); + // Create friends let friends = add_caller_and_generate_friends::(caller.clone(), n); @@ -206,8 +213,8 @@ benchmarks! { }: _( RawOrigin::Signed(caller.clone()), - lost_account.clone(), - rescuer_account.clone() + lost_account_lookup, + rescuer_account_lookup ) verify { assert_last_event::( Event::RecoveryVouched { @@ -223,6 +230,7 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let lost_account: T::AccountId = account("lost_account", 0, SEED); + let lost_account_lookup = T::Lookup::unlookup(lost_account.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -257,7 +265,7 @@ benchmarks! { >::insert(&lost_account, &caller, recovery_status); }: _( RawOrigin::Signed(caller.clone()), - lost_account.clone() + lost_account_lookup ) verify { assert_last_event::( Event::AccountRecovered { @@ -270,6 +278,7 @@ benchmarks! { close_recovery { let caller: T::AccountId = whitelisted_caller(); let rescuer_account: T::AccountId = account("rescuer_account", 0, SEED); + let rescuer_account_lookup = T::Lookup::unlookup(rescuer_account.clone()); let n in 1 .. T::MaxFriends::get(); @@ -307,7 +316,7 @@ benchmarks! { >::insert(&caller, &rescuer_account, recovery_status); }: _( RawOrigin::Signed(caller.clone()), - rescuer_account.clone() + rescuer_account_lookup ) verify { assert_last_event::( Event::RecoveryClosed { @@ -356,6 +365,7 @@ benchmarks! { cancel_recovered { let caller: T::AccountId = whitelisted_caller(); let account: T::AccountId = account("account", 0, SEED); + let account_lookup = T::Lookup::unlookup(account.clone()); frame_system::Pallet::::inc_providers(&caller); @@ -364,7 +374,7 @@ benchmarks! 
{ Proxy::::insert(&caller, &account); }: _( RawOrigin::Signed(caller), - account + account_lookup ) impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index b839d25e32b47..45260b577f700 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -156,7 +156,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -use sp_runtime::traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion}; +use sp_runtime::traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion, StaticLookup}; use sp_std::prelude::*; use frame_support::{ @@ -182,6 +182,7 @@ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type FriendsOf = BoundedVec<::AccountId, ::MaxFriends>; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// An active recovery process. #[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen)] @@ -382,10 +383,11 @@ pub mod pallet { )})] pub fn as_recovered( origin: OriginFor, - account: T::AccountId, + account: AccountIdLookupOf, call: Box<::Call>, ) -> DispatchResult { let who = ensure_signed(origin)?; + let account = T::Lookup::lookup(account)?; // Check `who` is allowed to make a call on behalf of `account` let target = Self::proxy(&who).ok_or(Error::::NotAllowed)?; ensure!(target == account, Error::::NotAllowed); @@ -405,10 +407,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_recovered())] pub fn set_recovered( origin: OriginFor, - lost: T::AccountId, - rescuer: T::AccountId, + lost: AccountIdLookupOf, + rescuer: AccountIdLookupOf, ) -> DispatchResult { ensure_root(origin)?; + let lost = T::Lookup::lookup(lost)?; + let rescuer = T::Lookup::lookup(rescuer)?; // Create the recovery storage item. >::insert(&rescuer, &lost); Self::deposit_event(Event::::AccountRecovered { @@ -486,8 +490,12 @@ pub mod pallet { /// - `account`: The lost account that you want to recover. This account needs to be /// recoverable (i.e. have a recovery configuration). #[pallet::weight(T::WeightInfo::initiate_recovery())] - pub fn initiate_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { + pub fn initiate_recovery( + origin: OriginFor, + account: AccountIdLookupOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; + let account = T::Lookup::lookup(account)?; // Check that the account is recoverable ensure!(>::contains_key(&account), Error::::NotRecoverable); // Check that the recovery process has not already been started @@ -528,10 +536,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::vouch_recovery(T::MaxFriends::get()))] pub fn vouch_recovery( origin: OriginFor, - lost: T::AccountId, - rescuer: T::AccountId, + lost: AccountIdLookupOf, + rescuer: AccountIdLookupOf, ) -> DispatchResult { let who = ensure_signed(origin)?; + let lost = T::Lookup::lookup(lost)?; + let rescuer = T::Lookup::lookup(rescuer)?; // Get the recovery configuration for the lost account. let recovery_config = Self::recovery_config(&lost).ok_or(Error::::NotRecoverable)?; // Get the active recovery process for the rescuer. @@ -567,8 +577,12 @@ pub mod pallet { /// - `account`: The lost account that you want to claim has been successfully recovered by /// you. 
#[pallet::weight(T::WeightInfo::claim_recovery(T::MaxFriends::get()))] - pub fn claim_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { + pub fn claim_recovery( + origin: OriginFor, + account: AccountIdLookupOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; + let account = T::Lookup::lookup(account)?; // Get the recovery configuration for the lost account let recovery_config = Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; @@ -610,8 +624,12 @@ pub mod pallet { /// Parameters: /// - `rescuer`: The account trying to rescue this recoverable account. #[pallet::weight(T::WeightInfo::close_recovery(T::MaxFriends::get()))] - pub fn close_recovery(origin: OriginFor, rescuer: T::AccountId) -> DispatchResult { + pub fn close_recovery( + origin: OriginFor, + rescuer: AccountIdLookupOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; + let rescuer = T::Lookup::lookup(rescuer)?; // Take the active recovery process started by the rescuer for this account. let active_recovery = >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; @@ -665,8 +683,12 @@ pub mod pallet { /// Parameters: /// - `account`: The recovered account you are able to call on-behalf-of. #[pallet::weight(T::WeightInfo::cancel_recovered())] - pub fn cancel_recovered(origin: OriginFor, account: T::AccountId) -> DispatchResult { + pub fn cancel_recovered( + origin: OriginFor, + account: AccountIdLookupOf, + ) -> DispatchResult { let who = ensure_signed(origin)?; + let account = T::Lookup::lookup(account)?; // Check `who` is allowed to make a call on behalf of `account` ensure!(Self::proxy(&who) == Some(account), Error::::NotAllowed); Proxy::::remove(&who); diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index abdb9b2acc9b5..aa4f75255b120 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -110,6 +110,7 @@ use sp_std::{fmt::Debug, prelude::*}; type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; type PoolT = Vec<(::AccountId, Option<>::Score>)>; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// The enum is supplied when refreshing the members set. /// Depending on the enum variant the corresponding associated @@ -346,7 +347,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn kick( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, index: u32, ) -> DispatchResult { T::KickOrigin::ensure_origin(origin)?; @@ -370,7 +371,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn score( origin: OriginFor, - dest: ::Source, + dest: AccountIdLookupOf, index: u32, score: T::Score, ) -> DispatchResult { diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 2a6428e754b9d..ec4cca1813ec6 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -281,6 +281,7 @@ type BalanceOf = type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// A vote by a member on a candidate application. #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] @@ -823,11 +824,12 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn vouch( origin: OriginFor, - who: T::AccountId, + who: AccountIdLookupOf, value: BalanceOf, tip: BalanceOf, ) -> DispatchResult { let voucher = ensure_signed(origin)?; + let who = T::Lookup::lookup(who)?; // Check user is not suspended. 
ensure!(!>::contains_key(&who), Error::::Suspended); ensure!(!>::contains_key(&who), Error::::Suspended); @@ -914,7 +916,7 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn vote( origin: OriginFor, - candidate: ::Source, + candidate: AccountIdLookupOf, approve: bool, ) -> DispatchResult { let voter = ensure_signed(origin)?; @@ -1026,11 +1028,12 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn found( origin: OriginFor, - founder: T::AccountId, + founder: AccountIdLookupOf, max_members: u32, rules: Vec, ) -> DispatchResult { T::FounderSetOrigin::ensure_origin(origin)?; + let founder = T::Lookup::lookup(founder)?; ensure!(!>::exists(), Error::::AlreadyFounded); ensure!(max_members > 1, Error::::MaxMembers); // This should never fail in the context of this function... @@ -1104,10 +1107,11 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn judge_suspended_member( origin: OriginFor, - who: T::AccountId, + who: AccountIdLookupOf, forgive: bool, ) -> DispatchResult { T::SuspensionJudgementOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; ensure!(>::contains_key(&who), Error::::NotSuspended); if forgive { @@ -1180,10 +1184,11 @@ pub mod pallet { #[pallet::weight(T::BlockWeights::get().max_block / 10)] pub fn judge_suspended_candidate( origin: OriginFor, - who: T::AccountId, + who: AccountIdLookupOf, judgement: Judgement, ) -> DispatchResult { T::SuspensionJudgementOrigin::ensure_origin(origin)?; + let who = T::Lookup::lookup(who)?; if let Some((value, kind)) = >::get(&who) { match judgement { Judgement::Approve => { diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 12de0ff9cc665..2d5943b51758d 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -83,7 +83,7 @@ pub fn create_validator_with_nominators( let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller).into(), validator_prefs)?; - let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); + let stash_lookup = T::Lookup::unlookup(v_stash.clone()); points_total += 10; points_individual.push((v_stash.clone(), 10)); @@ -217,8 +217,7 @@ benchmarks! { bond { let stash = create_funded_user::("stash", USER_SEED, 100); let controller = create_funded_user::("controller", USER_SEED, 100); - let controller_lookup: ::Source - = T::Lookup::unlookup(controller.clone()); + let controller_lookup = T::Lookup::unlookup(controller.clone()); let reward_destination = RewardDestination::Staked; let amount = T::Currency::minimum_balance() * 10u32.into(); whitelist_account!(stash); @@ -365,7 +364,7 @@ benchmarks! { 100, Default::default(), )?; - let stash_lookup: ::Source = T::Lookup::unlookup(stash.clone()); + let stash_lookup = T::Lookup::unlookup(stash.clone()); // they start validating. 
Staking::::validate(RawOrigin::Signed(controller.clone()).into(), Default::default())?; diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index ab0ab685e6911..38466c8cb1d62 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -309,7 +309,7 @@ use frame_support::{ use scale_info::TypeInfo; use sp_runtime::{ curve::PiecewiseLinear, - traits::{AtLeast32BitUnsigned, Convert, Saturating, Zero}, + traits::{AtLeast32BitUnsigned, Convert, Saturating, StaticLookup, Zero}, Perbill, Perquintill, RuntimeDebug, }; use sp_staking::{ @@ -347,6 +347,8 @@ type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + parameter_types! { pub MaxUnlockingChunks: u32 = 32; } diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 4ce96ab68b11a..1f11f0ff00ac1 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -40,10 +40,10 @@ mod impls; pub use impls::*; use crate::{ - slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraPayout, EraRewardPoints, Exposure, - Forcing, MaxUnlockingChunks, NegativeImbalanceOf, Nominations, PositiveImbalanceOf, Releases, - RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, - ValidatorPrefs, + slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, + EraRewardPoints, Exposure, Forcing, MaxUnlockingChunks, NegativeImbalanceOf, Nominations, + PositiveImbalanceOf, Releases, RewardDestination, SessionInterface, StakingLedger, + UnappliedSlash, UnlockChunk, ValidatorPrefs, }; const STAKING_ID: LockIdentifier = *b"staking "; @@ -768,7 +768,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::bond())] pub fn bond( origin: OriginFor, - controller: ::Source, + controller: AccountIdLookupOf, #[pallet::compact] value: BalanceOf, payee: RewardDestination, ) -> DispatchResult { @@ -1055,7 +1055,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::nominate(targets.len() as u32))] pub fn nominate( origin: OriginFor, - targets: Vec<::Source>, + targets: Vec>, ) -> DispatchResult { let controller = ensure_signed(origin)?; @@ -1175,7 +1175,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_controller())] pub fn set_controller( origin: OriginFor, - controller: ::Source, + controller: AccountIdLookupOf, ) -> DispatchResult { let stash = ensure_signed(origin)?; let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; @@ -1521,10 +1521,7 @@ pub mod pallet { /// Note: Making this call only makes sense if you first set the validator preferences to /// block any further nominations. 
#[pallet::weight(T::WeightInfo::kick(who.len() as u32))] - pub fn kick( - origin: OriginFor, - who: Vec<::Source>, - ) -> DispatchResult { + pub fn kick(origin: OriginFor, who: Vec>) -> DispatchResult { let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index ba67292ddc434..0e0ac76523471 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -78,8 +78,7 @@ pub fn create_stash_controller( ) -> Result<(T::AccountId, T::AccountId), &'static str> { let stash = create_funded_user::("stash", n, balance_factor); let controller = create_funded_user::("controller", n, balance_factor); - let controller_lookup: ::Source = - T::Lookup::unlookup(controller.clone()); + let controller_lookup = T::Lookup::unlookup(controller.clone()); let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); Staking::::bond( RawOrigin::Signed(stash.clone()).into(), @@ -98,8 +97,7 @@ pub fn create_stash_controller_with_balance( ) -> Result<(T::AccountId, T::AccountId), &'static str> { let stash = create_funded_user_with_balance::("stash", n, balance); let controller = create_funded_user_with_balance::("controller", n, balance); - let controller_lookup: ::Source = - T::Lookup::unlookup(controller.clone()); + let controller_lookup = T::Lookup::unlookup(controller.clone()); Staking::::bond( RawOrigin::Signed(stash.clone()).into(), @@ -120,8 +118,7 @@ pub fn create_stash_and_dead_controller( let stash = create_funded_user::("stash", n, balance_factor); // controller has no funds let controller = create_funded_user::("controller", n, 0); - let controller_lookup: ::Source = - T::Lookup::unlookup(controller.clone()); + let controller_lookup = T::Lookup::unlookup(controller.clone()); let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); Staking::::bond( RawOrigin::Signed(stash.clone()).into(), @@ -136,7 +133,7 @@ pub fn create_stash_and_dead_controller( pub fn create_validators( max: u32, balance_factor: u32, -) -> Result::Source>, &'static str> { +) -> Result>, &'static str> { create_validators_with_seed::(max, balance_factor, 0) } @@ -145,15 +142,15 @@ pub fn create_validators_with_seed( max: u32, balance_factor: u32, seed: u32, -) -> Result::Source>, &'static str> { - let mut validators: Vec<::Source> = Vec::with_capacity(max as usize); +) -> Result>, &'static str> { + let mut validators: Vec> = Vec::with_capacity(max as usize); for i in 0..max { let (stash, controller) = create_stash_controller::(i + seed, balance_factor, RewardDestination::Staked)?; let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; - let stash_lookup: ::Source = T::Lookup::unlookup(stash); + let stash_lookup = T::Lookup::unlookup(stash); validators.push(stash_lookup); } Ok(validators) @@ -180,11 +177,10 @@ pub fn create_validators_with_nominators_for_era( edge_per_nominator: usize, randomize_stake: bool, to_nominate: Option, -) -> Result::Source>, &'static str> { +) -> Result>, &'static str> { clear_validators_and_nominators::(); - let mut validators_stash: Vec<::Source> = - Vec::with_capacity(validators as usize); + let mut validators_stash: Vec> = Vec::with_capacity(validators as usize); let mut rng = ChaChaRng::from_seed(SEED.using_encoded(blake2_256)); // Create 
validators @@ -195,8 +191,7 @@ pub fn create_validators_with_nominators_for_era( let validator_prefs = ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?; - let stash_lookup: ::Source = - T::Lookup::unlookup(v_stash.clone()); + let stash_lookup = T::Lookup::unlookup(v_stash.clone()); validators_stash.push(stash_lookup.clone()); } @@ -211,7 +206,7 @@ pub fn create_validators_with_nominators_for_era( // Have them randomly validate let mut available_validators = validator_chosen.clone(); - let mut selected_validators: Vec<::Source> = + let mut selected_validators: Vec> = Vec::with_capacity(edge_per_nominator); for _ in 0..validators.min(edge_per_nominator as u32) { diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index a47b5a79bd017..07fdc56e82da6 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -104,6 +104,8 @@ mod tests; pub use pallet::*; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::{DispatchResult, *}; @@ -192,7 +194,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn set_key( origin: OriginFor, - new: ::Source, + new: AccountIdLookupOf, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; @@ -228,7 +230,7 @@ pub mod pallet { })] pub fn sudo_as( origin: OriginFor, - who: ::Source, + who: AccountIdLookupOf, call: Box<::Call>, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs index 4c14cb4de94e1..4956e2a095688 100644 --- a/frame/tips/src/benchmarking.rs +++ b/frame/tips/src/benchmarking.rs @@ -92,18 +92,20 @@ benchmarks_instance_pallet! { report_awesome { let r in 0 .. T::MaximumReasonLength::get(); let (caller, reason, awesome_person) = setup_awesome::(r); + let awesome_person_lookup = T::Lookup::unlookup(awesome_person); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, awesome_person) + }: _(RawOrigin::Signed(caller), reason, awesome_person_lookup) retract_tip { let r = T::MaximumReasonLength::get(); let (caller, reason, awesome_person) = setup_awesome::(r); + let awesome_person_lookup = T::Lookup::unlookup(awesome_person.clone()); TipsMod::::report_awesome( RawOrigin::Signed(caller.clone()).into(), reason.clone(), - awesome_person.clone() + awesome_person_lookup )?; let reason_hash = T::Hashing::hash(&reason[..]); let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); @@ -117,19 +119,21 @@ benchmarks_instance_pallet! { let t in 1 .. T::Tippers::max_len() as u32; let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; + let beneficiary_lookup = T::Lookup::unlookup(beneficiary); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: _(RawOrigin::Signed(caller), reason, beneficiary, value) + }: _(RawOrigin::Signed(caller), reason, beneficiary_lookup, value) tip { let t in 1 .. 
T::Tippers::max_len() as u32; let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let beneficiary_lookup = T::Lookup::unlookup(beneficiary.clone()); let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); TipsMod::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), - beneficiary.clone(), + beneficiary_lookup, value )?; let reason_hash = T::Hashing::hash(&reason[..]); @@ -150,11 +154,12 @@ benchmarks_instance_pallet! { // Set up a new tip proposal let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let beneficiary_lookup = T::Lookup::unlookup(beneficiary.clone()); let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); TipsMod::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), - beneficiary.clone(), + beneficiary_lookup, value )?; @@ -179,11 +184,12 @@ benchmarks_instance_pallet! { // Set up a new tip proposal let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let beneficiary_lookup = T::Lookup::unlookup(beneficiary.clone()); let value = T::Currency::minimum_balance().saturating_mul(100u32.into()); TipsMod::::tip_new( RawOrigin::Signed(member).into(), reason.clone(), - beneficiary.clone(), + beneficiary_lookup, value )?; diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs index 71af87b42b55b..a4697284e38a0 100644 --- a/frame/tips/src/lib.rs +++ b/frame/tips/src/lib.rs @@ -61,7 +61,7 @@ pub mod migrations; pub mod weights; use sp_runtime::{ - traits::{AccountIdConversion, BadOrigin, Hash, TrailingZeroInput, Zero}, + traits::{AccountIdConversion, BadOrigin, Hash, StaticLookup, TrailingZeroInput, Zero}, Percent, RuntimeDebug, }; use sp_std::prelude::*; @@ -80,6 +80,7 @@ pub use weights::WeightInfo; pub type BalanceOf = pallet_treasury::BalanceOf; pub type NegativeImbalanceOf = pallet_treasury::NegativeImbalanceOf; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// An open tipping "motion". Retains all details of a tip including information on the finder /// and the members who have voted. @@ -237,9 +238,10 @@ pub mod pallet { pub fn report_awesome( origin: OriginFor, reason: Vec, - who: T::AccountId, + who: AccountIdLookupOf, ) -> DispatchResult { let finder = ensure_signed(origin)?; + let who = T::Lookup::lookup(who)?; ensure!( reason.len() <= T::MaximumReasonLength::get() as usize, @@ -331,10 +333,11 @@ pub mod pallet { pub fn tip_new( origin: OriginFor, reason: Vec, - who: T::AccountId, + who: AccountIdLookupOf, #[pallet::compact] tip_value: BalanceOf, ) -> DispatchResult { let tipper = ensure_signed(origin)?; + let who = T::Lookup::lookup(who)?; ensure!(T::Tippers::contains(&tipper), BadOrigin); let reason_hash = T::Hashing::hash(&reason[..]); ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index ed30b2f7358e5..b2b670d86f07c 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -34,7 +34,7 @@ const SEED: u32 = 0; // Create the pre-requisite information needed to create a treasury `propose_spend`. 
fn setup_proposal, I: 'static>( u: u32, -) -> (T::AccountId, BalanceOf, ::Source) { +) -> (T::AccountId, BalanceOf, AccountIdLookupOf) { let caller = account("caller", u, SEED); let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100u32.into()); let _ = T::Currency::make_free_balance_be(&caller, value); diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index 6730f985b16e0..ed38177e1c499 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -93,6 +93,7 @@ pub type PositiveImbalanceOf = <>::Currency as Currenc pub type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// A trait to allow the Treasury Pallet to spend it's funds for other purposes. /// There is an expectation that the implementer of this trait will correctly manage @@ -338,7 +339,7 @@ pub mod pallet { pub fn propose_spend( origin: OriginFor, #[pallet::compact] value: BalanceOf, - beneficiary: ::Source, + beneficiary: AccountIdLookupOf, ) -> DispatchResult { let proposer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; @@ -419,7 +420,7 @@ pub mod pallet { pub fn spend( origin: OriginFor, #[pallet::compact] amount: BalanceOf, - beneficiary: ::Source, + beneficiary: AccountIdLookupOf, ) -> DispatchResult { let max_amount = T::SpendOrigin::ensure_origin(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs index 86247fb964ff5..8aa1512883448 100644 --- a/frame/uniques/src/benchmarking.rs +++ b/frame/uniques/src/benchmarking.rs @@ -37,7 +37,7 @@ use crate::Pallet as Uniques; const SEED: u32 = 0; fn create_collection, I: 'static>( -) -> (T::CollectionId, T::AccountId, ::Source) { +) -> (T::CollectionId, T::AccountId, AccountIdLookupOf) { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let collection = T::Helper::collection(0); @@ -49,8 +49,7 @@ fn create_collection, I: 'static>( (collection, caller, caller_lookup) } -fn add_collection_metadata, I: 'static>( -) -> (T::AccountId, ::Source) { +fn add_collection_metadata, I: 'static>() -> (T::AccountId, AccountIdLookupOf) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -68,7 +67,7 @@ fn add_collection_metadata, I: 'static>( fn mint_item, I: 'static>( index: u16, -) -> (T::ItemId, T::AccountId, ::Source) { +) -> (T::ItemId, T::AccountId, AccountIdLookupOf) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().admin; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -87,7 +86,7 @@ fn mint_item, I: 'static>( fn add_item_metadata, I: 'static>( item: T::ItemId, -) -> (T::AccountId, ::Source) { +) -> (T::AccountId, AccountIdLookupOf) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -106,7 +105,7 @@ fn add_item_metadata, I: 'static>( fn add_item_attribute, I: 'static>( item: T::ItemId, -) -> (BoundedVec, T::AccountId, ::Source) { +) -> (BoundedVec, T::AccountId, AccountIdLookupOf) { let caller = Collection::::get(T::Helper::collection(0)).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index b25fb2f87ea97..de5a774d16061 100644 --- a/frame/uniques/src/lib.rs +++ 
b/frame/uniques/src/lib.rs @@ -60,6 +60,8 @@ pub use pallet::*; pub use types::*; pub use weights::WeightInfo; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -463,10 +465,7 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::create())] - pub fn create( - origin: OriginFor, - admin: ::Source, - ) -> DispatchResult { + pub fn create(origin: OriginFor, admin: AccountIdLookupOf) -> DispatchResult { let collection = NextCollectionId::::get(); let owner = T::CreateOrigin::ensure_origin(origin, &collection)?; @@ -501,7 +500,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_create())] pub fn force_create( origin: OriginFor, - owner: ::Source, + owner: AccountIdLookupOf, free_holding: bool, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; @@ -599,7 +598,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - owner: ::Source, + owner: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; @@ -628,7 +627,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - check_owner: Option<::Source>, + check_owner: Option>, ) -> DispatchResult { let origin = ensure_signed(origin)?; let check_owner = check_owner.map(T::Lookup::lookup).transpose()?; @@ -664,7 +663,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - dest: ::Source, + dest: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; @@ -876,7 +875,7 @@ pub mod pallet { pub fn transfer_ownership( origin: OriginFor, collection: T::CollectionId, - owner: ::Source, + owner: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; @@ -924,9 +923,9 @@ pub mod pallet { pub fn set_team( origin: OriginFor, collection: T::CollectionId, - issuer: ::Source, - admin: ::Source, - freezer: ::Source, + issuer: AccountIdLookupOf, + admin: AccountIdLookupOf, + freezer: AccountIdLookupOf, ) -> DispatchResult { let origin = ensure_signed(origin)?; let issuer = T::Lookup::lookup(issuer)?; @@ -962,7 +961,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - delegate: ::Source, + delegate: AccountIdLookupOf, ) -> DispatchResult { let maybe_check: Option = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1015,7 +1014,7 @@ pub mod pallet { origin: OriginFor, collection: T::CollectionId, item: T::ItemId, - maybe_check_delegate: Option<::Source>, + maybe_check_delegate: Option>, ) -> DispatchResult { let maybe_check: Option = T::ForceOrigin::try_origin(origin) .map(|_| None) @@ -1066,10 +1065,10 @@ pub mod pallet { pub fn force_item_status( origin: OriginFor, collection: T::CollectionId, - owner: ::Source, - issuer: ::Source, - admin: ::Source, - freezer: ::Source, + owner: AccountIdLookupOf, + issuer: AccountIdLookupOf, + admin: AccountIdLookupOf, + freezer: AccountIdLookupOf, free_holding: bool, is_frozen: bool, ) -> DispatchResult { @@ -1507,7 +1506,7 @@ pub mod pallet { collection: T::CollectionId, item: T::ItemId, price: Option>, - whitelisted_buyer: Option<::Source>, + whitelisted_buyer: Option>, ) -> DispatchResult { let origin = ensure_signed(origin)?; let whitelisted_buyer = whitelisted_buyer.map(T::Lookup::lookup).transpose()?; diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 
2b8150e995240..dde5fe3ac7561 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -42,7 +42,7 @@ fn add_locks(who: &T::AccountId, n: u8) { } fn add_vesting_schedules( - target: ::Source, + target: AccountIdLookupOf, n: u32, ) -> Result, &'static str> { let min_transfer = T::MinVestedTransfer::get(); @@ -52,7 +52,7 @@ fn add_vesting_schedules( let starting_block = 1u32; let source: T::AccountId = account("source", 0, SEED); - let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); + let source_lookup = T::Lookup::unlookup(source.clone()); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); System::::set_block_number(T::BlockNumber::zero()); @@ -81,7 +81,7 @@ benchmarks! { let s in 1 .. T::MAX_VESTING_SCHEDULES; let caller: T::AccountId = whitelisted_caller(); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); add_locks::(&caller, l as u8); @@ -109,7 +109,7 @@ benchmarks! { let s in 1 .. T::MAX_VESTING_SCHEDULES; let caller: T::AccountId = whitelisted_caller(); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); add_locks::(&caller, l as u8); @@ -137,7 +137,7 @@ benchmarks! { let s in 1 .. T::MAX_VESTING_SCHEDULES; let other: T::AccountId = account("other", 0, SEED); - let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); + let other_lookup = T::Lookup::unlookup(other.clone()); add_locks::(&other, l as u8); let expected_balance = add_vesting_schedules::(other_lookup.clone(), s)?; @@ -166,7 +166,7 @@ benchmarks! { let s in 1 .. T::MAX_VESTING_SCHEDULES; let other: T::AccountId = account("other", 0, SEED); - let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); + let other_lookup = T::Lookup::unlookup(other.clone()); add_locks::(&other, l as u8); add_vesting_schedules::(other_lookup.clone(), s)?; @@ -198,7 +198,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let target: T::AccountId = account("target", 0, SEED); - let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); + let target_lookup = T::Lookup::unlookup(target.clone()); // Give target existing locks add_locks::(&target, l as u8); // Add one vesting schedules. @@ -232,11 +232,11 @@ benchmarks! { let s in 0 .. T::MAX_VESTING_SCHEDULES - 1; let source: T::AccountId = account("source", 0, SEED); - let source_lookup: ::Source = T::Lookup::unlookup(source.clone()); + let source_lookup = T::Lookup::unlookup(source.clone()); T::Currency::make_free_balance_be(&source, BalanceOf::::max_value()); let target: T::AccountId = account("target", 0, SEED); - let target_lookup: ::Source = T::Lookup::unlookup(target.clone()); + let target_lookup = T::Lookup::unlookup(target.clone()); // Give target existing locks add_locks::(&target, l as u8); // Add one less than max vesting schedules @@ -270,7 +270,7 @@ benchmarks! { let s in 2 .. T::MAX_VESTING_SCHEDULES; let caller: T::AccountId = account("caller", 0, SEED); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_lookup = T::Lookup::unlookup(caller.clone()); // Give target existing locks. add_locks::(&caller, l as u8); // Add max vesting schedules. @@ -320,7 +320,7 @@ benchmarks! 
{ let test_dest: T::AccountId = account("test_dest", 0, SEED); let caller: T::AccountId = account("caller", 0, SEED); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_lookup = T::Lookup::unlookup(caller.clone()); // Give target other locks. add_locks::(&caller, l as u8); // Add max vesting schedules. diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 9fb7eb8037916..8ac625e775e4f 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -84,6 +84,7 @@ type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type MaxLocksOf = <::Currency as LockableCurrency<::AccountId>>::MaxLocks; +type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; const VESTING_ID: LockIdentifier = *b"vesting "; @@ -321,10 +322,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::vest_other_locked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES) .max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::::get(), T::MAX_VESTING_SCHEDULES)) )] - pub fn vest_other( - origin: OriginFor, - target: ::Source, - ) -> DispatchResult { + pub fn vest_other(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { ensure_signed(origin)?; let who = T::Lookup::lookup(target)?; Self::do_vest(who) @@ -352,7 +350,7 @@ pub mod pallet { )] pub fn vested_transfer( origin: OriginFor, - target: ::Source, + target: AccountIdLookupOf, schedule: VestingInfo, T::BlockNumber>, ) -> DispatchResult { let transactor = ensure_signed(origin)?; @@ -383,8 +381,8 @@ pub mod pallet { )] pub fn force_vested_transfer( origin: OriginFor, - source: ::Source, - target: ::Source, + source: AccountIdLookupOf, + target: AccountIdLookupOf, schedule: VestingInfo, T::BlockNumber>, ) -> DispatchResult { ensure_root(origin)?; @@ -494,8 +492,8 @@ impl Pallet { // Execute a vested transfer from `source` to `target` with the given `schedule`. fn do_vested_transfer( - source: ::Source, - target: ::Source, + source: AccountIdLookupOf, + target: AccountIdLookupOf, schedule: VestingInfo, T::BlockNumber>, ) -> DispatchResult { // Validate user inputs. From 00cc5f104176fac6f5a624bced22a2192c7c0470 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 18 Aug 2022 20:59:22 +0200 Subject: [PATCH 046/348] Introduce trie level cache and remove state cache (#11407) * trie state cache * Also cache missing access on read. * fix comp * bis * fix * use has_lru * remove local storage cache on size 0. * No cache. * local cache only * trie cache and local cache * storage cache (with local) * trie cache no local cache * Add state access benchmark * Remove warnings etc * Add trie cache benchmark * No extra "clone" required * Change benchmark to use multiple blocks * Use patches * Integrate shitty implementation * More stuff * Revert "Merge branch 'master' into trie_state_cache" This reverts commit 2c1886d2410f3351a1eb5d2d7e930a05fe0ad796, reversing changes made to 540b4fd0f6bf8157f5402f0095f78f0ed893c57b. 
* Improve benchmark * Adapt to latest changes * Adapt to changes in trie * Add a test that uses iterator * Start fixing it * Remove obsolete file * Make it compile * Start rewriting the trie node cache * More work on the cache * More docs and code etc * Make data cache an optional * Tests * Remove debug stuff * Recorder * Some docs and a simple test for the recorder * Compile fixes * Make it compile * More fixes * More fixes * Fix fix fix * Make sure cache and recorder work together for basic stuff * Test that data caching and recording works * Test `TrieDBMut` with caching * Try something * Fixes, fixes, fixes * Forward the recorder * Make it compile * Use recorder in more places * Switch to new `with_optional_recorder` fn * Refactor and cleanups * Move `ProvingBackend` tests * Simplify * Move over all functionality to the essence * Fix compilation * Implement estimate encoded size for StorageProof * Start using the `cache` everywhere * Use the cache everywhere * Fix compilation * Fix tests * Adds `TrieBackendBuilder` and enhances the tests * Ensure that recorder drain checks that values are found as expected * Switch over to `TrieBackendBuilder` * Start fixing the problem with child tries and recording * Fix recording of child tries * Make it compile * Overwrite `storage_hash` in `TrieBackend` * Add `storage_cache` to the benchmarks * Fix `no_std` build * Speed up cache lookup * Extend the state access benchmark to also hash a runtime * Fix build * Fix compilation * Rewrite value cache * Add lru cache * Ensure that the cache lru works * Value cache should not be optional * Add support for keeping the shared node cache in its bounds * Make the cache configurable * Check that the cache respects the bounds * Adds a new test * Fixes * Docs and some renamings * More docs * Start using the new recorder * Fix more code * Take `self` argument * Remove warnings * Fix benchmark * Fix accounting * Rip off the state cache * Start fixing fallout after removing the state cache * Make it compile after trie changes * Fix test * Add some logging * Some docs * Some fixups and clean ups * Fix benchmark * Remove unneeded file * Use git for patching * Make CI happy * Update primitives/trie/Cargo.toml Co-authored-by: Koute * Update primitives/state-machine/src/trie_backend.rs Co-authored-by: cheme * Introduce new `AsTrieBackend` trait * Make the LocalTrieCache not clonable * Make it work in no_std and add docs * Remove duplicate dependency * Switch to ahash for better performance * Speedup value cache merge * Output errors on underflow * Ensure the internal LRU map doesn't grow too much * Use const fn to calculate the value cache element size * Remove cache configuration * Fix * Clear the cache in between for more testing * Try to come up with a failing test case * Make the test fail * Fix the child trie recording * Make everything compile after the changes to trie * Adapt to latest trie-db changes * Fix on stable * Update primitives/trie/src/cache.rs Co-authored-by: cheme * Fix wrong merge * Docs * Fix warnings * Cargo.lock * Bump pin-project * Fix warnings * Switch to released crate version * More fixes * Make clippy and rustdocs happy * More clippy * Print error when using deprecated `--state-cache-size` * :facepalm: * Fixes * Fix storage_hash linkings * Update client/rpc/src/dev/mod.rs Co-authored-by: Arkadiy Paronyan * Review feedback * encode bound * Rework the shared value cache Instead of using a `u64` to represent the key we now use an `Arc<[u8]>`. This arc is also stored in some extra `HashSet`. 
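The value-cache rework described here and in the sentence that follows keys cached values by `Arc<[u8]>` and shares those key allocations across storage roots through an extra `HashSet`. A toy, std-only sketch of that sharing idea; the names are illustrative, and the LRU eviction performed by the real `shared_cache.rs` is omitted.

```rust
use std::collections::{HashMap, HashSet};
use std::sync::Arc;

type StorageRoot = [u8; 32];

#[derive(Default)]
struct ValueCache {
    /// One `Arc<[u8]>` per distinct storage key, shared across all storage roots.
    known_keys: HashSet<Arc<[u8]>>,
    /// Cached values keyed by `(storage_root, storage_key)`; cloning the `Arc`
    /// does not duplicate the key bytes.
    values: HashMap<(StorageRoot, Arc<[u8]>), Option<Vec<u8>>>,
}

impl ValueCache {
    /// Intern `key`, returning the allocation shared by every storage root.
    fn intern(&mut self, key: &[u8]) -> Arc<[u8]> {
        if let Some(existing) = self.known_keys.get(key) {
            existing.clone()
        } else {
            let arc: Arc<[u8]> = Arc::from(key);
            self.known_keys.insert(arc.clone());
            arc
        }
    }

    fn insert(&mut self, root: StorageRoot, key: &[u8], value: Option<Vec<u8>>) {
        let key = self.intern(key);
        self.values.insert((root, key), value);
    }
}

fn main() {
    let mut cache = ValueCache::default();
    let (root_a, root_b) = ([1u8; 32], [2u8; 32]);

    cache.insert(root_a, b":code", Some(vec![0x00]));
    cache.insert(root_b, b":code", Some(vec![0x01]));

    // Two cached entries, but only one shared allocation for the key bytes.
    assert_eq!(cache.values.len(), 2);
    assert_eq!(cache.known_keys.len(), 1);
}
```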
We store the key are in an extra `HashSet` to de-duplicate the keys accross different storage roots. When the latest key usage is dropped in the lru, we also remove the key from the `HashSet`. * Improve of the cache by merging the old and new solution * FMT * Please stop coming back all the time :crying: * Update primitives/trie/src/cache/shared_cache.rs Co-authored-by: Arkadiy Paronyan * Fixes * Make clippy happy * Ensure we don't deadlock * Only use one lock to simplify the code * Do not depend on `Hasher` * Fix tests * FMT * Clippy :facepalm: Co-authored-by: cheme Co-authored-by: Koute Co-authored-by: Arkadiy Paronyan --- Cargo.lock | 32 +- bin/node/bench/src/generator.rs | 5 +- bin/node/bench/src/trie.rs | 7 +- bin/node/cli/benches/block_production.rs | 3 +- bin/node/cli/benches/transaction_pool.rs | 3 +- bin/node/testing/src/bench.rs | 3 +- client/api/src/backend.rs | 10 +- .../basic-authorship/src/basic_authorship.rs | 21 +- client/cli/src/commands/chain_info_cmd.rs | 3 +- client/cli/src/config.rs | 17 +- client/cli/src/params/import_params.rs | 24 +- client/db/Cargo.toml | 12 +- client/db/benches/state_access.rs | 312 ++++++ client/db/src/bench.rs | 107 ++- client/db/src/lib.rs | 181 ++-- client/db/src/record_stats_state.rs | 230 +++++ client/rpc-api/src/dev/error.rs | 5 + client/service/src/builder.rs | 3 +- client/service/src/client/call_executor.rs | 21 +- client/service/src/client/client.rs | 6 +- client/service/src/config.rs | 8 +- client/service/test/src/client/mod.rs | 6 +- client/service/test/src/lib.rs | 3 +- frame/session/src/historical/mod.rs | 35 +- primitives/api/Cargo.toml | 2 + .../api/proc-macro/src/impl_runtime_apis.rs | 10 +- .../proc-macro/src/mock_impl_runtime_apis.rs | 4 +- primitives/api/src/lib.rs | 12 +- primitives/state-machine/Cargo.toml | 1 + primitives/state-machine/src/backend.rs | 32 +- .../state-machine/src/in_memory_backend.rs | 13 +- primitives/state-machine/src/lib.rs | 192 ++-- .../state-machine/src/proving_backend.rs | 611 ------------ primitives/state-machine/src/read_only.rs | 26 +- primitives/state-machine/src/testing.rs | 14 +- primitives/state-machine/src/trie_backend.rs | 891 ++++++++++++++++-- .../state-machine/src/trie_backend_essence.rs | 455 ++++++--- .../transaction-storage-proof/src/lib.rs | 3 +- primitives/trie/Cargo.toml | 18 +- primitives/trie/src/cache/mod.rs | 686 ++++++++++++++ primitives/trie/src/cache/shared_cache.rs | 677 +++++++++++++ primitives/trie/src/error.rs | 21 +- primitives/trie/src/lib.rs | 168 ++-- primitives/trie/src/node_codec.rs | 41 +- primitives/trie/src/recorder.rs | 284 ++++++ primitives/trie/src/storage_proof.rs | 7 +- primitives/trie/src/trie_codec.rs | 6 +- test-utils/runtime/Cargo.toml | 2 +- test-utils/runtime/src/lib.rs | 35 +- utils/frame/benchmarking-cli/src/lib.rs | 4 +- .../frame/benchmarking-cli/src/storage/cmd.rs | 16 +- .../benchmarking-cli/src/storage/write.rs | 4 +- .../rpc/state-trie-migration-rpc/Cargo.toml | 2 +- .../rpc/state-trie-migration-rpc/src/lib.rs | 19 +- utils/frame/try-runtime/cli/src/lib.rs | 12 +- 55 files changed, 3979 insertions(+), 1346 deletions(-) create mode 100644 client/db/benches/state_access.rs create mode 100644 client/db/src/record_stats_state.rs delete mode 100644 primitives/state-machine/src/proving_backend.rs create mode 100644 primitives/trie/src/cache/mod.rs create mode 100644 primitives/trie/src/cache/shared_cache.rs create mode 100644 primitives/trie/src/recorder.rs diff --git a/Cargo.lock b/Cargo.lock index 6a114065ee56c..1579c887b8fed 100644 --- a/Cargo.lock 
+++ b/Cargo.lock @@ -2798,9 +2798,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.12.0" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ "ahash", ] @@ -4352,7 +4352,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6566c70c1016f525ced45d7b7f97730a2bafb037c788211d0c186ef5b2189f0a" dependencies = [ "hash-db", - "hashbrown 0.12.0", + "hashbrown 0.12.3", "parity-util-mem", ] @@ -6572,7 +6572,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c32561d248d352148124f036cac253a644685a21dc9fea383eb4907d7bd35a8f" dependencies = [ "cfg-if 1.0.0", - "hashbrown 0.12.0", + "hashbrown 0.12.3", "impl-trait-for-tuples", "parity-util-mem-derive", "parking_lot 0.12.0", @@ -7505,7 +7505,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f08c8062c1fe1253064043b8fc07bfea1b9702b71b4a86c11ea3588183b12e1" dependencies = [ "bytecheck", - "hashbrown 0.12.0", + "hashbrown 0.12.3", "ptr_meta", "rend", "rkyv_derive", @@ -7884,7 +7884,9 @@ dependencies = [ name = "sc-client-db" version = "0.10.0-dev" dependencies = [ + "criterion", "hash-db", + "kitchensink-runtime", "kvdb", "kvdb-memorydb", "kvdb-rocksdb", @@ -7894,6 +7896,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.0", "quickcheck", + "rand 0.8.4", "sc-client-api", "sc-state-db", "sp-arithmetic", @@ -9396,6 +9399,7 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-test-primitives", + "sp-trie", "sp-version", "thiserror", ] @@ -10060,6 +10064,7 @@ dependencies = [ "sp-trie", "thiserror", "tracing", + "trie-db", "trie-root", ] @@ -10157,16 +10162,23 @@ dependencies = [ name = "sp-trie" version = "6.0.0" dependencies = [ + "ahash", "criterion", "hash-db", + "hashbrown 0.12.3", "hex-literal", + "lazy_static", + "lru", "memory-db", + "nohash-hasher", "parity-scale-codec", + "parking_lot 0.12.0", "scale-info", "sp-core", "sp-runtime", "sp-std", "thiserror", + "tracing", "trie-bench", "trie-db", "trie-root", @@ -10963,9 +10975,9 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ecec5d10427b35e9ae374b059dccc0801d02d832617c04c78afc7a8c5c4a34" +checksum = "c5704f0d6130bd83608e4370c19e20c8a6ec03e80363e493d0234efca005265a" dependencies = [ "criterion", "hash-db", @@ -10979,12 +10991,12 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.23.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" +checksum = "004e1e8f92535694b4cb1444dc5a8073ecf0815e3357f729638b9f8fc4062908" dependencies = [ "hash-db", - "hashbrown 0.12.0", + "hashbrown 0.12.3", "log", "rustc-hex", "smallvec", diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 2b26ed9089a51..863928c719429 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -20,7 +20,7 @@ use std::{collections::HashMap, sync::Arc}; use kvdb::KeyValueDB; use node_primitives::Hash; -use sp_trie::{trie_types::TrieDBMutV1, TrieMut}; +use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut}; use 
crate::simple_trie::SimpleTrie; @@ -43,7 +43,8 @@ pub fn generate_trie( ); let mut trie = SimpleTrie { db, overlay: &mut overlay }; { - let mut trie_db = TrieDBMutV1::::new(&mut trie, &mut root); + let mut trie_db = + TrieDBMutBuilderV1::::new(&mut trie, &mut root).build(); for (key, value) in key_values { trie_db.insert(&key, &value).expect("trie insertion failed"); } diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index d508dc712e1c3..de49a6fe7b6da 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -23,7 +23,7 @@ use kvdb::KeyValueDB; use lazy_static::lazy_static; use rand::Rng; use sp_state_machine::Backend as _; -use sp_trie::{trie_types::TrieDBMutV1, TrieMut as _}; +use sp_trie::{trie_types::TrieDBMutBuilderV1, TrieMut as _}; use std::{borrow::Cow, collections::HashMap, sync::Arc}; use node_primitives::Hash; @@ -180,7 +180,7 @@ impl core::Benchmark for TrieReadBenchmark { let storage: Arc> = Arc::new(Storage(db.open(self.database_type))); - let trie_backend = sp_state_machine::TrieBackend::new(storage, self.root); + let trie_backend = sp_state_machine::TrieBackendBuilder::new(storage, self.root).build(); for (warmup_key, warmup_value) in self.warmup_keys.iter() { let value = trie_backend .storage(&warmup_key[..]) @@ -286,8 +286,7 @@ impl core::Benchmark for TrieWriteBenchmark { let mut overlay = HashMap::new(); let mut trie = SimpleTrie { db: kvdb.clone(), overlay: &mut overlay }; - let mut trie_db_mut = TrieDBMutV1::from_existing(&mut trie, &mut new_root) - .expect("Failed to create TrieDBMut"); + let mut trie_db_mut = TrieDBMutBuilderV1::from_existing(&mut trie, &mut new_root).build(); for (warmup_key, warmup_value) in self.warmup_keys.iter() { let value = trie_db_mut diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 8420a65f8cb80..c0f3b96e093cb 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -72,8 +72,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { keystore: KeystoreConfig::InMemory, keystore_remote: Default::default(), database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, - state_cache_size: 67108864, - state_cache_child_ratio: None, + trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::All, chain_spec: spec, diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index f031f9dae3d21..e6084fba8242a 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -66,8 +66,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { keystore: KeystoreConfig::InMemory, keystore_remote: Default::default(), database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, - state_cache_size: 67108864, - state_cache_child_ratio: None, + trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::All, chain_spec: spec, diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index de64910125b86..534f0a4f09732 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -388,8 +388,7 @@ impl BenchDb { keyring: &BenchKeyring, ) -> (Client, std::sync::Arc, TaskExecutor) { let db_config = sc_client_db::DatabaseSettings { - state_cache_size: 16 * 1024 * 1024, - state_cache_child_ratio: Some((0, 100)), + 
trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), blocks_pruning: sc_client_db::BlocksPruning::All, diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 54784a2f27b64..bcc7a9bff3b2d 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -32,7 +32,8 @@ use sp_runtime::{ Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ - ChildStorageCollection, IndexOperation, OffchainChangesCollection, StorageCollection, + backend::AsTrieBackend, ChildStorageCollection, IndexOperation, OffchainChangesCollection, + StorageCollection, }; use sp_storage::{ChildInfo, StorageData, StorageKey}; use std::collections::{HashMap, HashSet}; @@ -448,7 +449,12 @@ pub trait Backend: AuxStore + Send + Sync { /// Associated blockchain backend type. type Blockchain: BlockchainBackend; /// Associated state backend type. - type State: StateBackend> + Send; + type State: StateBackend> + + Send + + AsTrieBackend< + HashFor, + TrieBackendStorage = >>::TrieBackendStorage, + >; /// Offchain workers local storage. type OffchainStorage: OffchainStorage; diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index bc328c40edb3c..f5ccd9023a3db 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -855,10 +855,18 @@ mod tests { .expect("header get error") .expect("there should be header"); - let extrinsics_num = 4; - let extrinsics = (0..extrinsics_num) - .map(|v| Extrinsic::IncludeData(vec![v as u8; 10])) - .collect::>(); + let extrinsics_num = 5; + let extrinsics = std::iter::once( + Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 100, + nonce: 0, + } + .into_signed_tx(), + ) + .chain((0..extrinsics_num - 1).map(|v| Extrinsic::IncludeData(vec![v as u8; 10]))) + .collect::>(); let block_limit = genesis_header.encoded_size() + extrinsics @@ -922,8 +930,9 @@ mod tests { .unwrap(); // The block limit didn't changed, but we now include the proof in the estimation of the - // block size and thus, one less transaction should fit into the limit. - assert_eq!(block.extrinsics().len(), extrinsics_num - 2); + // block size and thus, only the `Transfer` will fit into the block. It reads more data + // than we have reserved in the block limit. + assert_eq!(block.extrinsics().len(), 1); } #[test] diff --git a/client/cli/src/commands/chain_info_cmd.rs b/client/cli/src/commands/chain_info_cmd.rs index 8fea0d7058e50..cbc22cc4d52d9 100644 --- a/client/cli/src/commands/chain_info_cmd.rs +++ b/client/cli/src/commands/chain_info_cmd.rs @@ -73,8 +73,7 @@ impl ChainInfoCmd { B: BlockT, { let db_config = sc_client_db::DatabaseSettings { - state_cache_size: config.state_cache_size, - state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), + trie_cache_maximum_size: config.trie_cache_maximum_size, state_pruning: config.state_pruning.clone(), source: config.database.clone(), blocks_pruning: config.blocks_pruning, diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index f513008b17adc..92091ea118ff1 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -230,18 +230,12 @@ pub trait CliConfiguration: Sized { }) } - /// Get the state cache size. + /// Get the trie cache maximum size. /// /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `0`. 
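With `state_cache_size` and `state_cache_child_ratio` gone, callers fill in `trie_cache_maximum_size` instead, and `None` disables the trie cache entirely (matching the `--trie-cache-size 0` handling in `ImportParams` below). A hedged sketch of constructing the reworked `sc_client_db::DatabaseSettings`; the sizes, pruning mode and RocksDb source are illustrative values, not recommendations.

```rust
use sc_client_db::{BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode};
use std::path::PathBuf;

fn database_settings(path: PathBuf, cache_enabled: bool) -> DatabaseSettings {
    DatabaseSettings {
        // `Some(bytes)` enables the shared trie cache, `None` disables it.
        trie_cache_maximum_size: if cache_enabled { Some(64 * 1024 * 1024) } else { None },
        state_pruning: Some(PruningMode::ArchiveAll),
        blocks_pruning: BlocksPruning::All,
        source: DatabaseSource::RocksDb { path, cache_size: 128 },
    }
}
```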
- fn state_cache_size(&self) -> Result { - Ok(self.import_params().map(|x| x.state_cache_size()).unwrap_or_default()) - } - - /// Get the state cache child ratio (if any). - /// - /// By default this is `None`. - fn state_cache_child_ratio(&self) -> Result> { - Ok(Default::default()) + /// If `None` is returned the trie cache is disabled. + fn trie_cache_maximum_size(&self) -> Result> { + Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) } /// Get the state pruning mode. @@ -533,8 +527,7 @@ pub trait CliConfiguration: Sized { keystore_remote, keystore, database: self.database_config(&config_dir, database_cache_size, database)?, - state_cache_size: self.state_cache_size()?, - state_cache_child_ratio: self.state_cache_child_ratio()?, + trie_cache_maximum_size: self.trie_cache_maximum_size()?, state_pruning: self.state_pruning()?, blocks_pruning: self.blocks_pruning()?, wasm_method: self.wasm_method()?, diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index aef7511ffc371..c851050838965 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -95,14 +95,30 @@ pub struct ImportParams { pub execution_strategies: ExecutionStrategiesParams, /// Specify the state cache size. + /// + /// Providing `0` will disable the cache. #[clap(long, value_name = "Bytes", default_value = "67108864")] - pub state_cache_size: usize, + pub trie_cache_size: usize, + + /// DEPRECATED + /// + /// Switch to `--trie-cache-size`. + #[clap(long)] + state_cache_size: Option, } impl ImportParams { - /// Specify the state cache size. - pub fn state_cache_size(&self) -> usize { - self.state_cache_size + /// Specify the trie cache maximum size. + pub fn trie_cache_maximum_size(&self) -> Option { + if self.state_cache_size.is_some() { + eprintln!("`--state-cache-size` was deprecated. Please switch to `--trie-cache-size`."); + } + + if self.trie_cache_size == 0 { + None + } else { + Some(self.trie_cache_size) + } } /// Get the WASM execution method from the parameters diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 3b6402b3f6023..7bfba997767f2 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -35,9 +35,12 @@ sp-state-machine = { version = "0.12.0", path = "../../primitives/state-machine" sp-trie = { version = "6.0.0", path = "../../primitives/trie" } [dev-dependencies] +criterion = "0.3.3" kvdb-rocksdb = "0.15.1" +rand = "0.8.4" +tempfile = "3.1.0" quickcheck = { version = "1.0.3", default-features = false } -tempfile = "3" +kitchensink-runtime = { path = "../../bin/node/runtime" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } @@ -46,3 +49,10 @@ default = [] test-helpers = [] runtime-benchmarks = [] rocksdb = ["kvdb-rocksdb"] + +[[bench]] +name = "state_access" +harness = false + +[lib] +bench = false diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs new file mode 100644 index 0000000000000..78aed7858e342 --- /dev/null +++ b/client/db/benches/state_access.rs @@ -0,0 +1,312 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; +use rand::{distributions::Uniform, rngs::StdRng, Rng, SeedableRng}; +use sc_client_api::{Backend as _, BlockImportOperation, NewBlockState, StateBackend}; +use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; +use sp_core::H256; +use sp_runtime::{ + generic::BlockId, + testing::{Block as RawBlock, ExtrinsicWrapper, Header}, + StateVersion, Storage, +}; +use tempfile::TempDir; + +pub(crate) type Block = RawBlock>; + +fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 { + let mut op = db.begin_operation().unwrap(); + let mut header = Header { + number: 0, + parent_hash: Default::default(), + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + header.state_root = op + .set_genesis_state( + Storage { + top: vec![( + sp_core::storage::well_known_keys::CODE.to_vec(), + kitchensink_runtime::wasm_binary_unwrap().to_vec(), + )] + .into_iter() + .collect(), + children_default: Default::default(), + }, + true, + StateVersion::V1, + ) + .unwrap(); + + op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); + + db.commit_operation(op).unwrap(); + + let mut number = 1; + let mut parent_hash = header.hash(); + + for i in 0..10 { + let mut op = db.begin_operation().unwrap(); + + db.begin_state_operation(&mut op, BlockId::Hash(parent_hash)).unwrap(); + + let mut header = Header { + number, + parent_hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let changes = storage + .iter() + .skip(i * 100_000) + .take(100_000) + .map(|(k, v)| (k.clone(), Some(v.clone()))) + .collect::>(); + + let (state_root, tx) = db.state_at(BlockId::Number(number - 1)).unwrap().storage_root( + changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), + StateVersion::V1, + ); + header.state_root = state_root; + + op.update_db_storage(tx).unwrap(); + op.update_storage(changes.clone(), Default::default()).unwrap(); + + op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); + + db.commit_operation(op).unwrap(); + + number += 1; + parent_hash = header.hash(); + } + + parent_hash +} + +enum BenchmarkConfig { + NoCache, + TrieNodeCache, +} + +fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend { + let path = temp_dir.path().to_owned(); + + let trie_cache_maximum_size = match config { + BenchmarkConfig::NoCache => None, + BenchmarkConfig::TrieNodeCache => Some(2 * 1024 * 1024 * 1024), + }; + + let settings = DatabaseSettings { + trie_cache_maximum_size, + state_pruning: Some(PruningMode::ArchiveAll), + source: DatabaseSource::ParityDb { path }, + blocks_pruning: BlocksPruning::All, + }; + + 
Backend::new(settings, 100).expect("Creates backend") +} + +/// Generate the storage that will be used for the benchmark +/// +/// Returns the `Vec` and the `Vec<(key, value)>` +fn generate_storage() -> (Vec>, Vec<(Vec, Vec)>) { + let mut rng = StdRng::seed_from_u64(353893213); + + let mut storage = Vec::new(); + let mut keys = Vec::new(); + + for _ in 0..1_000_000 { + let key_len: usize = rng.gen_range(32..128); + let key = (&mut rng) + .sample_iter(Uniform::new_inclusive(0, 255)) + .take(key_len) + .collect::>(); + + let value_len: usize = rng.gen_range(20..60); + let value = (&mut rng) + .sample_iter(Uniform::new_inclusive(0, 255)) + .take(value_len) + .collect::>(); + + keys.push(key.clone()); + storage.push((key, value)); + } + + (keys, storage) +} + +fn state_access_benchmarks(c: &mut Criterion) { + sp_tracing::try_init_simple(); + + let (keys, storage) = generate_storage(); + let path = TempDir::new().expect("Creates temporary directory"); + + let block_hash = { + let backend = create_backend(BenchmarkConfig::NoCache, &path); + insert_blocks(&backend, storage.clone()) + }; + + let mut group = c.benchmark_group("Reading entire state"); + group.sample_size(20); + + let mut bench_multiple_values = |config, desc, multiplier| { + let backend = create_backend(config, &path); + + group.bench_function(desc, |b| { + b.iter_batched( + || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + |state| { + for key in keys.iter().cycle().take(keys.len() * multiplier) { + let _ = state.storage(&key).expect("Doesn't fail").unwrap(); + } + }, + BatchSize::SmallInput, + ) + }); + }; + + bench_multiple_values( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and reading each key once", + 1, + ); + bench_multiple_values(BenchmarkConfig::NoCache, "no cache and reading each key once", 1); + + bench_multiple_values( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and reading 4 times each key in a row", + 4, + ); + bench_multiple_values( + BenchmarkConfig::NoCache, + "no cache and reading 4 times each key in a row", + 4, + ); + + group.finish(); + + let mut group = c.benchmark_group("Reading a single value"); + + let mut bench_single_value = |config, desc, multiplier| { + let backend = create_backend(config, &path); + + group.bench_function(desc, |b| { + b.iter_batched( + || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + |state| { + for key in keys.iter().take(1).cycle().take(multiplier) { + let _ = state.storage(&key).expect("Doesn't fail").unwrap(); + } + }, + BatchSize::SmallInput, + ) + }); + }; + + bench_single_value( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and reading the key once", + 1, + ); + bench_single_value(BenchmarkConfig::NoCache, "no cache and reading the key once", 1); + + bench_single_value( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and reading 4 times each key in a row", + 4, + ); + bench_single_value( + BenchmarkConfig::NoCache, + "no cache and reading 4 times each key in a row", + 4, + ); + + group.finish(); + + let mut group = c.benchmark_group("Hashing a value"); + + let mut bench_single_value = |config, desc, multiplier| { + let backend = create_backend(config, &path); + + group.bench_function(desc, |b| { + b.iter_batched( + || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + |state| { + for key in keys.iter().take(1).cycle().take(multiplier) { + let _ = state.storage_hash(&key).expect("Doesn't fail").unwrap(); + } + }, + BatchSize::SmallInput, + ) + }); + }; + + 
bench_single_value( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and hashing the key once", + 1, + ); + bench_single_value(BenchmarkConfig::NoCache, "no cache and hashing the key once", 1); + + bench_single_value( + BenchmarkConfig::TrieNodeCache, + "with trie node cache and hashing 4 times each key in a row", + 4, + ); + bench_single_value( + BenchmarkConfig::NoCache, + "no cache and hashing 4 times each key in a row", + 4, + ); + + group.finish(); + + let mut group = c.benchmark_group("Hashing `:code`"); + + let mut bench_single_value = |config, desc| { + let backend = create_backend(config, &path); + + group.bench_function(desc, |b| { + b.iter_batched( + || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + |state| { + let _ = state + .storage_hash(sp_core::storage::well_known_keys::CODE) + .expect("Doesn't fail") + .unwrap(); + }, + BatchSize::SmallInput, + ) + }); + }; + + bench_single_value(BenchmarkConfig::TrieNodeCache, "with trie node cache"); + bench_single_value(BenchmarkConfig::NoCache, "no cache"); + + group.finish(); +} + +criterion_group!(benches, state_access_benchmarks); +criterion_main!(benches); diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index d3d43e742d026..b1f4e3b6c0af5 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -18,13 +18,7 @@ //! State backend that's useful for benchmarking -use std::{ - cell::{Cell, RefCell}, - collections::HashMap, - sync::Arc, -}; - -use crate::storage_cache::{new_shared_cache, CachingState, SharedCache}; +use crate::{DbState, DbStateBuilder}; use hash_db::{Hasher, Prefix}; use kvdb::{DBTransaction, KeyValueDB}; use linked_hash_map::LinkedHashMap; @@ -37,40 +31,31 @@ use sp_runtime::{ StateVersion, Storage, }; use sp_state_machine::{ - backend::Backend as StateBackend, ChildStorageCollection, DBValue, ProofRecorder, - StorageCollection, + backend::Backend as StateBackend, ChildStorageCollection, DBValue, StorageCollection, +}; +use sp_trie::{ + cache::{CacheSize, SharedTrieCache}, + prefixed_key, MemoryDB, +}; +use std::{ + cell::{Cell, RefCell}, + collections::HashMap, + sync::Arc, }; -use sp_trie::{prefixed_key, MemoryDB}; - -type DbState = - sp_state_machine::TrieBackend>>, HashFor>; -type State = CachingState, B>; +type State = DbState; struct StorageDb { db: Arc, - proof_recorder: Option>, _block: std::marker::PhantomData, } impl sp_state_machine::Storage> for StorageDb { fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { let prefixed_key = prefixed_key::>(key, prefix); - if let Some(recorder) = &self.proof_recorder { - if let Some(v) = recorder.get(key) { - return Ok(v) - } - let backend_value = self - .db - .get(0, &prefixed_key) - .map_err(|e| format!("Database backend error: {:?}", e))?; - recorder.record(*key, backend_value.clone()); - Ok(backend_value) - } else { - self.db - .get(0, &prefixed_key) - .map_err(|e| format!("Database backend error: {:?}", e)) - } + self.db + .get(0, &prefixed_key) + .map_err(|e| format!("Database backend error: {:?}", e)) } } @@ -82,7 +67,6 @@ pub struct BenchmarkingState { db: Cell>>, genesis: HashMap, (Vec, i32)>, record: Cell>>, - shared_cache: SharedCache, // shared cache is always empty /// Key tracker for keys in the main trie. /// We track the total number of reads and writes to these keys, /// not de-duplicated for repeats. @@ -93,9 +77,10 @@ pub struct BenchmarkingState { /// not de-duplicated for repeats. 
child_key_tracker: RefCell, LinkedHashMap, TrackedStorageKey>>>, whitelist: RefCell>, - proof_recorder: Option>, + proof_recorder: Option>>, proof_recorder_root: Cell, enable_tracking: bool, + shared_trie_cache: SharedTrieCache>, } impl BenchmarkingState { @@ -109,7 +94,7 @@ impl BenchmarkingState { let state_version = sp_runtime::StateVersion::default(); let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); - sp_state_machine::TrieDBMutV1::>::new(&mut mdb, &mut root); + sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); let mut state = BenchmarkingState { state: RefCell::new(None), @@ -118,13 +103,14 @@ impl BenchmarkingState { genesis: Default::default(), genesis_root: Default::default(), record: Default::default(), - shared_cache: new_shared_cache(0, (1, 10)), main_key_tracker: Default::default(), child_key_tracker: Default::default(), whitelist: Default::default(), proof_recorder: record_proof.then(Default::default), proof_recorder_root: Cell::new(root), enable_tracking, + // Enable the cache, but do not sync anything to the shared state. + shared_trie_cache: SharedTrieCache::new(CacheSize::Maximum(0)), }; state.add_whitelist_to_tracker(); @@ -160,16 +146,13 @@ impl BenchmarkingState { recorder.reset(); self.proof_recorder_root.set(self.root.get()); } - let storage_db = Arc::new(StorageDb:: { - db, - proof_recorder: self.proof_recorder.clone(), - _block: Default::default(), - }); - *self.state.borrow_mut() = Some(State::new( - DbState::::new(storage_db, self.root.get()), - self.shared_cache.clone(), - None, - )); + let storage_db = Arc::new(StorageDb:: { db, _block: Default::default() }); + *self.state.borrow_mut() = Some( + DbStateBuilder::::new(storage_db, self.root.get()) + .with_optional_recorder(self.proof_recorder.clone()) + .with_cache(self.shared_trie_cache.local_cache()) + .build(), + ); Ok(()) } @@ -324,6 +307,19 @@ impl StateBackend> for BenchmarkingState { .child_storage(child_info, key) } + fn child_storage_hash( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.add_read_key(Some(child_info.storage_key()), key); + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? 
+ .child_storage_hash(child_info, key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.add_read_key(None, key); self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key) @@ -604,22 +600,25 @@ impl StateBackend> for BenchmarkingState { fn proof_size(&self) -> Option { self.proof_recorder.as_ref().map(|recorder| { let proof_size = recorder.estimate_encoded_size() as u32; + let proof = recorder.to_storage_proof(); + let proof_recorder_root = self.proof_recorder_root.get(); if proof_recorder_root == Default::default() || proof_size == 1 { // empty trie proof_size - } else if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) - { - size as u32 } else { - panic!( - "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}", - self.proof_recorder_root.get(), - self.root.get(), - self.genesis_root, - proof_size, - ); + if let Some(size) = proof.encoded_compact_size::>(proof_recorder_root) { + size as u32 + } else { + panic!( + "proof rec root {:?}, root {:?}, genesis {:?}, rec_len {:?}", + self.proof_recorder_root.get(), + self.root.get(), + self.genesis_root, + proof_size, + ); + } } }) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 71f291e66e793..465db08fe3afc 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -34,8 +34,8 @@ pub mod bench; mod children; mod parity_db; +mod record_stats_state; mod stats; -mod storage_cache; #[cfg(any(feature = "rocksdb", test))] mod upgrade; mod utils; @@ -51,8 +51,8 @@ use std::{ }; use crate::{ + record_stats_state::RecordStatsState, stats::StateUsageStats, - storage_cache::{new_shared_cache, CachingState, SharedCache, SyncingCachingState}, utils::{meta_keys, read_db, read_meta, DatabaseType, Meta}, }; use codec::{Decode, Encode}; @@ -83,10 +83,11 @@ use sp_runtime::{ Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ - backend::Backend as StateBackend, ChildStorageCollection, DBValue, IndexOperation, - OffchainChangesCollection, StateMachineStats, StorageCollection, UsageInfo as StateUsageInfo, + backend::{AsTrieBackend, Backend as StateBackend}, + ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats, + StorageCollection, UsageInfo as StateUsageInfo, }; -use sp_trie::{prefixed_key, MemoryDB, PrefixedMemoryDB}; +use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, PrefixedMemoryDB}; // Re-export the Database trait so that one can pass an implementation of it. pub use sc_state_db::PruningMode; @@ -96,13 +97,16 @@ pub use bench::BenchmarkingState; const CACHE_HEADERS: usize = 8; -/// Default value for storage cache child ratio. -const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); - /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend>>, HashFor>; +/// Builder for [`DbState`]. +pub type DbStateBuilder = sp_state_machine::TrieBackendBuilder< + Arc>>, + HashFor, +>; + /// Length of a [`DbHash`]. 
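Putting the pieces together, `client/db` now builds its state through the new `DbStateBuilder` (the `TrieBackendBuilder` alias defined just above), handing each state instance a `local_cache()` drawn from the optional shared trie cache. A hedged sketch of that wiring; the function name and exact bounds are illustrative and only mirror what `Backend::state_at` does further down, not a drop-in replacement for it.

```rust
use std::sync::Arc;

use sc_client_db::{DbState, DbStateBuilder};
use sp_runtime::traits::{Block as BlockT, HashFor};
use sp_trie::cache::SharedTrieCache;

fn build_db_state<B: BlockT>(
    storage: Arc<dyn sp_state_machine::Storage<HashFor<B>>>,
    root: B::Hash,
    shared_trie_cache: Option<&SharedTrieCache<HashFor<B>>>,
) -> DbState<B> {
    DbStateBuilder::<B>::new(storage, root)
        // Each state instance gets its own local cache that syncs back to the
        // shared cache when dropped; `None` simply means "no caching".
        .with_optional_cache(shared_trie_cache.map(|c| c.local_cache()))
        .build()
}
```

The shared cache itself is created once per backend, as in `Backend::new` further down: `SharedTrieCache::new(sp_trie::cache::CacheSize::Maximum(maximum_size))`.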
const DB_HASH_LEN: usize = 32; @@ -174,6 +178,14 @@ impl StateBackend> for RefTrackingState { self.state.child_storage(child_info, key) } + fn child_storage_hash( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.state.child_storage_hash(child_info, key) + } + fn exists_storage(&self, key: &[u8]) -> Result { self.state.exists_storage(key) } @@ -272,12 +284,6 @@ impl StateBackend> for RefTrackingState { self.state.child_keys(child_info, prefix) } - fn as_trie_backend( - &self, - ) -> Option<&sp_state_machine::TrieBackend>> { - self.state.as_trie_backend() - } - fn register_overlay_stats(&self, stats: &StateMachineStats) { self.state.register_overlay_stats(stats); } @@ -287,12 +293,22 @@ impl StateBackend> for RefTrackingState { } } +impl AsTrieBackend> for RefTrackingState { + type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; + + fn as_trie_backend( + &self, + ) -> &sp_state_machine::TrieBackend> { + &self.state.as_trie_backend() + } +} + /// Database settings. pub struct DatabaseSettings { - /// State cache size. - pub state_cache_size: usize, - /// Ratio of cache size dedicated to child tries. - pub state_cache_child_ratio: Option<(usize, usize)>, + /// The maximum trie cache size in bytes. + /// + /// If `None` is given, the cache is disabled. + pub trie_cache_maximum_size: Option, /// Requested state pruning mode. pub state_pruning: Option, /// Where to find the database. @@ -730,7 +746,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { - old_state: SyncingCachingState, Block>, + old_state: RecordStatsState, Block>, db_updates: PrefixedMemoryDB>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, @@ -800,7 +816,7 @@ impl BlockImportOperation { impl sc_client_api::backend::BlockImportOperation for BlockImportOperation { - type State = SyncingCachingState, Block>; + type State = RecordStatsState, Block>; fn state(&self) -> ClientResult> { Ok(Some(&self.old_state)) @@ -949,7 +965,7 @@ impl EmptyStorage { let mut root = Block::Hash::default(); let mut mdb = MemoryDB::>::default(); // both triedbmut are the same on empty storage. 
- sp_state_machine::TrieDBMutV1::>::new(&mut mdb, &mut root); + sp_trie::trie_types::TrieDBMutBuilderV1::>::new(&mut mdb, &mut root).build(); EmptyStorage(root) } } @@ -1009,13 +1025,13 @@ pub struct Backend { offchain_storage: offchain::LocalStorage, blockchain: BlockchainDb, canonicalization_delay: u64, - shared_cache: SharedCache, import_lock: Arc>, is_archive: bool, blocks_pruning: BlocksPruning, io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, state_usage: Arc, genesis_state: RwLock>>>, + shared_trie_cache: Option>>, } impl Backend { @@ -1053,8 +1069,7 @@ impl Backend { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); let db_setting = DatabaseSettings { - state_cache_size: 16777216, - state_cache_child_ratio: Some((50, 100)), + trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Some(PruningMode::blocks_pruning(blocks_pruning)), source: DatabaseSource::Custom { db, require_create_flag: true }, blocks_pruning: BlocksPruning::Some(blocks_pruning), @@ -1116,16 +1131,15 @@ impl Backend { offchain_storage, blockchain, canonicalization_delay, - shared_cache: new_shared_cache( - config.state_cache_size, - config.state_cache_child_ratio.unwrap_or(DEFAULT_CHILD_RATIO), - ), import_lock: Default::default(), is_archive: is_archive_pruning, io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), state_usage: Arc::new(StateUsageStats::new()), blocks_pruning: config.blocks_pruning, genesis_state: RwLock::new(None), + shared_trie_cache: config.trie_cache_maximum_size.map(|maximum_size| { + SharedTrieCache::new(sp_trie::cache::CacheSize::Maximum(maximum_size)) + }), }; // Older DB versions have no last state key. Check if the state is available and set it. @@ -1194,7 +1208,7 @@ impl Backend { (&r.number, &r.hash) ); - return Err(::sp_blockchain::Error::NotInFinalizedChain) + return Err(sp_blockchain::Error::NotInFinalizedChain) } retracted.push(r.hash); @@ -1358,10 +1372,8 @@ impl Backend { // blocks are keyed by number + hash. let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; - let (enacted, retracted) = if pending_block.leaf_state.is_best() { - self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))? - } else { - (Default::default(), Default::default()) + if pending_block.leaf_state.is_best() { + self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; }; utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; @@ -1488,14 +1500,22 @@ impl Backend { let header = &pending_block.header; let is_best = pending_block.leaf_state.is_best(); - debug!(target: "db", + debug!( + target: "db", "DB Commit {:?} ({}), best={}, state={}, existing={}, finalized={}", - hash, number, is_best, operation.commit_state, existing_header, finalized, + hash, + number, + is_best, + operation.commit_state, + existing_header, + finalized, ); self.state_usage.merge_sm(operation.old_state.usage_info()); + // release state reference so that it can be finalized - let cache = operation.old_state.into_cache_changes(); + // VERY IMPORTANT + drop(operation.old_state); if finalized { // TODO: ensure best chain contains this block. 
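Throughout this patch the `TrieDBMut*`/`TrieDB` constructors move to builders, e.g. `TrieDBMutV1::new(..)` becomes `TrieDBMutBuilderV1::new(..).build()` as in `EmptyStorage::new` above. A small stand-alone sketch of the builder form; `BlakeTwo256` stands in for the `HashFor<Block>` hasher the diffs use, and the key/value pair is arbitrary.

```rust
use sp_runtime::traits::BlakeTwo256;
use sp_trie::{trie_types::TrieDBMutBuilderV1, MemoryDB, TrieMut};

fn main() {
    let mut db = MemoryDB::<BlakeTwo256>::default();
    let mut root = Default::default();

    {
        // `.build()` on the builder replaces the old `TrieDBMutV1::new(&mut db, &mut root)`.
        let mut trie = TrieDBMutBuilderV1::<BlakeTwo256>::new(&mut db, &mut root).build();
        trie.insert(b"key", b"value").expect("trie insertion failed");
    }

    // `root` was committed when the `TrieDBMut` above was dropped.
    println!("state root: {:?}", root);
}
```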
@@ -1584,20 +1604,20 @@ impl Backend { is_finalized: finalized, with_state: operation.commit_state, }); - Some((pending_block.header, number, hash, enacted, retracted, is_best, cache)) + Some((pending_block.header, hash)) } else { None }; - let cache_update = if let Some(set_head) = operation.set_head { + if let Some(set_head) = operation.set_head { if let Some(header) = sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)? { let number = header.number(); let hash = header.hash(); - let (enacted, retracted) = - self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?; + self.set_head_with_transaction(&mut transaction, hash, (*number, hash))?; + meta_updates.push(MetaUpdate { hash, number: *number, @@ -1605,40 +1625,24 @@ impl Backend { is_finalized: false, with_state: false, }); - Some((enacted, retracted)) } else { return Err(sp_blockchain::Error::UnknownBlock(format!( "Cannot set head {:?}", set_head ))) } - } else { - None - }; + } self.storage.db.commit(transaction)?; // Apply all in-memory state changes. // Code beyond this point can't fail. - if let Some((header, number, hash, enacted, retracted, is_best, mut cache)) = imported { + if let Some((header, hash)) = imported { trace!(target: "db", "DB Commit done {:?}", hash); let header_metadata = CachedHeaderMetadata::from(&header); self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata); cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header)); - cache.sync_cache( - &enacted, - &retracted, - operation.storage_updates, - operation.child_storage_updates, - Some(hash), - Some(number), - is_best, - ); - } - - if let Some((enacted, retracted)) = cache_update { - self.shared_cache.write().sync(&enacted, &retracted); } for m in meta_updates { @@ -1770,17 +1774,13 @@ impl Backend { Ok(()) } - fn empty_state(&self) -> ClientResult, Block>> { + fn empty_state(&self) -> ClientResult, Block>> { let root = EmptyStorage::::new().0; // Empty trie - let db_state = DbState::::new(self.storage.clone(), root); + let db_state = DbStateBuilder::::new(self.storage.clone(), root) + .with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache())) + .build(); let state = RefTrackingState::new(db_state, self.storage.clone(), None); - let caching_state = CachingState::new(state, self.shared_cache.clone(), None); - Ok(SyncingCachingState::new( - caching_state, - self.state_usage.clone(), - self.blockchain.meta.clone(), - self.import_lock.clone(), - )) + Ok(RecordStatsState::new(state, None, self.state_usage.clone())) } } @@ -1902,16 +1902,13 @@ where impl sc_client_api::backend::Backend for Backend { type BlockImportOperation = BlockImportOperation; type Blockchain = BlockchainDb; - type State = SyncingCachingState, Block>; + type State = RecordStatsState, Block>; type OffchainStorage = offchain::LocalStorage; fn begin_operation(&self) -> ClientResult { - let mut old_state = self.empty_state()?; - old_state.disable_syncing(); - Ok(BlockImportOperation { pending_block: None, - old_state, + old_state: self.empty_state()?, db_updates: PrefixedMemoryDB::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), @@ -1934,7 +1931,6 @@ impl sc_client_api::backend::Backend for Backend { } else { operation.old_state = self.state_at(block)?; } - operation.old_state.disable_syncing(); operation.commit_state = true; Ok(()) @@ -2035,8 +2031,9 @@ impl sc_client_api::backend::Backend for Backend { ) }); let database_cache = MemorySize::from_bytes(0); - 
let state_cache = - MemorySize::from_bytes(self.shared_cache.read().used_storage_cache_size()); + let state_cache = MemorySize::from_bytes( + self.shared_trie_cache.as_ref().map_or(0, |c| c.used_memory_size()), + ); let state_db = self.storage.state_db.memory_info(); Some(UsageInfo { @@ -2278,17 +2275,13 @@ impl sc_client_api::backend::Backend for Backend { }; if is_genesis { if let Some(genesis_state) = &*self.genesis_state.read() { - let db_state = DbState::::new(genesis_state.clone(), genesis_state.root); + let root = genesis_state.root; + let db_state = DbStateBuilder::::new(genesis_state.clone(), root) + .with_optional_cache(self.shared_trie_cache.as_ref().map(|c| c.local_cache())) + .build(); + let state = RefTrackingState::new(db_state, self.storage.clone(), None); - let caching_state = CachingState::new(state, self.shared_cache.clone(), None); - let mut state = SyncingCachingState::new( - caching_state, - self.state_usage.clone(), - self.blockchain.meta.clone(), - self.import_lock.clone(), - ); - state.disable_syncing(); - return Ok(state) + return Ok(RecordStatsState::new(state, None, self.state_usage.clone())) } } @@ -2309,16 +2302,13 @@ impl sc_client_api::backend::Backend for Backend { } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root; - let db_state = DbState::::new(self.storage.clone(), root); + let db_state = DbStateBuilder::::new(self.storage.clone(), root) + .with_optional_cache( + self.shared_trie_cache.as_ref().map(|c| c.local_cache()), + ) + .build(); let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash)); - let caching_state = - CachingState::new(state, self.shared_cache.clone(), Some(hash)); - Ok(SyncingCachingState::new( - caching_state, - self.state_usage.clone(), - self.blockchain.meta.clone(), - self.import_lock.clone(), - )) + Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone())) } else { Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", @@ -2494,8 +2484,7 @@ pub(crate) mod tests { let backend = Backend::::new( DatabaseSettings { - state_cache_size: 16777216, - state_cache_child_ratio: Some((50, 100)), + trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Some(PruningMode::blocks_pruning(1)), source: DatabaseSource::Custom { db: backing, require_create_flag: false }, blocks_pruning: BlocksPruning::All, diff --git a/client/db/src/record_stats_state.rs b/client/db/src/record_stats_state.rs new file mode 100644 index 0000000000000..0b51d3fef2127 --- /dev/null +++ b/client/db/src/record_stats_state.rs @@ -0,0 +1,230 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Provides [`RecordStatsState`] for recording stats about state access. 
+ +use crate::stats::StateUsageStats; +use sp_core::storage::ChildInfo; +use sp_runtime::{ + traits::{Block as BlockT, HashFor}, + StateVersion, +}; +use sp_state_machine::{ + backend::{AsTrieBackend, Backend as StateBackend}, + TrieBackend, +}; +use std::sync::Arc; + +/// State abstraction for recording stats about state access. +pub struct RecordStatsState { + /// Usage statistics + usage: StateUsageStats, + /// State machine registered stats + overlay_stats: sp_state_machine::StateMachineStats, + /// Backing state. + state: S, + /// The hash of the block is state belongs to. + block_hash: Option, + /// The usage statistics of the backend. These will be updated on drop. + state_usage: Arc, +} + +impl std::fmt::Debug for RecordStatsState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Block {:?}", self.block_hash) + } +} + +impl Drop for RecordStatsState { + fn drop(&mut self) { + self.state_usage.merge_sm(self.usage.take()); + } +} + +impl>, B: BlockT> RecordStatsState { + /// Create a new instance wrapping generic State and shared cache. + pub(crate) fn new( + state: S, + block_hash: Option, + state_usage: Arc, + ) -> Self { + RecordStatsState { + usage: StateUsageStats::new(), + overlay_stats: sp_state_machine::StateMachineStats::default(), + state, + block_hash, + state_usage, + } + } +} + +impl>, B: BlockT> StateBackend> for RecordStatsState { + type Error = S::Error; + type Transaction = S::Transaction; + type TrieBackendStorage = S::TrieBackendStorage; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + let value = self.state.storage(key)?; + self.usage.tally_key_read(key, value.as_ref(), false); + Ok(value) + } + + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.state.storage_hash(key) + } + + fn child_storage( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + let key = (child_info.storage_key().to_vec(), key.to_vec()); + let value = self.state.child_storage(child_info, &key.1)?; + + // just pass it through the usage counter + let value = self.usage.tally_child_key_read(&key, value, false); + + Ok(value) + } + + fn child_storage_hash( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.state.child_storage_hash(child_info, key) + } + + fn exists_storage(&self, key: &[u8]) -> Result { + self.state.exists_storage(key) + } + + fn exists_child_storage( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result { + self.state.exists_child_storage(child_info, key) + } + + fn apply_to_key_values_while, Vec) -> bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + allow_missing: bool, + ) -> Result { + self.state + .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + } + + fn apply_to_keys_while bool>( + &self, + child_info: Option<&ChildInfo>, + prefix: Option<&[u8]>, + start_at: Option<&[u8]>, + f: F, + ) { + self.state.apply_to_keys_while(child_info, prefix, start_at, f) + } + + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.state.next_storage_key(key) + } + + fn next_child_storage_key( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.next_child_storage_key(child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.state.for_keys_with_prefix(prefix, f) + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + self.state.for_key_values_with_prefix(prefix, f) + } + 
+ fn for_child_keys_with_prefix( + &self, + child_info: &ChildInfo, + prefix: &[u8], + f: F, + ) { + self.state.for_child_keys_with_prefix(child_info, prefix, f) + } + + fn storage_root<'a>( + &self, + delta: impl Iterator)>, + state_version: StateVersion, + ) -> (B::Hash, Self::Transaction) + where + B::Hash: Ord, + { + self.state.storage_root(delta, state_version) + } + + fn child_storage_root<'a>( + &self, + child_info: &ChildInfo, + delta: impl Iterator)>, + state_version: StateVersion, + ) -> (B::Hash, bool, Self::Transaction) + where + B::Hash: Ord, + { + self.state.child_storage_root(child_info, delta, state_version) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.state.pairs() + } + + fn keys(&self, prefix: &[u8]) -> Vec> { + self.state.keys(prefix) + } + + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { + self.state.child_keys(child_info, prefix) + } + + fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { + self.overlay_stats.add(stats); + } + + fn usage_info(&self) -> sp_state_machine::UsageInfo { + let mut info = self.usage.take(); + info.include_state_machine_states(&self.overlay_stats); + info + } +} + +impl> + AsTrieBackend>, B: BlockT> AsTrieBackend> + for RecordStatsState +{ + type TrieBackendStorage = >>::TrieBackendStorage; + + fn as_trie_backend(&self) -> &TrieBackend> { + self.state.as_trie_backend() + } +} diff --git a/client/rpc-api/src/dev/error.rs b/client/rpc-api/src/dev/error.rs index fe74dea256376..43fd3325fa598 100644 --- a/client/rpc-api/src/dev/error.rs +++ b/client/rpc-api/src/dev/error.rs @@ -32,6 +32,9 @@ pub enum Error { /// The re-execution of the specified block failed. #[error("Failed to re-execute the specified block")] BlockExecutionFailed, + /// Failed to extract the proof. + #[error("Failed to extract the proof")] + ProofExtractionFailed, /// The witness compaction failed. 
#[error("Failed to create to compact the witness")] WitnessCompactionFailed, @@ -54,6 +57,8 @@ impl From for JsonRpseeError { CallError::Custom(ErrorObject::owned(BASE_ERROR + 3, msg, None::<()>)), Error::WitnessCompactionFailed => CallError::Custom(ErrorObject::owned(BASE_ERROR + 4, msg, None::<()>)), + Error::ProofExtractionFailed => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 5, msg, None::<()>)), Error::UnsafeRpcCalled(e) => e.into(), } .into() diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index f81143583eacf..de04af259600b 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -206,8 +206,7 @@ where let (client, backend) = { let db_config = sc_client_db::DatabaseSettings { - state_cache_size: config.state_cache_size, - state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), + trie_cache_maximum_size: config.trie_cache_maximum_size, state_pruning: config.state_pruning.clone(), source: config.database.clone(), blocks_pruning: config.blocks_pruning, diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 1e8114df13339..de851ac848919 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -28,7 +28,7 @@ use sp_core::{ use sp_externalities::Extensions; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_state_machine::{ - self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, + backend::AsTrieBackend, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, StateMachine, StorageProof, }; use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; @@ -224,15 +224,11 @@ where match recorder { Some(recorder) => { - let trie_state = state.as_trie_backend().ok_or_else(|| { - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) - as Box - })?; - - let backend = sp_state_machine::ProvingBackend::new_with_recorder( - trie_state, - recorder.clone(), - ); + let trie_state = state.as_trie_backend(); + + let backend = sp_state_machine::TrieBackendBuilder::wrap(&trie_state) + .with_recorder(recorder.clone()) + .build(); let mut state_machine = StateMachine::new( &backend, @@ -294,10 +290,7 @@ where ) -> sp_blockchain::Result<(Vec, StorageProof)> { let state = self.backend.state_at(*at)?; - let trie_backend = state.as_trie_backend().ok_or_else(|| { - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) - as Box - })?; + let trie_backend = state.as_trie_backend(); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); let runtime_code = diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index d61d7f7fa3781..9d6a2e0457a84 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1327,7 +1327,7 @@ where Some(&root), ) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; - let proving_backend = sp_state_machine::TrieBackend::new(db, root); + let proving_backend = sp_state_machine::TrieBackendBuilder::new(db, root).build(); let state = read_range_proof_check_with_child_on_proving_backend::>( &proving_backend, start_key, @@ -1689,6 +1689,10 @@ where fn runtime_version_at(&self, at: &BlockId) -> Result { CallExecutor::runtime_version(&self.executor, at).map_err(Into::into) } + + fn state_at(&self, at: &BlockId) -> Result { + self.state_at(at).map_err(Into::into) + } } /// NOTE: only use this implementation when you are sure there 
are NO consensus-level BlockImport diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 7860ff2281fe4..6f5650df5648c 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -70,10 +70,10 @@ pub struct Configuration { pub keystore_remote: Option, /// Configuration for the database. pub database: DatabaseSource, - /// Size of internal state cache in Bytes - pub state_cache_size: usize, - /// Size in percent of cache size dedicated to child tries - pub state_cache_child_ratio: Option, + /// Maximum size of internal trie cache in bytes. + /// + /// If `None` is given the cache is disabled. + pub trie_cache_maximum_size: Option, /// State pruning settings. pub state_pruning: Option, /// Number of blocks to keep in the db. diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 9c4fa84c144c7..f02b1321d2922 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1197,8 +1197,7 @@ fn doesnt_import_blocks_that_revert_finality() { let backend = Arc::new( Backend::new( DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, + trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, @@ -1424,8 +1423,7 @@ fn returns_status_for_pruned_blocks() { let backend = Arc::new( Backend::new( DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, + trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::blocks_pruning(1)), blocks_pruning: BlocksPruning::All, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 9c720c6fedea0..919c90400e94c 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -232,8 +232,7 @@ fn node_config< keystore_remote: Default::default(), keystore: KeystoreConfig::Path { path: root.join("key"), password: None }, database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, - state_cache_size: 16777216, - state_cache_child_ratio: None, + trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Default::default(), blocks_pruning: BlocksPruning::All, chain_spec: Box::new((*spec).clone()), diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 2a749f2aae9dd..c72ab8c210d69 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -39,8 +39,8 @@ use sp_session::{MembershipProof, ValidatorCount}; use sp_staking::SessionIndex; use sp_std::prelude::*; use sp_trie::{ - trie_types::{TrieDB, TrieDBMutV0}, - MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, + trie_types::{TrieDBBuilder, TrieDBMutBuilderV0}, + LayoutV0, MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, }; use frame_support::{ @@ -236,7 +236,7 @@ impl ProvingTrie { let mut root = Default::default(); { - let mut trie = TrieDBMutV0::new(&mut db, &mut root); + let mut trie = TrieDBMutBuilderV0::new(&mut db, &mut root).build(); for (i, (validator, full_id)) in validators.into_iter().enumerate() { let i = i as u32; let keys = match >::load_keys(&validator) { @@ -278,19 +278,20 @@ impl ProvingTrie { /// Prove the full verification data for a given key and key ID. 
pub fn prove(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option>> { - let trie = TrieDB::new(&self.db, &self.root).ok()?; - let mut recorder = Recorder::new(); - let val_idx = (key_id, key_data).using_encoded(|s| { - trie.get_with(s, &mut recorder) - .ok()? - .and_then(|raw| u32::decode(&mut &*raw).ok()) - })?; - - val_idx.using_encoded(|s| { - trie.get_with(s, &mut recorder) - .ok()? - .and_then(|raw| >::decode(&mut &*raw).ok()) - })?; + let mut recorder = Recorder::>::new(); + { + let trie = + TrieDBBuilder::new(&self.db, &self.root).with_recorder(&mut recorder).build(); + let val_idx = (key_id, key_data).using_encoded(|s| { + trie.get(s).ok()?.and_then(|raw| u32::decode(&mut &*raw).ok()) + })?; + + val_idx.using_encoded(|s| { + trie.get(s) + .ok()? + .and_then(|raw| >::decode(&mut &*raw).ok()) + })?; + } Some(recorder.drain().into_iter().map(|r| r.data).collect()) } @@ -303,7 +304,7 @@ impl ProvingTrie { // Check a proof contained within the current memory-db. Returns `None` if the // nodes within the current `MemoryDB` are insufficient to query the item. fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option> { - let trie = TrieDB::new(&self.db, &self.root).ok()?; + let trie = TrieDBBuilder::new(&self.db, &self.root).build(); let val_idx = (key_id, key_data) .using_encoded(|s| trie.get(s)) .ok()? diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index f3d091266d5d4..75c935bf844bd 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -20,6 +20,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-version = { version = "5.0.0", default-features = false, path = "../version" } sp-state-machine = { version = "0.12.0", optional = true, path = "../state-machine" } +sp-trie = { version = "6.0.0", optional = true, path = "../trie" } hash-db = { version = "0.15.2", optional = true } thiserror = { version = "1.0.30", optional = true } @@ -36,6 +37,7 @@ std = [ "sp-std/std", "sp-runtime/std", "sp-state-machine", + "sp-trie", "sp-version/std", "hash-db", "thiserror", diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 0087a1ad37ab9..5ab8fa0521699 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -277,9 +277,13 @@ fn generate_runtime_api_base_structures() -> Result { std::clone::Clone::clone(&self.recorder) } - fn extract_proof(&mut self) -> std::option::Option<#crate_::StorageProof> { - std::option::Option::take(&mut self.recorder) - .map(|recorder| #crate_::ProofRecorder::::to_storage_proof(&recorder)) + fn extract_proof( + &mut self, + ) -> std::option::Option<#crate_::StorageProof> { + let recorder = std::option::Option::take(&mut self.recorder); + std::option::Option::map(recorder, |recorder| { + #crate_::ProofRecorder::::drain_storage_proof(recorder) + }) } fn into_storage_changes( diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 4de0f6b9e571b..e43a302e18923 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -104,7 +104,9 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result Option<#crate_::StorageProof> { + fn extract_proof( + &mut self, + ) -> Option<#crate_::StorageProof> { unimplemented!("`extract_proof` 
not implemented for runtime api mocks") } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index a5230cfc5f116..353ebd91cf5df 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -99,7 +99,8 @@ pub use sp_runtime::{ #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, + backend::AsTrieBackend, Backend as StateBackend, InMemoryBackend, OverlayedChanges, + StorageProof, TrieBackend, TrieBackendBuilder, }; #[cfg(feature = "std")] use sp_std::result; @@ -454,7 +455,7 @@ pub use sp_api_proc_macro::mock_impl_runtime_apis; /// A type that records all accessed trie nodes and generates a proof out of it. #[cfg(feature = "std")] -pub type ProofRecorder = sp_state_machine::ProofRecorder<::Hash>; +pub type ProofRecorder = sp_trie::recorder::Recorder>; /// A type that is used as cache for the storage transactions. #[cfg(feature = "std")] @@ -518,6 +519,8 @@ pub enum ApiError { #[source] error: codec::Error, }, + #[error("The given `StateBackend` isn't a `TrieBackend`.")] + StateBackendIsNotTrie, #[error(transparent)] Application(#[from] Box), } @@ -613,7 +616,7 @@ pub struct CallApiAtParams<'a, Block: BlockT, NC, Backend: StateBackend { /// The state backend that is used to store the block states. - type StateBackend: StateBackend>; + type StateBackend: StateBackend> + AsTrieBackend>; /// Calls the given api function with the given encoded arguments at the given block and returns /// the encoded result. @@ -627,6 +630,9 @@ pub trait CallApiAt { /// Returns the runtime version at the given block. fn runtime_version_at(&self, at: &BlockId) -> Result; + + /// Get the state `at` the given block. + fn state_at(&self, at: &BlockId) -> Result; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 1c652966180a7..00acca7e86788 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -35,6 +35,7 @@ hex-literal = "0.3.4" pretty_assertions = "1.2.1" rand = "0.7.2" sp-runtime = { version = "6.0.0", path = "../runtime" } +trie-db = "0.24.0" assert_matches = "1.5" [features] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 505b53c800f9e..791183c4d7e4d 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -17,9 +17,11 @@ //! State machine backends. These manage the code and storage of contracts. +#[cfg(feature = "std")] +use crate::trie_backend::TrieBackend; use crate::{ - trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, ChildStorageCollection, - StorageCollection, StorageKey, StorageValue, UsageInfo, + trie_backend_essence::TrieBackendStorage, ChildStorageCollection, StorageCollection, + StorageKey, StorageValue, UsageInfo, }; use codec::Encode; use hash_db::Hasher; @@ -46,9 +48,7 @@ pub trait Backend: sp_std::fmt::Debug { fn storage(&self, key: &[u8]) -> Result, Self::Error>; /// Get keyed storage value hash or None if there is nothing associated. - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.storage(key).map(|v| v.map(|v| H::hash(&v))) - } + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error>; /// Get keyed child storage or None if there is nothing associated. 
fn child_storage( @@ -62,13 +62,11 @@ pub trait Backend: sp_std::fmt::Debug { &self, child_info: &ChildInfo, key: &[u8], - ) -> Result, Self::Error> { - self.child_storage(child_info, key).map(|v| v.map(|v| H::hash(&v))) - } + ) -> Result, Self::Error>; /// true if a key exists in storage. fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.storage(key)?.is_some()) + Ok(self.storage_hash(key)?.is_some()) } /// true if a key exists in child storage. @@ -77,7 +75,7 @@ pub trait Backend: sp_std::fmt::Debug { child_info: &ChildInfo, key: &[u8], ) -> Result { - Ok(self.child_storage(child_info, key)?.is_some()) + Ok(self.child_storage_hash(child_info, key)?.is_some()) } /// Return the next key in storage in lexicographic order or `None` if there is no value. @@ -175,10 +173,6 @@ pub trait Backend: sp_std::fmt::Debug { all } - /// Try convert into trie backend. - fn as_trie_backend(&self) -> Option<&TrieBackend> { - None - } /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. @@ -273,6 +267,16 @@ pub trait Backend: sp_std::fmt::Debug { } } +/// Something that can be converted into a [`TrieBackend`]. +#[cfg(feature = "std")] +pub trait AsTrieBackend> { + /// Type of trie backend storage. + type TrieBackendStorage: TrieBackendStorage; + + /// Return the type as [`TrieBackend`]. + fn as_trie_backend(&self) -> &TrieBackend; +} + /// Trait that allows consolidate two transactions together. pub trait Consolidate { /// Consolidate two transactions into one. diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 6df23cdb7096e..e205e83e85572 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -19,6 +19,7 @@ use crate::{ backend::Backend, trie_backend::TrieBackend, StorageCollection, StorageKey, StorageValue, + TrieBackendBuilder, }; use codec::Codec; use hash_db::Hasher; @@ -46,7 +47,7 @@ where { let db = GenericMemoryDB::default(); // V1 is same as V0 for an empty trie. - TrieBackend::new(db, empty_trie_root::>()) + TrieBackendBuilder::new(db, empty_trie_root::>()).build() } impl TrieBackend, H> @@ -87,14 +88,14 @@ where pub fn update_backend(&self, root: H::Out, changes: GenericMemoryDB) -> Self { let mut clone = self.backend_storage().clone(); clone.consolidate(changes); - Self::new(clone, root) + TrieBackendBuilder::new(clone, root).build() } /// Apply the given transaction to this backend and set the root to the given value. pub fn apply_transaction(&mut self, root: H::Out, transaction: GenericMemoryDB) { let mut storage = sp_std::mem::take(self).into_storage(); storage.consolidate(transaction); - *self = TrieBackend::new(storage, root); + *self = TrieBackendBuilder::new(storage, root).build(); } /// Compare with another in-memory backend. 
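// Illustrative sketch, not part of the original patch: every former
// `TrieBackend::new(storage, root)` call now goes through `TrieBackendBuilder`,
// which defaults to "no recorder, no trie cache", so plain call sites such as
// `update_backend`/`apply_transaction` above stay a one-line change. The helper
// name `rebuild` and the choice of `BlakeTwo256` are assumptions for illustration.
use sp_runtime::traits::BlakeTwo256;
use sp_state_machine::{InMemoryBackend, TrieBackendBuilder};

fn rebuild(backend: &InMemoryBackend<BlakeTwo256>) -> InMemoryBackend<BlakeTwo256> {
    // Same storage and root as before, just assembled through the builder.
    TrieBackendBuilder::new(backend.backend_storage().clone(), *backend.root()).build()
}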
@@ -109,7 +110,7 @@ where KF: KeyFunction + Send + Sync, { fn clone(&self) -> Self { - TrieBackend::new(self.backend_storage().clone(), *self.root()) + TrieBackendBuilder::new(self.backend_storage().clone(), *self.root()).build() } } @@ -203,7 +204,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::backend::Backend; + use crate::backend::{AsTrieBackend, Backend}; use sp_core::storage::StateVersion; use sp_runtime::traits::BlakeTwo256; @@ -218,7 +219,7 @@ mod tests { vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], state_version, ); - let trie_backend = storage.as_trie_backend().unwrap(); + let trie_backend = storage.as_trie_backend(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index edc3db7a60e36..59488599883e9 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -29,8 +29,6 @@ mod ext; mod in_memory_backend; pub(crate) mod overlayed_changes; #[cfg(feature = "std")] -mod proving_backend; -#[cfg(feature = "std")] mod read_only; mod stats; #[cfg(feature = "std")] @@ -134,7 +132,7 @@ pub use crate::{ StorageTransactionCache, StorageValue, }, stats::{StateMachineStats, UsageInfo, UsageUnit}, - trie_backend::TrieBackend, + trie_backend::{TrieBackend, TrieBackendBuilder}, trie_backend_essence::{Storage, TrieBackendStorage}, }; @@ -144,11 +142,9 @@ mod std_reexport { basic::BasicExternalities, error::{Error, ExecutionError}, in_memory_backend::{new_in_mem, new_in_mem_hash_key}, - proving_backend::{ - create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, - }, read_only::{InspectState, ReadOnlyExternalities}, testing::TestExternalities, + trie_backend::create_proof_check_backend, }; pub use sp_trie::{ trie_types::{TrieDBMutV0, TrieDBMutV1}, @@ -158,6 +154,8 @@ mod std_reexport { #[cfg(feature = "std")] mod execution { + use crate::backend::AsTrieBackend; + use super::*; use codec::{Codec, Decode, Encode}; use hash_db::Hasher; @@ -188,9 +186,6 @@ mod execution { /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H>; - /// Proving Trie backend with in-memory storage. - pub type InMemoryProvingBackend<'a, H> = ProvingBackend<'a, MemoryDB, H>; - /// Strategy for executing a call into the runtime. 
#[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ExecutionStrategy { @@ -562,15 +557,13 @@ mod execution { runtime_code: &RuntimeCode, ) -> Result<(Vec, StorageProof), Box> where - B: Backend, + B: AsTrieBackend, H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, Spawn: SpawnNamed + Send + 'static, { - let trie_backend = backend - .as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + let trie_backend = backend.as_trie_backend(); prove_execution_on_trie_backend::<_, _, _, _>( trie_backend, overlay, @@ -607,23 +600,31 @@ mod execution { Exec: CodeExecutor + 'static + Clone, Spawn: SpawnNamed + Send + 'static, { - let proving_backend = proving_backend::ProvingBackend::new(trie_backend); - let mut sm = StateMachine::<_, H, Exec>::new( - &proving_backend, - overlay, - exec, - method, - call_data, - Extensions::default(), - runtime_code, - spawn_handle, - ); + let proving_backend = + TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); + + let result = { + let mut sm = StateMachine::<_, H, Exec>::new( + &proving_backend, + overlay, + exec, + method, + call_data, + Extensions::default(), + runtime_code, + spawn_handle, + ); + + sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_wasm(), + None, + )? + }; + + let proof = proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed"); - let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_wasm(), - None, - )?; - let proof = sm.backend.extract_proof(); Ok((result.into_encoded(), proof)) } @@ -639,7 +640,7 @@ mod execution { runtime_code: &RuntimeCode, ) -> Result, Box> where - H: Hasher, + H: Hasher + 'static, Exec: CodeExecutor + Clone + 'static, H::Out: Ord + 'static + codec::Codec, Spawn: SpawnNamed + Send + 'static, @@ -693,15 +694,13 @@ mod execution { /// Generate storage read proof. 
pub fn prove_read(backend: B, keys: I) -> Result> where - B: Backend, + B: AsTrieBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend - .as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + let trie_backend = backend.as_trie_backend(); prove_read_on_trie_backend(trie_backend, keys) } @@ -829,13 +828,11 @@ mod execution { start_at: &[Vec], ) -> Result<(StorageProof, u32), Box> where - B: Backend, + B: AsTrieBackend, H: Hasher, H::Out: Ord + Codec, { - let trie_backend = backend - .as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + let trie_backend = backend.as_trie_backend(); prove_range_read_with_child_with_size_on_trie_backend(trie_backend, size_limit, start_at) } @@ -856,7 +853,9 @@ mod execution { return Err(Box::new("Invalid start of range.")) } - let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); + let recorder = sp_trie::recorder::Recorder::default(); + let proving_backend = + TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build(); let mut count = 0; let mut child_roots = HashSet::new(); @@ -924,7 +923,7 @@ mod execution { // do not add two child trie with same root true } - } else if proving_backend.estimate_encoded_size() <= size_limit { + } else if recorder.estimate_encoded_size() <= size_limit { count += 1; true } else { @@ -948,7 +947,11 @@ mod execution { start_at = None; } } - Ok((proving_backend.extract_proof(), count)) + + let proof = proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed"); + Ok((proof, count)) } /// Generate range storage read proof. @@ -960,13 +963,11 @@ mod execution { start_at: Option<&[u8]>, ) -> Result<(StorageProof, u32), Box> where - B: Backend, + B: AsTrieBackend, H: Hasher, H::Out: Ord + Codec, { - let trie_backend = backend - .as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + let trie_backend = backend.as_trie_backend(); prove_range_read_with_size_on_trie_backend( trie_backend, child_info, @@ -989,7 +990,9 @@ mod execution { H: Hasher, H::Out: Ord + Codec, { - let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); + let recorder = sp_trie::recorder::Recorder::default(); + let proving_backend = + TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build(); let mut count = 0; proving_backend .apply_to_key_values_while( @@ -997,7 +1000,7 @@ mod execution { prefix, start_at, |_key, _value| { - if count == 0 || proving_backend.estimate_encoded_size() <= size_limit { + if count == 0 || recorder.estimate_encoded_size() <= size_limit { count += 1; true } else { @@ -1007,7 +1010,11 @@ mod execution { false, ) .map_err(|e| Box::new(e) as Box)?; - Ok((proving_backend.extract_proof(), count)) + + let proof = proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed"); + Ok((proof, count)) } /// Generate child storage read proof. 
@@ -1017,15 +1024,13 @@ mod execution { keys: I, ) -> Result> where - B: Backend, + B: AsTrieBackend, H: Hasher, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend - .as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + let trie_backend = backend.as_trie_backend(); prove_child_read_on_trie_backend(trie_backend, child_info, keys) } @@ -1041,13 +1046,17 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + let proving_backend = + TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); for key in keys.into_iter() { proving_backend .storage(key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof()) + + Ok(proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed")) } /// Generate storage read proof on pre-created trie backend. @@ -1063,13 +1072,17 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + let proving_backend = + TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); for key in keys.into_iter() { proving_backend .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } - Ok(proving_backend.extract_proof()) + + Ok(proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed")) } /// Check storage read proof, generated by `prove_read` call. @@ -1079,7 +1092,7 @@ mod execution { keys: I, ) -> Result, Option>>, Box> where - H: Hasher, + H: Hasher + 'static, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, @@ -1104,7 +1117,7 @@ mod execution { start_at: &[Vec], ) -> Result<(KeyValueStates, usize), Box> where - H: Hasher, + H: Hasher + 'static, H::Out: Ord + Codec, { let proving_backend = create_proof_check_backend::(root, proof)?; @@ -1121,7 +1134,7 @@ mod execution { start_at: Option<&[u8]>, ) -> Result<(Vec<(Vec, Vec)>, bool), Box> where - H: Hasher, + H: Hasher + 'static, H::Out: Ord + Codec, { let proving_backend = create_proof_check_backend::(root, proof)?; @@ -1142,7 +1155,7 @@ mod execution { keys: I, ) -> Result, Option>>, Box> where - H: Hasher, + H: Hasher + 'static, H::Out: Ord + Codec, I: IntoIterator, I::Item: AsRef<[u8]>, @@ -1346,7 +1359,7 @@ mod execution { #[cfg(test)] mod tests { - use super::{ext::Ext, *}; + use super::{backend::AsTrieBackend, ext::Ext, *}; use crate::{execution::CallResult, in_memory_backend::new_in_mem_hash_key}; use assert_matches::assert_matches; use codec::{Decode, Encode}; @@ -1358,6 +1371,7 @@ mod tests { NativeOrEncoded, NeverNativeValue, }; use sp_runtime::traits::BlakeTwo256; + use sp_trie::trie_types::{TrieDBMutBuilderV0, TrieDBMutBuilderV1}; use std::{ collections::{BTreeMap, HashMap}, panic::UnwindSafe, @@ -1419,7 +1433,7 @@ mod tests { execute_works_inner(StateVersion::V1); } fn execute_works_inner(state_version: StateVersion) { - let backend = trie_backend::tests::test_trie(state_version); + let backend = trie_backend::tests::test_trie(state_version, None, None); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1447,7 +1461,7 @@ mod tests { execute_works_with_native_else_wasm_inner(StateVersion::V1); } fn execute_works_with_native_else_wasm_inner(state_version: StateVersion) { - let backend = 
trie_backend::tests::test_trie(state_version); + let backend = trie_backend::tests::test_trie(state_version, None, None); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1476,7 +1490,7 @@ mod tests { } fn dual_execution_strategy_detects_consensus_failure_inner(state_version: StateVersion) { let mut consensus_failed = false; - let backend = trie_backend::tests::test_trie(state_version); + let backend = trie_backend::tests::test_trie(state_version, None, None); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1520,7 +1534,7 @@ mod tests { }; // fetch execution proof from 'remote' full node - let mut remote_backend = trie_backend::tests::test_trie(state_version); + let mut remote_backend = trie_backend::tests::test_trie(state_version, None, None); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let (remote_result, remote_proof) = prove_execution( &mut remote_backend, @@ -1560,7 +1574,7 @@ mod tests { b"bbb".to_vec() => b"3".to_vec() ]; let state = InMemoryBackend::::from((initial, StateVersion::default())); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); overlay.set_storage(b"aba".to_vec(), Some(b"1312".to_vec())); @@ -1716,7 +1730,7 @@ mod tests { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; let state = new_in_mem_hash_key::(); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); @@ -1732,7 +1746,7 @@ mod tests { let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()]; let key = b"key".to_vec(); let state = new_in_mem_hash_key::(); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); { @@ -1769,7 +1783,7 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); let state = new_in_mem_hash_key::(); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); // For example, block initialization with event. 
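// Illustrative sketch, not part of the original patch: the dropped `unwrap()`s in
// these tests come from the new `AsTrieBackend` trait, which hands out the trie
// backend directly instead of an `Option`. A minimal standalone version of the
// same pattern (key, value and function name are made up):
use sp_core::storage::StateVersion;
use sp_runtime::traits::BlakeTwo256;
use sp_state_machine::{backend::AsTrieBackend, Backend, InMemoryBackend};

fn as_trie_backend_is_infallible() {
    let state = InMemoryBackend::<BlakeTwo256>::default()
        .update(vec![(None, vec![(b"key".to_vec(), Some(b"value".to_vec()))])], StateVersion::V1);
    // No `Option` to unwrap: a `TrieBackend` is trivially its own trie backend.
    let trie_backend = state.as_trie_backend();
    assert_eq!(trie_backend.storage(b"key").unwrap(), Some(b"value".to_vec()));
}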
@@ -1840,7 +1854,7 @@ mod tests { let child_info = &child_info; let missing_child_info = &missing_child_info; // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -1857,7 +1871,7 @@ mod tests { ); assert_eq!(local_result2, false); // on child trie - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_child_read(remote_backend, child_info, &[b"value3"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -1924,8 +1938,8 @@ mod tests { let trie: InMemoryBackend = (storage.clone(), StateVersion::default()).into(); - let trie_root = trie.root(); - let backend = crate::ProvingBackend::new(&trie); + let trie_root = *trie.root(); + let backend = TrieBackendBuilder::wrap(&trie).with_recorder(Default::default()).build(); let mut queries = Vec::new(); for c in 0..(5 + nb_child_trie / 2) { // random existing query @@ -1970,10 +1984,10 @@ mod tests { } } - let storage_proof = backend.extract_proof(); + let storage_proof = backend.extract_proof().expect("Failed to extract proof"); let remote_proof = test_compact(storage_proof, &trie_root); let proof_check = - create_proof_check_backend::(*trie_root, remote_proof).unwrap(); + create_proof_check_backend::(trie_root, remote_proof).unwrap(); for (child_info, key, expected) in queries { assert_eq!( @@ -1987,7 +2001,7 @@ mod tests { #[test] fn prove_read_with_size_limit_works() { let state_version = StateVersion::V0; - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let remote_root = remote_backend.storage_root(::std::iter::empty(), state_version).0; let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); @@ -1995,7 +2009,7 @@ mod tests { assert_eq!(proof.into_memory_db::().drain().len(), 3); assert_eq!(count, 1); - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 9); @@ -2018,7 +2032,7 @@ mod tests { assert_eq!(results.len() as u32, 101); assert_eq!(completed, false); - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 11); @@ -2035,7 +2049,7 @@ mod tests { let mut state_version = StateVersion::V0; let (mut mdb, mut root) = trie_backend::tests::test_db(state_version); { - let mut trie = TrieDBMutV0::from_existing(&mut mdb, &mut root).unwrap(); + let mut trie = TrieDBMutBuilderV0::from_existing(&mut mdb, &mut root).build(); trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash .expect("insert failed"); 
trie.insert(b"foo2", vec![3u8; 16].as_slice()) // no inner hash @@ -2045,7 +2059,7 @@ mod tests { } let check_proof = |mdb, root, state_version| -> StorageProof { - let remote_backend = TrieBackend::new(mdb, root); + let remote_backend = TrieBackendBuilder::new(mdb, root).build(); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); // check proof locally @@ -2069,7 +2083,7 @@ mod tests { // do switch state_version = StateVersion::V1; { - let mut trie = TrieDBMutV1::from_existing(&mut mdb, &mut root).unwrap(); + let mut trie = TrieDBMutBuilderV1::from_existing(&mut mdb, &mut root).build(); trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash .expect("insert failed"); // update with same value do change @@ -2088,10 +2102,10 @@ mod tests { #[test] fn prove_range_with_child_works() { let state_version = StateVersion::V0; - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let mut start_at = smallvec::SmallVec::<[Vec; 2]>::new(); - let trie_backend = remote_backend.as_trie_backend().unwrap(); + let trie_backend = remote_backend.as_trie_backend(); let max_iter = 1000; let mut nb_loop = 0; loop { @@ -2138,7 +2152,7 @@ mod tests { let child_info2 = ChildInfo::new_default(b"sub2"); // this root will be include in proof let child_info3 = ChildInfo::new_default(b"sub"); - let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let long_vec: Vec = (0..1024usize).map(|_| 8u8).collect(); let (remote_root, transaction) = remote_backend.full_storage_root( std::iter::empty(), @@ -2170,9 +2184,9 @@ mod tests { .into_iter(), state_version, ); - let mut remote_storage = remote_backend.into_storage(); + let mut remote_storage = remote_backend.backend_storage().clone(); remote_storage.consolidate(transaction); - let remote_backend = TrieBackend::new(remote_storage, remote_root); + let remote_backend = TrieBackendBuilder::new(remote_storage, remote_root).build(); let remote_proof = prove_child_read(remote_backend, &child_info1, &[b"key1"]).unwrap(); let size = remote_proof.encoded_size(); let remote_proof = test_compact(remote_proof, &remote_root); @@ -2198,7 +2212,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let mut transaction = { - let backend = test_trie(state_version); + let backend = test_trie(state_version, None, None); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); @@ -2224,7 +2238,7 @@ mod tests { b"bbb".to_vec() => b"".to_vec() ]; let state = InMemoryBackend::::from((initial, StateVersion::default())); - let backend = state.as_trie_backend().unwrap(); + let backend = state.as_trie_backend(); let mut overlay = OverlayedChanges::default(); overlay.start_transaction(); @@ -2255,7 +2269,7 @@ mod tests { struct DummyExt(u32); } - let backend = trie_backend::tests::test_trie(state_version); + let backend = trie_backend::tests::test_trie(state_version, None, None); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs deleted 
file mode 100644 index f5cf542908b10..0000000000000 --- a/primitives/state-machine/src/proving_backend.rs +++ /dev/null @@ -1,611 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Proving state machine backend. - -use crate::{ - trie_backend::TrieBackend, - trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}, - Backend, DBValue, Error, ExecutionError, -}; -use codec::{Codec, Decode, Encode}; -use hash_db::{HashDB, Hasher, Prefix, EMPTY_PREFIX}; -use log::debug; -use parking_lot::RwLock; -use sp_core::storage::{ChildInfo, StateVersion}; -pub use sp_trie::trie_types::TrieError; -use sp_trie::{ - empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, - LayoutV1, MemoryDB, Recorder, StorageProof, -}; -use std::{ - collections::{hash_map::Entry, HashMap}, - sync::Arc, -}; - -/// Patricia trie-based backend specialized in get value proofs. -pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - pub(crate) backend: &'a TrieBackendEssence, - pub(crate) proof_recorder: &'a mut Recorder, -} - -impl<'a, S, H> ProvingBackendRecorder<'a, S, H> -where - S: TrieBackendStorage, - H: Hasher, - H::Out: Codec, -{ - /// Produce proof for a key query. - pub fn storage(&mut self, key: &[u8]) -> Result>, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); - - let map_e = |e| format!("Trie lookup error: {}", e); - - // V1 is equivalent to V0 on read. - read_trie_value_with::, _, Ephemeral>( - &eph, - self.backend.root(), - key, - &mut *self.proof_recorder, - ) - .map_err(map_e) - } - - /// Produce proof for a child key query. - pub fn child_storage( - &mut self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, String> { - let storage_key = child_info.storage_key(); - let root = self - .storage(storage_key)? - .and_then(|r| Decode::decode(&mut &r[..]).ok()) - // V1 is equivalent to V0 on empty trie - .unwrap_or_else(empty_child_trie_root::>); - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); - - let map_e = |e| format!("Trie lookup error: {}", e); - - // V1 is equivalent to V0 on read - read_child_trie_value_with::, _, _>( - child_info.keyspace(), - &eph, - root.as_ref(), - key, - &mut *self.proof_recorder, - ) - .map_err(map_e) - } - - /// Produce proof for the whole backend. - pub fn record_all_keys(&mut self) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); - - let mut iter = move || -> Result<(), Box>> { - let root = self.backend.root(); - // V1 and V is equivalent to V0 on read and recorder is key read. 
- record_all_keys::, _>(&eph, root, &mut *self.proof_recorder) - }; - - if let Err(e) = iter() { - debug!(target: "trie", "Error while recording all keys: {}", e); - } - } -} - -#[derive(Default)] -struct ProofRecorderInner { - /// All the records that we have stored so far. - records: HashMap>, - /// The encoded size of all recorded values. - encoded_size: usize, -} - -/// Global proof recorder, act as a layer over a hash db for recording queried data. -#[derive(Clone, Default)] -pub struct ProofRecorder { - inner: Arc>>, -} - -impl ProofRecorder { - /// Record the given `key` => `val` combination. - pub fn record(&self, key: Hash, val: Option) { - let mut inner = self.inner.write(); - let encoded_size = if let Entry::Vacant(entry) = inner.records.entry(key) { - let encoded_size = val.as_ref().map(Encode::encoded_size).unwrap_or(0); - - entry.insert(val); - encoded_size - } else { - 0 - }; - - inner.encoded_size += encoded_size; - } - - /// Returns the value at the given `key`. - pub fn get(&self, key: &Hash) -> Option> { - self.inner.read().records.get(key).cloned() - } - - /// Returns the estimated encoded size of the proof. - /// - /// The estimation is maybe bigger (by in maximum 4 bytes), but never smaller than the actual - /// encoded proof. - pub fn estimate_encoded_size(&self) -> usize { - let inner = self.inner.read(); - inner.encoded_size + codec::Compact(inner.records.len() as u32).encoded_size() - } - - /// Convert into a [`StorageProof`]. - pub fn to_storage_proof(&self) -> StorageProof { - StorageProof::new( - self.inner - .read() - .records - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())), - ) - } - - /// Reset the internal state. - pub fn reset(&self) { - let mut inner = self.inner.write(); - inner.records.clear(); - inner.encoded_size = 0; - } -} - -/// Patricia trie-based backend which also tracks all touched storage trie values. -/// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher>( - TrieBackend, H>, -); - -/// Trie backend storage with its proof recorder. -pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - backend: &'a S, - proof_recorder: ProofRecorder, -} - -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> -where - H::Out: Codec, -{ - /// Create new proving backend. - pub fn new(backend: &'a TrieBackend) -> Self { - let proof_recorder = Default::default(); - Self::new_with_recorder(backend, proof_recorder) - } - - /// Create new proving backend with the given recorder. - pub fn new_with_recorder( - backend: &'a TrieBackend, - proof_recorder: ProofRecorder, - ) -> Self { - let essence = backend.essence(); - let root = *essence.root(); - let recorder = ProofRecorderBackend { backend: essence.backend_storage(), proof_recorder }; - ProvingBackend(TrieBackend::new(recorder, root)) - } - - /// Extracting the gathered unordered proof. - pub fn extract_proof(&self) -> StorageProof { - self.0.essence().backend_storage().proof_recorder.to_storage_proof() - } - - /// Returns the estimated encoded size of the proof. - /// - /// The estimation is maybe bigger (by in maximum 4 bytes), but never smaller than the actual - /// encoded proof. - pub fn estimate_encoded_size(&self) -> usize { - self.0.essence().backend_storage().proof_recorder.estimate_encoded_size() - } - - /// Clear the proof recorded data. 
- pub fn clear_recorder(&self) { - self.0.essence().backend_storage().proof_recorder.reset() - } -} - -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage - for ProofRecorderBackend<'a, S, H> -{ - type Overlay = S::Overlay; - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - if let Some(v) = self.proof_recorder.get(key) { - return Ok(v) - } - - let backend_value = self.backend.get(key, prefix)?; - self.proof_recorder.record(*key, backend_value.clone()); - Ok(backend_value) - } -} - -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug - for ProvingBackend<'a, S, H> -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "ProvingBackend") - } -} - -impl<'a, S, H> Backend for ProvingBackend<'a, S, H> -where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, - H::Out: Ord + Codec, -{ - type Error = String; - type Transaction = S::Overlay; - type TrieBackendStorage = S; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.storage(key) - } - - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.0.child_storage(child_info, key) - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - allow_missing: bool, - ) -> Result { - self.0.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - f: F, - ) { - self.0.apply_to_keys_while(child_info, prefix, start_at, f) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.0.next_child_storage_key(child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.0.for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.0.for_key_values_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - f: F, - ) { - self.0.for_child_keys_with_prefix(child_info, prefix, f) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.0.pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.0.keys(prefix) - } - - fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { - self.0.child_keys(child_info, prefix) - } - - fn storage_root<'b>( - &self, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (H::Out, Self::Transaction) - where - H::Out: Ord, - { - self.0.storage_root(delta, state_version) - } - - fn child_storage_root<'b>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (H::Out, bool, Self::Transaction) - where - H::Out: Ord, - { - self.0.child_storage_root(child_info, delta, state_version) - } - - fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} - - fn usage_info(&self) -> crate::stats::UsageInfo { - self.0.usage_info() - } -} - -/// Create a backend used for checking the proof., using `H` as hasher. -/// -/// `proof` and `root` must match, i.e. `root` must be the correct root of `proof` nodes. 
-pub fn create_proof_check_backend( - root: H::Out, - proof: StorageProof, -) -> Result, H>, Box> -where - H: Hasher, - H::Out: Codec, -{ - let db = proof.into_memory_db(); - - if db.contains(&root, EMPTY_PREFIX) { - Ok(TrieBackend::new(db, root)) - } else { - Err(Box::new(ExecutionError::InvalidProof)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - proving_backend::create_proof_check_backend, trie_backend::tests::test_trie, - InMemoryBackend, - }; - use sp_core::H256; - use sp_runtime::traits::BlakeTwo256; - use sp_trie::PrefixedMemoryDB; - - fn test_proving( - trie_backend: &TrieBackend, BlakeTwo256>, - ) -> ProvingBackend, BlakeTwo256> { - ProvingBackend::new(trie_backend) - } - - #[test] - fn proof_is_empty_until_value_is_read() { - proof_is_empty_until_value_is_read_inner(StateVersion::V0); - proof_is_empty_until_value_is_read_inner(StateVersion::V1); - } - fn proof_is_empty_until_value_is_read_inner(test_hash: StateVersion) { - let trie_backend = test_trie(test_hash); - assert!(test_proving(&trie_backend).extract_proof().is_empty()); - } - - #[test] - fn proof_is_non_empty_after_value_is_read() { - proof_is_non_empty_after_value_is_read_inner(StateVersion::V0); - proof_is_non_empty_after_value_is_read_inner(StateVersion::V1); - } - fn proof_is_non_empty_after_value_is_read_inner(test_hash: StateVersion) { - let trie_backend = test_trie(test_hash); - let backend = test_proving(&trie_backend); - assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof().is_empty()); - } - - #[test] - fn proof_is_invalid_when_does_not_contains_root() { - let result = create_proof_check_backend::( - H256::from_low_u64_be(1), - StorageProof::empty(), - ); - assert!(result.is_err()); - } - - #[test] - fn passes_through_backend_calls() { - passes_through_backend_calls_inner(StateVersion::V0); - passes_through_backend_calls_inner(StateVersion::V1); - } - fn passes_through_backend_calls_inner(state_version: StateVersion) { - let trie_backend = test_trie(state_version); - let proving_backend = test_proving(&trie_backend); - assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); - assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - - let (trie_root, mut trie_mdb) = - trie_backend.storage_root(std::iter::empty(), state_version); - let (proving_root, mut proving_mdb) = - proving_backend.storage_root(std::iter::empty(), state_version); - assert_eq!(trie_root, proving_root); - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); - } - - #[test] - fn proof_recorded_and_checked_top() { - proof_recorded_and_checked_inner(StateVersion::V0); - proof_recorded_and_checked_inner(StateVersion::V1); - } - fn proof_recorded_and_checked_inner(state_version: StateVersion) { - let size_content = 34; // above hashable value treshold. 
- let value_range = 0..64; - let contents = value_range - .clone() - .map(|i| (vec![i], Some(vec![i; size_content]))) - .collect::>(); - let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(vec![(None, contents)], state_version); - let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0; - value_range.clone().for_each(|i| { - assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) - }); - - let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; - assert_eq!(in_memory_root, trie_root); - value_range - .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); - - let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); - - let proof = proving.extract_proof(); - - let proof_check = create_proof_check_backend::(in_memory_root, proof).unwrap(); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); - } - - #[test] - fn proof_recorded_and_checked_with_child() { - proof_recorded_and_checked_with_child_inner(StateVersion::V0); - proof_recorded_and_checked_with_child_inner(StateVersion::V1); - } - fn proof_recorded_and_checked_with_child_inner(state_version: StateVersion) { - let child_info_1 = ChildInfo::new_default(b"sub1"); - let child_info_2 = ChildInfo::new_default(b"sub2"); - let child_info_1 = &child_info_1; - let child_info_2 = &child_info_2; - let contents = vec![ - (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), - (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), - ]; - let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(contents, state_version); - let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; - let in_memory_root = in_memory - .full_storage_root( - std::iter::empty(), - child_storage_keys.iter().map(|k| (k, std::iter::empty())), - state_version, - ) - .0; - (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); - (28..65).for_each(|i| { - assert_eq!(in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i]) - }); - (10..15).for_each(|i| { - assert_eq!(in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i]) - }); - - let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty(), state_version).0; - assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - - let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - - let proof = proving.extract_proof(); - - let proof_check = create_proof_check_backend::(in_memory_root, proof).unwrap(); - assert!(proof_check.storage(&[0]).is_err()); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); - // note that it is include in root because proof close - assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); - assert_eq!(proof_check.storage(&[64]).unwrap(), None); - - let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); - - let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::(in_memory_root, proof).unwrap(); - 
assert_eq!(proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), vec![64]); - } - - #[test] - fn storage_proof_encoded_size_estimation_works() { - storage_proof_encoded_size_estimation_works_inner(StateVersion::V0); - storage_proof_encoded_size_estimation_works_inner(StateVersion::V1); - } - fn storage_proof_encoded_size_estimation_works_inner(state_version: StateVersion) { - let trie_backend = test_trie(state_version); - let backend = test_proving(&trie_backend); - - let check_estimation = - |backend: &ProvingBackend<'_, PrefixedMemoryDB, BlakeTwo256>| { - let storage_proof = backend.extract_proof(); - let estimation = - backend.0.essence().backend_storage().proof_recorder.estimate_encoded_size(); - - assert_eq!(storage_proof.encoded_size(), estimation); - }; - - assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - check_estimation(&backend); - - assert_eq!(backend.storage(b"value1").unwrap(), Some(vec![42])); - check_estimation(&backend); - - assert_eq!(backend.storage(b"value2").unwrap(), Some(vec![24])); - check_estimation(&backend); - - assert!(backend.storage(b"doesnotexist").unwrap().is_none()); - check_estimation(&backend); - - assert!(backend.storage(b"doesnotexist2").unwrap().is_none()); - check_estimation(&backend); - } - - #[test] - fn proof_recorded_for_same_execution_should_be_deterministic() { - let storage_changes = vec![ - (H256::random(), Some(b"value1".to_vec())), - (H256::random(), Some(b"value2".to_vec())), - (H256::random(), Some(b"value3".to_vec())), - (H256::random(), Some(b"value4".to_vec())), - (H256::random(), Some(b"value5".to_vec())), - (H256::random(), Some(b"value6".to_vec())), - (H256::random(), Some(b"value7".to_vec())), - (H256::random(), Some(b"value8".to_vec())), - ]; - - let proof_recorder = - ProofRecorder:: { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) }; - storage_changes - .clone() - .into_iter() - .for_each(|(key, val)| proof_recorder.record(key, val)); - let proof1 = proof_recorder.to_storage_proof(); - - let proof_recorder = - ProofRecorder:: { inner: Arc::new(RwLock::new(ProofRecorderInner::default())) }; - storage_changes - .into_iter() - .for_each(|(key, val)| proof_recorder.record(key, val)); - let proof2 = proof_recorder.to_storage_proof(); - - assert_eq!(proof1, proof2); - } -} diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 622915a2d0525..e58fb760f4d7e 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -23,7 +23,6 @@ use hash_db::Hasher; use sp_core::{ storage::{ChildInfo, StateVersion, TrackedStorageKey}, traits::Externalities, - Blake2Hasher, }; use sp_externalities::MultiRemovalResults; use std::{ @@ -44,7 +43,10 @@ pub trait InspectState> { fn inspect_state R, R>(&self, f: F) -> R; } -impl> InspectState for B { +impl> InspectState for B +where + H::Out: Encode, +{ fn inspect_state R, R>(&self, f: F) -> R { ReadOnlyExternalities::from(self).execute_with(f) } @@ -66,7 +68,10 @@ impl<'a, H: Hasher, B: 'a + Backend> From<&'a B> for ReadOnlyExternalities<'a } } -impl<'a, H: Hasher, B: 'a + Backend> ReadOnlyExternalities<'a, H, B> { +impl<'a, H: Hasher, B: 'a + Backend> ReadOnlyExternalities<'a, H, B> +where + H::Out: Encode, +{ /// Execute the given closure while `self` is set as externalities. /// /// Returns the result of the given closure. 
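// Illustrative sketch, not part of the original patch: as the hunks below show,
// `ReadOnlyExternalities::storage_hash` now asks the backend for the hash (using
// the backend's own hasher) instead of re-hashing the fetched value with
// `Blake2Hasher`. Key, value and function name are made up for the example.
use codec::Encode;
use sp_core::storage::StateVersion;
use sp_externalities::Externalities;
use sp_runtime::traits::BlakeTwo256;
use sp_state_machine::{Backend, InMemoryBackend, ReadOnlyExternalities};

fn read_only_storage_hash() {
    let state = InMemoryBackend::<BlakeTwo256>::default()
        .update(vec![(None, vec![(b"key".to_vec(), Some(b"value".to_vec()))])], StateVersion::V1);
    let ext = ReadOnlyExternalities::<BlakeTwo256, _>::from(&state);

    // Both calls now resolve through `Backend::storage_hash`, so they agree.
    assert_eq!(
        ext.storage_hash(b"key"),
        state.storage_hash(b"key").unwrap().map(|hash| hash.encode()),
    );
}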
@@ -75,7 +80,10 @@ impl<'a, H: Hasher, B: 'a + Backend> ReadOnlyExternalities<'a, H, B> { } } -impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities<'a, H, B> { +impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities<'a, H, B> +where + H::Out: Encode, +{ fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) { panic!("Should not be used in read-only externalities!") } @@ -87,7 +95,10 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< } fn storage_hash(&self, key: &[u8]) -> Option> { - self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) + self.backend + .storage_hash(key) + .expect("Backed failed for storage_hash in ReadOnlyExternalities") + .map(|h| h.encode()) } fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { @@ -97,7 +108,10 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< } fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { - self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) + self.backend + .child_storage_hash(child_info, key) + .expect("Backed failed for child_storage_hash in ReadOnlyExternalities") + .map(|h| h.encode()) } fn next_storage_key(&self, key: &[u8]) -> Option { diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 57d4f0b4898eb..46b7573d9d1a6 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -24,7 +24,7 @@ use std::{ use crate::{ backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey, - StorageTransactionCache, StorageValue, + StorageTransactionCache, StorageValue, TrieBackendBuilder, }; use hash_db::Hasher; @@ -41,8 +41,9 @@ use sp_externalities::{Extension, ExtensionStore, Extensions}; use sp_trie::StorageProof; /// Simple HashMap-based Externalities impl. -pub struct TestExternalities +pub struct TestExternalities where + H: Hasher + 'static, H::Out: codec::Codec + Ord, { /// The overlay changed storage. @@ -58,8 +59,9 @@ where pub state_version: StateVersion, } -impl TestExternalities +impl TestExternalities where + H: Hasher + 'static, H::Out: Ord + 'static + codec::Codec, { /// Get externalities implementation. @@ -202,7 +204,9 @@ where /// This implementation will wipe the proof recorded in between calls. Consecutive calls will /// get their own proof from scratch. pub fn execute_and_prove(&mut self, execute: impl FnOnce() -> R) -> (R, StorageProof) { - let proving_backend = crate::InMemoryProvingBackend::new(&self.backend); + let proving_backend = TrieBackendBuilder::wrap(&self.backend) + .with_recorder(Default::default()) + .build(); let mut proving_ext = Ext::new( &mut self.overlay, &mut self.storage_transaction_cache, @@ -211,7 +215,7 @@ where ); let outcome = sp_externalities::set_and_run_with_externalities(&mut proving_ext, execute); - let proof = proving_backend.extract_proof(); + let proof = proving_backend.extract_proof().expect("Failed to extract storage proof"); (outcome, proof) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 130b4bf178202..447c276a6049c 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -17,31 +17,182 @@ //! Trie-based state machine backend. 
+#[cfg(feature = "std")] +use crate::backend::AsTrieBackend; use crate::{ trie_backend_essence::{TrieBackendEssence, TrieBackendStorage}, Backend, StorageKey, StorageValue, }; use codec::Codec; +#[cfg(feature = "std")] +use hash_db::HashDB; use hash_db::Hasher; use sp_core::storage::{ChildInfo, StateVersion}; use sp_std::vec::Vec; +#[cfg(feature = "std")] +use sp_trie::{cache::LocalTrieCache, recorder::Recorder}; +#[cfg(feature = "std")] +use sp_trie::{MemoryDB, StorageProof}; -/// Patricia trie-based backend. Transaction type is an overlay of changes to commit. -pub struct TrieBackend, H: Hasher> { - pub(crate) essence: TrieBackendEssence, +/// Dummy type to be used in `no_std`. +/// +/// This is required to have the type available for [`TrieBackendBuilder`] and [`TrieBackend`]. +#[cfg(not(feature = "std"))] +pub struct LocalTrieCache(sp_std::marker::PhantomData); + +/// Special trait to support taking the [`LocalTrieCache`] by value or by reference. +/// +/// This trait is internal use only and to emphasize this, the trait is sealed. +pub trait AsLocalTrieCache: sealed::Sealed { + /// Returns `self` as [`LocalTrieCache`]. + #[cfg(feature = "std")] + fn as_local_trie_cache(&self) -> &LocalTrieCache; +} + +impl AsLocalTrieCache for LocalTrieCache { + #[cfg(feature = "std")] + fn as_local_trie_cache(&self) -> &LocalTrieCache { + self + } +} + +#[cfg(feature = "std")] +impl AsLocalTrieCache for &LocalTrieCache { + fn as_local_trie_cache(&self) -> &LocalTrieCache { + self + } +} + +/// Special module that contains the `Sealed` trait. +mod sealed { + use super::*; + + /// A special trait which prevents externals to implement the [`AsLocalTrieCache`] outside + /// of this crate. + pub trait Sealed {} + + impl Sealed for LocalTrieCache {} + impl Sealed for &LocalTrieCache {} +} + +/// Builder for creating a [`TrieBackend`]. +pub struct TrieBackendBuilder, H: Hasher, C = LocalTrieCache> { + storage: S, + root: H::Out, + #[cfg(feature = "std")] + recorder: Option>, + cache: Option, } -impl, H: Hasher> TrieBackend +impl TrieBackendBuilder> where - H::Out: Codec, + S: TrieBackendStorage, + H: Hasher, { - /// Create new trie-based backend. + /// Create a new builder instance. pub fn new(storage: S, root: H::Out) -> Self { - TrieBackend { essence: TrieBackendEssence::new(storage, root) } + Self { + storage, + root, + #[cfg(feature = "std")] + recorder: None, + cache: None, + } + } +} + +impl TrieBackendBuilder +where + S: TrieBackendStorage, + H: Hasher, +{ + /// Wrap the given [`TrieBackend`]. + /// + /// This can be used for example if all accesses to the trie should + /// be recorded while some other functionality still uses the non-recording + /// backend. + /// + /// The backend storage and the cache will be taken from `other`. + pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C> { + TrieBackendBuilder { + storage: other.essence.backend_storage(), + root: *other.essence.root(), + #[cfg(feature = "std")] + recorder: None, + #[cfg(feature = "std")] + cache: other.essence.trie_node_cache.as_ref(), + #[cfg(not(feature = "std"))] + cache: None, + } + } + + /// Use the given optional `recorder` for the to be configured [`TrieBackend`]. + #[cfg(feature = "std")] + pub fn with_optional_recorder(self, recorder: Option>) -> Self { + Self { recorder, ..self } } + /// Use the given `recorder` for the to be configured [`TrieBackend`]. 
+ #[cfg(feature = "std")] + pub fn with_recorder(self, recorder: Recorder) -> Self { + Self { recorder: Some(recorder), ..self } + } + + /// Use the given optional `cache` for the to be configured [`TrieBackend`]. + #[cfg(feature = "std")] + pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { + TrieBackendBuilder { + cache, + root: self.root, + storage: self.storage, + recorder: self.recorder, + } + } + + /// Use the given `cache` for the to be configured [`TrieBackend`]. + #[cfg(feature = "std")] + pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { + TrieBackendBuilder { + cache: Some(cache), + root: self.root, + storage: self.storage, + recorder: self.recorder, + } + } + + /// Build the configured [`TrieBackend`]. + #[cfg(feature = "std")] + pub fn build(self) -> TrieBackend { + TrieBackend { + essence: TrieBackendEssence::new_with_cache_and_recorder( + self.storage, + self.root, + self.cache, + self.recorder, + ), + } + } + + /// Build the configured [`TrieBackend`]. + #[cfg(not(feature = "std"))] + pub fn build(self) -> TrieBackend { + let _ = self.cache; + + TrieBackend { essence: TrieBackendEssence::new(self.storage, self.root) } + } +} + +/// Patricia trie-based backend. Transaction type is an overlay of changes to commit. +pub struct TrieBackend, H: Hasher, C = LocalTrieCache> { + pub(crate) essence: TrieBackendEssence, +} + +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> TrieBackend +where + H::Out: Codec, +{ /// Get backend essence reference. - pub fn essence(&self) -> &TrieBackendEssence { + pub fn essence(&self) -> &TrieBackendEssence { &self.essence } @@ -59,15 +210,26 @@ where pub fn into_storage(self) -> S { self.essence.into_storage() } + + /// Extract the [`StorageProof`]. + /// + /// This only returns `Some` when there was a recorder set. + #[cfg(feature = "std")] + pub fn extract_proof(mut self) -> Option { + self.essence.recorder.take().map(|r| r.drain_storage_proof()) + } } -impl, H: Hasher> sp_std::fmt::Debug for TrieBackend { +impl, H: Hasher, C: AsLocalTrieCache> sp_std::fmt::Debug + for TrieBackend +{ fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { write!(f, "TrieBackend") } } -impl, H: Hasher> Backend for TrieBackend +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> Backend + for TrieBackend where H::Out: Ord + Codec, { @@ -75,10 +237,22 @@ where type Transaction = S::Overlay; type TrieBackendStorage = S; + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.essence.storage_hash(key) + } + fn storage(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage(key) } + fn child_storage_hash( + &self, + child_info: &ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.essence.child_storage_hash(child_info, key) + } + fn child_storage( &self, child_info: &ChildInfo, @@ -169,10 +343,6 @@ where self.essence.child_storage_root(child_info, delta, state_version) } - fn as_trie_backend(&self) -> Option<&TrieBackend> { - Some(self) - } - fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} fn usage_info(&self) -> crate::UsageInfo { @@ -184,20 +354,97 @@ where } } +#[cfg(feature = "std")] +impl, H: Hasher, C> AsTrieBackend for TrieBackend { + type TrieBackendStorage = S; + + fn as_trie_backend(&self) -> &TrieBackend { + self + } +} + +/// Create a backend used for checking the proof, using `H` as hasher. +/// +/// `proof` and `root` must match, i.e. `root` must be the correct root of `proof` nodes. 
+#[cfg(feature = "std")] +pub fn create_proof_check_backend( + root: H::Out, + proof: StorageProof, +) -> Result, H>, Box> +where + H: Hasher, + H::Out: Codec, +{ + let db = proof.into_memory_db(); + + if db.contains(&root, hash_db::EMPTY_PREFIX) { + Ok(TrieBackendBuilder::new(db, root).build()) + } else { + Err(Box::new(crate::ExecutionError::InvalidProof)) + } +} + #[cfg(test)] pub mod tests { + use crate::{new_in_mem, InMemoryBackend}; + use super::*; use codec::Encode; use sp_core::H256; use sp_runtime::traits::BlakeTwo256; use sp_trie::{ - trie_types::{TrieDB, TrieDBMutV0, TrieDBMutV1}, - KeySpacedDBMut, PrefixedMemoryDB, Trie, TrieMut, + cache::{CacheSize, SharedTrieCache}, + trie_types::{TrieDBBuilder, TrieDBMutBuilderV0, TrieDBMutBuilderV1}, + KeySpacedDBMut, PrefixedKey, PrefixedMemoryDB, Trie, TrieCache, TrieMut, }; use std::{collections::HashSet, iter}; + use trie_db::NodeCodec; const CHILD_KEY_1: &[u8] = b"sub1"; + type Recorder = sp_trie::recorder::Recorder; + type Cache = LocalTrieCache; + type SharedCache = SharedTrieCache; + + macro_rules! parameterized_test { + ($name:ident, $internal_name:ident) => { + #[test] + fn $name() { + let parameters = vec![ + (StateVersion::V0, None, None), + (StateVersion::V0, Some(SharedCache::new(CacheSize::Unlimited)), None), + (StateVersion::V0, None, Some(Recorder::default())), + ( + StateVersion::V0, + Some(SharedCache::new(CacheSize::Unlimited)), + Some(Recorder::default()), + ), + (StateVersion::V1, None, None), + (StateVersion::V1, Some(SharedCache::new(CacheSize::Unlimited)), None), + (StateVersion::V1, None, Some(Recorder::default())), + ( + StateVersion::V1, + Some(SharedCache::new(CacheSize::Unlimited)), + Some(Recorder::default()), + ), + ]; + + for (version, cache, recorder) in parameters { + eprintln!( + "Running with version {:?}, cache enabled {} and recorder enabled {}", + version, + cache.is_some(), + recorder.is_some() + ); + + let cache = cache.as_ref().map(|c| c.local_cache()); + + $internal_name(version, cache, recorder.clone()); + } + } + }; + } + pub(crate) fn test_db(state_version: StateVersion) -> (PrefixedMemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); @@ -206,12 +453,12 @@ pub mod tests { let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); match state_version { StateVersion::V0 => { - let mut trie = TrieDBMutV0::new(&mut mdb, &mut root); + let mut trie = TrieDBMutBuilderV0::new(&mut mdb, &mut root).build(); trie.insert(b"value3", &[142; 33]).expect("insert failed"); trie.insert(b"value4", &[124; 33]).expect("insert failed"); }, StateVersion::V1 => { - let mut trie = TrieDBMutV1::new(&mut mdb, &mut root); + let mut trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); trie.insert(b"value3", &[142; 33]).expect("insert failed"); trie.insert(b"value4", &[124; 33]).expect("insert failed"); }, @@ -240,11 +487,11 @@ pub mod tests { match state_version { StateVersion::V0 => { - let trie = TrieDBMutV0::new(&mut mdb, &mut root); + let trie = TrieDBMutBuilderV0::new(&mut mdb, &mut root).build(); build(trie, &child_info, &sub_root[..]) }, StateVersion::V1 => { - let trie = TrieDBMutV1::new(&mut mdb, &mut root); + let trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); build(trie, &child_info, &sub_root[..]) }, }; @@ -254,27 +501,39 @@ pub mod tests { pub(crate) fn test_trie( hashed_value: StateVersion, + cache: Option, + recorder: Option, ) -> TrieBackend, BlakeTwo256> { let (mdb, root) = test_db(hashed_value); - TrieBackend::new(mdb, root) 
- } - #[test] - fn read_from_storage_returns_some() { - read_from_storage_returns_some_inner(StateVersion::V0); - read_from_storage_returns_some_inner(StateVersion::V1); - } - fn read_from_storage_returns_some_inner(state_version: StateVersion) { - assert_eq!(test_trie(state_version).storage(b"key").unwrap(), Some(b"value".to_vec())); + TrieBackendBuilder::new(mdb, root) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build() } - #[test] - fn read_from_child_storage_returns_some() { - read_from_child_storage_returns_some_inner(StateVersion::V0); - read_from_child_storage_returns_some_inner(StateVersion::V1); + parameterized_test!(read_from_storage_returns_some, read_from_storage_returns_some_inner); + fn read_from_storage_returns_some_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + assert_eq!( + test_trie(state_version, cache, recorder).storage(b"key").unwrap(), + Some(b"value".to_vec()) + ); } - fn read_from_child_storage_returns_some_inner(state_version: StateVersion) { - let test_trie = test_trie(state_version); + + parameterized_test!( + read_from_child_storage_returns_some, + read_from_child_storage_returns_some_inner + ); + fn read_from_child_storage_returns_some_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let test_trie = test_trie(state_version, cache, recorder); assert_eq!( test_trie .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") @@ -299,65 +558,81 @@ pub mod tests { ); } - #[test] - fn read_from_storage_returns_none() { - read_from_storage_returns_none_inner(StateVersion::V0); - read_from_storage_returns_none_inner(StateVersion::V1); - } - fn read_from_storage_returns_none_inner(state_version: StateVersion) { - assert_eq!(test_trie(state_version).storage(b"non-existing-key").unwrap(), None); + parameterized_test!(read_from_storage_returns_none, read_from_storage_returns_none_inner); + fn read_from_storage_returns_none_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + assert_eq!( + test_trie(state_version, cache, recorder).storage(b"non-existing-key").unwrap(), + None + ); } - #[test] - fn pairs_are_not_empty_on_non_empty_storage() { - pairs_are_not_empty_on_non_empty_storage_inner(StateVersion::V0); - pairs_are_not_empty_on_non_empty_storage_inner(StateVersion::V1); - } - fn pairs_are_not_empty_on_non_empty_storage_inner(state_version: StateVersion) { - assert!(!test_trie(state_version).pairs().is_empty()); + parameterized_test!( + pairs_are_not_empty_on_non_empty_storage, + pairs_are_not_empty_on_non_empty_storage_inner + ); + fn pairs_are_not_empty_on_non_empty_storage_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + assert!(!test_trie(state_version, cache, recorder).pairs().is_empty()); } #[test] fn pairs_are_empty_on_empty_storage() { - assert!(TrieBackend::, BlakeTwo256>::new( + assert!(TrieBackendBuilder::, BlakeTwo256>::new( PrefixedMemoryDB::default(), Default::default(), ) + .build() .pairs() .is_empty()); } - #[test] - fn storage_root_is_non_default() { - storage_root_is_non_default_inner(StateVersion::V0); - storage_root_is_non_default_inner(StateVersion::V1); - } - fn storage_root_is_non_default_inner(state_version: StateVersion) { + parameterized_test!(storage_root_is_non_default, storage_root_is_non_default_inner); + fn storage_root_is_non_default_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { assert!( - test_trie(state_version).storage_root(iter::empty(), 
state_version).0 != - H256::repeat_byte(0) + test_trie(state_version, cache, recorder) + .storage_root(iter::empty(), state_version) + .0 != H256::repeat_byte(0) ); } - #[test] - fn storage_root_transaction_is_non_empty() { - storage_root_transaction_is_non_empty_inner(StateVersion::V0); - storage_root_transaction_is_non_empty_inner(StateVersion::V1); - } - fn storage_root_transaction_is_non_empty_inner(state_version: StateVersion) { - let (new_root, mut tx) = test_trie(state_version) + parameterized_test!( + storage_root_transaction_is_non_empty, + storage_root_transaction_is_non_empty_inner + ); + fn storage_root_transaction_is_non_empty_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let (new_root, mut tx) = test_trie(state_version, cache, recorder) .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); assert!(!tx.drain().is_empty()); - assert!(new_root != test_trie(state_version).storage_root(iter::empty(), state_version).0); + assert!( + new_root != + test_trie(state_version, None, None) + .storage_root(iter::empty(), state_version) + .0 + ); } - #[test] - fn prefix_walking_works() { - prefix_walking_works_inner(StateVersion::V0); - prefix_walking_works_inner(StateVersion::V1); - } - fn prefix_walking_works_inner(state_version: StateVersion) { - let trie = test_trie(state_version); + parameterized_test!(prefix_walking_works, prefix_walking_works_inner); + fn prefix_walking_works_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let trie = test_trie(state_version, cache, recorder); let mut seen = HashSet::new(); trie.for_keys_with_prefix(b"value", |key| { @@ -371,23 +646,475 @@ pub mod tests { assert_eq!(seen, expected); } - #[test] - fn keys_with_empty_prefix_returns_all_keys() { - keys_with_empty_prefix_returns_all_keys_inner(StateVersion::V0); - keys_with_empty_prefix_returns_all_keys_inner(StateVersion::V1); - } - fn keys_with_empty_prefix_returns_all_keys_inner(state_version: StateVersion) { + parameterized_test!( + keys_with_empty_prefix_returns_all_keys, + keys_with_empty_prefix_returns_all_keys_inner + ); + fn keys_with_empty_prefix_returns_all_keys_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { let (test_db, test_root) = test_db(state_version); - let expected = TrieDB::new(&test_db, &test_root) - .unwrap() + let expected = TrieDBBuilder::new(&test_db, &test_root) + .build() .iter() .unwrap() .map(|d| d.unwrap().0.to_vec()) .collect::>(); - let trie = test_trie(state_version); + let trie = test_trie(state_version, cache, recorder); let keys = trie.keys(&[]); assert_eq!(expected, keys); } + + parameterized_test!( + proof_is_empty_until_value_is_read, + proof_is_empty_until_value_is_read_inner + ); + fn proof_is_empty_until_value_is_read_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let trie_backend = test_trie(state_version, cache, recorder); + assert!(TrieBackendBuilder::wrap(&trie_backend) + .with_recorder(Recorder::default()) + .build() + .extract_proof() + .unwrap() + .is_empty()); + } + + parameterized_test!( + proof_is_non_empty_after_value_is_read, + proof_is_non_empty_after_value_is_read_inner + ); + fn proof_is_non_empty_after_value_is_read_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let trie_backend = test_trie(state_version, cache, recorder); + let backend = TrieBackendBuilder::wrap(&trie_backend) + .with_recorder(Recorder::default()) + .build(); + 
assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); + assert!(!backend.extract_proof().unwrap().is_empty()); + } + + #[test] + fn proof_is_invalid_when_does_not_contains_root() { + let result = create_proof_check_backend::( + H256::from_low_u64_be(1), + StorageProof::empty(), + ); + assert!(result.is_err()); + } + + parameterized_test!(passes_through_backend_calls, passes_through_backend_calls_inner); + fn passes_through_backend_calls_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let trie_backend = test_trie(state_version, cache, recorder); + let proving_backend = TrieBackendBuilder::wrap(&trie_backend) + .with_recorder(Recorder::default()) + .build(); + assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); + assert_eq!(trie_backend.pairs(), proving_backend.pairs()); + + let (trie_root, mut trie_mdb) = + trie_backend.storage_root(std::iter::empty(), state_version); + let (proving_root, mut proving_mdb) = + proving_backend.storage_root(std::iter::empty(), state_version); + assert_eq!(trie_root, proving_root); + assert_eq!(trie_mdb.drain(), proving_mdb.drain()); + } + + #[test] + fn proof_recorded_and_checked_top() { + proof_recorded_and_checked_inner(StateVersion::V0); + proof_recorded_and_checked_inner(StateVersion::V1); + } + fn proof_recorded_and_checked_inner(state_version: StateVersion) { + let size_content = 34; // above hashable value threshold. + let value_range = 0..64; + let contents = value_range + .clone() + .map(|i| (vec![i], Some(vec![i; size_content]))) + .collect::>(); + let in_memory = InMemoryBackend::::default(); + let in_memory = in_memory.update(vec![(None, contents)], state_version); + let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0; + value_range.clone().for_each(|i| { + assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) + }); + + let trie = in_memory.as_trie_backend(); + let trie_root = trie.storage_root(std::iter::empty(), state_version).0; + assert_eq!(in_memory_root, trie_root); + value_range + .clone() + .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); + + for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { + // Run multiple times to have a different cache conditions. + for i in 0..5 { + if let Some(cache) = &cache { + if i == 2 { + cache.reset_node_cache(); + } else if i == 3 { + cache.reset_value_cache(); + } + } + + let proving = TrieBackendBuilder::wrap(&trie) + .with_recorder(Recorder::default()) + .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) + .build(); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); + + let proof = proving.extract_proof().unwrap(); + + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof) + .unwrap(); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); + } + } + } + + #[test] + fn proof_record_works_with_iter() { + proof_record_works_with_iter_inner(StateVersion::V0); + proof_record_works_with_iter_inner(StateVersion::V1); + } + fn proof_record_works_with_iter_inner(state_version: StateVersion) { + for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { + // Run multiple times to have a different cache conditions. 
+ for i in 0..5 { + if let Some(cache) = &cache { + if i == 2 { + cache.reset_node_cache(); + } else if i == 3 { + cache.reset_value_cache(); + } + } + + let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); + let in_memory = InMemoryBackend::::default(); + let in_memory = in_memory.update(vec![(None, contents)], state_version); + let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0; + (0..64) + .for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); + + let trie = in_memory.as_trie_backend(); + let trie_root = trie.storage_root(std::iter::empty(), state_version).0; + assert_eq!(in_memory_root, trie_root); + (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); + + let proving = TrieBackendBuilder::wrap(&trie) + .with_recorder(Recorder::default()) + .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) + .build(); + + (0..63).for_each(|i| { + assert_eq!(proving.next_storage_key(&[i]).unwrap(), Some(vec![i + 1])) + }); + + let proof = proving.extract_proof().unwrap(); + + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof) + .unwrap(); + (0..63).for_each(|i| { + assert_eq!(proof_check.next_storage_key(&[i]).unwrap(), Some(vec![i + 1])) + }); + } + } + } + + #[test] + fn proof_recorded_and_checked_with_child() { + proof_recorded_and_checked_with_child_inner(StateVersion::V0); + proof_recorded_and_checked_with_child_inner(StateVersion::V1); + } + fn proof_recorded_and_checked_with_child_inner(state_version: StateVersion) { + let child_info_1 = ChildInfo::new_default(b"sub1"); + let child_info_2 = ChildInfo::new_default(b"sub2"); + let child_info_1 = &child_info_1; + let child_info_2 = &child_info_2; + let contents = vec![ + (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), + (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), + ]; + let in_memory = new_in_mem::>(); + let in_memory = in_memory.update(contents, state_version); + let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; + let in_memory_root = in_memory + .full_storage_root( + std::iter::empty(), + child_storage_keys.iter().map(|k| (k, std::iter::empty())), + state_version, + ) + .0; + (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); + (28..65).for_each(|i| { + assert_eq!(in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i]) + }); + (10..15).for_each(|i| { + assert_eq!(in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i]) + }); + + for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { + // Run multiple times to have a different cache conditions. 
+ for i in 0..5 { + eprintln!("Running with cache {}, iteration {}", cache.is_some(), i); + + if let Some(cache) = &cache { + if i == 2 { + cache.reset_node_cache(); + } else if i == 3 { + cache.reset_value_cache(); + } + } + + let trie = in_memory.as_trie_backend(); + let trie_root = trie.storage_root(std::iter::empty(), state_version).0; + assert_eq!(in_memory_root, trie_root); + (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); + + let proving = TrieBackendBuilder::wrap(&trie) + .with_recorder(Recorder::default()) + .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) + .build(); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); + + let proof = proving.extract_proof().unwrap(); + + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof) + .unwrap(); + assert!(proof_check.storage(&[0]).is_err()); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); + // note that it is include in root because proof close + assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); + assert_eq!(proof_check.storage(&[64]).unwrap(), None); + + let proving = TrieBackendBuilder::wrap(&trie) + .with_recorder(Recorder::default()) + .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) + .build(); + assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(child_info_1, &[25]), Ok(None)); + assert_eq!(proving.child_storage(child_info_2, &[14]), Ok(Some(vec![14]))); + assert_eq!(proving.child_storage(child_info_2, &[25]), Ok(None)); + + let proof = proving.extract_proof().unwrap(); + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof) + .unwrap(); + assert_eq!( + proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), + vec![64] + ); + assert_eq!(proof_check.child_storage(child_info_1, &[25]).unwrap(), None); + + assert_eq!( + proof_check.child_storage(child_info_2, &[14]).unwrap().unwrap(), + vec![14] + ); + assert_eq!(proof_check.child_storage(child_info_2, &[25]).unwrap(), None); + } + } + } + + /// This tests an edge case when recording a child trie access with a cache. + /// + /// The accessed value/node is in the cache, but not the nodes to get to this value. So, + /// the recorder will need to traverse the trie to access these nodes from the backend when the + /// storage proof is generated. 
+ #[test] + fn child_proof_recording_with_edge_cases_works() { + child_proof_recording_with_edge_cases_works_inner(StateVersion::V0); + child_proof_recording_with_edge_cases_works_inner(StateVersion::V1); + } + fn child_proof_recording_with_edge_cases_works_inner(state_version: StateVersion) { + let child_info_1 = ChildInfo::new_default(b"sub1"); + let child_info_1 = &child_info_1; + let contents = vec![ + (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), + ( + Some(child_info_1.clone()), + (28..65) + .map(|i| (vec![i], Some(vec![i]))) + // Some big value to ensure we get a new node + .chain(std::iter::once((vec![65], Some(vec![65; 128])))) + .collect(), + ), + ]; + let in_memory = new_in_mem::>(); + let in_memory = in_memory.update(contents, state_version); + let child_storage_keys = vec![child_info_1.to_owned()]; + let in_memory_root = in_memory + .full_storage_root( + std::iter::empty(), + child_storage_keys.iter().map(|k| (k, std::iter::empty())), + state_version, + ) + .0; + + let child_1_root = + in_memory.child_storage_root(child_info_1, std::iter::empty(), state_version).0; + let trie = in_memory.as_trie_backend(); + let nodes = { + let backend = TrieBackendBuilder::wrap(trie).with_recorder(Default::default()).build(); + let value = backend.child_storage(child_info_1, &[65]).unwrap().unwrap(); + let value_hash = BlakeTwo256::hash(&value); + assert_eq!(value, vec![65; 128]); + + let proof = backend.extract_proof().unwrap(); + + let mut nodes = Vec::new(); + for node in proof.iter_nodes() { + let hash = BlakeTwo256::hash(&node); + // Only insert the node/value that contains the important data. + if hash != value_hash { + let node = sp_trie::NodeCodec::::decode(&node) + .unwrap() + .to_owned_node::>() + .unwrap(); + + if let Some(data) = node.data() { + if data == &vec![65; 128] { + nodes.push((hash, node)); + } + } + } else if hash == value_hash { + nodes.push((hash, trie_db::node::NodeOwned::Value(node.into(), hash))); + } + } + + nodes + }; + + let cache = SharedTrieCache::::new(CacheSize::Unlimited); + { + let local_cache = cache.local_cache(); + let mut trie_cache = local_cache.as_trie_db_cache(child_1_root); + + // Put the value/node into the cache. + for (hash, node) in nodes { + trie_cache.get_or_insert_node(hash, &mut || Ok(node.clone())).unwrap(); + + if let Some(data) = node.data() { + trie_cache.cache_value_for_key(&[65], (data.clone(), hash).into()); + } + } + } + + { + // Record the access + let proving = TrieBackendBuilder::wrap(&trie) + .with_recorder(Recorder::default()) + .with_cache(cache.local_cache()) + .build(); + assert_eq!(proving.child_storage(child_info_1, &[65]), Ok(Some(vec![65; 128]))); + + let proof = proving.extract_proof().unwrap(); + // And check that we have a correct proof. 
+ let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + assert_eq!( + proof_check.child_storage(child_info_1, &[65]).unwrap().unwrap(), + vec![65; 128] + ); + } + } + + parameterized_test!( + storage_proof_encoded_size_estimation_works, + storage_proof_encoded_size_estimation_works_inner + ); + fn storage_proof_encoded_size_estimation_works_inner( + state_version: StateVersion, + cache: Option, + recorder: Option, + ) { + let has_cache = cache.is_some(); + let trie_backend = test_trie(state_version, cache, recorder); + let keys = &[ + &b"key"[..], + &b"value1"[..], + &b"value2"[..], + &b"doesnotexist"[..], + &b"doesnotexist2"[..], + ]; + + fn check_estimation( + backend: TrieBackend< + impl TrieBackendStorage, + BlakeTwo256, + &'_ LocalTrieCache, + >, + has_cache: bool, + ) { + let estimation = backend.essence.recorder.as_ref().unwrap().estimate_encoded_size(); + let storage_proof = backend.extract_proof().unwrap(); + let storage_proof_size = + storage_proof.into_nodes().into_iter().map(|n| n.encoded_size()).sum::(); + + if has_cache { + // Estimation is not entirely correct when we have values already cached. + assert!(estimation >= storage_proof_size) + } else { + assert_eq!(storage_proof_size, estimation); + } + } + + for n in 0..keys.len() { + let backend = TrieBackendBuilder::wrap(&trie_backend) + .with_recorder(Recorder::default()) + .build(); + + // Read n keys + (0..n).for_each(|i| { + backend.storage(keys[i]).unwrap(); + }); + + // Check the estimation + check_estimation(backend, has_cache); + } + } + + #[test] + fn new_data_is_added_to_the_cache() { + let shared_cache = SharedTrieCache::new(CacheSize::Unlimited); + let new_data = vec![ + (&b"new_data0"[..], Some(&b"0"[..])), + (&b"new_data1"[..], Some(&b"1"[..])), + (&b"new_data2"[..], Some(&b"2"[..])), + (&b"new_data3"[..], Some(&b"3"[..])), + (&b"new_data4"[..], Some(&b"4"[..])), + ]; + + let new_root = { + let trie = test_trie(StateVersion::V1, Some(shared_cache.local_cache()), None); + trie.storage_root(new_data.clone().into_iter(), StateVersion::V1).0 + }; + + let local_cache = shared_cache.local_cache(); + let mut cache = local_cache.as_trie_db_cache(new_root); + // All the data should be cached now + for (key, value) in new_data { + assert_eq!( + value.unwrap(), + cache.lookup_value_for_key(key).unwrap().data().flatten().unwrap().as_ref() + ); + } + } } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 7d910cc9602cc..dda7b51ab08c6 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -18,23 +18,32 @@ //! Trie-based state machine backend essence used to read values //! from storage. 
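// A short sketch of what the recorder/cache plumbing below enables at the backend level,
// assuming the same re-export paths as in the earlier sketch: one node-wide
// `SharedTrieCache`, a per-state `LocalTrieCache` obtained via `local_cache()` and plugged
// into the backend, and an automatic merge back into the shared cache on drop.
use sp_core::storage::StateVersion;
use sp_runtime::traits::BlakeTwo256;
use sp_state_machine::{
    backend::{AsTrieBackend, Backend},
    InMemoryBackend, TrieBackendBuilder,
};
use sp_trie::cache::{CacheSize, SharedTrieCache};

fn read_through_a_shared_cache() {
    // One shared cache for the whole node, bounded to roughly 64 MiB.
    let shared_cache = SharedTrieCache::<BlakeTwo256>::new(CacheSize::Maximum(64 * 1024 * 1024));

    let contents = (0u8..8).map(|i| (vec![i], Some(vec![i]))).collect::<Vec<_>>();
    let state = InMemoryBackend::<BlakeTwo256>::default()
        .update(vec![(None, contents)], StateVersion::V1);

    // One local cache per state instance; it keeps whatever the shared cache could not serve.
    let backend = TrieBackendBuilder::wrap(state.as_trie_backend())
        .with_cache(shared_cache.local_cache())
        .build();
    assert_eq!(backend.storage(&[3]).unwrap(), Some(vec![3]));

    // Dropping the backend drops the local cache, which merges the newly cached trie
    // nodes and values back into `shared_cache`.
    drop(backend);
}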
-use crate::{backend::Consolidate, debug, warn, StorageKey, StorageValue}; -use codec::Encode; +use crate::{ + backend::Consolidate, debug, trie_backend::AsLocalTrieCache, warn, StorageKey, StorageValue, +}; +use codec::Codec; use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; +#[cfg(not(feature = "std"))] +use sp_std::marker::PhantomData; use sp_std::{boxed::Box, vec::Vec}; +#[cfg(feature = "std")] +use sp_trie::recorder::Recorder; use sp_trie::{ - child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_value, - read_trie_value, - trie_types::{TrieDB, TrieError}, - DBValue, KeySpacedDB, LayoutV1 as Layout, Trie, TrieDBIterator, TrieDBKeyIterator, + child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash, + read_child_trie_value, read_trie_value, + trie_types::{TrieDBBuilder, TrieError}, + DBValue, KeySpacedDB, NodeCodec, Trie, TrieCache, TrieDBIterator, TrieDBKeyIterator, + TrieRecorder, }; #[cfg(feature = "std")] -use std::collections::HashMap; -#[cfg(feature = "std")] -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; + +// In this module, we only use layout for read operation and empty root, +// where V1 and V0 are equivalent. +use sp_trie::LayoutV1 as Layout; #[cfg(not(feature = "std"))] macro_rules! format { @@ -68,18 +77,21 @@ impl Cache { } /// Patricia trie-based pairs storage essence. -pub struct TrieBackendEssence, H: Hasher> { +pub struct TrieBackendEssence, H: Hasher, C> { storage: S, root: H::Out, empty: H::Out, #[cfg(feature = "std")] pub(crate) cache: Arc>>, + #[cfg(feature = "std")] + pub(crate) trie_node_cache: Option, + #[cfg(feature = "std")] + pub(crate) recorder: Option>, + #[cfg(not(feature = "std"))] + _phantom: PhantomData, } -impl, H: Hasher> TrieBackendEssence -where - H::Out: Encode, -{ +impl, H: Hasher, C> TrieBackendEssence { /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { TrieBackendEssence { @@ -88,6 +100,30 @@ where empty: H::hash(&[0u8]), #[cfg(feature = "std")] cache: Arc::new(RwLock::new(Cache::new())), + #[cfg(feature = "std")] + trie_node_cache: None, + #[cfg(feature = "std")] + recorder: None, + #[cfg(not(feature = "std"))] + _phantom: PhantomData, + } + } + + /// Create new trie-based backend. + #[cfg(feature = "std")] + pub fn new_with_cache_and_recorder( + storage: S, + root: H::Out, + cache: Option, + recorder: Option>, + ) -> Self { + TrieBackendEssence { + storage, + root, + empty: H::hash(&[0u8]), + cache: Arc::new(RwLock::new(Cache::new())), + trie_node_cache: cache, + recorder, } } @@ -96,6 +132,11 @@ where &self.storage } + /// Get backend storage mutable reference. + pub fn backend_storage_mut(&mut self) -> &mut S { + &mut self.storage + } + /// Get trie root. pub fn root(&self) -> &H::Out { &self.root @@ -120,7 +161,97 @@ where pub fn into_storage(self) -> S { self.storage } +} + +impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEssence { + /// Call the given closure passing it the recorder and the cache. + /// + /// If the given `storage_root` is `None`, `self.root` will be used. 
+ #[cfg(feature = "std")] + fn with_recorder_and_cache( + &self, + storage_root: Option, + callback: impl FnOnce( + Option<&mut dyn TrieRecorder>, + Option<&mut dyn TrieCache>>, + ) -> R, + ) -> R { + let storage_root = storage_root.unwrap_or_else(|| self.root); + let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); + let recorder = recorder.as_mut().map(|r| r as _); + let mut cache = self + .trie_node_cache + .as_ref() + .map(|c| c.as_local_trie_cache().as_trie_db_cache(storage_root)); + let cache = cache.as_mut().map(|c| c as _); + + callback(recorder, cache) + } + + #[cfg(not(feature = "std"))] + fn with_recorder_and_cache( + &self, + _: Option, + callback: impl FnOnce( + Option<&mut dyn TrieRecorder>, + Option<&mut dyn TrieCache>>, + ) -> R, + ) -> R { + callback(None, None) + } + + /// Call the given closure passing it the recorder and the cache. + /// + /// This function must only be used when the operation in `callback` is + /// calculating a `storage_root`. It is expected that `callback` returns + /// the new storage root. This is required to register the changes in the cache + /// for the correct storage root. + #[cfg(feature = "std")] + fn with_recorder_and_cache_for_storage_root( + &self, + callback: impl FnOnce( + Option<&mut dyn TrieRecorder>, + Option<&mut dyn TrieCache>>, + ) -> (Option, R), + ) -> R { + let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); + let recorder = recorder.as_mut().map(|r| r as _); + + let result = if let Some(local_cache) = self.trie_node_cache.as_ref() { + let mut cache = local_cache.as_local_trie_cache().as_trie_db_mut_cache(); + + let (new_root, r) = callback(recorder, Some(&mut cache)); + + if let Some(new_root) = new_root { + cache.merge_into(local_cache.as_local_trie_cache(), new_root); + } + + r + } else { + callback(recorder, None).1 + }; + + result + } + + #[cfg(not(feature = "std"))] + fn with_recorder_and_cache_for_storage_root( + &self, + callback: impl FnOnce( + Option<&mut dyn TrieRecorder>, + Option<&mut dyn TrieCache>>, + ) -> (Option, R), + ) -> R { + callback(None, None).1 + } +} + +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> + TrieBackendEssence +where + H::Out: Codec + Ord, +{ /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. pub fn next_storage_key(&self, key: &[u8]) -> Result> { @@ -184,39 +315,82 @@ where dyn_eph = self; } - let trie = - TrieDB::::new(dyn_eph, root).map_err(|e| format!("TrieDB creation error: {}", e))?; - let mut iter = trie.key_iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; + self.with_recorder_and_cache(Some(*root), |recorder, cache| { + let trie = TrieDBBuilder::::new(dyn_eph, root) + .with_optional_recorder(recorder) + .with_optional_cache(cache) + .build(); - // The key just after the one given in input, basically `key++0`. - // Note: We are sure this is the next key if: - // * size of key has no limit (i.e. we can always add 0 to the path), - // * and no keys can be inserted between `key` and `key++0` (this is ensured by sp-io). - let mut potential_next_key = Vec::with_capacity(key.len() + 1); - potential_next_key.extend_from_slice(key); - potential_next_key.push(0); + let mut iter = trie.key_iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; - iter.seek(&potential_next_key) - .map_err(|e| format!("TrieDB iterator seek error: {}", e))?; + // The key just after the one given in input, basically `key++0`. 
+ // Note: We are sure this is the next key if: + // * size of key has no limit (i.e. we can always add 0 to the path), + // * and no keys can be inserted between `key` and `key++0` (this is ensured by sp-io). + let mut potential_next_key = Vec::with_capacity(key.len() + 1); + potential_next_key.extend_from_slice(key); + potential_next_key.push(0); - let next_element = iter.next(); + iter.seek(&potential_next_key) + .map_err(|e| format!("TrieDB iterator seek error: {}", e))?; - let next_key = if let Some(next_element) = next_element { - let next_key = - next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; - Some(next_key) - } else { - None - }; + let next_element = iter.next(); + + let next_key = if let Some(next_element) = next_element { + let next_key = + next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; + Some(next_key) + } else { + None + }; - Ok(next_key) + Ok(next_key) + }) + } + + /// Returns the hash value + pub fn storage_hash(&self, key: &[u8]) -> Result> { + let map_e = |e| format!("Trie lookup error: {}", e); + + self.with_recorder_and_cache(None, |recorder, cache| { + TrieDBBuilder::new(self, &self.root) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build() + .get_hash(key) + .map_err(map_e) + }) } /// Get the value of storage at given key. pub fn storage(&self, key: &[u8]) -> Result> { let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value::, _>(self, &self.root, key).map_err(map_e) + self.with_recorder_and_cache(None, |recorder, cache| { + read_trie_value::, _>(self, &self.root, key, recorder, cache).map_err(map_e) + }) + } + + /// Returns the hash value + pub fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Result> { + let child_root = match self.child_root(child_info)? { + Some(root) => root, + None => return Ok(None), + }; + + let map_e = |e| format!("Trie lookup error: {}", e); + + self.with_recorder_and_cache(Some(child_root), |recorder, cache| { + read_child_trie_hash::, _>( + child_info.keyspace(), + self, + &child_root, + key, + recorder, + cache, + ) + .map_err(map_e) + }) } /// Get the value of child storage at given key. @@ -225,15 +399,24 @@ where child_info: &ChildInfo, key: &[u8], ) -> Result> { - let root = match self.child_root(child_info)? { + let child_root = match self.child_root(child_info)? { Some(root) => root, None => return Ok(None), }; let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value::, _>(child_info.keyspace(), self, &root, key) + self.with_recorder_and_cache(Some(child_root), |recorder, cache| { + read_child_trie_value::, _>( + child_info.keyspace(), + self, + &child_root, + key, + recorder, + cache, + ) .map_err(map_e) + }) } /// Retrieve all entries keys of storage and call `f` for each of those keys. 
@@ -338,28 +521,33 @@ where maybe_start_at: Option<&[u8]>, ) { let mut iter = move |db| -> sp_std::result::Result<(), Box>> { - let trie = TrieDB::::new(db, root)?; - let prefix = maybe_prefix.unwrap_or(&[]); - let iter = match maybe_start_at { - Some(start_at) => - TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at), - None => TrieDBKeyIterator::new_prefixed(&trie, prefix), - }?; - - for x in iter { - let key = x?; - - debug_assert!(maybe_prefix - .as_ref() - .map(|prefix| key.starts_with(prefix)) - .unwrap_or(true)); - - if !f(&key) { - break + self.with_recorder_and_cache(Some(*root), |recorder, cache| { + let trie = TrieDBBuilder::::new(db, root) + .with_optional_recorder(recorder) + .with_optional_cache(cache) + .build(); + let prefix = maybe_prefix.unwrap_or(&[]); + let iter = match maybe_start_at { + Some(start_at) => + TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at), + None => TrieDBKeyIterator::new_prefixed(&trie, prefix), + }?; + + for x in iter { + let key = x?; + + debug_assert!(maybe_prefix + .as_ref() + .map(|prefix| key.starts_with(prefix)) + .unwrap_or(true)); + + if !f(&key) { + break + } } - } - Ok(()) + Ok(()) + }) }; let result = if let Some(child_info) = child_info { @@ -383,25 +571,30 @@ where allow_missing_nodes: bool, ) -> Result { let mut iter = move |db| -> sp_std::result::Result>> { - let trie = TrieDB::::new(db, root)?; - - let prefix = prefix.unwrap_or(&[]); - let iterator = if let Some(start_at) = start_at { - TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)? - } else { - TrieDBIterator::new_prefixed(&trie, prefix)? - }; - for x in iterator { - let (key, value) = x?; - - debug_assert!(key.starts_with(prefix)); - - if !f(key, value) { - return Ok(false) + self.with_recorder_and_cache(Some(*root), |recorder, cache| { + let trie = TrieDBBuilder::::new(db, root) + .with_optional_recorder(recorder) + .with_optional_cache(cache) + .build(); + + let prefix = prefix.unwrap_or(&[]); + let iterator = if let Some(start_at) = start_at { + TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)? + } else { + TrieDBIterator::new_prefixed(&trie, prefix)? + }; + for x in iterator { + let (key, value) = x?; + + debug_assert!(key.starts_with(prefix)); + + if !f(key, value) { + return Ok(false) + } } - } - Ok(true) + Ok(true) + }) }; let result = if let Some(child_info) = child_info { @@ -436,14 +629,20 @@ where /// Returns all `(key, value)` pairs in the trie. pub fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { let collect_all = || -> sp_std::result::Result<_, Box>> { - let trie = TrieDB::::new(self, &self.root)?; - let mut v = Vec::new(); - for x in trie.iter()? { - let (key, value) = x?; - v.push((key.to_vec(), value.to_vec())); - } + self.with_recorder_and_cache(None, |recorder, cache| { + let trie = TrieDBBuilder::::new(self, self.root()) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build(); + + let mut v = Vec::new(); + for x in trie.iter()? 
{ + let (key, value) = x?; + v.push((key.to_vec(), value.to_vec())); + } - Ok(v) + Ok(v) + }) }; match collect_all() { @@ -467,27 +666,28 @@ where &self, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, S::Overlay) - where - H::Out: Ord, - { + ) -> (H::Out, S::Overlay) { let mut write_overlay = S::Overlay::default(); - let mut root = self.root; - { + let root = self.with_recorder_and_cache_for_storage_root(|recorder, cache| { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); let res = match state_version { - StateVersion::V0 => - delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta), - StateVersion::V1 => - delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta), + StateVersion::V0 => delta_trie_root::, _, _, _, _, _>( + &mut eph, self.root, delta, recorder, cache, + ), + StateVersion::V1 => delta_trie_root::, _, _, _, _, _>( + &mut eph, self.root, delta, recorder, cache, + ), }; match res { - Ok(ret) => root = ret, - Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + Ok(ret) => (Some(ret), ret), + Err(e) => { + warn!(target: "trie", "Failed to write to trie: {}", e); + (None, self.root) + }, } - } + }); (root, write_overlay) } @@ -499,15 +699,12 @@ where child_info: &ChildInfo, delta: impl Iterator)>, state_version: StateVersion, - ) -> (H::Out, bool, S::Overlay) - where - H::Out: Ord, - { + ) -> (H::Out, bool, S::Overlay) { let default_root = match child_info.child_type() { ChildType::ParentKeyId => empty_child_trie_root::>(), }; let mut write_overlay = S::Overlay::default(); - let mut root = match self.child_root(child_info) { + let child_root = match self.child_root(child_info) { Ok(Some(hash)) => hash, Ok(None) => default_root, Err(e) => { @@ -516,32 +713,39 @@ where }, }; - { + let new_child_root = self.with_recorder_and_cache_for_storage_root(|recorder, cache| { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); match match state_version { StateVersion::V0 => child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, - root, + child_root, delta, + recorder, + cache, ), StateVersion::V1 => child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), &mut eph, - root, + child_root, delta, + recorder, + cache, ), } { - Ok(ret) => root = ret, - Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + Ok(ret) => (Some(ret), ret), + Err(e) => { + warn!(target: "trie", "Failed to write to trie: {}", e); + (None, child_root) + }, } - } + }); - let is_default = root == default_root; + let is_default = new_child_root == default_root; - (root, is_default, write_overlay) + (new_child_root, is_default, write_overlay) } } @@ -615,6 +819,14 @@ pub trait TrieBackendStorage: Send + Sync { fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; } +impl, H: Hasher> TrieBackendStorage for &T { + type Overlay = T::Overlay; + + fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { + (*self).get(key, prefix) + } +} + // This implementation is used by normal storage trie clients. 
#[cfg(feature = "std")] impl TrieBackendStorage for Arc> { @@ -637,7 +849,9 @@ where } } -impl, H: Hasher> AsHashDB for TrieBackendEssence { +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> AsHashDB + for TrieBackendEssence +{ fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } @@ -646,7 +860,9 @@ impl, H: Hasher> AsHashDB for TrieBackendEs } } -impl, H: Hasher> HashDB for TrieBackendEssence { +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> HashDB + for TrieBackendEssence +{ fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { return Some([0u8].to_vec()) @@ -677,7 +893,9 @@ impl, H: Hasher> HashDB for TrieBackendEsse } } -impl, H: Hasher> HashDBRef for TrieBackendEssence { +impl, H: Hasher, C: AsLocalTrieCache + Send + Sync> + HashDBRef for TrieBackendEssence +{ fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) } @@ -692,7 +910,8 @@ mod test { use super::*; use sp_core::{Blake2Hasher, H256}; use sp_trie::{ - trie_types::TrieDBMutV1 as TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut, + cache::LocalTrieCache, trie_types::TrieDBMutBuilderV1 as TrieDBMutBuilder, KeySpacedDBMut, + PrefixedMemoryDB, TrieMut, }; #[test] @@ -706,7 +925,7 @@ mod test { let mut mdb = PrefixedMemoryDB::::default(); { - let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); + let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_1).build(); trie.insert(b"3", &[1]).expect("insert failed"); trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); @@ -715,18 +934,18 @@ mod test { let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); // reuse of root_1 implicitly assert child trie root is same // as top trie (contents must remain the same). 
- let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); + let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_1).build(); trie.insert(b"3", &[1]).expect("insert failed"); trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); } { - let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); + let mut trie = TrieDBMutBuilder::new(&mut mdb, &mut root_2).build(); trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) .expect("insert failed"); }; - let essence_1 = TrieBackendEssence::new(mdb, root_1); + let essence_1 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_1); assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); @@ -734,8 +953,8 @@ mod test { assert_eq!(essence_1.next_storage_key(b"5"), Ok(Some(b"6".to_vec()))); assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); - let mdb = essence_1.into_storage(); - let essence_2 = TrieBackendEssence::new(mdb, root_2); + let mdb = essence_1.backend_storage().clone(); + let essence_2 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_2); assert_eq!(essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec()))); diff --git a/primitives/transaction-storage-proof/src/lib.rs b/primitives/transaction-storage-proof/src/lib.rs index ee0c8e4ec8e29..fde84c1c58b1a 100644 --- a/primitives/transaction-storage-proof/src/lib.rs +++ b/primitives/transaction-storage-proof/src/lib.rs @@ -200,7 +200,8 @@ pub mod registration { let mut transaction_root = sp_trie::empty_trie_root::(); { let mut trie = - sp_trie::TrieDBMut::::new(&mut db, &mut transaction_root); + sp_trie::TrieDBMutBuilder::::new(&mut db, &mut transaction_root) + .build(); let chunks = transaction.chunks(CHUNK_SIZE).map(|c| c.to_vec()); for (index, chunk) in chunks.enumerate() { let index = encode_index(index as u32); diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index a3754461f890e..84b97a9f88da8 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -18,12 +18,19 @@ name = "bench" harness = false [dependencies] +ahash = { version = "0.7.6", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +hashbrown = { version = "0.12.3", optional = true } hash-db = { version = "0.15.2", default-features = false } +lazy_static = { version = "1.4.0", optional = true } +lru = { version = "0.7.5", optional = true } memory-db = { version = "0.29.0", default-features = false } +nohash-hasher = { version = "0.2.0", optional = true } +parking_lot = { version = "0.12.0", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } thiserror = { version = "1.0.30", optional = true } -trie-db = { version = "0.23.1", default-features = false } +tracing = { version = "0.1.29", optional = true } +trie-db = { version = "0.24.0", default-features = false } trie-root = { version = "0.17.0", default-features = false } sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } @@ -31,20 +38,27 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] criterion = "0.3.3" hex-literal = "0.3.4" -trie-bench = "0.30.0" +trie-bench = "0.31.0" trie-standardmap = "0.15.2" sp-runtime = { version = 
"6.0.0", path = "../runtime" } [features] default = ["std"] std = [ + "ahash", "codec/std", + "hashbrown", "hash-db/std", + "lazy_static", + "lru", "memory-db/std", + "nohash-hasher", + "parking_lot", "scale-info/std", "sp-core/std", "sp-std/std", "thiserror", + "tracing", "trie-db/std", "trie-root/std", ] diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs new file mode 100644 index 0000000000000..a588f20bdb7b4 --- /dev/null +++ b/primitives/trie/src/cache/mod.rs @@ -0,0 +1,686 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Trie Cache +//! +//! Provides an implementation of the [`TrieCache`](trie_db::TrieCache) trait. +//! The implementation is split into three types [`SharedTrieCache`], [`LocalTrieCache`] and +//! [`TrieCache`]. The [`SharedTrieCache`] is the instance that should be kept around for the entire +//! lifetime of the node. It will store all cached trie nodes and values on a global level. Then +//! there is the [`LocalTrieCache`] that should be kept around per state instance requested from the +//! backend. As there are very likely multiple accesses to the state per instance, this +//! [`LocalTrieCache`] is used to cache the nodes and the values before they are merged back to the +//! shared instance. Last but not least there is the [`TrieCache`] that is being used per access to +//! the state. It will use the [`SharedTrieCache`] and the [`LocalTrieCache`] to fulfill cache +//! requests. If both of them don't provide the requested data it will be inserted into the +//! [`LocalTrieCache`] and then later into the [`SharedTrieCache`]. +//! +//! The [`SharedTrieCache`] is bound to some maximum number of bytes. It is ensured that it never +//! runs above this limit. However as long as data is cached inside a [`LocalTrieCache`] it isn't +//! taken into account when limiting the [`SharedTrieCache`]. This means that for the lifetime of a +//! [`LocalTrieCache`] the actual memory usage could be above the allowed maximum. + +use crate::{Error, NodeCodec}; +use hash_db::Hasher; +use hashbrown::HashSet; +use nohash_hasher::BuildNoHashHasher; +use parking_lot::{Mutex, MutexGuard, RwLockReadGuard}; +use shared_cache::{SharedValueCache, ValueCacheKey}; +use std::{ + collections::{hash_map::Entry as MapEntry, HashMap}, + sync::Arc, +}; +use trie_db::{node::NodeOwned, CachedValue}; + +mod shared_cache; + +pub use shared_cache::SharedTrieCache; + +use self::shared_cache::{SharedTrieCacheInner, ValueCacheKeyHash}; + +const LOG_TARGET: &str = "trie-cache"; + +/// The size of the cache. +#[derive(Debug, Clone, Copy)] +pub enum CacheSize { + /// Do not limit the cache size. + Unlimited, + /// Let the cache in maximum use the given amount of bytes. + Maximum(usize), +} + +impl CacheSize { + /// Returns `true` if the `current_size` exceeds the allowed size. 
+ fn exceeds(&self, current_size: usize) -> bool { + match self { + Self::Unlimited => false, + Self::Maximum(max) => *max < current_size, + } + } +} + +/// The local trie cache. +/// +/// This cache should be used per state instance created by the backend. One state instance is +/// referring to the state of one block. It will cache all the accesses that are done to the state +/// which could not be fullfilled by the [`SharedTrieCache`]. These locally cached items are merged +/// back to the shared trie cache when this instance is dropped. +/// +/// When using [`Self::as_trie_db_cache`] or [`Self::as_trie_db_mut_cache`], it will lock Mutexes. +/// So, it is important that these methods are not called multiple times, because they otherwise +/// deadlock. +pub struct LocalTrieCache { + /// The shared trie cache that created this instance. + shared: SharedTrieCache, + /// The local cache for the trie nodes. + node_cache: Mutex>>, + /// Keeps track of all the trie nodes accessed in the shared cache. + /// + /// This will be used to ensure that these nodes are brought to the front of the lru when this + /// local instance is merged back to the shared cache. + shared_node_cache_access: Mutex>, + /// The local cache for the values. + value_cache: Mutex< + HashMap< + ValueCacheKey<'static, H::Out>, + CachedValue, + BuildNoHashHasher>, + >, + >, + /// Keeps track of all values accessed in the shared cache. + /// + /// This will be used to ensure that these nodes are brought to the front of the lru when this + /// local instance is merged back to the shared cache. This can actually lead to collision when + /// two [`ValueCacheKey`]s with different storage roots and keys map to the same hash. However, + /// as we only use this set to update the lru position it is fine, even if we bring the wrong + /// value to the top. The important part is that we always get the correct value from the value + /// cache for a given key. + shared_value_cache_access: + Mutex>>, +} + +impl LocalTrieCache { + /// Return self as a [`TrieDB`](trie_db::TrieDB) compatible cache. + /// + /// The given `storage_root` needs to be the storage root of the trie this cache is used for. + pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H> { + let shared_inner = self.shared.read_lock_inner(); + + let value_cache = ValueCache::ForStorageRoot { + storage_root, + local_value_cache: self.value_cache.lock(), + shared_value_cache_access: self.shared_value_cache_access.lock(), + }; + + TrieCache { + shared_inner, + local_cache: self.node_cache.lock(), + value_cache, + shared_node_cache_access: self.shared_node_cache_access.lock(), + } + } + + /// Return self as [`TrieDBMut`](trie_db::TrieDBMut) compatible cache. + /// + /// After finishing all operations with [`TrieDBMut`](trie_db::TrieDBMut) and having obtained + /// the new storage root, [`TrieCache::merge_into`] should be called to update this local + /// cache instance. If the function is not called, cached data is just thrown away and not + /// propagated to the shared cache. So, accessing these new items will be slower, but nothing + /// would break because of this. 
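// A minimal sketch of the three cache layers described in the module documentation above,
// using only calls added in this file; the `sp_trie::cache` import path and the
// `BlakeTwo256` hasher are assumptions for illustration.
fn cache_layers_sketch() {
    use sp_runtime::traits::BlakeTwo256;
    use sp_trie::cache::{CacheSize, SharedTrieCache};

    // Kept alive for the whole lifetime of the node, bounded in size.
    let shared = SharedTrieCache::<BlakeTwo256>::new(CacheSize::Maximum(1024 * 1024));

    // One per state instance (e.g. per block execution).
    let local = shared.local_cache();

    {
        // One per trie access: the `trie_db::TrieCache` implementation that is handed to
        // `TrieDBBuilder`. For write access, `as_trie_db_mut_cache` is used instead and
        // `merge_into(&local, new_storage_root)` is called once the new root is known.
        let _trie_cache = local.as_trie_db_cache(Default::default()); // a real storage root goes here
    }

    // Dropping the local cache merges everything it collected back into `shared`.
    drop(local);
}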
+ pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H> { + TrieCache { + shared_inner: self.shared.read_lock_inner(), + local_cache: self.node_cache.lock(), + value_cache: ValueCache::Fresh(Default::default()), + shared_node_cache_access: self.shared_node_cache_access.lock(), + } + } +} + +impl Drop for LocalTrieCache { + fn drop(&mut self) { + let mut shared_inner = self.shared.write_lock_inner(); + + shared_inner + .node_cache_mut() + .update(self.node_cache.lock().drain(), self.shared_node_cache_access.lock().drain()); + + shared_inner + .value_cache_mut() + .update(self.value_cache.lock().drain(), self.shared_value_cache_access.lock().drain()); + } +} + +/// The abstraction of the value cache for the [`TrieCache`]. +enum ValueCache<'a, H> { + /// The value cache is fresh, aka not yet associated to any storage root. + /// This is used for example when a new trie is being build, to cache new values. + Fresh(HashMap, CachedValue>), + /// The value cache is already bound to a specific storage root. + ForStorageRoot { + shared_value_cache_access: MutexGuard< + 'a, + HashSet>, + >, + local_value_cache: MutexGuard< + 'a, + HashMap< + ValueCacheKey<'static, H>, + CachedValue, + nohash_hasher::BuildNoHashHasher>, + >, + >, + storage_root: H, + }, +} + +impl + std::hash::Hash + Eq + Clone + Copy> ValueCache<'_, H> { + /// Get the value for the given `key`. + fn get<'a>( + &'a mut self, + key: &[u8], + shared_value_cache: &'a SharedValueCache, + ) -> Option<&CachedValue> { + match self { + Self::Fresh(map) => map.get(key), + Self::ForStorageRoot { local_value_cache, shared_value_cache_access, storage_root } => { + let key = ValueCacheKey::new_ref(key, *storage_root); + + // We first need to look up in the local cache and then the shared cache. + // It can happen that some value is cached in the shared cache, but the + // weak reference of the data can not be upgraded anymore. This for example + // happens when the node is dropped that contains the strong reference to the data. + // + // So, the logic of the trie would lookup the data and the node and store both + // in our local caches. + local_value_cache + .get(unsafe { + // SAFETY + // + // We need to convert the lifetime to make the compiler happy. However, as + // we only use the `key` to looking up the value this lifetime conversion is + // safe. + std::mem::transmute::<&ValueCacheKey<'_, H>, &ValueCacheKey<'static, H>>( + &key, + ) + }) + .or_else(|| { + shared_value_cache.get(&key).map(|v| { + shared_value_cache_access.insert(key.get_hash()); + v + }) + }) + }, + } + } + + /// Insert some new `value` under the given `key`. + fn insert(&mut self, key: &[u8], value: CachedValue) { + match self { + Self::Fresh(map) => { + map.insert(key.into(), value); + }, + Self::ForStorageRoot { local_value_cache, storage_root, .. } => { + local_value_cache.insert(ValueCacheKey::new_value(key, *storage_root), value); + }, + } + } +} + +/// The actual [`TrieCache`](trie_db::TrieCache) implementation. +/// +/// If this instance was created for using it with a [`TrieDBMut`](trie_db::TrieDBMut), it needs to +/// be merged back into the [`LocalTrieCache`] with [`Self::merge_into`] after all operations are +/// done. +pub struct TrieCache<'a, H: Hasher> { + shared_inner: RwLockReadGuard<'a, SharedTrieCacheInner>, + shared_node_cache_access: MutexGuard<'a, HashSet>, + local_cache: MutexGuard<'a, HashMap>>, + value_cache: ValueCache<'a, H::Out>, +} + +impl<'a, H: Hasher> TrieCache<'a, H> { + /// Merge this cache into the given [`LocalTrieCache`]. 
+ /// + /// This function is only required to be called when this instance was created through + /// [`LocalTrieCache::as_trie_db_mut_cache`], otherwise this method is a no-op. The given + /// `storage_root` is the new storage root that was obtained after finishing all operations + /// using the [`TrieDBMut`](trie_db::TrieDBMut). + pub fn merge_into(self, local: &LocalTrieCache, storage_root: H::Out) { + let cache = if let ValueCache::Fresh(cache) = self.value_cache { cache } else { return }; + + if !cache.is_empty() { + let mut value_cache = local.value_cache.lock(); + let partial_hash = ValueCacheKey::hash_partial_data(&storage_root); + + cache + .into_iter() + .map(|(k, v)| { + let hash = + ValueCacheKeyHash::from_hasher_and_storage_key(partial_hash.clone(), &k); + (ValueCacheKey::Value { storage_key: k, storage_root, hash }, v) + }) + .for_each(|(k, v)| { + value_cache.insert(k, v); + }); + } + } +} + +impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { + fn get_or_insert_node( + &mut self, + hash: H::Out, + fetch_node: &mut dyn FnMut() -> trie_db::Result, H::Out, Error>, + ) -> trie_db::Result<&NodeOwned, H::Out, Error> { + if let Some(res) = self.shared_inner.node_cache().get(&hash) { + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache"); + self.shared_node_cache_access.insert(hash); + return Ok(res) + } + + match self.local_cache.entry(hash) { + MapEntry::Occupied(res) => { + tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache"); + Ok(res.into_mut()) + }, + MapEntry::Vacant(vacant) => { + let node = (*fetch_node)(); + + tracing::trace!( + target: LOG_TARGET, + ?hash, + fetch_successful = node.is_ok(), + "Node not found, needed to fetch it." + ); + + Ok(vacant.insert(node?)) + }, + } + } + + fn get_node(&mut self, hash: &H::Out) -> Option<&NodeOwned> { + if let Some(node) = self.shared_inner.node_cache().get(hash) { + tracing::trace!(target: LOG_TARGET, ?hash, "Getting node from shared cache"); + self.shared_node_cache_access.insert(*hash); + return Some(node) + } + + let res = self.local_cache.get(hash); + + tracing::trace!( + target: LOG_TARGET, + ?hash, + found = res.is_some(), + "Getting node from local cache" + ); + + res + } + + fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue> { + let res = self.value_cache.get(key, self.shared_inner.value_cache()); + + tracing::trace!( + target: LOG_TARGET, + key = ?sp_core::hexdisplay::HexDisplay::from(&key), + found = res.is_some(), + "Looked up value for key", + ); + + res + } + + fn cache_value_for_key(&mut self, key: &[u8], data: CachedValue) { + tracing::trace!( + target: LOG_TARGET, + key = ?sp_core::hexdisplay::HexDisplay::from(&key), + "Caching value for key", + ); + + self.value_cache.insert(key.into(), data); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use trie_db::{Bytes, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut}; + + type MemoryDB = crate::MemoryDB; + type Layout = crate::LayoutV1; + type Cache = super::SharedTrieCache; + type Recorder = crate::recorder::Recorder; + + const TEST_DATA: &[(&[u8], &[u8])] = + &[(b"key1", b"val1"), (b"key2", &[2; 64]), (b"key3", b"val3"), (b"key4", &[4; 64])]; + const CACHE_SIZE_RAW: usize = 1024 * 10; + const CACHE_SIZE: CacheSize = CacheSize::Maximum(CACHE_SIZE_RAW); + + fn create_trie() -> (MemoryDB, TrieHash) { + let mut db = MemoryDB::default(); + let mut root = Default::default(); + + { + let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); + for (k, v) in TEST_DATA { + 
trie.insert(k, v).expect("Inserts data"); + } + } + + (db, root) + } + + #[test] + fn basic_cache_works() { + let (db, root) = create_trie(); + + let shared_cache = Cache::new(CACHE_SIZE); + let local_cache = shared_cache.local_cache(); + + { + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap()); + } + + // Local cache wasn't dropped yet, so there should nothing in the shared caches. + assert!(shared_cache.read_lock_inner().value_cache().lru.is_empty()); + assert!(shared_cache.read_lock_inner().node_cache().lru.is_empty()); + + drop(local_cache); + + // Now we should have the cached items in the shared cache. + assert!(shared_cache.read_lock_inner().node_cache().lru.len() >= 1); + let cached_data = shared_cache + .read_lock_inner() + .value_cache() + .lru + .peek(&ValueCacheKey::new_value(TEST_DATA[0].0, root)) + .unwrap() + .clone(); + assert_eq!(Bytes::from(TEST_DATA[0].1.to_vec()), cached_data.data().flatten().unwrap()); + + let fake_data = Bytes::from(&b"fake_data"[..]); + + let local_cache = shared_cache.local_cache(); + shared_cache.write_lock_inner().value_cache_mut().lru.put( + ValueCacheKey::new_value(TEST_DATA[1].0, root), + (fake_data.clone(), Default::default()).into(), + ); + + { + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + + // We should now get the "fake_data", because we inserted this manually to the cache. + assert_eq!(b"fake_data".to_vec(), trie.get(TEST_DATA[1].0).unwrap().unwrap()); + } + } + + #[test] + fn trie_db_mut_cache_works() { + let (mut db, root) = create_trie(); + + let new_key = b"new_key".to_vec(); + // Use some long value to not have it inlined + let new_value = vec![23; 64]; + + let shared_cache = Cache::new(CACHE_SIZE); + let mut new_root = root; + + { + let local_cache = shared_cache.local_cache(); + + let mut cache = local_cache.as_trie_db_mut_cache(); + + { + let mut trie = TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) + .with_cache(&mut cache) + .build(); + + trie.insert(&new_key, &new_value).unwrap(); + } + + cache.merge_into(&local_cache, new_root); + } + + // After the local cache is dropped, all changes should have been merged back to the shared + // cache. + let cached_data = shared_cache + .read_lock_inner() + .value_cache() + .lru + .peek(&ValueCacheKey::new_value(new_key, new_root)) + .unwrap() + .clone(); + assert_eq!(Bytes::from(new_value), cached_data.data().flatten().unwrap()); + } + + #[test] + fn trie_db_cache_and_recorder_work_together() { + let (db, root) = create_trie(); + + let shared_cache = Cache::new(CACHE_SIZE); + + for i in 0..5 { + // Clear some of the caches. 
+ if i == 2 { + shared_cache.reset_node_cache(); + } else if i == 3 { + shared_cache.reset_value_cache(); + } + + let local_cache = shared_cache.local_cache(); + let recorder = Recorder::default(); + + { + let mut cache = local_cache.as_trie_db_cache(root); + let mut recorder = recorder.as_trie_recorder(); + let trie = TrieDBBuilder::::new(&db, &root) + .with_cache(&mut cache) + .with_recorder(&mut recorder) + .build(); + + for (key, value) in TEST_DATA { + assert_eq!(*value, trie.get(&key).unwrap().unwrap()); + } + } + + let storage_proof = recorder.drain_storage_proof(); + let memory_db: MemoryDB = storage_proof.into_memory_db(); + + { + let trie = TrieDBBuilder::::new(&memory_db, &root).build(); + + for (key, value) in TEST_DATA { + assert_eq!(*value, trie.get(&key).unwrap().unwrap()); + } + } + } + } + + #[test] + fn trie_db_mut_cache_and_recorder_work_together() { + const DATA_TO_ADD: &[(&[u8], &[u8])] = &[(b"key11", &[45; 78]), (b"key33", &[78; 89])]; + + let (db, root) = create_trie(); + + let shared_cache = Cache::new(CACHE_SIZE); + + // Run this twice so that we use the data cache in the second run. + for i in 0..5 { + // Clear some of the caches. + if i == 2 { + shared_cache.reset_node_cache(); + } else if i == 3 { + shared_cache.reset_value_cache(); + } + + let recorder = Recorder::default(); + let local_cache = shared_cache.local_cache(); + let mut new_root = root; + + { + let mut db = db.clone(); + let mut cache = local_cache.as_trie_db_cache(root); + let mut recorder = recorder.as_trie_recorder(); + let mut trie = TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) + .with_cache(&mut cache) + .with_recorder(&mut recorder) + .build(); + + for (key, value) in DATA_TO_ADD { + trie.insert(key, value).unwrap(); + } + } + + let storage_proof = recorder.drain_storage_proof(); + let mut memory_db: MemoryDB = storage_proof.into_memory_db(); + let mut proof_root = root; + + { + let mut trie = + TrieDBMutBuilder::::from_existing(&mut memory_db, &mut proof_root) + .build(); + + for (key, value) in DATA_TO_ADD { + trie.insert(key, value).unwrap(); + } + } + + assert_eq!(new_root, proof_root) + } + } + + #[test] + fn cache_lru_works() { + let (db, root) = create_trie(); + + let shared_cache = Cache::new(CACHE_SIZE); + + { + let local_cache = shared_cache.local_cache(); + + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + + for (k, _) in TEST_DATA { + trie.get(k).unwrap().unwrap(); + } + } + + // Check that all items are there. + assert!(shared_cache + .read_lock_inner() + .value_cache() + .lru + .iter() + .map(|d| d.0) + .all(|l| TEST_DATA.iter().any(|d| l.storage_key().unwrap() == d.0))); + + { + let local_cache = shared_cache.local_cache(); + + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + + for (k, _) in TEST_DATA.iter().take(2) { + trie.get(k).unwrap().unwrap(); + } + } + + // Ensure that the accessed items are most recently used items of the shared value cache. + assert!(shared_cache + .read_lock_inner() + .value_cache() + .lru + .iter() + .take(2) + .map(|d| d.0) + .all(|l| { TEST_DATA.iter().take(2).any(|d| l.storage_key().unwrap() == d.0) })); + + let most_recently_used_nodes = shared_cache + .read_lock_inner() + .node_cache() + .lru + .iter() + .map(|d| *d.0) + .collect::>(); + + // Delete the value cache, so that we access the nodes. 
+ shared_cache.reset_value_cache(); + + { + let local_cache = shared_cache.local_cache(); + + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + + for (k, _) in TEST_DATA.iter().take(2) { + trie.get(k).unwrap().unwrap(); + } + } + + // Ensure that the most recently used nodes changed as well. + assert_ne!( + most_recently_used_nodes, + shared_cache + .read_lock_inner() + .node_cache() + .lru + .iter() + .map(|d| *d.0) + .collect::>() + ); + } + + #[test] + fn cache_respects_bounds() { + let (mut db, root) = create_trie(); + + let shared_cache = Cache::new(CACHE_SIZE); + { + let local_cache = shared_cache.local_cache(); + + let mut new_root = root; + + { + let mut cache = local_cache.as_trie_db_cache(root); + { + let mut trie = + TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) + .with_cache(&mut cache) + .build(); + + let value = vec![10u8; 100]; + // Ensure we add enough data that would overflow the cache. + for i in 0..CACHE_SIZE_RAW / 100 * 2 { + trie.insert(format!("key{}", i).as_bytes(), &value).unwrap(); + } + } + + cache.merge_into(&local_cache, new_root); + } + } + + let node_cache_size = shared_cache.read_lock_inner().node_cache().size_in_bytes; + let value_cache_size = shared_cache.read_lock_inner().value_cache().size_in_bytes; + + assert!(node_cache_size + value_cache_size < CACHE_SIZE_RAW); + } +} diff --git a/primitives/trie/src/cache/shared_cache.rs b/primitives/trie/src/cache/shared_cache.rs new file mode 100644 index 0000000000000..abac8c9f946ca --- /dev/null +++ b/primitives/trie/src/cache/shared_cache.rs @@ -0,0 +1,677 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +///! Provides the [`SharedNodeCache`], the [`SharedValueCache`] and the [`SharedTrieCache`] +///! that combines both caches and is exported to the outside. +use super::{CacheSize, LOG_TARGET}; +use hash_db::Hasher; +use hashbrown::{hash_set::Entry as SetEntry, HashSet}; +use lru::LruCache; +use nohash_hasher::BuildNoHashHasher; +use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use std::{ + hash::{BuildHasher, Hasher as _}, + mem, + sync::Arc, +}; +use trie_db::{node::NodeOwned, CachedValue}; + +lazy_static::lazy_static! { + static ref RANDOM_STATE: ahash::RandomState = ahash::RandomState::default(); +} + +/// No hashing [`LruCache`]. +type NoHashingLruCache = lru::LruCache>; + +/// The shared node cache. +/// +/// Internally this stores all cached nodes in a [`LruCache`]. It ensures that when updating the +/// cache, that the cache stays within its allowed bounds. +pub(super) struct SharedNodeCache { + /// The cached nodes, ordered by least recently used. + pub(super) lru: LruCache>, + /// The size of [`Self::lru`] in bytes. + pub(super) size_in_bytes: usize, + /// The maximum cache size of [`Self::lru`]. 
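Note that the bound is enforced in bytes rather than in number of entries: the cache tracks its own `size_in_bytes` and evicts from the least recently used end until it fits again. A standalone sketch of that pattern, with a made-up `ByteBoundedCache` over plain byte keys and values (the real cache stores node hashes and `NodeOwned` nodes), built on the same `lru` crate:

    use lru::LruCache;

    struct ByteBoundedCache {
        lru: LruCache<Vec<u8>, Vec<u8>>,
        size_in_bytes: usize,
        max_bytes: usize,
    }

    impl ByteBoundedCache {
        fn new(max_bytes: usize) -> Self {
            Self { lru: LruCache::unbounded(), size_in_bytes: 0, max_bytes }
        }

        fn insert(&mut self, key: Vec<u8>, value: Vec<u8>) {
            self.size_in_bytes += key.len() + value.len();

            // `push` returns a replaced entry whose size must be subtracted again.
            if let Some((old_key, old_value)) = self.lru.push(key, value) {
                self.size_in_bytes =
                    self.size_in_bytes.saturating_sub(old_key.len() + old_value.len());
            }

            // Evict least recently used entries until we are within bounds again.
            while self.size_in_bytes > self.max_bytes {
                if let Some((k, v)) = self.lru.pop_lru() {
                    self.size_in_bytes = self.size_in_bytes.saturating_sub(k.len() + v.len());
                } else {
                    break
                }
            }
        }
    }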
+ maximum_cache_size: CacheSize, +} + +impl + Eq + std::hash::Hash> SharedNodeCache { + /// Create a new instance. + fn new(cache_size: CacheSize) -> Self { + Self { lru: LruCache::unbounded(), size_in_bytes: 0, maximum_cache_size: cache_size } + } + + /// Get the node for `key`. + /// + /// This doesn't change the least recently order in the internal [`LruCache`]. + pub fn get(&self, key: &H) -> Option<&NodeOwned> { + self.lru.peek(key) + } + + /// Update the cache with the `added` nodes and the `accessed` nodes. + /// + /// The `added` nodes are the ones that have been collected by doing operations on the trie and + /// now should be stored in the shared cache. The `accessed` nodes are only referenced by hash + /// and represent the nodes that were retrieved from this shared cache through [`Self::get`]. + /// These `accessed` nodes are being put to the front of the internal [`LruCache`] like the + /// `added` ones. + /// + /// After the internal [`LruCache`] was updated, it is ensured that the internal [`LruCache`] is + /// inside its bounds ([`Self::maximum_size_in_bytes`]). + pub fn update( + &mut self, + added: impl IntoIterator)>, + accessed: impl IntoIterator, + ) { + let update_size_in_bytes = |size_in_bytes: &mut usize, key: &H, node: &NodeOwned| { + if let Some(new_size_in_bytes) = + size_in_bytes.checked_sub(key.as_ref().len() + node.size_in_bytes()) + { + *size_in_bytes = new_size_in_bytes; + } else { + *size_in_bytes = 0; + tracing::error!(target: LOG_TARGET, "`SharedNodeCache` underflow detected!",); + } + }; + + accessed.into_iter().for_each(|key| { + // Access every node in the lru to put it to the front. + self.lru.get(&key); + }); + added.into_iter().for_each(|(key, node)| { + self.size_in_bytes += key.as_ref().len() + node.size_in_bytes(); + + if let Some((r_key, r_node)) = self.lru.push(key, node) { + update_size_in_bytes(&mut self.size_in_bytes, &r_key, &r_node); + } + + // Directly ensure that we respect the maximum size. By doing it directly here we ensure + // that the internal map of the [`LruCache`] doesn't grow too much. + while self.maximum_cache_size.exceeds(self.size_in_bytes) { + // This should always be `Some(_)`, otherwise something is wrong! + if let Some((key, node)) = self.lru.pop_lru() { + update_size_in_bytes(&mut self.size_in_bytes, &key, &node); + } + } + }); + } + + /// Reset the cache. + fn reset(&mut self) { + self.size_in_bytes = 0; + self.lru.clear(); + } +} + +/// The hash of [`ValueCacheKey`]. +#[derive(Eq, Clone, Copy)] +pub struct ValueCacheKeyHash(u64); + +impl ValueCacheKeyHash { + pub fn from_hasher_and_storage_key( + mut hasher: impl std::hash::Hasher, + storage_key: &[u8], + ) -> Self { + hasher.write(storage_key); + + Self(hasher.finish()) + } +} + +impl PartialEq for ValueCacheKeyHash { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +impl std::hash::Hash for ValueCacheKeyHash { + fn hash(&self, state: &mut Hasher) { + state.write_u64(self.0); + } +} + +impl nohash_hasher::IsEnabled for ValueCacheKeyHash {} + +/// A type that can only be constructed inside of this file. +/// +/// It "requires" that the user has read the docs to prevent fuck ups. +#[derive(Eq, PartialEq)] +pub(super) struct IReadTheDocumentation(()); + +/// The key type that is being used to address a [`CachedValue`]. +/// +/// This type is implemented as `enum` to improve the performance when accessing the value cache. 
+/// The problem being that we need to calculate the `hash` of [`Self`] in worst case three times +/// when trying to find a value in the value cache. First to lookup the local cache, then the shared +/// cache and if we found it in the shared cache a third time to insert it into the list of accessed +/// values. To work around each variant stores the `hash` to identify a unique combination of +/// `storage_key` and `storage_root`. However, be aware that this `hash` can lead to collisions when +/// there are two different `storage_key` and `storage_root` pairs that map to the same `hash`. This +/// type also has the `Hash` variant. This variant should only be used for the use case of updating +/// the lru for a key. Because when using only the `Hash` variant to getting a value from a hash map +/// it could happen that a wrong value is returned when there is another key in the same hash map +/// that maps to the same `hash`. The [`PartialEq`] implementation is written in a way that when one +/// of the two compared instances is the `Hash` variant, we will only compare the hashes. This +/// ensures that we can use the `Hash` variant to bring values up in the lru. +#[derive(Eq)] +pub(super) enum ValueCacheKey<'a, H> { + /// Variant that stores the `storage_key` by value. + Value { + /// The storage root of the trie this key belongs to. + storage_root: H, + /// The key to access the value in the storage. + storage_key: Arc<[u8]>, + /// The hash that identifying this instance of `storage_root` and `storage_key`. + hash: ValueCacheKeyHash, + }, + /// Variant that only references the `storage_key`. + Ref { + /// The storage root of the trie this key belongs to. + storage_root: H, + /// The key to access the value in the storage. + storage_key: &'a [u8], + /// The hash that identifying this instance of `storage_root` and `storage_key`. + hash: ValueCacheKeyHash, + }, + /// Variant that only stores the hash that represents the `storage_root` and `storage_key`. + /// + /// This should be used by caution, because it can lead to accessing the wrong value in a + /// hash map/set when there exists two different `storage_root`s and `storage_key`s that + /// map to the same `hash`. + Hash { hash: ValueCacheKeyHash, _i_read_the_documentation: IReadTheDocumentation }, +} + +impl<'a, H> ValueCacheKey<'a, H> { + /// Constructs [`Self::Value`]. + pub fn new_value(storage_key: impl Into>, storage_root: H) -> Self + where + H: AsRef<[u8]>, + { + let storage_key = storage_key.into(); + let hash = Self::hash_data(&storage_key, &storage_root); + Self::Value { storage_root, storage_key, hash } + } + + /// Constructs [`Self::Ref`]. + pub fn new_ref(storage_key: &'a [u8], storage_root: H) -> Self + where + H: AsRef<[u8]>, + { + let storage_key = storage_key.into(); + let hash = Self::hash_data(storage_key, &storage_root); + Self::Ref { storage_root, storage_key, hash } + } + + /// Returns a hasher prepared to build the final hash to identify [`Self`]. + /// + /// See [`Self::hash_data`] for building the hash directly. + pub fn hash_partial_data(storage_root: &H) -> impl std::hash::Hasher + Clone + where + H: AsRef<[u8]>, + { + let mut hasher = RANDOM_STATE.build_hasher(); + hasher.write(storage_root.as_ref()); + hasher + } + + /// Hash the `key` and `storage_root` that identify [`Self`]. + /// + /// Returns a `u64` which represents the unique hash for the given inputs. 
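Spelled out, the two-step hashing scheme looks roughly like the hypothetical helper below: the storage root is written into the hasher once, and the partially fed hasher is then cloned and finished once per storage key. This is what makes inserting many keys under a single new storage root (as in `TrieCache::merge_into`) cheap. The helper name and the use of `ahash` directly are illustrative only.

    use std::hash::{BuildHasher, Hasher};

    fn hash_keys_under_root(storage_root: &[u8], storage_keys: &[&[u8]]) -> Vec<u64> {
        let state = ahash::RandomState::default();

        // Feed the storage root once.
        let mut partial = state.build_hasher();
        partial.write(storage_root);

        // Clone the partially fed hasher and finish it per storage key.
        storage_keys
            .iter()
            .map(|key| {
                let mut hasher = partial.clone();
                hasher.write(key);
                hasher.finish()
            })
            .collect()
    }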
+ pub fn hash_data(key: &[u8], storage_root: &H) -> ValueCacheKeyHash + where + H: AsRef<[u8]>, + { + let hasher = Self::hash_partial_data(storage_root); + + ValueCacheKeyHash::from_hasher_and_storage_key(hasher, key) + } + + /// Returns the `hash` that identifies the current instance. + pub fn get_hash(&self) -> ValueCacheKeyHash { + match self { + Self::Value { hash, .. } | Self::Ref { hash, .. } | Self::Hash { hash, .. } => *hash, + } + } + + /// Returns the stored storage root. + pub fn storage_root(&self) -> Option<&H> { + match self { + Self::Value { storage_root, .. } | Self::Ref { storage_root, .. } => Some(storage_root), + Self::Hash { .. } => None, + } + } + + /// Returns the stored storage key. + pub fn storage_key(&self) -> Option<&[u8]> { + match self { + Self::Ref { storage_key, .. } => Some(&storage_key), + Self::Value { storage_key, .. } => Some(storage_key), + Self::Hash { .. } => None, + } + } +} + +// Implement manually to ensure that the `Value` and `Hash` are treated equally. +impl std::hash::Hash for ValueCacheKey<'_, H> { + fn hash(&self, state: &mut Hasher) { + self.get_hash().hash(state) + } +} + +impl nohash_hasher::IsEnabled for ValueCacheKey<'_, H> {} + +// Implement manually to ensure that the `Value` and `Hash` are treated equally. +impl PartialEq for ValueCacheKey<'_, H> { + fn eq(&self, other: &Self) -> bool { + // First check if `self` or `other` is only the `Hash`. + // Then we only compare the `hash`. So, there could actually be some collision + // if two different storage roots and keys are mapping to the same key. See the + // [`ValueCacheKey`] docs for more information. + match (self, other) { + (Self::Hash { hash, .. }, Self::Hash { hash: other_hash, .. }) => hash == other_hash, + (Self::Hash { hash, .. }, _) => *hash == other.get_hash(), + (_, Self::Hash { hash: other_hash, .. }) => self.get_hash() == *other_hash, + // If both are not the `Hash` variant, we compare all the values. + _ => + self.get_hash() == other.get_hash() && + self.storage_root() == other.storage_root() && + self.storage_key() == other.storage_key(), + } + } +} + +/// The shared value cache. +/// +/// The cache ensures that it stays in the configured size bounds. +pub(super) struct SharedValueCache { + /// The cached nodes, ordered by least recently used. + pub(super) lru: NoHashingLruCache, CachedValue>, + /// The size of [`Self::lru`] in bytes. + pub(super) size_in_bytes: usize, + /// The maximum cache size of [`Self::lru`]. + maximum_cache_size: CacheSize, + /// All known storage keys that are stored in [`Self::lru`]. + /// + /// This is used to de-duplicate keys in memory that use the + /// same [`SharedValueCache::storage_key`], but have a different + /// [`SharedValueCache::storage_root`]. + known_storage_keys: HashSet>, +} + +impl> SharedValueCache { + /// Create a new instance. + fn new(cache_size: CacheSize) -> Self { + Self { + lru: NoHashingLruCache::unbounded_with_hasher(Default::default()), + size_in_bytes: 0, + maximum_cache_size: cache_size, + known_storage_keys: Default::default(), + } + } + + /// Get the [`CachedValue`] for `key`. + /// + /// This doesn't change the least recently order in the internal [`LruCache`]. + pub fn get<'a>(&'a self, key: &ValueCacheKey) -> Option<&'a CachedValue> { + debug_assert!( + !matches!(key, ValueCacheKey::Hash { .. }), + "`get` can not be called with `Hash` variant as this may returns the wrong value." + ); + + self.lru.peek(unsafe { + // SAFETY + // + // We need to convert the lifetime to make the compiler happy. 
However, as + // we only use the `key` to looking up the value this lifetime conversion is + // safe. + mem::transmute::<&ValueCacheKey<'_, H>, &ValueCacheKey<'static, H>>(key) + }) + } + + /// Update the cache with the `added` values and the `accessed` values. + /// + /// The `added` values are the ones that have been collected by doing operations on the trie and + /// now should be stored in the shared cache. The `accessed` values are only referenced by the + /// [`ValueCacheKeyHash`] and represent the values that were retrieved from this shared cache + /// through [`Self::get`]. These `accessed` values are being put to the front of the internal + /// [`LruCache`] like the `added` ones. + /// + /// After the internal [`LruCache`] was updated, it is ensured that the internal [`LruCache`] is + /// inside its bounds ([`Self::maximum_size_in_bytes`]). + pub fn update( + &mut self, + added: impl IntoIterator, CachedValue)>, + accessed: impl IntoIterator, + ) { + // The base size in memory per ([`ValueCacheKey`], [`CachedValue`]). + let base_size = mem::size_of::>() + mem::size_of::>(); + let known_keys_entry_size = mem::size_of::>(); + + let update_size_in_bytes = + |size_in_bytes: &mut usize, r_key: Arc<[u8]>, known_keys: &mut HashSet>| { + // If the `strong_count == 2`, it means this is the last instance of the key. + // One being `r_key` and the other being stored in `known_storage_keys`. + let last_instance = Arc::strong_count(&r_key) == 2; + + let key_len = if last_instance { + known_keys.remove(&r_key); + r_key.len() + known_keys_entry_size + } else { + // The key is still in `keys`, because it is still used by another + // `ValueCacheKey`. + 0 + }; + + if let Some(new_size_in_bytes) = size_in_bytes.checked_sub(key_len + base_size) { + *size_in_bytes = new_size_in_bytes; + } else { + *size_in_bytes = 0; + tracing::error!(target: LOG_TARGET, "`SharedValueCache` underflow detected!",); + } + }; + + accessed.into_iter().for_each(|key| { + // Access every node in the lru to put it to the front. + // As we are using the `Hash` variant here, it may leads to putting the wrong value to + // the top. However, the only consequence of this is that we may prune a recently used + // value to early. + self.lru.get(&ValueCacheKey::Hash { + hash: key, + _i_read_the_documentation: IReadTheDocumentation(()), + }); + }); + + added.into_iter().for_each(|(key, value)| { + let (storage_root, storage_key, key_hash) = match key { + ValueCacheKey::Hash { .. } => { + // Ignore the hash variant and try the next. + tracing::error!( + target: LOG_TARGET, + "`SharedValueCached::update` was called with a key to add \ + that uses the `Hash` variant. This would lead to potential hash collision!", + ); + return + }, + ValueCacheKey::Ref { storage_key, storage_root, hash } => + (storage_root, storage_key.into(), hash), + ValueCacheKey::Value { storage_root, storage_key, hash } => + (storage_root, storage_key, hash), + }; + + let (size_update, storage_key) = + match self.known_storage_keys.entry(storage_key.clone()) { + SetEntry::Vacant(v) => { + let len = v.get().len(); + v.insert(); + + // If the key was unknown, we need to also take its length and the size of + // the entry of `known_keys` into account. 
+ (len + base_size + known_keys_entry_size, storage_key) + }, + SetEntry::Occupied(o) => { + // Key is known + (base_size, o.get().clone()) + }, + }; + + self.size_in_bytes += size_update; + + if let Some((r_key, _)) = self + .lru + .push(ValueCacheKey::Value { storage_key, storage_root, hash: key_hash }, value) + { + if let ValueCacheKey::Value { storage_key, .. } = r_key { + update_size_in_bytes( + &mut self.size_in_bytes, + storage_key, + &mut self.known_storage_keys, + ); + } + } + + // Directly ensure that we respect the maximum size. By doing it directly here we + // ensure that the internal map of the [`LruCache`] doesn't grow too much. + while self.maximum_cache_size.exceeds(self.size_in_bytes) { + // This should always be `Some(_)`, otherwise something is wrong! + if let Some((r_key, _)) = self.lru.pop_lru() { + if let ValueCacheKey::Value { storage_key, .. } = r_key { + update_size_in_bytes( + &mut self.size_in_bytes, + storage_key, + &mut self.known_storage_keys, + ); + } + } + } + }); + } + + /// Reset the cache. + fn reset(&mut self) { + self.size_in_bytes = 0; + self.lru.clear(); + self.known_storage_keys.clear(); + } +} + +/// The inner of [`SharedTrieCache`]. +pub(super) struct SharedTrieCacheInner { + node_cache: SharedNodeCache, + value_cache: SharedValueCache, +} + +impl SharedTrieCacheInner { + /// Returns a reference to the [`SharedValueCache`]. + pub(super) fn value_cache(&self) -> &SharedValueCache { + &self.value_cache + } + + /// Returns a mutable reference to the [`SharedValueCache`]. + pub(super) fn value_cache_mut(&mut self) -> &mut SharedValueCache { + &mut self.value_cache + } + + /// Returns a reference to the [`SharedNodeCache`]. + pub(super) fn node_cache(&self) -> &SharedNodeCache { + &self.node_cache + } + + /// Returns a mutable reference to the [`SharedNodeCache`]. + pub(super) fn node_cache_mut(&mut self) -> &mut SharedNodeCache { + &mut self.node_cache + } +} + +/// The shared trie cache. +/// +/// It should be instantiated once per node. It will hold the trie nodes and values of all +/// operations to the state. To not use all available memory it will ensure to stay in the +/// bounds given via the [`CacheSize`] at startup. +/// +/// The instance of this object can be shared between multiple threads. +pub struct SharedTrieCache { + inner: Arc>>, +} + +impl Clone for SharedTrieCache { + fn clone(&self) -> Self { + Self { inner: self.inner.clone() } + } +} + +impl SharedTrieCache { + /// Create a new [`SharedTrieCache`]. + pub fn new(cache_size: CacheSize) -> Self { + let (node_cache_size, value_cache_size) = match cache_size { + CacheSize::Maximum(max) => { + // Allocate 20% for the value cache. + let value_cache_size_in_bytes = (max as f32 * 0.20) as usize; + + ( + CacheSize::Maximum(max - value_cache_size_in_bytes), + CacheSize::Maximum(value_cache_size_in_bytes), + ) + }, + CacheSize::Unlimited => (CacheSize::Unlimited, CacheSize::Unlimited), + }; + + Self { + inner: Arc::new(RwLock::new(SharedTrieCacheInner { + node_cache: SharedNodeCache::new(node_cache_size), + value_cache: SharedValueCache::new(value_cache_size), + })), + } + } + + /// Create a new [`LocalTrieCache`](super::LocalTrieCache) instance from this shared cache. 
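For example, `SharedTrieCache::<Blake2Hasher>::new(CacheSize::Maximum(10 * 1024 * 1024))` reserves roughly 2 MiB (20%) for the shared value cache and the remaining ~8 MiB for the shared node cache, while `CacheSize::Unlimited` leaves both caches unbounded.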
+ pub fn local_cache(&self) -> super::LocalTrieCache { + super::LocalTrieCache { + shared: self.clone(), + node_cache: Default::default(), + value_cache: Default::default(), + shared_node_cache_access: Default::default(), + shared_value_cache_access: Default::default(), + } + } + + /// Returns the used memory size of this cache in bytes. + pub fn used_memory_size(&self) -> usize { + let inner = self.inner.read(); + let value_cache_size = inner.value_cache.size_in_bytes; + let node_cache_size = inner.node_cache.size_in_bytes; + + node_cache_size + value_cache_size + } + + /// Reset the node cache. + pub fn reset_node_cache(&self) { + self.inner.write().node_cache.reset(); + } + + /// Reset the value cache. + pub fn reset_value_cache(&self) { + self.inner.write().value_cache.reset(); + } + + /// Reset the entire cache. + pub fn reset(&self) { + self.reset_node_cache(); + self.reset_value_cache(); + } + + /// Returns the read locked inner. + pub(super) fn read_lock_inner(&self) -> RwLockReadGuard<'_, SharedTrieCacheInner> { + self.inner.read() + } + + /// Returns the write locked inner. + pub(super) fn write_lock_inner(&self) -> RwLockWriteGuard<'_, SharedTrieCacheInner> { + self.inner.write() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::H256 as Hash; + + #[test] + fn shared_value_cache_works() { + let base_size = mem::size_of::>() + mem::size_of::>(); + let arc_size = mem::size_of::>(); + + let mut cache = SharedValueCache::::new(CacheSize::Maximum( + (base_size + arc_size + 10) * 10, + )); + + let key = vec![0; 10]; + + let root0 = Hash::repeat_byte(1); + let root1 = Hash::repeat_byte(2); + + cache.update( + vec![ + (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting), + (ValueCacheKey::new_value(&key[..], root1), CachedValue::NonExisting), + ], + vec![], + ); + + // Ensure that the basics are working + assert_eq!(1, cache.known_storage_keys.len()); + assert_eq!(3, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap())); + assert_eq!(base_size * 2 + key.len() + arc_size, cache.size_in_bytes); + + // Just accessing a key should not change anything on the size and number of entries. + cache.update(vec![], vec![ValueCacheKey::hash_data(&key[..], &root0)]); + assert_eq!(1, cache.known_storage_keys.len()); + assert_eq!(3, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap())); + assert_eq!(base_size * 2 + key.len() + arc_size, cache.size_in_bytes); + + // Add 9 other entries and this should move out the key for `root1`. 
+ cache.update( + (1..10) + .map(|i| vec![i; 10]) + .map(|key| (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting)), + vec![], + ); + + assert_eq!(10, cache.known_storage_keys.len()); + assert_eq!(2, Arc::strong_count(cache.known_storage_keys.get(&key[..]).unwrap())); + assert_eq!((base_size + key.len() + arc_size) * 10, cache.size_in_bytes); + assert!(matches!( + cache.get(&ValueCacheKey::new_ref(&key, root0)).unwrap(), + CachedValue::::NonExisting + )); + assert!(cache.get(&ValueCacheKey::new_ref(&key, root1)).is_none()); + + cache.update( + vec![(ValueCacheKey::new_value(vec![10; 10], root0), CachedValue::NonExisting)], + vec![], + ); + + assert!(cache.known_storage_keys.get(&key[..]).is_none()); + } + + #[test] + fn value_cache_key_eq_works() { + let storage_key = &b"something"[..]; + let storage_key2 = &b"something2"[..]; + let storage_root = Hash::random(); + + let value = ValueCacheKey::new_value(storage_key, storage_root); + // Ref gets the same hash, but a different storage key + let ref_ = + ValueCacheKey::Ref { storage_root, storage_key: storage_key2, hash: value.get_hash() }; + let hash = ValueCacheKey::Hash { + hash: value.get_hash(), + _i_read_the_documentation: IReadTheDocumentation(()), + }; + + // Ensure that the hash variants is equal to `value`, `ref_` and itself. + assert!(hash == value); + assert!(value == hash); + assert!(hash == ref_); + assert!(ref_ == hash); + assert!(hash == hash); + + // But when we compare `value` and `ref_` the different storage key is detected. + assert!(value != ref_); + assert!(ref_ != value); + } +} diff --git a/primitives/trie/src/error.rs b/primitives/trie/src/error.rs index e0b3642b6db76..a781d408e994f 100644 --- a/primitives/trie/src/error.rs +++ b/primitives/trie/src/error.rs @@ -15,18 +15,33 @@ // See the License for the specific language governing permissions and // limitations under the License. -/// Error for trie node decoding. +use sp_std::{boxed::Box, vec::Vec}; + +/// Error type used for trie related errors. 
#[derive(Debug, PartialEq, Eq, Clone)] #[cfg_attr(feature = "std", derive(thiserror::Error))] -pub enum Error { +pub enum Error { #[cfg_attr(feature = "std", error("Bad format"))] BadFormat, #[cfg_attr(feature = "std", error("Decoding failed: {0}"))] Decode(#[cfg_attr(feature = "std", source)] codec::Error), + #[cfg_attr( + feature = "std", + error("Recorded key ({0:x?}) access with value as found={1}, but could not confirm with trie.") + )] + InvalidRecording(Vec, bool), + #[cfg_attr(feature = "std", error("Trie error: {0:?}"))] + TrieError(Box>), } -impl From for Error { +impl From for Error { fn from(x: codec::Error) -> Self { Error::Decode(x) } } + +impl From>> for Error { + fn from(x: Box>) -> Self { + Error::TrieError(x) + } +} diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 7a17d44aa5b69..fafa2a2891ce4 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -19,9 +19,13 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] +pub mod cache; mod error; mod node_codec; mod node_header; +#[cfg(feature = "std")] +pub mod recorder; mod storage_proof; mod trie_codec; mod trie_stream; @@ -46,8 +50,8 @@ use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::{ nibble_ops, node::{NodePlan, ValuePlan}, - CError, DBValue, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, TrieDBKeyIterator, - TrieLayout, TrieMut, + CError, DBValue, Query, Recorder, Trie, TrieCache, TrieConfiguration, TrieDBIterator, + TrieDBKeyIterator, TrieLayout, TrieMut, TrieRecorder, }; /// The Substrate format implementation of `TrieStream`. pub use trie_stream::TrieStream; @@ -167,11 +171,15 @@ pub type MemoryDB = memory_db::MemoryDB, trie_db::DB pub type GenericMemoryDB = memory_db::MemoryDB; /// Persistent trie database read-access interface for the a given hasher. -pub type TrieDB<'a, L> = trie_db::TrieDB<'a, L>; +pub type TrieDB<'a, 'cache, L> = trie_db::TrieDB<'a, 'cache, L>; +/// Builder for creating a [`TrieDB`]. +pub type TrieDBBuilder<'a, 'cache, L> = trie_db::TrieDBBuilder<'a, 'cache, L>; /// Persistent trie database write-access interface for the a given hasher. pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>; +/// Builder for creating a [`TrieDBMut`]. +pub type TrieDBMutBuilder<'a, L> = trie_db::TrieDBMutBuilder<'a, L>; /// Querying interface, as in `trie_db` but less generic. -pub type Lookup<'a, L, Q> = trie_db::Lookup<'a, L, Q>; +pub type Lookup<'a, 'cache, L, Q> = trie_db::Lookup<'a, 'cache, L, Q>; /// Hash type for a trie layout. pub type TrieHash = <::Hash as Hasher>::Out; /// This module is for non generic definition of trie type. @@ -180,18 +188,23 @@ pub mod trie_types { use super::*; /// Persistent trie database read-access interface for the a given hasher. + /// /// Read only V1 and V0 are compatible, thus we always use V1. - pub type TrieDB<'a, H> = super::TrieDB<'a, LayoutV1>; + pub type TrieDB<'a, 'cache, H> = super::TrieDB<'a, 'cache, LayoutV1>; + /// Builder for creating a [`TrieDB`]. + pub type TrieDBBuilder<'a, 'cache, H> = super::TrieDBBuilder<'a, 'cache, LayoutV1>; /// Persistent trie database write-access interface for the a given hasher. pub type TrieDBMutV0<'a, H> = super::TrieDBMut<'a, LayoutV0>; + /// Builder for creating a [`TrieDBMutV0`]. + pub type TrieDBMutBuilderV0<'a, H> = super::TrieDBMutBuilder<'a, LayoutV0>; /// Persistent trie database write-access interface for the a given hasher. pub type TrieDBMutV1<'a, H> = super::TrieDBMut<'a, LayoutV1>; + /// Builder for creating a [`TrieDBMutV1`]. 
+ pub type TrieDBMutBuilderV1<'a, H> = super::TrieDBMutBuilder<'a, LayoutV1>; /// Querying interface, as in `trie_db` but less generic. - pub type LookupV0<'a, H, Q> = trie_db::Lookup<'a, LayoutV0, Q>; - /// Querying interface, as in `trie_db` but less generic. - pub type LookupV1<'a, H, Q> = trie_db::Lookup<'a, LayoutV1, Q>; + pub type Lookup<'a, 'cache, H, Q> = trie_db::Lookup<'a, 'cache, LayoutV1, Q>; /// As in `trie_db`, but less generic, error type for the crate. - pub type TrieError = trie_db::TrieError; + pub type TrieError = trie_db::TrieError>; } /// Create a proof for a subset of keys in a trie. @@ -213,9 +226,7 @@ where K: 'a + AsRef<[u8]>, DB: hash_db::HashDBRef, { - // Can use default layout (read only). - let trie = TrieDB::::new(db, &root)?; - generate_proof(&trie, keys) + generate_proof::<_, L, _, _>(db, &root, keys) } /// Verify a set of key-value pairs against a trie root and a proof. @@ -245,6 +256,8 @@ pub fn delta_trie_root( db: &mut DB, mut root: TrieHash, delta: I, + recorder: Option<&mut dyn trie_db::TrieRecorder>>, + cache: Option<&mut dyn TrieCache>, ) -> Result, Box>> where I: IntoIterator, @@ -254,7 +267,10 @@ where DB: hash_db::HashDB, { { - let mut trie = TrieDBMut::::from_existing(db, &mut root)?; + let mut trie = TrieDBMutBuilder::::from_existing(db, &mut root) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build(); let mut delta = delta.into_iter().collect::>(); delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); @@ -271,33 +287,32 @@ where } /// Read a value from the trie. -pub fn read_trie_value( +pub fn read_trie_value>( db: &DB, root: &TrieHash, key: &[u8], -) -> Result>, Box>> -where - L: TrieConfiguration, - DB: hash_db::HashDBRef, -{ - TrieDB::::new(db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) + recorder: Option<&mut dyn TrieRecorder>>, + cache: Option<&mut dyn TrieCache>, +) -> Result>, Box>> { + TrieDBBuilder::::new(db, root) + .with_optional_cache(cache) + .with_optional_recorder(recorder) + .build() + .get(key) } /// Read a value from the trie with given Query. -pub fn read_trie_value_with( +pub fn read_trie_value_with< + L: TrieLayout, + Q: Query>, + DB: hash_db::HashDBRef, +>( db: &DB, root: &TrieHash, key: &[u8], query: Q, -) -> Result>, Box>> -where - L: TrieConfiguration, - Q: Query, - DB: hash_db::HashDBRef, -{ - TrieDB::::new(db, root)? - .get_with(key, query) - .map(|x| x.map(|val| val.to_vec())) +) -> Result>, Box>> { + TrieDBBuilder::::new(db, root).build().get_with(key, query) } /// Determine the empty trie root. @@ -328,6 +343,8 @@ pub fn child_delta_trie_root( db: &mut DB, root_data: RD, delta: I, + recorder: Option<&mut dyn TrieRecorder>>, + cache: Option<&mut dyn TrieCache>, ) -> Result<::Out, Box>> where I: IntoIterator, @@ -341,46 +358,49 @@ where // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_data.as_ref()); - let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - delta_trie_root::(&mut db, root, delta) + let mut db = KeySpacedDBMut::new(db, keyspace); + delta_trie_root::(&mut db, root, delta, recorder, cache) } -/// Record all keys for a given root. -pub fn record_all_keys( +/// Read a value from the child trie. 
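The state-access helpers above (`delta_trie_root`, `read_trie_value`, `child_delta_trie_root`, ...) now thread optional recorder and cache arguments through to the builders. A hypothetical call site that opts out of both simply passes `None` twice, which keeps the previous uncached and unrecorded behaviour:

    use sp_core::Blake2Hasher;
    use sp_trie::{read_trie_value, LayoutV1, MemoryDB, TrieHash};

    type Layout = LayoutV1<Blake2Hasher>;

    fn plain_read(
        db: &MemoryDB<Blake2Hasher>,
        root: &TrieHash<Layout>,
        key: &[u8],
    ) -> Option<Vec<u8>> {
        read_trie_value::<Layout, _>(db, root, key, None, None)
            .expect("trie lookup only fails on a corrupted database")
    }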
+pub fn read_child_trie_value( + keyspace: &[u8], db: &DB, root: &TrieHash, - recorder: &mut Recorder>, -) -> Result<(), Box>> + key: &[u8], + recorder: Option<&mut dyn TrieRecorder>>, + cache: Option<&mut dyn TrieCache>, +) -> Result>, Box>> where DB: hash_db::HashDBRef, { - let trie = TrieDB::::new(db, root)?; - let iter = trie.iter()?; - - for x in iter { - let (key, _) = x?; - - // there's currently no API like iter_with() - // => use iter to enumerate all keys AND lookup each - // key using get_with - trie.get_with(&key, &mut *recorder)?; - } - - Ok(()) + let db = KeySpacedDB::new(db, keyspace); + TrieDBBuilder::::new(&db, &root) + .with_optional_recorder(recorder) + .with_optional_cache(cache) + .build() + .get(key) + .map(|x| x.map(|val| val.to_vec())) } -/// Read a value from the child trie. -pub fn read_child_trie_value( +/// Read a hash from the child trie. +pub fn read_child_trie_hash( keyspace: &[u8], db: &DB, root: &TrieHash, key: &[u8], -) -> Result>, Box>> + recorder: Option<&mut dyn TrieRecorder>>, + cache: Option<&mut dyn TrieCache>, +) -> Result>, Box>> where DB: hash_db::HashDBRef, { let db = KeySpacedDB::new(db, keyspace); - TrieDB::::new(&db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) + TrieDBBuilder::::new(&db, &root) + .with_optional_recorder(recorder) + .with_optional_cache(cache) + .build() + .get_hash(key) } /// Read a value from the child trie with given query. @@ -401,20 +421,21 @@ where root.as_mut().copy_from_slice(root_slice); let db = KeySpacedDB::new(db, keyspace); - TrieDB::::new(&db, &root)? + TrieDBBuilder::::new(&db, &root) + .build() .get_with(key, query) .map(|x| x.map(|val| val.to_vec())) } /// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the /// prefix of every key value. -pub struct KeySpacedDB<'a, DB, H>(&'a DB, &'a [u8], PhantomData); +pub struct KeySpacedDB<'a, DB: ?Sized, H>(&'a DB, &'a [u8], PhantomData); /// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the /// prefix of every key value. /// /// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`]. -pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); +pub struct KeySpacedDBMut<'a, DB: ?Sized, H>(&'a mut DB, &'a [u8], PhantomData); /// Utility function used to merge some byte data (keyspace) and `prefix` data /// before calling key value database primitives. 
@@ -425,20 +446,14 @@ fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) (result, prefix.1) } -impl<'a, DB, H> KeySpacedDB<'a, DB, H> -where - H: Hasher, -{ +impl<'a, DB: ?Sized, H> KeySpacedDB<'a, DB, H> { /// instantiate new keyspaced db pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { KeySpacedDB(db, ks, PhantomData) } } -impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> -where - H: Hasher, -{ +impl<'a, DB: ?Sized, H> KeySpacedDBMut<'a, DB, H> { /// instantiate new keyspaced db pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { KeySpacedDBMut(db, ks, PhantomData) @@ -447,7 +462,7 @@ where impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef, + DB: hash_db::HashDBRef + ?Sized, H: Hasher, T: From<&'static [u8]>, { @@ -550,7 +565,7 @@ mod tests { let persistent = { let mut memdb = MemoryDBMeta::default(); let mut root = Default::default(); - let mut t = TrieDBMut::::new(&mut memdb, &mut root); + let mut t = TrieDBMutBuilder::::new(&mut memdb, &mut root).build(); for (x, y) in input.iter().rev() { t.insert(x, y).unwrap(); } @@ -564,13 +579,13 @@ mod tests { let mut memdb = MemoryDBMeta::default(); let mut root = Default::default(); { - let mut t = TrieDBMut::::new(&mut memdb, &mut root); + let mut t = TrieDBMutBuilder::::new(&mut memdb, &mut root).build(); for (x, y) in input.clone() { t.insert(x, y).unwrap(); } } { - let t = TrieDB::::new(&memdb, &root).unwrap(); + let t = TrieDBBuilder::::new(&memdb, &root).build(); assert_eq!( input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), t.iter() @@ -592,7 +607,7 @@ mod tests { fn default_trie_root() { let mut db = MemoryDB::default(); let mut root = TrieHash::::default(); - let mut empty = TrieDBMut::::new(&mut db, &mut root); + let mut empty = TrieDBMutBuilder::::new(&mut db, &mut root).build(); empty.commit(); let root1 = empty.root().as_ref().to_vec(); let root2: Vec = LayoutV1::trie_root::<_, Vec, Vec>(std::iter::empty()) @@ -695,15 +710,12 @@ mod tests { check_input(&input); } - fn populate_trie<'db, T>( + fn populate_trie<'db, T: TrieConfiguration>( db: &'db mut dyn HashDB, root: &'db mut TrieHash, v: &[(Vec, Vec)], - ) -> TrieDBMut<'db, T> - where - T: TrieConfiguration, - { - let mut t = TrieDBMut::::new(db, root); + ) -> TrieDBMut<'db, T> { + let mut t = TrieDBMutBuilder::::new(db, root).build(); for i in 0..v.len() { let key: &[u8] = &v[i].0; let val: &[u8] = &v[i].1; @@ -841,7 +853,7 @@ mod tests { let mut root = Default::default(); let _ = populate_trie::(&mut mdb, &mut root, &pairs); - let trie = TrieDB::::new(&mdb, &root).unwrap(); + let trie = TrieDBBuilder::::new(&mdb, &root).build(); let iter = trie.iter().unwrap(); let mut iter_pairs = Vec::new(); @@ -954,12 +966,16 @@ mod tests { &mut proof_db.clone(), storage_root, valid_delta, + None, + None, ) .unwrap(); let second_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, invalid_delta, + None, + None, ) .unwrap(); diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index bd0ba27483e66..4b3e69adb7041 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -25,7 +25,7 @@ use sp_std::{borrow::Borrow, marker::PhantomData, ops::Range, vec::Vec}; use trie_db::{ nibble_ops, node::{NibbleSlicePlan, NodeHandlePlan, NodePlan, Value, ValuePlan}, - ChildReference, NodeCodec as NodeCodecT, Partial, + ChildReference, NodeCodec as NodeCodecT, }; /// Helper struct for trie node decoder. 
This implements `codec::Input` on a byte slice, while @@ -85,7 +85,7 @@ where H: Hasher, { const ESCAPE_HEADER: Option = Some(trie_constants::ESCAPE_COMPACT_HEADER); - type Error = Error; + type Error = Error; type HashOut = H::Out; fn hashed_null_node() -> ::Out { @@ -185,19 +185,19 @@ where &[trie_constants::EMPTY_TRIE] } - fn leaf_node(partial: Partial, value: Value) -> Vec { + fn leaf_node(partial: impl Iterator, number_nibble: usize, value: Value) -> Vec { let contains_hash = matches!(&value, Value::Node(..)); let mut output = if contains_hash { - partial_encode(partial, NodeKind::HashedValueLeaf) + partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueLeaf) } else { - partial_encode(partial, NodeKind::Leaf) + partial_from_iterator_encode(partial, number_nibble, NodeKind::Leaf) }; match value { Value::Inline(value) => { Compact(value.len() as u32).encode_to(&mut output); output.extend_from_slice(value); }, - Value::Node(hash, _) => { + Value::Node(hash) => { debug_assert!(hash.len() == H::LENGTH); output.extend_from_slice(hash); }, @@ -244,7 +244,7 @@ where Compact(value.len() as u32).encode_to(&mut output); output.extend_from_slice(value); }, - Some(Value::Node(hash, _)) => { + Some(Value::Node(hash)) => { debug_assert!(hash.len() == H::LENGTH); output.extend_from_slice(hash); }, @@ -295,31 +295,6 @@ fn partial_from_iterator_encode>( output } -/// Encode and allocate node type header (type and size), and partial value. -/// Same as `partial_from_iterator_encode` but uses non encoded `Partial` as input. -fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { - let number_nibble_encoded = (partial.0).0 as usize; - let nibble_count = partial.1.len() * nibble_ops::NIBBLE_PER_BYTE + number_nibble_encoded; - - let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); - - let mut output = Vec::with_capacity(4 + partial.1.len()); - match node_kind { - NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), - NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), - NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - NodeKind::HashedValueLeaf => - NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output), - NodeKind::HashedValueBranch => - NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output), - }; - if number_nibble_encoded > 0 { - output.push(nibble_ops::pad_right((partial.0).1)); - } - output.extend_from_slice(partial.1); - output -} - const BITMAP_LENGTH: usize = 2; /// Radix 16 trie, bitmap encoding implementation, @@ -329,7 +304,7 @@ const BITMAP_LENGTH: usize = 2; pub(crate) struct Bitmap(u16); impl Bitmap { - pub fn decode(mut data: &[u8]) -> Result { + pub fn decode(mut data: &[u8]) -> Result { Ok(Bitmap(u16::decode(&mut data)?)) } diff --git a/primitives/trie/src/recorder.rs b/primitives/trie/src/recorder.rs new file mode 100644 index 0000000000000..5599ad1c36904 --- /dev/null +++ b/primitives/trie/src/recorder.rs @@ -0,0 +1,284 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Trie recorder +//! +//! Provides an implementation of the [`TrieRecorder`](trie_db::TrieRecorder) trait. It can be used +//! to record storage accesses to the state to generate a [`StorageProof`]. + +use crate::{NodeCodec, StorageProof}; +use codec::Encode; +use hash_db::Hasher; +use parking_lot::Mutex; +use std::{ + collections::HashMap, + marker::PhantomData, + mem, + ops::DerefMut, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; +use trie_db::{RecordedForKey, TrieAccess}; + +const LOG_TARGET: &str = "trie-recorder"; + +/// The internals of [`Recorder`]. +struct RecorderInner { + /// The keys for that we have recorded the trie nodes and if we have recorded up to the value. + recorded_keys: HashMap, RecordedForKey>, + /// The encoded nodes we accessed while recording. + accessed_nodes: HashMap>, +} + +impl Default for RecorderInner { + fn default() -> Self { + Self { recorded_keys: Default::default(), accessed_nodes: Default::default() } + } +} + +/// The trie recorder. +/// +/// It can be used to record accesses to the trie and then to convert them into a [`StorageProof`]. +pub struct Recorder { + inner: Arc>>, + /// The estimated encoded size of the storage proof this recorder will produce. + /// + /// We store this in an atomic to be able to fetch the value while the `inner` is may locked. + encoded_size_estimation: Arc, +} + +impl Default for Recorder { + fn default() -> Self { + Self { inner: Default::default(), encoded_size_estimation: Arc::new(0.into()) } + } +} + +impl Clone for Recorder { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + encoded_size_estimation: self.encoded_size_estimation.clone(), + } + } +} + +impl Recorder { + /// Returns the recorder as [`TrieRecorder`](trie_db::TrieRecorder) compatible type. + pub fn as_trie_recorder(&self) -> impl trie_db::TrieRecorder + '_ { + TrieRecorder:: { + inner: self.inner.lock(), + encoded_size_estimation: self.encoded_size_estimation.clone(), + _phantom: PhantomData, + } + } + + /// Drain the recording into a [`StorageProof`]. + /// + /// While a recorder can be cloned, all share the same internal state. After calling this + /// function, all other instances will have their internal state reset as well. + /// + /// If you don't want to drain the recorded state, use [`Self::to_storage_proof`]. + /// + /// Returns the [`StorageProof`]. + pub fn drain_storage_proof(self) -> StorageProof { + let mut recorder = mem::take(&mut *self.inner.lock()); + StorageProof::new(recorder.accessed_nodes.drain().map(|(_, v)| v)) + } + + /// Convert the recording to a [`StorageProof`]. + /// + /// In contrast to [`Self::drain_storage_proof`] this doesn't consumes and doesn't clears the + /// recordings. + /// + /// Returns the [`StorageProof`]. + pub fn to_storage_proof(&self) -> StorageProof { + let recorder = self.inner.lock(); + StorageProof::new(recorder.accessed_nodes.iter().map(|(_, v)| v.clone())) + } + + /// Returns the estimated encoded size of the proof. + /// + /// The estimation is based on all the nodes that were accessed until now while + /// accessing the trie. 
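Putting the public API together, a minimal sketch of recording a single read and draining it into a proof; the helper name `prove_read` is made up, and the aliases match the ones used in the tests at the end of this file:

    use sp_core::Blake2Hasher;
    use sp_trie::{
        recorder::Recorder, LayoutV1, MemoryDB, StorageProof, Trie, TrieDBBuilder, TrieHash,
    };

    type Layout = LayoutV1<Blake2Hasher>;

    fn prove_read(db: &MemoryDB<Blake2Hasher>, root: &TrieHash<Layout>, key: &[u8]) -> StorageProof {
        let recorder = Recorder::<Blake2Hasher>::default();

        {
            let mut trie_recorder = recorder.as_trie_recorder();
            let trie = TrieDBBuilder::<Layout>::new(db, root)
                .with_recorder(&mut trie_recorder)
                .build();
            let _ = trie.get(key).expect("db access works");
        }

        // `estimate_encoded_size` could be checked here to enforce a proof size limit
        // before draining the recording into the final proof.
        recorder.drain_storage_proof()
    }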
+ pub fn estimate_encoded_size(&self) -> usize { + self.encoded_size_estimation.load(Ordering::Relaxed) + } + + /// Reset the state. + /// + /// This discards all recorded data. + pub fn reset(&self) { + mem::take(&mut *self.inner.lock()); + self.encoded_size_estimation.store(0, Ordering::Relaxed); + } +} + +/// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. +struct TrieRecorder { + inner: I, + encoded_size_estimation: Arc, + _phantom: PhantomData, +} + +impl>> trie_db::TrieRecorder + for TrieRecorder +{ + fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) { + let mut encoded_size_update = 0; + + match access { + TrieAccess::NodeOwned { hash, node_owned } => { + tracing::trace!( + target: LOG_TARGET, + hash = ?hash, + "Recording node", + ); + + self.inner.accessed_nodes.entry(hash).or_insert_with(|| { + let node = node_owned.to_encoded::>(); + + encoded_size_update += node.encoded_size(); + + node + }); + }, + TrieAccess::EncodedNode { hash, encoded_node } => { + tracing::trace!( + target: LOG_TARGET, + hash = ?hash, + "Recording node", + ); + + self.inner.accessed_nodes.entry(hash).or_insert_with(|| { + let node = encoded_node.into_owned(); + + encoded_size_update += node.encoded_size(); + + node + }); + }, + TrieAccess::Value { hash, value, full_key } => { + tracing::trace!( + target: LOG_TARGET, + hash = ?hash, + key = ?sp_core::hexdisplay::HexDisplay::from(&full_key), + "Recording value", + ); + + self.inner.accessed_nodes.entry(hash).or_insert_with(|| { + let value = value.into_owned(); + + encoded_size_update += value.encoded_size(); + + value + }); + + self.inner + .recorded_keys + .entry(full_key.to_vec()) + .and_modify(|e| *e = RecordedForKey::Value) + .or_insert(RecordedForKey::Value); + }, + TrieAccess::Hash { full_key } => { + tracing::trace!( + target: LOG_TARGET, + key = ?sp_core::hexdisplay::HexDisplay::from(&full_key), + "Recorded hash access for key", + ); + + // We don't need to update the `encoded_size_update` as the hash was already + // accounted for by the recorded node that holds the hash. + self.inner + .recorded_keys + .entry(full_key.to_vec()) + .or_insert(RecordedForKey::Hash); + }, + TrieAccess::NonExisting { full_key } => { + tracing::trace!( + target: LOG_TARGET, + key = ?sp_core::hexdisplay::HexDisplay::from(&full_key), + "Recorded non-existing value access for key", + ); + + // Non-existing access means we recorded all trie nodes up to the value. + // Not the actual value, as it doesn't exist, but all trie nodes to know + // that the value doesn't exist in the trie. 
+ self.inner + .recorded_keys + .entry(full_key.to_vec()) + .and_modify(|e| *e = RecordedForKey::Value) + .or_insert(RecordedForKey::Value); + }, + }; + + self.encoded_size_estimation.fetch_add(encoded_size_update, Ordering::Relaxed); + } + + fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey { + self.inner.recorded_keys.get(key).copied().unwrap_or(RecordedForKey::None) + } +} + +#[cfg(test)] +mod tests { + use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut}; + + type MemoryDB = crate::MemoryDB; + type Layout = crate::LayoutV1; + type Recorder = super::Recorder; + + const TEST_DATA: &[(&[u8], &[u8])] = + &[(b"key1", b"val1"), (b"key2", b"val2"), (b"key3", b"val3"), (b"key4", b"val4")]; + + fn create_trie() -> (MemoryDB, TrieHash) { + let mut db = MemoryDB::default(); + let mut root = Default::default(); + + { + let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); + for (k, v) in TEST_DATA { + trie.insert(k, v).expect("Inserts data"); + } + } + + (db, root) + } + + #[test] + fn recorder_works() { + let (db, root) = create_trie(); + + let recorder = Recorder::default(); + + { + let mut trie_recorder = recorder.as_trie_recorder(); + let trie = TrieDBBuilder::::new(&db, &root) + .with_recorder(&mut trie_recorder) + .build(); + assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap()); + } + + let storage_proof = recorder.drain_storage_proof(); + let memory_db: MemoryDB = storage_proof.into_memory_db(); + + // Check that we recorded the required data + let trie = TrieDBBuilder::::new(&memory_db, &root).build(); + assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap()); + } +} diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index f6139584dbbad..8fdb04ee20ed0 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -88,7 +88,7 @@ impl StorageProof { pub fn into_compact_proof( self, root: H::Out, - ) -> Result> { + ) -> Result>> { crate::encode_compact::>(self, root) } @@ -130,7 +130,7 @@ impl CompactProof { pub fn to_storage_proof( &self, expected_root: Option<&H::Out>, - ) -> Result<(StorageProof, H::Out), crate::CompactProofError> { + ) -> Result<(StorageProof, H::Out), crate::CompactProofError>> { let mut db = crate::MemoryDB::::new(&[]); let root = crate::decode_compact::, _, _>( &mut db, @@ -157,7 +157,8 @@ impl CompactProof { pub fn to_memory_db( &self, expected_root: Option<&H::Out>, - ) -> Result<(crate::MemoryDB, H::Out), crate::CompactProofError> { + ) -> Result<(crate::MemoryDB, H::Out), crate::CompactProofError>> + { let mut db = crate::MemoryDB::::new(&[]); let root = crate::decode_compact::, _, _>( &mut db, diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index d29f5a98f31b9..949f9a6e284eb 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -78,7 +78,7 @@ where let mut child_tries = Vec::new(); { // fetch child trie roots - let trie = crate::TrieDB::::new(db, &top_root)?; + let trie = crate::TrieDBBuilder::::new(db, &top_root).build(); let mut iter = trie.iter()?; @@ -159,7 +159,7 @@ where let mut child_tries = Vec::new(); let partial_db = proof.into_memory_db(); let mut compact_proof = { - let trie = crate::TrieDB::::new(&partial_db, &root)?; + let trie = crate::TrieDBBuilder::::new(&partial_db, &root).build(); let mut iter = trie.iter()?; @@ -197,7 +197,7 @@ where continue } - let trie = crate::TrieDB::::new(&partial_db, &child_root)?; 
+ let trie = crate::TrieDBBuilder::::new(&partial_db, &child_root).build(); let child_proof = trie_db::encode_compact::(&trie)?; compact_proof.extend(child_proof); diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 1c2707b3719ad..6cea6282f5bd8 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -41,7 +41,7 @@ pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = ".. sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/finality-grandpa" } sp-trie = { version = "6.0.0", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" } -trie-db = { version = "0.23.1", default-features = false } +trie-db = { version = "0.24.0", default-features = false } parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } sc-service = { version = "0.10.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } sp-state-machine = { version = "0.12.0", default-features = false, path = "../../primitives/state-machine" } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index e5cfae49da56d..aa436f1ad2a91 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -29,7 +29,10 @@ use sp_std::{marker::PhantomData, prelude::*}; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{offchain::KeyTypeId, OpaqueMetadata, RuntimeDebug}; -use sp_trie::{trie_types::TrieDB, PrefixedMemoryDB, StorageProof}; +use sp_trie::{ + trie_types::{TrieDBBuilder, TrieDBMutBuilderV1}, + PrefixedMemoryDB, StorageProof, +}; use trie_db::{Trie, TrieMut}; use cfg_if::cfg_if; @@ -59,8 +62,6 @@ use sp_runtime::{ #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; -// bench on latest state. -use sp_trie::trie_types::TrieDBMutV1 as TrieDBMut; // Ensure Babe and Aura use the same crypto to simplify things a bit. pub use sp_consensus_babe::{AllowedSlots, AuthorityId, Slot}; @@ -663,25 +664,19 @@ fn code_using_trie() -> u64 { let mut mdb = PrefixedMemoryDB::default(); let mut root = sp_std::default::Default::default(); - let _ = { - let mut t = TrieDBMut::::new(&mut mdb, &mut root); + { + let mut t = TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); for (key, value) in &pairs { if t.insert(key, value).is_err() { return 101 } } - t - }; - - if let Ok(trie) = TrieDB::::new(&mdb, &root) { - if let Ok(iter) = trie.iter() { - iter.flatten().count() as u64 - } else { - 102 - } - } else { - 103 } + + let trie = TrieDBBuilder::::new(&mdb, &root).build(); + let res = if let Ok(iter) = trie.iter() { iter.flatten().count() as u64 } else { 102 }; + + res } impl_opaque_keys! 
{ @@ -1277,7 +1272,7 @@ fn test_read_child_storage() { fn test_witness(proof: StorageProof, root: crate::Hash) { use sp_externalities::Externalities; let db: sp_trie::MemoryDB = proof.into_memory_db(); - let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); + let backend = sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build(); let mut overlay = sp_state_machine::OverlayedChanges::default(); let mut cache = sp_state_machine::StorageTransactionCache::<_, _>::default(); let mut ext = sp_state_machine::Ext::new( @@ -1354,7 +1349,8 @@ mod tests { let mut root = crate::Hash::default(); let mut mdb = sp_trie::MemoryDB::::default(); { - let mut trie = sp_trie::trie_types::TrieDBMutV1::new(&mut mdb, &mut root); + let mut trie = + sp_trie::trie_types::TrieDBMutBuilderV1::new(&mut mdb, &mut root).build(); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); }; @@ -1364,7 +1360,8 @@ mod tests { #[test] fn witness_backend_works() { let (db, root) = witness_backend(); - let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); + let backend = + sp_state_machine::TrieBackendBuilder::<_, crate::Hashing>::new(db, root).build(); let proof = sp_state_machine::prove_read(backend, vec![b"value3"]).unwrap(); let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index 96c7a997895d4..6e4f092084ef5 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -93,9 +93,9 @@ impl CliConfiguration for BenchmarkCmd { } } - fn state_cache_size(&self) -> Result { + fn trie_cache_maximum_size(&self) -> Result> { unwrap_cmd! { - self, cmd, cmd.state_cache_size() + self, cmd, cmd.trie_cache_maximum_size() } } diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index 0a92636b151a2..de5e189b40db0 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -96,9 +96,11 @@ pub struct StorageParams { #[clap(long, possible_values = ["0", "1"])] pub state_version: u8, - /// State cache size. - #[clap(long, default_value = "0")] - pub state_cache_size: usize, + /// Trie cache size in bytes. + /// + /// Providing `0` will disable the cache. + #[clap(long, default_value = "1024")] + pub trie_cache_size: usize, /// Include child trees in benchmark. 
#[clap(long)] @@ -211,7 +213,11 @@ impl CliConfiguration for StorageCmd { Some(&self.pruning_params) } - fn state_cache_size(&self) -> Result { - Ok(self.params.state_cache_size) + fn trie_cache_maximum_size(&self) -> Result> { + if self.params.trie_cache_size == 0 { + Ok(None) + } else { + Ok(Some(self.params.trie_cache_size)) + } } } diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index 0d10ef1b7414d..9a3821a7095f8 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -17,7 +17,7 @@ use sc_cli::Result; use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider}; -use sc_client_db::{DbHash, DbState}; +use sc_client_db::{DbHash, DbState, DbStateBuilder}; use sp_api::StateBackend; use sp_blockchain::HeaderBackend; use sp_database::{ColumnId, Transaction}; @@ -60,7 +60,7 @@ impl StorageCmd { let block = BlockId::Number(client.usage_info().chain.best_number); let header = client.header(block)?.ok_or("Header not found")?; let original_root = *header.state_root(); - let trie = DbState::::new(storage.clone(), original_root); + let trie = DbStateBuilder::::new(storage.clone(), original_root).build(); info!("Preparing keys from block {}", block); // Load all KV pairs and randomly shuffle them. diff --git a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 00fdc87a506e8..d45e502df276c 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -23,7 +23,7 @@ sp-io = { path = "../../../../primitives/io" } sp-core = { path = "../../../../primitives/core" } sp-state-machine = { path = "../../../../primitives/state-machine" } sp-trie = { path = "../../../../primitives/trie" } -trie-db = { version = "0.23.1" } +trie-db = "0.24.0" jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index b6d403ff2fcfd..f9a57206ece4d 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -31,8 +31,11 @@ use sp_core::{ storage::{ChildInfo, ChildType, PrefixedStorageKey}, Hasher, }; -use sp_state_machine::Backend; -use sp_trie::{trie_types::TrieDB, KeySpacedDB, Trie}; +use sp_state_machine::backend::AsTrieBackend; +use sp_trie::{ + trie_types::{TrieDB, TrieDBBuilder}, + KeySpacedDB, Trie, +}; use trie_db::{ node::{NodePlan, ValuePlan}, TrieDBNodeIterator, @@ -41,9 +44,9 @@ use trie_db::{ fn count_migrate<'a, H: Hasher>( storage: &'a dyn trie_db::HashDBRef>, root: &'a H::Out, -) -> std::result::Result<(u64, TrieDB<'a, H>), String> { +) -> std::result::Result<(u64, TrieDB<'a, 'a, H>), String> { let mut nb = 0u64; - let trie = TrieDB::new(storage, root).map_err(|e| format!("TrieDB creation error: {}", e))?; + let trie = TrieDBBuilder::new(storage, root).build(); let iter_node = TrieDBNodeIterator::new(&trie).map_err(|e| format!("TrieDB node iterator error: {}", e))?; for node in iter_node { @@ -68,13 +71,9 @@ pub fn migration_status(backend: &B) -> std::result::Result<(u64, u64), St where H: Hasher, H::Out: codec::Codec, - B: Backend, + B: AsTrieBackend, { - let trie_backend = if let Some(backend) = backend.as_trie_backend() { - backend - } else { - return Err("No access to trie from backend.".to_string()) - }; + let trie_backend = 
backend.as_trie_backend(); let essence = trie_backend.essence(); let (nb_to_migrate, trie) = count_migrate(essence, essence.root())?; diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index f77f92c625c9d..be7ec923a55bc 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -293,7 +293,7 @@ use sp_runtime::{ traits::{Block as BlockT, NumberFor}, DeserializeOwned, }; -use sp_state_machine::{InMemoryProvingBackend, OverlayedChanges, StateMachine}; +use sp_state_machine::{OverlayedChanges, StateMachine, TrieBackendBuilder}; use std::{fmt::Debug, path::PathBuf, str::FromStr}; mod commands; @@ -746,9 +746,11 @@ pub(crate) fn state_machine_call_with_proof(Into::into)?; - let proof = proving_backend.extract_proof(); + let proof = proving_backend + .extract_proof() + .expect("A recorder was set and thus, a storage proof can be extracted; qed"); let proof_size = proof.encoded_size(); let compact_proof = proof .clone() From a357c29ebabb075235977edd5e3901c66575f995 Mon Sep 17 00:00:00 2001 From: Jake Edwards Date: Fri, 19 Aug 2022 03:03:48 -0500 Subject: [PATCH 047/348] Fix broken link to web3 foundation research (#12066) --- frame/staking/reward-curve/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index e66f6fde37599..e1ea8aa7b15d5 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -28,7 +28,7 @@ use syn::parse::{Parse, ParseStream}; /// Accepts a number of expressions to create a instance of PiecewiseLinear which represents the /// NPoS curve (as detailed -/// [here](https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model)) +/// [here](https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model)) /// for those parameters. Parameters are: /// - `min_inflation`: the minimal amount to be rewarded between validators, expressed as a fraction /// of total issuance. Known as `I_0` in the literature. Expressed in millionth, must be between 0 From 90d0247012bfb801b52eafb5c1e592b65ad972bf Mon Sep 17 00:00:00 2001 From: yjh Date: Fri, 19 Aug 2022 16:50:12 +0800 Subject: [PATCH 048/348] chore(executor): make allow_missing_host_functions could be configurable (#12015) * make `with_instance` pub * make `allow_missing_host_functions` could be configurable * add method `allow_missing_host_functions` * update usage * update usage * fix comments * fix `with_instance` --- .../manual-seal/src/consensus/timestamp.rs | 4 +- client/executor/src/native_executor.rs | 37 ++++++++++++------- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/client/consensus/manual-seal/src/consensus/timestamp.rs b/client/consensus/manual-seal/src/consensus/timestamp.rs index e7f4e709ab996..70b5e5de4ec6c 100644 --- a/client/consensus/manual-seal/src/consensus/timestamp.rs +++ b/client/consensus/manual-seal/src/consensus/timestamp.rs @@ -46,10 +46,10 @@ use std::{ /// This works by either fetching the `slot_number` from the most recent header and dividing /// that value by `slot_duration` in order to fork chains that expect this inherent. /// -/// It produces timestamp inherents that are increaed by `slot_duraation` whenever +/// It produces timestamp inherents that are increased by `slot_duration` whenever /// `provide_inherent_data` is called. 
pub struct SlotTimestampProvider { - // holds the unix millisecnd timestamp for the most recent block + // holds the unix millisecond timestamp for the most recent block unix_millis: atomic::AtomicU64, // configured slot_duration in the runtime slot_duration: SlotDuration, diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 4d8f559d700b5..7610c4c8f32e0 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -101,7 +101,8 @@ pub struct WasmExecutor { /// The path to a directory which the executor can leverage for a file cache, e.g. put there /// compiled artifacts. cache_path: Option, - + /// Ignore missing function imports. + allow_missing_host_functions: bool, phantom: PhantomData, } @@ -112,6 +113,7 @@ impl Clone for WasmExecutor { default_heap_pages: self.default_heap_pages, cache: self.cache.clone(), cache_path: self.cache_path.clone(), + allow_missing_host_functions: self.allow_missing_host_functions, phantom: self.phantom, } } @@ -153,10 +155,16 @@ where runtime_cache_size, )), cache_path, + allow_missing_host_functions: false, phantom: PhantomData, } } + /// Ignore missing function imports if set true. + pub fn allow_missing_host_functions(&mut self, allow_missing_host_functions: bool) { + self.allow_missing_host_functions = allow_missing_host_functions + } + /// Execute the given closure `f` with the latest runtime (based on `runtime_code`). /// /// The closure `f` is expected to return `Err(_)` when there happened a `panic!` in native code @@ -170,11 +178,10 @@ where /// runtime is invalidated on any `panic!` to prevent a poisoned state. `ext` is already /// implicitly handled as unwind safe, as we store it in a global variable while executing the /// native runtime. - fn with_instance( + pub fn with_instance( &self, runtime_code: &RuntimeCode, ext: &mut dyn Externalities, - allow_missing_host_functions: bool, f: F, ) -> Result where @@ -190,7 +197,7 @@ where ext, self.method, self.default_heap_pages, - allow_missing_host_functions, + self.allow_missing_host_functions, |module, instance, version, ext| { let module = AssertUnwindSafe(module); let instance = AssertUnwindSafe(instance); @@ -353,7 +360,6 @@ where let result = self.with_instance( runtime_code, ext, - false, |module, mut instance, _onchain_version, mut ext| { with_externalities_safe(&mut **ext, move || { preregister_builtin_ext(module.clone()); @@ -374,7 +380,7 @@ where ext: &mut dyn Externalities, runtime_code: &RuntimeCode, ) -> Result { - self.with_instance(runtime_code, ext, false, |_module, _instance, version, _ext| { + self.with_instance(runtime_code, ext, |_module, _instance, version, _ext| { Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) }) } @@ -387,7 +393,7 @@ where D: NativeExecutionDispatch, { /// Dummy field to avoid the compiler complaining about us not using `D`. - _dummy: std::marker::PhantomData, + _dummy: PhantomData, /// Native runtime version info. native_version: NativeVersion, /// Fallback wasm executor. @@ -414,7 +420,7 @@ impl NativeElseWasmExecutor { max_runtime_instances: usize, runtime_cache_size: u8, ) -> Self { - let wasm_executor = WasmExecutor::new( + let wasm = WasmExecutor::new( fallback_method, default_heap_pages, max_runtime_instances, @@ -425,9 +431,14 @@ impl NativeElseWasmExecutor { NativeElseWasmExecutor { _dummy: Default::default(), native_version: D::native_version(), - wasm: wasm_executor, + wasm, } } + + /// Ignore missing function imports if set true. 
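A rough usage sketch for the new switch (not from the patch; the constructor argument values are placeholders and the dispatch type is left generic): construct the executor as before, then opt into lenient host-function imports.

    use sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch, WasmExecutionMethod};

    fn lenient_executor<D: NativeExecutionDispatch>() -> NativeElseWasmExecutor<D> {
        let mut executor = NativeElseWasmExecutor::<D>::new(
            WasmExecutionMethod::Interpreted,
            Some(64), // default heap pages
            8,        // max runtime instances
            2,        // runtime cache size
        );
        // Newly configurable: tolerate host functions the runtime imports but
        // this node does not provide, instead of failing instantiation.
        executor.allow_missing_host_functions(true);
        executor
    }
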
+ pub fn allow_missing_host_functions(&mut self, allow_missing_host_functions: bool) { + self.wasm.allow_missing_host_functions = allow_missing_host_functions + } } impl RuntimeVersionOf for NativeElseWasmExecutor { @@ -436,10 +447,9 @@ impl RuntimeVersionOf for NativeElseWasmExecutor ext: &mut dyn Externalities, runtime_code: &RuntimeCode, ) -> Result { - self.wasm - .with_instance(runtime_code, ext, false, |_module, _instance, version, _ext| { - Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) - }) + self.wasm.with_instance(runtime_code, ext, |_module, _instance, version, _ext| { + Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) + }) } } @@ -606,7 +616,6 @@ impl CodeExecutor for NativeElseWasmExecut let result = self.wasm.with_instance( runtime_code, ext, - false, |module, mut instance, onchain_version, mut ext| { let onchain_version = onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; From c76c40ac94c1e66c38af6fcf2dba18e401b125b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 19 Aug 2022 13:07:14 +0200 Subject: [PATCH 049/348] Fix benchmarks and adds CI to test them (#12068) * Fix benchmarks and adds CI to test them Instead of waiting for benchmarks failing in Polkadot CI, we also can just test them in Substrate :P * Do not overflow --- client/allocator/src/freeing_bump.rs | 4 ++-- frame/democracy/src/benchmarking.rs | 4 +++- frame/identity/src/benchmarking.rs | 6 ++++-- frame/offences/benchmarking/src/lib.rs | 2 +- frame/proxy/src/benchmarking.rs | 10 +++++++--- scripts/ci/gitlab/pipeline/test.yml | 18 ++++++++++++++++++ .../benchmarking-cli/src/pallet/command.rs | 5 +++++ 7 files changed, 40 insertions(+), 9 deletions(-) diff --git a/client/allocator/src/freeing_bump.rs b/client/allocator/src/freeing_bump.rs index 13dc6dca0dcd6..e81d1b79e74ed 100644 --- a/client/allocator/src/freeing_bump.rs +++ b/client/allocator/src/freeing_bump.rs @@ -331,7 +331,7 @@ pub struct AllocationStats { /// The sum of every allocation ever made. /// /// This increases every time a new allocation is made. - pub bytes_allocated_sum: u32, + pub bytes_allocated_sum: u128, /// The amount of address space (in bytes) used by the allocator. /// @@ -435,7 +435,7 @@ impl FreeingBumpHeapAllocator { Header::Occupied(order).write_into(mem, header_ptr)?; self.stats.bytes_allocated += order.size() + HEADER_SIZE; - self.stats.bytes_allocated_sum += order.size() + HEADER_SIZE; + self.stats.bytes_allocated_sum += u128::from(order.size() + HEADER_SIZE); self.stats.bytes_allocated_peak = std::cmp::max(self.stats.bytes_allocated_peak, self.stats.bytes_allocated); self.stats.address_space_used = self.bumper - self.original_heap_base; diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 9b275043df43e..d5db82de3840d 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -43,7 +43,9 @@ fn assert_last_event(generic_event: ::Event) { fn funded_account(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + // Give the account half of the maximum value of the `Balance` type. + // Otherwise some transfers will fail with an overflow error. 
+ T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); caller } diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 20af41d392089..5d409f48bf567 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -120,7 +120,8 @@ benchmarks! { let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; ensure!(Registrars::::get().len() as u32 == r, "Registrars not set up correctly."); let origin = T::RegistrarOrigin::successful_origin(); - }: _(origin, account("registrar", r + 1, SEED)) + let account = T::Lookup::unlookup(account("registrar", r + 1, SEED)); + }: _(origin, account) verify { ensure!(Registrars::::get().len() as u32 == r + 1, "Registrars not added."); } @@ -287,7 +288,8 @@ benchmarks! { Identity::::add_registrar(registrar_origin, caller_lookup)?; let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().account == caller, "id not set."); - }: _(RawOrigin::Signed(caller), r, account("new", 0, SEED)) + let new_account = T::Lookup::unlookup(account("new", 0, SEED)); + }: _(RawOrigin::Signed(caller), r, new_account) verify { let registrars = Registrars::::get(); ensure!(registrars[r as usize].as_ref().unwrap().account == account("new", 0, SEED), "id not changed."); diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index b793bd8d2699a..5d81a71d4c47c 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -305,7 +305,7 @@ benchmarks! { verify { let bond_amount: u32 = UniqueSaturatedInto::::unique_saturated_into(bond_amount::()); let slash_amount = slash_fraction * bond_amount; - let reward_amount = slash_amount * (1 + n) / 2; + let reward_amount = slash_amount.saturating_mul(1 + n) / 2; let reward = reward_amount / r; let slash = |id| core::iter::once( ::Event::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index adaaebb0adc98..c01a8da000c09 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -35,9 +35,11 @@ fn add_proxies(n: u32, maybe_who: Option) -> Result<(), let caller = maybe_who.unwrap_or_else(whitelisted_caller); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value() / 2u32.into()); for i in 0..n { + let real = T::Lookup::unlookup(account("target", i, SEED)); + Proxy::::add_proxy( RawOrigin::Signed(caller.clone()).into(), - account("target", i, SEED), + real, T::ProxyType::default(), T::BlockNumber::zero(), )?; @@ -180,9 +182,10 @@ benchmarks! { add_proxy { let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); + let real = T::Lookup::unlookup(account("target", T::MaxProxies::get(), SEED)); }: _( RawOrigin::Signed(caller.clone()), - account("target", T::MaxProxies::get(), SEED), + real, T::ProxyType::default(), T::BlockNumber::zero() ) @@ -194,9 +197,10 @@ benchmarks! { remove_proxy { let p in 1 .. 
(T::MaxProxies::get() - 1) => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); + let delegate = T::Lookup::unlookup(account("target", 0, SEED)); }: _( RawOrigin::Signed(caller.clone()), - account("target", 0, SEED), + delegate, T::ProxyType::default(), T::BlockNumber::zero() ) diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index a58015c6e22d6..acd2cb1fa7855 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -277,6 +277,24 @@ test-linux-stable-extra: - time cargo test --doc --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path ./bin/node/cli/Cargo.toml - rusty-cachier cache upload +# This job runs all benchmarks defined in the `/bin/node/runtime` once to check that there are no errors. +quick-benchmarks: + stage: test + extends: + - .docker-env + - .test-refs + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + RUST_BACKTRACE: "full" + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + script: + - rusty-cachier snapshot create + - time cargo run --release --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 + - rusty-cachier cache upload + test-frame-examples-compile-to-wasm: # into one job stage: test diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index fb6f1393650ad..0fc7cc4d783f7 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -228,6 +228,11 @@ impl PalletCmd { let mut component_ranges = HashMap::<(Vec, Vec), Vec>::new(); for (pallet, extrinsic, components) in benchmarks_to_run { + log::info!( + "Starting benchmark: {}::{}", + String::from_utf8(pallet.clone()).expect("Encoded from String; qed"), + String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"), + ); let all_components = if components.is_empty() { vec![Default::default()] } else { From b7d2cb5c2dbfbc49cb3c06f6198daa60b04f16c0 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 19 Aug 2022 15:15:57 +0200 Subject: [PATCH 050/348] Revert "Auto-incremental CollectionId (#11796)" (#12059) This reverts commit 7d8e5a1e3979cb0fc61123d5cfeec974dfa25334. 
Signed-off-by: Oliver Tale-Yazdi --- Cargo.lock | 4 +- frame/uniques/src/benchmarking.rs | 25 ++--- frame/uniques/src/functions.rs | 15 --- frame/uniques/src/lib.rs | 59 ++--------- frame/uniques/src/tests.rs | 83 +++++---------- frame/uniques/src/weights.rs | 169 +++++++++++++----------------- 6 files changed, 121 insertions(+), 234 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1579c887b8fed..060cd960ee985 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5043,9 +5043,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.15" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg", "libm", diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs index 8aa1512883448..3e3148b5b5fc2 100644 --- a/frame/uniques/src/benchmarking.rs +++ b/frame/uniques/src/benchmarking.rs @@ -42,10 +42,13 @@ fn create_collection, I: 'static>( let caller_lookup = T::Lookup::unlookup(caller.clone()); let collection = T::Helper::collection(0); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - assert!( - Uniques::::force_create(SystemOrigin::Root.into(), caller_lookup.clone(), false,) - .is_ok() - ); + assert!(Uniques::::force_create( + SystemOrigin::Root.into(), + collection, + caller_lookup.clone(), + false, + ) + .is_ok()); (collection, caller, caller_lookup) } @@ -139,7 +142,7 @@ benchmarks_instance_pallet! { whitelist_account!(caller); let admin = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - let call = Call::::create { admin }; + let call = Call::::create { collection, admin }; }: { call.dispatch_bypass_filter(origin)? } verify { assert_last_event::(Event::Created { collection: T::Helper::collection(0), creator: caller.clone(), owner: caller }.into()); @@ -148,7 +151,7 @@ benchmarks_instance_pallet! { force_create { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - }: _(SystemOrigin::Root, caller_lookup, true) + }: _(SystemOrigin::Root, T::Helper::collection(0), caller_lookup, true) verify { assert_last_event::(Event::ForceCreated { collection: T::Helper::collection(0), owner: caller }.into()); } @@ -404,16 +407,6 @@ benchmarks_instance_pallet! { }.into()); } - try_increment_id { - let (_, caller, _) = create_collection::(); - Uniques::::set_next_id(0); - }: _(SystemOrigin::Signed(caller.clone())) - verify { - assert_last_event::(Event::NextCollectionIdIncremented { - next_id: 1u32.into() - }.into()); - } - set_price { let (collection, caller, _) = create_collection::(); let (item, ..) 
= mint_item::(0); diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index e9169bdd963b4..107214558307f 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -88,12 +88,7 @@ impl, I: 'static> Pallet { }, ); - let next_id = collection.saturating_add(1u32.into()); - CollectionAccount::::insert(&owner, &collection, ()); - NextCollectionId::::set(next_id); - - Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); Self::deposit_event(event); Ok(()) } @@ -213,16 +208,6 @@ impl, I: 'static> Pallet { Ok(()) } - #[cfg(any(test, feature = "runtime-benchmarks"))] - pub fn set_next_id(count: u32) { - NextCollectionId::::set(count.into()); - } - - #[cfg(test)] - pub fn get_next_id() -> T::CollectionId { - NextCollectionId::::get() - } - pub fn do_set_price( collection: T::CollectionId, item: T::ItemId, diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index de5a774d16061..70f10ca4f8b39 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -51,7 +51,7 @@ use frame_support::{ }; use frame_system::Config as SystemConfig; use sp_runtime::{ - traits::{AtLeast32BitUnsigned, Saturating, StaticLookup, Zero}, + traits::{Saturating, StaticLookup, Zero}, ArithmeticError, RuntimeDebug, }; use sp_std::prelude::*; @@ -94,12 +94,7 @@ pub mod pallet { type Event: From> + IsType<::Event>; /// Identifier for the collection of item. - type CollectionId: Member - + Parameter - + MaxEncodedLen - + Copy - + Default - + AtLeast32BitUnsigned; + type CollectionId: Member + Parameter + MaxEncodedLen + Copy; /// The type used to identify a unique item within a collection. type ItemId: Member + Parameter + MaxEncodedLen + Copy; @@ -273,12 +268,6 @@ pub mod pallet { pub(super) type CollectionMaxSupply, I: 'static = ()> = StorageMap<_, Blake2_128Concat, T::CollectionId, u32, OptionQuery>; - #[pallet::storage] - /// Stores the `CollectionId` that is going to be used for the next collection. - /// This gets incremented by 1 whenever a new collection is created. - pub(super) type NextCollectionId, I: 'static = ()> = - StorageValue<_, T::CollectionId, ValueQuery>; - #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -370,8 +359,6 @@ pub mod pallet { OwnershipAcceptanceChanged { who: T::AccountId, maybe_collection: Option }, /// Max supply has been set for a collection. CollectionMaxSupplySet { collection: T::CollectionId, max_supply: u32 }, - /// Event gets emmited when the `NextCollectionId` gets incremented. - NextCollectionIdIncremented { next_id: T::CollectionId }, /// The price was set for the instance. ItemPriceSet { collection: T::CollectionId, @@ -423,10 +410,6 @@ pub mod pallet { MaxSupplyAlreadySet, /// The provided max supply is less to the amount of items a collection already has. MaxSupplyTooSmall, - /// The `CollectionId` in `NextCollectionId` is not being used. - /// - /// This means that you can directly proceed to call `create`. - NextIdNotUsed, /// The given item ID is unknown. UnknownItem, /// Item is not for sale. @@ -458,6 +441,7 @@ pub mod pallet { /// `ItemDeposit` funds of sender are reserved. /// /// Parameters: + /// - `collection`: The identifier of the new collection. This must not be currently in use. /// - `admin`: The admin of this collection. The admin is the initial address of each /// member of the collection's admin team. 
/// @@ -465,9 +449,11 @@ pub mod pallet { /// /// Weight: `O(1)` #[pallet::weight(T::WeightInfo::create())] - pub fn create(origin: OriginFor, admin: AccountIdLookupOf) -> DispatchResult { - let collection = NextCollectionId::::get(); - + pub fn create( + origin: OriginFor, + collection: T::CollectionId, + admin: AccountIdLookupOf, + ) -> DispatchResult { let owner = T::CreateOrigin::ensure_origin(origin, &collection)?; let admin = T::Lookup::lookup(admin)?; @@ -489,6 +475,7 @@ pub mod pallet { /// /// Unlike `create`, no funds are reserved. /// + /// - `collection`: The identifier of the new item. This must not be currently in use. /// - `owner`: The owner of this collection of items. The owner has full superuser /// permissions /// over this item, but may later change and configure the permissions using @@ -500,14 +487,13 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::force_create())] pub fn force_create( origin: OriginFor, + collection: T::CollectionId, owner: AccountIdLookupOf, free_holding: bool, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; let owner = T::Lookup::lookup(owner)?; - let collection = NextCollectionId::::get(); - Self::do_create_collection( collection, owner.clone(), @@ -518,31 +504,6 @@ pub mod pallet { ) } - /// Increments the `CollectionId` stored in `NextCollectionId`. - /// - /// This is only callable when the next `CollectionId` is already being - /// used for some other collection. - /// - /// The origin must be Signed and the sender must have sufficient funds - /// free. - /// - /// Emits `NextCollectionIdIncremented` event when successful. - /// - /// Weight: `O(1)` - #[pallet::weight(T::WeightInfo::try_increment_id())] - pub fn try_increment_id(origin: OriginFor) -> DispatchResult { - ensure_signed(origin)?; - ensure!( - Collection::::contains_key(NextCollectionId::::get()), - Error::::NextIdNotUsed - ); - - let next_id = NextCollectionId::::get().saturating_add(1u32.into()); - NextCollectionId::::set(next_id); - Self::deposit_event(Event::NextCollectionIdIncremented { next_id }); - Ok(()) - } - /// Destroy a collection of fungible items. 
/// /// The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be the diff --git a/frame/uniques/src/tests.rs b/frame/uniques/src/tests.rs index bd3a2b032945e..8b1d00d7ba0c7 100644 --- a/frame/uniques/src/tests.rs +++ b/frame/uniques/src/tests.rs @@ -92,12 +92,12 @@ fn basic_setup_works() { #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_eq!(collections(), vec![(1, 0)]); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); assert_eq!(items(), vec![(1, 0, 42)]); - assert_ok!(Uniques::force_create(Origin::root(), 2, true)); + assert_ok!(Uniques::force_create(Origin::root(), 1, 2, true)); assert_eq!(collections(), vec![(1, 0), (2, 1)]); assert_ok!(Uniques::mint(Origin::signed(2), 1, 69, 1)); assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); @@ -108,7 +108,7 @@ fn basic_minting_should_work() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::create(Origin::signed(1), 1)); + assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(collections(), vec![(1, 0)]); assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0, 0], false)); @@ -151,7 +151,7 @@ fn lifecycle_should_work() { fn destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::create(Origin::signed(1), 1)); + assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); let w = Collection::::get(0).unwrap().destroy_witness(); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); @@ -162,7 +162,7 @@ fn destroy_with_bad_witness_should_not_work() { #[test] fn mint_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); assert_eq!(Uniques::owner(0, 42).unwrap(), 1); assert_eq!(collections(), vec![(1, 0)]); @@ -173,7 +173,7 @@ fn mint_should_work() { #[test] fn transfer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 3)); @@ -188,7 +188,7 @@ fn transfer_should_work() { #[test] fn freezing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); assert_ok!(Uniques::freeze(Origin::signed(1), 0, 42)); assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); @@ -205,7 +205,7 @@ fn freezing_should_work() { #[test] fn origin_guards_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); Balances::make_free_balance_be(&2, 100); @@ -230,7 +230,7 @@ fn transfer_owner_should_work() { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); Balances::make_free_balance_be(&3, 100); - assert_ok!(Uniques::create(Origin::signed(1), 1)); + 
assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); assert_eq!(collections(), vec![(1, 0)]); assert_noop!( Uniques::transfer_ownership(Origin::signed(1), 0, 2), @@ -275,7 +275,7 @@ fn transfer_owner_should_work() { #[test] fn set_team_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 2)); @@ -294,7 +294,7 @@ fn set_collection_metadata_should_work() { Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false), Error::::UnknownCollection, ); - assert_ok!(Uniques::force_create(Origin::root(), 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); // Cannot add metadata to unowned item assert_noop!( Uniques::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false), @@ -354,7 +354,7 @@ fn set_item_metadata_should_work() { Balances::make_free_balance_be(&1, 30); // Cannot add metadata to unknown item - assert_ok!(Uniques::force_create(Origin::root(), 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); // Cannot add metadata to unowned item assert_noop!( @@ -410,7 +410,7 @@ fn set_attribute_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); @@ -455,7 +455,7 @@ fn set_attribute_should_respect_freeze() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); @@ -487,7 +487,7 @@ fn force_item_status_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 2)); assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); @@ -521,7 +521,7 @@ fn force_item_status_should_work() { fn burn_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 1, false)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); assert_noop!( @@ -545,7 +545,7 @@ fn burn_works() { #[test] fn approval_lifecycle_works() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 4)); @@ -560,7 +560,7 @@ fn approval_lifecycle_works() { #[test] fn cancel_approval_works() { 
new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); @@ -592,7 +592,7 @@ fn cancel_approval_works() { #[test] fn cancel_approval_works_with_admin() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); @@ -620,7 +620,7 @@ fn cancel_approval_works_with_admin() { #[test] fn cancel_approval_works_with_force() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); @@ -653,7 +653,7 @@ fn max_supply_should_work() { let max_supply = 2; // validate set_collection_max_supply - assert_ok!(Uniques::force_create(Origin::root(), user_id, true)); + assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); assert!(!CollectionMaxSupply::::contains_key(collection_id)); assert_ok!(Uniques::set_collection_max_supply( @@ -695,48 +695,15 @@ fn max_supply_should_work() { }); } -#[test] -fn try_increment_id_works() { - new_test_ext().execute_with(|| { - // should fail because the next `CollectionId` is not being used. - assert_noop!(Uniques::try_increment_id(Origin::signed(2)), Error::::NextIdNotUsed); - - // create two collections & check for events. - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); - assert!(events().contains(&Event::::NextCollectionIdIncremented { next_id: 1 })); - assert_ok!(Uniques::force_create(Origin::root(), 1, true)); - assert!(events().contains(&Event::::NextCollectionIdIncremented { next_id: 2 })); - - // there are now two collections. - assert_eq!(Uniques::get_next_id(), 2); - - // reset the collections counter to test if the `try_increment_id` - // works. - Uniques::set_next_id(0); - assert_ok!(Uniques::try_increment_id(Origin::signed(2))); - - // `try_increment_id` should emit an event when successful. - assert!(events().contains(&Event::::NextCollectionIdIncremented { next_id: 1 })); - - // because reset, the collections count should be now 1 - assert_eq!(Uniques::get_next_id(), 1); - - // increment the collections count again. - assert_ok!(Uniques::try_increment_id(Origin::signed(2))); - // should fail because the next `CollectionId` is not being used. 
- assert_noop!(Uniques::try_increment_id(Origin::signed(2)), Error::::NextIdNotUsed); - }); -} - #[test] fn set_price_should_work() { new_test_ext().execute_with(|| { let user_id = 1; - let collection_id: u32 = 0; + let collection_id = 0; let item_1 = 1; let item_2 = 2; - assert_ok!(Uniques::force_create(Origin::root(), user_id, true)); + assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_1, user_id)); assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_2, user_id)); @@ -788,7 +755,7 @@ fn buy_item_should_work() { let user_1 = 1; let user_2 = 2; let user_3 = 3; - let collection_id: u32 = 0; + let collection_id = 0; let item_1 = 1; let item_2 = 2; let item_3 = 3; @@ -800,7 +767,7 @@ fn buy_item_should_work() { Balances::make_free_balance_be(&user_2, initial_balance); Balances::make_free_balance_be(&user_3, initial_balance); - assert_ok!(Uniques::force_create(Origin::root(), user_1, true)); + assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_1, true)); assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_1, user_1)); assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_2, user_1)); diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index 75bb89f26b428..7c8cb170b1b1d 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -18,12 +18,12 @@ //! Autogenerated weights for pallet_uniques //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-07-15, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-07-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `test-bench-bot`, CPU: `Intel(R) Xeon(R) CPU @ 3.10GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// target/production/substrate // benchmark // pallet // --steps=50 @@ -70,7 +70,6 @@ pub trait WeightInfo { fn cancel_approval() -> Weight; fn set_accept_ownership() -> Weight; fn set_collection_max_supply() -> Weight; - fn try_increment_id() -> Weight; fn set_price() -> Weight; fn buy_item() -> Weight; } @@ -78,21 +77,19 @@ pub trait WeightInfo { /// Weights for pallet_uniques using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (30_481_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + (33_075_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } - // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (19_811_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + (19_528_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -107,12 +104,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 17_000 - .saturating_add((10_950_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 17_000 - .saturating_add((1_657_000 as Weight).saturating_mul(m as Weight)) - // Standard Error: 17_000 - .saturating_add((1_512_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 25_000 + .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 25_000 + .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 25_000 + .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(4 as Weight)) @@ -125,7 +122,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (36_980_000 as Weight) + (42_146_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -134,7 +131,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (38_771_000 as Weight) + (42_960_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } @@ -143,7 +140,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (29_914_000 as Weight) + (33_025_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } @@ -152,8 +149,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { (0 as Weight) - // Standard Error: 16_000 - .saturating_add((12_759_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 24_000 + .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -162,26 +159,26 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (22_425_000 as Weight) + (25_194_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (23_011_000 as Weight) + (25_397_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (17_718_000 as Weight) + (19_278_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (17_619_000 as Weight) + (19_304_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -189,20 +186,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (25_869_000 as Weight) + (28_615_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (18_058_000 as Weight) + (19_943_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - (20_720_000 as Weight) + (22_583_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -210,7 +207,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (41_808_000 as Weight) + (47_520_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -218,76 +215,69 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (39_866_000 as Weight) + (45_316_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (34_767_000 as Weight) + (38_391_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (33_910_000 as Weight) + (38_023_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 
as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (33_827_000 as Weight) + (37_398_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (31_998_000 as Weight) + (35_621_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (23_607_000 as Weight) + (25_856_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - (23_341_000 as Weight) + (26_098_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (21_969_000 as Weight) + (24_076_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (20_355_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - // Storage: Uniques NextCollectionId (r:1 w:1) - // Storage: Uniques Class (r:1 w:0) - fn try_increment_id() -> Weight { - (19_233_000 as Weight) + (22_035_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - (20_733_000 as Weight) + (22_534_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -296,7 +286,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - (40_798_000 as Weight) + (45_272_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } @@ -304,21 +294,19 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (30_481_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + (33_075_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } - // Storage: Uniques NextCollectionId (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (19_811_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + (19_528_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: 
Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -333,12 +321,12 @@ impl WeightInfo for () { /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { (0 as Weight) - // Standard Error: 17_000 - .saturating_add((10_950_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 17_000 - .saturating_add((1_657_000 as Weight).saturating_mul(m as Weight)) - // Standard Error: 17_000 - .saturating_add((1_512_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 25_000 + .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 25_000 + .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 25_000 + .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) @@ -351,7 +339,7 @@ impl WeightInfo for () { // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (36_980_000 as Weight) + (42_146_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -360,7 +348,7 @@ impl WeightInfo for () { // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (38_771_000 as Weight) + (42_960_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } @@ -369,7 +357,7 @@ impl WeightInfo for () { // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (29_914_000 as Weight) + (33_025_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } @@ -378,8 +366,8 @@ impl WeightInfo for () { /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { (0 as Weight) - // Standard Error: 16_000 - .saturating_add((12_759_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 24_000 + .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -388,26 +376,26 @@ impl WeightInfo for () { // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (22_425_000 as Weight) + (25_194_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (23_011_000 as Weight) + (25_397_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (17_718_000 as Weight) + (19_278_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (17_619_000 as Weight) + (19_304_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -415,20 +403,20 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (25_869_000 as Weight) + (28_615_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (18_058_000 as Weight) + (19_943_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - (20_720_000 as Weight) + (22_583_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -436,7 +424,7 @@ impl WeightInfo for () { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (41_808_000 as Weight) + (47_520_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -444,76 +432,69 @@ impl WeightInfo for () { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (39_866_000 as Weight) + (45_316_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (34_767_000 as Weight) + (38_391_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (33_910_000 as Weight) + (38_023_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as 
Weight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (33_827_000 as Weight) + (37_398_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (31_998_000 as Weight) + (35_621_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (23_607_000 as Weight) + (25_856_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - (23_341_000 as Weight) + (26_098_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (21_969_000 as Weight) + (24_076_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (20_355_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - // Storage: Uniques NextCollectionId (r:1 w:1) - // Storage: Uniques Class (r:1 w:0) - fn try_increment_id() -> Weight { - (19_233_000 as Weight) + (22_035_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - (20_733_000 as Weight) + (22_534_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -522,7 +503,7 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - (40_798_000 as Weight) + (45_272_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } From bd2a876e4c0e79b6e2ab6b0a92c038304a64b20f Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Fri, 19 Aug 2022 18:08:31 +0100 Subject: [PATCH 051/348] make weightinfo pub (#12074) --- frame/whitelist/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/whitelist/src/lib.rs b/frame/whitelist/src/lib.rs index 239f0fd280160..5fb4e3aeb9ba6 100644 --- a/frame/whitelist/src/lib.rs +++ b/frame/whitelist/src/lib.rs @@ -38,6 +38,7 @@ mod mock; #[cfg(test)] mod tests; pub mod weights; +pub use weights::WeightInfo; use codec::{DecodeLimit, Encode, FullCodec}; use frame_support::{ @@ -54,7 +55,6 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; - use crate::weights::WeightInfo; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; From 28f0cb18c96fb82f810f1d9be69fb609fbb7217e Mon Sep 17 00:00:00 2001 From: Quentin McGaw Date: Sat, 20 Aug 2022 15:40:43 -0400 Subject: [PATCH 052/348] Fix comment typo `SS85` -> `SS58` (#12076) 
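As a brief aside on how the generated `WeightInfo` implementations above are consumed: runtimes point a pallet's `Config::WeightInfo` at the `SubstrateWeight` impl, while tests fall back to the `()` impl, and the whitelist change above (`pub use weights::WeightInfo;`) exposes the trait at the crate root so downstream code can name it directly. A minimal sketch, assuming the usual `pub mod weights` layout of these pallets:

// Sketch only: consuming a generated `WeightInfo` impl generically.
// `create()` is one of the trait methods shown in the table above.
use frame_support::weights::Weight;
use pallet_uniques::weights::WeightInfo as UniquesWeights;

fn create_call_weight<W: UniquesWeights>() -> Weight {
    W::create()
}

// Tests can use the unit impl: `create_call_weight::<()>()`.
// After the `pub use weights::WeightInfo;` change in pallet_whitelist (patch 051
// above), the same pattern works with the shorter `pallet_whitelist::WeightInfo` path.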
--- frame/system/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 12d415e4e2b42..990e3cdd14de1 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -333,7 +333,7 @@ pub mod pallet { type SystemWeightInfo: WeightInfo; - /// The designated SS85 prefix of this chain. + /// The designated SS58 prefix of this chain. /// /// This replaces the "ss58Format" property declared in the chain spec. Reason is /// that the runtime should know about the prefix in order to make use of it as From 1498d867ffc4182d7d82298171d9831e9bbc1d25 Mon Sep 17 00:00:00 2001 From: Sasha Gryaznov Date: Mon, 22 Aug 2022 16:06:43 +0300 Subject: [PATCH 053/348] [contracts] `define_env!` re-write as a proc macro (#11888) * define_env proc macro basics + can_satisfy part ready * expand_impls part done * fix of the &FunctionType bug * pallet is compiled * updated host fn definition syntax * docs comments allowed to host fn definitions * all 53 host funcs re-defined by the new macro * unstable feat fix * cleanup * legacy mbe macros cleaned up * Added Env ident to macro attribute; all tests pass! * \#[v(..)] -> \#[version(..)] * some tiny corrections * save * builds with non-magic rt; tests fail * tests pass * refactored errors + added docs * merge err fixed * fixes on @ascjones review, all except moving away from `pub mod env` syntax * debug printing cleared * clippy fix --- frame/contracts/proc-macro/src/lib.rs | 406 +++- frame/contracts/src/wasm/env_def/macros.rs | 396 ---- frame/contracts/src/wasm/env_def/mod.rs | 3 - frame/contracts/src/wasm/prepare.rs | 27 +- frame/contracts/src/wasm/runtime.rs | 2006 +++++++++++--------- 5 files changed, 1523 insertions(+), 1315 deletions(-) delete mode 100644 frame/contracts/src/wasm/env_def/macros.rs diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index dca29c805cec4..65c13bb1fc607 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -15,16 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Proc macros used in the contracts module. +//! Procedural macroses used in the contracts module. +//! +//! Most likely you should use the [`#[define_env]`][`macro@define_env`] attribute macro which hides +//! boilerplate of defining external environment for a wasm module. #![no_std] extern crate alloc; -use alloc::string::ToString; +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; use proc_macro2::TokenStream; -use quote::{quote, quote_spanned}; -use syn::{parse_macro_input, Data, DeriveInput, Ident}; +use quote::{quote, quote_spanned, ToTokens}; +use syn::{parse_macro_input, spanned::Spanned, Data, DeriveInput, Ident}; /// This derives `Debug` for a struct where each field must be of some numeric type. /// It interprets each field as its represents some weight and formats it as times so that @@ -85,7 +92,7 @@ fn derive_debug( /// This is only used then the `full` feature is activated. #[cfg(feature = "full")] fn iterate_fields(data: &syn::DataStruct, fmt: impl Fn(&Ident) -> TokenStream) -> TokenStream { - use syn::{spanned::Spanned, Fields}; + use syn::Fields; match &data.fields { Fields::Named(fields) => { @@ -140,3 +147,392 @@ fn format_default(field: &Ident) -> TokenStream { &self.#field } } + +/// Parsed environment definition. +struct EnvDef { + host_funcs: Vec, +} + +/// Parsed host function definition. 
+struct HostFn { + item: syn::ItemFn, + module: String, + name: String, + returns: HostFnReturn, +} + +enum HostFnReturn { + Unit, + U32, + ReturnCode, +} + +impl ToTokens for HostFn { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.item.to_tokens(tokens); + } +} + +impl HostFn { + pub fn try_from(item: syn::Item) -> syn::Result { + let err = |span, msg| { + let msg = format!("Invalid host function definition. {}", msg); + syn::Error::new(span, msg) + }; + let msg = "only #[version()] or #[unstable] attribute is allowed."; + let span = item.span(); + let item = match item { + syn::Item::Fn(i_fn) => Ok(i_fn), + _ => Err(err(span, msg)), + }?; + + let name = item.sig.ident.to_string(); + let attrs: Vec<&syn::Attribute> = + item.attrs.iter().filter(|m| !m.path.is_ident("doc")).collect(); + + let module = match attrs.len() { + 0 => Ok("seal0".to_string()), + 1 => { + let attr = &attrs[0]; + let ident = attr.path.get_ident().ok_or(err(span, msg))?.to_string(); + match ident.as_str() { + "version" => { + let ver: syn::LitInt = attr.parse_args()?; + Ok(format!("seal{}", ver.base10_parse::().map_err(|_| err(span, msg))?)) + }, + "unstable" => Ok("__unstable__".to_string()), + _ => Err(err(span, msg)), + } + }, + _ => Err(err(span, msg)), + }?; + + let msg = r#"Should return one of the following: + - Result<(), TrapReason>, + - Result, + - Result"#; + + let ret_ty = match item.clone().sig.output { + syn::ReturnType::Type(_, ty) => Ok(ty.clone()), + _ => Err(err(span, &msg)), + }?; + + match *ret_ty { + syn::Type::Path(tp) => { + let result = &tp.path.segments.last().ok_or(err(span, &msg))?; + let (id, span) = (result.ident.to_string(), result.ident.span()); + id.eq(&"Result".to_string()).then_some(()).ok_or(err(span, &msg))?; + + match &result.arguments { + syn::PathArguments::AngleBracketed(group) => { + if group.args.len() != 2 { + return Err(err(span, &msg)) + }; + + let arg2 = group.args.last().ok_or(err(span, &msg))?; + + let err_ty = match arg2 { + syn::GenericArgument::Type(ty) => Ok(ty.clone()), + _ => Err(err(arg2.span(), &msg)), + }?; + + match err_ty { + syn::Type::Path(tp) => Ok(tp + .path + .segments + .first() + .ok_or(err(arg2.span(), &msg))? + .ident + .to_string()), + _ => Err(err(tp.span(), &msg)), + }? + .eq("TrapReason") + .then_some(()) + .ok_or(err(span, &msg))?; + + let arg1 = group.args.first().ok_or(err(span, &msg))?; + let ok_ty = match arg1 { + syn::GenericArgument::Type(ty) => Ok(ty.clone()), + _ => Err(err(arg1.span(), &msg)), + }?; + let ok_ty_str = match ok_ty { + syn::Type::Path(tp) => Ok(tp + .path + .segments + .first() + .ok_or(err(arg1.span(), &msg))? + .ident + .to_string()), + syn::Type::Tuple(tt) => { + if !tt.elems.is_empty() { + return Err(err(arg1.span(), &msg)) + }; + Ok("()".to_string()) + }, + _ => Err(err(ok_ty.span(), &msg)), + }?; + + let returns = match ok_ty_str.as_str() { + "()" => Ok(HostFnReturn::Unit), + "u32" => Ok(HostFnReturn::U32), + "ReturnCode" => Ok(HostFnReturn::ReturnCode), + _ => Err(err(arg1.span(), &msg)), + }?; + Ok(Self { item, module, name, returns }) + }, + _ => Err(err(span, &msg)), + } + }, + _ => Err(err(span, &msg)), + } + } + + fn to_wasm_sig(&self) -> TokenStream { + let args = self.item.sig.inputs.iter().skip(1).filter_map(|a| match a { + syn::FnArg::Typed(pt) => Some(&pt.ty), + _ => None, + }); + let returns = match &self.returns { + HostFnReturn::U32 => quote! { vec![ ::VALUE_TYPE ] }, + HostFnReturn::ReturnCode => quote! { vec![ ::VALUE_TYPE ] }, + HostFnReturn::Unit => quote! { vec![] }, + }; + + quote! 
{ + wasm_instrument::parity_wasm::elements::FunctionType::new( + vec! [ #(<#args>::VALUE_TYPE),* ], + #returns, + ) + } + } +} +impl EnvDef { + pub fn try_from(item: syn::ItemMod) -> syn::Result { + let span = item.span(); + let err = |msg| syn::Error::new(span, msg); + let items = &item + .content + .as_ref() + .ok_or(err("Invalid environment definition, expected `mod` to be inlined."))? + .1; + + let host_funcs = items + .iter() + .map(|i| HostFn::try_from(i.clone())) + .collect::, _>>()?; + + Ok(Self { host_funcs }) + } +} + +/// Expands environment definiton. +/// Should generate source code for: +/// - wasm import satisfy checks (see `expand_can_satisfy()`); +/// - implementations of the host functions to be added to the wasm runtime environment (see +/// `expand_impls()`). +fn expand_env(def: &mut EnvDef) -> proc_macro2::TokenStream { + let can_satisfy = expand_can_satisfy(def); + let impls = expand_impls(def); + + quote! { + pub struct Env; + #can_satisfy + #impls + } +} + +/// Generates `can_satisfy()` method for every host function, to be used to check +/// these functions versus expected module, name and signatures when imporing them from a wasm +/// module. +fn expand_can_satisfy(def: &mut EnvDef) -> proc_macro2::TokenStream { + let checks = def.host_funcs.iter().map(|f| { + let (module, name, signature) = (&f.module, &f.name, &f.to_wasm_sig()); + quote! { + if module == #module.as_bytes() + && name == #name.as_bytes() + && signature == &#signature + { + return true; + } + } + }); + let satisfy_checks = quote! { + #( #checks )* + }; + + quote! { + impl crate::wasm::env_def::ImportSatisfyCheck for Env { + fn can_satisfy( + module: &[u8], + name: &[u8], + signature: &wasm_instrument::parity_wasm::elements::FunctionType, + ) -> bool { + use crate::wasm::env_def::ConvertibleToWasm; + #[cfg(not(feature = "unstable-interface"))] + if module == b"__unstable__" { + return false; + } + #satisfy_checks + return false; + } + } + } +} + +/// Generates implementation for every host function, to register it in the contract execution +/// environment. +fn expand_impls(def: &mut EnvDef) -> proc_macro2::TokenStream { + let impls = def.host_funcs.iter().map(|f| { + let params = &f.item.sig.inputs.iter().skip(1).map(|arg| { + match arg { + syn::FnArg::Typed(pt) => { + if let syn::Pat::Ident(ident) = &*pt.pat { + let p_type = &pt.ty; + let p_name = ident.ident.clone(); + quote! { + let #p_name : <#p_type as crate::wasm::env_def::ConvertibleToWasm>::NativeType = + args.next() + .and_then(|v| <#p_type as crate::wasm::env_def::ConvertibleToWasm>::from_typed_value(v.clone())) + .expect( + "precondition: all imports should be checked against the signatures of corresponding + functions defined by `#[define_env]` proc macro by the user of the macro; + thus this can never be `None`; + qed;" + ); + } + } else { quote! { } } + }, + _ => quote! { }, + } + }); + + let outline = match &f.returns { + HostFnReturn::Unit => quote! { + body().map_err(|reason| { + ctx.set_trap_reason(reason); + sp_sandbox::HostError + })?; + return Ok(sp_sandbox::ReturnValue::Unit); + }, + _ => quote! { + let r = body().map_err(|reason| { + ctx.set_trap_reason(reason); + sp_sandbox::HostError + })?; + return Ok(sp_sandbox::ReturnValue::Value({ + r.to_typed_value() + })); + }, + }; + let params = params.clone(); + let (module, name, ident, body) = (&f.module, &f.name, &f.item.sig.ident, &f.item.block); + let unstable_feat = match module.as_str() { + "__unstable__" => quote! { #[cfg(feature = "unstable-interface")] }, + _ => quote! 
{ }, + }; + quote! { + #unstable_feat + f(#module.as_bytes(), #name.as_bytes(), { + fn #ident( + ctx: &mut crate::wasm::Runtime, + args: &[sp_sandbox::Value], + ) -> Result + where + ::AccountId: sp_core::crypto::UncheckedFrom<::Hash> + + AsRef<[u8]>, + { + #[allow(unused)] + let mut args = args.iter(); + let mut body = || { + #( #params )* + #body + }; + #outline + } + #ident:: + }); + } + }); + + let packed_impls = quote! { + #( #impls )* + }; + + quote! { + impl crate::wasm::env_def::FunctionImplProvider for Env + where + ::AccountId: + sp_core::crypto::UncheckedFrom<::Hash> + AsRef<[u8]>, + { + fn impls)>(f: &mut F) { + #packed_impls + } + } + } +} + +/// Defines a host functions set that can be imported by contract wasm code. +/// +/// **NB**: Be advised that all functions defined by this macro +/// will panic if called with unexpected arguments. +/// +/// It's up to you as the user of this macro to check signatures of wasm code to be executed +/// and reject the code if any imported function has a mismatched signature. +/// +/// ## Example +/// +/// ```nocompile +/// #[define_env] +/// pub mod some_env { +/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result<(), TrapReason> { +/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) +/// } +/// } +/// ``` +/// This example will expand to the `some_host_fn()` defined in the wasm module named `seal0`. +/// To define a host function in `seal1` and `__unstable__` modules, it should be annotated with the +/// appropriate attribute as follows: +/// +/// ## Example +/// +/// ```nocompile +/// #[define_env] +/// pub mod some_env { +/// #[version(1)] +/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { +/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) +/// } +/// +/// #[unstable] +/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { +/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) +/// } +/// } +/// ``` +/// +/// Only following return types are allowed for the host functions defined with the macro: +/// - `Result<(), TrapReason>`, +/// - `Result`, +/// - `Result`. +/// +/// The macro expands to `pub struct Env` declaration, with the following traits implementations: +/// - `pallet_contracts::wasm::env_def::ImportSatisfyCheck` +/// - `pallet_contracts::wasm::env_def::FunctionImplProvider` +#[proc_macro_attribute] +pub fn define_env( + attr: proc_macro::TokenStream, + item: proc_macro::TokenStream, +) -> proc_macro::TokenStream { + if !attr.is_empty() { + let msg = "Invalid `define_env` attribute macro: expected no attributes: `#[define_env]`."; + let span = proc_macro2::TokenStream::from(attr).span(); + return syn::Error::new(span, msg).to_compile_error().into() + } + + let item = syn::parse_macro_input!(item as syn::ItemMod); + + match EnvDef::try_from(item) { + Ok(mut def) => expand_env(&mut def).into(), + Err(e) => e.to_compile_error().into(), + } +} diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs deleted file mode 100644 index aa5a1626681f4..0000000000000 --- a/frame/contracts/src/wasm/env_def/macros.rs +++ /dev/null @@ -1,396 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Definition of macros that hides boilerplate of defining external environment -//! for a wasm module. -//! -//! Most likely you should use `define_env` macro. - -macro_rules! convert_args { - () => (vec![]); - ( $( $t:ty ),* ) => ( vec![ $( { use $crate::wasm::env_def::ConvertibleToWasm; <$t>::VALUE_TYPE }, )* ] ); -} - -macro_rules! gen_signature { - ( ( $( $params: ty ),* ) ) => ( - { - wasm_instrument::parity_wasm::elements::FunctionType::new( - convert_args!($($params),*), vec![], - ) - } - ); - - ( ( $( $params: ty ),* ) -> $returns: ty ) => ( - { - wasm_instrument::parity_wasm::elements::FunctionType::new( - convert_args!($($params),*), - vec![{use $crate::wasm::env_def::ConvertibleToWasm; <$returns>::VALUE_TYPE}], - ) - } - ); -} - -macro_rules! gen_signature_dispatch { - ( - $needle_module:ident, - $needle_name:ident, - $needle_sig:ident ; - $module:ident, - $name:ident - ( $ctx:ident $( , $names:ident : $params:ty )* ) $( -> $returns:ty )* , $($rest:tt)* - ) => { - let module = stringify!($module).as_bytes(); - if module == $needle_module && stringify!($name).as_bytes() == $needle_name { - let signature = gen_signature!( ( $( $params ),* ) $( -> $returns )* ); - if $needle_sig == &signature { - return true; - } - } else { - gen_signature_dispatch!($needle_module, $needle_name, $needle_sig ; $($rest)*); - } - }; - ( $needle_module:ident, $needle_name:ident, $needle_sig:ident ; ) => {}; -} - -/// Unmarshall arguments and then execute `body` expression and return its result. -macro_rules! unmarshall_then_body { - ( $body:tt, $ctx:ident, $args_iter:ident, $( $names:ident : $params:ty ),* ) => ({ - $( - let $names : <$params as $crate::wasm::env_def::ConvertibleToWasm>::NativeType = - $args_iter.next() - .and_then(|v| <$params as $crate::wasm::env_def::ConvertibleToWasm> - ::from_typed_value(v.clone())) - .expect( - "precondition: all imports should be checked against the signatures of corresponding - functions defined by `define_env!` macro by the user of the macro; - signatures of these functions defined by `$params`; - calls always made with arguments types of which are defined by the corresponding imports; - thus types of arguments should be equal to type list in `$params` and - length of argument list and $params should be equal; - thus this can never be `None`; - qed; - " - ); - )* - $body - }) -} - -/// Since we can't specify the type of closure directly at binding site: -/// -/// ```nocompile -/// let f: FnOnce() -> Result<::NativeType, _> = || { /* ... */ }; -/// ``` -/// -/// we use this function to constrain the type of the closure. -#[inline(always)] -pub fn constrain_closure(f: F) -> F -where - F: FnOnce() -> Result, -{ - f -} - -macro_rules! 
unmarshall_then_body_then_marshall { - ( $args_iter:ident, $ctx:ident, ( $( $names:ident : $params:ty ),* ) -> $returns:ty => $body:tt ) => ({ - let body = $crate::wasm::env_def::macros::constrain_closure::< - <$returns as $crate::wasm::env_def::ConvertibleToWasm>::NativeType, _ - >(|| { - unmarshall_then_body!($body, $ctx, $args_iter, $( $names : $params ),*) - }); - let r = body().map_err(|reason| { - $ctx.set_trap_reason(reason); - sp_sandbox::HostError - })?; - return Ok(sp_sandbox::ReturnValue::Value({ use $crate::wasm::env_def::ConvertibleToWasm; r.to_typed_value() })) - }); - ( $args_iter:ident, $ctx:ident, ( $( $names:ident : $params:ty ),* ) => $body:tt ) => ({ - let body = $crate::wasm::env_def::macros::constrain_closure::<(), _>(|| { - unmarshall_then_body!($body, $ctx, $args_iter, $( $names : $params ),*) - }); - body().map_err(|reason| { - $ctx.set_trap_reason(reason); - sp_sandbox::HostError - })?; - return Ok(sp_sandbox::ReturnValue::Unit) - }) -} - -macro_rules! define_func { - ( $trait:tt $name:ident ( $ctx: ident $(, $names:ident : $params:ty)*) $(-> $returns:ty)* => $body:tt ) => { - fn $name< E: $trait >( - $ctx: &mut $crate::wasm::Runtime, - args: &[sp_sandbox::Value], - ) -> Result - where - ::AccountId: - sp_core::crypto::UncheckedFrom<::Hash> + - AsRef<[u8]> - { - #[allow(unused)] - let mut args = args.iter(); - - unmarshall_then_body_then_marshall!( - args, - $ctx, - ( $( $names : $params ),* ) $( -> $returns )* => $body - ) - } - }; -} - -macro_rules! register_body { - ( $reg_cb:ident, $trait:tt; - $module:ident $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) - $( -> $returns:ty )* => $body:tt - ) => { - $reg_cb( - stringify!($module).as_bytes(), - stringify!($name).as_bytes(), - { - define_func!( - $trait $name ( $ctx $(, $names : $params )* ) $( -> $returns )* => $body - ); - $name:: - } - ); - } -} - -macro_rules! register_func { - ( $reg_cb:ident, $trait:tt; ) => {}; - - ( $reg_cb:ident, $trait:tt; - __unstable__ $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) - $( -> $returns:ty )* => $body:tt $($rest:tt)* - ) => { - #[cfg(feature = "unstable-interface")] - register_body!( - $reg_cb, $trait; - __unstable__ $name - ( $ctx $( , $names : $params )* ) - $( -> $returns )* => $body - ); - register_func!( $reg_cb, $trait; $($rest)* ); - }; - - ( $reg_cb:ident, $trait:tt; - $module:ident $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) - $( -> $returns:ty )* => $body:tt $($rest:tt)* - ) => { - register_body!( - $reg_cb, $trait; - $module $name - ( $ctx $( , $names : $params )* ) - $( -> $returns )* => $body - ); - register_func!( $reg_cb, $trait; $($rest)* ); - }; -} - -/// Define a function set that can be imported by executing wasm code. -/// -/// **NB**: Be advised that all functions defined by this macro -/// will panic if called with unexpected arguments. -/// -/// It's up to the user of this macro to check signatures of wasm code to be executed -/// and reject the code if any imported function has a mismatched signature. -macro_rules! 
define_env { - ( $init_name:ident , < E: $trait:tt > , - $( [$module:ident] $name:ident ( $ctx:ident $( , $names:ident : $params:ty )* ) - $( -> $returns:ty )* => $body:tt , )* - ) => { - pub struct $init_name; - - impl $crate::wasm::env_def::ImportSatisfyCheck for $init_name { - fn can_satisfy( - module: &[u8], - name: &[u8], - func_type: &wasm_instrument::parity_wasm::elements::FunctionType, - ) -> bool - { - #[cfg(not(feature = "unstable-interface"))] - if module == b"__unstable__" { - return false; - } - gen_signature_dispatch!( - module, name, func_type ; - $( $module, $name ( $ctx $(, $names : $params )* ) $( -> $returns )* , )* - ); - - return false; - } - } - - impl $crate::wasm::env_def::FunctionImplProvider for $init_name - where - ::AccountId: - sp_core::crypto::UncheckedFrom<::Hash> + - AsRef<[u8]> - { - fn impls)>(f: &mut F) { - register_func!( - f, - $trait; - $( $module $name ( $ctx $( , $names : $params )* ) $( -> $returns)* => $body )* - ); - } - } - }; -} - -#[cfg(test)] -mod tests { - use crate::{ - exec::Ext, - wasm::{runtime::TrapReason, tests::MockExt, Runtime}, - Weight, - }; - use sp_runtime::traits::Zero; - use sp_sandbox::{ReturnValue, Value}; - use wasm_instrument::parity_wasm::elements::{FunctionType, ValueType}; - - struct TestRuntime { - value: u32, - } - - impl TestRuntime { - fn set_trap_reason(&mut self, _reason: TrapReason) {} - } - - #[test] - fn macro_unmarshall_then_body_then_marshall_value_or_trap() { - fn test_value( - _ctx: &mut TestRuntime, - args: &[sp_sandbox::Value], - ) -> Result { - let mut args = args.iter(); - unmarshall_then_body_then_marshall!( - args, - _ctx, - (a: u32, b: u32) -> u32 => { - if b == 0 { - Err(crate::wasm::runtime::TrapReason::Termination) - } else { - Ok(a / b) - } - } - ) - } - - let ctx = &mut TestRuntime { value: 0 }; - assert_eq!( - test_value(ctx, &[Value::I32(15), Value::I32(3)]).unwrap(), - ReturnValue::Value(Value::I32(5)), - ); - assert!(test_value(ctx, &[Value::I32(15), Value::I32(0)]).is_err()); - } - - #[test] - fn macro_unmarshall_then_body_then_marshall_unit() { - fn test_unit( - ctx: &mut TestRuntime, - args: &[sp_sandbox::Value], - ) -> Result { - let mut args = args.iter(); - unmarshall_then_body_then_marshall!( - args, - ctx, - (a: u32, b: u32) => { - ctx.value = a + b; - Ok(()) - } - ) - } - - let ctx = &mut TestRuntime { value: 0 }; - let result = test_unit(ctx, &[Value::I32(2), Value::I32(3)]).unwrap(); - assert_eq!(result, ReturnValue::Unit); - assert_eq!(ctx.value, 5); - } - - #[test] - fn macro_define_func() { - define_func!( Ext seal_gas (_ctx, amount: u32) => { - let amount = Weight::from(amount); - if !amount.is_zero() { - Ok(()) - } else { - Err(TrapReason::Termination) - } - }); - let _f: fn( - &mut Runtime, - &[sp_sandbox::Value], - ) -> Result = seal_gas::; - } - - #[test] - fn macro_gen_signature() { - assert_eq!(gen_signature!((i32)), FunctionType::new(vec![ValueType::I32], vec![])); - - assert_eq!( - gen_signature!( (i32, u32) -> u32 ), - FunctionType::new(vec![ValueType::I32, ValueType::I32], vec![ValueType::I32]), - ); - } - - #[test] - fn macro_unmarshall_then_body() { - let args = vec![Value::I32(5), Value::I32(3)]; - let mut args = args.iter(); - - let ctx: &mut u32 = &mut 0; - - let r = unmarshall_then_body!( - { - *ctx = a + b; - a * b - }, - ctx, - args, - a: u32, - b: u32 - ); - - assert_eq!(*ctx, 8); - assert_eq!(r, 15); - } - - #[test] - fn macro_define_env() { - use crate::wasm::env_def::ImportSatisfyCheck; - - define_env!(Env, , - [seal0] seal_gas( _ctx, amount: u32 ) => { - 
let amount = Weight::from(amount); - if !amount.is_zero() { - Ok(()) - } else { - Err(crate::wasm::runtime::TrapReason::Termination) - } - }, - ); - - assert!(Env::can_satisfy( - b"seal0", - b"seal_gas", - &FunctionType::new(vec![ValueType::I32], vec![]) - )); - assert!(!Env::can_satisfy(b"seal0", b"not_exists", &FunctionType::new(vec![], vec![]))); - } -} diff --git a/frame/contracts/src/wasm/env_def/mod.rs b/frame/contracts/src/wasm/env_def/mod.rs index c904445f8aea8..be6e688c97868 100644 --- a/frame/contracts/src/wasm/env_def/mod.rs +++ b/frame/contracts/src/wasm/env_def/mod.rs @@ -21,9 +21,6 @@ use crate::exec::Ext; use sp_sandbox::Value; use wasm_instrument::parity_wasm::elements::{FunctionType, ValueType}; -#[macro_use] -pub mod macros; - pub trait ConvertibleToWasm: Sized { const VALUE_TYPE: ValueType; type NativeType; diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index aec5dc2db611d..7b81c1c55b3bd 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -517,6 +517,7 @@ mod tests { schedule::Limits, tests::{Test, ALICE}, }; + use pallet_contracts_proc_macro::define_env; use std::fmt; impl fmt::Debug for PrefabWasmModule { @@ -532,17 +533,27 @@ mod tests { // Define test environment for tests. We need ImportSatisfyCheck // implementation from it. So actual implementations doesn't matter. - define_env!(Test, , - [seal0] panic(_ctx) => { unreachable!(); }, + #[define_env] + pub mod test_env { + fn panic(_ctx: crate::wasm::Runtime) -> Result<(), TrapReason> { + Ok(()) + } // gas is an implementation defined function and a contract can't import it. - [seal0] gas(_ctx, _amount: u32) => { unreachable!(); }, + fn gas(_ctx: crate::wasm::Runtime, _amount: u32) -> Result<(), TrapReason> { + Ok(()) + } - [seal0] nop(_ctx, _unused: u64) => { unreachable!(); }, + fn nop(_ctx: crate::wasm::Runtime, _unused: u64) -> Result<(), TrapReason> { + Ok(()) + } - // new version of nop with other data type for argument - [seal1] nop(_ctx, _unused: i32) => { unreachable!(); }, - ); + // new version of nop with other data type for argumebt + #[version(1)] + fn nop(_ctx: crate::wasm::Runtime, _unused: i32) -> Result<(), TrapReason> { + Ok(()) + } + } } macro_rules! prepare_test { @@ -561,7 +572,7 @@ mod tests { }, .. Default::default() }; - let r = do_preparation::(wasm, &schedule, ALICE); + let r = do_preparation::(wasm, &schedule, ALICE); assert_matches::assert_matches!(r.map_err(|(_, msg)| msg), $($expected)*); } }; diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index f21dc7c76e44c..1d5478a5277cd 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -29,6 +29,7 @@ use bitflags::bitflags; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; +use pallet_contracts_proc_macro::define_env; use sp_core::{crypto::UncheckedFrom, Bytes}; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; use sp_runtime::traits::{Bounded, Zero}; @@ -947,183 +948,235 @@ where // Any input that leads to a out of bound error (reading or writing) or failing to decode // data passed to the supervisor will lead to a trap. This is not documented explicitly // for every function. -define_env!(Env, , - // Account for used gas. Traps if gas used is greater than gas limit. 
- // - // NOTE: This is a implementation defined call and is NOT a part of the public API. - // This call is supposed to be called only by instrumentation injected code. - // - // - amount: How much gas is used. - [seal0] gas(ctx, amount: u32) => { +#[define_env] +pub mod env { + /// Account for used gas. Traps if gas used is greater than gas limit. + /// + /// NOTE: This is a implementation defined call and is NOT a part of the public API. + /// This call is supposed to be called only by instrumentation injected code. + /// + /// - amount: How much gas is used. + fn gas(ctx: Runtime, amount: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::MeteringBlock(amount))?; Ok(()) - }, + } - // Set the value at the given key in the contract storage. - // - // Equivalent to the newer version of `seal_set_storage` with the exception of the return - // type. Still a valid thing to call when not interested in the return value. - [seal0] seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) => { + /// Set the value at the given key in the contract storage. + /// + /// Equivalent to the newer version of `seal_set_storage` with the exception of the return + /// type. Still a valid thing to call when not interested in the return value. + fn seal_set_storage( + ctx: Runtime, + key_ptr: u32, + value_ptr: u32, + value_len: u32, + ) -> Result<(), TrapReason> { ctx.set_storage(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) - }, + } - // Set the value at the given key in the contract storage. - // - // This version is to be used with a fixed sized storage key. For runtimes supporting transparent - // hashing, please use the newer version of this function. - // - // The value length must not exceed the maximum defined by the contracts module parameters. - // Specifying a `value_len` of zero will store an empty value. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the location to store the value is placed. - // - `value_ptr`: pointer into the linear memory where the value to set is placed. - // - `value_len`: the length of the value in bytes. - // - // # Return Value - // - // Returns the size of the pre-existing value at the specified key if any. Otherwise - // `SENTINEL` is returned as a sentinel value. - [seal1] seal_set_storage(ctx, key_ptr: u32, value_ptr: u32, value_len: u32) -> u32 => { + /// Set the value at the given key in the contract storage. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + /// + /// The value length must not exceed the maximum defined by the contracts module parameters. + /// Specifying a `value_len` of zero will store an empty value. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the location to store the value is placed. + /// - `value_ptr`: pointer into the linear memory where the value to set is placed. + /// - `value_len`: the length of the value in bytes. + /// + /// # Return Value + /// + /// Returns the size of the pre-existing value at the specified key if any. Otherwise + /// `SENTINEL` is returned as a sentinel value. + #[version(1)] + fn seal_set_storage( + ctx: Runtime, + key_ptr: u32, + value_ptr: u32, + value_len: u32, + ) -> Result { ctx.set_storage(KeyType::Fix, key_ptr, value_ptr, value_len) - }, + } - // Set the value at the given key in the contract storage. 
- // - // The key and value lengths must not exceed the maximums defined by the contracts module parameters. - // Specifying a `value_len` of zero will store an empty value. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the location to store the value is placed. - // - `key_len`: the length of the key in bytes. - // - `value_ptr`: pointer into the linear memory where the value to set is placed. - // - `value_len`: the length of the value in bytes. - // - // # Return Value - // - // Returns the size of the pre-existing value at the specified key if any. Otherwise - // `SENTINEL` is returned as a sentinel value. - [__unstable__] seal_set_storage(ctx, key_ptr: u32, key_len: u32, value_ptr: u32, value_len: u32) -> u32 => { + /// Set the value at the given key in the contract storage. + /// + /// The key and value lengths must not exceed the maximums defined by the contracts module + /// parameters. Specifying a `value_len` of zero will store an empty value. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the location to store the value is placed. + /// - `key_len`: the length of the key in bytes. + /// - `value_ptr`: pointer into the linear memory where the value to set is placed. + /// - `value_len`: the length of the value in bytes. + /// + /// # Return Value + /// + /// Returns the size of the pre-existing value at the specified key if any. Otherwise + /// `SENTINEL` is returned as a sentinel value. + #[unstable] + fn seal_set_storage( + ctx: Runtime, + key_ptr: u32, + key_len: u32, + value_ptr: u32, + value_len: u32, + ) -> Result { ctx.set_storage(KeyType::Variable(key_len), key_ptr, value_ptr, value_len) - }, + } - // Clear the value at the given key in the contract storage. - // - // Equivalent to the newer version of `seal_clear_storage` with the exception of the return - // type. Still a valid thing to call when not interested in the return value. - [seal0] seal_clear_storage(ctx, key_ptr: u32) => { + /// Clear the value at the given key in the contract storage. + /// + /// Equivalent to the newer version of `seal_clear_storage` with the exception of the return + /// type. Still a valid thing to call when not interested in the return value. + fn seal_clear_storage(ctx: Runtime, key_ptr: u32) -> Result<(), TrapReason> { ctx.clear_storage(KeyType::Fix, key_ptr).map(|_| ()) - }, + } - // Clear the value at the given key in the contract storage. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key is placed. - // - `key_len`: the length of the key in bytes. - // - // # Return Value - // - // Returns the size of the pre-existing value at the specified key if any. Otherwise - // `SENTINEL` is returned as a sentinel value. - [__unstable__] seal_clear_storage(ctx, key_ptr: u32, key_len: u32) -> u32 => { + /// Clear the value at the given key in the contract storage. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key is placed. + /// - `key_len`: the length of the key in bytes. + /// + /// # Return Value + /// + /// Returns the size of the pre-existing value at the specified key if any. Otherwise + /// `SENTINEL` is returned as a sentinel value. + #[unstable] + fn seal_clear_storage( + ctx: Runtime, + key_ptr: u32, + key_len: u32, + ) -> Result { ctx.clear_storage(KeyType::Variable(key_len), key_ptr) - }, + } - // Retrieve the value under the given key from storage. - // - // This version is to be used with a fixed sized storage key. 
For runtimes supporting transparent - // hashing, please use the newer version of this function. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - // - `out_ptr`: pointer to the linear memory where the value is written to. - // - `out_len_ptr`: in-out pointer into linear memory where the buffer length - // is read from and the value length is written to. - // - // # Errors - // - // `ReturnCode::KeyNotFound` - [seal0] seal_get_storage(ctx, key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { + /// Retrieve the value under the given key from storage. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + /// - `out_ptr`: pointer to the linear memory where the value is written to. + /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and + /// the value length is written to. + /// + /// # Errors + /// + /// `ReturnCode::KeyNotFound` + fn seal_get_storage( + ctx: Runtime, + key_ptr: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { ctx.get_storage(KeyType::Fix, key_ptr, out_ptr, out_len_ptr) - }, + } - // Retrieve the value under the given key from storage. - // - // This version is to be used with a fixed sized storage key. For runtimes supporting transparent - // hashing, please use the newer version of this function. - // - // The key length must not exceed the maximum defined by the contracts module parameter. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - // - `key_len`: the length of the key in bytes. - // - `out_ptr`: pointer to the linear memory where the value is written to. - // - `out_len_ptr`: in-out pointer into linear memory where the buffer length - // is read from and the value length is written to. - // - // # Errors - // - // `ReturnCode::KeyNotFound` - [__unstable__] seal_get_storage(ctx, key_ptr: u32, key_len: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { + /// Retrieve the value under the given key from storage. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + /// - `key_len`: the length of the key in bytes. + /// - `out_ptr`: pointer to the linear memory where the value is written to. + /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and + /// the value length is written to. + /// + /// # Errors + /// + /// `ReturnCode::KeyNotFound` + #[unstable] + fn seal_get_storage( + ctx: Runtime, + key_ptr: u32, + key_len: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { ctx.get_storage(KeyType::Variable(key_len), key_ptr, out_ptr, out_len_ptr) - }, + } - // Checks whether there is a value stored under the given key. - // - // This version is to be used with a fixed sized storage key. For runtimes supporting transparent - // hashing, please use the newer version of this function. 
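From the contract's point of view, the fixed-key storage functions documented here are plain wasm imports taking offsets into the contract's linear memory. A hedged sketch of those import declarations (module names and parameter lists follow the doc comments above; the exact declarations a contract toolchain emits may differ):

// Illustrative contract-side imports for the fixed-key storage API.
#[link(wasm_import_module = "seal0")]
extern "C" {
    // Returns a ReturnCode; `out_len_ptr` is in-out: buffer capacity in, value length out.
    fn seal_get_storage(key_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> u32;
    // Returns the size of the stored value, or SENTINEL if there is none.
    fn seal_contains_storage(key_ptr: u32) -> u32;
}

#[link(wasm_import_module = "seal1")]
extern "C" {
    // Returns the size of the previously stored value, or SENTINEL if there was none.
    fn seal_set_storage(key_ptr: u32, value_ptr: u32, value_len: u32) -> u32;
}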
- // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - // - // # Return Value - // - // Returns the size of the pre-existing value at the specified key if any. Otherwise - // `SENTINEL` is returned as a sentinel value. - [seal0] seal_contains_storage(ctx, key_ptr: u32) -> u32 => { + /// Checks whether there is a value stored under the given key. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + /// + /// # Return Value + /// + /// Returns the size of the pre-existing value at the specified key if any. Otherwise + /// `SENTINEL` is returned as a sentinel value. + fn seal_contains_storage(ctx: Runtime, key_ptr: u32) -> Result { ctx.contains_storage(KeyType::Fix, key_ptr) - }, + } - // Checks whether there is a value stored under the given key. - // - // The key length must not exceed the maximum defined by the contracts module parameter. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - // - `key_len`: the length of the key in bytes. - // - // # Return Value - // - // Returns the size of the pre-existing value at the specified key if any. Otherwise - // `SENTINEL` is returned as a sentinel value. - [__unstable__] seal_contains_storage(ctx, key_ptr: u32, key_len: u32) -> u32 => { + /// Checks whether there is a value stored under the given key. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + /// - `key_len`: the length of the key in bytes. + /// + /// # Return Value + /// + /// Returns the size of the pre-existing value at the specified key if any. Otherwise + /// `SENTINEL` is returned as a sentinel value. + #[unstable] + fn seal_contains_storage( + ctx: Runtime, + key_ptr: u32, + key_len: u32, + ) -> Result { ctx.contains_storage(KeyType::Variable(key_len), key_ptr) - }, + } - // Retrieve and remove the value under the given key from storage. - // - // # Parameters - // - // - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - // - `key_len`: the length of the key in bytes. - // - `out_ptr`: pointer to the linear memory where the value is written to. - // - `out_len_ptr`: in-out pointer into linear memory where the buffer length - // is read from and the value length is written to. - // - // # Errors - // - // `ReturnCode::KeyNotFound` - [__unstable__] seal_take_storage(ctx, key_ptr: u32, key_len: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { + /// Retrieve and remove the value under the given key from storage. + /// + /// # Parameters + /// + /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. + /// - `key_len`: the length of the key in bytes. + /// - `out_ptr`: pointer to the linear memory where the value is written to. + /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and + /// the value length is written to. 
+ /// + /// # Errors + /// + /// `ReturnCode::KeyNotFound` + #[unstable] + fn seal_take_storage( + ctx: Runtime, + key_ptr: u32, + key_len: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { let charged = ctx.charge_gas(RuntimeCosts::TakeStorage(ctx.ext.max_value_size()))?; let key = ctx.read_sandbox_memory(key_ptr, key_len)?; - if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage_transparent(&VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, None, true)? { + if let crate::storage::WriteOutcome::Taken(value) = ctx.ext.set_storage_transparent( + &VarSizedKey::::try_from(key).map_err(|_| Error::::DecodingFailed)?, + None, + true, + )? { ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(value.len() as u32)); ctx.write_sandbox_output(out_ptr, out_len_ptr, &value, false, already_charged)?; Ok(ReturnCode::Success) @@ -1131,59 +1184,56 @@ define_env!(Env, , ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(0)); Ok(ReturnCode::KeyNotFound) } - }, - - // Transfer some value to another account. - // - // # Parameters - // - // - account_ptr: a pointer to the address of the beneficiary account - // Should be decodable as an `T::AccountId`. Traps otherwise. - // - account_len: length of the address buffer. - // - value_ptr: a pointer to the buffer with value, how much value to send. - // Should be decodable as a `T::Balance`. Traps otherwise. - // - value_len: length of the value buffer. - // - // # Errors - // - // `ReturnCode::TransferFailed` - [seal0] seal_transfer( - ctx, + } + /// Transfer some value to another account. + /// + /// # Parameters + /// + /// - account_ptr: a pointer to the address of the beneficiary account Should be decodable as an + /// `T::AccountId`. Traps otherwise. + /// - account_len: length of the address buffer. + /// - value_ptr: a pointer to the buffer with value, how much value to send. Should be decodable + /// as a `T::Balance`. Traps otherwise. + /// - value_len: length of the value buffer. + /// + /// # Errors + /// + /// `ReturnCode::TransferFailed` + fn seal_transfer( + ctx: Runtime, account_ptr: u32, _account_len: u32, value_ptr: u32, - _value_len: u32 - ) -> ReturnCode => { + _value_len: u32, + ) -> Result { ctx.charge_gas(RuntimeCosts::Transfer)?; let callee: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(account_ptr)?; - let value: BalanceOf<::T> = - ctx.read_sandbox_memory_as(value_ptr)?; - + let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr)?; let result = ctx.ext.transfer(&callee, value); match result { Ok(()) => Ok(ReturnCode::Success), Err(err) => { let code = Runtime::::err_into_return_code(err)?; Ok(code) - } + }, } - }, + } - // Make a call to another contract. - // - // # Deprecation - // - // This is equivalent to calling the newer version of this function with - // `flags` set to `ALLOW_REENTRY`. See the newer version for documentation. - // - // # Note - // - // The values `_callee_len` and `_value_len` are ignored because the encoded sizes - // of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards - // compatibility. Consider switching to the newest version of this function. - [seal0] seal_call( - ctx, + /// Make a call to another contract. + /// + /// # Deprecation + /// + /// This is equivalent to calling the newer version of this function with + /// `flags` set to `ALLOW_REENTRY`. See the newer version for documentation. 
+ /// + /// # Note + /// + /// The values `_callee_len` and `_value_len` are ignored because the encoded sizes + /// of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards + /// compatibility. Consider switching to the newest version of this function. + fn seal_call( + ctx: Runtime, callee_ptr: u32, _callee_len: u32, gas: u64, @@ -1192,49 +1242,50 @@ define_env!(Env, , input_data_ptr: u32, input_data_len: u32, output_ptr: u32, - output_len_ptr: u32 - ) -> ReturnCode => { + output_len_ptr: u32, + ) -> Result { ctx.call( CallFlags::ALLOW_REENTRY, - CallType::Call{callee_ptr, value_ptr, gas}, + CallType::Call { callee_ptr, value_ptr, gas }, input_data_ptr, input_data_len, output_ptr, output_len_ptr, ) - }, + } - // Make a call to another contract. - // - // The callees output buffer is copied to `output_ptr` and its length to `output_len_ptr`. - // The copy of the output buffer can be skipped by supplying the sentinel value - // of `SENTINEL` to `output_ptr`. - // - // # Parameters - // - // - flags: See [`CallFlags`] for a documentation of the supported flags. - // - callee_ptr: a pointer to the address of the callee contract. - // Should be decodable as an `T::AccountId`. Traps otherwise. - // - gas: how much gas to devote to the execution. - // - value_ptr: a pointer to the buffer with value, how much value to send. - // Should be decodable as a `T::Balance`. Traps otherwise. - // - input_data_ptr: a pointer to a buffer to be used as input data to the callee. - // - input_data_len: length of the input data buffer. - // - output_ptr: a pointer where the output buffer is copied to. - // - output_len_ptr: in-out pointer to where the length of the buffer is read from - // and the actual length is written to. - // - // # Errors - // - // An error means that the call wasn't successful output buffer is returned unless - // stated otherwise. - // - // `ReturnCode::CalleeReverted`: Output buffer is returned. - // `ReturnCode::CalleeTrapped` - // `ReturnCode::TransferFailed` - // `ReturnCode::NotCallable` - [seal1] seal_call( - ctx, + /// Make a call to another contract. + /// + /// The callees output buffer is copied to `output_ptr` and its length to `output_len_ptr`. + /// The copy of the output buffer can be skipped by supplying the sentinel value + /// of `SENTINEL` to `output_ptr`. + /// + /// # Parameters + /// + /// - flags: See [`CallFlags`] for a documenation of the supported flags. + /// - callee_ptr: a pointer to the address of the callee contract. Should be decodable as an + /// `T::AccountId`. Traps otherwise. + /// - gas: how much gas to devote to the execution. + /// - value_ptr: a pointer to the buffer with value, how much value to send. Should be decodable + /// as a `T::Balance`. Traps otherwise. + /// - input_data_ptr: a pointer to a buffer to be used as input data to the callee. + /// - input_data_len: length of the input data buffer. + /// - output_ptr: a pointer where the output buffer is copied to. + /// - output_len_ptr: in-out pointer to where the length of the buffer is read from and the + /// actual length is written to. + /// + /// # Errors + /// + /// An error means that the call wasn't successful output buffer is returned unless + /// stated otherwise. + /// + /// `ReturnCode::CalleeReverted`: Output buffer is returned. 
+ /// `ReturnCode::CalleeTrapped` + /// `ReturnCode::TransferFailed` + /// `ReturnCode::NotCallable` + #[version(1)] + fn seal_call( + ctx: Runtime, flags: u32, callee_ptr: u32, gas: u64, @@ -1242,75 +1293,74 @@ define_env!(Env, , input_data_ptr: u32, input_data_len: u32, output_ptr: u32, - output_len_ptr: u32 - ) -> ReturnCode => { + output_len_ptr: u32, + ) -> Result { ctx.call( CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, - CallType::Call{callee_ptr, value_ptr, gas}, + CallType::Call { callee_ptr, value_ptr, gas }, input_data_ptr, input_data_len, output_ptr, output_len_ptr, ) - }, + } - // Execute code in the context (storage, caller, value) of the current contract. - // - // Reentrancy protection is always disabled since the callee is allowed - // to modify the callers storage. This makes going through a reentrancy attack - // unnecessary for the callee when it wants to exploit the caller. - // - // # Parameters - // - // - flags: See [`CallFlags`] for a documentation of the supported flags. - // - code_hash: a pointer to the hash of the code to be called. - // - input_data_ptr: a pointer to a buffer to be used as input data to the callee. - // - input_data_len: length of the input data buffer. - // - output_ptr: a pointer where the output buffer is copied to. - // - output_len_ptr: in-out pointer to where the length of the buffer is read from - // and the actual length is written to. - // - // # Errors - // - // An error means that the call wasn't successful and no output buffer is returned unless - // stated otherwise. - // - // `ReturnCode::CalleeReverted`: Output buffer is returned. - // `ReturnCode::CalleeTrapped` - // `ReturnCode::CodeNotFound` - [seal0] seal_delegate_call( - ctx, + /// Execute code in the context (storage, caller, value) of the current contract. + /// + /// Reentrancy protection is always disabled since the callee is allowed + /// to modify the callers storage. This makes going through a reentrancy attack + /// unnecessary for the callee when it wants to exploit the caller. + /// + /// # Parameters + /// + /// - flags: See [`CallFlags`] for a documentation of the supported flags. + /// - code_hash: a pointer to the hash of the code to be called. + /// - input_data_ptr: a pointer to a buffer to be used as input data to the callee. + /// - input_data_len: length of the input data buffer. + /// - output_ptr: a pointer where the output buffer is copied to. + /// - output_len_ptr: in-out pointer to where the length of the buffer is read from and the + /// actual length is written to. + /// + /// # Errors + /// + /// An error means that the call wasn't successful and no output buffer is returned unless + /// stated otherwise. + /// + /// `ReturnCode::CalleeReverted`: Output buffer is returned. + /// `ReturnCode::CalleeTrapped` + /// `ReturnCode::CodeNotFound` + fn seal_delegate_call( + ctx: Runtime, flags: u32, code_hash_ptr: u32, input_data_ptr: u32, input_data_len: u32, output_ptr: u32, - output_len_ptr: u32 - ) -> ReturnCode => { + output_len_ptr: u32, + ) -> Result { ctx.call( CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, - CallType::DelegateCall{code_hash_ptr}, + CallType::DelegateCall { code_hash_ptr }, input_data_ptr, input_data_len, output_ptr, output_len_ptr, ) - }, - - // Instantiate a contract with the specified code hash. - // - // # Deprecation - // - // This is equivalent to calling the newer version of this function. The newer version - // drops the now unnecessary length fields. 
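// A contract-side sketch of the versioned `seal_call` documented above.
// Assumptions: wasm32 target, a "seal1" import module for the version 1
// function, and `SENTINEL` being `u32::MAX`. As the doc comment notes,
// supplying `SENTINEL` as `output_ptr` skips copying the callee's output;
// `flags` bits follow `CallFlags` (0 means no flags set).
const SENTINEL: u32 = u32::MAX;

#[link(wasm_import_module = "seal1")]
extern "C" {
    fn seal_call(
        flags: u32,
        callee_ptr: u32,
        gas: u64,
        value_ptr: u32,
        input_data_ptr: u32,
        input_data_len: u32,
        output_ptr: u32,
        output_len_ptr: u32,
    ) -> u32;
}

fn call_ignoring_output(callee: &[u8; 32], value: &[u8; 16], input: &[u8], gas: u64) -> u32 {
    unsafe {
        seal_call(
            0,                      // no CallFlags set
            callee.as_ptr() as u32, // SCALE-encoded AccountId of the callee
            gas,                    // gas devoted to the nested call
            value.as_ptr() as u32,  // SCALE-encoded Balance to transfer
            input.as_ptr() as u32,
            input.len() as u32,
            SENTINEL,               // skip copying the output buffer
            SENTINEL,               // length cell, unused here since output is skipped
        )
    }
}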
- // - // # Note - // - // The values `_code_hash_len` and `_value_len` are ignored because the encoded sizes - // of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards - // compatibility. Consider switching to the newest version of this function. - [seal0] seal_instantiate( - ctx, + } + /// Instantiate a contract with the specified code hash. + /// + /// # Deprecation + /// + /// This is equivalent to calling the newer version of this function. The newer version + /// drops the now unnecessary length fields. + /// + /// # Note + /// + /// The values `_code_hash_len` and `_value_len` are ignored because the encoded sizes + /// of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards + /// compatibility. Consider switching to the newest version of this function. + fn seal_instantiate( + ctx: Runtime, code_hash_ptr: u32, _code_hash_len: u32, gas: u64, @@ -1323,9 +1373,9 @@ define_env!(Env, , output_ptr: u32, output_len_ptr: u32, salt_ptr: u32, - salt_len: u32 - ) -> ReturnCode => { - ctx.instantiate ( + salt_len: u32, + ) -> Result { + ctx.instantiate( code_hash_ptr, gas, value_ptr, @@ -1338,50 +1388,51 @@ define_env!(Env, , salt_ptr, salt_len, ) - }, + } - // Instantiate a contract with the specified code hash. - // - // This function creates an account and executes the constructor defined in the code specified - // by the code hash. The address of this new account is copied to `address_ptr` and its length - // to `address_len_ptr`. The constructors output buffer is copied to `output_ptr` and its - // length to `output_len_ptr`. The copy of the output buffer and address can be skipped by - // supplying the sentinel value of `SENTINEL` to `output_ptr` or `address_ptr`. - // - // `value` must be at least the minimum balance. Otherwise the instantiation fails and the - // contract is not created. - // - // # Parameters - // - // - code_hash_ptr: a pointer to the buffer that contains the initializer code. - // - gas: how much gas to devote to the execution of the initializer code. - // - value_ptr: a pointer to the buffer with value, how much value to send. - // Should be decodable as a `T::Balance`. Traps otherwise. - // - input_data_ptr: a pointer to a buffer to be used as input data to the initializer code. - // - input_data_len: length of the input data buffer. - // - address_ptr: a pointer where the new account's address is copied to. - // - address_len_ptr: in-out pointer to where the length of the buffer is read from - // and the actual length is written to. - // - output_ptr: a pointer where the output buffer is copied to. - // - output_len_ptr: in-out pointer to where the length of the buffer is read from - // and the actual length is written to. - // - salt_ptr: Pointer to raw bytes used for address derivation. See `fn contract_address`. - // - salt_len: length in bytes of the supplied salt. - // - // # Errors - // - // Please consult the `ReturnCode` enum declaration for more information on those - // errors. Here we only note things specific to this function. - // - // An error means that the account wasn't created and no address or output buffer - // is returned unless stated otherwise. - // - // `ReturnCode::CalleeReverted`: Output buffer is returned. - // `ReturnCode::CalleeTrapped` - // `ReturnCode::TransferFailed` - // `ReturnCode::CodeNotFound` - [seal1] seal_instantiate( - ctx, + /// Instantiate a contract with the specified code hash. 
+ /// + /// This function creates an account and executes the constructor defined in the code specified + /// by the code hash. The address of this new account is copied to `address_ptr` and its length + /// to `address_len_ptr`. The constructors output buffer is copied to `output_ptr` and its + /// length to `output_len_ptr`. The copy of the output buffer and address can be skipped by + /// supplying the sentinel value of `SENTINEL` to `output_ptr` or `address_ptr`. + /// + /// `value` must be at least the minimum balance. Otherwise the instantiation fails and the + /// contract is not created. + /// + /// # Parameters + /// + /// - code_hash_ptr: a pointer to the buffer that contains the initializer code. + /// - gas: how much gas to devote to the execution of the initializer code. + /// - value_ptr: a pointer to the buffer with value, how much value to send. Should be decodable + /// as a `T::Balance`. Traps otherwise. + /// - input_data_ptr: a pointer to a buffer to be used as input data to the initializer code. + /// - input_data_len: length of the input data buffer. + /// - address_ptr: a pointer where the new account's address is copied to. + /// - address_len_ptr: in-out pointer to where the length of the buffer is read from and the + /// actual length is written to. + /// - output_ptr: a pointer where the output buffer is copied to. + /// - output_len_ptr: in-out pointer to where the length of the buffer is read from and the + /// actual length is written to. + /// - salt_ptr: Pointer to raw bytes used for address derivation. See `fn contract_address`. + /// - salt_len: length in bytes of the supplied salt. + /// + /// # Errors + /// + /// Please consult the `ReturnCode` enum declaration for more information on those + /// errors. Here we only note things specific to this function. + /// + /// An error means that the account wasn't created and no address or output buffer + /// is returned unless stated otherwise. + /// + /// `ReturnCode::CalleeReverted`: Output buffer is returned. + /// `ReturnCode::CalleeTrapped` + /// `ReturnCode::TransferFailed` + /// `ReturnCode::CodeNotFound` + #[version(1)] + fn seal_instantiate( + ctx: Runtime, code_hash_ptr: u32, gas: u64, value_ptr: u32, @@ -1392,8 +1443,8 @@ define_env!(Env, , output_ptr: u32, output_len_ptr: u32, salt_ptr: u32, - salt_len: u32 - ) -> ReturnCode => { + salt_len: u32, + ) -> Result { ctx.instantiate( code_hash_ptr, gas, @@ -1407,54 +1458,59 @@ define_env!(Env, , salt_ptr, salt_len, ) - }, + } - // Remove the calling account and transfer remaining balance. - // - // # Deprecation - // - // This is equivalent to calling the newer version of this function. The newer version - // drops the now unnecessary length fields. - // - // # Note - // - // The value `_beneficiary_len` is ignored because the encoded sizes - // this type is fixed through `[`MaxEncodedLen`]. The field exist for backwards - // compatibility. Consider switching to the newest version of this function. - [seal0] seal_terminate(ctx, beneficiary_ptr: u32, _beneficiary_len: u32) => { + /// Remove the calling account and transfer remaining balance. + /// + /// # Deprecation + /// + /// This is equivalent to calling the newer version of this function. The newer version + /// drops the now unnecessary length fields. + /// + /// # Note + /// + /// The value `_beneficiary_len` is ignored because the encoded sizes + /// this type is fixed through `[`MaxEncodedLen`]. The field exist for backwards + /// compatibility. 
Consider switching to the newest version of this function. + fn seal_terminate( + ctx: Runtime, + beneficiary_ptr: u32, + _beneficiary_len: u32, + ) -> Result<(), TrapReason> { ctx.terminate(beneficiary_ptr) - }, + } - // Remove the calling account and transfer remaining **free** balance. - // - // This function never returns. Either the termination was successful and the - // execution of the destroyed contract is halted. Or it failed during the termination - // which is considered fatal and results in a trap + rollback. - // - // - beneficiary_ptr: a pointer to the address of the beneficiary account where all - // where all remaining funds of the caller are transferred. - // Should be decodable as an `T::AccountId`. Traps otherwise. - // - // # Traps - // - // - The contract is live i.e is already on the call stack. - // - Failed to send the balance to the beneficiary. - // - The deletion queue is full. - [seal1] seal_terminate(ctx, beneficiary_ptr: u32) => { + /// Remove the calling account and transfer remaining **free** balance. + /// + /// This function never returns. Either the termination was successful and the + /// execution of the destroyed contract is halted. Or it failed during the termination + /// which is considered fatal and results in a trap + rollback. + /// + /// - beneficiary_ptr: a pointer to the address of the beneficiary account where all where all + /// remaining funds of the caller are transferred. Should be decodable as an `T::AccountId`. + /// Traps otherwise. + /// + /// # Traps + /// + /// - The contract is live i.e is already on the call stack. + /// - Failed to send the balance to the beneficiary. + /// - The deletion queue is full. + #[version(1)] + fn seal_terminate(ctx: Runtime, beneficiary_ptr: u32) -> Result<(), TrapReason> { ctx.terminate(beneficiary_ptr) - }, + } - // Stores the input passed by the caller into the supplied buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - // - // # Note - // - // This function traps if the input was previously forwarded by a `seal_call`. - [seal0] seal_input(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Stores the input passed by the caller into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// + /// # Note + /// + /// This function traps if the input was previously forwarded by a `seal_call`. + fn seal_input(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::InputBase)?; if let Some(input) = ctx.input_data.take() { ctx.write_sandbox_output(out_ptr, out_len_ptr, &input, false, |len| { @@ -1465,300 +1521,402 @@ define_env!(Env, , } else { Err(Error::::InputForwarded.into()) } - }, + } - // Cease contract execution and save a data buffer as a result of the execution. - // - // This function never returns as it stops execution of the caller. - // This is the only way to return a data buffer to the caller. 
Returning from - // execution without calling this function is equivalent to calling: - // ``` - // seal_return(0, 0, 0); - // ``` - // - // The flags argument is a bitfield that can be used to signal special return - // conditions to the supervisor: - // --- lsb --- - // bit 0 : REVERT - Revert all storage changes made by the caller. - // bit [1, 31]: Reserved for future use. - // --- msb --- - // - // Using a reserved bit triggers a trap. - [seal0] seal_return(ctx, flags: u32, data_ptr: u32, data_len: u32) => { + /// Cease contract execution and save a data buffer as a result of the execution. + /// + /// This function never returns as it stops execution of the caller. + /// This is the only way to return a data buffer to the caller. Returning from + /// execution without calling this function is equivalent to calling: + /// ``` + /// seal_return(0, 0, 0); + /// ``` + /// + /// The flags argument is a bitfield that can be used to signal special return + /// conditions to the supervisor: + /// --- lsb --- + /// bit 0 : REVERT - Revert all storage changes made by the caller. + /// bit [1, 31]: Reserved for future use. + /// --- msb --- + /// + /// Using a reserved bit triggers a trap. + fn seal_return( + ctx: Runtime, + flags: u32, + data_ptr: u32, + data_len: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Return(data_len))?; Err(TrapReason::Return(ReturnData { flags, data: ctx.read_sandbox_memory(data_ptr, data_len)?, })) - }, + } - // Stores the address of the caller into the supplied buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - // - // If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the - // extrinsic will be returned. Otherwise, if this call is initiated by another contract then the - // address of the contract will be returned. The value is encoded as T::AccountId. - [seal0] seal_caller(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Stores the address of the caller into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// + /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the + /// extrinsic will be returned. Otherwise, if this call is initiated by another contract then + /// the address of the contract will be returned. The value is encoded as T::AccountId. + fn seal_caller(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Caller)?; Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &ctx.ext.caller().encode(), false, already_charged + out_ptr, + out_len_ptr, + &ctx.ext.caller().encode(), + false, + already_charged, )?) - }, + } - // Checks whether a specified address belongs to a contract. - // - // # Parameters - // - // - account_ptr: a pointer to the address of the beneficiary account - // Should be decodable as an `T::AccountId`. Traps otherwise. - // - // Returned value is a u32-encoded boolean: (0 = false, 1 = true). 
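// A minimal sketch of the `seal_return` flags bitfield documented above:
// bit 0 (REVERT) rolls back all storage changes made by the caller, bits
// 1..=31 are reserved and trap if set. The constant name is illustrative.
const RETURN_FLAG_REVERT: u32 = 0b0000_0001;

fn return_flags(revert: bool) -> u32 {
    if revert {
        RETURN_FLAG_REVERT
    } else {
        0 // plain return, storage changes are kept
    }
}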
- [seal0] seal_is_contract(ctx, account_ptr: u32) -> u32 => { + /// Checks whether a specified address belongs to a contract. + /// + /// # Parameters + /// + /// - account_ptr: a pointer to the address of the beneficiary account Should be decodable as an + /// `T::AccountId`. Traps otherwise. + /// + /// Returned value is a u32-encoded boolean: (0 = false, 1 = true). + fn seal_is_contract(ctx: Runtime, account_ptr: u32) -> Result { ctx.charge_gas(RuntimeCosts::IsContract)?; let address: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(account_ptr)?; Ok(ctx.ext.is_contract(&address) as u32) - }, + } - // Retrieve the code hash for a specified contract address. - // - // # Parameters - // - // - `account_ptr`: a pointer to the address in question. - // Should be decodable as an `T::AccountId`. Traps otherwise. - // - `out_ptr`: pointer to the linear memory where the returning value is written to. - // - `out_len_ptr`: in-out pointer into linear memory where the buffer length - // is read from and the value length is written to. - // - // # Errors - // - // `ReturnCode::KeyNotFound` - [seal0] seal_code_hash(ctx, account_ptr: u32, out_ptr: u32, out_len_ptr: u32) -> ReturnCode => { + /// Retrieve the code hash for a specified contract address. + /// + /// # Parameters + /// + /// - `account_ptr`: a pointer to the address in question. Should be decodable as an + /// `T::AccountId`. Traps otherwise. + /// - `out_ptr`: pointer to the linear memory where the returning value is written to. + /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and + /// the value length is written to. + /// + /// # Errors + /// + /// `ReturnCode::KeyNotFound` + fn seal_code_hash( + ctx: Runtime, + account_ptr: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result { ctx.charge_gas(RuntimeCosts::CodeHash)?; let address: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(account_ptr)?; if let Some(value) = ctx.ext.code_hash(&address) { - ctx.write_sandbox_output(out_ptr, out_len_ptr, &value.encode(), false, already_charged)?; + ctx.write_sandbox_output( + out_ptr, + out_len_ptr, + &value.encode(), + false, + already_charged, + )?; Ok(ReturnCode::Success) } else { Ok(ReturnCode::KeyNotFound) } - }, + } - // Retrieve the code hash of the currently executing contract. - // - // # Parameters - // - // - `out_ptr`: pointer to the linear memory where the returning value is written to. - // - `out_len_ptr`: in-out pointer into linear memory where the buffer length - // is read from and the value length is written to. - [seal0] seal_own_code_hash(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Retrieve the code hash of the currently executing contract. + /// + /// # Parameters + /// + /// - `out_ptr`: pointer to the linear memory where the returning value is written to. + /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and + /// the value length is written to. + fn seal_own_code_hash( + ctx: Runtime, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::OwnCodeHash)?; let code_hash_encoded = &ctx.ext.own_code_hash().encode(); - Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, code_hash_encoded, false, already_charged)?) - }, + Ok(ctx.write_sandbox_output( + out_ptr, + out_len_ptr, + code_hash_encoded, + false, + already_charged, + )?) + } - // Checks whether the caller of the current contract is the origin of the whole call stack. 
- // - // Prefer this over `seal_is_contract` when checking whether your contract is being called by a contract - // or a plain account. The reason is that it performs better since it does not need to - // do any storage lookups. - // - // A return value of`true` indicates that this contract is being called by a plain account - // and `false` indicates that the caller is another contract. - // - // Returned value is a u32-encoded boolean: (0 = false, 1 = true). - [seal0] seal_caller_is_origin(ctx) -> u32 => { + /// Checks whether the caller of the current contract is the origin of the whole call stack. + /// + /// Prefer this over `seal_is_contract` when checking whether your contract is being called by a + /// contract or a plain account. The reason is that it performs better since it does not need to + /// do any storage lookups. + /// + /// A return value of`true` indicates that this contract is being called by a plain account + /// and `false` indicates that the caller is another contract. + /// + /// Returned value is a u32-encoded boolean: (0 = false, 1 = true). + fn seal_caller_is_origin(ctx: Runtime) -> Result { ctx.charge_gas(RuntimeCosts::CallerIsOrigin)?; Ok(ctx.ext.caller_is_origin() as u32) - }, + } - // Stores the address of the current contract into the supplied buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - [seal0] seal_address(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Stores the address of the current contract into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + fn seal_address( + ctx: Runtime, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Address)?; Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &ctx.ext.address().encode(), false, already_charged + out_ptr, + out_len_ptr, + &ctx.ext.address().encode(), + false, + already_charged, )?) - }, + } - // Stores the price for the specified amount of gas into the supplied buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - // - // The data is encoded as T::Balance. - // - // # Note - // - // It is recommended to avoid specifying very small values for `gas` as the prices for a single - // gas can be smaller than one. - [seal0] seal_weight_to_fee(ctx, gas: u64, out_ptr: u32, out_len_ptr: u32) => { + /// Stores the price for the specified amount of gas into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. 
If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// + /// The data is encoded as T::Balance. + /// + /// # Note + /// + /// It is recommended to avoid specifying very small values for `gas` as the prices for a single + /// gas can be smaller than one. + fn seal_weight_to_fee( + ctx: Runtime, + gas: u64, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::WeightToFee)?; Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &ctx.ext.get_weight_price(gas).encode(), false, already_charged + out_ptr, + out_len_ptr, + &ctx.ext.get_weight_price(gas).encode(), + false, + already_charged, )?) - }, + } - // Stores the amount of gas left into the supplied buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - // - // The data is encoded as Gas. - [seal0] seal_gas_left(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Stores the amount of gas left into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// + /// The data is encoded as Gas. + fn seal_gas_left( + ctx: Runtime, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::GasLeft)?; let gas_left = &ctx.ext.gas_meter().gas_left().encode(); - Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, gas_left, false, already_charged, - )?) - }, - - // Stores the **free* balance of the current account into the supplied buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - // - // The data is encoded as T::Balance. - [seal0] seal_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { + Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, gas_left, false, already_charged)?) + } + + /// Stores the **free* balance of the current account into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// + /// The data is encoded as T::Balance. + fn seal_balance( + ctx: Runtime, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Balance)?; Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &ctx.ext.balance().encode(), false, already_charged + out_ptr, + out_len_ptr, + &ctx.ext.balance().encode(), + false, + already_charged, )?) - }, + } - // Stores the value transferred along with this call/instantiate into the supplied buffer. 
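// A minimal contract-side sketch of the `out_ptr` / `out_len_ptr` convention
// shared by `seal_balance` and the other getters documented in this file.
// Assumptions: wasm32 target, the "seal0" import module, and that 32 bytes is
// enough for the SCALE-encoded balance. The host overwrites the length cell
// with the actual size of the value it wrote, and traps if the buffer is too
// small.
#[link(wasm_import_module = "seal0")]
extern "C" {
    fn seal_balance(out_ptr: u32, out_len_ptr: u32);
}

fn read_balance_bytes() -> ([u8; 32], usize) {
    let mut buf = [0u8; 32];             // scratch buffer in linear memory
    let mut len: u32 = buf.len() as u32; // available space, updated by the host
    unsafe { seal_balance(buf.as_mut_ptr() as u32, &mut len as *mut u32 as u32) };
    (buf, len as usize)                  // SCALE-encoded free balance and its length
}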
- // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - // - // The data is encoded as T::Balance. - [seal0] seal_value_transferred(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Stores the value transferred along with this call/instantiate into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// + /// The data is encoded as T::Balance. + fn seal_value_transferred( + ctx: Runtime, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::ValueTransferred)?; Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &ctx.ext.value_transferred().encode(), false, already_charged + out_ptr, + out_len_ptr, + &ctx.ext.value_transferred().encode(), + false, + already_charged, )?) - }, + } - // Stores a random number for the current block and the given subject into the supplied buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - // - // The data is encoded as T::Hash. - // - // # Deprecation - // - // This function is deprecated. Users should migrate to the version in the "seal1" module. - [seal0] seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { + /// Stores a random number for the current block and the given subject into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// + /// The data is encoded as T::Hash. + /// + /// # Deprecation + /// + /// This function is deprecated. Users should migrate to the version in the "seal1" module. + fn seal_random( + ctx: Runtime, + subject_ptr: u32, + subject_len: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Random)?; if subject_len > ctx.ext.schedule().limits.subject_len { - return Err(Error::::RandomSubjectTooLong.into()); + return Err(Error::::RandomSubjectTooLong.into()) } let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?; Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).0.encode(), false, already_charged + out_ptr, + out_len_ptr, + &ctx.ext.random(&subject_buf).0.encode(), + false, + already_charged, )?) - }, + } - // Stores a random number for the current block and the given subject into the supplied buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. 
This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - // - // The data is encoded as (T::Hash, T::BlockNumber). - // - // # Changes from v0 - // - // In addition to the seed it returns the block number since which it was determinable - // by chain observers. - // - // # Note - // - // The returned seed should only be used to distinguish commitments made before - // the returned block number. If the block number is too early (i.e. commitments were - // made afterwards), then ensure no further commitments may be made and repeatedly - // call this on later blocks until the block number returned is later than the latest - // commitment. - [seal1] seal_random(ctx, subject_ptr: u32, subject_len: u32, out_ptr: u32, out_len_ptr: u32) => { + /// Stores a random number for the current block and the given subject into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// + /// The data is encoded as (T::Hash, T::BlockNumber). + /// + /// # Changes from v0 + /// + /// In addition to the seed it returns the block number since which it was determinable + /// by chain observers. + /// + /// # Note + /// + /// The returned seed should only be used to distinguish commitments made before + /// the returned block number. If the block number is too early (i.e. commitments were + /// made afterwards), then ensure no further commitments may be made and repeatedly + /// call this on later blocks until the block number returned is later than the latest + /// commitment. + #[version(1)] + fn seal_random( + ctx: Runtime, + subject_ptr: u32, + subject_len: u32, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Random)?; if subject_len > ctx.ext.schedule().limits.subject_len { - return Err(Error::::RandomSubjectTooLong.into()); + return Err(Error::::RandomSubjectTooLong.into()) } let subject_buf = ctx.read_sandbox_memory(subject_ptr, subject_len)?; Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &ctx.ext.random(&subject_buf).encode(), false, already_charged + out_ptr, + out_len_ptr, + &ctx.ext.random(&subject_buf).encode(), + false, + already_charged, )?) - }, + } - // Load the latest block timestamp into the supplied buffer - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - [seal0] seal_now(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Load the latest block timestamp into the supplied buffer + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. 
+ fn seal_now(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Now)?; Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &ctx.ext.now().encode(), false, already_charged + out_ptr, + out_len_ptr, + &ctx.ext.now().encode(), + false, + already_charged, )?) - }, + } - // Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. - // - // The data is encoded as T::Balance. - [seal0] seal_minimum_balance(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. + /// + /// The data is encoded as T::Balance. + fn seal_minimum_balance( + ctx: Runtime, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::MinimumBalance)?; Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &ctx.ext.minimum_balance().encode(), false, already_charged + out_ptr, + out_len_ptr, + &ctx.ext.minimum_balance().encode(), + false, + already_charged, )?) - }, + } - // Stores the tombstone deposit into the supplied buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - // - // # Deprecation - // - // There is no longer a tombstone deposit. This function always returns 0. - [seal0] seal_tombstone_deposit(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Stores the tombstone deposit into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// + /// # Deprecation + /// + /// There is no longer a tombstone deposit. This function always returns 0. + fn seal_tombstone_deposit( + ctx: Runtime, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Balance)?; let deposit = >::zero().encode(); Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, &deposit, false, already_charged)?) - }, + } - // Was used to restore the given destination contract sacrificing the caller. - // - // # Note - // - // The state rent functionality was removed. This is stub only exists for - // backwards compatibility - [seal0] seal_restore_to( - ctx, + /// Was used to restore the given destination contract sacrificing the caller. + /// + /// # Note + /// + /// The state rent functionality was removed. This is stub only exists for + /// backwards compatiblity + fn seal_restore_to( + ctx: Runtime, _dest_ptr: u32, _dest_len: u32, _code_hash_ptr: u32, @@ -1766,45 +1924,46 @@ define_env!(Env, , _rent_allowance_ptr: u32, _rent_allowance_len: u32, _delta_ptr: u32, - _delta_count: u32 - ) => { + _delta_count: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) - }, + } - // Was used to restore the given destination contract sacrificing the caller. - // - // # Note - // - // The state rent functionality was removed. This is stub only exists for - // backwards compatibility - [seal1] seal_restore_to( - ctx, + /// Was used to restore the given destination contract sacrificing the caller. 
+ /// + /// # Note + /// + /// The state rent functionality was removed. This is stub only exists for + /// backwards compatiblity + #[version(1)] + fn seal_restore_to( + ctx: Runtime, _dest_ptr: u32, _code_hash_ptr: u32, _rent_allowance_ptr: u32, _delta_ptr: u32, - _delta_count: u32 - ) => { + _delta_count: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) - }, + } - // Deposit a contract event with the data buffer and optional list of topics. There is a limit - // on the maximum number of topics specified by `event_topics`. - // - // - topics_ptr - a pointer to the buffer of topics encoded as `Vec`. The value of this - // is ignored if `topics_len` is set to 0. The topics list can't contain duplicates. - // - topics_len - the length of the topics buffer. Pass 0 if you want to pass an empty vector. - // - data_ptr - a pointer to a raw data buffer which will saved along the event. - // - data_len - the length of the data buffer. - [seal0] seal_deposit_event( - ctx, + /// Deposit a contract event with the data buffer and optional list of topics. There is a limit + /// on the maximum number of topics specified by `event_topics`. + /// + /// - topics_ptr - a pointer to the buffer of topics encoded as `Vec`. The value of + /// this is ignored if `topics_len` is set to 0. The topics list can't contain duplicates. + /// - topics_len - the length of the topics buffer. Pass 0 if you want to pass an empty vector. + /// - data_ptr - a pointer to a raw data buffer which will saved along the event. + /// - data_len - the length of the data buffer. + fn seal_deposit_event( + ctx: Runtime, topics_ptr: u32, topics_len: u32, data_ptr: u32, - data_len: u32 - ) => { + data_len: u32, + ) -> Result<(), TrapReason> { fn has_duplicates(items: &mut Vec) -> bool { // # Warning // @@ -1812,40 +1971,35 @@ define_env!(Env, , // because we are rejecting duplicates which removes the non determinism. items.sort_unstable(); // Find any two consecutive equal elements. - items.windows(2).any(|w| { - match &w { - &[a, b] => a == b, - _ => false, - } + items.windows(2).any(|w| match &w { + &[a, b] => a == b, + _ => false, }) } let num_topic = topics_len .checked_div(sp_std::mem::size_of::>() as u32) .ok_or("Zero sized topics are not allowed")?; - ctx.charge_gas(RuntimeCosts::DepositEvent { - num_topic, - len: data_len, - })?; + ctx.charge_gas(RuntimeCosts::DepositEvent { num_topic, len: data_len })?; if data_len > ctx.ext.max_value_size() { - return Err(Error::::ValueTooLarge.into()); + return Err(Error::::ValueTooLarge.into()) } - let mut topics: Vec::::T>> = match topics_len { + let mut topics: Vec::T>> = match topics_len { 0 => Vec::new(), _ => ctx.read_sandbox_memory_as_unbounded(topics_ptr, topics_len)?, }; // If there are more than `event_topics`, then trap. if topics.len() > ctx.ext.schedule().limits.event_topics as usize { - return Err(Error::::TooManyTopics.into()); + return Err(Error::::TooManyTopics.into()) } // Check for duplicate topics. If there are any, then trap. // Complexity O(n * log(n)) and no additional allocations. // This also sorts the topics. if has_duplicates(&mut topics) { - return Err(Error::::DuplicateTopics.into()); + return Err(Error::::DuplicateTopics.into()) } let event_data = ctx.read_sandbox_memory(data_ptr, data_len)?; @@ -1853,179 +2007,208 @@ define_env!(Env, , ctx.ext.deposit_event(topics, event_data); Ok(()) - }, + } - // Was used to set rent allowance of the contract. - // - // # Note - // - // The state rent functionality was removed. 
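// A usage sketch of the duplicate-topic check shown above in
// `seal_deposit_event`: topics are sorted unstably (fine, since duplicates are
// rejected and the non-determinism disappears) and any two equal neighbours
// cause the call to trap with `DuplicateTopics`.
fn has_duplicates<T: Ord>(items: &mut Vec<T>) -> bool {
    items.sort_unstable();
    items.windows(2).any(|w| w[0] == w[1])
}

fn main() {
    assert!(has_duplicates(&mut vec![2u32, 7, 2]));
    assert!(!has_duplicates(&mut vec![1u32, 2, 3]));
}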
This is stub only exists for - // backwards compatibility. - [seal0] seal_set_rent_allowance(ctx, _value_ptr: u32, _value_len: u32) => { + /// Was used to set rent allowance of the contract. + /// + /// # Note + /// + /// The state rent functionality was removed. This is stub only exists for + /// backwards compatiblity. + fn seal_set_rent_allowance( + ctx: Runtime, + _value_ptr: u32, + _value_len: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) - }, + } - // Was used to set rent allowance of the contract. - // - // # Note - // - // The state rent functionality was removed. This is stub only exists for - // backwards compatibility. - [seal1] seal_set_rent_allowance(ctx, _value_ptr: u32) => { + /// Was used to set rent allowance of the contract. + /// + /// # Note + /// + /// The state rent functionality was removed. This is stub only exists for + /// backwards compatiblity. + #[version(1)] + fn seal_set_rent_allowance(ctx: Runtime, _value_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) - }, + } - // Was used to store the rent allowance into the supplied buffer. - // - // # Note - // - // The state rent functionality was removed. This is stub only exists for - // backwards compatibility. - [seal0] seal_rent_allowance(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Was used to store the rent allowance into the supplied buffer. + /// + /// # Note + /// + /// The state rent functionality was removed. This is stub only exists for + /// backwards compatiblity. + fn seal_rent_allowance( + ctx: Runtime, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Balance)?; let rent_allowance = >::max_value().encode(); Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &rent_allowance, false, already_charged + out_ptr, + out_len_ptr, + &rent_allowance, + false, + already_charged, )?) - }, + } - // Stores the current block number of the current contract into the supplied buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // `out_len_ptr` must point to a u32 value that describes the available space at - // `out_ptr`. This call overwrites it with the size of the value. If the available - // space at `out_ptr` is less than the size of the value a trap is triggered. - [seal0] seal_block_number(ctx, out_ptr: u32, out_len_ptr: u32) => { + /// Stores the current block number of the current contract into the supplied buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// `out_len_ptr` must point to a u32 value that describes the available space at + /// `out_ptr`. This call overwrites it with the size of the value. If the available + /// space at `out_ptr` is less than the size of the value a trap is triggered. + fn seal_block_number( + ctx: Runtime, + out_ptr: u32, + out_len_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::BlockNumber)?; Ok(ctx.write_sandbox_output( - out_ptr, out_len_ptr, &ctx.ext.block_number().encode(), false, already_charged + out_ptr, + out_len_ptr, + &ctx.ext.block_number().encode(), + false, + already_charged, )?) - }, + } - // Computes the SHA2 256-bit hash on the given input buffer. - // - // Returns the result directly into the given output buffer. - // - // # Note - // - // - The `input` and `output` buffer may overlap. - // - The output buffer is expected to hold at least 32 bytes (256 bits). 
- // - It is the callers responsibility to provide an output buffer that - // is large enough to hold the expected amount of bytes returned by the - // chosen hash function. - // - // # Parameters - // - // - `input_ptr`: the pointer into the linear memory where the input - // data is placed. - // - `input_len`: the length of the input data in bytes. - // - `output_ptr`: the pointer into the linear memory where the output - // data is placed. The function will write the result - // directly into this buffer. - [seal0] seal_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + /// Computes the SHA2 256-bit hash on the given input buffer. + /// + /// Returns the result directly into the given output buffer. + /// + /// # Note + /// + /// - The `input` and `output` buffer may overlap. + /// - The output buffer is expected to hold at least 32 bytes (256 bits). + /// - It is the callers responsibility to provide an output buffer that is large enough to hold + /// the expected amount of bytes returned by the chosen hash function. + /// + /// # Parameters + /// + /// - `input_ptr`: the pointer into the linear memory where the input data is placed. + /// - `input_len`: the length of the input data in bytes. + /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The + /// function will write the result directly into this buffer. + fn seal_hash_sha2_256( + ctx: Runtime, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::HashSha256(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(sha2_256, input_ptr, input_len, output_ptr)?) - }, + } - // Computes the KECCAK 256-bit hash on the given input buffer. - // - // Returns the result directly into the given output buffer. - // - // # Note - // - // - The `input` and `output` buffer may overlap. - // - The output buffer is expected to hold at least 32 bytes (256 bits). - // - It is the callers responsibility to provide an output buffer that - // is large enough to hold the expected amount of bytes returned by the - // chosen hash function. - // - // # Parameters - // - // - `input_ptr`: the pointer into the linear memory where the input - // data is placed. - // - `input_len`: the length of the input data in bytes. - // - `output_ptr`: the pointer into the linear memory where the output - // data is placed. The function will write the result - // directly into this buffer. - [seal0] seal_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + /// Computes the KECCAK 256-bit hash on the given input buffer. + /// + /// Returns the result directly into the given output buffer. + /// + /// # Note + /// + /// - The `input` and `output` buffer may overlap. + /// - The output buffer is expected to hold at least 32 bytes (256 bits). + /// - It is the callers responsibility to provide an output buffer that is large enough to hold + /// the expected amount of bytes returned by the chosen hash function. + /// + /// # Parameters + /// + /// - `input_ptr`: the pointer into the linear memory where the input data is placed. + /// - `input_len`: the length of the input data in bytes. + /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The + /// function will write the result directly into this buffer. 
+ fn seal_hash_keccak_256( + ctx: Runtime, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::HashKeccak256(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(keccak_256, input_ptr, input_len, output_ptr)?) - }, + } - // Computes the BLAKE2 256-bit hash on the given input buffer. - // - // Returns the result directly into the given output buffer. - // - // # Note - // - // - The `input` and `output` buffer may overlap. - // - The output buffer is expected to hold at least 32 bytes (256 bits). - // - It is the callers responsibility to provide an output buffer that - // is large enough to hold the expected amount of bytes returned by the - // chosen hash function. - // - // # Parameters - // - // - `input_ptr`: the pointer into the linear memory where the input - // data is placed. - // - `input_len`: the length of the input data in bytes. - // - `output_ptr`: the pointer into the linear memory where the output - // data is placed. The function will write the result - // directly into this buffer. - [seal0] seal_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + /// Computes the BLAKE2 256-bit hash on the given input buffer. + /// + /// Returns the result directly into the given output buffer. + /// + /// # Note + /// + /// - The `input` and `output` buffer may overlap. + /// - The output buffer is expected to hold at least 32 bytes (256 bits). + /// - It is the callers responsibility to provide an output buffer that is large enough to hold + /// the expected amount of bytes returned by the chosen hash function. + /// + /// # Parameters + /// + /// - `input_ptr`: the pointer into the linear memory where the input data is placed. + /// - `input_len`: the length of the input data in bytes. + /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The + /// function will write the result directly into this buffer. + fn seal_hash_blake2_256( + ctx: Runtime, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::HashBlake256(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(blake2_256, input_ptr, input_len, output_ptr)?) - }, + } - // Computes the BLAKE2 128-bit hash on the given input buffer. - // - // Returns the result directly into the given output buffer. - // - // # Note - // - // - The `input` and `output` buffer may overlap. - // - The output buffer is expected to hold at least 16 bytes (128 bits). - // - It is the callers responsibility to provide an output buffer that - // is large enough to hold the expected amount of bytes returned by the - // chosen hash function. - // - // # Parameters - // - // - `input_ptr`: the pointer into the linear memory where the input - // data is placed. - // - `input_len`: the length of the input data in bytes. - // - `output_ptr`: the pointer into the linear memory where the output - // data is placed. The function will write the result - // directly into this buffer. - [seal0] seal_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + /// Computes the BLAKE2 128-bit hash on the given input buffer. + /// + /// Returns the result directly into the given output buffer. + /// + /// # Note + /// + /// - The `input` and `output` buffer may overlap. + /// - The output buffer is expected to hold at least 16 bytes (128 bits). 
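// A quick reference sketch for the hashing host functions in this group: the
// caller must reserve an output buffer of at least the digest size. The sizes
// follow the doc comments above (SHA2-256, KECCAK-256 and BLAKE2-256 write 32
// bytes, BLAKE2-128 writes 16 bytes).
fn digest_len(host_fn: &str) -> Option<usize> {
    match host_fn {
        "seal_hash_sha2_256" | "seal_hash_keccak_256" | "seal_hash_blake2_256" => Some(32),
        "seal_hash_blake2_128" => Some(16),
        _ => None,
    }
}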
+ /// - It is the callers responsibility to provide an output buffer that is large enough to hold + /// the expected amount of bytes returned by the chosen hash function. + /// + /// # Parameters + /// + /// - `input_ptr`: the pointer into the linear memory where the input data is placed. + /// - `input_len`: the length of the input data in bytes. + /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The + /// function will write the result directly into this buffer. + fn seal_hash_blake2_128( + ctx: Runtime, + input_ptr: u32, + input_len: u32, + output_ptr: u32, + ) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::HashBlake128(input_len))?; Ok(ctx.compute_hash_on_intermediate_buffer(blake2_128, input_ptr, input_len, output_ptr)?) - }, + } - // Call into the chain extension provided by the chain if any. - // - // Handling of the input values is up to the specific chain extension and so is the - // return value. The extension can decide to use the inputs as primitive inputs or as - // in/out arguments by interpreting them as pointers. Any caller of this function - // must therefore coordinate with the chain that it targets. - // - // # Note - // - // If no chain extension exists the contract will trap with the `NoChainExtension` - // module error. - [seal0] seal_call_chain_extension( - ctx, + /// Call into the chain extension provided by the chain if any. + /// + /// Handling of the input values is up to the specific chain extension and so is the + /// return value. The extension can decide to use the inputs as primitive inputs or as + /// in/out arguments by interpreting them as pointers. Any caller of this function + /// must therefore coordinate with the chain that it targets. + /// + /// # Note + /// + /// If no chain extension exists the contract will trap with the `NoChainExtension` + /// module error. + fn seal_call_chain_extension( + ctx: Runtime, id: u32, input_ptr: u32, input_len: u32, output_ptr: u32, - output_len_ptr: u32 - ) -> u32 => { + output_len_ptr: u32, + ) -> Result { use crate::chain_extension::{ChainExtension, Environment, RetVal}; if !::ChainExtension::enabled() { - return Err(Error::::NoChainExtension.into()); + return Err(Error::::NoChainExtension.into()) } let mut chain_extension = ctx.chain_extension.take().expect( "Constructor initializes with `Some`. This is the only place where it is set to `None`.\ @@ -2034,87 +2217,93 @@ define_env!(Env, , let env = Environment::new(ctx, id, input_ptr, input_len, output_ptr, output_len_ptr); let ret = match chain_extension.call(env)? { RetVal::Converging(val) => Ok(val), - RetVal::Diverging{flags, data} => Err(TrapReason::Return(ReturnData { - flags: flags.bits(), - data, - })), + RetVal::Diverging { flags, data } => + Err(TrapReason::Return(ReturnData { flags: flags.bits(), data })), }; ctx.chain_extension = Some(chain_extension); ret - }, + } - // Emit a custom debug message. - // - // No newlines are added to the supplied message. - // Specifying invalid UTF-8 triggers a trap. - // - // This is a no-op if debug message recording is disabled which is always the case - // when the code is executing on-chain. The message is interpreted as UTF-8 and - // appended to the debug buffer which is then supplied to the calling RPC client. - // - // # Note - // - // Even though no action is taken when debug message recording is disabled there is still - // a non trivial overhead (and weight cost) associated with calling this function. 
Contract - // languages should remove calls to this function (either at runtime or compile time) when - // not being executed as an RPC. For example, they could allow users to disable logging - // through compile time flags (cargo features) for on-chain deployment. Additionally, the - // return value of this function can be cached in order to prevent further calls at runtime. - [seal0] seal_debug_message(ctx, str_ptr: u32, str_len: u32) -> ReturnCode => { + /// Emit a custom debug message. + /// + /// No newlines are added to the supplied message. + /// Specifying invalid UTF-8 triggers a trap. + /// + /// This is a no-op if debug message recording is disabled which is always the case + /// when the code is executing on-chain. The message is interpreted as UTF-8 and + /// appended to the debug buffer which is then supplied to the calling RPC client. + /// + /// # Note + /// + /// Even though no action is taken when debug message recording is disabled there is still + /// a non trivial overhead (and weight cost) associated with calling this function. Contract + /// languages should remove calls to this function (either at runtime or compile time) when + /// not being executed as an RPC. For example, they could allow users to disable logging + /// through compile time flags (cargo features) for on-chain deployment. Additionally, the + /// return value of this function can be cached in order to prevent further calls at runtime. + fn seal_debug_message( + ctx: Runtime, + str_ptr: u32, + str_len: u32, + ) -> Result { ctx.charge_gas(RuntimeCosts::DebugMessage)?; if ctx.ext.append_debug_buffer("") { let data = ctx.read_sandbox_memory(str_ptr, str_len)?; - let msg = core::str::from_utf8(&data) - .map_err(|_| >::DebugMessageInvalidUTF8)?; + let msg = + core::str::from_utf8(&data).map_err(|_| >::DebugMessageInvalidUTF8)?; ctx.ext.append_debug_buffer(msg); - return Ok(ReturnCode::Success); + return Ok(ReturnCode::Success) } Ok(ReturnCode::LoggingDisabled) - }, + } - // Call some dispatchable of the runtime. - // - // This function decodes the passed in data as the overarching `Call` type of the - // runtime and dispatches it. The weight as specified in the runtime is charged - // from the gas meter. Any weight refunds made by the dispatchable are considered. - // - // The filter specified by `Config::CallFilter` is attached to the origin of - // the dispatched call. - // - // # Parameters - // - // - `input_ptr`: the pointer into the linear memory where the input data is placed. - // - `input_len`: the length of the input data in bytes. - // - // # Return Value - // - // Returns `ReturnCode::Success` when the dispatchable was successfully executed and - // returned `Ok`. When the dispatchable was executed but returned an error - // `ReturnCode::CallRuntimeReturnedError` is returned. The full error is not - // provided because it is not guaranteed to be stable. - // - // # Comparison with `ChainExtension` - // - // Just as a chain extension this API allows the runtime to extend the functionality - // of contracts. While making use of this function is generally easier it cannot be - // used in call cases. Consider writing a chain extension if you need to do perform - // one of the following tasks: - // - // - Return data. - // - Provide functionality **exclusively** to contracts. - // - Provide custom weights. - // - Avoid the need to keep the `Call` data structure stable. - // - // # Unstable - // - // This function is unstable and subject to change (or removal) in the future. 
Do not - // deploy a contract using it to a production chain. - [__unstable__] seal_call_runtime(ctx, call_ptr: u32, call_len: u32) -> ReturnCode => { + /// Call some dispatchable of the runtime. + /// + /// This function decodes the passed in data as the overarching `Call` type of the + /// runtime and dispatches it. The weight as specified in the runtime is charged + /// from the gas meter. Any weight refunds made by the dispatchable are considered. + /// + /// The filter specified by `Config::CallFilter` is attached to the origin of + /// the dispatched call. + /// + /// # Parameters + /// + /// - `input_ptr`: the pointer into the linear memory where the input data is placed. + /// - `input_len`: the length of the input data in bytes. + /// + /// # Return Value + /// + /// Returns `ReturnCode::Success` when the dispatchable was succesfully executed and + /// returned `Ok`. When the dispatchable was exeuted but returned an error + /// `ReturnCode::CallRuntimeReturnedError` is returned. The full error is not + /// provided because it is not guaranteed to be stable. + /// + /// # Comparison with `ChainExtension` + /// + /// Just as a chain extension this API allows the runtime to extend the functionality + /// of contracts. While making use of this function is generelly easier it cannot be + /// used in call cases. Consider writing a chain extension if you need to do perform + /// one of the following tasks: + /// + /// - Return data. + /// - Provide functionality **exclusively** to contracts. + /// - Provide custom weights. + /// - Avoid the need to keep the `Call` data structure stable. + /// + /// # Unstable + /// + /// This function is unstable and subject to change (or removal) in the future. Do not + /// deploy a contract using it to a production chain. + #[unstable] + fn seal_call_runtime( + ctx: Runtime, + call_ptr: u32, + call_len: u32, + ) -> Result { use frame_support::{dispatch::GetDispatchInfo, weights::extract_actual_weight}; ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?; - let call: ::Call = ctx.read_sandbox_memory_as_unbounded( - call_ptr, call_len - )?; + let call: ::Call = + ctx.read_sandbox_memory_as_unbounded(call_ptr, call_len)?; let dispatch_info = call.get_dispatch_info(); let charged = ctx.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; let result = ctx.ext.call_runtime(call); @@ -2124,27 +2313,31 @@ define_env!(Env, , Ok(_) => Ok(ReturnCode::Success), Err(_) => Ok(ReturnCode::CallRuntimeReturnedError), } - }, + } - // Recovers the ECDSA public key from the given message hash and signature. - // - // Writes the public key into the given output buffer. - // Assumes the secp256k1 curve. - // - // # Parameters - // - // - `signature_ptr`: the pointer into the linear memory where the signature - // is placed. Should be decodable as a 65 bytes. Traps otherwise. - // - `message_hash_ptr`: the pointer into the linear memory where the message - // hash is placed. Should be decodable as a 32 bytes. Traps otherwise. - // - `output_ptr`: the pointer into the linear memory where the output - // data is placed. The buffer should be 33 bytes. The function - // will write the result directly into this buffer. - // - // # Errors - // - // `ReturnCode::EcdsaRecoverFailed` - [seal0] seal_ecdsa_recover(ctx, signature_ptr: u32, message_hash_ptr: u32, output_ptr: u32) -> ReturnCode => { + /// Recovers the ECDSA public key from the given message hash and signature. + /// + /// Writes the public key into the given output buffer. + /// Assumes the secp256k1 curve. 
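	/// The signature is expected to be 65 bytes: `r` and `s` (32 bytes each) followed by a
	/// 1-byte recovery id.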
+ /// + /// # Parameters + /// + /// - `signature_ptr`: the pointer into the linear memory where the signature is placed. Should + /// be decodable as a 65 bytes. Traps otherwise. + /// - `message_hash_ptr`: the pointer into the linear memory where the message hash is placed. + /// Should be decodable as a 32 bytes. Traps otherwise. + /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The + /// buffer should be 33 bytes. The function will write the result directly into this buffer. + /// + /// # Errors + /// + /// `ReturnCode::EcdsaRecoverFailed` + fn seal_ecdsa_recover( + ctx: Runtime, + signature_ptr: u32, + message_hash_ptr: u32, + output_ptr: u32, + ) -> Result { ctx.charge_gas(RuntimeCosts::EcdsaRecovery)?; let mut signature: [u8; 65] = [0; 65]; @@ -2164,65 +2357,72 @@ define_env!(Env, , }, Err(_) => Ok(ReturnCode::EcdsaRecoverFailed), } - }, + } - // Replace the contract code at the specified address with new code. - // - // # Note - // - // There are a couple of important considerations which must be taken into account when - // using this API: - // - // 1. The storage at the code address will remain untouched. This means that contract developers - // must ensure that the storage layout of the new code is compatible with that of the old code. - // - // 2. Contracts using this API can't be assumed as having deterministic addresses. Said another way, - // when using this API you lose the guarantee that an address always identifies a specific code hash. - // - // 3. If a contract calls into itself after changing its code the new call would use - // the new code. However, if the original caller panics after returning from the sub call it - // would revert the changes made by `seal_set_code_hash` and the next caller would use - // the old code. - // - // # Parameters - // - // - `code_hash_ptr`: A pointer to the buffer that contains the new code hash. - // - // # Errors - // - // `ReturnCode::CodeNotFound` - [seal0] seal_set_code_hash(ctx, code_hash_ptr: u32) -> ReturnCode => { + /// Replace the contract code at the specified address with new code. + /// + /// # Note + /// + /// There are a couple of important considerations which must be taken into account when + /// using this API: + /// + /// 1. The storage at the code address will remain untouched. This means that contract + /// developers must ensure that the storage layout of the new code is compatible with that of + /// the old code. + /// + /// 2. Contracts using this API can't be assumed as having deterministic addresses. Said another + /// way, when using this API you lose the guarantee that an address always identifies a specific + /// code hash. + /// 3. If a contract calls into itself after changing its code the new call would use + /// the new code. However, if the original caller panics after returning from the sub call it + /// would revert the changes made by `seal_set_code_hash` and the next caller would use + /// the old code. + /// + /// # Parameters + /// + /// - `code_hash_ptr`: A pointer to the buffer that contains the new code hash. 
+ /// + /// # Errors + /// + /// `ReturnCode::CodeNotFound` + fn seal_set_code_hash( + ctx: Runtime, + code_hash_ptr: u32, + ) -> Result { ctx.charge_gas(RuntimeCosts::SetCodeHash)?; let code_hash: CodeHash<::T> = ctx.read_sandbox_memory_as(code_hash_ptr)?; match ctx.ext.set_code_hash(code_hash) { - Err(err) => { + Err(err) => { let code = Runtime::::err_into_return_code(err)?; Ok(code) }, - Ok(()) => Ok(ReturnCode::Success) + Ok(()) => Ok(ReturnCode::Success), } - }, + } - // Calculates Ethereum address from the ECDSA compressed public key and stores - // it into the supplied buffer. - // - // # Parameters - // - // - `key_ptr`: a pointer to the ECDSA compressed public key. Should be decodable as a 33 bytes value. - // Traps otherwise. - // - `out_ptr`: the pointer into the linear memory where the output - // data is placed. The function will write the result - // directly into this buffer. - // - // The value is stored to linear memory at the address pointed to by `out_ptr`. - // If the available space at `out_ptr` is less than the size of the value a trap is triggered. - // - // # Errors - // - // `ReturnCode::EcdsaRecoverFailed` - [seal0] seal_ecdsa_to_eth_address(ctx, key_ptr: u32, out_ptr: u32) -> ReturnCode => { + /// Calculates Ethereum address from the ECDSA compressed public key and stores + /// it into the supplied buffer. + /// + /// # Parameters + /// + /// - `key_ptr`: a pointer to the ECDSA compressed public key. Should be decodable as a 33 bytes + /// value. Traps otherwise. + /// - `out_ptr`: the pointer into the linear memory where the output data is placed. The + /// function will write the result directly into this buffer. + /// + /// The value is stored to linear memory at the address pointed to by `out_ptr`. + /// If the available space at `out_ptr` is less than the size of the value a trap is triggered. 
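	/// The resulting address is the last 20 bytes of the keccak-256 hash of the uncompressed
	/// public key, so 20 bytes are written to the output buffer.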
+ /// + /// # Errors + /// + /// `ReturnCode::EcdsaRecoverFailed` + fn seal_ecdsa_to_eth_address( + ctx: Runtime, + key_ptr: u32, + out_ptr: u32, + ) -> Result { ctx.charge_gas(RuntimeCosts::EcdsaToEthAddress)?; - let mut compressed_key: [u8; 33] = [0;33]; + let mut compressed_key: [u8; 33] = [0; 33]; ctx.read_sandbox_memory_into_buf(key_ptr, &mut compressed_key)?; let result = ctx.ext.ecdsa_to_eth_address(&compressed_key); match result { @@ -2232,5 +2432,5 @@ define_env!(Env, , }, Err(_) => Ok(ReturnCode::EcdsaRecoverFailed), } - }, -); + } +} From 57e3486d9c7bb4deaef33cf9ba2da083b4e40314 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Mon, 22 Aug 2022 18:53:14 +0300 Subject: [PATCH 054/348] Network sync refactoring (part 7) (#12006) * Move `MultiaddrWithPeerId` and related parsing functions into `sc-network-common`, remove dependency on `sc-network` from `sc-chain-spec` * Remove dependency on `sc-network` from `sc-offchain` * Remove dependency on `sc-network` from `sc-network-gossip` --- Cargo.lock | 10 +- client/authority-discovery/Cargo.toml | 1 - client/authority-discovery/src/error.rs | 2 +- client/chain-spec/Cargo.toml | 2 +- client/chain-spec/src/chain_spec.rs | 2 +- client/chain-spec/src/lib.rs | 2 +- client/network-gossip/Cargo.toml | 2 +- client/network-gossip/src/bridge.rs | 4 +- client/network-gossip/src/lib.rs | 2 +- client/network-gossip/src/state_machine.rs | 6 +- client/network-gossip/src/validator.rs | 2 +- client/network/common/Cargo.toml | 1 + client/network/common/src/config.rs | 129 ++++++++++++++++++++- client/network/src/config.rs | 129 +-------------------- client/network/src/service.rs | 3 +- client/network/src/service/tests.rs | 16 +-- client/network/test/src/lib.rs | 6 +- client/offchain/Cargo.toml | 3 +- client/offchain/src/api.rs | 5 +- client/offchain/src/lib.rs | 3 +- client/rpc/Cargo.toml | 1 + client/rpc/src/system/tests.rs | 2 +- client/service/src/config.rs | 5 +- 23 files changed, 173 insertions(+), 165 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 060cd960ee985..1c453da9d3045 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7730,7 +7730,6 @@ dependencies = [ "quickcheck", "rand 0.7.3", "sc-client-api", - "sc-network", "sc-network-common", "sp-api", "sp-authority-discovery", @@ -7793,7 +7792,7 @@ dependencies = [ "memmap2 0.5.0", "parity-scale-codec", "sc-chain-spec-derive", - "sc-network", + "sc-network-common", "sc-telemetry", "serde", "serde_json", @@ -8426,6 +8425,7 @@ dependencies = [ "prost-build", "sc-consensus", "sc-peerset", + "serde", "smallvec", "sp-consensus", "sp-finality-grandpa", @@ -8445,8 +8445,8 @@ dependencies = [ "log", "lru", "quickcheck", - "sc-network", "sc-network-common", + "sc-peerset", "sp-runtime", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -8547,6 +8547,7 @@ dependencies = [ "hyper", "hyper-rustls", "lazy_static", + "libp2p", "num_cpus", "once_cell", "parity-scale-codec", @@ -8555,8 +8556,8 @@ dependencies = [ "sc-block-builder", "sc-client-api", "sc-client-db", - "sc-network", "sc-network-common", + "sc-peerset", "sc-transaction-pool", "sc-transaction-pool-api", "sc-utils", @@ -8610,6 +8611,7 @@ dependencies = [ "sc-chain-spec", "sc-client-api", "sc-network", + "sc-network-common", "sc-rpc-api", "sc-tracing", "sc-transaction-pool", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 544df4c8d4812..b7a140f24a111 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -29,7 +29,6 @@ thiserror = "1.0" 
prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } -sc-network = { version = "0.10.0-dev", path = "../network" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-authority-discovery = { version = "4.0.0-dev", path = "../../primitives/authority-discovery" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } diff --git a/client/authority-discovery/src/error.rs b/client/authority-discovery/src/error.rs index bce39069ef7c7..285a2714b81f5 100644 --- a/client/authority-discovery/src/error.rs +++ b/client/authority-discovery/src/error.rs @@ -57,7 +57,7 @@ pub enum Error { ParsingMultiaddress(#[from] libp2p::core::multiaddr::Error), #[error("Failed to parse a libp2p key.")] - ParsingLibp2pIdentity(#[from] sc_network::DecodingError), + ParsingLibp2pIdentity(#[from] libp2p::identity::error::DecodingError), #[error("Failed to sign using a specific public key.")] MissingSignature(CryptoTypePublicPair), diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 6ab559dea46fd..bd4ee548e296b 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -19,7 +19,7 @@ memmap2 = "0.5.0" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.79" sc-chain-spec-derive = { version = "4.0.0-dev", path = "./derive" } -sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 5aafc28524dbf..2cc9f356e4df7 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -20,7 +20,7 @@ #![warn(missing_docs)] use crate::{extension::GetExtension, ChainType, Properties, RuntimeGenesis}; -use sc_network::config::MultiaddrWithPeerId; +use sc_network_common::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; use serde::{Deserialize, Serialize}; use serde_json as json; diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index 73d3e1af15492..9d2cc728b8288 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -184,7 +184,7 @@ pub use extension::{ }; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; -use sc_network::config::MultiaddrWithPeerId; +use sc_network_common::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; use serde::{de::DeserializeOwned, Serialize}; use sp_core::storage::Storage; diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 185c37985b585..8ecf2b9cec787 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -22,8 +22,8 @@ log = "0.4.17" lru = "0.7.5" tracing = "0.1.29" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } -sc-network = { version = "0.10.0-dev", path = "../network" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } +sc-peerset = { version = "4.0.0-dev", path = "../peerset" } sp-runtime = { version = "6.0.0", path = 
"../../primitives/runtime" } [dev-dependencies] diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index b5c957024701d..b63e3f64a1256 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -21,8 +21,8 @@ use crate::{ Network, Validator, }; -use sc_network::ReputationChange; use sc_network_common::protocol::event::Event; +use sc_peerset::ReputationChange; use futures::{ channel::mpsc::{channel, Receiver, Sender}, @@ -152,7 +152,7 @@ impl GossipEngine { /// Send addressed message to the given peers. The message is not kept or multicast /// later on. - pub fn send_message(&mut self, who: Vec, data: Vec) { + pub fn send_message(&mut self, who: Vec, data: Vec) { for who in &who { self.state_machine.send_message(&mut *self.network, who, data.clone()); } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index b9dff0bcd4d00..0fdde1feac6c6 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -67,7 +67,7 @@ pub use self::{ validator::{DiscardAll, MessageIntent, ValidationResult, Validator, ValidatorContext}, }; -use sc_network::{multiaddr, PeerId}; +use libp2p::{multiaddr, PeerId}; use sc_network_common::service::{ NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, }; diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 3cb8d5bd9bab9..a870ca6a3acdf 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -42,9 +42,9 @@ const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_millis(750); pub(crate) const PERIODIC_MAINTENANCE_INTERVAL: time::Duration = time::Duration::from_millis(1100); mod rep { - use sc_network::ReputationChange as Rep; + use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer sends us a gossip message that we didn't know about. - pub const GOSSIP_SUCCESS: Rep = Rep::new(1 << 4, "Successfull gossip"); + pub const GOSSIP_SUCCESS: Rep = Rep::new(1 << 4, "Successful gossip"); /// Reputation change when a peer sends us a gossip message that we already knew about. pub const DUPLICATE_GOSSIP: Rep = Rep::new(-(1 << 2), "Duplicate gossip"); } @@ -513,7 +513,6 @@ mod tests { use super::*; use crate::multiaddr::Multiaddr; use futures::prelude::*; - use sc_network::ReputationChange; use sc_network_common::{ protocol::event::Event, service::{ @@ -521,6 +520,7 @@ mod tests { NotificationSender, NotificationSenderError, }, }; + use sc_peerset::ReputationChange; use sp_runtime::{ testing::{Block as RawBlock, ExtrinsicWrapper, H256}, traits::NumberFor, diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index a98c62ab5a9eb..185c2cfeed2c7 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use sc_network::PeerId; +use libp2p::PeerId; use sc_network_common::protocol::event::ObservedRole; use sp_runtime::traits::Block as BlockT; diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 1b10bd248292c..8f0702e05cac5 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -28,6 +28,7 @@ libp2p = "0.46.1" smallvec = "1.8.0" sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +serde = { version = "1.0.136", features = ["derive"] } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index 92f8df5cd380f..c2e61424c88e0 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -18,7 +18,8 @@ //! Configuration of the networking layer. -use std::{fmt, str}; +use libp2p::{multiaddr, Multiaddr, PeerId}; +use std::{fmt, str, str::FromStr}; /// Name of a protocol, transmitted on the wire. Should be unique for each chain. Always UTF-8. #[derive(Clone, PartialEq, Eq, Hash)] @@ -42,3 +43,129 @@ impl fmt::Debug for ProtocolId { fmt::Debug::fmt(self.as_ref(), f) } } + +/// Parses a string address and splits it into Multiaddress and PeerId, if +/// valid. +/// +/// # Example +/// +/// ``` +/// # use libp2p::{Multiaddr, PeerId}; +/// # use sc_network_common::config::parse_str_addr; +/// let (peer_id, addr) = parse_str_addr( +/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" +/// ).unwrap(); +/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::().unwrap()); +/// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::().unwrap()); +/// ``` +pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { + let addr: Multiaddr = addr_str.parse()?; + parse_addr(addr) +} + +/// Splits a Multiaddress into a Multiaddress and PeerId. +pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { + let who = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => + PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, + _ => return Err(ParseErr::PeerIdMissing), + }; + + Ok((who, addr)) +} + +/// Address of a node, including its identity. +/// +/// This struct represents a decoded version of a multiaddress that ends with `/p2p/`. +/// +/// # Example +/// +/// ``` +/// # use libp2p::{Multiaddr, PeerId}; +/// # use sc_network_common::config::MultiaddrWithPeerId; +/// let addr: MultiaddrWithPeerId = +/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse().unwrap(); +/// assert_eq!(addr.peer_id.to_base58(), "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"); +/// assert_eq!(addr.multiaddr.to_string(), "/ip4/198.51.100.19/tcp/30333"); +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq)] +#[serde(try_from = "String", into = "String")] +pub struct MultiaddrWithPeerId { + /// Address of the node. + pub multiaddr: Multiaddr, + /// Its identity. + pub peer_id: PeerId, +} + +impl MultiaddrWithPeerId { + /// Concatenates the multiaddress and peer ID into one multiaddress containing both. 
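	/// This is the inverse of `parse_addr`: the trailing `/p2p/<peer id>` component that
	/// `parse_addr` strips off is appended back onto the multiaddress.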
+ pub fn concat(&self) -> Multiaddr { + let proto = multiaddr::Protocol::P2p(From::from(self.peer_id)); + self.multiaddr.clone().with(proto) + } +} + +impl fmt::Display for MultiaddrWithPeerId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.concat(), f) + } +} + +impl FromStr for MultiaddrWithPeerId { + type Err = ParseErr; + + fn from_str(s: &str) -> Result { + let (peer_id, multiaddr) = parse_str_addr(s)?; + Ok(Self { peer_id, multiaddr }) + } +} + +impl From for String { + fn from(ma: MultiaddrWithPeerId) -> String { + format!("{}", ma) + } +} + +impl TryFrom for MultiaddrWithPeerId { + type Error = ParseErr; + fn try_from(string: String) -> Result { + string.parse() + } +} + +/// Error that can be generated by `parse_str_addr`. +#[derive(Debug)] +pub enum ParseErr { + /// Error while parsing the multiaddress. + MultiaddrParse(multiaddr::Error), + /// Multihash of the peer ID is invalid. + InvalidPeerId, + /// The peer ID is missing from the address. + PeerIdMissing, +} + +impl fmt::Display for ParseErr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::MultiaddrParse(err) => write!(f, "{}", err), + Self::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"), + Self::PeerIdMissing => write!(f, "Peer id is missing from the address"), + } + } +} + +impl std::error::Error for ParseErr { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Self::MultiaddrParse(err) => Some(err), + Self::InvalidPeerId => None, + Self::PeerIdMissing => None, + } + } +} + +impl From for ParseErr { + fn from(err: multiaddr::Error) -> ParseErr { + Self::MultiaddrParse(err) + } +} diff --git a/client/network/src/config.rs b/client/network/src/config.rs index afd61cae6c83b..52fa28e76e207 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -37,11 +37,11 @@ use core::{fmt, iter}; use futures::future; use libp2p::{ identity::{ed25519, Keypair}, - multiaddr, Multiaddr, PeerId, + multiaddr, Multiaddr, }; use prometheus_endpoint::Registry; use sc_consensus::ImportQueue; -use sc_network_common::sync::ChainSync; +use sc_network_common::{config::MultiaddrWithPeerId, sync::ChainSync}; use sp_runtime::traits::Block as BlockT; use std::{ borrow::Cow, @@ -54,7 +54,6 @@ use std::{ path::{Path, PathBuf}, pin::Pin, str, - str::FromStr, sync::Arc, }; use zeroize::Zeroize; @@ -225,130 +224,6 @@ impl TransactionPool for EmptyTransaction } } -/// Parses a string address and splits it into Multiaddress and PeerId, if -/// valid. -/// -/// # Example -/// -/// ``` -/// # use sc_network::{Multiaddr, PeerId, config::parse_str_addr}; -/// let (peer_id, addr) = parse_str_addr( -/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" -/// ).unwrap(); -/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::().unwrap()); -/// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::().unwrap()); -/// ``` -pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { - let addr: Multiaddr = addr_str.parse()?; - parse_addr(addr) -} - -/// Splits a Multiaddress into a Multiaddress and PeerId. 
-pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { - let who = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => - PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, - _ => return Err(ParseErr::PeerIdMissing), - }; - - Ok((who, addr)) -} - -/// Address of a node, including its identity. -/// -/// This struct represents a decoded version of a multiaddress that ends with `/p2p/`. -/// -/// # Example -/// -/// ``` -/// # use sc_network::{Multiaddr, PeerId, config::MultiaddrWithPeerId}; -/// let addr: MultiaddrWithPeerId = -/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse().unwrap(); -/// assert_eq!(addr.peer_id.to_base58(), "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"); -/// assert_eq!(addr.multiaddr.to_string(), "/ip4/198.51.100.19/tcp/30333"); -/// ``` -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq)] -#[serde(try_from = "String", into = "String")] -pub struct MultiaddrWithPeerId { - /// Address of the node. - pub multiaddr: Multiaddr, - /// Its identity. - pub peer_id: PeerId, -} - -impl MultiaddrWithPeerId { - /// Concatenates the multiaddress and peer ID into one multiaddress containing both. - pub fn concat(&self) -> Multiaddr { - let proto = multiaddr::Protocol::P2p(From::from(self.peer_id)); - self.multiaddr.clone().with(proto) - } -} - -impl fmt::Display for MultiaddrWithPeerId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.concat(), f) - } -} - -impl FromStr for MultiaddrWithPeerId { - type Err = ParseErr; - - fn from_str(s: &str) -> Result { - let (peer_id, multiaddr) = parse_str_addr(s)?; - Ok(Self { peer_id, multiaddr }) - } -} - -impl From for String { - fn from(ma: MultiaddrWithPeerId) -> String { - format!("{}", ma) - } -} - -impl TryFrom for MultiaddrWithPeerId { - type Error = ParseErr; - fn try_from(string: String) -> Result { - string.parse() - } -} - -/// Error that can be generated by `parse_str_addr`. -#[derive(Debug)] -pub enum ParseErr { - /// Error while parsing the multiaddress. - MultiaddrParse(multiaddr::Error), - /// Multihash of the peer ID is invalid. - InvalidPeerId, - /// The peer ID is missing from the address. - PeerIdMissing, -} - -impl fmt::Display for ParseErr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::MultiaddrParse(err) => write!(f, "{}", err), - Self::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"), - Self::PeerIdMissing => write!(f, "Peer id is missing from the address"), - } - } -} - -impl std::error::Error for ParseErr { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Self::MultiaddrParse(err) => Some(err), - Self::InvalidPeerId => None, - Self::PeerIdMissing => None, - } - } -} - -impl From for ParseErr { - fn from(err: multiaddr::Error) -> ParseErr { - Self::MultiaddrParse(err) - } -} - /// Sync operation mode. 
#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum SyncMode { diff --git a/client/network/src/service.rs b/client/network/src/service.rs index bf15a9250d82c..d36e310795818 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -30,7 +30,7 @@ use crate::{ behaviour::{self, Behaviour, BehaviourOut}, bitswap::Bitswap, - config::{parse_str_addr, Params, TransportConfig}, + config::{Params, TransportConfig}, discovery::DiscoveryConfig, error::Error, network_state::{ @@ -62,6 +62,7 @@ use parking_lot::Mutex; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_network_common::{ + config::parse_str_addr, protocol::event::{DhtEvent, Event}, request_responses::{IfDisconnected, RequestFailure}, service::{ diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 6ccca17650b67..e2fe58423abfe 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -21,7 +21,7 @@ use crate::{config, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; use sc_network_common::{ - config::ProtocolId, + config::{MultiaddrWithPeerId, ProtocolId}, protocol::event::Event, service::{NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo}, }; @@ -194,7 +194,7 @@ fn build_nodes_one_proto() -> ( fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: config::SetConfig { - reserved_nodes: vec![config::MultiaddrWithPeerId { + reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, peer_id: node1.local_peer_id(), }], @@ -383,7 +383,7 @@ fn lots_of_incoming_peers_works() { fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: config::SetConfig { - reserved_nodes: vec![config::MultiaddrWithPeerId { + reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr.clone(), peer_id: main_node_peer_id, }], @@ -513,7 +513,7 @@ fn fallback_name_working() { fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: config::SetConfig { - reserved_nodes: vec![config::MultiaddrWithPeerId { + reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, peer_id: node1.local_peer_id(), }], @@ -584,7 +584,7 @@ fn ensure_listen_addresses_consistent_with_transport_not_memory() { #[should_panic(expected = "don't match the transport")] fn ensure_boot_node_addresses_consistent_with_transport_memory() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let boot_node = config::MultiaddrWithPeerId { + let boot_node = MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)], peer_id: PeerId::random(), }; @@ -601,7 +601,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_memory() { #[should_panic(expected = "don't match the transport")] fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; - let boot_node = config::MultiaddrWithPeerId { + let boot_node = MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Memory(rand::random::())], peer_id: PeerId::random(), }; @@ -617,7 +617,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { #[should_panic(expected = "don't match the transport")] fn ensure_reserved_node_addresses_consistent_with_transport_memory() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let reserved_node = config::MultiaddrWithPeerId { + 
let reserved_node = MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)], peer_id: PeerId::random(), }; @@ -637,7 +637,7 @@ fn ensure_reserved_node_addresses_consistent_with_transport_memory() { #[should_panic(expected = "don't match the transport")] fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; - let reserved_node = config::MultiaddrWithPeerId { + let reserved_node = MultiaddrWithPeerId { multiaddr: config::build_multiaddr![Memory(rand::random::())], peer_id: PeerId::random(), }; diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 3ba663aba2267..fbe56f463d0f3 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -51,13 +51,13 @@ use sc_consensus::{ pub use sc_network::config::EmptyTransactionPool; use sc_network::{ config::{ - MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, Role, - SyncMode, TransportConfig, + NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, Role, SyncMode, + TransportConfig, }, Multiaddr, NetworkService, NetworkWorker, }; -pub use sc_network_common::config::ProtocolId; use sc_network_common::{ + config::{MultiaddrWithPeerId, ProtocolId}, service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, }; diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index ab26c0c38596c..89b77dbb5fea7 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -21,6 +21,7 @@ futures-timer = "3.0.2" hex = "0.4" hyper = { version = "0.14.16", features = ["stream", "http2"] } hyper-rustls = { version = "0.23.0", features = ["http2"] } +libp2p = { version = "0.46.1", default-features = false } num_cpus = "1.13" once_cell = "1.8" parking_lot = "0.12.0" @@ -28,8 +29,8 @@ rand = "0.7.2" threadpool = "1.7" tracing = "0.1.29" sc-client-api = { version = "4.0.0-dev", path = "../api" } -sc-network = { version = "0.10.0-dev", path = "../network" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } +sc-peerset = { version = "4.0.0-dev", path = "../peerset" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index f379ebad17bbf..5c8139d4505e7 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -22,7 +22,7 @@ use crate::NetworkProvider; use codec::{Decode, Encode}; use futures::Future; pub use http::SharedClient; -use sc_network::{Multiaddr, PeerId}; +use libp2p::{Multiaddr, PeerId}; use sp_core::{ offchain::{ self, HttpError, HttpRequestId, HttpRequestStatus, OffchainStorage, OpaqueMultiaddr, @@ -324,9 +324,10 @@ impl AsyncApi { #[cfg(test)] mod tests { use super::*; + use libp2p::PeerId; use sc_client_db::offchain::LocalStorage; - use sc_network::{PeerId, ReputationChange}; use sc_network_common::service::{NetworkPeers, NetworkStateInfo}; + use sc_peerset::ReputationChange; use sp_core::offchain::{DbExternalities, Externalities}; use std::{borrow::Cow, time::SystemTime}; diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 14a2408e61a70..5eed142ff3871 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -246,9 +246,10 @@ pub async 
fn notification_future( mod tests { use super::*; use futures::executor::block_on; + use libp2p::{Multiaddr, PeerId}; use sc_block_builder::BlockBuilderProvider as _; use sc_client_api::Backend as _; - use sc_network::{Multiaddr, PeerId, ReputationChange}; + use sc_peerset::ReputationChange; use sc_transaction_pool::{BasicPool, FullChainApi}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 5a05ae6e29df1..2381323d25a24 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -46,6 +46,7 @@ assert_matches = "1.3.0" lazy_static = "1.4.0" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } tokio = "1.17.0" diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index facad7a0b347a..2f91648008ff7 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -99,7 +99,7 @@ fn api>>(sync: T) -> RpcModule> { ); }, Request::NetworkAddReservedPeer(peer, sender) => { - let _ = match sc_network::config::parse_str_addr(&peer) { + let _ = match sc_network_common::config::parse_str_addr(&peer) { Ok(_) => sender.send(Ok(())), Err(s) => sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 6f5650df5648c..2fb3f820ebf15 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -25,13 +25,12 @@ pub use sc_executor::WasmExecutionMethod; pub use sc_executor::WasmtimeInstantiationStrategy; pub use sc_network::{ config::{ - MultiaddrWithPeerId, NetworkConfiguration, NodeKeyConfig, NonDefaultSetConfig, Role, - SetConfig, TransportConfig, + NetworkConfiguration, NodeKeyConfig, NonDefaultSetConfig, Role, SetConfig, TransportConfig, }, Multiaddr, }; pub use sc_network_common::{ - config::ProtocolId, + config::{MultiaddrWithPeerId, ProtocolId}, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, From 7266eb7d794d74a7860fe193d7a3074200765ea1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 23 Aug 2022 13:19:54 +0200 Subject: [PATCH 055/348] Bump parity-db from 0.3.13 to 0.3.16 (#12091) Bumps [parity-db](https://github.com/paritytech/parity-db) from 0.3.13 to 0.3.16. - [Release notes](https://github.com/paritytech/parity-db/releases) - [Commits](https://github.com/paritytech/parity-db/commits) --- updated-dependencies: - dependency-name: parity-db dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- client/db/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c453da9d3045..c99212ba06ba8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6515,9 +6515,9 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.3.13" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55a7901b85874402471e131de3332dde0e51f38432c69a3853627c8e25433048" +checksum = "2bb474d0ed0836e185cb998a6b140ed1073d1fbf27d690ecf9ede8030289382c" dependencies = [ "blake2-rfc", "crc32fast", diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 7bfba997767f2..144791fda5c3c 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -22,7 +22,7 @@ kvdb-memorydb = "0.11.0" kvdb-rocksdb = { version = "0.15.2", optional = true } linked-hash-map = "0.5.4" log = "0.4.17" -parity-db = "0.3.13" +parity-db = "0.3.16" parking_lot = "0.12.0" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-state-db = { version = "0.10.0-dev", path = "../state-db" } From 40703f41167c718e13b491228efeafb4a6194448 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 23 Aug 2022 22:22:58 +0200 Subject: [PATCH 056/348] Print bootnode mismatch message as `warning` (#12075) * Print bootnode mismatch message as `warning` * Fix test :thinking_face: * Remove wrong warning --- client/network/src/service.rs | 15 ++------------- primitives/trie/src/cache/mod.rs | 2 +- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index d36e310795818..162b9c6a30491 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -160,18 +160,7 @@ where .network_config .boot_nodes .into_iter() - .filter(|boot_node| { - if boot_node.peer_id == local_peer_id { - warn!( - target: "sub-libp2p", - "Local peer ID used in bootnode, ignoring: {}", - boot_node, - ); - false - } else { - true - } - }) + .filter(|boot_node| boot_node.peer_id != local_peer_id) .collect(); params.network_config.default_peers_set.reserved_nodes = params .network_config @@ -1825,7 +1814,7 @@ where if let ConnectedPoint::Dialer { address, role_override: _ } = endpoint { - error!( + warn!( "💔 The bootnode you want to connect to at `{}` provided a different peer ID `{}` than the one you expect `{}`.", address, obtained, diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs index a588f20bdb7b4..67348919250cd 100644 --- a/primitives/trie/src/cache/mod.rs +++ b/primitives/trie/src/cache/mod.rs @@ -631,7 +631,7 @@ mod tests { let mut cache = local_cache.as_trie_db_cache(root); let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); - for (k, _) in TEST_DATA.iter().take(2) { + for (k, _) in TEST_DATA.iter().skip(2) { trie.get(k).unwrap().unwrap(); } } From b7d8a3a3bd70f281d50352c48634cc96f6232d57 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 24 Aug 2022 17:55:48 +0200 Subject: [PATCH 057/348] Test bench components with more values (#11963) * Test values besides low and high. 
Signed-off-by: Oliver Tale-Yazdi * Only add highest if different and remove check Signed-off-by: Oliver Tale-Yazdi * Add env var Signed-off-by: Oliver Tale-Yazdi * Update README Signed-off-by: Oliver Tale-Yazdi * Add tests Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- frame/benchmarking/README.md | 2 ++ frame/benchmarking/src/lib.rs | 40 +++++++++++++++++++++---- frame/benchmarking/src/tests.rs | 53 ++++++++++++++++++++++++++++++++- 3 files changed, 88 insertions(+), 7 deletions(-) diff --git a/frame/benchmarking/README.md b/frame/benchmarking/README.md index f0fe05cc140f2..6316cd5903c8b 100644 --- a/frame/benchmarking/README.md +++ b/frame/benchmarking/README.md @@ -125,6 +125,8 @@ cargo test -p pallet-balances --features runtime-benchmarks > ``` > To solve this, navigate to the folder of the node (`cd bin/node/cli`) or pallet (`cd frame/pallet`) and run the command there. +This will instance each linear component with different values. The number of values per component is set to six and can be changed with the `VALUES_PER_COMPONENT` environment variable. + ## Adding Benchmarks The benchmarks included with each pallet are not automatically added to your node. To actually diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 0a0f8eecdcc3d..18472595f15b9 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -1153,6 +1153,8 @@ macro_rules! impl_benchmark { // This creates a unit test for one benchmark of the main benchmark macro. // It runs the benchmark using the `high` and `low` value for each component // and ensure that everything completes successfully. +// Instances each component with six values which can be controlled with the +// env variable `VALUES_PER_COMPONENT`. #[macro_export] #[doc(hidden)] macro_rules! impl_benchmark_test { @@ -1197,16 +1199,42 @@ macro_rules! impl_benchmark_test { if components.is_empty() { execute_benchmark(Default::default())?; } else { - for (name, low, high) in components.iter() { - // Test only the low and high value, assuming values in the middle - // won't break - for component_value in $crate::vec![low, high] { + let num_values: u32 = if let Ok(ev) = std::env::var("VALUES_PER_COMPONENT") { + ev.parse().map_err(|_| { + $crate::BenchmarkError::Stop( + "Could not parse env var `VALUES_PER_COMPONENT` as u32." + ) + })? + } else { + 6 + }; + + if num_values < 2 { + return Err("`VALUES_PER_COMPONENT` must be at least 2".into()); + } + + for (name, low, high) in components.clone().into_iter() { + // Test the lowest, highest (if its different from the lowest) + // and up to num_values-2 more equidistant values in between. + // For 0..10 and num_values=6 this would mean: [0, 2, 4, 6, 8, 10] + + let mut values = $crate::vec![low]; + let diff = (high - low).min(num_values - 1); + let slope = (high - low) as f32 / diff as f32; + + for i in 1..=diff { + let value = ((low as f32 + slope * i as f32) as u32) + .clamp(low, high); + values.push(value); + } + + for component_value in values { // Select the max value for all the other components. 
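					// E.g. running the README example with
					// `VALUES_PER_COMPONENT=10 cargo test -p pallet-balances --features runtime-benchmarks`
					// tests up to ten values per component instead of the default six.
					// Whichever values are used, only the component under test takes them; every
					// other component stays pinned to its `high` bound.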
let c: $crate::Vec<($crate::BenchmarkParameter, u32)> = components .iter() .map(|(n, _, h)| - if n == name { - (*n, *component_value) + if *n == name { + (*n, component_value) } else { (*n, *h) } diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 9a97250aeb965..be5c23fff17f6 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -27,6 +27,7 @@ use sp_runtime::{ BuildStorage, }; use sp_std::prelude::*; +use std::cell::RefCell; #[frame_support::pallet] mod pallet_test { @@ -125,10 +126,16 @@ fn new_test_ext() -> sp_io::TestExternalities { GenesisConfig::default().build_storage().unwrap().into() } +thread_local! { + /// Tracks the used components per value. Needs to be a thread local since the + /// benchmarking clears the storage after each run. + static VALUES_PER_COMPONENT: RefCell> = RefCell::new(vec![]); +} + // NOTE: This attribute is only needed for the `modify_in_` functions. #[allow(unreachable_code)] mod benchmarks { - use super::{new_test_ext, pallet_test::Value, Test}; + use super::{new_test_ext, pallet_test::Value, Test, VALUES_PER_COMPONENT}; use crate::{account, BenchmarkError, BenchmarkParameter, BenchmarkResult, BenchmarkingSetup}; use frame_support::{assert_err, assert_ok, ensure, traits::Get}; use frame_system::RawOrigin; @@ -247,6 +254,13 @@ mod benchmarks { Value::::set(Some(123)); return Err(BenchmarkError::Stop("Should error")); } + + // Stores all component values in the thread-local storage. + values_per_component { + let n in 0 .. 10; + }: { + VALUES_PER_COMPONENT.with(|v| v.borrow_mut().push(n)); + } } #[test] @@ -394,4 +408,41 @@ mod benchmarks { assert_eq!(Value::::get(), None); }); } + + /// Test that the benchmarking uses the correct values for each component and + /// that the number of components can be controlled with `VALUES_PER_COMPONENT`. + #[test] + fn test_values_per_component() { + let tests = vec![ + (Some("1"), Err("`VALUES_PER_COMPONENT` must be at least 2".into())), + (Some("asdf"), Err("Could not parse env var `VALUES_PER_COMPONENT` as u32.".into())), + (None, Ok(vec![0, 2, 4, 6, 8, 10])), + (Some("2"), Ok(vec![0, 10])), + (Some("4"), Ok(vec![0, 3, 6, 10])), + (Some("6"), Ok(vec![0, 2, 4, 6, 8, 10])), + (Some("10"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 10])), + (Some("11"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])), + (Some("99"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])), + ]; + + for (num, expected) in tests { + run_test_values_per_component(num, expected); + } + } + + /// Helper for [`test_values_per_component`]. 
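	/// Sets or clears the `VALUES_PER_COMPONENT` environment variable as requested, runs the
	/// generated `values_per_component` benchmark test and checks that the recorded component
	/// values (or the returned error) match `output`.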
+ fn run_test_values_per_component(num: Option<&str>, output: Result, BenchmarkError>) { + VALUES_PER_COMPONENT.with(|v| v.borrow_mut().clear()); + match num { + Some(n) => std::env::set_var("VALUES_PER_COMPONENT", n), + None => std::env::remove_var("VALUES_PER_COMPONENT"), + } + + new_test_ext().execute_with(|| { + let got = Pallet::::test_benchmark_values_per_component() + .map(|_| VALUES_PER_COMPONENT.with(|v| v.borrow().clone())); + + assert_eq!(got, output); + }); + } } From bf7729208a6c5c67d0ac2f91004ba180a169be6f Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 25 Aug 2022 01:47:08 +0800 Subject: [PATCH 058/348] Implement ResultQuery (#11257) * Implement ResultQuery * Fix test expectations * Add more tests * Fix test expectations * Clean up some names * Silence warnings * Specify error type when supplying error type to ResultQuery * cargo fmt * Add support for type parameters in parameter_types macro * Reduce deeply indented code * Fixes * Update test expectation * Rewrite and document formula for calculating max storage size * More docs * cargo fmt * formatting Co-authored-by: parity-processbot <> --- .../procedural/src/pallet/expand/storage.rs | 217 ++++++++++++++++-- .../procedural/src/pallet/parse/storage.rs | 110 ++++++++- frame/support/src/lib.rs | 81 ++++--- frame/support/src/storage/types/mod.rs | 32 ++- frame/support/test/tests/pallet.rs | 139 ++++++++++- frame/support/test/tests/pallet_instance.rs | 129 ++++++++++- .../storage_result_query_missing_generics.rs | 21 ++ ...orage_result_query_missing_generics.stderr | 15 ++ ...storage_result_query_multiple_type_args.rs | 23 ++ ...age_result_query_multiple_type_args.stderr | 5 + ...ge_result_query_no_defined_pallet_error.rs | 16 ++ ...esult_query_no_defined_pallet_error.stderr | 5 + ...age_result_query_parenthesized_generics.rs | 22 ++ ...result_query_parenthesized_generics.stderr | 5 + ...storage_result_query_wrong_generic_kind.rs | 22 ++ ...age_result_query_wrong_generic_kind.stderr | 5 + 16 files changed, 770 insertions(+), 77 deletions(-) create mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs create mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr create mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs create mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr create mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs create mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr create mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs create mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr create mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs create mode 100644 frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr diff --git a/frame/support/procedural/src/pallet/expand/storage.rs b/frame/support/procedural/src/pallet/expand/storage.rs index 657968e17a80c..181f35b545496 100644 --- a/frame/support/procedural/src/pallet/expand/storage.rs +++ b/frame/support/procedural/src/pallet/expand/storage.rs @@ -19,7 +19,9 @@ use crate::pallet::{ parse::storage::{Metadata, QueryKind, StorageDef, StorageGenerics}, Def, }; -use std::collections::HashMap; +use quote::ToTokens; +use std::{collections::HashMap, ops::IndexMut}; 
+use syn::spanned::Spanned; /// Generate the prefix_ident related to the storage. /// prefix_ident is used for the prefix struct to be given to storage as first generic param. @@ -84,12 +86,28 @@ fn check_prefix_duplicates( Ok(()) } +pub struct ResultOnEmptyStructMetadata { + /// The Rust ident that is going to be used as the name of the OnEmpty struct. + pub name: syn::Ident, + /// The path to the error type being returned by the ResultQuery. + pub error_path: syn::Path, + /// The visibility of the OnEmpty struct. + pub visibility: syn::Visibility, + /// The type of the storage item. + pub value_ty: syn::Type, + /// The name of the pallet error enum variant that is going to be returned. + pub variant_name: syn::Ident, + /// The span used to report compilation errors about the OnEmpty struct. + pub span: proc_macro2::Span, +} + /// /// * if generics are unnamed: replace the first generic `_` by the generated prefix structure /// * if generics are named: reorder the generic, remove their name, and add the missing ones. /// * Add `#[allow(type_alias_bounds)]` -pub fn process_generics(def: &mut Def) -> syn::Result<()> { +pub fn process_generics(def: &mut Def) -> syn::Result> { let frame_support = &def.frame_support; + let mut on_empty_struct_metadata = Vec::new(); for storage_def in def.storages.iter_mut() { let item = &mut def.item.content.as_mut().expect("Checked by def").1[storage_def.index]; @@ -120,27 +138,72 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { let default_query_kind: syn::Type = syn::parse_quote!(#frame_support::storage::types::OptionQuery); - let default_on_empty: syn::Type = syn::parse_quote!(#frame_support::traits::GetDefault); + let mut default_on_empty = |value_ty: syn::Type| -> syn::Type { + if let Some(QueryKind::ResultQuery(error_path, variant_name)) = + storage_def.query_kind.as_ref() + { + let on_empty_ident = + quote::format_ident!("__Frame_Internal_Get{}Result", storage_def.ident); + on_empty_struct_metadata.push(ResultOnEmptyStructMetadata { + name: on_empty_ident.clone(), + visibility: storage_def.vis.clone(), + value_ty, + error_path: error_path.clone(), + variant_name: variant_name.clone(), + span: storage_def.attr_span, + }); + return syn::parse_quote!(#on_empty_ident) + } + syn::parse_quote!(#frame_support::traits::GetDefault) + }; let default_max_values: syn::Type = syn::parse_quote!(#frame_support::traits::GetDefault); + let set_result_query_type_parameter = |query_type: &mut syn::Type| -> syn::Result<()> { + if let Some(QueryKind::ResultQuery(error_path, _)) = storage_def.query_kind.as_ref() { + if let syn::Type::Path(syn::TypePath { path: syn::Path { segments, .. }, .. }) = + query_type + { + if let Some(seg) = segments.last_mut() { + if let syn::PathArguments::AngleBracketed( + syn::AngleBracketedGenericArguments { args, .. 
}, + ) = &mut seg.arguments + { + args.clear(); + args.push(syn::GenericArgument::Type(syn::parse_quote!(#error_path))); + } + } + } else { + let msg = format!( + "Invalid pallet::storage, unexpected type for query, expected ResultQuery \ + with 1 type parameter, found `{}`", + query_type.to_token_stream().to_string() + ); + return Err(syn::Error::new(query_type.span(), msg)) + } + } + Ok(()) + }; + if let Some(named_generics) = storage_def.named_generics.clone() { args.args.clear(); args.args.push(syn::parse_quote!( #prefix_ident<#type_use_gen> )); match named_generics { StorageGenerics::Value { value, query_kind, on_empty } => { - args.args.push(syn::GenericArgument::Type(value)); - let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(value.clone())); + let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + set_result_query_type_parameter(&mut query_kind)?; args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); args.args.push(syn::GenericArgument::Type(on_empty)); }, StorageGenerics::Map { hasher, key, value, query_kind, on_empty, max_values } => { args.args.push(syn::GenericArgument::Type(hasher)); args.args.push(syn::GenericArgument::Type(key)); - args.args.push(syn::GenericArgument::Type(value)); - let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(value.clone())); + let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + set_result_query_type_parameter(&mut query_kind)?; args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); @@ -155,10 +218,11 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { } => { args.args.push(syn::GenericArgument::Type(hasher)); args.args.push(syn::GenericArgument::Type(key)); - args.args.push(syn::GenericArgument::Type(value)); - let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(value.clone())); + let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + set_result_query_type_parameter(&mut query_kind)?; args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); @@ -177,20 +241,22 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { args.args.push(syn::GenericArgument::Type(key1)); args.args.push(syn::GenericArgument::Type(hasher2)); args.args.push(syn::GenericArgument::Type(key2)); - args.args.push(syn::GenericArgument::Type(value)); - let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(value.clone())); + let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + set_result_query_type_parameter(&mut 
query_kind)?; args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); }, StorageGenerics::NMap { keygen, value, query_kind, on_empty, max_values } => { args.args.push(syn::GenericArgument::Type(keygen)); - args.args.push(syn::GenericArgument::Type(value)); - let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + args.args.push(syn::GenericArgument::Type(value.clone())); + let mut query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); + set_result_query_type_parameter(&mut query_kind)?; args.args.push(syn::GenericArgument::Type(query_kind)); - let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); + let on_empty = on_empty.unwrap_or_else(|| default_on_empty(value)); args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); @@ -198,10 +264,40 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { } } else { args.args[0] = syn::parse_quote!( #prefix_ident<#type_use_gen> ); + + let (value_idx, query_idx, on_empty_idx) = match storage_def.metadata { + Metadata::Value { .. } => (1, 2, 3), + Metadata::NMap { .. } => (2, 3, 4), + Metadata::Map { .. } | Metadata::CountedMap { .. } => (3, 4, 5), + Metadata::DoubleMap { .. } => (5, 6, 7), + }; + + if query_idx < args.args.len() { + if let syn::GenericArgument::Type(query_kind) = args.args.index_mut(query_idx) { + set_result_query_type_parameter(query_kind)?; + } + } else if let Some(QueryKind::ResultQuery(error_path, _)) = + storage_def.query_kind.as_ref() + { + args.args.push(syn::GenericArgument::Type(syn::parse_quote!(#error_path))) + } + + // Here, we only need to check if OnEmpty is *not* specified, and if so, then we have to + // generate a default OnEmpty struct for it. 
+ if on_empty_idx >= args.args.len() && + matches!(storage_def.query_kind.as_ref(), Some(QueryKind::ResultQuery(_, _))) + { + let value_ty = match args.args[value_idx].clone() { + syn::GenericArgument::Type(ty) => ty, + _ => unreachable!(), + }; + let on_empty = default_on_empty(value_ty); + args.args.push(syn::GenericArgument::Type(on_empty)); + } } } - Ok(()) + Ok(on_empty_struct_metadata) } /// @@ -212,9 +308,10 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { /// * Add `#[allow(type_alias_bounds)]` on storages type alias /// * generate metadatas pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { - if let Err(e) = process_generics(def) { - return e.into_compile_error() - } + let on_empty_struct_metadata = match process_generics(def) { + Ok(idents) => idents, + Err(e) => return e.into_compile_error(), + }; // Check for duplicate prefixes let mut prefix_set = HashMap::new(); @@ -277,6 +374,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), + QueryKind::ResultQuery(error_path, _) => + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -296,6 +397,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), + QueryKind::ResultQuery(error_path, _) => + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -317,6 +422,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), + QueryKind::ResultQuery(error_path, _) => + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -338,6 +447,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), + QueryKind::ResultQuery(error_path, _) => + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -361,6 +474,10 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), + QueryKind::ResultQuery(error_path, _) => + quote::quote_spanned!(storage.attr_span => + Result<#value, #error_path> + ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => @@ -459,6 +576,61 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { ) }); + let on_empty_structs = on_empty_struct_metadata.into_iter().map(|metadata| { + use crate::pallet::parse::GenericKind; + use syn::{GenericArgument, Path, PathArguments, PathSegment, Type, TypePath}; + + let ResultOnEmptyStructMetadata { + name, + visibility, + value_ty, + error_path, + variant_name, + span, + } = metadata; + + let generic_kind = match error_path.segments.last() { + Some(PathSegment { arguments: PathArguments::AngleBracketed(args), .. 
}) => { + let (has_config, has_instance) = + args.args.iter().fold((false, false), |(has_config, has_instance), arg| { + match arg { + GenericArgument::Type(Type::Path(TypePath { + path: Path { segments, .. }, + .. + })) => { + let maybe_config = + segments.first().map_or(false, |seg| seg.ident == "T"); + let maybe_instance = + segments.first().map_or(false, |seg| seg.ident == "I"); + + (has_config || maybe_config, has_instance || maybe_instance) + }, + _ => (has_config, has_instance), + } + }); + GenericKind::from_gens(has_config, has_instance).unwrap_or(GenericKind::None) + }, + _ => GenericKind::None, + }; + let type_impl_gen = generic_kind.type_impl_gen(proc_macro2::Span::call_site()); + let config_where_clause = &def.config.where_clause; + + quote::quote_spanned!(span => + #[doc(hidden)] + #[allow(non_camel_case_types)] + #visibility struct #name; + + impl<#type_impl_gen> #frame_support::traits::Get> + for #name + #config_where_clause + { + fn get() -> Result<#value_ty, #error_path> { + Err(<#error_path>::#variant_name) + } + } + ) + }); + let mut where_clauses = vec![&def.config.where_clause]; where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); let completed_where_clause = super::merge_where_clauses(&where_clauses); @@ -489,5 +661,6 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { #( #getters )* #( #prefix_structs )* + #( #on_empty_structs )* ) } diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 1f1bb5b2f26ad..97abdd3ad0977 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -29,6 +29,7 @@ mod keyword { syn::custom_keyword!(storage_prefix); syn::custom_keyword!(unbounded); syn::custom_keyword!(OptionQuery); + syn::custom_keyword!(ResultQuery); syn::custom_keyword!(ValueQuery); } @@ -129,6 +130,7 @@ pub enum Metadata { pub enum QueryKind { OptionQuery, + ResultQuery(syn::Path, syn::Ident), ValueQuery, } @@ -153,7 +155,7 @@ pub struct StorageDef { /// Optional expression that evaluates to a type that can be used as StoragePrefix instead of /// ident. pub rename_as: Option, - /// Whereas the querytype of the storage is OptionQuery or ValueQuery. + /// Whereas the querytype of the storage is OptionQuery, ResultQuery or ValueQuery. /// Note that this is best effort as it can't be determined when QueryKind is generic, and /// result can be false if user do some unexpected type alias. pub query_kind: Option, @@ -695,21 +697,105 @@ impl StorageDef { let (named_generics, metadata, query_kind) = process_generics(&typ.path.segments[0])?; let query_kind = query_kind - .map(|query_kind| match query_kind { - syn::Type::Path(path) - if path.path.segments.last().map_or(false, |s| s.ident == "OptionQuery") => - Some(QueryKind::OptionQuery), - syn::Type::Path(path) - if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") => - Some(QueryKind::ValueQuery), - _ => None, + .map(|query_kind| { + use syn::{ + AngleBracketedGenericArguments, GenericArgument, Path, PathArguments, Type, + TypePath, + }; + + let result_query = match query_kind { + Type::Path(path) + if path + .path + .segments + .last() + .map_or(false, |s| s.ident == "OptionQuery") => + return Ok(Some(QueryKind::OptionQuery)), + Type::Path(TypePath { path: Path { segments, .. }, .. 
}) + if segments.last().map_or(false, |s| s.ident == "ResultQuery") => + segments + .last() + .expect("segments is checked to have the last value; qed") + .clone(), + Type::Path(path) + if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") => + return Ok(Some(QueryKind::ValueQuery)), + _ => return Ok(None), + }; + + let error_type = match result_query.arguments { + PathArguments::AngleBracketed(AngleBracketedGenericArguments { + args, .. + }) => { + if args.len() != 1 { + let msg = format!( + "Invalid pallet::storage, unexpected number of generic arguments \ + for ResultQuery, expected 1 type argument, found {}", + args.len(), + ); + return Err(syn::Error::new(args.span(), msg)) + } + + args[0].clone() + }, + args => { + let msg = format!( + "Invalid pallet::storage, unexpected generic args for ResultQuery, \ + expected angle-bracketed arguments, found `{}`", + args.to_token_stream().to_string() + ); + return Err(syn::Error::new(args.span(), msg)) + }, + }; + + match error_type { + GenericArgument::Type(Type::Path(TypePath { + path: Path { segments: err_variant, leading_colon }, + .. + })) => { + if err_variant.len() < 2 { + let msg = format!( + "Invalid pallet::storage, unexpected number of path segments for \ + the generics in ResultQuery, expected a path with at least 2 \ + segments, found {}", + err_variant.len(), + ); + return Err(syn::Error::new(err_variant.span(), msg)) + } + let mut error = err_variant.clone(); + let err_variant = error + .pop() + .expect("Checked to have at least 2; qed") + .into_value() + .ident; + + // Necessary here to eliminate the last double colon + let last = + error.pop().expect("Checked to have at least 2; qed").into_value(); + error.push_value(last); + + Ok(Some(QueryKind::ResultQuery( + syn::Path { leading_colon: leading_colon.clone(), segments: error }, + err_variant, + ))) + }, + gen_arg => { + let msg = format!( + "Invalid pallet::storage, unexpected generic argument kind, expected a \ + type path to a `PalletError` enum variant, found `{}`", + gen_arg.to_token_stream().to_string(), + ); + Err(syn::Error::new(gen_arg.span(), msg)) + }, + } }) - .unwrap_or(Some(QueryKind::OptionQuery)); // This value must match the default generic. + .transpose()? + .unwrap_or(Some(QueryKind::OptionQuery)); if let (None, Some(getter)) = (query_kind.as_ref(), getter.as_ref()) { let msg = "Invalid pallet::storage, cannot generate getter because QueryKind is not \ - identifiable. QueryKind must be `OptionQuery`, `ValueQuery`, or default one to be \ - identifiable."; + identifiable. QueryKind must be `OptionQuery`, `ResultQuery`, `ValueQuery`, or default \ + one to be identifiable."; return Err(syn::Error::new(getter.span(), msg)) } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 7e4c944330fe3..72551980bc7e9 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -281,79 +281,85 @@ pub use frame_support_procedural::storage_alias; macro_rules! 
parameter_types { ( $( #[ $attr:meta ] )* - $vis:vis const $name:ident: $type:ty = $value:expr; + $vis:vis const $name:ident $(< $($ty_params:ident),* >)?: $type:ty = $value:expr; $( $rest:tt )* ) => ( $( #[ $attr ] )* - $vis struct $name; - $crate::parameter_types!(IMPL_CONST $name , $type , $value); + $vis struct $name $( + < $($ty_params),* >( $($crate::sp_std::marker::PhantomData<$ty_params>),* ) + )?; + $crate::parameter_types!(IMPL_CONST $name , $type , $value $( $(, $ty_params)* )?); $crate::parameter_types!( $( $rest )* ); ); ( $( #[ $attr:meta ] )* - $vis:vis $name:ident: $type:ty = $value:expr; + $vis:vis $name:ident $(< $($ty_params:ident),* >)?: $type:ty = $value:expr; $( $rest:tt )* ) => ( $( #[ $attr ] )* - $vis struct $name; - $crate::parameter_types!(IMPL $name, $type, $value); + $vis struct $name $( + < $($ty_params),* >( $($crate::sp_std::marker::PhantomData<$ty_params>),* ) + )?; + $crate::parameter_types!(IMPL $name, $type, $value $( $(, $ty_params)* )?); $crate::parameter_types!( $( $rest )* ); ); ( $( #[ $attr:meta ] )* - $vis:vis storage $name:ident: $type:ty = $value:expr; + $vis:vis storage $name:ident $(< $($ty_params:ident),* >)?: $type:ty = $value:expr; $( $rest:tt )* ) => ( $( #[ $attr ] )* - $vis struct $name; - $crate::parameter_types!(IMPL_STORAGE $name, $type, $value); + $vis struct $name $( + < $($ty_params),* >( $($crate::sp_std::marker::PhantomData<$ty_params>),* ) + )?; + $crate::parameter_types!(IMPL_STORAGE $name, $type, $value $( $(, $ty_params)* )?); $crate::parameter_types!( $( $rest )* ); ); () => (); - (IMPL_CONST $name:ident, $type:ty, $value:expr) => { - impl $name { + (IMPL_CONST $name:ident, $type:ty, $value:expr $(, $ty_params:ident)*) => { + impl< $($ty_params),* > $name< $($ty_params),* > { /// Returns the value of this parameter type. pub const fn get() -> $type { $value } } - impl> $crate::traits::Get for $name { - fn get() -> I { - I::from(Self::get()) + impl<_I: From<$type> $(, $ty_params)*> $crate::traits::Get<_I> for $name< $($ty_params),* > { + fn get() -> _I { + _I::from(Self::get()) } } - impl $crate::traits::TypedGet for $name { + impl< $($ty_params),* > $crate::traits::TypedGet for $name< $($ty_params),* > { type Type = $type; fn get() -> $type { Self::get() } } }; - (IMPL $name:ident, $type:ty, $value:expr) => { - impl $name { + (IMPL $name:ident, $type:ty, $value:expr $(, $ty_params:ident)*) => { + impl< $($ty_params),* > $name< $($ty_params),* > { /// Returns the value of this parameter type. pub fn get() -> $type { $value } } - impl> $crate::traits::Get for $name { - fn get() -> I { - I::from(Self::get()) + impl<_I: From<$type>, $(, $ty_params)*> $crate::traits::Get<_I> for $name< $($ty_params),* > { + fn get() -> _I { + _I::from(Self::get()) } } - impl $crate::traits::TypedGet for $name { + impl< $($ty_params),* > $crate::traits::TypedGet for $name< $($ty_params),* > { type Type = $type; fn get() -> $type { Self::get() } } }; - (IMPL_STORAGE $name:ident, $type:ty, $value:expr) => { - impl $name { + (IMPL_STORAGE $name:ident, $type:ty, $value:expr $(, $ty_params:ident)*) => { + impl< $($ty_params),* > $name< $($ty_params),* > { /// Returns the key for this parameter type. #[allow(unused)] pub fn key() -> [u8; 16] { @@ -379,13 +385,13 @@ macro_rules! 
parameter_types { } } - impl> $crate::traits::Get for $name { - fn get() -> I { - I::from(Self::get()) + impl<_I: From<$type> $(, $ty_params)*> $crate::traits::Get<_I> for $name< $($ty_params),* > { + fn get() -> _I { + _I::from(Self::get()) } } - impl $crate::traits::TypedGet for $name { + impl< $($ty_params),* > $crate::traits::TypedGet for $name< $($ty_params),* > { type Type = $type; fn get() -> $type { Self::get() @@ -1360,8 +1366,8 @@ pub mod pallet_prelude { storage::{ bounded_vec::BoundedVec, types::{ - CountedStorageMap, Key as NMapKey, OptionQuery, StorageDoubleMap, StorageMap, - StorageNMap, StorageValue, ValueQuery, + CountedStorageMap, Key as NMapKey, OptionQuery, ResultQuery, StorageDoubleMap, + StorageMap, StorageNMap, StorageValue, ValueQuery, }, }, traits::{ @@ -1835,6 +1841,23 @@ pub mod pallet_prelude { /// All the `cfg` attributes are automatically copied to the items generated for the storage, /// i.e. the getter, storage prefix, and the metadata element etc. /// +/// Any type placed as the `QueryKind` parameter must implement +/// [`frame_support::storage::types::QueryKindTrait`]. There are 3 implementations of this +/// trait by default: +/// 1. [`frame_support::storage::types::OptionQuery`], the default `QueryKind` used when this +/// type parameter is omitted. Specifying this as the `QueryKind` would cause storage map +/// APIs that return a `QueryKind` to instead return an `Option`, returning `Some` when a +/// value does exist under a specified storage key, and `None` otherwise. +/// 2. [`frame_support::storage::types::ValueQuery`] causes storage map APIs that return a +/// `QueryKind` to instead return the value type. In cases where a value does not exist +/// under a specified storage key, the `OnEmpty` type parameter on `QueryKindTrait` is used +/// to return an appropriate value. +/// 3. [`frame_support::storage::types::ResultQuery`] causes storage map APIs that return a +/// `QueryKind` to instead return a `Result`, with `T` being the value type and `E` +/// being the pallet error type specified by the `#[pallet::error]` attribute. In cases +/// where a value does not exist under a specified storage key, an `Err` with the specified +/// pallet error variant is returned. +/// /// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some /// type alias then the generation of the getter might fail. In this case the getter can be /// implemented manually. diff --git a/frame/support/src/storage/types/mod.rs b/frame/support/src/storage/types/mod.rs index 0706e9fb377e2..f87da5de12274 100644 --- a/frame/support/src/storage/types/mod.rs +++ b/frame/support/src/storage/types/mod.rs @@ -42,9 +42,11 @@ pub use value::StorageValue; /// Trait implementing how the storage optional value is converted into the queried type. /// /// It is implemented by: -/// * `OptionQuery` which convert an optional value to an optional value, user when querying storage -/// will get an optional value. -/// * `ValueQuery` which convert an optional value to a value, user when querying storage will get a +/// * `OptionQuery` which converts an optional value to an optional value, used when querying +/// storage returns an optional value. +/// * `ResultQuery` which converts an optional value to a result value, used when querying storage +/// returns a result value. +/// * `ValueQuery` which converts an optional value to a value, used when querying storage returns a /// value. pub trait QueryKindTrait { /// Metadata for the storage kind. 
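The ResultQuery documentation added to frame/support/src/lib.rs above is easiest to read next to a concrete declaration. The following sketch is illustrative only (the pallet skeleton, key/value types and error variant name are assumptions modelled on the Map3 test pallet added later in this patch); it shows a storage map whose get returns Result<u64, Error<T>> instead of Option<u64>:

    #[frame_support::pallet]
    pub mod pallet {
        use frame_support::pallet_prelude::*;

        #[pallet::config]
        pub trait Config: frame_system::Config {}

        #[pallet::pallet]
        pub struct Pallet<T>(core::marker::PhantomData<T>);

        #[pallet::error]
        pub enum Error<T> {
            /// Returned when no value is stored under the queried key.
            NonExistentStorageValue,
        }

        // With ResultQuery, `Foo::<T>::get(missing_key)` yields
        // `Err(Error::<T>::NonExistentStorageValue)` rather than `None`.
        #[pallet::storage]
        pub type Foo<T> = StorageMap<
            _,
            Blake2_128Concat,
            u32,
            u64,
            ResultQuery<Error<T>::NonExistentStorageValue>,
        >;
    }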
@@ -85,6 +87,30 @@ where } } +/// Implement QueryKindTrait with query being `Result` +pub struct ResultQuery(sp_std::marker::PhantomData); +impl QueryKindTrait for ResultQuery +where + Value: FullCodec + 'static, + Error: FullCodec + 'static, + OnEmpty: crate::traits::Get>, +{ + const METADATA: StorageEntryModifier = StorageEntryModifier::Optional; + + type Query = Result; + + fn from_optional_value_to_query(v: Option) -> Self::Query { + match v { + Some(v) => Ok(v), + None => OnEmpty::get(), + } + } + + fn from_query_to_optional_value(v: Self::Query) -> Option { + v.ok() + } +} + /// Implement QueryKindTrait with query being `Value` pub struct ValueQuery; impl QueryKindTrait for ValueQuery diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index c4bbc59c70373..e96e52d3d2c6e 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -229,6 +229,7 @@ pub mod pallet { pub enum Error { /// doc comment put into metadata InsufficientProposersBalance, + NonExistentStorageValue, Code(u8), #[codec(skip)] Skipped(u128), @@ -282,6 +283,10 @@ pub mod pallet { pub type Map2 = StorageMap>; + #[pallet::storage] + pub type Map3 = + StorageMap<_, Blake2_128Concat, u32, u64, ResultQuery::NonExistentStorageValue>>; + #[pallet::storage] pub type DoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; @@ -295,6 +300,17 @@ pub mod pallet { MaxValues = ConstU32<5>, >; + #[pallet::storage] + pub type DoubleMap3 = StorageDoubleMap< + _, + Blake2_128Concat, + u32, + Twox64Concat, + u64, + u128, + ResultQuery::NonExistentStorageValue>, + >; + #[pallet::storage] #[pallet::getter(fn nmap)] pub type NMap = StorageNMap<_, storage::Key, u32>; @@ -307,6 +323,15 @@ pub mod pallet { MaxValues = ConstU32<11>, >; + #[pallet::storage] + #[pallet::getter(fn nmap3)] + pub type NMap3 = StorageNMap< + _, + (NMapKey, NMapKey), + u128, + ResultQuery::NonExistentStorageValue>, + >; + #[pallet::storage] #[pallet::getter(fn conditional_value)] #[cfg(feature = "conditional-storage")] @@ -934,6 +959,16 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); + pallet::Map3::::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u64)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + pallet::Map3::::get(2), + Err(pallet::Error::::NonExistentStorageValue), + ); + pallet::DoubleMap::::insert(&1, &2, &3); let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -948,6 +983,17 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + pallet::DoubleMap3::::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u64.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + pallet::DoubleMap3::::get(2, 3), + Err(pallet::Error::::NonExistentStorageValue), + ); + pallet::NMap::::insert((&1,), &3); let mut k = [twox_128(b"Example"), twox_128(b"NMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -961,6 +1007,17 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + pallet::NMap3::::insert((&1, &2), &3); + let 
mut k = [twox_128(b"Example"), twox_128(b"NMap3")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + pallet::NMap3::::get((2, 3)), + Err(pallet::Error::::NonExistentStorageValue), + ); + #[cfg(feature = "conditional-storage")] { pallet::ConditionalValue::::put(1); @@ -1171,6 +1228,17 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "Map3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::(), + value: meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + }, + default: vec![1, 1], + docs: vec![], + }, StorageEntryMetadata { name: "DoubleMap", modifier: StorageEntryModifier::Optional, @@ -1199,6 +1267,20 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "DoubleMap3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: meta_type::(), + key: meta_type::<(u32, u64)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + }, + default: vec![1, 1], + docs: vec![], + }, StorageEntryMetadata { name: "NMap", modifier: StorageEntryModifier::Optional, @@ -1224,6 +1306,20 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "NMap3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: meta_type::<(u8, u16)>(), + hashers: vec![ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ], + value: meta_type::(), + }, + default: vec![1, 1], + docs: vec![], + }, #[cfg(feature = "conditional-storage")] StorageEntryMetadata { name: "ConditionalValue", @@ -1436,6 +1532,8 @@ fn test_storage_info() { traits::{StorageInfo, StorageInfoTrait}, }; + // Storage max size is calculated by adding up all the hasher size, the key type size and the + // value type size assert_eq!( Example::storage_info(), vec![ @@ -1465,42 +1563,63 @@ fn test_storage_info() { storage_name: b"Map".to_vec(), prefix: prefix(b"Example", b"Map").to_vec(), max_values: None, - max_size: Some(3 + 16), + max_size: Some(16 + 1 + 2), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"Map2".to_vec(), prefix: prefix(b"Example", b"Map2").to_vec(), max_values: Some(3), - max_size: Some(6 + 8), + max_size: Some(8 + 2 + 4), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"Map3".to_vec(), + prefix: prefix(b"Example", b"Map3").to_vec(), + max_values: None, + max_size: Some(16 + 4 + 8), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"DoubleMap".to_vec(), prefix: prefix(b"Example", b"DoubleMap").to_vec(), max_values: None, - max_size: Some(7 + 16 + 8), + max_size: Some(16 + 1 + 8 + 2 + 4), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"DoubleMap2".to_vec(), prefix: prefix(b"Example", b"DoubleMap2").to_vec(), max_values: Some(5), - max_size: Some(14 + 8 + 16), + max_size: Some(8 + 2 + 16 + 4 + 8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"DoubleMap3".to_vec(), + prefix: prefix(b"Example", b"DoubleMap3").to_vec(), + max_values: None, + max_size: Some(16 + 4 + 8 + 8 + 16), }, StorageInfo { pallet_name: b"Example".to_vec(), storage_name: b"NMap".to_vec(), prefix: prefix(b"Example", b"NMap").to_vec(), max_values: None, - max_size: Some(5 + 16), + max_size: Some(16 + 1 + 4), }, StorageInfo { pallet_name: b"Example".to_vec(), 
storage_name: b"NMap2".to_vec(), prefix: prefix(b"Example", b"NMap2").to_vec(), max_values: Some(11), - max_size: Some(14 + 8 + 16), + max_size: Some(8 + 2 + 16 + 4 + 8), + }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"NMap3".to_vec(), + prefix: prefix(b"Example", b"NMap3").to_vec(), + max_values: None, + max_size: Some(16 + 1 + 8 + 2 + 16), }, #[cfg(feature = "conditional-storage")] { @@ -1519,7 +1638,7 @@ fn test_storage_info() { storage_name: b"ConditionalMap".to_vec(), prefix: prefix(b"Example", b"ConditionalMap").to_vec(), max_values: Some(12), - max_size: Some(6 + 8), + max_size: Some(8 + 2 + 4), } }, #[cfg(feature = "conditional-storage")] @@ -1529,7 +1648,7 @@ fn test_storage_info() { storage_name: b"ConditionalDoubleMap".to_vec(), prefix: prefix(b"Example", b"ConditionalDoubleMap").to_vec(), max_values: None, - max_size: Some(7 + 16 + 8), + max_size: Some(16 + 1 + 8 + 2 + 4), } }, #[cfg(feature = "conditional-storage")] @@ -1539,7 +1658,7 @@ fn test_storage_info() { storage_name: b"ConditionalNMap".to_vec(), prefix: prefix(b"Example", b"ConditionalNMap").to_vec(), max_values: None, - max_size: Some(7 + 16 + 8), + max_size: Some(16 + 1 + 8 + 2 + 4), } }, StorageInfo { @@ -1547,7 +1666,7 @@ fn test_storage_info() { storage_name: b"RenamedCountedMap".to_vec(), prefix: prefix(b"Example", b"RenamedCountedMap").to_vec(), max_values: None, - max_size: Some(1 + 4 + 8), + max_size: Some(8 + 1 + 4), }, StorageInfo { pallet_name: b"Example".to_vec(), diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 360a73e5ea2a3..f4ef5f802c44f 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -31,7 +31,7 @@ use sp_runtime::{DispatchError, ModuleError}; #[frame_support::pallet] pub mod pallet { use codec::MaxEncodedLen; - use frame_support::{pallet_prelude::*, scale_info}; + use frame_support::{pallet_prelude::*, parameter_types, scale_info}; use frame_system::pallet_prelude::*; use sp_std::any::TypeId; @@ -104,9 +104,11 @@ pub mod pallet { } #[pallet::error] + #[derive(PartialEq, Eq)] pub enum Error { /// doc comment put into metadata InsufficientProposersBalance, + NonExistentStorageValue, } #[pallet::event] @@ -128,6 +130,20 @@ pub mod pallet { #[pallet::storage] pub type Map2 = StorageMap<_, Twox64Concat, u16, u32>; + parameter_types! 
{ + pub const Map3Default: Result> = Ok(1337); + } + + #[pallet::storage] + pub type Map3 = StorageMap< + _, + Blake2_128Concat, + u32, + u64, + ResultQuery::NonExistentStorageValue>, + Map3Default, + >; + #[pallet::storage] pub type DoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; @@ -136,6 +152,17 @@ pub mod pallet { pub type DoubleMap2 = StorageDoubleMap<_, Twox64Concat, u16, Blake2_128Concat, u32, u64>; + #[pallet::storage] + pub type DoubleMap3 = StorageDoubleMap< + _, + Blake2_128Concat, + u32, + Twox64Concat, + u64, + u128, + ResultQuery::NonExistentStorageValue>, + >; + #[pallet::storage] #[pallet::getter(fn nmap)] pub type NMap = StorageNMap<_, storage::Key, u32>; @@ -145,6 +172,15 @@ pub mod pallet { pub type NMap2 = StorageNMap<_, (storage::Key, storage::Key), u64>; + #[pallet::storage] + #[pallet::getter(fn nmap3)] + pub type NMap3 = StorageNMap< + _, + (NMapKey, NMapKey), + u128, + ResultQuery::NonExistentStorageValue>, + >; + #[pallet::genesis_config] #[derive(Default)] pub struct GenesisConfig { @@ -436,6 +472,13 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); + >::insert(1, 2); + let mut k = [twox_128(b"Example"), twox_128(b"Map3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u64)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!(>::get(2), Ok(1337)); + >::insert(&1, &2, &3); let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -450,6 +493,17 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Example"), twox_128(b"DoubleMap3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u64.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + >::get(2, 3), + Err(pallet::Error::::NonExistentStorageValue), + ); + >::insert((&1,), &3); let mut k = [twox_128(b"Example"), twox_128(b"NMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -462,6 +516,17 @@ fn storage_expand() { k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + + >::insert((&1, &2), &3); + let mut k = [twox_128(b"Example"), twox_128(b"NMap3")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + >::get((2, 3)), + Err(pallet::Error::::NonExistentStorageValue), + ); }); TestExternalities::default().execute_with(|| { @@ -481,6 +546,13 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); + >::insert(1, 2); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"Map3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u64)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!(>::get(2), Ok(1337)); + >::insert(&1, &2, &3); let mut k = [twox_128(b"Instance1Example"), twox_128(b"DoubleMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -495,6 +567,17 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + >::insert(&1, &2, &3); + let mut k = 
[twox_128(b"Instance1Example"), twox_128(b"DoubleMap3")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u64.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + >::get(2, 3), + Err(pallet::Error::::NonExistentStorageValue), + ); + >::insert((&1,), &3); let mut k = [twox_128(b"Instance1Example"), twox_128(b"NMap")].concat(); k.extend(1u8.using_encoded(blake2_128_concat)); @@ -507,6 +590,17 @@ fn storage_expand() { k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u64)); assert_eq!(&k[..32], &>::final_prefix()); + + >::insert((&1, &2), &3); + let mut k = [twox_128(b"Instance1Example"), twox_128(b"NMap3")].concat(); + k.extend(1u8.using_encoded(blake2_128_concat)); + k.extend(2u16.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u128)); + assert_eq!(&k[..32], &>::final_prefix()); + assert_eq!( + >::get((2, 3)), + Err(pallet::Error::::NonExistentStorageValue), + ); }); } @@ -688,6 +782,17 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "Map3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: scale_info::meta_type::(), + value: scale_info::meta_type::(), + hashers: vec![StorageHasher::Blake2_128Concat], + }, + default: vec![0, 57, 5, 0, 0, 0, 0, 0, 0], + docs: vec![], + }, StorageEntryMetadata { name: "DoubleMap", modifier: StorageEntryModifier::Optional, @@ -710,6 +815,17 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "DoubleMap3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + value: scale_info::meta_type::(), + key: scale_info::meta_type::<(u32, u64)>(), + hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], + }, + default: vec![1, 1], + docs: vec![], + }, StorageEntryMetadata { name: "NMap", modifier: StorageEntryModifier::Optional, @@ -732,6 +848,17 @@ fn metadata() { default: vec![0], docs: vec![], }, + StorageEntryMetadata { + name: "NMap3", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + key: scale_info::meta_type::<(u8, u16)>(), + hashers: vec![StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat], + value: scale_info::meta_type::(), + }, + default: vec![1, 1], + docs: vec![], + }, ], }), calls: Some(scale_info::meta_type::>().into()), diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs new file mode 100644 index 0000000000000..a051cc087db58 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.rs @@ -0,0 +1,21 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::error] + pub enum Error { + NonExistentValue, + } + + #[pallet::storage] + type Foo = StorageValue<_, u8, ResultQuery>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr new file mode 100644 index 0000000000000..98265462bbdfb --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_missing_generics.stderr @@ -0,0 +1,15 @@ +error[E0107]: missing generics for enum 
`pallet::Error` + --> tests/pallet_ui/storage_result_query_missing_generics.rs:17:56 + | +17 | type Foo = StorageValue<_, u8, ResultQuery>; + | ^^^^^ expected 1 generic argument + | +note: enum defined here, with 1 generic parameter: `T` + --> tests/pallet_ui/storage_result_query_missing_generics.rs:12:11 + | +12 | pub enum Error { + | ^^^^^ - +help: add missing generic argument + | +17 | type Foo = StorageValue<_, u8, ResultQuery::NonExistentValue>>; + | ~~~~~~~~ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs b/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs new file mode 100644 index 0000000000000..9e0da4b62128d --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.rs @@ -0,0 +1,23 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::error] + pub enum Error { + NonExistentValue, + SomeOtherError, + } + + #[pallet::storage] + type Foo = StorageValue<_, u8, ResultQuery::NonExistentValue, SomeOtherError>>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr new file mode 100644 index 0000000000000..4be2a36eb89e1 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_multiple_type_args.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, unexpected number of generic arguments for ResultQuery, expected 1 type argument, found 2 + --> tests/pallet_ui/storage_result_query_multiple_type_args.rs:19:56 + | +19 | type Foo = StorageValue<_, u8, ResultQuery::NonExistentValue, SomeOtherError>>; + | ^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs b/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs new file mode 100644 index 0000000000000..102a2261f8333 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs @@ -0,0 +1,16 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::storage] + type Foo = StorageValue<_, u8, ResultQuery>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr new file mode 100644 index 0000000000000..77a7972a5b5cf --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_no_defined_pallet_error.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, unexpected number of path segments for the generics in ResultQuery, expected a path with at least 2 segments, found 1 + --> tests/pallet_ui/storage_result_query_no_defined_pallet_error.rs:12:56 + | +12 | type Foo = StorageValue<_, u8, ResultQuery>; + | ^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs new file mode 100644 index 0000000000000..f30dc3b6a3cc7 --- /dev/null +++ 
b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::error] + pub enum Error { + NonExistentValue, + } + + #[pallet::storage] + type Foo = StorageValue<_, u8, ResultQuery(NonExistentValue)>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr new file mode 100644 index 0000000000000..caffd846f272a --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_parenthesized_generics.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, unexpected generic args for ResultQuery, expected angle-bracketed arguments, found `(NonExistentValue)` + --> tests/pallet_ui/storage_result_query_parenthesized_generics.rs:18:55 + | +18 | type Foo = StorageValue<_, u8, ResultQuery(NonExistentValue)>; + | ^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs b/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs new file mode 100644 index 0000000000000..a5065398b3970 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.rs @@ -0,0 +1,22 @@ +#[frame_support::pallet] +mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::BlockNumberFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::error] + pub enum Error { + NonExistentValue, + } + + #[pallet::storage] + type Foo = StorageValue<_, u8, ResultQuery<'static>>; +} + +fn main() { +} diff --git a/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr b/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr new file mode 100644 index 0000000000000..9f333ae28e6aa --- /dev/null +++ b/frame/support/test/tests/pallet_ui/storage_result_query_wrong_generic_kind.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::storage, unexpected generic argument kind, expected a type path to a `PalletError` enum variant, found `'static` + --> tests/pallet_ui/storage_result_query_wrong_generic_kind.rs:18:56 + | +18 | type Foo = StorageValue<_, u8, ResultQuery<'static>>; + | ^^^^^^^ From 1c2cdfc82be3970cf4019fd46932454d0cc4cf4a Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 25 Aug 2022 16:56:18 +0800 Subject: [PATCH 059/348] Allow construct_runtime to take cfg attributes for pallets (#11818) * Allow construct_runtime to take cfg attributes for pallets * cargo fmt * Remove commented out code * Fixes * cargo fmt * Remove inaccurate comments * Fix typo * Properly reverse pallets * Fixes * cargo fmt * Add missing newlines --- Cargo.lock | 11 + frame/support/procedural/Cargo.toml | 2 + .../src/construct_runtime/expand/call.rs | 32 ++- .../src/construct_runtime/expand/config.rs | 21 +- .../src/construct_runtime/expand/event.rs | 43 +++- .../src/construct_runtime/expand/inherent.rs | 55 +++-- .../src/construct_runtime/expand/metadata.rs | 10 + .../src/construct_runtime/expand/origin.rs | 45 +++- .../src/construct_runtime/expand/unsigned.rs | 21 +- .../procedural/src/construct_runtime/mod.rs | 
217 ++++++++++++++---- .../procedural/src/construct_runtime/parse.rs | 31 ++- .../feature_gated_system_pallet.rs | 14 ++ .../feature_gated_system_pallet.stderr | 5 + .../invalid_meta_literal.rs | 15 ++ .../invalid_meta_literal.stderr | 6 + .../unsupported_meta_structure.rs | 15 ++ .../unsupported_meta_structure.stderr | 6 + .../unsupported_pallet_attr.rs | 15 ++ .../unsupported_pallet_attr.stderr | 5 + frame/support/test/tests/pallet.rs | 2 + 20 files changed, 483 insertions(+), 88 deletions(-) create mode 100644 frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr create mode 100644 frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs create mode 100644 frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr diff --git a/Cargo.lock b/Cargo.lock index c99212ba06ba8..ed0d5777d94fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -852,6 +852,15 @@ dependencies = [ "nom", ] +[[package]] +name = "cfg-expr" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aacacf4d96c24b2ad6eb8ee6df040e4f27b0d0b39a5710c30091baa830485db" +dependencies = [ + "smallvec", +] + [[package]] name = "cfg-if" version = "0.1.10" @@ -2284,7 +2293,9 @@ name = "frame-support-procedural" version = "4.0.0-dev" dependencies = [ "Inflector", + "cfg-expr", "frame-support-procedural-tools", + "itertools", "proc-macro2", "quote", "syn", diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 7ddec39cad9fb..06b8056aff982 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -16,6 +16,8 @@ proc-macro = true [dependencies] Inflector = "0.11.4" +cfg-expr = "0.10.3" +itertools = "0.10.3" proc-macro2 = "1.0.37" quote = "1.0.10" syn = { version = "1.0.98", features = ["full"] } diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs b/frame/support/procedural/src/construct_runtime/expand/call.rs index 801b69035121d..028e68f0198d1 100644 --- a/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::Ident; pub fn expand_outer_dispatch( @@ -30,6 +31,7 @@ pub fn expand_outer_dispatch( let mut variant_patterns = Vec::new(); let mut query_call_part_macros = Vec::new(); let mut pallet_names = Vec::new(); + let mut pallet_attrs = Vec::new(); let system_path = &system_pallet.path; let pallets_with_call = pallet_decls.iter().filter(|decl| decl.exists_part("Call")); @@ -38,12 +40,24 @@ pub fn expand_outer_dispatch( let name = &pallet_declaration.name; let path = &pallet_declaration.path; let index = pallet_declaration.index; + let attr = + pallet_declaration.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! 
{ + #acc + #attr + } + }); - variant_defs.extend( - quote!(#[codec(index = #index)] #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ),), - ); + variant_defs.extend(quote! { + #attr + #[codec(index = #index)] + #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ), + }); variant_patterns.push(quote!(Call::#name(call))); pallet_names.push(name); + pallet_attrs.push(attr); query_call_part_macros.push(quote! { #path::__substrate_call_check::is_call_part_defined!(#name); }); @@ -69,6 +83,7 @@ pub fn expand_outer_dispatch( use #scrate::dispatch::Callable; use core::mem::size_of; &[#( + #pallet_attrs ( stringify!(#pallet_names), size_of::< <#pallet_names as Callable<#runtime>>::Call >(), @@ -101,7 +116,10 @@ pub fn expand_outer_dispatch( impl #scrate::dispatch::GetDispatchInfo for Call { fn get_dispatch_info(&self) -> #scrate::dispatch::DispatchInfo { match self { - #( #variant_patterns => call.get_dispatch_info(), )* + #( + #pallet_attrs + #variant_patterns => call.get_dispatch_info(), + )* } } } @@ -110,6 +128,7 @@ pub fn expand_outer_dispatch( use #scrate::dispatch::GetCallName; match self { #( + #pallet_attrs #variant_patterns => { let function_name = call.get_call_name(); let pallet_name = stringify!(#pallet_names); @@ -121,6 +140,7 @@ pub fn expand_outer_dispatch( fn get_module_names() -> &'static [&'static str] { &[#( + #pallet_attrs stringify!(#pallet_names), )*] } @@ -129,6 +149,7 @@ pub fn expand_outer_dispatch( use #scrate::dispatch::{Callable, GetCallName}; match module { #( + #pallet_attrs stringify!(#pallet_names) => <<#pallet_names as Callable<#runtime>>::Call as GetCallName>::get_call_names(), @@ -157,6 +178,7 @@ pub fn expand_outer_dispatch( fn dispatch_bypass_filter(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { match self { #( + #pallet_attrs #variant_patterns => #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(call, origin), )* @@ -165,6 +187,7 @@ pub fn expand_outer_dispatch( } #( + #pallet_attrs impl #scrate::traits::IsSubType<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { #[allow(unreachable_patterns)] fn is_sub_type(&self) -> Option<&#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> { @@ -176,6 +199,7 @@ pub fn expand_outer_dispatch( } } + #pallet_attrs impl From<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { fn from(call: #scrate::dispatch::CallableCallFor<#pallet_names, #runtime>) -> Self { #variant_patterns diff --git a/frame/support/procedural/src/construct_runtime/expand/config.rs b/frame/support/procedural/src/construct_runtime/expand/config.rs index a3d70f18529c7..9b731a5825a3c 100644 --- a/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -19,6 +19,7 @@ use crate::construct_runtime::Pallet; use inflector::Inflector; use proc_macro2::TokenStream; use quote::{format_ident, quote, ToTokens}; +use std::str::FromStr; use syn::Ident; pub fn expand_outer_config( @@ -40,11 +41,19 @@ pub fn expand_outer_config( let field_name = &Ident::new(&pallet_name.to_string().to_snake_case(), decl.name.span()); let part_is_generic = !pallet_entry.generics.params.is_empty(); + let attr = &decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! 
{ + #acc + #attr + } + }); - types.extend(expand_config_types(runtime, decl, &config, part_is_generic)); - fields.extend(quote!(pub #field_name: #config,)); + types.extend(expand_config_types(attr, runtime, decl, &config, part_is_generic)); + fields.extend(quote!(#attr pub #field_name: #config,)); build_storage_calls - .extend(expand_config_build_storage_call(scrate, runtime, decl, field_name)); + .extend(expand_config_build_storage_call(scrate, attr, runtime, decl, field_name)); query_genesis_config_part_macros.push(quote! { #path::__substrate_genesis_config_check::is_genesis_config_defined!(#pallet_name); #[cfg(feature = "std")] @@ -88,6 +97,7 @@ pub fn expand_outer_config( } fn expand_config_types( + attr: &TokenStream, runtime: &Ident, decl: &Pallet, config: &Ident, @@ -97,14 +107,17 @@ fn expand_config_types( match (decl.instance.as_ref(), part_is_generic) { (Some(inst), true) => quote! { + #attr #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig<#runtime, #path::#inst>; }, (None, true) => quote! { + #attr #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig<#runtime>; }, (_, false) => quote! { + #attr #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig; }, @@ -113,6 +126,7 @@ fn expand_config_types( fn expand_config_build_storage_call( scrate: &TokenStream, + attr: &TokenStream, runtime: &Ident, decl: &Pallet, field_name: &Ident, @@ -125,6 +139,7 @@ fn expand_config_build_storage_call( }; quote! { + #attr #scrate::sp_runtime::BuildModuleGenesisStorage:: <#runtime, #instance>::build_module_genesis_storage(&self.#field_name, storage)?; } diff --git a/frame/support/procedural/src/construct_runtime/expand/event.rs b/frame/support/procedural/src/construct_runtime/expand/event.rs index b242f9641562c..f145327d37af5 100644 --- a/frame/support/procedural/src/construct_runtime/expand/event.rs +++ b/frame/support/procedural/src/construct_runtime/expand/event.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::{Generics, Ident}; pub fn expand_outer_event( @@ -97,19 +98,35 @@ fn expand_event_variant( let path = &pallet.path; let variant_name = &pallet.name; let part_is_generic = !generics.params.is_empty(); + let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); match instance { - Some(inst) if part_is_generic => { - quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime, #path::#inst>),) + Some(inst) if part_is_generic => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Event<#runtime, #path::#inst>), }, - Some(inst) => { - quote!(#[codec(index = #index)] #variant_name(#path::Event<#path::#inst>),) + Some(inst) => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Event<#path::#inst>), }, - None if part_is_generic => { - quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime>),) + None if part_is_generic => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Event<#runtime>), }, - None => { - quote!(#[codec(index = #index)] #variant_name(#path::Event),) + None => quote! 
{ + #attr + #[codec(index = #index)] + #variant_name(#path::Event), }, } } @@ -120,13 +137,23 @@ fn expand_event_conversion( pallet_event: &TokenStream, ) -> TokenStream { let variant_name = &pallet.name; + let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); quote! { + #attr impl From<#pallet_event> for Event { fn from(x: #pallet_event) -> Self { Event::#variant_name(x) } } + #attr impl TryInto<#pallet_event> for Event { type Error = (); diff --git a/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/frame/support/procedural/src/construct_runtime/expand/inherent.rs index 0f0d538643240..599b34ba87241 100644 --- a/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::{Ident, TypePath}; pub fn expand_outer_inherent( @@ -28,14 +29,24 @@ pub fn expand_outer_inherent( scrate: &TokenStream, ) -> TokenStream { let mut pallet_names = Vec::new(); + let mut pallet_attrs = Vec::new(); let mut query_inherent_part_macros = Vec::new(); for pallet_decl in pallet_decls { if pallet_decl.exists_part("Inherent") { let name = &pallet_decl.name; let path = &pallet_decl.path; + let attr = pallet_decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); pallet_names.push(name); + pallet_attrs.push(attr); query_inherent_part_macros.push(quote! { #path::__substrate_inherent_check::is_inherent_part_defined!(#name); }); @@ -60,6 +71,7 @@ pub fn expand_outer_inherent( let mut inherents = Vec::new(); #( + #pallet_attrs if let Some(inherent) = #pallet_names::create_inherent(self) { let inherent = <#unchecked_extrinsic as #scrate::inherent::Extrinsic>::new( inherent.into(), @@ -90,22 +102,25 @@ pub fn expand_outer_inherent( let mut is_inherent = false; - #({ - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); - if let Some(call) = IsSubType::<_>::is_sub_type(call) { - if #pallet_names::is_inherent(call) { - is_inherent = true; - if let Err(e) = #pallet_names::check_inherent(call, self) { - result.put_error( - #pallet_names::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); - if e.is_fatal_error() { - return result; + #( + #pallet_attrs + { + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if #pallet_names::is_inherent(call) { + is_inherent = true; + if let Err(e) = #pallet_names::check_inherent(call, self) { + result.put_error( + #pallet_names::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result; + } } } } } - })* + )* // Inherents are before any other extrinsics. // No module marked it as inherent thus it is not. 
@@ -115,6 +130,7 @@ pub fn expand_outer_inherent( } #( + #pallet_attrs match #pallet_names::is_inherent_required(self) { Ok(Some(e)) => { let found = block.extrinsics().iter().any(|xt| { @@ -177,14 +193,17 @@ pub fn expand_outer_inherent( false } else { let mut is_inherent = false; - #({ - let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); - if let Some(call) = IsSubType::<_>::is_sub_type(call) { - if #pallet_names::is_inherent(&call) { - is_inherent = true; + #( + #pallet_attrs + { + let call = <#unchecked_extrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if #pallet_names::is_inherent(&call) { + is_inherent = true; + } } } - })* + )* is_inherent }; diff --git a/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 6e2dd5fc002c6..ec90a0d30f98b 100644 --- a/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::{Ident, TypePath}; pub fn expand_runtime_metadata( @@ -47,8 +48,17 @@ pub fn expand_runtime_metadata( let event = expand_pallet_metadata_events(&filtered_names, runtime, scrate, decl); let constants = expand_pallet_metadata_constants(runtime, decl); let errors = expand_pallet_metadata_errors(runtime, decl); + let attr = decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); quote! { + #attr #scrate::metadata::PalletMetadata { name: stringify!(#name), index: #index, diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index 46f08832f0bb4..a4bb0d9cbaa02 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::{Pallet, SYSTEM_PALLET_NAME}; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::{Generics, Ident}; pub fn expand_outer_origin( @@ -303,19 +304,35 @@ fn expand_origin_caller_variant( let part_is_generic = !generics.params.is_empty(); let variant_name = &pallet.name; let path = &pallet.path; + let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); match instance { - Some(inst) if part_is_generic => { - quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime, #path::#inst>),) + Some(inst) if part_is_generic => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Origin<#runtime, #path::#inst>), }, - Some(inst) => { - quote!(#[codec(index = #index)] #variant_name(#path::Origin<#path::#inst>),) + Some(inst) => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Origin<#path::#inst>), }, - None if part_is_generic => { - quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime>),) + None if part_is_generic => quote! 
{ + #attr + #[codec(index = #index)] + #variant_name(#path::Origin<#runtime>), }, - None => { - quote!(#[codec(index = #index)] #variant_name(#path::Origin),) + None => quote! { + #attr + #[codec(index = #index)] + #variant_name(#path::Origin), }, } } @@ -339,14 +356,24 @@ fn expand_origin_pallet_conversions( }; let doc_string = get_intra_doc_string(" Convert to runtime origin using", &path.module_name()); + let attr = pallet.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); quote! { + #attr impl From<#pallet_origin> for OriginCaller { fn from(x: #pallet_origin) -> Self { OriginCaller::#variant_name(x) } } + #attr impl From<#pallet_origin> for Origin { #[doc = #doc_string] fn from(x: #pallet_origin) -> Self { @@ -355,6 +382,7 @@ fn expand_origin_pallet_conversions( } } + #attr impl From for #scrate::sp_std::result::Result<#pallet_origin, Origin> { /// NOTE: converting to pallet origin loses the origin filter information. fn from(val: Origin) -> Self { @@ -366,6 +394,7 @@ fn expand_origin_pallet_conversions( } } + #attr impl TryFrom for #pallet_origin { type Error = OriginCaller; fn try_from( diff --git a/frame/support/procedural/src/construct_runtime/expand/unsigned.rs b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs index c030676802093..310516793472f 100644 --- a/frame/support/procedural/src/construct_runtime/expand/unsigned.rs +++ b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs @@ -18,6 +18,7 @@ use crate::construct_runtime::Pallet; use proc_macro2::TokenStream; use quote::quote; +use std::str::FromStr; use syn::Ident; pub fn expand_outer_validate_unsigned( @@ -26,14 +27,24 @@ pub fn expand_outer_validate_unsigned( scrate: &TokenStream, ) -> TokenStream { let mut pallet_names = Vec::new(); + let mut pallet_attrs = Vec::new(); let mut query_validate_unsigned_part_macros = Vec::new(); for pallet_decl in pallet_decls { if pallet_decl.exists_part("ValidateUnsigned") { let name = &pallet_decl.name; let path = &pallet_decl.path; + let attr = pallet_decl.cfg_pattern.iter().fold(TokenStream::new(), |acc, pattern| { + let attr = TokenStream::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }); pallet_names.push(name); + pallet_attrs.push(attr); query_validate_unsigned_part_macros.push(quote! { #path::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined!(#name); }); @@ -49,7 +60,10 @@ pub fn expand_outer_validate_unsigned( fn pre_dispatch(call: &Self::Call) -> Result<(), #scrate::unsigned::TransactionValidityError> { #[allow(unreachable_patterns)] match call { - #( Call::#pallet_names(inner_call) => #pallet_names::pre_dispatch(inner_call), )* + #( + #pallet_attrs + Call::#pallet_names(inner_call) => #pallet_names::pre_dispatch(inner_call), + )* // pre-dispatch should not stop inherent extrinsics, validation should prevent // including arbitrary (non-inherent) extrinsics to blocks. 
_ => Ok(()), @@ -63,7 +77,10 @@ pub fn expand_outer_validate_unsigned( ) -> #scrate::unsigned::TransactionValidity { #[allow(unreachable_patterns)] match call { - #( Call::#pallet_names(inner_call) => #pallet_names::validate_unsigned(source, inner_call), )* + #( + #pallet_attrs + Call::#pallet_names(inner_call) => #pallet_names::validate_unsigned(source, inner_call), + )* _ => #scrate::unsigned::UnknownTransaction::NoUnsignedValidator.into(), } } diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 042af31be6bf4..cfd582d0e4472 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -144,9 +144,11 @@ mod expand; mod parse; +use cfg_expr::Predicate; use frame_support_procedural_tools::{ generate_crate_access, generate_crate_access_2018, generate_hidden_includes, }; +use itertools::Itertools; use parse::{ ExplicitRuntimeDeclaration, ImplicitRuntimeDeclaration, Pallet, RuntimeDeclaration, WhereSection, @@ -154,6 +156,10 @@ use parse::{ use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::quote; +use std::{ + collections::{HashMap, HashSet}, + str::FromStr, +}; use syn::{Ident, Result}; /// The fixed name of the system pallet. @@ -223,6 +229,28 @@ fn construct_runtime_final_expansion( Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", ) })?; + if !system_pallet.cfg_pattern.is_empty() { + return Err(syn::Error::new( + system_pallet.name.span(), + "`System` pallet declaration is feature gated, please remove any `#[cfg]` attributes", + )) + } + + let features = pallets + .iter() + .filter_map(|decl| { + (!decl.cfg_pattern.is_empty()).then(|| { + decl.cfg_pattern.iter().flat_map(|attr| { + attr.predicates().filter_map(|pred| match pred { + Predicate::Feature(feat) => Some(feat), + Predicate::Test => Some("test"), + _ => None, + }) + }) + }) + }) + .flatten() + .collect::>(); let hidden_crate_name = "construct_runtime"; let scrate = generate_crate_access(hidden_crate_name, "frame-support"); @@ -231,7 +259,7 @@ fn construct_runtime_final_expansion( let outer_event = expand::expand_outer_event(&name, &pallets, &scrate)?; let outer_origin = expand::expand_outer_origin(&name, system_pallet, &pallets, &scrate)?; - let all_pallets = decl_all_pallets(&name, pallets.iter()); + let all_pallets = decl_all_pallets(&name, pallets.iter(), &features); let pallet_to_index = decl_pallet_runtime_setup(&name, &pallets, &scrate); let dispatch = expand::expand_outer_dispatch(&name, system_pallet, &pallets, &scrate); @@ -293,40 +321,140 @@ fn construct_runtime_final_expansion( fn decl_all_pallets<'a>( runtime: &'a Ident, pallet_declarations: impl Iterator, + features: &HashSet<&str>, ) -> TokenStream2 { let mut types = TokenStream2::new(); - let mut names = Vec::new(); + let mut names_by_feature = features + .iter() + .powerset() + .map(|feat| (feat, Vec::new())) + .collect::>(); for pallet_declaration in pallet_declarations { let type_name = &pallet_declaration.name; let pallet = &pallet_declaration.path; let mut generics = vec![quote!(#runtime)]; generics.extend(pallet_declaration.instance.iter().map(|name| quote!(#pallet::#name))); + let mut attrs = Vec::new(); + for cfg in &pallet_declaration.cfg_pattern { + let feat = format!("#[cfg({})]\n", cfg.original()); + attrs.extend(TokenStream2::from_str(&feat).expect("was parsed successfully; qed")); + } let type_decl = quote!( + #(#attrs)* pub type 
#type_name = #pallet::Pallet <#(#generics),*>; ); types.extend(type_decl); - names.push(&pallet_declaration.name); + + if pallet_declaration.cfg_pattern.is_empty() { + for names in names_by_feature.values_mut() { + names.push(&pallet_declaration.name); + } + } else { + for (feature_set, names) in &mut names_by_feature { + // Rust tidbit: if we have multiple `#[cfg]` feature on the same item, then the + // predicates listed in all `#[cfg]` attributes are effectively joined by `and()`, + // meaning that all of them must match in order to activate the item + let is_feature_active = pallet_declaration.cfg_pattern.iter().all(|expr| { + expr.eval(|pred| match pred { + Predicate::Feature(f) => feature_set.contains(&f), + Predicate::Test => feature_set.contains(&&"test"), + _ => false, + }) + }); + if is_feature_active { + names.push(&pallet_declaration.name); + } + } + } } - let system_pallet = match names.iter().find(|n| **n == SYSTEM_PALLET_NAME) { - Some(name) => name, - None => - return syn::Error::new( - proc_macro2::Span::call_site(), - "`System` pallet declaration is missing. \ - Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", - ) - .into_compile_error(), - }; + let all_pallets_without_system = names_by_feature.iter().map(|(feature_set, names)| { + let mut feature_set = feature_set.iter().collect::>(); + let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); + let feature_set = feature_set.into_iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types. + /// Excludes the System pallet. + pub type AllPalletsWithoutSystem = ( #(#names,)* ); + } + }); - let names_without_system = - names.iter().filter(|n| **n != SYSTEM_PALLET_NAME).collect::>(); - let names_reversed = names.clone().into_iter().rev().collect::>(); - let names_without_system_reverse = - names_without_system.clone().into_iter().rev().collect::>(); - let names_reversed_with_system_first = std::iter::once(system_pallet) - .chain(names_without_system_reverse.clone().into_iter()) - .collect::>(); + let all_pallets_with_system = names_by_feature.iter().map(|(feature_set, names)| { + let mut feature_set = feature_set.iter().collect::>(); + let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); + let feature_set = feature_set.into_iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types. + pub type AllPalletsWithSystem = ( #(#names,)* ); + } + }); + + let all_pallets_without_system_reversed = + names_by_feature.iter().map(|(feature_set, names)| { + let mut feature_set = feature_set.iter().collect::>(); + let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); + let feature_set = feature_set.into_iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME).rev(); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types in reversed order. + /// Excludes the System pallet. + #[deprecated(note = "Using reverse pallet orders is deprecated. 
use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsWithoutSystemReversed = ( #(#names,)* ); + } + }); + + let all_pallets_with_system_reversed = names_by_feature.iter().map(|(feature_set, names)| { + let mut feature_set = feature_set.iter().collect::>(); + let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); + let feature_set = feature_set.into_iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + let names = names.iter().rev(); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types in reversed order. + #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsWithSystemReversed = ( #(#names,)* ); + } + }); + + let system_pallet = + match names_by_feature[&Vec::new()].iter().find(|n| **n == SYSTEM_PALLET_NAME) { + Some(name) => name, + None => + return syn::Error::new( + proc_macro2::Span::call_site(), + "`System` pallet declaration is missing. \ + Please add this line: `System: frame_system,`", + ) + .into_compile_error(), + }; + + let all_pallets_reversed_with_system_first = + names_by_feature.iter().map(|(feature_set, names)| { + let mut feature_set = feature_set.iter().collect::>(); + let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); + let feature_set = feature_set.into_iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + let names = std::iter::once(system_pallet) + .chain(names.iter().rev().filter(|n| **n != SYSTEM_PALLET_NAME)); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types in reversed order. + /// With the system pallet first. + #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsReversedWithSystemFirst = ( #(#names,)* ); + } + }); quote!( #types @@ -342,29 +470,15 @@ fn decl_all_pallets<'a>( https://github.com/paritytech/substrate/pull/10043")] pub type AllPallets = AllPalletsWithSystem; - /// All pallets included in the runtime as a nested tuple of types. - pub type AllPalletsWithSystem = ( #(#names),* ); + #( #all_pallets_with_system )* - /// All pallets included in the runtime as a nested tuple of types. - /// Excludes the System pallet. - pub type AllPalletsWithoutSystem = ( #(#names_without_system),* ); - - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// Excludes the System pallet. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsWithoutSystemReversed =( #(#names_without_system_reverse),* ); - - /// All pallets included in the runtime as a nested tuple of types in reversed order. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsWithSystemReversed = ( #(#names_reversed),* ); - - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// With the system pallet first. - #[deprecated(note = "Using reverse pallet orders is deprecated. 
use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsReversedWithSystemFirst = ( #(#names_reversed_with_system_first),* ); + #( #all_pallets_without_system )* + + #( #all_pallets_with_system_reversed )* + + #( #all_pallets_without_system_reversed )* + + #( #all_pallets_reversed_with_system_first )* ) } @@ -387,6 +501,19 @@ fn decl_pallet_runtime_setup( } }) .collect::>(); + let pallet_attrs = pallet_declarations + .iter() + .map(|pallet| { + pallet.cfg_pattern.iter().fold(TokenStream2::new(), |acc, pattern| { + let attr = TokenStream2::from_str(&format!("#[cfg({})]", pattern.original())) + .expect("was successfully parsed before; qed"); + quote! { + #acc + #attr + } + }) + }) + .collect::>(); quote!( /// Provides an implementation of `PalletInfo` to provide information @@ -397,6 +524,7 @@ fn decl_pallet_runtime_setup( fn index() -> Option { let type_id = #scrate::sp_std::any::TypeId::of::
<P>
(); #( + #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#indices) } @@ -408,6 +536,7 @@ fn decl_pallet_runtime_setup( fn name() -> Option<&'static str> { let type_id = #scrate::sp_std::any::TypeId::of::
<P>
(); #( + #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#name_strings) } @@ -419,6 +548,7 @@ fn decl_pallet_runtime_setup( fn module_name() -> Option<&'static str> { let type_id = #scrate::sp_std::any::TypeId::of::
<P>
(); #( + #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#module_names) } @@ -430,6 +560,7 @@ fn decl_pallet_runtime_setup( fn crate_version() -> Option<#scrate::traits::CrateVersion> { let type_id = #scrate::sp_std::any::TypeId::of::
<P>
(); #( + #pallet_attrs if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some( <#pallet_structs as #scrate::traits::PalletInfoAccess>::crate_version() diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 711da85c10cfc..7a5acf43b92b0 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -23,7 +23,7 @@ use syn::{ parse::{Parse, ParseStream}, punctuated::Punctuated, spanned::Spanned, - token, Error, Ident, Path, Result, Token, + token, Attribute, Error, Ident, Path, Result, Token, }; mod keyword { @@ -185,6 +185,8 @@ impl Parse for WhereDefinition { pub struct PalletDeclaration { /// The name of the pallet, e.g.`System` in `System: frame_system`. pub name: Ident, + /// Optional attributes tagged right above a pallet declaration. + pub attrs: Vec, /// Optional fixed index, e.g. `MyPallet ... = 3,`. pub index: Option, /// The path of the pallet, e.g. `frame_system` in `System: frame_system`. @@ -212,6 +214,8 @@ pub enum SpecifiedParts { impl Parse for PalletDeclaration { fn parse(input: ParseStream) -> Result { + let attrs = input.call(Attribute::parse_outer)?; + let name = input.parse()?; let _: Token![:] = input.parse()?; let path = input.parse()?; @@ -279,7 +283,7 @@ impl Parse for PalletDeclaration { None }; - Ok(Self { name, path, instance, pallet_parts, specified_parts, index }) + Ok(Self { attrs, name, path, instance, pallet_parts, specified_parts, index }) } } @@ -535,6 +539,8 @@ pub struct Pallet { pub instance: Option, /// The pallet parts to use for the pallet. pub pallet_parts: Vec, + /// Expressions specified inside of a #[cfg] attribute. + pub cfg_pattern: Vec, } impl Pallet { @@ -647,11 +653,32 @@ fn convert_pallets(pallets: Vec) -> syn::Result (), } + let cfg_pattern = pallet + .attrs + .iter() + .map(|attr| { + if attr.path.segments.len() != 1 || attr.path.segments[0].ident != "cfg" { + let msg = "Unsupported attribute, only #[cfg] is supported on pallet \ + declarations in `construct_runtime`"; + return Err(syn::Error::new(attr.span(), msg)) + } + + attr.parse_args_with(|input: syn::parse::ParseStream| { + // Required, otherwise the parse stream doesn't advance and will result in + // an error. + let input = input.parse::()?; + cfg_expr::Expression::parse(&input.to_string()) + .map_err(|e| syn::Error::new(attr.span(), e.to_string())) + }) + }) + .collect::>>()?; + Ok(Pallet { name: pallet.name, index: final_index, path: pallet.path, instance: pallet.instance, + cfg_pattern, pallet_parts, }) }) diff --git a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs new file mode 100644 index 0000000000000..79b5632babd95 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.rs @@ -0,0 +1,14 @@ +use frame_support::construct_runtime; + +construct_runtime! 
{ + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + #[cfg(test)] + System: frame_system::{Pallet, Call, Storage, Config, Event}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr new file mode 100644 index 0000000000000..a86a839615aa0 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/feature_gated_system_pallet.stderr @@ -0,0 +1,5 @@ +error: `System` pallet declaration is feature gated, please remove any `#[cfg]` attributes + --> tests/construct_runtime_ui/feature_gated_system_pallet.rs:10:3 + | +10 | System: frame_system::{Pallet, Call, Storage, Config, Event}, + | ^^^^^^ diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs new file mode 100644 index 0000000000000..a1d39fa76ea85 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.rs @@ -0,0 +1,15 @@ +use frame_support::construct_runtime; + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet}, + #[cfg(feature = 1)] + Balance: balances::{Config, Call}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr new file mode 100644 index 0000000000000..68366a3410bf1 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/invalid_meta_literal.stderr @@ -0,0 +1,6 @@ +error: feature = 1 + ^ expected one of ``, `all`, `any`, `not` here + --> tests/construct_runtime_ui/invalid_meta_literal.rs:10:3 + | +10 | #[cfg(feature = 1)] + | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs new file mode 100644 index 0000000000000..b93adf9a780a7 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.rs @@ -0,0 +1,15 @@ +use frame_support::construct_runtime; + +construct_runtime! { + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet}, + #[cfg(feature(test))] + Balance: balances::{Config, Call}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr new file mode 100644 index 0000000000000..98d99a0d34997 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_meta_structure.stderr @@ -0,0 +1,6 @@ +error: feature(test) + ^ expected one of `=`, `,`, `)` here + --> tests/construct_runtime_ui/unsupported_meta_structure.rs:10:3 + | +10 | #[cfg(feature(test))] + | ^ diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs new file mode 100644 index 0000000000000..3ec8b9db1d435 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.rs @@ -0,0 +1,15 @@ +use frame_support::construct_runtime; + +construct_runtime! 
{ + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Pallet}, + #[attr] + Balance: balances::{Config, Call}, + } +} + +fn main() {} diff --git a/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr new file mode 100644 index 0000000000000..fceb2b8a99db8 --- /dev/null +++ b/frame/support/test/tests/construct_runtime_ui/unsupported_pallet_attr.stderr @@ -0,0 +1,5 @@ +error: Unsupported attribute, only #[cfg] is supported on pallet declarations in `construct_runtime` + --> tests/construct_runtime_ui/unsupported_pallet_attr.rs:10:3 + | +10 | #[attr] + | ^ diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index e96e52d3d2c6e..497b7bb04c36a 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -626,6 +626,8 @@ frame_support::construct_runtime!( System: frame_system exclude_parts { Pallet, Storage }, Example: pallet, Example2: pallet2 exclude_parts { Call }, + #[cfg(feature = "example3")] + Example3: pallet3, Example4: pallet4 use_parts { Call }, } ); From 088b5e0d71b57886957cfbedfb68b36b8014dc5e Mon Sep 17 00:00:00 2001 From: Squirrel Date: Thu, 25 Aug 2022 12:11:15 +0100 Subject: [PATCH 060/348] clone not required as copy. (#12110) --- frame/support/procedural/src/pallet/parse/storage.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 97abdd3ad0977..f0e1353774910 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -775,7 +775,7 @@ impl StorageDef { error.push_value(last); Ok(Some(QueryKind::ResultQuery( - syn::Path { leading_colon: leading_colon.clone(), segments: error }, + syn::Path { leading_colon, segments: error }, err_variant, ))) }, From 85d7e106be0885c7f4dfd7e38f8684ae93d39ced Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 25 Aug 2022 13:18:53 +0200 Subject: [PATCH 061/348] Fix `wasm_export_functions` when using a return statement (#12107) When used a return statement before we directly returned from the function which is obviously wrong. --- client/executor/runtime-test/src/lib.rs | 531 ++++++++++--------- client/executor/src/integration_tests/mod.rs | 11 + primitives/core/src/testing.rs | 4 +- 3 files changed, 281 insertions(+), 265 deletions(-) diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 0c61d6fcd38a2..5a741f51c3899 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -54,287 +54,287 @@ static mut MUTABLE_STATIC: u64 = 32; static mut MUTABLE_STATIC_BSS: u64 = 0; sp_core::wasm_export_functions! 
{ - fn test_calling_missing_external() { - unsafe { missing_external() } - } + fn test_calling_missing_external() { + unsafe { missing_external() } + } - fn test_calling_yet_another_missing_external() { - unsafe { yet_another_missing_external() } - } + fn test_calling_yet_another_missing_external() { + unsafe { yet_another_missing_external() } + } - fn test_data_in(input: Vec) -> Vec { - print("set_storage"); - storage::set(b"input", &input); + fn test_data_in(input: Vec) -> Vec { + print("set_storage"); + storage::set(b"input", &input); - print("storage"); - let foo = storage::get(b"foo").unwrap(); + print("storage"); + let foo = storage::get(b"foo").unwrap(); - print("set_storage"); - storage::set(b"baz", &foo); + print("set_storage"); + storage::set(b"baz", &foo); - print("finished!"); - b"all ok!".to_vec() - } + print("finished!"); + b"all ok!".to_vec() + } - fn test_clear_prefix(input: Vec) -> Vec { - storage::clear_prefix(&input, None); - b"all ok!".to_vec() - } + fn test_clear_prefix(input: Vec) -> Vec { + storage::clear_prefix(&input, None); + b"all ok!".to_vec() + } - fn test_empty_return() {} + fn test_empty_return() {} - fn test_dirty_plenty_memory(heap_base: u32, heap_pages: u32) { - // This piece of code will dirty multiple pages of memory. The number of pages is given by - // the `heap_pages`. It's unit is a wasm page (64KiB). The first page to be cleared - // is a wasm page that that follows the one that holds the `heap_base` address. - // - // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take - // 16 writes to process a single wasm page. + fn test_dirty_plenty_memory(heap_base: u32, heap_pages: u32) { + // This piece of code will dirty multiple pages of memory. The number of pages is given by + // the `heap_pages`. It's unit is a wasm page (64KiB). The first page to be cleared + // is a wasm page that that follows the one that holds the `heap_base` address. + // + // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take + // 16 writes to process a single wasm page. - let heap_ptr = heap_base as usize; + let heap_ptr = heap_base as usize; - // Find the next wasm page boundary. - let heap_ptr = round_up_to(heap_ptr, 65536); + // Find the next wasm page boundary. + let heap_ptr = round_up_to(heap_ptr, 65536); - // Make it an actual pointer - let heap_ptr = heap_ptr as *mut u8; + // Make it an actual pointer + let heap_ptr = heap_ptr as *mut u8; - // Traverse the host pages and make each one dirty - let host_pages = heap_pages as usize * 16; - for i in 0..host_pages { - unsafe { - // technically this is an UB, but there is no way Rust can find this out. - heap_ptr.add(i * 4096).write(0); - } - } + // Traverse the host pages and make each one dirty + let host_pages = heap_pages as usize * 16; + for i in 0..host_pages { + unsafe { + // technically this is an UB, but there is no way Rust can find this out. 
+ heap_ptr.add(i * 4096).write(0); + } + } - fn round_up_to(n: usize, divisor: usize) -> usize { - (n + divisor - 1) / divisor - } - } + fn round_up_to(n: usize, divisor: usize) -> usize { + (n + divisor - 1) / divisor + } + } fn test_allocate_vec(size: u32) -> Vec { Vec::with_capacity(size as usize) } - fn test_fp_f32add(a: [u8; 4], b: [u8; 4]) -> [u8; 4] { - let a = f32::from_le_bytes(a); - let b = f32::from_le_bytes(b); - f32::to_le_bytes(a + b) - } - - fn test_panic() { panic!("test panic") } - - fn test_conditional_panic(input: Vec) -> Vec { - if input.len() > 0 { - panic!("test panic") - } - - input - } - - fn test_blake2_256(input: Vec) -> Vec { - blake2_256(&input).to_vec() - } - - fn test_blake2_128(input: Vec) -> Vec { - blake2_128(&input).to_vec() - } - - fn test_sha2_256(input: Vec) -> Vec { - sha2_256(&input).to_vec() - } - - fn test_twox_256(input: Vec) -> Vec { - twox_256(&input).to_vec() - } - - fn test_twox_128(input: Vec) -> Vec { - twox_128(&input).to_vec() - } - - fn test_ed25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) - } - - fn test_sr25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) - } - - fn test_ordered_trie_root() -> Vec { - BlakeTwo256::ordered_trie_root( - vec![ - b"zero"[..].into(), - b"one"[..].into(), - b"two"[..].into(), - ], - sp_core::storage::StateVersion::V1, - ).as_ref().to_vec() - } - - fn test_offchain_index_set() { - sp_io::offchain_index::set(b"k", b"v"); - } - - fn test_offchain_local_storage() -> bool { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - sp_io::offchain::local_storage_set(kind, b"test", b"asd"); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec())); - - let res = sp_io::offchain::local_storage_compare_and_set( - kind, - b"test", - Some(b"asd".to_vec()), - b"", - ); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); - res - } - - fn test_offchain_local_storage_with_none() { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - - let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); - assert_eq!(res, true); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); - } - - fn test_offchain_http() -> bool { - use sp_core::offchain::HttpRequestStatus; - let run = || -> Option<()> { - let id = sp_io::offchain::http_request_start( - "POST", - "http://localhost:12345", - &[], - ).ok()?; - sp_io::offchain::http_request_add_header(id, "X-Auth", "test").ok()?; - sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; - sp_io::offchain::http_request_write_body(id, &[], None).ok()?; - let status = sp_io::offchain::http_response_wait(&[id], None); - assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); - let headers = sp_io::offchain::http_response_headers(id); - assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); - let mut buffer = vec![0; 64]; 
- let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 3); - assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); - let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 0); - - Some(()) - }; - - run().is_some() - } - - fn test_enter_span() -> u64 { - wasm_tracing::enter_span(Default::default()) - } - - fn test_exit_span(span_id: u64) { - wasm_tracing::exit(span_id) - } - - fn test_nested_spans() { - sp_io::init_tracing(); - let span_id = wasm_tracing::enter_span(Default::default()); - { - sp_io::init_tracing(); - let span_id = wasm_tracing::enter_span(Default::default()); - wasm_tracing::exit(span_id); - } - wasm_tracing::exit(span_id); - } - - fn returns_mutable_static() -> u64 { - unsafe { - MUTABLE_STATIC += 1; - MUTABLE_STATIC - } - } - - fn returns_mutable_static_bss() -> u64 { - unsafe { - MUTABLE_STATIC_BSS += 1; - MUTABLE_STATIC_BSS - } - } - - fn allocates_huge_stack_array(trap: bool) -> Vec { - // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). - // This will just decrease (stacks in wasm32-u-u grow downwards) the stack - // pointer. This won't trap on the current compilers. - let mut data = [0u8; 1024 * 768]; - - // Then make sure we actually write something to it. - // - // If: - // 1. the stack area is placed at the beginning of the linear memory space, and - // 2. the stack pointer points to out-of-bounds area, and - // 3. a write is performed around the current stack pointer. - // - // then a trap should happen. - // - for (i, v) in data.iter_mut().enumerate() { - *v = i as u8; // deliberate truncation - } - - if trap { - // There is a small chance of this to be pulled up in theory. In practice - // the probability of that is rather low. - panic!() - } - - data.to_vec() - } - - // Check that the heap at `heap_base + offset` don't contains the test message. - // After the check succeeds the test message is written into the heap. - // - // It is expected that the given pointer is not allocated. 
- fn check_and_set_in_heap(heap_base: u32, offset: u32) { - let test_message = b"Hello invalid heap memory"; - let ptr = (heap_base + offset) as *mut u8; - - let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; - - assert_ne!(test_message, message_slice); - message_slice.copy_from_slice(test_message); - } - - fn test_spawn() { - let data = vec![1u8, 2u8]; - let data_new = sp_tasks::spawn(tasks::incrementer, data).join(); - - assert_eq!(data_new, vec![2u8, 3u8]); - } - - fn test_nested_spawn() { - let data = vec![7u8, 13u8]; - let data_new = sp_tasks::spawn(tasks::parallel_incrementer, data).join(); - - assert_eq!(data_new, vec![10u8, 16u8]); - } - - fn test_panic_in_spawned() { - sp_tasks::spawn(tasks::panicker, vec![]).join(); - } + fn test_fp_f32add(a: [u8; 4], b: [u8; 4]) -> [u8; 4] { + let a = f32::from_le_bytes(a); + let b = f32::from_le_bytes(b); + f32::to_le_bytes(a + b) + } + + fn test_panic() { panic!("test panic") } + + fn test_conditional_panic(input: Vec) -> Vec { + if input.len() > 0 { + panic!("test panic") + } + + input + } + + fn test_blake2_256(input: Vec) -> Vec { + blake2_256(&input).to_vec() + } + + fn test_blake2_128(input: Vec) -> Vec { + blake2_128(&input).to_vec() + } + + fn test_sha2_256(input: Vec) -> Vec { + sha2_256(&input).to_vec() + } + + fn test_twox_256(input: Vec) -> Vec { + twox_256(&input).to_vec() + } + + fn test_twox_128(input: Vec) -> Vec { + twox_128(&input).to_vec() + } + + fn test_ed25519_verify(input: Vec) -> bool { + let mut pubkey = [0; 32]; + let mut sig = [0; 64]; + + pubkey.copy_from_slice(&input[0..32]); + sig.copy_from_slice(&input[32..96]); + + let msg = b"all ok!"; + ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) + } + + fn test_sr25519_verify(input: Vec) -> bool { + let mut pubkey = [0; 32]; + let mut sig = [0; 64]; + + pubkey.copy_from_slice(&input[0..32]); + sig.copy_from_slice(&input[32..96]); + + let msg = b"all ok!"; + sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) + } + + fn test_ordered_trie_root() -> Vec { + BlakeTwo256::ordered_trie_root( + vec![ + b"zero"[..].into(), + b"one"[..].into(), + b"two"[..].into(), + ], + sp_core::storage::StateVersion::V1, + ).as_ref().to_vec() + } + + fn test_offchain_index_set() { + sp_io::offchain_index::set(b"k", b"v"); + } + + fn test_offchain_local_storage() -> bool { + let kind = sp_core::offchain::StorageKind::PERSISTENT; + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); + sp_io::offchain::local_storage_set(kind, b"test", b"asd"); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec())); + + let res = sp_io::offchain::local_storage_compare_and_set( + kind, + b"test", + Some(b"asd".to_vec()), + b"", + ); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); + res + } + + fn test_offchain_local_storage_with_none() { + let kind = sp_core::offchain::StorageKind::PERSISTENT; + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); + + let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); + assert_eq!(res, true); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); + } + + fn test_offchain_http() -> bool { + use sp_core::offchain::HttpRequestStatus; + let run = || -> Option<()> { + let id = sp_io::offchain::http_request_start( + "POST", + "http://localhost:12345", + &[], + ).ok()?; + sp_io::offchain::http_request_add_header(id, "X-Auth", 
"test").ok()?; + sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; + sp_io::offchain::http_request_write_body(id, &[], None).ok()?; + let status = sp_io::offchain::http_response_wait(&[id], None); + assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); + let headers = sp_io::offchain::http_response_headers(id); + assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); + let mut buffer = vec![0; 64]; + let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; + assert_eq!(read, 3); + assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); + let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; + assert_eq!(read, 0); + + Some(()) + }; + + run().is_some() + } + + fn test_enter_span() -> u64 { + wasm_tracing::enter_span(Default::default()) + } + + fn test_exit_span(span_id: u64) { + wasm_tracing::exit(span_id) + } + + fn test_nested_spans() { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + wasm_tracing::exit(span_id); + } + wasm_tracing::exit(span_id); + } + + fn returns_mutable_static() -> u64 { + unsafe { + MUTABLE_STATIC += 1; + MUTABLE_STATIC + } + } + + fn returns_mutable_static_bss() -> u64 { + unsafe { + MUTABLE_STATIC_BSS += 1; + MUTABLE_STATIC_BSS + } + } + + fn allocates_huge_stack_array(trap: bool) -> Vec { + // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). + // This will just decrease (stacks in wasm32-u-u grow downwards) the stack + // pointer. This won't trap on the current compilers. + let mut data = [0u8; 1024 * 768]; + + // Then make sure we actually write something to it. + // + // If: + // 1. the stack area is placed at the beginning of the linear memory space, and + // 2. the stack pointer points to out-of-bounds area, and + // 3. a write is performed around the current stack pointer. + // + // then a trap should happen. + // + for (i, v) in data.iter_mut().enumerate() { + *v = i as u8; // deliberate truncation + } + + if trap { + // There is a small chance of this to be pulled up in theory. In practice + // the probability of that is rather low. + panic!() + } + + data.to_vec() + } + + // Check that the heap at `heap_base + offset` don't contains the test message. + // After the check succeeds the test message is written into the heap. + // + // It is expected that the given pointer is not allocated. + fn check_and_set_in_heap(heap_base: u32, offset: u32) { + let test_message = b"Hello invalid heap memory"; + let ptr = (heap_base + offset) as *mut u8; + + let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; + + assert_ne!(test_message, message_slice); + message_slice.copy_from_slice(test_message); + } + + fn test_spawn() { + let data = vec![1u8, 2u8]; + let data_new = sp_tasks::spawn(tasks::incrementer, data).join(); + + assert_eq!(data_new, vec![2u8, 3u8]); + } + + fn test_nested_spawn() { + let data = vec![7u8, 13u8]; + let data_new = sp_tasks::spawn(tasks::parallel_incrementer, data).join(); + + assert_eq!(data_new, vec![10u8, 16u8]); + } + + fn test_panic_in_spawned() { + sp_tasks::spawn(tasks::panicker, vec![]).join(); + } fn test_return_i8() -> i8 { -66 @@ -351,6 +351,11 @@ sp_core::wasm_export_functions! 
{ fn test_unreachable_intrinsic() { core::arch::wasm32::unreachable() } + + fn test_return_value() -> u64 { + // Mainly a test that the macro is working when we have a return statement here. + return 1234; + } } #[cfg(not(feature = "std"))] diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 8ce0b56da2389..ba0c93630cf6c 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -918,3 +918,14 @@ fn unreachable_intrinsic(wasm_method: WasmExecutionMethod) { error => panic!("unexpected error: {:?}", error), } } + +test_wasm_execution!(return_value); +fn return_value(wasm_method: WasmExecutionMethod) { + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + + assert_eq!( + call_in_wasm("test_return_value", &[], wasm_method, &mut ext).unwrap(), + (1234u64).encode() + ); +} diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index d5ca1dc45fa0c..d3fa3fc86fce5 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -90,7 +90,7 @@ macro_rules! wasm_export_functions { &mut &input[..], ).expect("Input data is correctly encoded"); - $( $fn_impl )* + (|| { $( $fn_impl )* })() } $crate::to_substrate_wasm_fn_return_value(&()) @@ -118,7 +118,7 @@ macro_rules! wasm_export_functions { &mut &input[..], ).expect("Input data is correctly encoded"); - $( $fn_impl )* + (|| { $( $fn_impl )* })() }; $crate::to_substrate_wasm_fn_return_value(&output) From 2b54771bf923b1f220b6997e710d7169f3dc613b Mon Sep 17 00:00:00 2001 From: Sergejs Kostjucenko <85877331+sergejparity@users.noreply.github.com> Date: Thu, 25 Aug 2022 17:52:58 +0300 Subject: [PATCH 062/348] pin gha versions (#12100) --- .github/workflows/auto-label-issues.yml | 4 ++-- .github/workflows/auto-label-prs.yml | 4 ++-- .github/workflows/md-link-check.yml | 2 +- .github/workflows/monthly-tag.yml | 2 +- .github/workflows/release-tagging.yml | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/auto-label-issues.yml b/.github/workflows/auto-label-issues.yml index cd889b5941989..2633bf55f0789 100644 --- a/.github/workflows/auto-label-issues.yml +++ b/.github/workflows/auto-label-issues.yml @@ -8,10 +8,10 @@ on: jobs: label-new-issues: - runs-on: ubuntu-latest + runs-on: ubuntu-latest steps: - name: Label drafts - uses: andymckay/labeler@master + uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 if: github.event.issue.author_association == 'NONE' with: add-labels: 'Z0-unconfirmed' diff --git a/.github/workflows/auto-label-prs.yml b/.github/workflows/auto-label-prs.yml index f0b8e9b343e29..50539b80b98b7 100644 --- a/.github/workflows/auto-label-prs.yml +++ b/.github/workflows/auto-label-prs.yml @@ -8,13 +8,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Label drafts - uses: andymckay/labeler@master + uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 if: github.event.pull_request.draft == true with: add-labels: 'A3-inprogress' remove-labels: 'A0-pleasereview' - name: Label PRs - uses: andymckay/labeler@master + uses: andymckay/labeler@e6c4322d0397f3240f0e7e30a33b5c5df2d39e90 # 1.0.4 if: github.event.pull_request.draft == false && ! 
contains(github.event.pull_request.labels.*.name, 'A2-insubstantial') with: add-labels: 'A0-pleasereview' diff --git a/.github/workflows/md-link-check.yml b/.github/workflows/md-link-check.yml index 4b6b9166be2df..e1387f6da13f7 100644 --- a/.github/workflows/md-link-check.yml +++ b/.github/workflows/md-link-check.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: gaurav-nelson/github-action-markdown-link-check@9710f0fec812ce0a3b98bef4c9d842fc1f39d976 # v1.0.13 + - uses: gaurav-nelson/github-action-markdown-link-check@0a51127e9955b855a9bbfa1ff5577f1d1338c9a5 # 1.0.14 with: use-quiet-mode: 'yes' config-file: '.github/workflows/mlc_config.json' diff --git a/.github/workflows/monthly-tag.yml b/.github/workflows/monthly-tag.yml index ade8bd4717c39..6bec03d27e7be 100644 --- a/.github/workflows/monthly-tag.yml +++ b/.github/workflows/monthly-tag.yml @@ -32,7 +32,7 @@ jobs: ./scripts/ci/github/generate_changelog.sh ${{ steps.tags.outputs.old }} >> Changelog.md - name: Release snapshot id: release-snapshot - uses: actions/create-release@latest + uses: actions/create-release@0cb9c9b65d5d1901c1f53e5e66eaf4afd303e70e # v1.1.4 latest version, repo archived env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/release-tagging.yml b/.github/workflows/release-tagging.yml index c55fc13a626e0..f7fa913c69709 100644 --- a/.github/workflows/release-tagging.yml +++ b/.github/workflows/release-tagging.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Set Git tag - uses: s3krit/walking-tag-action@master + uses: s3krit/walking-tag-action@d04f7a53b72ceda4e20283736ce3627011275178 # latest version from master with: TAG_NAME: release TAG_MESSAGE: Latest release From 4262c63fecf5de9a459775d772329828bcb549c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 Aug 2022 23:52:19 +0200 Subject: [PATCH 063/348] Bump parking_lot from 0.12.0 to 0.12.1 (#12098) Bumps [parking_lot](https://github.com/Amanieu/parking_lot) from 0.12.0 to 0.12.1. - [Release notes](https://github.com/Amanieu/parking_lot/releases) - [Changelog](https://github.com/Amanieu/parking_lot/blob/master/CHANGELOG.md) - [Commits](https://github.com/Amanieu/parking_lot/compare/0.12.0...0.12.1) --- updated-dependencies: - dependency-name: parking_lot dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 100 +++++++++--------- client/api/Cargo.toml | 2 +- client/basic-authorship/Cargo.toml | 2 +- client/beefy/Cargo.toml | 2 +- client/beefy/rpc/Cargo.toml | 2 +- client/consensus/aura/Cargo.toml | 2 +- client/consensus/babe/Cargo.toml | 2 +- client/consensus/common/Cargo.toml | 2 +- client/consensus/pow/Cargo.toml | 2 +- client/db/Cargo.toml | 2 +- client/executor/Cargo.toml | 2 +- client/finality-grandpa/Cargo.toml | 2 +- client/keystore/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- client/network/test/Cargo.toml | 2 +- client/offchain/Cargo.toml | 2 +- client/rpc-api/Cargo.toml | 2 +- client/rpc/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/service/test/Cargo.toml | 2 +- client/state-db/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- client/tracing/Cargo.toml | 2 +- client/transaction-pool/Cargo.toml | 2 +- client/utils/Cargo.toml | 2 +- .../election-provider-multi-phase/Cargo.toml | 2 +- frame/state-trie-migration/Cargo.toml | 2 +- primitives/blockchain/Cargo.toml | 2 +- primitives/core/Cargo.toml | 2 +- primitives/database/Cargo.toml | 2 +- primitives/io/Cargo.toml | 2 +- primitives/keystore/Cargo.toml | 2 +- primitives/state-machine/Cargo.toml | 2 +- primitives/trie/Cargo.toml | 2 +- .../runtime/transaction-pool/Cargo.toml | 2 +- 35 files changed, 84 insertions(+), 84 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ed0d5777d94fe..f82cc5d61671e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -448,7 +448,7 @@ dependencies = [ "hex", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -491,7 +491,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-rpc", "sc-utils", "serde", @@ -2037,7 +2037,7 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.4", "scale-info", ] @@ -3214,7 +3214,7 @@ dependencies = [ "hyper", "jsonrpsee-types", "lazy_static", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.4", "rustc-hash", "serde", @@ -3453,7 +3453,7 @@ checksum = "ece7e668abd21387aeb6628130a6f4c802787f014fa46bc83221448322250357" dependencies = [ "kvdb", "parity-util-mem", - "parking_lot 0.12.0", + "parking_lot 0.12.1", ] [[package]] @@ -3468,7 +3468,7 @@ dependencies = [ "num_cpus", "owning_ref", "parity-util-mem", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "regex", "rocksdb", "smallvec", @@ -3574,7 +3574,7 @@ dependencies = [ "libp2p-websocket", "libp2p-yamux", "multiaddr", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pin-project", "rand 0.7.3", "smallvec", @@ -3619,7 +3619,7 @@ dependencies = [ "multiaddr", "multihash", "multistream-select", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pin-project", "prost", "prost-build", @@ -3655,7 +3655,7 @@ dependencies = [ "futures", "libp2p-core", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "smallvec", "trust-dns-resolver", ] @@ -3804,7 +3804,7 @@ dependencies = [ "libp2p-core", "log", "nohash-hasher", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "smallvec", "unsigned-varint", @@ -4030,7 +4030,7 @@ dependencies = [ "futures-rustls", "libp2p-core", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "quicksink", "rw-stream-sink", "soketto", @@ -4046,7 +4046,7 @@ checksum = 
"c6dea686217a06072033dc025631932810e2f6ad784e4fafa42e27d311c7a81c" dependencies = [ "futures", "libp2p-core", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "thiserror", "yamux", ] @@ -5597,7 +5597,7 @@ dependencies = [ "pallet-balances", "pallet-election-provider-support-benchmarking", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "scale-info", "sp-arithmetic", @@ -6295,7 +6295,7 @@ dependencies = [ "log", "pallet-balances", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "remote-externalities", "scale-info", "serde", @@ -6586,7 +6586,7 @@ dependencies = [ "hashbrown 0.12.3", "impl-trait-for-tuples", "parity-util-mem-derive", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "primitive-types", "smallvec", "winapi", @@ -6637,9 +6637,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core 0.9.1", @@ -7762,7 +7762,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -7869,7 +7869,7 @@ dependencies = [ "hash-db", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-executor", "sc-transaction-pool-api", "sc-utils", @@ -7904,7 +7904,7 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "quickcheck", "rand 0.8.4", "sc-client-api", @@ -7930,7 +7930,7 @@ dependencies = [ "futures-timer", "libp2p", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-client-api", "sc-utils", "serde", @@ -7953,7 +7953,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -7995,7 +7995,7 @@ dependencies = [ "num-rational 0.2.4", "num-traits", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "rand_chacha 0.2.2", "sc-block-builder", @@ -8117,7 +8117,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-client-api", "sc-consensus", "sp-api", @@ -8178,7 +8178,7 @@ dependencies = [ "lru", "num_cpus", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "paste 1.0.6", "regex", "sc-executor-common", @@ -8277,7 +8277,7 @@ dependencies = [ "hex", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.4", "sc-block-builder", "sc-chain-spec", @@ -8357,7 +8357,7 @@ version = "4.0.0-dev" dependencies = [ "async-trait", "hex", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "serde_json", "sp-application-crypto", "sp-core", @@ -8390,7 +8390,7 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pin-project", "prost", "prost-build", @@ -8526,7 +8526,7 @@ dependencies = [ "futures-timer", "libp2p", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8562,7 +8562,7 @@ dependencies = [ "num_cpus", "once_cell", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8617,7 +8617,7 @@ dependencies = [ "lazy_static", "log", 
"parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -8652,7 +8652,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-chain-spec", "sc-transaction-pool-api", "scale-info", @@ -8707,7 +8707,7 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pin-project", "rand 0.7.3", "sc-block-builder", @@ -8771,7 +8771,7 @@ dependencies = [ "hex-literal", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-block-builder", "sc-client-api", "sc-client-db", @@ -8806,7 +8806,7 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-client-api", "sp-core", ] @@ -8855,7 +8855,7 @@ dependencies = [ "futures", "libp2p", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pin-project", "rand 0.7.3", "serde", @@ -8876,7 +8876,7 @@ dependencies = [ "libc", "log", "once_cell", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "regex", "rustc-hash", "sc-client-api", @@ -8918,7 +8918,7 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-block-builder", "sc-client-api", "sc-transaction-pool-api", @@ -8958,7 +8958,7 @@ dependencies = [ "futures-timer", "lazy_static", "log", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "prometheus", "tokio-test", ] @@ -9543,7 +9543,7 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sp-api", "sp-consensus", "sp-database", @@ -9670,7 +9670,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "parity-util-mem", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "primitive-types", "rand 0.7.3", "regex", @@ -9724,7 +9724,7 @@ name = "sp-database" version = "4.0.0-dev" dependencies = [ "kvdb", - "parking_lot 0.12.0", + "parking_lot 0.12.1", ] [[package]] @@ -9787,7 +9787,7 @@ dependencies = [ "libsecp256k1", "log", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "secp256k1", "sp-core", "sp-externalities", @@ -9820,7 +9820,7 @@ dependencies = [ "futures", "merlin", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.7.3", "rand_chacha 0.2.2", "schnorrkel", @@ -10065,7 +10065,7 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pretty_assertions", "rand 0.7.3", "smallvec", @@ -10185,7 +10185,7 @@ dependencies = [ "memory-db", "nohash-hasher", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "scale-info", "sp-core", "sp-runtime", @@ -10522,7 +10522,7 @@ version = "2.0.0" dependencies = [ "futures", "parity-scale-codec", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "sc-transaction-pool", "sc-transaction-pool-api", "sp-blockchain", @@ -10808,7 +10808,7 @@ dependencies = [ "mio", "num_cpus", "once_cell", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "pin-project-lite 0.2.6", "signal-hook-registry", "socket2", @@ -11070,7 +11070,7 @@ dependencies = [ "lazy_static", "log", "lru-cache", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "resolv-conf", "smallvec", "thiserror", @@ -12137,7 +12137,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.0", + "parking_lot 0.12.1", "rand 0.8.4", "static_assertions", ] diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index c8e9dc7482823..8cb3ad565afb0 100644 
--- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -21,7 +21,7 @@ fnv = "1.0.6" futures = "0.3.21" hash-db = { version = "0.15.2", default-features = false } log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-executor = { version = "0.10.0-dev", path = "../executor" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index a2ccba486ae29..43493ada051f8 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -31,6 +31,6 @@ sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] -parking_lot = "0.12.0" +parking_lot = "0.12.1" sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index e219420959c9f..cc29fb0d809b3 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -16,7 +16,7 @@ futures = "0.3" futures-timer = "3.0.1" hex = "0.4.2" log = "0.4" -parking_lot = "0.12.0" +parking_lot = "0.12.1" thiserror = "1.0" wasm-timer = "0.2.5" beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy" } diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml index 46ee7640d710a..744dbb72ff277 100644 --- a/client/beefy/rpc/Cargo.toml +++ b/client/beefy/rpc/Cargo.toml @@ -13,7 +13,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive futures = "0.3.21" jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } log = "0.4" -parking_lot = "0.12.0" +parking_lot = "0.12.1" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0" beefy-gadget = { version = "4.0.0-dev", path = "../." 
} diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 69499fa346e31..7f3164a6ae5be 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -37,7 +37,7 @@ sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -parking_lot = "0.12.0" +parking_lot = "0.12.1" tempfile = "3.1.0" sc-keystore = { version = "4.0.0-dev", path = "../../keystore" } sc-network = { version = "0.10.0-dev", path = "../../network" } diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 59d7854619fc0..f8c057344db0f 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -24,7 +24,7 @@ merlin = "2.0" num-bigint = "0.2.3" num-rational = "0.2.2" num-traits = "0.2.8" -parking_lot = "0.12.0" +parking_lot = "0.12.1" rand = "0.7.2" schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } serde = { version = "1.0.136", features = ["derive"] } diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 12b630f36b89b..b713059074173 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -18,7 +18,7 @@ futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" serde = { version = "1.0", features = ["derive"] } thiserror = "1.0.30" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 7e9b43fac8a57..bc0ab55d4b492 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" thiserror = "1.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 144791fda5c3c..7f564ae642433 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -23,7 +23,7 @@ kvdb-rocksdb = { version = "0.15.2", optional = true } linked-hash-map = "0.5.4" log = "0.4.17" parity-db = "0.3.16" -parking_lot = "0.12.0" +parking_lot = "0.12.1" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-state-db = { version = "0.10.0-dev", path = "../state-db" } sp-arithmetic = { version = "5.0.0", path = "../../primitives/arithmetic" } diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 566ed0a50fc0f..6a4b2a44a2d44 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] lazy_static = "1.4.0" lru = "0.7.5" -parking_lot = "0.12.0" +parking_lot = "0.12.1" tracing = "0.1.29" wasmi = "0.9.1" diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index a5f20b9f3261d..ed29a9a11881d 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -23,7 +23,7 @@ futures-timer = "3.0.1" hex = "0.4.2" log = "0.4.17" parity-scale-codec = { version = "3.0.0", features = 
["derive"] } -parking_lot = "0.12.0" +parking_lot = "0.12.1" rand = "0.8.4" serde_json = "1.0.79" thiserror = "1.0" diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index be4adba2a52fe..cc6a597da72c7 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.50" hex = "0.4.0" -parking_lot = "0.12.0" +parking_lot = "0.12.1" serde_json = "1.0.79" thiserror = "1.0" sp-application-crypto = { version = "6.0.0", path = "../../primitives/application-crypto" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 2742262b57e40..0580e80d629f9 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -34,7 +34,7 @@ linked_hash_set = "0.1.3" linked-hash-map = "0.5.4" log = "0.4.17" lru = "0.7.5" -parking_lot = "0.12.0" +parking_lot = "0.12.1" pin-project = "1.0.10" prost = "0.10" rand = "0.7.2" diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 1aa6ebd8bf357..ecdf146696253 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -19,7 +19,7 @@ futures = "0.3.21" futures-timer = "3.0.1" libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" rand = "0.7.2" sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 89b77dbb5fea7..ff97f29961155 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -24,7 +24,7 @@ hyper-rustls = { version = "0.23.0", features = ["http2"] } libp2p = { version = "0.46.1", default-features = false } num_cpus = "1.13" once_cell = "1.8" -parking_lot = "0.12.0" +parking_lot = "0.12.1" rand = "0.7.2" threadpool = "1.7" tracing = "0.1.29" diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 101d558663f9f..e9cd08923d020 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.79" diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 2381323d25a24..205df580139b8 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -19,7 +19,7 @@ hash-db = { version = "0.15.2", default-features = false } jsonrpsee = { version = "0.15.1", features = ["server"] } lazy_static = { version = "1.4.0", optional = true } log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" serde_json = "1.0.79" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index e8ddf40a0ae03..e3fe30b08d815 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -26,7 +26,7 @@ jsonrpsee = { version = "0.15.1", features = ["server"] } thiserror = "1.0.30" futures = "0.3.21" rand = "0.7.3" -parking_lot = "0.12.0" +parking_lot = "0.12.1" log = "0.4.17" futures-timer = "3.0.1" exit-future = "0.2.0" diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 
01c3ee2348ef5..92df5381c202b 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -18,7 +18,7 @@ hex = "0.4" hex-literal = "0.3.4" log = "0.4.17" parity-scale-codec = "3.0.0" -parking_lot = "0.12.0" +parking_lot = "0.12.1" tempfile = "3.1.0" tokio = { version = "1.17.0", features = ["time"] } sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 08856ce3f48a9..4243968ec79b4 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -17,6 +17,6 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive log = "0.4.17" parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" -parking_lot = "0.12.0" +parking_lot = "0.12.1" sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 0d966dacfc2c8..040e7b5dc8ca3 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ chrono = "0.4.19" futures = "0.3.21" libp2p = { version = "0.46.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.17" -parking_lot = "0.12.0" +parking_lot = "0.12.1" pin-project = "1.0.10" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 476e03ee741f3..8bc5d770c9c5a 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -20,7 +20,7 @@ lazy_static = "1.4.0" libc = "0.2.121" log = { version = "0.4.17" } once_cell = "1.8.0" -parking_lot = "0.12.0" +parking_lot = "0.12.1" regex = "1.5.5" rustc-hash = "1.1.0" serde = "1.0.136" diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 3fdcde48d9e22..9aa05694b6619 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -19,7 +19,7 @@ futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = "0.4.17" parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } -parking_lot = "0.12.0" +parking_lot = "0.12.1" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0.30" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/utils/Cargo.toml b/client/utils/Cargo.toml index 2df04be7fb4af..082ac3b55e80d 100644 --- a/client/utils/Cargo.toml +++ b/client/utils/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.21" futures-timer = "3.0.2" lazy_static = "1.4.0" log = "0.4" -parking_lot = "0.12.0" +parking_lot = "0.12.1" prometheus = { version = "0.13.0", default-features = false } [features] diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index bbb0adf02e366..aea3b5cd6d2b0 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -40,7 +40,7 @@ rand = { version = "0.7.3", default-features = false, features = ["alloc", "smal strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true } [dev-dependencies] -parking_lot = "0.12.0" +parking_lot = "0.12.1" rand = { version = "0.7.3" } sp-core = { version = "6.0.0", default-features = false, path = 
"../../primitives/core" } sp-io = { version = "6.0.0", path = "../../primitives/io" } diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index 5a93701141da9..f8a3b1ba3aab4 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -30,7 +30,7 @@ sp-std = { default-features = false, path = "../../primitives/std" } substrate-state-trie-migration-rpc = { optional = true, path = "../../utils/frame/rpc/state-trie-migration-rpc" } [dev-dependencies] -parking_lot = "0.12.0" +parking_lot = "0.12.1" tokio = { version = "1.17.0", features = ["macros"] } pallet-balances = { path = "../balances" } sp-tracing = { path = "../../primitives/tracing" } diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index a5137606e16a2..4d4718b4038d4 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = futures = "0.3.21" log = "0.4.17" lru = "0.7.5" -parking_lot = "0.12.0" +parking_lot = "0.12.1" thiserror = "1.0.30" sp-api = { version = "4.0.0-dev", path = "../api" } sp-consensus = { version = "0.10.0-dev", path = "../consensus/common" } diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 2cdfd04e942d7..d591657ee4bd9 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -35,7 +35,7 @@ num-traits = { version = "0.2.8", default-features = false } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } -parking_lot = { version = "0.12.0", optional = true } +parking_lot = { version = "0.12.1", optional = true } sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } sp-storage = { version = "6.0.0", default-features = false, path = "../storage" } diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index 5aa3d9a239aa3..a3f09536f4f5c 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -12,4 +12,4 @@ readme = "README.md" [dependencies] kvdb = "0.11.0" -parking_lot = "0.12.0" +parking_lot = "0.12.1" diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index fda6b7fdd11dd..2e271d3949dee 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -30,7 +30,7 @@ sp-externalities = { version = "0.12.0", default-features = false, path = "../ex sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing" } log = { version = "0.4.17", optional = true } futures = { version = "0.3.21", features = ["thread-pool"], optional = true } -parking_lot = { version = "0.12.0", optional = true } +parking_lot = { version = "0.12.1", optional = true } secp256k1 = { version = "0.24.0", features = ["recovery", "global-context"], optional = true } tracing = { version = "0.1.29", default-features = false } tracing-core = { version = "0.1.28", default-features = false} diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index 3c3b7933c50da..ed12a5cfcee7f 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -17,7 +17,7 @@ async-trait = "0.1.50" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } futures = "0.3.21" merlin 
= { version = "2.0", default-features = false } -parking_lot = { version = "0.12.0", default-features = false } +parking_lot = { version = "0.12.1", default-features = false } schnorrkel = { version = "0.9.1", default-features = false, features = ["preaudit_deprecated", "u64_backend"] } serde = { version = "1.0", optional = true } thiserror = "1.0" diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 00acca7e86788..5de6eb7112eea 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hash-db = { version = "0.15.2", default-features = false } log = { version = "0.4.17", optional = true } num-traits = { version = "0.2.8", default-features = false } -parking_lot = { version = "0.12.0", optional = true } +parking_lot = { version = "0.12.1", optional = true } rand = { version = "0.7.2", optional = true } smallvec = "1.8.0" thiserror = { version = "1.0.30", optional = true } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 84b97a9f88da8..291615c9354c1 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -26,7 +26,7 @@ lazy_static = { version = "1.4.0", optional = true } lru = { version = "0.7.5", optional = true } memory-db = { version = "0.29.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } -parking_lot = { version = "0.12.0", optional = true } +parking_lot = { version = "0.12.1", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } thiserror = { version = "1.0.30", optional = true } tracing = { version = "0.1.29", optional = true } diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index 98378309ad9c1..fa6dde5b5b57e 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" -parking_lot = "0.12.0" +parking_lot = "0.12.1" thiserror = "1.0" sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } From a5e28701dd9eb8fdbc315395609bf92cd8a5e731 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Aug 2022 09:31:56 +0200 Subject: [PATCH 064/348] Bump async-trait from 0.1.51 to 0.1.57 (#11948) Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.51 to 0.1.57. - [Release notes](https://github.com/dtolnay/async-trait/releases) - [Commits](https://github.com/dtolnay/async-trait/compare/0.1.51...0.1.57) --- updated-dependencies: - dependency-name: async-trait dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- client/beefy/Cargo.toml | 2 +- client/consensus/aura/Cargo.toml | 2 +- client/consensus/babe/Cargo.toml | 2 +- client/consensus/common/Cargo.toml | 2 +- client/consensus/manual-seal/Cargo.toml | 2 +- client/consensus/pow/Cargo.toml | 2 +- client/consensus/slots/Cargo.toml | 2 +- client/finality-grandpa/Cargo.toml | 2 +- client/keystore/Cargo.toml | 2 +- client/network/common/Cargo.toml | 2 +- client/network/test/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- primitives/authorship/Cargo.toml | 2 +- primitives/consensus/aura/Cargo.toml | 2 +- primitives/consensus/babe/Cargo.toml | 2 +- primitives/consensus/common/Cargo.toml | 2 +- primitives/inherents/Cargo.toml | 2 +- primitives/keystore/Cargo.toml | 2 +- primitives/timestamp/Cargo.toml | 2 +- primitives/transaction-storage-proof/Cargo.toml | 2 +- test-utils/client/Cargo.toml | 2 +- 22 files changed, 23 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f82cc5d61671e..0eec6698fae5e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -343,9 +343,9 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" dependencies = [ "proc-macro2", "quote", diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index cc29fb0d809b3..5cc2952e26ba5 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -9,7 +9,7 @@ description = "BEEFY Client gadget for substrate" homepage = "https://substrate.io" [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } fnv = "1.0.6" futures = "0.3" diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 7f3164a6ae5be..3fe9891e9a7ba 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" log = "0.4.17" diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index f8c057344db0f..e559ec165a793 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index b713059074173..180ad7c38008d 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.42" +async-trait = "0.1.57" futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" libp2p = { version = "0.46.1", default-features = false } diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 
83c156ef5667f..9c3bc5413317d 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } assert_matches = "1.3.0" -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" log = "0.4.17" diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index bc0ab55d4b492..4833786d2b990 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.1" diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 41a6c1ad5e641..208e31971257d 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" futures-timer = "3.0.1" diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index ed29a9a11881d..0155b4d4197d7 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ahash = "0.7.6" -async-trait = "0.1.50" +async-trait = "0.1.57" dyn-clone = "1.0" finality-grandpa = { version = "0.16.0", features = ["derive-codec"] } futures = "0.3.21" diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index cc6a597da72c7..2605a767f9c04 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" hex = "0.4.0" parking_lot = "0.12.1" serde_json = "1.0.79" diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 8f0702e05cac5..ea50c8b794ccb 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" bitflags = "1.3.2" bytes = "1" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index ecdf146696253..990129229a40f 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-std = "1.11.0" -async-trait = "0.1.50" +async-trait = "0.1.57" futures = "0.3.21" futures-timer = "3.0.1" libp2p = { version = "0.46.1", default-features = false } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index e3fe30b08d815..0c448b5e80b94 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -80,7 +80,7 @@ tracing-futures = { version = "0.2.4" } parity-util-mem = { version = "0.11.0", default-features = false, features = [ "primitive-types", ] } -async-trait = "0.1.50" +async-trait = "0.1.57" tokio = { version = "1.17.0", features = ["time", "rt-multi-thread", "parking_lot"] } tempfile = "3.1.0" 
directories = "4.0.1" diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index 714d0a2610312..3a8cb3f37cbd3 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../inherents" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index dbb67a27c5144..30f5c89650a78 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../api" } diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 736a78ab67b1a..049e511175867 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } merlin = { version = "2.0", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 973cb3e410e0d..d160cd118998c 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.42" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index c7e10be32fe28..628f938880e9f 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { version = "1.0.30", optional = true } diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index ed12a5cfcee7f..cbb8a22ba4dd6 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } futures = 
"0.3.21" merlin = { version = "2.0", default-features = false } diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 42701f5ad3bf1..2e8f281cd7c7b 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } futures-timer = { version = "3.0.2", optional = true } log = { version = "0.4.17", optional = true } diff --git a/primitives/transaction-storage-proof/Cargo.toml b/primitives/transaction-storage-proof/Cargo.toml index a5a80528c6734..e916462675435 100644 --- a/primitives/transaction-storage-proof/Cargo.toml +++ b/primitives/transaction-storage-proof/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = { version = "0.1.50", optional = true } +async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index ce5ef2ffcc01a..28dbe29949660 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -12,7 +12,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1.50" +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" hex = "0.4" From 6dd5631aefb004e9704ce9244c4ef79f4fc9c002 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Aug 2022 09:32:20 +0200 Subject: [PATCH 065/348] Bump serde_json from 1.0.79 to 1.0.85 (#12079) Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.79 to 1.0.85. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.79...v1.0.85) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- bin/node/bench/Cargo.toml | 2 +- bin/node/cli/Cargo.toml | 2 +- client/beefy/rpc/Cargo.toml | 2 +- client/chain-spec/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/consensus/babe/rpc/Cargo.toml | 2 +- client/finality-grandpa/Cargo.toml | 2 +- client/keystore/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- client/rpc-api/Cargo.toml | 2 +- client/rpc-servers/Cargo.toml | 2 +- client/rpc/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/sync-state-rpc/Cargo.toml | 2 +- client/sysinfo/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- frame/merkle-mountain-range/rpc/Cargo.toml | 2 +- frame/support/Cargo.toml | 2 +- frame/transaction-payment/Cargo.toml | 2 +- frame/transaction-payment/asset-tx-payment/Cargo.toml | 2 +- primitives/rpc/Cargo.toml | 2 +- primitives/runtime/Cargo.toml | 2 +- primitives/serializer/Cargo.toml | 2 +- test-utils/client/Cargo.toml | 2 +- utils/frame/benchmarking-cli/Cargo.toml | 2 +- 27 files changed, 28 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0eec6698fae5e..64e84fe4710af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9175,9 +9175,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.79" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ "itoa 1.0.1", "ryu", diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 4d090c71a72e9..c7a747c30ed2e 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -18,7 +18,7 @@ sc-client-api = { version = "4.0.0-dev", path = "../../../client/api/" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machine" } serde = "1.0.136" -serde_json = "1.0.79" +serde_json = "1.0.85" derive_more = { version = "0.99.17", default-features = false, features = ["display"] } kvdb = "0.11.0" kvdb-rocksdb = "0.15.1" diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 89ca3ebb45576..d84b8491cb162 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -98,7 +98,7 @@ sc-cli = { version = "0.10.0-dev", optional = true, path = "../../../client/cli" frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/benchmarking-cli" } node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } -serde_json = "1.0.79" +serde_json = "1.0.85" [target.'cfg(any(target_arch="x86_64", target_arch="aarch64"))'.dependencies] node-executor = { version = "3.0.0-dev", path = "../executor", features = ["wasmtime"] } diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml index 744dbb72ff277..3ccf83c1f5106 100644 --- a/client/beefy/rpc/Cargo.toml +++ b/client/beefy/rpc/Cargo.toml @@ -24,7 +24,7 @@ sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" sc-rpc = { version = "4.0.0-dev", features = [ "test-helpers", ], 
path = "../../rpc" } diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index bd4ee548e296b..b38dba03d6b7f 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0" } impl-trait-for-tuples = "0.2.2" memmap2 = "0.5.0" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" sc-chain-spec-derive = { version = "4.0.0-dev", path = "./derive" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 5f4c00d06aea3..50d110d40eabd 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -26,7 +26,7 @@ rand = "0.7.3" regex = "1.5.5" rpassword = "7.0.0" serde = "1.0.136" -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0.30" tiny-bip39 = "0.8.2" tokio = { version = "1.17.0", features = ["signal", "rt-multi-thread", "parking_lot"] } diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 488036277d7b6..8433e3ac92e57 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -30,7 +30,7 @@ sp-keystore = { version = "0.12.0", path = "../../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" tempfile = "3.1.0" tokio = "1.17.0" sc-consensus = { version = "0.10.0-dev", path = "../../../consensus/common" } diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 0155b4d4197d7..5950b108ca4ab 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -25,7 +25,7 @@ log = "0.4.17" parity-scale-codec = { version = "3.0.0", features = ["derive"] } parking_lot = "0.12.1" rand = "0.8.4" -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 2605a767f9c04..9b7afba759c60 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = "0.1.57" hex = "0.4.0" parking_lot = "0.12.1" -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0" sp-application-crypto = { version = "6.0.0", path = "../../primitives/application-crypto" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 0580e80d629f9..82f77fa44450f 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -39,7 +39,7 @@ pin-project = "1.0.10" prost = "0.10" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" smallvec = "1.8.0" thiserror = "1.0" unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 510ebf7006155..d3d6e267f1768 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] futures = "0.3.21" libp2p = { version = "0.46.1", default-features = false } log = "0.4.17" -serde_json = 
"1.0.79" +serde_json = "1.0.85" wasm-timer = "0.2" sc-utils = { version = "4.0.0-dev", path = "../utils" } diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index e9cd08923d020..7c4057154bdb0 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -19,7 +19,7 @@ log = "0.4.17" parking_lot = "0.12.1" scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0" sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index 8b40972527be8..ef2e6bec4cdb0 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -16,6 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] futures = "0.3.21" jsonrpsee = { version = "0.15.1", features = ["server"] } log = "0.4.17" -serde_json = "1.0.79" +serde_json = "1.0.85" tokio = { version = "1.17.0", features = ["parking_lot"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 205df580139b8..4131fecaf510e 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -20,7 +20,7 @@ jsonrpsee = { version = "0.15.1", features = ["server"] } lazy_static = { version = "1.4.0", optional = true } log = "0.4.17" parking_lot = "0.12.1" -serde_json = "1.0.79" +serde_json = "1.0.85" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 0c448b5e80b94..0acdbb1b1b63e 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -33,7 +33,7 @@ exit-future = "0.2.0" pin-project = "1.0.10" hash-db = "0.15.2" serde = "1.0.136" -serde_json = "1.0.79" +serde_json = "1.0.85" sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } sp-trie = { version = "6.0.0", path = "../../primitives/trie" } diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index d02637fcf884b..12ffc0c2e8d7a 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0.30" sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } diff --git a/client/sysinfo/Cargo.toml b/client/sysinfo/Cargo.toml index 0973631a3cc24..1e96f69a92dfe 100644 --- a/client/sysinfo/Cargo.toml +++ b/client/sysinfo/Cargo.toml @@ -21,7 +21,7 @@ rand = "0.7.3" rand_pcg = "0.2.1" regex = "1" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-io = { version = "6.0.0", path = "../../primitives/io" } diff --git a/client/telemetry/Cargo.toml 
b/client/telemetry/Cargo.toml index 040e7b5dc8ca3..4be7c186720fc 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -22,6 +22,6 @@ parking_lot = "0.12.1" pin-project = "1.0.10" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" +serde_json = "1.0.85" thiserror = "1.0.30" wasm-timer = "0.2.5" diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml index c7d9662904747..3da6678a39bf2 100644 --- a/frame/merkle-mountain-range/rpc/Cargo.toml +++ b/frame/merkle-mountain-range/rpc/Cargo.toml @@ -23,4 +23,4 @@ sp-mmr-primitives = { version = "4.0.0-dev", path = "../../../primitives/merkle- sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 1b6488bbd4712..6f0831b751b45 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -39,7 +39,7 @@ sp-core-hashing-proc-macro = { version = "5.0.0", path = "../../primitives/core/ k256 = { version = "0.10.4", default-features = false, features = ["ecdsa"] } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" assert_matches = "1.3.0" pretty_assertions = "1.2.1" frame-system = { version = "4.0.0-dev", path = "../system" } diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 51aeeabe99db8..9150f87c7175a 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -26,7 +26,7 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primit sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] diff --git a/frame/transaction-payment/asset-tx-payment/Cargo.toml b/frame/transaction-payment/asset-tx-payment/Cargo.toml index 2d4da250212f2..de9772d885529 100644 --- a/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -29,7 +29,7 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" serde = { version = "1.0.136", optional = true } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" sp-storage = { version = "6.0.0", default-features = false, path = "../../../primitives/storage" } diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index 335eb6d6c9a0e..f4a4fe12f6c47 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -18,4 +18,4 @@ serde = { version = "1.0.136", features = ["derive"] } sp-core = { version = "6.0.0", path = "../core" } [dev-dependencies] -serde_json = "1.0.79" +serde_json = "1.0.85" diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 1493aa2324f56..9e4b36c584e91 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -32,7 +32,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] rand = "0.7.2" -serde_json = "1.0.79" +serde_json = "1.0.85" zstd = { version = "0.11.2", default-features = false } sp-api = { version = "4.0.0-dev", path = "../api" } sp-state-machine = { version = "0.12.0", path = "../state-machine" } diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index c81f1cd10a824..585e4b4e0dc20 100644 --- 
a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -15,4 +15,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = "1.0.136" -serde_json = "1.0.79" +serde_json = "1.0.85" diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 28dbe29949660..f8a1f437d45e1 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" hex = "0.4" serde = "1.0.136" -serde_json = "1.0.79" +serde_json = "1.0.85" sc-client-api = { version = "4.0.0-dev", path = "../../client/api" } sc-client-db = { version = "0.10.0-dev", default-features = false, features = [ "test-helpers", diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 31b8f1332a653..4f9d703544096 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -30,7 +30,7 @@ memory-db = "0.29.0" rand = { version = "0.8.4", features = ["small_rng"] } rand_pcg = "0.3.1" serde = "1.0.136" -serde_json = "1.0.79" +serde_json = "1.0.85" serde_nanos = "0.1.2" tempfile = "3.2.0" thiserror = "1.0.30" From ea4f532cffde1b90651d8f1be13a10c3721dd3d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Sat, 27 Aug 2022 06:56:24 +0200 Subject: [PATCH 066/348] `try-runtime` - support all state versions (#12089) * Add argument * Apply * More verbose parsing * fmt * fmt again * Invalid state version * reuse parsing * type mismatch --- .../try-runtime/cli/src/commands/execute_block.rs | 3 ++- .../try-runtime/cli/src/commands/follow_chain.rs | 12 +++++++----- .../try-runtime/cli/src/commands/offchain_worker.rs | 2 +- .../cli/src/commands/on_runtime_upgrade.rs | 2 +- utils/frame/try-runtime/cli/src/lib.rs | 5 +++++ utils/frame/try-runtime/cli/src/parse.rs | 9 +++++++++ 6 files changed, 25 insertions(+), 8 deletions(-) diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 204acd879312f..aee3c34a1e0e8 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -139,7 +139,8 @@ where .state .builder::()? // make sure the state is being build with the parent hash, if it is online. - .overwrite_online_at(parent_hash.to_owned()); + .overwrite_online_at(parent_hash.to_owned()) + .state_version(shared.state_version); let builder = if command.overwrite_wasm_code { log::info!( diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index e2e6bd7244945..b6a11699a3d72 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -102,11 +102,13 @@ where // create an ext at the state of this block, whatever is the first subscription event. 
if maybe_state_ext.is_none() { - let builder = Builder::::new().mode(Mode::Online(OnlineConfig { - transport: command.uri.clone().into(), - at: Some(*header.parent_hash()), - ..Default::default() - })); + let builder = Builder::::new() + .mode(Mode::Online(OnlineConfig { + transport: command.uri.clone().into(), + at: Some(*header.parent_hash()), + ..Default::default() + })) + .state_version(shared.state_version); let new_ext = builder .inject_hashed_key_value(&[(code_key.clone(), code.clone())]) diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index 50780f4513b2f..11ceb0a81cf37 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -128,7 +128,7 @@ where ); let ext = { - let builder = command.state.builder::()?; + let builder = command.state.builder::()?.state_version(shared.state_version); let builder = if command.overwrite_wasm_code { log::info!( diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 616498da02497..9bc6d32d4762a 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -52,7 +52,7 @@ where let execution = shared.execution; let ext = { - let builder = command.state.builder::()?; + let builder = command.state.builder::()?.state_version(shared.state_version); let (code_key, code) = extract_code(&config.chain_spec)?; builder.inject_hashed_key_value(&[(code_key, code)]).build().await? }; diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index be7ec923a55bc..9013da95f1722 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -294,6 +294,7 @@ use sp_runtime::{ DeserializeOwned, }; use sp_state_machine::{OverlayedChanges, StateMachine, TrieBackendBuilder}; +use sp_version::StateVersion; use std::{fmt::Debug, path::PathBuf, str::FromStr}; mod commands; @@ -421,6 +422,10 @@ pub struct SharedParams { /// When enabled, the spec name check will not panic, and instead only show a warning. #[clap(long)] pub no_spec_name_check: bool, + + /// State version that is used by the chain. + #[clap(long, default_value = "1", parse(try_from_str = parse::state_version))] + pub state_version: StateVersion, } /// Our `try-runtime` command. diff --git a/utils/frame/try-runtime/cli/src/parse.rs b/utils/frame/try-runtime/cli/src/parse.rs index 15a0251ebc34a..257a99566979f 100644 --- a/utils/frame/try-runtime/cli/src/parse.rs +++ b/utils/frame/try-runtime/cli/src/parse.rs @@ -17,6 +17,8 @@ //! 
Utils for parsing user input +use sp_version::StateVersion; + pub(crate) fn hash(block_hash: &str) -> Result { let (block_hash, offset) = if let Some(block_hash) = block_hash.strip_prefix("0x") { (block_hash, 2) @@ -42,3 +44,10 @@ pub(crate) fn url(s: &str) -> Result { Err("not a valid WS(S) url: must start with 'ws://' or 'wss://'") } } + +pub(crate) fn state_version(s: &str) -> Result { + s.parse::() + .map_err(|_| ()) + .and_then(StateVersion::try_from) + .map_err(|_| "Invalid state version.") +} From 4c83ee0a406939f1393d19f87675e1fbc49e328d Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Sun, 28 Aug 2022 13:55:54 +0100 Subject: [PATCH 067/348] Fix nomination pools pending rewards RPC (#12095) * Fix nomination pools pending rewards RPC * Fix node * Update frame/nomination-pools/src/lib.rs * Fix docs --- bin/node/runtime/src/lib.rs | 2 +- frame/nomination-pools/src/lib.rs | 18 ++++++---- frame/nomination-pools/src/tests.rs | 53 +++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+), 8 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index ff793a49b5ce6..0816fedff0347 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1842,7 +1842,7 @@ impl_runtime_apis! { impl pallet_nomination_pools_runtime_api::NominationPoolsApi for Runtime { fn pending_rewards(member_account: AccountId) -> Balance { - NominationPools::pending_rewards(member_account) + NominationPools::pending_rewards(member_account).unwrap_or_default() } } diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 62d0b3ddd55cb..40db4ac64851a 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -2156,16 +2156,20 @@ pub mod pallet { impl Pallet { /// Returns the pending rewards for the specified `member_account`. /// - /// In the case of error the function returns balance of zero. - pub fn pending_rewards(member_account: T::AccountId) -> BalanceOf { + /// In the case of error, `None` is returned. + pub fn pending_rewards(member_account: T::AccountId) -> Option> { if let Some(pool_member) = PoolMembers::::get(member_account) { - if let Some(reward_pool) = RewardPools::::get(pool_member.pool_id) { - return pool_member - .pending_rewards(reward_pool.last_recorded_reward_counter()) - .unwrap_or_default() + if let Some((reward_pool, bonded_pool)) = RewardPools::::get(pool_member.pool_id) + .zip(BondedPools::::get(pool_member.pool_id)) + { + let current_reward_counter = reward_pool + .current_reward_counter(pool_member.pool_id, bonded_pool.points) + .ok()?; + return pool_member.pending_rewards(current_reward_counter).ok() } } - BalanceOf::::default() + + None } /// The amount of bond that MUST REMAIN IN BONDED in ALL POOLS. 
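The behavioural change in the lib.rs hunk above is that the helper now derives the member's reward counter from the live pool state instead of the last recorded value, and signals any failure with `None` rather than a silent zero balance. A condensed sketch of that flow, with the generic parameters written out for readability (the surrounding `impl<T: Config> Pallet<T>` block and the restored generics are assumptions, everything else is taken from the hunk):

    // Sketch only: condensed from the diff above, not the pallet's verbatim code.
    pub fn pending_rewards(member_account: T::AccountId) -> Option<BalanceOf<T>> {
        let member = PoolMembers::<T>::get(member_account)?;
        let reward_pool = RewardPools::<T>::get(member.pool_id)?;
        let bonded_pool = BondedPools::<T>::get(member.pool_id)?;
        // The fix: compute the up-to-date reward counter from the pool's current
        // state instead of reusing `last_recorded_reward_counter()`, and surface
        // any arithmetic error as `None`.
        let current_reward_counter = reward_pool
            .current_reward_counter(member.pool_id, bonded_pool.points)
            .ok()?;
        member.pending_rewards(current_reward_counter).ok()
    }

Callers such as the runtime API implementation earlier in this patch then decide how to render the `None` case, for example with `.unwrap_or_default()`.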
diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 5aa8e97266e0d..705f8ce3a6449 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -1285,6 +1285,59 @@ mod claim_payout { }); } + #[test] + fn pending_rewards_per_member_works() { + ExtBuilder::default().build_and_execute(|| { + let ed = Balances::minimum_balance(); + + assert_eq!(Pools::pending_rewards(10), Some(0)); + Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + assert_eq!(Pools::pending_rewards(10), Some(30)); + assert_eq!(Pools::pending_rewards(20), None); + + Balances::make_free_balance_be(&20, ed + 10); + assert_ok!(Pools::join(Origin::signed(20), 10, 1)); + + assert_eq!(Pools::pending_rewards(10), Some(30)); + assert_eq!(Pools::pending_rewards(20), Some(0)); + + Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); + + assert_eq!(Pools::pending_rewards(10), Some(30 + 50)); + assert_eq!(Pools::pending_rewards(20), Some(50)); + assert_eq!(Pools::pending_rewards(30), None); + + Balances::make_free_balance_be(&30, ed + 10); + assert_ok!(Pools::join(Origin::signed(30), 10, 1)); + + assert_eq!(Pools::pending_rewards(10), Some(30 + 50)); + assert_eq!(Pools::pending_rewards(20), Some(50)); + assert_eq!(Pools::pending_rewards(30), Some(0)); + + Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); + + assert_eq!(Pools::pending_rewards(10), Some(30 + 50 + 20)); + assert_eq!(Pools::pending_rewards(20), Some(50 + 20)); + assert_eq!(Pools::pending_rewards(30), Some(20)); + + // 10 should claim 10, 20 should claim nothing. + assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_eq!(Pools::pending_rewards(10), Some(0)); + assert_eq!(Pools::pending_rewards(20), Some(50 + 20)); + assert_eq!(Pools::pending_rewards(30), Some(20)); + + assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_eq!(Pools::pending_rewards(10), Some(0)); + assert_eq!(Pools::pending_rewards(20), Some(0)); + assert_eq!(Pools::pending_rewards(30), Some(20)); + + assert_ok!(Pools::claim_payout(Origin::signed(30))); + assert_eq!(Pools::pending_rewards(10), Some(0)); + assert_eq!(Pools::pending_rewards(20), Some(0)); + assert_eq!(Pools::pending_rewards(30), Some(0)); + }); + } + #[test] fn rewards_distribution_is_fair_bond_extra() { ExtBuilder::default().build_and_execute(|| { From 915f3c10f22777a1dc49c1ed894525280ce12244 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Kula?= Date: Mon, 29 Aug 2022 10:01:37 +0200 Subject: [PATCH 068/348] Additional constraints for NotificationSender (#12121) Co-authored-by: Jedrzej Kula --- client/network/common/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs index 0fff32b12d66c..6df99acf0bdfd 100644 --- a/client/network/common/src/service.rs +++ b/client/network/common/src/service.rs @@ -387,7 +387,7 @@ pub trait NotificationSenderReady { /// A `NotificationSender` allows for sending notifications to a peer with a chosen protocol. #[async_trait::async_trait] -pub trait NotificationSender { +pub trait NotificationSender: Send + Sync + 'static { /// Returns a future that resolves when the `NotificationSender` is ready to send a /// notification. 
async fn ready(&self) From 419924c4e252b9af75957d3112c9c7a8ea521034 Mon Sep 17 00:00:00 2001 From: Dmitry Novikov Date: Mon, 29 Aug 2022 14:22:40 +0300 Subject: [PATCH 069/348] Bug fix of currencies implementation for `pallet-balances` (#11875) * Add test * Fix the bug * Add similar test for named reservable * Extend test with "overflow" repatriation * Expand test for `NamedReservableCurrency` * Add notes about return values meaning --- frame/balances/src/lib.rs | 6 +++++- frame/balances/src/tests.rs | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 0cb32a4e3ecd6..0a45350366449 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -1000,6 +1000,8 @@ impl, I: 'static> Pallet { /// Is a no-op if: /// - the value to be moved is zero; or /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. + /// + /// NOTE: returns actual amount of transferred value in `Ok` case. fn do_transfer_reserved( slashed: &T::AccountId, beneficiary: &T::AccountId, @@ -1013,7 +1015,7 @@ impl, I: 'static> Pallet { if slashed == beneficiary { return match status { - Status::Free => Ok(Self::unreserve(slashed, value)), + Status::Free => Ok(value.saturating_sub(Self::unreserve(slashed, value))), Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), } } @@ -1785,6 +1787,8 @@ where /// Unreserve some funds, returning any amount that was unable to be unreserved. /// /// Is a no-op if the value to be unreserved is zero or the account does not exist. + /// + /// NOTE: returns amount value which wasn't successfully unreserved. fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { if value.is_zero() { return Zero::zero() diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 8f5470ae3cac2..a9e44f085977a 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -528,6 +528,22 @@ macro_rules! decl_tests { }); } + #[test] + fn transferring_reserved_balance_to_yourself_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 110); + assert_ok!(Balances::reserve(&1, 50)); + assert_ok!(Balances::repatriate_reserved(&1, &1, 50, Status::Free), 0); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance(1), 0); + + assert_ok!(Balances::reserve(&1, 50)); + assert_ok!(Balances::repatriate_reserved(&1, &1, 60, Status::Free), 10); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance(1), 0); + }); + } + #[test] fn transferring_reserved_balance_to_nonexistent_should_fail() { <$ext_builder>::default().build().execute_with(|| { @@ -1167,6 +1183,25 @@ macro_rules! 
decl_tests { }); } + #[test] + fn reserved_named_to_yourself_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&1, 110); + + let id = [1u8; 8]; + + assert_ok!(Balances::reserve_named(&id, &1, 50)); + assert_ok!(Balances::repatriate_reserved_named(&id, &1, &1, 50, Status::Free), 0); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + + assert_ok!(Balances::reserve_named(&id, &1, 50)); + assert_ok!(Balances::repatriate_reserved_named(&id, &1, &1, 60, Status::Free), 10); + assert_eq!(Balances::free_balance(1), 110); + assert_eq!(Balances::reserved_balance_named(&id, &1), 0); + }); + } + #[test] fn ensure_reserved_named_should_work() { <$ext_builder>::default().build().execute_with(|| { From 32c1a0efeddd622707e3ba5e16eb19334c730d28 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 29 Aug 2022 14:54:57 +0300 Subject: [PATCH 070/348] Make `NetworkService::add_reserved_peer()` accept `MultiaddrWithPeerId` (#12102) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Make `add_reserved_peer()` accept `MultiaddrWithPeerId` * minor: cargo fmt * minor: error to string conversion Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- .../src/communication/tests.rs | 3 ++- client/network-gossip/src/bridge.rs | 3 ++- client/network-gossip/src/state_machine.rs | 3 ++- client/network/common/src/service.rs | 8 +++--- client/network/src/service.rs | 16 +++++------ client/offchain/src/api.rs | 7 +++-- client/offchain/src/lib.rs | 3 ++- client/service/src/lib.rs | 13 ++++++--- client/service/test/src/lib.rs | 27 ++++++++++--------- 9 files changed, 49 insertions(+), 34 deletions(-) diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 5b2436b23351e..4d709f56f3d61 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -27,6 +27,7 @@ use futures::prelude::*; use parity_scale_codec::Encode; use sc_network::{config::Role, Multiaddr, PeerId, ReputationChange}; use sc_network_common::{ + config::MultiaddrWithPeerId, protocol::event::{Event as NetworkEvent, ObservedRole}, service::{ NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, @@ -87,7 +88,7 @@ impl NetworkPeers for TestNetwork { unimplemented!(); } - fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { unimplemented!(); } diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index b63e3f64a1256..45eff09332f67 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -317,6 +317,7 @@ mod tests { }; use quickcheck::{Arbitrary, Gen, QuickCheck}; use sc_network_common::{ + config::MultiaddrWithPeerId, protocol::event::ObservedRole, service::{ NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, @@ -371,7 +372,7 @@ mod tests { unimplemented!(); } - fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { unimplemented!(); } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index a870ca6a3acdf..ff75b4520bf03 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -514,6 +514,7 
@@ mod tests { use crate::multiaddr::Multiaddr; use futures::prelude::*; use sc_network_common::{ + config::MultiaddrWithPeerId, protocol::event::Event, service::{ NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, @@ -610,7 +611,7 @@ mod tests { unimplemented!(); } - fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { unimplemented!(); } diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs index 6df99acf0bdfd..9544cddf013cf 100644 --- a/client/network/common/src/service.rs +++ b/client/network/common/src/service.rs @@ -19,6 +19,7 @@ // If you read this, you are very thorough, congratulations. use crate::{ + config::MultiaddrWithPeerId, protocol::event::Event, request_responses::{IfDisconnected, RequestFailure}, sync::{warp::WarpSyncProgress, StateDownloadProgress, SyncState}, @@ -179,12 +180,11 @@ pub trait NetworkPeers { /// purposes. fn deny_unreserved_peers(&self); - /// Adds a `PeerId` and its address as reserved. The string should encode the address - /// and peer ID of the remote node. + /// Adds a `PeerId` and its `Multiaddr` as reserved. /// /// Returns an `Err` if the given string is not a valid multiaddress /// or contains an invalid peer ID (which includes the local peer ID). - fn add_reserved_peer(&self, peer: String) -> Result<(), String>; + fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String>; /// Removes a `PeerId` from the list of reserved peers. fn remove_reserved_peer(&self, peer_id: PeerId); @@ -285,7 +285,7 @@ where T::deny_unreserved_peers(self) } - fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { T::add_reserved_peer(self, peer) } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 162b9c6a30491..68ac4b8db8a7d 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -62,7 +62,7 @@ use parking_lot::Mutex; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_network_common::{ - config::parse_str_addr, + config::MultiaddrWithPeerId, protocol::event::{DhtEvent, Event}, request_responses::{IfDisconnected, RequestFailure}, service::{ @@ -702,9 +702,8 @@ where self.service.remove_reserved_peer(peer); } - /// Adds a `PeerId` and its address as reserved. The string should encode the address - /// and peer ID of the remote node. - pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + /// Adds a `PeerId` and its `Multiaddr` as reserved. + pub fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { self.service.add_reserved_peer(peer) } @@ -912,17 +911,16 @@ where let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true)); } - fn add_reserved_peer(&self, peer: String) -> Result<(), String> { - let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?; + fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String> { // Make sure the local peer ID is never added to the PSM. 
- if peer_id == self.local_peer_id { + if peer.peer_id == self.local_peer_id { return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddReserved(peer_id)); + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer.peer_id, peer.multiaddr)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddReserved(peer.peer_id)); Ok(()) } diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 5c8139d4505e7..f40bceb615148 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -326,7 +326,10 @@ mod tests { use super::*; use libp2p::PeerId; use sc_client_db::offchain::LocalStorage; - use sc_network_common::service::{NetworkPeers, NetworkStateInfo}; + use sc_network_common::{ + config::MultiaddrWithPeerId, + service::{NetworkPeers, NetworkStateInfo}, + }; use sc_peerset::ReputationChange; use sp_core::offchain::{DbExternalities, Externalities}; use std::{borrow::Cow, time::SystemTime}; @@ -362,7 +365,7 @@ mod tests { unimplemented!(); } - fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { unimplemented!(); } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 5eed142ff3871..e215a016872cc 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -249,6 +249,7 @@ mod tests { use libp2p::{Multiaddr, PeerId}; use sc_block_builder::BlockBuilderProvider as _; use sc_client_api::Backend as _; + use sc_network_common::config::MultiaddrWithPeerId; use sc_peerset::ReputationChange; use sc_transaction_pool::{BasicPool, FullChainApi}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; @@ -300,7 +301,7 @@ mod tests { unimplemented!(); } - fn add_reserved_peer(&self, _peer: String) -> Result<(), String> { + fn add_reserved_peer(&self, _peer: MultiaddrWithPeerId) -> Result<(), String> { unimplemented!(); } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 2f35a04451e79..19358c1e5bc4c 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -42,7 +42,7 @@ use jsonrpsee::{core::Error as JsonRpseeError, RpcModule}; use log::{debug, error, warn}; use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider}; use sc_network::PeerId; -use sc_network_common::service::NetworkBlock; +use sc_network_common::{config::MultiaddrWithPeerId, service::NetworkBlock}; use sc_rpc_server::WsConfig; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; @@ -230,8 +230,15 @@ async fn build_network_future< } } sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => { - let x = network.add_reserved_peer(peer_addr) - .map_err(sc_rpc::system::error::Error::MalformattedPeerArg); + let result = match MultiaddrWithPeerId::try_from(peer_addr) { + Ok(peer) => { + network.add_reserved_peer(peer) + }, + Err(err) => { + Err(err.to_string()) + }, + }; + let x = result.map_err(sc_rpc::system::error::Error::MalformattedPeerArg); let _ = sender.send(x); } sc_rpc::system::Request::NetworkRemoveReservedPeer(peer_id, sender) => { diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 919c90400e94c..11c1cbaf7afb1 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -24,9 +24,12 @@ use parking_lot::Mutex; use 
sc_client_api::{Backend, CallExecutor}; use sc_network::{ config::{NetworkConfiguration, TransportConfig}, - multiaddr, Multiaddr, + multiaddr, +}; +use sc_network_common::{ + config::MultiaddrWithPeerId, + service::{NetworkBlock, NetworkPeers, NetworkStateInfo}, }; -use sc_network_common::service::{NetworkBlock, NetworkPeers, NetworkStateInfo}; use sc_service::{ client::Client, config::{BasePath, DatabaseSource, KeystoreConfig}, @@ -49,8 +52,8 @@ const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); struct TestNet { runtime: Runtime, - authority_nodes: Vec<(usize, F, U, Multiaddr)>, - full_nodes: Vec<(usize, F, U, Multiaddr)>, + authority_nodes: Vec<(usize, F, U, MultiaddrWithPeerId)>, + full_nodes: Vec<(usize, F, U, MultiaddrWithPeerId)>, chain_spec: GenericChainSpec, base_port: u16, nodes: usize, @@ -320,7 +323,7 @@ where handle.spawn(service.clone().map_err(|_| ())); let addr = - addr.with(multiaddr::Protocol::P2p((service.network().local_peer_id()).into())); + MultiaddrWithPeerId { multiaddr: addr, peer_id: service.network().local_peer_id() }; self.authority_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -340,7 +343,7 @@ where handle.spawn(service.clone().map_err(|_| ())); let addr = - addr.with(multiaddr::Protocol::P2p((service.network().local_peer_id()).into())); + MultiaddrWithPeerId { multiaddr: addr, peer_id: service.network().local_peer_id() }; self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -382,7 +385,7 @@ where for (_, service, _, _) in network.full_nodes.iter().skip(1) { service .network() - .add_reserved_peer(first_address.to_string()) + .add_reserved_peer(first_address.clone()) .expect("Error adding reserved peer"); } @@ -414,7 +417,7 @@ where if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { service .network() - .add_reserved_peer(address.to_string()) + .add_reserved_peer(address) .expect("Error adding reserved peer"); address = node_id.clone(); } @@ -479,7 +482,7 @@ pub fn sync( for (_, service, _, _) in network.full_nodes.iter().skip(1) { service .network() - .add_reserved_peer(first_address.to_string()) + .add_reserved_peer(first_address.clone()) .expect("Error adding reserved peer"); } @@ -532,13 +535,13 @@ pub fn consensus( for (_, service, _, _) in network.full_nodes.iter() { service .network() - .add_reserved_peer(first_address.to_string()) + .add_reserved_peer(first_address.clone()) .expect("Error adding reserved peer"); } for (_, service, _, _) in network.authority_nodes.iter().skip(1) { service .network() - .add_reserved_peer(first_address.to_string()) + .add_reserved_peer(first_address.clone()) .expect("Error adding reserved peer"); } network.run_until_all_full(|_index, service| { @@ -556,7 +559,7 @@ pub fn consensus( for (_, service, _, _) in network.full_nodes.iter() { service .network() - .add_reserved_peer(first_address.to_string()) + .add_reserved_peer(first_address.clone()) .expect("Error adding reserved peer"); } From 3db5c9512c6782efa651fa185061806c7dceb6aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 29 Aug 2022 17:11:19 +0200 Subject: [PATCH 071/348] Fix flaky trie-db cache test (#12139) --- primitives/trie/src/cache/mod.rs | 44 ++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs index 67348919250cd..f6a447763a80e 100644 --- a/primitives/trie/src/cache/mod.rs +++ b/primitives/trie/src/cache/mod.rs @@ -593,26 +593,35 @@ mod 
tests { .map(|d| d.0) .all(|l| TEST_DATA.iter().any(|d| l.storage_key().unwrap() == d.0))); - { - let local_cache = shared_cache.local_cache(); + // Run this in a loop. The first time we check that with the filled value cache, + // the expected values are at the top of the LRU. + // The second run is using an empty value cache to ensure that we access the nodes. + for _ in 0..2 { + { + let local_cache = shared_cache.local_cache(); - let mut cache = local_cache.as_trie_db_cache(root); - let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); + let mut cache = local_cache.as_trie_db_cache(root); + let trie = TrieDBBuilder::::new(&db, &root).with_cache(&mut cache).build(); - for (k, _) in TEST_DATA.iter().take(2) { - trie.get(k).unwrap().unwrap(); + for (k, _) in TEST_DATA.iter().take(2) { + trie.get(k).unwrap().unwrap(); + } } - } - // Ensure that the accessed items are most recently used items of the shared value cache. - assert!(shared_cache - .read_lock_inner() - .value_cache() - .lru - .iter() - .take(2) - .map(|d| d.0) - .all(|l| { TEST_DATA.iter().take(2).any(|d| l.storage_key().unwrap() == d.0) })); + // Ensure that the accessed items are most recently used items of the shared value + // cache. + assert!(shared_cache + .read_lock_inner() + .value_cache() + .lru + .iter() + .take(2) + .map(|d| d.0) + .all(|l| { TEST_DATA.iter().take(2).any(|d| l.storage_key().unwrap() == d.0) })); + + // Delete the value cache, so that we access the nodes. + shared_cache.reset_value_cache(); + } let most_recently_used_nodes = shared_cache .read_lock_inner() @@ -622,9 +631,6 @@ mod tests { .map(|d| *d.0) .collect::>(); - // Delete the value cache, so that we access the nodes. - shared_cache.reset_value_cache(); - { let local_cache = shared_cache.local_cache(); From 4ba739adc0d2b2c81687cfa9faf3a747ffe6193d Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 30 Aug 2022 00:30:37 +0900 Subject: [PATCH 072/348] Force `rustix` to use libc (#12123) --- Cargo.lock | 1 + client/executor/wasmtime/Cargo.toml | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 64e84fe4710af..bf06efd65aa53 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8249,6 +8249,7 @@ dependencies = [ "parity-scale-codec", "parity-wasm 0.42.2", "paste 1.0.6", + "rustix 0.33.7", "rustix 0.35.6", "sc-allocator", "sc-executor-common", diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 2dcfde378bb87..e75f2d9d043d0 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -18,6 +18,9 @@ codec = { package = "parity-scale-codec", version = "3.0.0" } libc = "0.2.121" log = "0.4.17" parity-wasm = "0.42.0" + +# When bumping wasmtime do not forget to also bump rustix +# to exactly the same version as used by wasmtime! wasmtime = { version = "0.38.0", default-features = false, features = [ "cache", "cranelift", @@ -33,9 +36,16 @@ sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } sp-wasm-interface = { version = "6.0.0", features = ["wasmtime"], path = "../../../primitives/wasm-interface" } [target.'cfg(target_os = "linux")'.dependencies] -rustix = { version = "0.35.6", default-features = false, features = ["std", "mm", "fs", "param"] } +rustix = { version = "0.35.6", default-features = false, features = ["std", "mm", "fs", "param", "use-libc"] } once_cell = "1.12.0" +# Here we include the rustix crate used by wasmtime just to enable its 'use-libc' flag. 
+# +# By default rustix directly calls the appropriate syscalls completely bypassing libc; +# this doesn't have any actual benefits for us besides making it harder to debug memory +# problems (since then `mmap` etc. cannot be easily hooked into). +rustix_wasmtime = { package = "rustix", version = "0.33.7", default-features = false, features = ["std", "use-libc"] } + [dev-dependencies] wat = "1.0" sc-runtime-test = { version = "2.0.0", path = "../runtime-test" } From 5cdb054049980eecb449c36a7ccfe4990329c494 Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Mon, 29 Aug 2022 18:02:23 +0200 Subject: [PATCH 073/348] Alliance pallet: retirement notice call (#11970) * Alliance pallet: retirement notice * add alliance pallet to benchmark list for dev chain * fix type * ".git/.scripts/bench-bot.sh" pallet dev pallet_alliance * ".git/.scripts/bench-bot.sh" pallet dev pallet_alliance * link weight generated by bench for retirement_notice method * migration to clear UpForKicking storage prefix * rename migration from v1 to v0_to_v1 * Apply suggestions from code review Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * rename `retirement-notice to give-retirement-notice * Apply suggestions from code review Co-authored-by: Squirrel * review fixes: update doc, saturating add, BlockNumber type alias * add suffix to duratin consts *_IN_BLOCKS * ".git/.scripts/bench-bot.sh" pallet dev pallet_alliance * add negative tests (#11995) * add negative tests * remove tests powerless asserts checking against announcment origin * assert bad origin from announcement origin checks Co-authored-by: muharem Co-authored-by: command-bot <> Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Squirrel --- bin/node/runtime/src/lib.rs | 16 +- frame/alliance/README.md | 1 + frame/alliance/src/benchmarking.rs | 31 ++- frame/alliance/src/lib.rs | 79 ++++-- frame/alliance/src/migration.rs | 72 +++++ frame/alliance/src/mock.rs | 16 +- frame/alliance/src/tests.rs | 110 ++++++-- frame/alliance/src/weights.rs | 428 +++++++++++++++++------------ 8 files changed, 511 insertions(+), 242 deletions(-) create mode 100644 frame/alliance/src/migration.rs diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0816fedff0347..f8ea312d1a496 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1518,8 +1518,10 @@ impl pallet_state_trie_migration::Config for Runtime { type WeightInfo = (); } +const ALLIANCE_MOTION_DURATION_IN_BLOCKS: BlockNumber = 5 * DAYS; + parameter_types! { - pub const AllianceMotionDuration: BlockNumber = 5 * DAYS; + pub const AllianceMotionDuration: BlockNumber = ALLIANCE_MOTION_DURATION_IN_BLOCKS; pub const AllianceMaxProposals: u32 = 100; pub const AllianceMaxMembers: u32 = 100; } @@ -1541,6 +1543,7 @@ parameter_types! 
{ pub const MaxFellows: u32 = AllianceMaxMembers::get() - MaxFounders::get(); pub const MaxAllies: u32 = 100; pub const AllyDeposit: Balance = 10 * DOLLARS; + pub const RetirementPeriod: BlockNumber = ALLIANCE_MOTION_DURATION_IN_BLOCKS + (1 * DAYS); } impl pallet_alliance::Config for Runtime { @@ -1577,6 +1580,7 @@ impl pallet_alliance::Config for Runtime { type MaxMembersCount = AllianceMaxMembers; type AllyDeposit = AllyDeposit; type WeightInfo = pallet_alliance::weights::SubstrateWeight; + type RetirementPeriod = RetirementPeriod; } construct_runtime!( @@ -1682,9 +1686,16 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, - pallet_nomination_pools::migration::v2::MigrateToV2, + Migrations, >; +// All migrations executed on runtime upgrade as a nested tuple of types implementing +// `OnRuntimeUpgrade`. +type Migrations = ( + pallet_nomination_pools::migration::v2::MigrateToV2, + pallet_alliance::migration::Migration, +); + /// MMR helper types. mod mmr { use super::Runtime; @@ -1703,6 +1714,7 @@ extern crate frame_benchmarking; mod benches { define_benchmarks!( [frame_benchmarking, BaselineBench::] + [pallet_alliance, Alliance] [pallet_assets, Assets] [pallet_babe, Babe] [pallet_bags_list, BagsList] diff --git a/frame/alliance/README.md b/frame/alliance/README.md index f0900c84cbd85..f91475a5984ea 100644 --- a/frame/alliance/README.md +++ b/frame/alliance/README.md @@ -43,6 +43,7 @@ to update the Alliance's rule and make announcements. #### For Members (All) +- `give_retirement_notice` - Give a retirement notice and start a retirement period required to pass in order to retire. - `retire` - Retire from the Alliance and release the caller's deposit. #### For Members (Founders/Fellows) diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index c23ded1640b58..25dcb18b175ef 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -691,12 +691,37 @@ benchmarks_instance_pallet! { assert_last_event::(Event::AllyElevated { ally: ally1 }.into()); } + give_retirement_notice { + set_members::(); + let fellow2 = fellow::(2); + + assert!(Alliance::::is_fellow(&fellow2)); + }: _(SystemOrigin::Signed(fellow2.clone())) + verify { + assert!(Alliance::::is_member_of(&fellow2, MemberRole::Retiring)); + + assert_eq!( + RetiringMembers::::get(&fellow2), + Some(System::::block_number() + T::RetirementPeriod::get()) + ); + assert_last_event::( + Event::MemberRetirementPeriodStarted {member: fellow2}.into() + ); + } + retire { set_members::(); let fellow2 = fellow::(2); assert!(Alliance::::is_fellow(&fellow2)); - assert!(!Alliance::::is_up_for_kicking(&fellow2)); + + assert_eq!( + Alliance::::give_retirement_notice( + SystemOrigin::Signed(fellow2.clone()).into() + ), + Ok(()) + ); + System::::set_block_number(System::::block_number() + T::RetirementPeriod::get()); assert_eq!(DepositOf::::get(&fellow2), Some(T::AllyDeposit::get())); }: _(SystemOrigin::Signed(fellow2.clone())) @@ -713,11 +738,7 @@ benchmarks_instance_pallet! 
{ set_members::(); let fellow2 = fellow::(2); - UpForKicking::::insert(&fellow2, true); - assert!(Alliance::::is_member_of(&fellow2, MemberRole::Fellow)); - assert!(Alliance::::is_up_for_kicking(&fellow2)); - assert_eq!(DepositOf::::get(&fellow2), Some(T::AllyDeposit::get())); let fellow2_lookup = T::Lookup::unlookup(fellow2.clone()); diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 0f4d43505e3f9..bc0a119cd54a6 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -60,6 +60,8 @@ //! //! #### For Members (All) //! +//! - `give_retirement_notice` - Give a retirement notice and start a retirement period required to +//! pass in order to retire. //! - `retire` - Retire from the Alliance and release the caller's deposit. //! //! #### For Members (Founders/Fellows) @@ -93,13 +95,14 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; mod types; pub mod weights; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; use sp_runtime::{ - traits::{StaticLookup, Zero}, + traits::{Saturating, StaticLookup, Zero}, RuntimeDebug, }; use sp_std::{convert::TryInto, prelude::*}; @@ -124,6 +127,9 @@ pub use pallet::*; pub use types::*; pub use weights::*; +/// The log target of this pallet. +pub const LOG_TARGET: &str = "runtime::alliance"; + /// Simple index type for proposal counting. pub type ProposalIndex = u32; @@ -198,6 +204,7 @@ pub enum MemberRole { Founder, Fellow, Ally, + Retiring, } /// The type of item that may be deemed unscrupulous. @@ -218,6 +225,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub (super) trait Store)] + #[pallet::storage_version(migration::STORAGE_VERSION)] pub struct Pallet(PhantomData<(T, I)>); #[pallet::config] @@ -308,6 +316,10 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + + /// The number of blocks a member must wait between giving a retirement notice and retiring. + /// Supposed to be greater than time required to `kick_member`. + type RetirementPeriod: Get; } #[pallet::error] @@ -324,8 +336,6 @@ pub mod pallet { NotAlly, /// Account is not a founder. NotFounder, - /// This member is up for being kicked from the Alliance and cannot perform this operation. - UpForKicking, /// Account does not have voting rights. NoVotingRights, /// Account is already an elevated (fellow) member. @@ -357,6 +367,12 @@ pub mod pallet { TooManyMembers, /// Number of announcements exceeds `MaxAnnouncementsCount`. TooManyAnnouncements, + /// Account already gave retirement notice + AlreadyRetiring, + /// Account did not give a retirement notice required to retire. + RetirementNoticeNotGiven, + /// Retirement period has not passed. + RetirementPeriodNotPassed, } #[pallet::event] @@ -382,6 +398,8 @@ pub mod pallet { }, /// An ally has been elevated to Fellow. AllyElevated { ally: T::AccountId }, + /// A member gave retirement notice and their retirement period started. + MemberRetirementPeriodStarted { member: T::AccountId }, /// A member has retired with its deposit unreserved. MemberRetired { member: T::AccountId, unreserved: Option> }, /// A member has been kicked out with its deposit slashed. @@ -485,12 +503,12 @@ pub mod pallet { ValueQuery, >; - /// A set of members that are (potentially) being kicked out. They cannot retire until the - /// motion is settled. + /// A set of members who gave a retirement notice. They can retire after the end of retirement + /// period stored as a future block number. 
#[pallet::storage] - #[pallet::getter(fn up_for_kicking)] - pub type UpForKicking, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, T::AccountId, bool, ValueQuery>; + #[pallet::getter(fn retiring_members)] + pub type RetiringMembers, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, T::AccountId, T::BlockNumber, OptionQuery>; /// The current list of accounts deemed unscrupulous. These accounts non grata cannot submit /// candidacy. @@ -525,11 +543,6 @@ pub mod pallet { let proposor = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&proposor), Error::::NoVotingRights); - if let Some(Call::kick_member { who }) = proposal.is_sub_type() { - let strike = T::Lookup::lookup(who.clone())?; - >::insert(strike, true); - } - T::ProposalProvider::propose_proposal(proposor, threshold, proposal, length_bound)?; Ok(()) } @@ -783,15 +796,40 @@ pub mod pallet { Ok(()) } + /// As a member, give a retirement notice and start a retirement period required to pass in + /// order to retire. + #[pallet::weight(T::WeightInfo::give_retirement_notice())] + pub fn give_retirement_notice(origin: OriginFor) -> DispatchResult { + let who = ensure_signed(origin)?; + let role = Self::member_role_of(&who).ok_or(Error::::NotMember)?; + ensure!(role.ne(&MemberRole::Retiring), Error::::AlreadyRetiring); + + Self::remove_member(&who, role)?; + Self::add_member(&who, MemberRole::Retiring)?; + >::insert( + &who, + frame_system::Pallet::::block_number() + .saturating_add(T::RetirementPeriod::get()), + ); + + Self::deposit_event(Event::MemberRetirementPeriodStarted { member: who }); + Ok(()) + } + /// As a member, retire from the alliance and unreserve the deposit. + /// This can only be done once you have `give_retirement_notice` and it has expired. #[pallet::weight(T::WeightInfo::retire())] pub fn retire(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; - // A member up for kicking cannot retire. - ensure!(!Self::is_up_for_kicking(&who), Error::::UpForKicking); + let retirement_period_end = RetiringMembers::::get(&who) + .ok_or(Error::::RetirementNoticeNotGiven)?; + ensure!( + frame_system::Pallet::::block_number() >= retirement_period_end, + Error::::RetirementPeriodNotPassed + ); - let role = Self::member_role_of(&who).ok_or(Error::::NotMember)?; - Self::remove_member(&who, role)?; + Self::remove_member(&who, MemberRole::Retiring)?; + >::remove(&who); let deposit = DepositOf::::take(&who); if let Some(deposit) = deposit { let err_amount = T::Currency::unreserve(&who, deposit); @@ -814,8 +852,6 @@ pub mod pallet { T::Slashed::on_unbalanced(T::Currency::slash_reserved(&member, deposit).0); } - >::remove(&member); - Self::deposit_event(Event::MemberKicked { member, slashed: deposit }); Ok(()) } @@ -930,11 +966,6 @@ impl, I: 'static> Pallet { founders.into() } - /// Check if an account's forced removal is up for consideration. - fn is_up_for_kicking(who: &T::AccountId) -> bool { - >::contains_key(&who) - } - /// Add a user to the sorted alliance member set. fn add_member(who: &T::AccountId, role: MemberRole) -> DispatchResult { >::try_mutate(role, |members| -> DispatchResult { diff --git a/frame/alliance/src/migration.rs b/frame/alliance/src/migration.rs new file mode 100644 index 0000000000000..cd54eed7a9a75 --- /dev/null +++ b/frame/alliance/src/migration.rs @@ -0,0 +1,72 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{Config, Pallet, Weight, LOG_TARGET}; +use frame_support::{pallet_prelude::*, storage::migration, traits::OnRuntimeUpgrade}; +use log; + +/// The current storage version. +pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + +/// Wrapper for all migrations of this pallet. +pub fn migrate, I: 'static>() -> Weight { + let onchain_version = Pallet::::on_chain_storage_version(); + let mut weight: Weight = 0; + + if onchain_version < 1 { + weight = weight.saturating_add(v0_to_v1::migrate::()); + } + + STORAGE_VERSION.put::>(); + weight = weight.saturating_add(T::DbWeight::get().writes(1)); + + weight +} + +/// Implements `OnRuntimeUpgrade` trait. +pub struct Migration(PhantomData<(T, I)>); + +impl, I: 'static> OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> Weight { + migrate::() + } +} + +/// v0_to_v1: `UpForKicking` is replaced by a retirement period. +mod v0_to_v1 { + use super::*; + + pub fn migrate, I: 'static>() -> Weight { + if migration::clear_storage_prefix( + >::name().as_bytes(), + b"UpForKicking", + b"", + None, + None, + ) + .maybe_cursor + .is_some() + { + log::error!( + target: LOG_TARGET, + "Storage prefix 'UpForKicking' is not completely cleared." + ); + } + + T::DbWeight::get().writes(1) + } +} diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index 91986300aa2e1..adc313e28ed7e 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -37,8 +37,10 @@ pub use crate as pallet_alliance; use super::*; +type BlockNumber = u64; + parameter_types! { - pub const BlockHashCount: u64 = 250; + pub const BlockHashCount: BlockNumber = 250; } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -47,7 +49,7 @@ impl frame_system::Config for Test { type Origin = Origin; type Call = Call; type Index = u64; - type BlockNumber = u64; + type BlockNumber = BlockNumber; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; @@ -83,8 +85,10 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; } +const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3; + parameter_types! { - pub const MotionDuration: u64 = 3; + pub const MotionDuration: BlockNumber = MOTION_DURATION_IN_BLOCKS; pub const MaxProposals: u32 = 100; pub const MaxMembers: u32 = 100; } @@ -199,6 +203,7 @@ parameter_types! 
{ pub const MaxFellows: u32 = MaxMembers::get() - MaxFounders::get(); pub const MaxAllies: u32 = 100; pub const AllyDeposit: u64 = 25; + pub const RetirementPeriod: BlockNumber = MOTION_DURATION_IN_BLOCKS + 1; } impl Config for Test { type Event = Event; @@ -225,6 +230,7 @@ impl Config for Test { type MaxMembersCount = MaxMembers; type AllyDeposit = AllyDeposit; type WeightInfo = (); + type RetirementPeriod = RetirementPeriod; } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -324,7 +330,3 @@ pub fn make_proposal(value: u64) -> Call { pub fn make_set_rule_proposal(rule: Cid) -> Call { Call::Alliance(pallet_alliance::Call::set_rule { rule }) } - -pub fn make_kick_member_proposal(who: u64) -> Call { - Call::Alliance(pallet_alliance::Call::kick_member { who }) -} diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs index 14265cad5aa76..918cfa840c3f0 100644 --- a/frame/alliance/src/tests.rs +++ b/frame/alliance/src/tests.rs @@ -19,7 +19,7 @@ use sp_runtime::traits::Hash; -use frame_support::{assert_noop, assert_ok, Hashable}; +use frame_support::{assert_noop, assert_ok, error::BadOrigin, Hashable}; use frame_system::{EventRecord, Phase}; use super::*; @@ -241,6 +241,9 @@ fn set_rule_works() { fn announce_works() { new_test_ext().execute_with(|| { let cid = test_cid(); + + assert_noop!(Alliance::announce(Origin::signed(2), cid.clone()), BadOrigin); + assert_ok!(Alliance::announce(Origin::signed(3), cid.clone())); assert_eq!(Alliance::announcements(), vec![cid.clone()]); @@ -390,48 +393,111 @@ fn elevate_ally_works() { } #[test] -fn retire_works() { +fn give_retirement_notice_work() { new_test_ext().execute_with(|| { - let proposal = make_kick_member_proposal(2); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - assert_ok!(Alliance::propose( - Origin::signed(1), - 3, - Box::new(proposal.clone()), - proposal_len + assert_noop!( + Alliance::give_retirement_notice(Origin::signed(4)), + Error::::NotMember + ); + + assert_eq!(Alliance::members(MemberRole::Fellow), vec![3]); + assert_ok!(Alliance::give_retirement_notice(Origin::signed(3))); + assert_eq!(Alliance::members(MemberRole::Fellow), Vec::::new()); + assert_eq!(Alliance::members(MemberRole::Retiring), vec![3]); + System::assert_last_event(mock::Event::Alliance( + crate::Event::MemberRetirementPeriodStarted { member: (3) }, )); - assert_noop!(Alliance::retire(Origin::signed(2)), Error::::UpForKicking); - assert_noop!(Alliance::retire(Origin::signed(4)), Error::::NotMember); + assert_noop!( + Alliance::give_retirement_notice(Origin::signed(3)), + Error::::AlreadyRetiring + ); + }); +} + +#[test] +fn retire_works() { + new_test_ext().execute_with(|| { + assert_noop!( + Alliance::retire(Origin::signed(2)), + Error::::RetirementNoticeNotGiven + ); + + assert_noop!( + Alliance::retire(Origin::signed(4)), + Error::::RetirementNoticeNotGiven + ); assert_eq!(Alliance::members(MemberRole::Fellow), vec![3]); + assert_ok!(Alliance::give_retirement_notice(Origin::signed(3))); + assert_noop!( + Alliance::retire(Origin::signed(3)), + Error::::RetirementPeriodNotPassed + ); + System::set_block_number(System::block_number() + RetirementPeriod::get()); assert_ok!(Alliance::retire(Origin::signed(3))); assert_eq!(Alliance::members(MemberRole::Fellow), Vec::::new()); + System::assert_last_event(mock::Event::Alliance(crate::Event::MemberRetired { + member: (3), + unreserved: None, + })); + + // Move time on: + System::set_block_number(System::block_number() + RetirementPeriod::get()); + + 
assert_powerless(Origin::signed(3)); }); } +fn assert_powerless(user: Origin) { + //vote / veto with a valid propsal + let cid = test_cid(); + let proposal = make_proposal(42); + + assert_noop!(Alliance::init_members(user.clone(), vec![], vec![], vec![]), BadOrigin); + + assert_noop!(Alliance::set_rule(user.clone(), cid.clone()), BadOrigin); + + assert_noop!(Alliance::retire(user.clone()), Error::::RetirementNoticeNotGiven); + + assert_noop!(Alliance::give_retirement_notice(user.clone()), Error::::NotMember); + + assert_noop!(Alliance::elevate_ally(user.clone(), 4), BadOrigin); + + assert_noop!(Alliance::kick_member(user.clone(), 1), BadOrigin); + + assert_noop!(Alliance::nominate_ally(user.clone(), 4), Error::::NoVotingRights); + + assert_noop!( + Alliance::propose(user.clone(), 5, Box::new(proposal), 1000), + Error::::NoVotingRights + ); +} + #[test] fn kick_member_works() { new_test_ext().execute_with(|| { - let proposal = make_kick_member_proposal(2); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - assert_ok!(Alliance::propose( - Origin::signed(1), - 3, - Box::new(proposal.clone()), - proposal_len - )); - assert_eq!(Alliance::up_for_kicking(2), true); - assert_eq!(Alliance::members(MemberRole::Founder), vec![1, 2]); + assert_noop!(Alliance::kick_member(Origin::signed(4), 4), BadOrigin); + + assert_noop!(Alliance::kick_member(Origin::signed(2), 4), Error::::NotMember); + >::insert(2, 25); + assert_eq!(Alliance::members(MemberRole::Founder), vec![1, 2]); assert_ok!(Alliance::kick_member(Origin::signed(2), 2)); assert_eq!(Alliance::members(MemberRole::Founder), vec![1]); + assert_eq!(>::get(2), None); + System::assert_last_event(mock::Event::Alliance(crate::Event::MemberKicked { + member: (2), + slashed: Some(25), + })); }); } #[test] fn add_unscrupulous_items_works() { new_test_ext().execute_with(|| { + assert_noop!(Alliance::add_unscrupulous_items(Origin::signed(2), vec![]), BadOrigin); + assert_ok!(Alliance::add_unscrupulous_items( Origin::signed(3), vec![ @@ -455,6 +521,8 @@ fn add_unscrupulous_items_works() { #[test] fn remove_unscrupulous_items_works() { new_test_ext().execute_with(|| { + assert_noop!(Alliance::remove_unscrupulous_items(Origin::signed(2), vec![]), BadOrigin); + assert_noop!( Alliance::remove_unscrupulous_items( Origin::signed(3), diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index 495dd1b83df93..aa9ebde528683 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,24 +18,25 @@ //! Autogenerated weights for pallet_alliance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-10-11, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 +//! DATE: 2022-08-26, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/release/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark -// --chain=dev +// pallet // --steps=50 // --repeat=20 -// --pallet=pallet_alliance // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 +// --pallet=pallet_alliance +// --chain=dev // --output=./frame/alliance/src/weights.rs // --template=./.maintain/frame-weight-template.hbs - #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] @@ -59,6 +60,7 @@ pub trait WeightInfo { fn join_alliance() -> Weight; fn nominate_ally() -> Weight; fn elevate_ally() -> Weight; + fn give_retirement_notice() -> Weight; fn retire() -> Weight; fn kick_member() -> Weight; fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight; @@ -73,23 +75,33 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion ProposalCount (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) - fn propose_proposed(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - (39_992_000 as Weight) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[0, 90]`. + /// The range of component `p` is `[1, 100]`. + fn propose_proposed(b: u32, x: u32, y: u32, p: u32, ) -> Weight { + (22_575_000 as Weight) + // Standard Error: 0 + .saturating_add((11_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 23_000 + .saturating_add((158_000 as Weight).saturating_mul(x as Weight)) // Standard Error: 2_000 - .saturating_add((44_000 as Weight).saturating_mul(y as Weight)) + .saturating_add((90_000 as Weight).saturating_mul(y as Weight)) // Standard Error: 2_000 - .saturating_add((323_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((216_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Alliance Members (r:2 w:0) // Storage: AllianceMotion Voting (r:1 w:1) + /// The range of component `x` is `[3, 10]`. + /// The range of component `y` is `[2, 90]`. fn vote(x: u32, y: u32, ) -> Weight { - (36_649_000 as Weight) - // Standard Error: 90_000 - .saturating_add((42_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((195_000 as Weight).saturating_mul(y as Weight)) + (45_486_000 as Weight) + // Standard Error: 29_000 + .saturating_add((78_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 2_000 + .saturating_add((128_000 as Weight).saturating_mul(y as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -97,125 +109,135 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) + /// The range of component `p` is `[1, 100]`. 
fn veto(p: u32, ) -> Weight { - (30_301_000 as Weight) - // Standard Error: 1_000 - .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) + (35_296_000 as Weight) + // Standard Error: 2_000 + .saturating_add((171_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - (40_472_000 as Weight) - // Standard Error: 69_000 - .saturating_add((485_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 2_000 - .saturating_add((192_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) + (39_252_000 as Weight) + // Standard Error: 18_000 + .saturating_add((75_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 1_000 + .saturating_add((107_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 1_000 + .saturating_add((170_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (52_076_000 as Weight) - // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - // Standard Error: 77_000 - .saturating_add((194_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((188_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((329_000 as Weight).saturating_mul(p as Weight)) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. 
+ fn close_early_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + (50_357_000 as Weight) + // Standard Error: 1_000 + .saturating_add((103_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 1_000 + .saturating_add((204_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - (47_009_000 as Weight) - // Standard Error: 66_000 - .saturating_add((256_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 2_000 - .saturating_add((176_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((327_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. + fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { + (41_258_000 as Weight) + // Standard Error: 1_000 + .saturating_add((111_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 1_000 + .saturating_add((186_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (43_650_000 as Weight) - // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) - // Standard Error: 85_000 - .saturating_add((124_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((199_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 3_000 - .saturating_add((326_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. + fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { + (40_490_000 as Weight) + // Standard Error: 16_000 + .saturating_add((119_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 1_000 + .saturating_add((93_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 1_000 + .saturating_add((195_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } - // Storage: Alliance Members (r:3 w:3) + // Storage: Alliance Members (r:2 w:3) // Storage: AllianceMotion Members (r:1 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[0, 90]`. + /// The range of component `z` is `[0, 100]`. 
fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { - (45_100_000 as Weight) - // Standard Error: 4_000 - .saturating_add((162_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 4_000 - .saturating_add((151_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + (35_186_000 as Weight) + // Standard Error: 1_000 + .saturating_add((161_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 1_000 + .saturating_add((127_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - (14_517_000 as Weight) + (18_189_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - (16_801_000 as Weight) + (21_106_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - (17_133_000 as Weight) + (22_208_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) - // Storage: Alliance Members (r:4 w:0) // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - (95_370_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) + (53_771_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } - // Storage: Alliance Members (r:4 w:0) + // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - (44_764_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) + (41_912_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Alliance Members (r:3 w:2) @@ -223,23 +245,29 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - (44_013_000 as Weight) + (36_811_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } - // Storage: Alliance KickingMembers (r:1 w:0) - // Storage: Alliance Members (r:3 w:1) + // Storage: Alliance Members (r:4 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) - // Storage: Alliance DepositOf (r:1 w:1) - // Storage: System Account (r:1 w:1) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) - fn retire() -> Weight { - (60_183_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) + // Storage: Alliance RetiringMembers (r:0 w:1) + fn give_retirement_notice() -> Weight { + (41_079_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } - // Storage: Alliance KickingMembers (r:1 w:0) + // Storage: Alliance RetiringMembers (r:1 w:1) + // Storage: Alliance Members (r:1 w:1) + // Storage: Alliance DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn retire() -> Weight { + (42_703_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + 
.saturating_add(T::DbWeight::get().writes(4 as Weight)) + } // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: Alliance DepositOf (r:1 w:1) @@ -247,29 +275,33 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - (67_467_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) + (61_370_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) + /// The range of component `n` is `[1, 100]`. + /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { (0 as Weight) - // Standard Error: 16_000 - .saturating_add((2_673_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 7_000 - .saturating_add((224_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((1_385_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_000 + .saturating_add((119_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) + /// The range of component `n` is `[1, 100]`. + /// The range of component `l` is `[1, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { (0 as Weight) - // Standard Error: 343_000 - .saturating_add((59_025_000 as Weight).saturating_mul(n as Weight)) // Standard Error: 153_000 - .saturating_add((6_725_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((21_484_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 59_000 + .saturating_add((3_772_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -282,23 +314,33 @@ impl WeightInfo for () { // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion ProposalCount (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) - fn propose_proposed(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - (39_992_000 as Weight) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[0, 90]`. + /// The range of component `p` is `[1, 100]`. + fn propose_proposed(b: u32, x: u32, y: u32, p: u32, ) -> Weight { + (22_575_000 as Weight) + // Standard Error: 0 + .saturating_add((11_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 23_000 + .saturating_add((158_000 as Weight).saturating_mul(x as Weight)) // Standard Error: 2_000 - .saturating_add((44_000 as Weight).saturating_mul(y as Weight)) + .saturating_add((90_000 as Weight).saturating_mul(y as Weight)) // Standard Error: 2_000 - .saturating_add((323_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((216_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Alliance Members (r:2 w:0) // Storage: AllianceMotion Voting (r:1 w:1) + /// The range of component `x` is `[3, 10]`. + /// The range of component `y` is `[2, 90]`. 
fn vote(x: u32, y: u32, ) -> Weight { - (36_649_000 as Weight) - // Standard Error: 90_000 - .saturating_add((42_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((195_000 as Weight).saturating_mul(y as Weight)) + (45_486_000 as Weight) + // Standard Error: 29_000 + .saturating_add((78_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 2_000 + .saturating_add((128_000 as Weight).saturating_mul(y as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -306,125 +348,135 @@ impl WeightInfo for () { // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: AllianceMotion Voting (r:0 w:1) + /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - (30_301_000 as Weight) - // Standard Error: 1_000 - .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) + (35_296_000 as Weight) + // Standard Error: 2_000 + .saturating_add((171_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - (40_472_000 as Weight) - // Standard Error: 69_000 - .saturating_add((485_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 2_000 - .saturating_add((192_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((330_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + (39_252_000 as Weight) + // Standard Error: 18_000 + .saturating_add((75_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 1_000 + .saturating_add((107_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 1_000 + .saturating_add((170_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) + // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (52_076_000 as Weight) - // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - // Standard Error: 77_000 - .saturating_add((194_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((188_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((329_000 as Weight).saturating_mul(p as Weight)) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. 
+ fn close_early_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + (50_357_000 as Weight) + // Standard Error: 1_000 + .saturating_add((103_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 1_000 + .saturating_add((204_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - (47_009_000 as Weight) - // Standard Error: 66_000 - .saturating_add((256_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 2_000 - .saturating_add((176_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 2_000 - .saturating_add((327_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. + fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { + (41_258_000 as Weight) + // Standard Error: 1_000 + .saturating_add((111_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 1_000 + .saturating_add((186_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Alliance Members (r:1 w:0) - // Storage: AllianceMotion ProposalOf (r:1 w:1) // Storage: AllianceMotion Voting (r:1 w:1) // Storage: AllianceMotion Members (r:1 w:0) // Storage: AllianceMotion Prime (r:1 w:0) // Storage: AllianceMotion Proposals (r:1 w:1) - fn close_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (43_650_000 as Weight) - // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) - // Standard Error: 85_000 - .saturating_add((124_000 as Weight).saturating_mul(x as Weight)) - // Standard Error: 3_000 - .saturating_add((199_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 3_000 - .saturating_add((326_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + // Storage: AllianceMotion ProposalOf (r:0 w:1) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[2, 90]`. + /// The range of component `p` is `[1, 100]`. + fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { + (40_490_000 as Weight) + // Standard Error: 16_000 + .saturating_add((119_000 as Weight).saturating_mul(x as Weight)) + // Standard Error: 1_000 + .saturating_add((93_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 1_000 + .saturating_add((195_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } - // Storage: Alliance Members (r:3 w:3) + // Storage: Alliance Members (r:2 w:3) // Storage: AllianceMotion Members (r:1 w:1) + /// The range of component `x` is `[2, 10]`. + /// The range of component `y` is `[0, 90]`. + /// The range of component `z` is `[0, 100]`. 
fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { - (45_100_000 as Weight) - // Standard Error: 4_000 - .saturating_add((162_000 as Weight).saturating_mul(y as Weight)) - // Standard Error: 4_000 - .saturating_add((151_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + (35_186_000 as Weight) + // Standard Error: 1_000 + .saturating_add((161_000 as Weight).saturating_mul(y as Weight)) + // Standard Error: 1_000 + .saturating_add((127_000 as Weight).saturating_mul(z as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - (14_517_000 as Weight) + (18_189_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - (16_801_000 as Weight) + (21_106_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - (17_133_000 as Weight) + (22_208_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) - // Storage: Alliance Members (r:4 w:0) // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - (95_370_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + (53_771_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } - // Storage: Alliance Members (r:4 w:0) + // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - (44_764_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + (41_912_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Alliance Members (r:3 w:2) @@ -432,23 +484,29 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - (44_013_000 as Weight) + (36_811_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } - // Storage: Alliance KickingMembers (r:1 w:0) - // Storage: Alliance Members (r:3 w:1) + // Storage: Alliance Members (r:4 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) - // Storage: Alliance DepositOf (r:1 w:1) - // Storage: System Account (r:1 w:1) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) - fn retire() -> Weight { - (60_183_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + // Storage: Alliance RetiringMembers (r:0 w:1) + fn give_retirement_notice() -> Weight { + (41_079_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } - // Storage: Alliance KickingMembers (r:1 w:0) + // Storage: Alliance RetiringMembers (r:1 w:1) + // Storage: Alliance Members (r:1 w:1) + // Storage: Alliance DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) + fn retire() -> Weight { + (42_703_000 as Weight) + 
.saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: Alliance DepositOf (r:1 w:1) @@ -456,29 +514,33 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - (67_467_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + (61_370_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) + /// The range of component `n` is `[1, 100]`. + /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { (0 as Weight) - // Standard Error: 16_000 - .saturating_add((2_673_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 7_000 - .saturating_add((224_000 as Weight).saturating_mul(l as Weight)) + // Standard Error: 2_000 + .saturating_add((1_385_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_000 + .saturating_add((119_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) + /// The range of component `n` is `[1, 100]`. + /// The range of component `l` is `[1, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { (0 as Weight) - // Standard Error: 343_000 - .saturating_add((59_025_000 as Weight).saturating_mul(n as Weight)) // Standard Error: 153_000 - .saturating_add((6_725_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((21_484_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 59_000 + .saturating_add((3_772_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } From ca1f81cd0864ae0614aaff07fa121d2d42110586 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Tue, 30 Aug 2022 11:12:19 +0200 Subject: [PATCH 074/348] Fix sporadic test failure (#12140) Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- Cargo.lock | 12 ++++++++++ frame/benchmarking/Cargo.toml | 1 + frame/benchmarking/src/tests.rs | 42 +++++++++++++++++++-------------- 3 files changed, 37 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bf06efd65aa53..d90de9da4f579 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2107,6 +2107,7 @@ dependencies = [ "log", "parity-scale-codec", "paste 1.0.6", + "rusty-fork", "scale-info", "serde", "sp-api", @@ -7672,6 +7673,17 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error 1.2.3", + "tempfile", +] + [[package]] name = "rw-stream-sink" version = "0.3.0" diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index d3a2704724f12..c098eee528233 100644 --- a/frame/benchmarking/Cargo.toml +++ 
b/frame/benchmarking/Cargo.toml @@ -32,6 +32,7 @@ sp-storage = { version = "6.0.0", default-features = false, path = "../../primit [dev-dependencies] hex-literal = "0.3.4" +rusty-fork = { version = "0.3.0", default-features = false } sp-keystore = { version = "0.12.0", path = "../../primitives/keystore" } [features] diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index be5c23fff17f6..b8a888767bf2c 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -139,6 +139,7 @@ mod benchmarks { use crate::{account, BenchmarkError, BenchmarkParameter, BenchmarkResult, BenchmarkingSetup}; use frame_support::{assert_err, assert_ok, ensure, traits::Get}; use frame_system::RawOrigin; + use rusty_fork::rusty_fork_test; use sp_std::prelude::*; // Additional used internally by the benchmark macro. @@ -409,24 +410,29 @@ mod benchmarks { }); } - /// Test that the benchmarking uses the correct values for each component and - /// that the number of components can be controlled with `VALUES_PER_COMPONENT`. - #[test] - fn test_values_per_component() { - let tests = vec![ - (Some("1"), Err("`VALUES_PER_COMPONENT` must be at least 2".into())), - (Some("asdf"), Err("Could not parse env var `VALUES_PER_COMPONENT` as u32.".into())), - (None, Ok(vec![0, 2, 4, 6, 8, 10])), - (Some("2"), Ok(vec![0, 10])), - (Some("4"), Ok(vec![0, 3, 6, 10])), - (Some("6"), Ok(vec![0, 2, 4, 6, 8, 10])), - (Some("10"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 10])), - (Some("11"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])), - (Some("99"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])), - ]; - - for (num, expected) in tests { - run_test_values_per_component(num, expected); + rusty_fork_test! { + /// Test that the benchmarking uses the correct values for each component and + /// that the number of components can be controlled with `VALUES_PER_COMPONENT`. + /// + /// NOTE: This test needs to run in its own process, since it + /// otherwise messes up the env variable for the other tests. 
+ #[test] + fn test_values_per_component() { + let tests = vec![ + (Some("1"), Err("`VALUES_PER_COMPONENT` must be at least 2".into())), + (Some("asdf"), Err("Could not parse env var `VALUES_PER_COMPONENT` as u32.".into())), + (None, Ok(vec![0, 2, 4, 6, 8, 10])), + (Some("2"), Ok(vec![0, 10])), + (Some("4"), Ok(vec![0, 3, 6, 10])), + (Some("6"), Ok(vec![0, 2, 4, 6, 8, 10])), + (Some("10"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 10])), + (Some("11"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])), + (Some("99"), Ok(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])), + ]; + + for (num, expected) in tests { + run_test_values_per_component(num, expected); + } } } From 0246883c404d498090f33e795feb8075fa8d3b6b Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 31 Aug 2022 12:26:13 +0100 Subject: [PATCH 075/348] Weight v1.5: Opaque Struct (#12138) * initial idea * update frame_support * update a bunch more * add ord * adjust RuntimeDbWeight * frame_system builds * re-export * frame_support tests pass * frame_executive compile * frame_executive builds * frame_system tests passing * pallet-utility tests pass * fix a bunch of pallets * more * phragmen * state-trie-migration * scheduler and referenda * pallet-election-provider-multi-phase * aura * staking * more * babe * balances * bunch more * sudo * transaction-payment * asset-tx-payment * last pallets * fix alliance merge * fix node template runtime * fix pallet-contracts cc @athei * fix node runtime * fix compile on runtime-benchmarks feature * comment * fix frame-support-test * fix more tests * weight regex * frame system works * fix a bunch * more * more * more * more * more * more fixes * update templates * fix contracts benchmarks * Update lib.rs * Update lib.rs * fix ui * make scalar saturating mul const * more const functions * scalar div * refactor using constant functions * move impl * fix overhead template * use compactas * Update lib.rs --- .maintain/frame-weight-template.hbs | 26 +- bin/node-template/pallets/template/src/lib.rs | 4 +- bin/node/executor/tests/basic.rs | 6 +- bin/node/executor/tests/fees.rs | 2 +- bin/node/runtime/src/impls.rs | 39 +- bin/node/runtime/src/lib.rs | 6 +- frame/alliance/src/benchmarking.rs | 8 +- frame/alliance/src/migration.rs | 2 +- frame/alliance/src/weights.rs | 318 ++-- frame/assets/src/weights.rs | 326 ++-- frame/atomic-swap/src/lib.rs | 9 +- frame/atomic-swap/src/tests.rs | 2 +- frame/aura/src/mock.rs | 2 +- frame/authority-discovery/src/lib.rs | 2 +- frame/authorship/src/lib.rs | 4 +- frame/babe/src/default_weights.rs | 8 +- frame/babe/src/lib.rs | 4 +- frame/babe/src/mock.rs | 2 +- frame/babe/src/tests.rs | 2 +- frame/bags-list/src/migrations.rs | 1 - frame/bags-list/src/weights.rs | 38 +- frame/balances/src/tests.rs | 8 +- frame/balances/src/tests_composite.rs | 2 +- frame/balances/src/tests_local.rs | 2 +- frame/balances/src/tests_reentrancy.rs | 2 +- frame/balances/src/weights.rs | 86 +- frame/benchmarking/src/utils.rs | 2 +- frame/benchmarking/src/weights.rs | 54 +- frame/bounties/src/migrations/v4.rs | 4 +- frame/bounties/src/weights.rs | 146 +- frame/child-bounties/src/tests.rs | 2 +- frame/child-bounties/src/weights.rs | 102 +- frame/collective/src/benchmarking.rs | 8 +- frame/collective/src/migrations/v4.rs | 4 +- frame/collective/src/tests.rs | 24 +- frame/collective/src/weights.rs | 210 +-- frame/contracts/rpc/src/lib.rs | 4 +- frame/contracts/src/benchmarking/mod.rs | 4 +- frame/contracts/src/chain_extension.rs | 7 +- frame/contracts/src/exec.rs | 46 +- frame/contracts/src/gas.rs | 24 +- 
frame/contracts/src/lib.rs | 12 +- frame/contracts/src/migration.rs | 6 +- frame/contracts/src/schedule.rs | 116 +- frame/contracts/src/storage.rs | 20 +- frame/contracts/src/tests.rs | 34 +- frame/contracts/src/wasm/code_cache.rs | 8 +- frame/contracts/src/wasm/mod.rs | 8 +- frame/contracts/src/wasm/runtime.rs | 8 +- frame/contracts/src/weights.rs | 1534 ++++++++--------- frame/conviction-voting/src/tests.rs | 3 +- frame/conviction-voting/src/weights.rs | 110 +- frame/democracy/src/lib.rs | 2 +- frame/democracy/src/tests.rs | 2 +- frame/democracy/src/weights.rs | 398 ++--- .../election-provider-multi-phase/src/lib.rs | 7 +- .../election-provider-multi-phase/src/mock.rs | 8 +- .../src/signed.rs | 12 +- .../src/unsigned.rs | 191 +- .../src/weights.rs | 158 +- .../election-provider-support/src/weights.rs | 34 +- frame/elections-phragmen/src/lib.rs | 31 +- frame/elections-phragmen/src/migrations/v3.rs | 4 +- frame/elections-phragmen/src/migrations/v4.rs | 4 +- frame/elections-phragmen/src/migrations/v5.rs | 2 +- frame/elections-phragmen/src/weights.rs | 182 +- frame/examples/basic/src/lib.rs | 4 +- frame/examples/basic/src/tests.rs | 4 +- frame/examples/basic/src/weights.rs | 38 +- frame/examples/offchain-worker/src/tests.rs | 2 +- frame/executive/README.md | 2 +- frame/executive/src/lib.rs | 43 +- frame/gilt/src/lib.rs | 2 +- frame/gilt/src/weights.rs | 122 +- frame/grandpa/src/default_weights.rs | 2 +- frame/grandpa/src/migrations/v4.rs | 4 +- frame/grandpa/src/mock.rs | 2 +- frame/grandpa/src/tests.rs | 2 +- frame/identity/src/tests.rs | 2 +- frame/identity/src/weights.rs | 310 ++-- frame/im-online/src/mock.rs | 2 +- frame/im-online/src/weights.rs | 22 +- frame/indices/src/mock.rs | 2 +- frame/indices/src/weights.rs | 62 +- frame/lottery/src/weights.rs | 74 +- frame/membership/src/lib.rs | 2 +- frame/membership/src/migrations/v4.rs | 4 +- frame/membership/src/weights.rs | 110 +- .../src/default_weights.rs | 2 +- frame/merkle-mountain-range/src/tests.rs | 4 +- frame/multisig/src/benchmarking.rs | 32 +- frame/multisig/src/lib.rs | 4 +- frame/multisig/src/tests.rs | 173 +- frame/multisig/src/weights.rs | 170 +- frame/nicks/src/lib.rs | 2 +- frame/node-authorization/src/weights.rs | 20 +- frame/nomination-pools/src/migration.rs | 1 + frame/nomination-pools/src/weights.rs | 198 +-- frame/offences/benchmarking/src/mock.rs | 4 +- frame/preimage/src/mock.rs | 2 +- frame/preimage/src/weights.rs | 158 +- frame/proxy/src/lib.rs | 8 +- frame/proxy/src/tests.rs | 2 +- frame/proxy/src/weights.rs | 174 +- frame/randomness-collective-flip/src/lib.rs | 4 +- frame/ranked-collective/src/tests.rs | 3 +- frame/ranked-collective/src/weights.rs | 98 +- frame/recovery/src/mock.rs | 2 +- frame/recovery/src/weights.rs | 122 +- frame/referenda/src/branch.rs | 16 +- frame/referenda/src/mock.rs | 6 +- frame/referenda/src/weights.rs | 326 ++-- frame/remark/src/weights.rs | 14 +- frame/scheduler/src/mock.rs | 2 +- frame/scheduler/src/tests.rs | 209 ++- frame/scheduler/src/weights.rs | 290 ++-- frame/scored-pool/src/lib.rs | 2 +- frame/scored-pool/src/mock.rs | 2 +- frame/session/src/lib.rs | 2 +- frame/session/src/migrations/v1.rs | 4 +- frame/session/src/mock.rs | 2 +- frame/session/src/weights.rs | 26 +- frame/society/src/lib.rs | 2 +- frame/society/src/mock.rs | 2 +- frame/staking/src/pallet/impls.rs | 2 +- frame/staking/src/tests.rs | 74 +- frame/staking/src/weights.rs | 458 ++--- frame/state-trie-migration/src/lib.rs | 34 +- frame/state-trie-migration/src/weights.rs | 70 +- frame/sudo/src/lib.rs | 3 +- 
frame/sudo/src/mock.rs | 3 +- frame/sudo/src/tests.rs | 64 +- frame/support/src/dispatch.rs | 47 +- frame/support/src/lib.rs | 2 +- frame/support/src/migrations.rs | 2 +- frame/support/src/traits/hooks.rs | 84 +- frame/support/src/weights.rs | 255 ++- frame/support/src/weights/block_weights.rs | 2 +- .../support/src/weights/extrinsic_weights.rs | 2 +- frame/support/src/weights/paritydb_weights.rs | 4 +- frame/support/src/weights/rocksdb_weights.rs | 4 +- frame/support/src/weights/weight_v2.rs | 483 ++++++ frame/support/test/tests/construct_runtime.rs | 14 +- frame/support/test/tests/pallet.rs | 34 +- .../test/tests/pallet_compatibility.rs | 17 +- .../tests/pallet_compatibility_instance.rs | 17 +- frame/support/test/tests/pallet_instance.rs | 28 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 6 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- .../tests/pallet_with_name_trait_is_valid.rs | 2 +- frame/system/benches/bench.rs | 7 +- .../system/src/extensions/check_mortality.rs | 9 +- frame/system/src/extensions/check_weight.rs | 159 +- frame/system/src/lib.rs | 12 +- frame/system/src/limits.rs | 22 +- frame/system/src/migrations/mod.rs | 6 +- frame/system/src/mock.rs | 6 +- frame/system/src/tests.rs | 51 +- frame/system/src/weights.rs | 62 +- frame/timestamp/src/mock.rs | 2 +- frame/timestamp/src/weights.rs | 18 +- frame/tips/src/migrations/v4.rs | 4 +- frame/tips/src/weights.rs | 98 +- .../asset-tx-payment/src/tests.rs | 80 +- frame/transaction-payment/src/lib.rs | 303 ++-- frame/transaction-payment/src/types.rs | 8 +- frame/transaction-storage/src/weights.rs | 42 +- frame/treasury/src/lib.rs | 4 +- frame/treasury/src/tests.rs | 2 +- frame/treasury/src/weights.rs | 90 +- frame/uniques/src/migration.rs | 7 +- frame/uniques/src/weights.rs | 354 ++-- frame/utility/src/lib.rs | 20 +- frame/utility/src/tests.rs | 30 +- frame/utility/src/weights.rs | 34 +- frame/vesting/src/mock.rs | 2 +- frame/vesting/src/weights.rs | 162 +- frame/whitelist/src/mock.rs | 2 +- frame/whitelist/src/tests.rs | 12 +- frame/whitelist/src/weights.rs | 54 +- test-utils/runtime/src/lib.rs | 4 +- .../frame/benchmarking-cli/src/block/bench.rs | 3 +- .../benchmarking-cli/src/overhead/README.md | 58 +- .../benchmarking-cli/src/overhead/weights.hbs | 11 +- .../benchmarking-cli/src/pallet/template.hbs | 14 +- 187 files changed, 5913 insertions(+), 4911 deletions(-) create mode 100644 frame/support/src/weights/weight_v2.rs diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index 593da06f4a7c0..c6df85194ac99 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -31,7 +31,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for {{pallet}}. 
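For orientation, here is a minimal before/after sketch of what this template change does to a generated weight function. It is not an extra hunk of the patch; it simply restates pallet_alliance's `vote` weight as it appears further down in this series, with the standard-error comments dropped. Once `Weight` is an opaque struct, bare `u64` arithmetic no longer compiles, so base terms are wrapped in `Weight::from_ref_time`, per-component slopes are folded in with `scalar_saturating_mul`, and the database read/write counts are passed as `RefTimeWeight`:

    // Before: Weight is a plain u64, so integer arithmetic is used directly.
    fn vote(x: u32, y: u32, ) -> Weight {
        (45_486_000 as Weight)
            .saturating_add((78_000 as Weight).saturating_mul(x as Weight))
            .saturating_add((128_000 as Weight).saturating_mul(y as Weight))
            .saturating_add(T::DbWeight::get().reads(3 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }

    // After: Weight is opaque, so construction and scaling go through its constructors.
    fn vote(x: u32, y: u32, ) -> Weight {
        Weight::from_ref_time(45_486_000 as RefTimeWeight)
            .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight))
            .saturating_add(Weight::from_ref_time(128_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight))
            .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight))
            .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
    }

Both snippets assume the surrounding `impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T>` block and the imports added in the hunk above; the constants are the benchmark values from the alliance weights file, not new numbers.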
@@ -64,22 +64,22 @@ impl WeightInfo for SubstrateWeight { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) + Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).scalar_saturating_mul({{cw.name}} as RefTimeWeight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as RefTimeWeight).saturating_mul({{cr.name}} as RefTimeWeight))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as RefTimeWeight)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight))) {{/each}} } {{/each}} @@ -99,22 +99,22 @@ impl WeightInfo for () { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) + Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).scalar_saturating_mul({{cw.name}} as RefTimeWeight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) + .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as RefTimeWeight).saturating_mul({{cr.name}} as RefTimeWeight))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) + .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as RefTimeWeight)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight))) {{/each}} } {{/each}} diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index f1519efe062bd..a9209a9040b6d 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -64,7 
+64,7 @@ pub mod pallet { impl Pallet { /// An example dispatchable that takes a singles value as a parameter, writes the value to /// storage and emits an event. This function must be dispatched by a signed extrinsic. - #[pallet::weight(10_000 + T::DbWeight::get().writes(1))] + #[pallet::weight(10_000 + T::DbWeight::get().writes(1).ref_time())] pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. @@ -81,7 +81,7 @@ pub mod pallet { } /// An example dispatchable that may throw a custom error. - #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1))] + #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1).ref_time())] pub fn cause_error(origin: OriginFor) -> DispatchResult { let _who = ensure_signed(origin)?; diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 468b9c8ac4c1b..3426df5872c35 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -18,7 +18,7 @@ use codec::{Decode, Encode, Joiner}; use frame_support::{ traits::Currency, - weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Weight}, }; use frame_system::{self, AccountInfo, EventRecord, Phase}; use sp_core::{storage::well_known_keys, traits::Externalities, NeverNativeValue}; @@ -733,7 +733,7 @@ fn deploying_wasm_contract_should_work() { function: Call::Contracts( pallet_contracts::Call::instantiate_with_code:: { value: 0, - gas_limit: 500_000_000, + gas_limit: Weight::from_ref_time(500_000_000), storage_deposit_limit: None, code: transfer_code, data: Vec::new(), @@ -746,7 +746,7 @@ fn deploying_wasm_contract_should_work() { function: Call::Contracts(pallet_contracts::Call::call:: { dest: sp_runtime::MultiAddress::Id(addr.clone()), value: 10, - gas_limit: 500_000_000, + gas_limit: Weight::from_ref_time(500_000_000), storage_deposit_limit: None, data: vec![0x00, 0x01, 0x02, 0x03], }), diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index 296d1d8e28798..dd1318254d9b7 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -208,7 +208,7 @@ fn transaction_fee_is_correct() { // we know that weight to fee multiplier is effect-less in block 1. 
// current weight of transfer = 200_000_000 // Linear weight to fee is 1:1 right now (1 weight = 1 unit of balance) - assert_eq!(weight_fee, weight as Balance); + assert_eq!(weight_fee, weight.ref_time() as Balance); balance_alice -= base_fee; balance_alice -= weight_fee; balance_alice -= tip; diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index eaf872d6f3fec..510955a5b7b3e 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -163,13 +163,13 @@ mod multiplier_tests { let previous_float = previous_float.max(min_multiplier().into_inner() as f64 / accuracy); // maximum tx weight - let m = max_normal() as f64; + let m = max_normal().ref_time() as f64; // block weight always truncated to max weight - let block_weight = (block_weight as f64).min(m); + let block_weight = (block_weight.ref_time() as f64).min(m); let v: f64 = AdjustmentVariable::get().to_float(); // Ideal saturation in terms of weight - let ss = target() as f64; + let ss = target().ref_time() as f64; // Current saturation in terms of weight let s = block_weight; @@ -197,9 +197,9 @@ mod multiplier_tests { fn truth_value_update_poc_works() { let fm = Multiplier::saturating_from_rational(1, 2); let test_set = vec![ - (0, fm), - (100, fm), - (1000, fm), + (Weight::zero(), fm), + (Weight::from_ref_time(100), fm), + (Weight::from_ref_time(1000), fm), (target(), fm), (max_normal() / 2, fm), (max_normal(), fm), @@ -229,7 +229,7 @@ mod multiplier_tests { #[test] fn multiplier_cannot_go_below_limit() { // will not go any further below even if block is empty. - run_with_system_weight(0, || { + run_with_system_weight(Weight::new(), || { let next = runtime_multiplier_update(min_multiplier()); assert_eq!(next, min_multiplier()); }) @@ -247,7 +247,7 @@ mod multiplier_tests { // 1 < 0.00001 * k * 0.1875 // 10^9 / 1875 < k // k > 533_333 ~ 18,5 days. - run_with_system_weight(0, || { + run_with_system_weight(Weight::new(), || { // start from 1, the default. let mut fm = Multiplier::one(); let mut iterations: u64 = 0; @@ -283,7 +283,8 @@ mod multiplier_tests { // `cargo test congested_chain_simulation -- --nocapture` to get some insight. // almost full. The entire quota of normal transactions is taken. - let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - 100; + let block_weight = BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap() - + Weight::from_ref_time(100); // Default substrate weight. let tx_weight = frame_support::weights::constants::ExtrinsicBaseWeight::get(); @@ -407,27 +408,27 @@ mod multiplier_tests { #[test] fn weight_to_fee_should_not_overflow_on_large_weights() { - let kb = 1024 as Weight; + let kb = Weight::from_ref_time(1024); let mb = kb * kb; let max_fm = Multiplier::saturating_from_integer(i128::MAX); // check that for all values it can compute, correctly. vec![ - 0, - 1, - 10, - 1000, + Weight::zero(), + Weight::one(), + Weight::from_ref_time(10), + Weight::from_ref_time(1000), kb, 10 * kb, 100 * kb, mb, 10 * mb, - 2147483647, - 4294967295, + Weight::from_ref_time(2147483647), + Weight::from_ref_time(4294967295), BlockWeights::get().max_block / 2, BlockWeights::get().max_block, - Weight::max_value() / 2, - Weight::max_value(), + Weight::MAX / 2, + Weight::MAX, ] .into_iter() .for_each(|i| { @@ -440,7 +441,7 @@ mod multiplier_tests { // Some values that are all above the target and will cause an increase. 
let t = target(); - vec![t + 100, t * 2, t * 4].into_iter().for_each(|i| { + vec![t + Weight::from_ref_time(100), t * 2, t * 4].into_iter().for_each(|i| { run_with_system_weight(i, || { let fm = runtime_multiplier_update(max_fm); // won't grow. The convert saturates everything. diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index f8ea312d1a496..5d4a21cea09b2 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -170,7 +170,7 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); /// We allow for 2 seconds of compute with a 6 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; +const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.scalar_saturating_mul(2); parameter_types! { pub const BlockHashCount: BlockNumber = 2400; @@ -1936,7 +1936,7 @@ impl_runtime_apis! { storage_deposit_limit: Option, input_data: Vec, ) -> pallet_contracts_primitives::ContractExecResult { - Contracts::bare_call(origin, dest, value, gas_limit, storage_deposit_limit, input_data, true) + Contracts::bare_call(origin, dest, value, Weight::from_ref_time(gas_limit), storage_deposit_limit, input_data, true) } fn instantiate( @@ -1949,7 +1949,7 @@ impl_runtime_apis! { salt: Vec, ) -> pallet_contracts_primitives::ContractInstantiateResult { - Contracts::bare_instantiate(origin, value, gas_limit, storage_deposit_limit, code, data, salt, true) + Contracts::bare_instantiate(origin, value, Weight::from_ref_time(gas_limit), storage_deposit_limit, code, data, salt, true) } fn upload_code( diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index 25dcb18b175ef..33cf933aba421 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -332,7 +332,7 @@ benchmarks_instance_pallet! { // Whitelist voter account from further DB operations. let voter_key = frame_system::Account::::hashed_key_for(&voter); frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -417,7 +417,7 @@ benchmarks_instance_pallet! { index, true, )?; - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -489,7 +489,7 @@ benchmarks_instance_pallet! { System::::set_block_number(T::BlockNumber::max_value()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); @@ -562,7 +562,7 @@ benchmarks_instance_pallet! 
{ // caller is prime, prime already votes aye by creating the proposal System::::set_block_number(T::BlockNumber::max_value()); - }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash.clone(), index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); diff --git a/frame/alliance/src/migration.rs b/frame/alliance/src/migration.rs index cd54eed7a9a75..010603902f0fd 100644 --- a/frame/alliance/src/migration.rs +++ b/frame/alliance/src/migration.rs @@ -25,7 +25,7 @@ pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); /// Wrapper for all migrations of this pallet. pub fn migrate, I: 'static>() -> Weight { let onchain_version = Pallet::::on_chain_storage_version(); - let mut weight: Weight = 0; + let mut weight: Weight = Weight::new(); if onchain_version < 1 { weight = weight.saturating_add(v0_to_v1::migrate::()); diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index aa9ebde528683..048bea3d1e5a4 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_alliance. @@ -80,30 +80,30 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[0, 90]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (22_575_000 as Weight) + Weight::from_ref_time(22_575_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((11_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(11_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 23_000 - .saturating_add((158_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((90_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(90_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((216_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(216_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Members (r:2 w:0) // Storage: AllianceMotion Voting (r:1 w:1) /// The range of component `x` is `[3, 10]`. /// The range of component `y` is `[2, 90]`. 
fn vote(x: u32, y: u32, ) -> Weight { - (45_486_000 as Weight) + Weight::from_ref_time(45_486_000 as RefTimeWeight) // Standard Error: 29_000 - .saturating_add((78_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((128_000 as Weight).saturating_mul(y as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(128_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion ProposalOf (r:1 w:1) @@ -111,11 +111,11 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Voting (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - (35_296_000 as Weight) + Weight::from_ref_time(35_296_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((171_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(171_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -126,15 +126,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - (39_252_000 as Weight) + Weight::from_ref_time(39_252_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add((75_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(75_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((107_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((170_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(170_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -146,13 +146,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
fn close_early_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - (50_357_000 as Weight) + Weight::from_ref_time(50_357_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((103_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(103_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((204_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(204_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -164,13 +164,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - (41_258_000 as Weight) + Weight::from_ref_time(41_258_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((111_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(111_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((186_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -183,15 +183,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (40_490_000 as Weight) + Weight::from_ref_time(40_490_000 as RefTimeWeight) // Standard Error: 16_000 - .saturating_add((119_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((93_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((195_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(195_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:2 w:3) // Storage: AllianceMotion Members (r:1 w:1) @@ -199,55 +199,55 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[0, 90]`. /// The range of component `z` is `[0, 100]`. 
fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { - (35_186_000 as Weight) + Weight::from_ref_time(35_186_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((161_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((127_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(127_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - (18_189_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_189_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - (21_106_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_106_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - (22_208_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_208_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - (53_771_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(53_771_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - (41_912_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(41_912_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Members (r:3 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - (36_811_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(36_811_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Members (r:4 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) @@ -255,18 +255,18 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Prime (r:0 w:1) // Storage: Alliance 
RetiringMembers (r:0 w:1) fn give_retirement_notice() -> Weight { - (41_079_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(41_079_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Alliance RetiringMembers (r:1 w:1) // Storage: Alliance Members (r:1 w:1) // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn retire() -> Weight { - (42_703_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(42_703_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) @@ -275,35 +275,35 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - (61_370_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(61_370_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_385_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(1_385_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((119_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 153_000 - .saturating_add((21_484_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(21_484_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 59_000 - .saturating_add((3_772_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(3_772_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -319,30 +319,30 @@ impl WeightInfo for () { /// The range of component `y` is `[0, 90]`. 
/// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (22_575_000 as Weight) + Weight::from_ref_time(22_575_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((11_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(11_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 23_000 - .saturating_add((158_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((90_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(90_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((216_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(216_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Members (r:2 w:0) // Storage: AllianceMotion Voting (r:1 w:1) /// The range of component `x` is `[3, 10]`. /// The range of component `y` is `[2, 90]`. fn vote(x: u32, y: u32, ) -> Weight { - (45_486_000 as Weight) + Weight::from_ref_time(45_486_000 as RefTimeWeight) // Standard Error: 29_000 - .saturating_add((78_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((128_000 as Weight).saturating_mul(y as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(128_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion ProposalOf (r:1 w:1) @@ -350,11 +350,11 @@ impl WeightInfo for () { // Storage: AllianceMotion Voting (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - (35_296_000 as Weight) + Weight::from_ref_time(35_296_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((171_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(171_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -365,15 +365,15 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - (39_252_000 as Weight) + Weight::from_ref_time(39_252_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add((75_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(75_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((107_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((170_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(170_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -385,13 +385,13 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - (50_357_000 as Weight) + Weight::from_ref_time(50_357_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((103_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(103_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((204_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(204_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -403,13 +403,13 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - (41_258_000 as Weight) + Weight::from_ref_time(41_258_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((111_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(111_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((186_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -422,15 +422,15 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - (40_490_000 as Weight) + Weight::from_ref_time(40_490_000 as RefTimeWeight) // Standard Error: 16_000 - .saturating_add((119_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((93_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((195_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(195_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:2 w:3) // Storage: AllianceMotion Members (r:1 w:1) @@ -438,55 +438,55 @@ impl WeightInfo for () { /// The range of component `y` is `[0, 90]`. /// The range of component `z` is `[0, 100]`. fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { - (35_186_000 as Weight) + Weight::from_ref_time(35_186_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((161_000 as Weight).saturating_mul(y as Weight)) + .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((127_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(127_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - (18_189_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_189_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - (21_106_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_106_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - (22_208_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_208_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - (53_771_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(53_771_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as 
RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - (41_912_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(41_912_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Members (r:3 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - (36_811_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(36_811_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Members (r:4 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) @@ -494,18 +494,18 @@ impl WeightInfo for () { // Storage: AllianceMotion Prime (r:0 w:1) // Storage: Alliance RetiringMembers (r:0 w:1) fn give_retirement_notice() -> Weight { - (41_079_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(41_079_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Alliance RetiringMembers (r:1 w:1) // Storage: Alliance Members (r:1 w:1) // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn retire() -> Weight { - (42_703_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(42_703_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) @@ -514,34 +514,34 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - (61_370_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(61_370_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. 
fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_385_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(1_385_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((119_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 153_000 - .saturating_add((21_484_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(21_484_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 59_000 - .saturating_add((3_772_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(3_772_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs index e8f1184cf570f..971728df46c24 100644 --- a/frame/assets/src/weights.rs +++ b/frame/assets/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_assets. 
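A minimal before/after sketch of the pattern these weight-file hunks apply (shown with the `create()` figures from the hunk below; `Weight::from_ref_time` and the `RefTimeWeight` alias are the imports added above):

    // old form: weights are bare u64 arithmetic
    fn create() -> Weight {
        (27_167_000 as Weight)
            .saturating_add(T::DbWeight::get().reads(1 as Weight))
            .saturating_add(T::DbWeight::get().writes(1 as Weight))
    }

    // new form: construct from reference time; per-component scaling uses
    // scalar_saturating_mul where the old code multiplied plain integers
    fn create() -> Weight {
        Weight::from_ref_time(27_167_000 as RefTimeWeight)
            .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
            .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
    }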
@@ -74,15 +74,15 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - (27_167_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(27_167_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - (15_473_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(15_473_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:5002 w:5001) @@ -90,168 +90,168 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Metadata (r:1 w:0) // Storage: Assets Approvals (r:501 w:500) fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 37_000 - .saturating_add((17_145_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) // Standard Error: 37_000 - .saturating_add((19_333_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(19_333_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 375_000 - .saturating_add((17_046_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(a as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(c as Weight))) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(Weight::from_ref_time(17_046_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - (30_819_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(30_819_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) 
+ .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - (35_212_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(35_212_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (47_401_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(47_401_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (42_300_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(42_300_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - (47_946_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(47_946_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - (21_670_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_670_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - (21_503_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_503_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - (18_158_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_158_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - (18_525_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_525_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets 
Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - (19_858_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_858_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - (18_045_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_045_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn set_metadata(n: u32, s: u32, ) -> Weight { - (32_395_000 as Weight) + Weight::from_ref_time(32_395_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - (32_893_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_893_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - (19_586_000 as Weight) + Weight::from_ref_time(19_586_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - (32_478_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_478_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - (17_143_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(17_143_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets 
Approvals (r:1 w:1) fn approve_transfer() -> Weight { - (36_389_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(36_389_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Approvals (r:1 w:1) // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - (61_854_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(61_854_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - (36_759_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(36_759_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - (37_753_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_753_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -259,15 +259,15 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - (27_167_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(27_167_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - (15_473_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(15_473_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:5002 w:5001) @@ -275,167 +275,167 @@ impl WeightInfo for () { // Storage: Assets Metadata (r:1 w:0) // Storage: Assets Approvals (r:501 w:500) fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 37_000 - .saturating_add((17_145_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) // Standard Error: 37_000 - .saturating_add((19_333_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(19_333_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 375_000 - .saturating_add((17_046_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - 
.saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(c as Weight))) - .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(a as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(c as Weight))) - .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(Weight::from_ref_time(17_046_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - (30_819_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(30_819_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - (35_212_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(35_212_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (47_401_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(47_401_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (42_300_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(42_300_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - (47_946_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + 
Weight::from_ref_time(47_946_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - (21_670_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_670_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - (21_503_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_503_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - (18_158_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_158_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - (18_525_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_525_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - (19_858_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_858_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - (18_045_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_045_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn set_metadata(n: u32, s: u32, ) -> Weight { - (32_395_000 as Weight) + Weight::from_ref_time(32_395_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: 
Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - (32_893_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_893_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - (19_586_000 as Weight) + Weight::from_ref_time(19_586_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - (32_478_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_478_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - (17_143_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(17_143_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn approve_transfer() -> Weight { - (36_389_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(36_389_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Approvals (r:1 w:1) // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - (61_854_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(61_854_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - (36_759_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(36_759_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - (37_753_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + 
Weight::from_ref_time(37_753_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 54643001ad6fc..1b6c62c55ee20 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -243,7 +243,7 @@ pub mod pallet { /// - `duration`: Locked duration of the atomic swap. For safety reasons, it is recommended /// that the revealer uses a shorter duration than the counterparty, to prevent the /// situation where the revealer reveals the proof too late around the end block. - #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).ref_time().saturating_add(40_000_000))] pub fn create_swap( origin: OriginFor, target: T::AccountId, @@ -280,9 +280,10 @@ pub mod pallet { /// the operation fails. This is used for weight calculation. #[pallet::weight( T::DbWeight::get().reads_writes(1, 1) - .saturating_add(40_000_000) - .saturating_add((proof.len() as Weight).saturating_mul(100)) .saturating_add(action.weight()) + .ref_time() + .saturating_add(40_000_000) + .saturating_add((proof.len() as u64).saturating_mul(100)) )] pub fn claim_swap( origin: OriginFor, @@ -317,7 +318,7 @@ pub mod pallet { /// /// - `target`: Target of the original atomic swap. /// - `hashed_proof`: Hashed proof of the original atomic swap. - #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).ref_time().saturating_add(40_000_000))] pub fn cancel_swap( origin: OriginFor, target: T::AccountId, diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index 688d3ff17f786..ffb548a1f29ed 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -30,7 +30,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 636a28692ba28..5023feeaf8aea 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -49,7 +49,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index a56d8e785f6ac..d5f9783f153c8 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -227,7 +227,7 @@ mod tests { pub const Period: BlockNumber = 1; pub const Offset: BlockNumber = 0; pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 8ddccfd9cf939..ff18e047db048 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -190,7 +190,7 @@ pub mod pallet { T::EventHandler::note_author(author); } - 0 + Weight::zero() } fn on_finalize(_: T::BlockNumber) { @@ -460,7 +460,7 @@ mod tests { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/babe/src/default_weights.rs b/frame/babe/src/default_weights.rs index 57c74323b7932..cc1d11108b2e1 100644 --- a/frame/babe/src/default_weights.rs +++ b/frame/babe/src/default_weights.rs @@ -38,8 +38,8 @@ impl crate::WeightInfo for () { const MAX_NOMINATORS: u64 = 200; // checking membership proof - (35 * WEIGHT_PER_MICROS) - .saturating_add((175 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) + let ref_time_weight = (35 * WEIGHT_PER_MICROS) + .saturating_add((175 * WEIGHT_PER_NANOS).scalar_saturating_mul(validator_count)) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof .saturating_add(110 * WEIGHT_PER_MICROS) @@ -47,6 +47,8 @@ impl crate::WeightInfo for () { .saturating_add(110 * WEIGHT_PER_MICROS) .saturating_add(25 * WEIGHT_PER_MICROS * MAX_NOMINATORS) .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) - .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) + .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)); + + ref_time_weight } } diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 1effc2c1989fa..48ca62a5b1a09 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -336,7 +336,7 @@ pub mod pallet { /// Initialization fn on_initialize(now: BlockNumberFor) -> Weight { Self::initialize(now); - 0 + Weight::zero() } /// Block finalization @@ -1008,6 +1008,6 @@ pub mod migrations { writes += 3; - T::DbWeight::get().writes(writes) + T::DbWeight::get().reads(reads) + T::DbWeight::get().reads_writes(reads, writes) } } diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index c2ba3c2be06d8..3a6348404b9a7 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -66,7 +66,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index ece0883387709..2f967b658e396 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -852,7 +852,7 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight > 0); + assert!(info.weight > Weight::zero()); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/frame/bags-list/src/migrations.rs b/frame/bags-list/src/migrations.rs index a77beb23bd667..49b7d136125e2 100644 --- a/frame/bags-list/src/migrations.rs +++ b/frame/bags-list/src/migrations.rs @@ -21,7 +21,6 @@ use codec::{Decode, Encode}; use core::marker::PhantomData; use frame_election_provider_support::ScoreProvider; use frame_support::traits::OnRuntimeUpgrade; -use sp_runtime::traits::Zero; #[cfg(feature = "try-runtime")] use frame_support::ensure; diff --git a/frame/bags-list/src/weights.rs b/frame/bags-list/src/weights.rs index a554a9bd4ad1a..049967e3024c0 100644 --- a/frame/bags-list/src/weights.rs +++ b/frame/bags-list/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bags_list. @@ -57,18 +57,18 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:4 w:4) // Storage: BagsList ListBags (r:1 w:1) fn rebag_non_terminal() -> Weight { - (55_040_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(55_040_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn rebag_terminal() -> Weight { - (53_671_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(53_671_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: BagsList ListNodes (r:4 w:4) // Storage: Staking Bonded (r:2 w:0) @@ -76,9 +76,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: BagsList ListBags (r:1 w:1) fn put_in_front_of() -> Weight { - (56_410_000 as Weight) - .saturating_add(T::DbWeight::get().reads(10 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(56_410_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(10 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } } @@ -89,18 +89,18 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:4 w:4) // Storage: BagsList ListBags (r:1 w:1) fn rebag_non_terminal() -> Weight { - (55_040_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - 
.saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(55_040_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn rebag_terminal() -> Weight { - (53_671_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(53_671_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: BagsList ListNodes (r:4 w:4) // Storage: Staking Bonded (r:2 w:0) @@ -108,8 +108,8 @@ impl WeightInfo for () { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: BagsList ListBags (r:1 w:1) fn put_in_front_of() -> Weight { - (56_410_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(10 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(56_410_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(10 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } } diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index a9e44f085977a..6605af530563d 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -188,14 +188,14 @@ macro_rules! decl_tests { ChargeTransactionPayment::from(1), &1, CALL, - &info_from_weight(1), + &info_from_weight(Weight::one()), 1, ).is_err()); assert_ok!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, - &info_from_weight(1), + &info_from_weight(Weight::one()), 1, )); @@ -206,14 +206,14 @@ macro_rules! decl_tests { ChargeTransactionPayment::from(1), &1, CALL, - &info_from_weight(1), + &info_from_weight(Weight::one()), 1, ).is_err()); assert!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, - &info_from_weight(1), + &info_from_weight(Weight::one()), 1, ).is_err()); }); diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 4ab913cf1411a..266c37063d2f8 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -46,7 +46,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 48c6574c3b39f..ebb82f41a545c 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -47,7 +47,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index 4c028840d553c..fbfd62eb63259 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -51,7 +51,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index f612d31997996..17b2541e0a998 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_balances. @@ -58,45 +58,45 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (41_860_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(41_860_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (32_760_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_760_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - (22_279_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_279_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - (25_488_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_488_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - (42_190_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(42_190_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - (37_789_000 
as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(37_789_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn force_unreserve() -> Weight { - (20_056_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(20_056_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -104,44 +104,44 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (41_860_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(41_860_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (32_760_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(32_760_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - (22_279_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_279_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - (25_488_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_488_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - (42_190_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(42_190_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - (37_789_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(37_789_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn force_unreserve() -> Weight { - (20_056_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(20_056_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + 
.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 8c642f74358db..b483208e3ef69 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -127,7 +127,7 @@ pub struct BenchmarkResult { impl BenchmarkResult { pub fn from_weight(w: Weight) -> Self { - Self { extrinsic_time: (w as u128) / 1_000, ..Default::default() } + Self { extrinsic_time: (w.ref_time() / 1_000) as u128, ..Default::default() } } } diff --git a/frame/benchmarking/src/weights.rs b/frame/benchmarking/src/weights.rs index 8b36601940cf3..dc2ca26e1af02 100644 --- a/frame/benchmarking/src/weights.rs +++ b/frame/benchmarking/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for frame_benchmarking. @@ -58,75 +58,75 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn addition(_i: u32, ) -> Weight { - (103_000 as Weight) + Weight::from_ref_time(103_000 as RefTimeWeight) } fn subtraction(_i: u32, ) -> Weight { - (105_000 as Weight) + Weight::from_ref_time(105_000 as RefTimeWeight) } fn multiplication(_i: u32, ) -> Weight { - (113_000 as Weight) + Weight::from_ref_time(113_000 as RefTimeWeight) } fn division(_i: u32, ) -> Weight { - (102_000 as Weight) + Weight::from_ref_time(102_000 as RefTimeWeight) } fn hashing(_i: u32, ) -> Weight { - (20_865_902_000 as Weight) + Weight::from_ref_time(20_865_902_000 as RefTimeWeight) } fn sr25519_verification(i: u32, ) -> Weight { - (319_000 as Weight) + Weight::from_ref_time(319_000 as RefTimeWeight) // Standard Error: 8_000 - .saturating_add((47_171_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(Weight::from_ref_time(47_171_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) fn storage_read(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((2_110_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(2_110_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) fn storage_write(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((372_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(372_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } } // For backwards compatibility and tests impl WeightInfo for () { fn addition(_i: u32, ) -> Weight { - (103_000 as Weight) + Weight::from_ref_time(103_000 as RefTimeWeight) } fn subtraction(_i: u32, ) -> Weight { - (105_000 as Weight) + Weight::from_ref_time(105_000 as RefTimeWeight) } fn multiplication(_i: u32, ) -> Weight { - (113_000 as Weight) + Weight::from_ref_time(113_000 as RefTimeWeight) } fn 
division(_i: u32, ) -> Weight { - (102_000 as Weight) + Weight::from_ref_time(102_000 as RefTimeWeight) } fn hashing(_i: u32, ) -> Weight { - (20_865_902_000 as Weight) + Weight::from_ref_time(20_865_902_000 as RefTimeWeight) } fn sr25519_verification(i: u32, ) -> Weight { - (319_000 as Weight) + Weight::from_ref_time(319_000 as RefTimeWeight) // Standard Error: 8_000 - .saturating_add((47_171_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(Weight::from_ref_time(47_171_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) fn storage_read(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((2_110_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(2_110_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) fn storage_write(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((372_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(372_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } } diff --git a/frame/bounties/src/migrations/v4.rs b/frame/bounties/src/migrations/v4.rs index 8f5f3ebe55bf4..2f81c97127bcd 100644 --- a/frame/bounties/src/migrations/v4.rs +++ b/frame/bounties/src/migrations/v4.rs @@ -54,7 +54,7 @@ pub fn migrate< target: "runtime::bounties", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return 0 + return Weight::zero() } let on_chain_storage_version =
<P as GetStorageVersion>
::on_chain_storage_version(); @@ -105,7 +105,7 @@ pub fn migrate< "Attempted to apply migration to v4 but failed because storage version is {:?}", on_chain_storage_version, ); - 0 + Weight::zero() } } diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index d3e054cfc6351..27a23cb4ffeae 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bounties. @@ -65,88 +65,88 @@ impl WeightInfo for SubstrateWeight { // Storage: Bounties BountyDescriptions (r:0 w:1) // Storage: Bounties Bounties (r:0 w:1) fn propose_bounty(_d: u32, ) -> Weight { - (28_903_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(28_903_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) fn approve_bounty() -> Weight { - (10_997_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(10_997_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) fn propose_curator() -> Weight { - (8_967_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(8_967_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - (28_665_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_665_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - (25_141_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(25_141_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) fn award_bounty() -> Weight { - (21_295_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_295_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) // Storage: Bounties 
BountyDescriptions (r:0 w:1) fn claim_bounty() -> Weight { - (67_951_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(67_951_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_proposed() -> Weight { - (33_654_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(33_654_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:2 w:2) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_active() -> Weight { - (50_582_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(50_582_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) fn extend_bounty_expiry() -> Weight { - (18_322_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_322_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:2 w:2) fn spend_funds(b: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add((29_233_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(Weight::from_ref_time(29_233_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) } } @@ -157,87 +157,87 @@ impl WeightInfo for () { // Storage: Bounties BountyDescriptions (r:0 w:1) // Storage: Bounties Bounties (r:0 w:1) fn propose_bounty(_d: u32, ) -> Weight { - (28_903_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(28_903_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) fn approve_bounty() -> 
Weight { - (10_997_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(10_997_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) fn propose_curator() -> Weight { - (8_967_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(8_967_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - (28_665_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_665_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - (25_141_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(25_141_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) fn award_bounty() -> Weight { - (21_295_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_295_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn claim_bounty() -> Weight { - (67_951_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(67_951_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_proposed() -> Weight { - (33_654_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(33_654_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:2 w:2) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_active() -> Weight { - (50_582_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + 
Weight::from_ref_time(50_582_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:1) fn extend_bounty_expiry() -> Weight { - (18_322_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_322_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:2 w:2) fn spend_funds(b: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add((29_233_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) + .saturating_add(Weight::from_ref_time(29_233_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) } } diff --git a/frame/child-bounties/src/tests.rs b/frame/child-bounties/src/tests.rs index 2584445071471..f42715f14bbee 100644 --- a/frame/child-bounties/src/tests.rs +++ b/frame/child-bounties/src/tests.rs @@ -60,7 +60,7 @@ frame_support::construct_runtime!( ); parameter_types! { - pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockWeight: Weight = Weight::from_ref_time(1024); pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); } diff --git a/frame/child-bounties/src/weights.rs b/frame/child-bounties/src/weights.rs index ad08e00149a30..c5d00d6ed0bd4 100644 --- a/frame/child-bounties/src/weights.rs +++ b/frame/child-bounties/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_child_bounties. 
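
The same mechanical rewrite repeats in every weight file below. As a rough, self-contained sketch of the pattern (the function name and the constants are placeholders, not taken from any pallet): the old bare scalar becomes the reference-time component of an explicit `Weight`, and multiplying by a complexity parameter now goes through `scalar_saturating_mul`.

use frame_support::weights::{RefTimeWeight, Weight};

// Placeholder example, not part of the patch. Old form:
//     (10_000_000 as Weight)
//         .saturating_add((50_000 as Weight).saturating_mul(n as Weight))
// New form: the scalar is wrapped via `Weight::from_ref_time`, and the
// per-item factor is applied with `scalar_saturating_mul`.
fn example_extrinsic(n: u32) -> Weight {
    Weight::from_ref_time(10_000_000 as RefTimeWeight)
        .saturating_add(
            Weight::from_ref_time(50_000 as RefTimeWeight)
                .scalar_saturating_mul(n as RefTimeWeight),
        )
}
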
@@ -64,51 +64,51 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) // Storage: ChildBounties ChildBounties (r:0 w:1) fn add_child_bounty(d: u32, ) -> Weight { - (51_064_000 as Weight) + Weight::from_ref_time(51_064_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) fn propose_curator() -> Weight { - (15_286_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(15_286_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - (29_929_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_929_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: Bounties Bounties (r:1 w:0) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - (32_449_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(32_449_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) fn award_child_bounty() -> Weight { - (23_793_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(23_793_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn claim_child_bounty() -> Weight { - (67_529_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(67_529_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -117,9 +117,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:2 w:2) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_added() -> Weight { - (48_436_000 as Weight) - 
.saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(48_436_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -128,9 +128,9 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_active() -> Weight { - (58_044_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(7 as Weight)) + Weight::from_ref_time(58_044_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(7 as RefTimeWeight)) } } @@ -143,51 +143,51 @@ impl WeightInfo for () { // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) // Storage: ChildBounties ChildBounties (r:0 w:1) fn add_child_bounty(d: u32, ) -> Weight { - (51_064_000 as Weight) + Weight::from_ref_time(51_064_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) fn propose_curator() -> Weight { - (15_286_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(15_286_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - (29_929_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_929_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: Bounties Bounties (r:1 w:0) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - (32_449_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(32_449_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) fn award_child_bounty() -> Weight { - (23_793_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(23_793_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + 
.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn claim_child_bounty() -> Weight { - (67_529_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(67_529_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -196,9 +196,9 @@ impl WeightInfo for () { // Storage: System Account (r:2 w:2) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_added() -> Weight { - (48_436_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(48_436_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -207,8 +207,8 @@ impl WeightInfo for () { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_active() -> Weight { - (58_044_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(7 as Weight)) + Weight::from_ref_time(58_044_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(7 as RefTimeWeight)) } } diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index 076afcd203030..b80a4aef28d38 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -355,7 +355,7 @@ benchmarks_instance_pallet! { // Whitelist voter account from further DB operations. let voter_key = frame_system::Account::::hashed_key_for(&voter); frame_benchmarking::benchmarking::add_to_whitelist(voter_key.into()); - }: close(SystemOrigin::Signed(voter), last_hash, index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(voter), last_hash, index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(Collective::::proposals().len(), (p - 1) as usize); @@ -436,7 +436,7 @@ benchmarks_instance_pallet! { index, approve, )?; - }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage) verify { // The last proposal is removed. assert_eq!(Collective::::proposals().len(), (p - 1) as usize); @@ -511,7 +511,7 @@ benchmarks_instance_pallet! { assert_eq!(Collective::::proposals().len(), p as usize); // Prime nay will close it as disapproved - }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(caller), last_hash, index, Weight::MAX, bytes_in_storage) verify { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved { proposal_hash: last_hash }.into()); @@ -583,7 +583,7 @@ benchmarks_instance_pallet! 
{ assert_eq!(Collective::::proposals().len(), p as usize); // Prime aye will close it as approved - }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::max_value(), bytes_in_storage) + }: close(SystemOrigin::Signed(caller), last_hash, p - 1, Weight::MAX, bytes_in_storage) verify { assert_eq!(Collective::::proposals().len(), (p - 1) as usize); assert_last_event::(Event::Executed { proposal_hash: last_hash, result: Err(DispatchError::BadOrigin) }.into()); diff --git a/frame/collective/src/migrations/v4.rs b/frame/collective/src/migrations/v4.rs index 4e6cd05584138..483c3f9fa9e69 100644 --- a/frame/collective/src/migrations/v4.rs +++ b/frame/collective/src/migrations/v4.rs @@ -45,7 +45,7 @@ pub fn migrate::on_chain_storage_version(); @@ -70,7 +70,7 @@ pub fn migrate::WrongProposalWeight ); assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); @@ -301,7 +307,7 @@ fn proposal_weight_limit_ignored_on_disapprove() { Origin::signed(4), hash, 0, - proposal_weight - 100, + proposal_weight - Weight::from_ref_time(100), proposal_len )); }) @@ -687,7 +693,11 @@ fn correct_validate_and_get_proposal() { Error::::WrongProposalLength ); assert_noop!( - Collective::validate_and_get_proposal(&hash, length, weight - 10), + Collective::validate_and_get_proposal( + &hash, + length, + weight - Weight::from_ref_time(10) + ), Error::::WrongProposalWeight ); let res = Collective::validate_and_get_proposal(&hash, length, weight); @@ -1196,18 +1206,18 @@ fn close_disapprove_does_not_care_about_weight_or_len() { assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); // It will not close with bad weight/len information assert_noop!( - Collective::close(Origin::signed(2), hash, 0, 0, 0), + Collective::close(Origin::signed(2), hash, 0, Weight::zero(), 0), Error::::WrongProposalLength, ); assert_noop!( - Collective::close(Origin::signed(2), hash, 0, 0, proposal_len), + Collective::close(Origin::signed(2), hash, 0, Weight::zero(), proposal_len), Error::::WrongProposalWeight, ); // Now we make the proposal fail assert_ok!(Collective::vote(Origin::signed(1), hash, 0, false)); assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false)); // It can close even if the weight/len information is bad - assert_ok!(Collective::close(Origin::signed(2), hash, 0, 0, 0)); + assert_ok!(Collective::close(Origin::signed(2), hash, 0, Weight::zero(), 0)); }) } diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index 2f5c6f590a999..a0cc64cc2408d 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_collective. 
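
Outside the generated weight files the change surfaces as explicit constructors at every call site. A minimal sketch of the new call shapes, mirroring the benchmarking and test updates above; the names and numbers here are placeholders.

use frame_support::weights::Weight;

// Placeholder example, not part of the patch.
fn weight_call_sites_after_migration() {
    let limit = Weight::from_ref_time(1024); // was: 1024
    let none = Weight::zero();               // was: 0
    let max = Weight::MAX;                   // was: Weight::max_value()
    // Arithmetic still works between `Weight` values.
    let reduced = limit - Weight::from_ref_time(100);
    assert_eq!(none.ref_time(), 0);
    assert!(reduced.ref_time() < max.ref_time());
}
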
@@ -64,36 +64,36 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Voting (r:100 w:100) // Storage: Council Prime (r:0 w:1) fn set_members(m: u32, n: u32, p: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 12_000 - .saturating_add((10_280_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(10_280_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((126_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(126_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((13_310_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(13_310_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } // Storage: Council Members (r:1 w:0) fn execute(b: u32, m: u32, ) -> Weight { - (16_819_000 as Weight) + Weight::from_ref_time(16_819_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add((33_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:0) fn propose_execute(b: u32, m: u32, ) -> Weight { - (18_849_000 as Weight) + Weight::from_ref_time(18_849_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add((56_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) @@ -101,52 +101,52 @@ impl WeightInfo for SubstrateWeight { // Storage: Council ProposalCount (r:1 w:1) // Storage: Council Voting (r:0 w:1) fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - (22_204_000 as Weight) + Weight::from_ref_time(22_204_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((8_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(8_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((49_000 as Weight).saturating_mul(m as Weight)) + 
.saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((180_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(180_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council Voting (r:1 w:1) fn vote(m: u32, ) -> Weight { - (30_941_000 as Weight) + Weight::from_ref_time(30_941_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((77_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - (32_485_000 as Weight) + Weight::from_ref_time(32_485_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((39_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(39_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - (33_487_000 as Weight) + Weight::from_ref_time(33_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((66_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((157_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(157_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) @@ -154,13 +154,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_disapproved(m: u32, p: u32, ) -> Weight { - (33_494_000 as Weight) + 
Weight::from_ref_time(33_494_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((58_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(58_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) @@ -168,25 +168,25 @@ impl WeightInfo for SubstrateWeight { // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - (36_566_000 as Weight) + Weight::from_ref_time(36_566_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((63_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(63_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((158_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Proposals (r:1 w:1) // Storage: Council Voting (r:0 w:1) // Storage: Council ProposalOf (r:0 w:1) fn disapprove_proposal(p: u32, ) -> Weight { - (20_159_000 as Weight) + Weight::from_ref_time(20_159_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((173_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } } @@ -197,36 +197,36 @@ impl WeightInfo for () { // Storage: Council Voting (r:100 w:100) // Storage: Council Prime (r:0 w:1) fn set_members(m: u32, n: u32, p: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 12_000 - .saturating_add((10_280_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(10_280_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((126_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(126_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((13_310_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - 
.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(13_310_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } // Storage: Council Members (r:1 w:0) fn execute(b: u32, m: u32, ) -> Weight { - (16_819_000 as Weight) + Weight::from_ref_time(16_819_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add((33_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:0) fn propose_execute(b: u32, m: u32, ) -> Weight { - (18_849_000 as Weight) + Weight::from_ref_time(18_849_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add((56_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) @@ -234,52 +234,52 @@ impl WeightInfo for () { // Storage: Council ProposalCount (r:1 w:1) // Storage: Council Voting (r:0 w:1) fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - (22_204_000 as Weight) + Weight::from_ref_time(22_204_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((8_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(8_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((49_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((180_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(180_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) // Storage: Council Voting (r:1 w:1) fn vote(m: u32, ) -> Weight { - (30_941_000 as Weight) + Weight::from_ref_time(30_941_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((77_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + 
.saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - (32_485_000 as Weight) + Weight::from_ref_time(32_485_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((39_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(39_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - (33_487_000 as Weight) + Weight::from_ref_time(33_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((66_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((157_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(157_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) @@ -287,13 +287,13 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_disapproved(m: u32, p: u32, ) -> Weight { - (33_494_000 as Weight) + Weight::from_ref_time(33_494_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((58_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(58_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((124_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) @@ -301,24 +301,24 @@ impl WeightInfo for () { // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_approved(b: u32, m: u32, p: 
u32, ) -> Weight { - (36_566_000 as Weight) + Weight::from_ref_time(36_566_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((63_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(63_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((158_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Council Proposals (r:1 w:1) // Storage: Council Voting (r:0 w:1) // Storage: Council ProposalOf (r:0 w:1) fn disapprove_proposal(p: u32, ) -> Weight { - (20_159_000 as Weight) + Weight::from_ref_time(20_159_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((173_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } } diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index 0df8f90237ed3..1df7a5753f77e 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -229,7 +229,7 @@ where call_request; let value: Balance = decode_hex(value, "balance")?; - let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + let gas_limit: u64 = decode_hex(gas_limit, "weight")?; let storage_deposit_limit: Option = storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; limit_gas(gas_limit)?; @@ -259,7 +259,7 @@ where } = instantiate_request; let value: Balance = decode_hex(value, "balance")?; - let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + let gas_limit: u64 = decode_hex(gas_limit, "weight")?; let storage_deposit_limit: Option = storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; limit_gas(gas_limit)?; diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 1a0b735ac76ce..e29cb51728e1e 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -2853,8 +2853,8 @@ benchmarks! { println!("{:#?}", Schedule::::default()); println!("###############################################"); println!("Lazy deletion throughput per block (empty queue, full queue): {}, {}", - weight_limit / weight_per_key, - (weight_limit - weight_per_queue_item * queue_depth) / weight_per_key, + weight_limit / weight_per_key.ref_time(), + (weight_limit - weight_per_queue_item * queue_depth) / weight_per_key.ref_time(), ); } #[cfg(not(feature = "std"))] diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index 29f8ad44a56ac..d0e0cf5cf95cb 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -238,7 +238,7 @@ where /// /// Weight is synonymous with gas in substrate. 
pub fn charge_weight(&mut self, amount: Weight) -> Result { - self.inner.runtime.charge_gas(RuntimeCosts::ChainExtension(amount)) + self.inner.runtime.charge_gas(RuntimeCosts::ChainExtension(amount.ref_time())) } /// Adjust a previously charged amount down to its actual amount. @@ -248,7 +248,7 @@ where pub fn adjust_weight(&mut self, charged: ChargedAmount, actual_weight: Weight) { self.inner .runtime - .adjust_gas(charged, RuntimeCosts::ChainExtension(actual_weight)) + .adjust_gas(charged, RuntimeCosts::ChainExtension(actual_weight.ref_time())) } /// Grants access to the execution environment of the current contract call. @@ -411,7 +411,8 @@ where buffer, allow_skip, |len| { - weight_per_byte.map(|w| RuntimeCosts::ChainExtension(w.saturating_mul(len.into()))) + weight_per_byte + .map(|w| RuntimeCosts::ChainExtension(w.ref_time().saturating_mul(len.into()))) }, ) } diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 3b186236dd0fa..81e1aef92cbfc 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -664,7 +664,7 @@ where debug_message: Option<&'a mut Vec>, ) -> Result<(Self, E), ExecError> { let (first_frame, executable, nonce) = - Self::new_frame(args, value, gas_meter, storage_meter, 0, schedule)?; + Self::new_frame(args, value, gas_meter, storage_meter, Weight::zero(), schedule)?; let stack = Self { origin, schedule, @@ -1089,7 +1089,7 @@ where delegated_call: Some(DelegatedCall { executable, caller: self.caller().clone() }), }, value, - 0, + Weight::zero(), )?; self.run(executable, input_data) } @@ -1825,7 +1825,7 @@ mod tests { let value = Default::default(); let recurse_ch = MockLoader::insert(Call, |ctx, _| { // Try to call into yourself. - let r = ctx.ext.call(0, BOB, 0, vec![], true); + let r = ctx.ext.call(Weight::zero(), BOB, 0, vec![], true); REACHED_BOTTOM.with(|reached_bottom| { let mut reached_bottom = reached_bottom.borrow_mut(); @@ -1880,7 +1880,7 @@ mod tests { .with(|caller| *caller.borrow_mut() = Some(ctx.ext.caller().clone())); // Call into CHARLIE contract. - assert_matches!(ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_)); + assert_matches!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), Ok(_)); exec_success() }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { @@ -2011,7 +2011,7 @@ mod tests { // ALICE is the origin of the call stack assert!(ctx.ext.caller_is_origin()); // BOB calls CHARLIE - ctx.ext.call(0, CHARLIE, 0, vec![], true) + ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true) }); ExtBuilder::default().build().execute_with(|| { @@ -2041,7 +2041,7 @@ mod tests { assert_eq!(*ctx.ext.address(), BOB); // Call into charlie contract. - assert_matches!(ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_)); + assert_matches!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), Ok(_)); exec_success() }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { @@ -2190,7 +2190,7 @@ mod tests { let (address, output) = ctx .ext .instantiate( - 0, + Weight::zero(), dummy_ch, ::Currency::minimum_balance(), vec![], @@ -2250,7 +2250,7 @@ mod tests { // Instantiate a contract and save it's address in `instantiated_contract_address`. 
assert_matches!( ctx.ext.instantiate( - 0, + Weight::zero(), dummy_ch, ::Currency::minimum_balance(), vec![], @@ -2342,13 +2342,13 @@ mod tests { let info = ctx.ext.contract_info(); assert_eq!(info.storage_deposit, 0); info.storage_deposit = 42; - assert_eq!(ctx.ext.call(0, CHARLIE, 0, vec![], true), exec_trapped()); + assert_eq!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), exec_trapped()); assert_eq!(ctx.ext.contract_info().storage_deposit, 42); } exec_success() }); let code_charlie = MockLoader::insert(Call, |ctx, _| { - assert!(ctx.ext.call(0, BOB, 0, vec![99], true).is_ok()); + assert!(ctx.ext.call(Weight::zero(), BOB, 0, vec![99], true).is_ok()); exec_trapped() }); @@ -2377,7 +2377,7 @@ mod tests { fn recursive_call_during_constructor_fails() { let code = MockLoader::insert(Constructor, |ctx, _| { assert_matches!( - ctx.ext.call(0, ctx.ext.address().clone(), 0, vec![], true), + ctx.ext.call(Weight::zero(), ctx.ext.address().clone(), 0, vec![], true), Err(ExecError{error, ..}) if error == >::ContractNotFound.into() ); exec_success() @@ -2479,7 +2479,7 @@ mod tests { // call the contract passed as input with disabled reentry let code_bob = MockLoader::insert(Call, |ctx, _| { let dest = Decode::decode(&mut ctx.input_data.as_ref()).unwrap(); - ctx.ext.call(0, dest, 0, vec![], false) + ctx.ext.call(Weight::zero(), dest, 0, vec![], false) }); let code_charlie = MockLoader::insert(Call, |_, _| exec_success()); @@ -2524,7 +2524,7 @@ mod tests { fn call_deny_reentry() { let code_bob = MockLoader::insert(Call, |ctx, _| { if ctx.input_data[0] == 0 { - ctx.ext.call(0, CHARLIE, 0, vec![], false) + ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], false) } else { exec_success() } @@ -2532,7 +2532,7 @@ mod tests { // call BOB with input set to '1' let code_charlie = - MockLoader::insert(Call, |ctx, _| ctx.ext.call(0, BOB, 0, vec![1], true)); + MockLoader::insert(Call, |ctx, _| ctx.ext.call(Weight::zero(), BOB, 0, vec![1], true)); ExtBuilder::default().build().execute_with(|| { let schedule = ::Schedule::get(); @@ -2695,18 +2695,30 @@ mod tests { let success_code = MockLoader::insert(Constructor, |_, _| exec_success()); let succ_fail_code = MockLoader::insert(Constructor, move |ctx, _| { ctx.ext - .instantiate(0, fail_code, ctx.ext.minimum_balance() * 100, vec![], &[]) + .instantiate( + Weight::zero(), + fail_code, + ctx.ext.minimum_balance() * 100, + vec![], + &[], + ) .ok(); exec_success() }); let succ_succ_code = MockLoader::insert(Constructor, move |ctx, _| { let (account_id, _) = ctx .ext - .instantiate(0, success_code, ctx.ext.minimum_balance() * 100, vec![], &[]) + .instantiate( + Weight::zero(), + success_code, + ctx.ext.minimum_balance() * 100, + vec![], + &[], + ) .unwrap(); // a plain call should not influence the account counter - ctx.ext.call(0, account_id, 0, vec![], false).unwrap(); + ctx.ext.call(Weight::zero(), account_id, 0, vec![], false).unwrap(); exec_success() }); diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 41df125da0170..ae20e4eeb0def 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -107,7 +107,7 @@ where /// /// Passing `0` as amount is interpreted as "all remaining gas". pub fn nested(&mut self, amount: Weight) -> Result { - let amount = if amount == 0 { self.gas_left } else { amount }; + let amount = if amount == Weight::zero() { self.gas_left } else { amount }; // NOTE that it is ok to allocate all available gas since it still ensured // by `charge` that it doesn't reach zero. 
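
With the contracts gas meter now accounting in `Weight`, bare scalars only reappear at API boundaries via `.ref_time()`, as in the RPC and `ContractExecResult` changes above. A rough sketch assuming the `mod tests` scope of gas.rs (so `GasMeter`, `SimpleToken`, `Test` and `Weight` are in scope, as in the test hunks below); the amounts are arbitrary.

// Placeholder sketch, not part of the patch.
fn sketch_gas_meter_in_weight_units() {
    let mut gas_meter = GasMeter::<Test>::new(Weight::from_ref_time(50_000));
    assert!(gas_meter.charge(SimpleToken(1)).is_ok());
    // The meter itself speaks `Weight`; the scalar is only recovered at the edge.
    let left: u64 = gas_meter.gas_left().ref_time();
    assert!(left < 50_000);
}
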
@@ -121,7 +121,7 @@ where /// Absorb the remaining gas of a nested meter after we are done using it. pub fn absorb_nested(&mut self, nested: Self) { - if self.gas_left == 0 { + if self.gas_left == Weight::zero() { // All of the remaining gas was inherited by the nested gas meter. When absorbing // we can therefore safely inherit the lowest gas that the nested gas meter experienced // as long as it is lower than the lowest gas that was experienced by the parent. @@ -157,7 +157,7 @@ where } let amount = token.weight(); - let new_value = self.gas_left.checked_sub(amount); + let new_value = self.gas_left.checked_sub(&amount); // We always consume the gas even if there is not enough gas. self.gas_left = new_value.unwrap_or_else(Zero::zero); @@ -227,7 +227,7 @@ where #[cfg(test)] mod tests { - use super::{GasMeter, Token}; + use super::{GasMeter, Token, Weight}; use crate::tests::Test; /// A simple utility macro that helps to match against a @@ -271,20 +271,20 @@ mod tests { #[derive(Copy, Clone, PartialEq, Eq, Debug)] struct SimpleToken(u64); impl Token for SimpleToken { - fn weight(&self) -> u64 { - self.0 + fn weight(&self) -> Weight { + Weight::from_ref_time(self.0) } } #[test] fn it_works() { - let gas_meter = GasMeter::::new(50000); - assert_eq!(gas_meter.gas_left(), 50000); + let gas_meter = GasMeter::::new(Weight::from_ref_time(50000)); + assert_eq!(gas_meter.gas_left(), Weight::from_ref_time(50000)); } #[test] fn tracing() { - let mut gas_meter = GasMeter::::new(50000); + let mut gas_meter = GasMeter::::new(Weight::from_ref_time(50000)); assert!(!gas_meter.charge(SimpleToken(1)).is_err()); let mut tokens = gas_meter.tokens().iter(); @@ -294,7 +294,7 @@ mod tests { // This test makes sure that nothing can be executed if there is no gas. #[test] fn refuse_to_execute_anything_if_zero() { - let mut gas_meter = GasMeter::::new(0); + let mut gas_meter = GasMeter::::new(Weight::zero()); assert!(gas_meter.charge(SimpleToken(1)).is_err()); } @@ -305,7 +305,7 @@ mod tests { // if the gas meter runs out of gas. However, this is just a nice property to have. #[test] fn overcharge_is_unrecoverable() { - let mut gas_meter = GasMeter::::new(200); + let mut gas_meter = GasMeter::::new(Weight::from_ref_time(200)); // The first charge is should lead to OOG. assert!(gas_meter.charge(SimpleToken(300)).is_err()); @@ -318,7 +318,7 @@ mod tests { // possible. 
#[test] fn charge_exact_amount() { - let mut gas_meter = GasMeter::::new(25); + let mut gas_meter = GasMeter::::new(Weight::from_ref_time(25)); assert!(!gas_meter.charge(SimpleToken(25)).is_err()); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 06fd419d88bf3..ee0db17ade95b 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -110,7 +110,7 @@ use frame_support::{ dispatch::Dispatchable, ensure, traits::{ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time}, - weights::{DispatchClass, GetDispatchInfo, Pays, PostDispatchInfo, Weight}, + weights::{DispatchClass, GetDispatchInfo, Pays, PostDispatchInfo, RefTimeWeight, Weight}, BoundedVec, }; use frame_system::{limits::BlockWeights, Pallet as System}; @@ -214,7 +214,7 @@ impl, const P: u32> Get for DefaultContractAccessWe .get(DispatchClass::Normal) .max_total .unwrap_or(block_weights.max_block) / - Weight::from(P) + RefTimeWeight::from(P) } } @@ -873,8 +873,8 @@ where ); ContractExecResult { result: output.result.map_err(|r| r.error), - gas_consumed: output.gas_meter.gas_consumed(), - gas_required: output.gas_meter.gas_required(), + gas_consumed: output.gas_meter.gas_consumed().ref_time(), + gas_required: output.gas_meter.gas_required().ref_time(), storage_deposit: output.storage_deposit, debug_message: debug_message.unwrap_or_default(), } @@ -918,8 +918,8 @@ where .result .map(|(account_id, result)| InstantiateReturnValue { result, account_id }) .map_err(|e| e.error), - gas_consumed: output.gas_meter.gas_consumed(), - gas_required: output.gas_meter.gas_required(), + gas_consumed: output.gas_meter.gas_consumed().ref_time(), + gas_required: output.gas_meter.gas_required().ref_time(), storage_deposit: output.storage_deposit, debug_message: debug_message.unwrap_or_default(), } diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index 19e699a855461..9d90cf38269b1 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -26,7 +26,7 @@ use sp_std::{marker::PhantomData, prelude::*}; /// Wrapper for all migrations of this pallet, based on `StorageVersion`. 
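Editor's note: in the lib.rs hunk above, the dispatch path works with the struct Weight internally while the RPC result types keep exposing plain u64 gas values, hence the ref_time() calls. A hedged sketch of that boundary with an invented result struct (the real ContractExecResult has more fields):

use frame_support::weights::Weight;

// Invented RPC-facing struct, for illustration only.
struct RpcGasInfo { gas_consumed: u64, gas_required: u64 }

fn to_rpc(consumed: Weight, required: Weight) -> RpcGasInfo {
    // The struct Weight is flattened back to its ref-time component for RPC.
    RpcGasInfo { gas_consumed: consumed.ref_time(), gas_required: required.ref_time() }
}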
pub fn migrate() -> Weight { let version = StorageVersion::get::>(); - let mut weight: Weight = 0; + let mut weight = Weight::new(); if version < 4 { weight = weight.saturating_add(v4::migrate::()); @@ -127,7 +127,7 @@ mod v5 { type DeletionQueue = StorageValue, Vec>; pub fn migrate() -> Weight { - let mut weight: Weight = 0; + let mut weight = Weight::new(); >::translate(|_key, old: OldContractInfo| { weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); @@ -216,7 +216,7 @@ mod v6 { type OwnerInfoOf = StorageMap, Identity, CodeHash, OwnerInfo>; pub fn migrate() -> Weight { - let mut weight: Weight = 0; + let mut weight = Weight::new(); >::translate(|_key, old: OldContractInfo| { weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 907ce9e088648..b5c80642a5356 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -21,7 +21,7 @@ use crate::{weights::WeightInfo, Config}; use codec::{Decode, Encode}; -use frame_support::{weights::Weight, DefaultNoBound}; +use frame_support::{weights::RefTimeWeight, DefaultNoBound}; use pallet_contracts_proc_macro::{ScheduleDebug, WeightDebug}; use scale_info::TypeInfo; #[cfg(feature = "std")] @@ -255,166 +255,166 @@ pub struct InstructionWeights { #[scale_info(skip_type_params(T))] pub struct HostFnWeights { /// Weight of calling `seal_caller`. - pub caller: Weight, + pub caller: RefTimeWeight, /// Weight of calling `seal_is_contract`. - pub is_contract: Weight, + pub is_contract: RefTimeWeight, /// Weight of calling `seal_code_hash`. - pub code_hash: Weight, + pub code_hash: RefTimeWeight, /// Weight of calling `seal_own_code_hash`. - pub own_code_hash: Weight, + pub own_code_hash: RefTimeWeight, /// Weight of calling `seal_caller_is_origin`. - pub caller_is_origin: Weight, + pub caller_is_origin: RefTimeWeight, /// Weight of calling `seal_address`. - pub address: Weight, + pub address: RefTimeWeight, /// Weight of calling `seal_gas_left`. - pub gas_left: Weight, + pub gas_left: RefTimeWeight, /// Weight of calling `seal_balance`. - pub balance: Weight, + pub balance: RefTimeWeight, /// Weight of calling `seal_value_transferred`. - pub value_transferred: Weight, + pub value_transferred: RefTimeWeight, /// Weight of calling `seal_minimum_balance`. - pub minimum_balance: Weight, + pub minimum_balance: RefTimeWeight, /// Weight of calling `seal_block_number`. - pub block_number: Weight, + pub block_number: RefTimeWeight, /// Weight of calling `seal_now`. - pub now: Weight, + pub now: RefTimeWeight, /// Weight of calling `seal_weight_to_fee`. - pub weight_to_fee: Weight, + pub weight_to_fee: RefTimeWeight, /// Weight of calling `gas`. - pub gas: Weight, + pub gas: RefTimeWeight, /// Weight of calling `seal_input`. - pub input: Weight, + pub input: RefTimeWeight, /// Weight per input byte copied to contract memory by `seal_input`. - pub input_per_byte: Weight, + pub input_per_byte: RefTimeWeight, /// Weight of calling `seal_return`. - pub r#return: Weight, + pub r#return: RefTimeWeight, /// Weight per byte returned through `seal_return`. - pub return_per_byte: Weight, + pub return_per_byte: RefTimeWeight, /// Weight of calling `seal_terminate`. - pub terminate: Weight, + pub terminate: RefTimeWeight, /// Weight of calling `seal_random`. - pub random: Weight, + pub random: RefTimeWeight, /// Weight of calling `seal_reposit_event`. 
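Editor's note: the migration.rs hunks above replace `let mut weight: Weight = 0` with `Weight::new()` and keep accumulating with saturating_add. A hedged sketch of that accumulation pattern; the per-item cost below is a placeholder, not a real DbWeight value:

use frame_support::weights::Weight;

// Illustrative accumulation loop in the style of the migrate() functions above.
fn migrate_n_items(n: u64) -> Weight {
    let mut weight = Weight::new();
    for _ in 0..n {
        // e.g. one read plus one write per translated storage item (made-up cost)
        weight = weight.saturating_add(Weight::from_ref_time(125_000));
    }
    weight
}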
- pub deposit_event: Weight, + pub deposit_event: RefTimeWeight, /// Weight per topic supplied to `seal_deposit_event`. - pub deposit_event_per_topic: Weight, + pub deposit_event_per_topic: RefTimeWeight, /// Weight per byte of an event deposited through `seal_deposit_event`. - pub deposit_event_per_byte: Weight, + pub deposit_event_per_byte: RefTimeWeight, /// Weight of calling `seal_debug_message`. - pub debug_message: Weight, + pub debug_message: RefTimeWeight, /// Weight of calling `seal_set_storage`. - pub set_storage: Weight, + pub set_storage: RefTimeWeight, /// Weight per written byten of an item stored with `seal_set_storage`. - pub set_storage_per_new_byte: Weight, + pub set_storage_per_new_byte: RefTimeWeight, /// Weight per overwritten byte of an item stored with `seal_set_storage`. - pub set_storage_per_old_byte: Weight, + pub set_storage_per_old_byte: RefTimeWeight, /// Weight of calling `seal_set_code_hash`. - pub set_code_hash: Weight, + pub set_code_hash: RefTimeWeight, /// Weight of calling `seal_clear_storage`. - pub clear_storage: Weight, + pub clear_storage: RefTimeWeight, /// Weight of calling `seal_clear_storage` per byte of the stored item. - pub clear_storage_per_byte: Weight, + pub clear_storage_per_byte: RefTimeWeight, /// Weight of calling `seal_contains_storage`. - pub contains_storage: Weight, + pub contains_storage: RefTimeWeight, /// Weight of calling `seal_contains_storage` per byte of the stored item. - pub contains_storage_per_byte: Weight, + pub contains_storage_per_byte: RefTimeWeight, /// Weight of calling `seal_get_storage`. - pub get_storage: Weight, + pub get_storage: RefTimeWeight, /// Weight per byte of an item received via `seal_get_storage`. - pub get_storage_per_byte: Weight, + pub get_storage_per_byte: RefTimeWeight, /// Weight of calling `seal_take_storage`. - pub take_storage: Weight, + pub take_storage: RefTimeWeight, /// Weight per byte of an item received via `seal_take_storage`. - pub take_storage_per_byte: Weight, + pub take_storage_per_byte: RefTimeWeight, /// Weight of calling `seal_transfer`. - pub transfer: Weight, + pub transfer: RefTimeWeight, /// Weight of calling `seal_call`. - pub call: Weight, + pub call: RefTimeWeight, /// Weight of calling `seal_delegate_call`. - pub delegate_call: Weight, + pub delegate_call: RefTimeWeight, /// Weight surcharge that is claimed if `seal_call` does a balance transfer. - pub call_transfer_surcharge: Weight, + pub call_transfer_surcharge: RefTimeWeight, /// Weight per byte that is cloned by supplying the `CLONE_INPUT` flag. - pub call_per_cloned_byte: Weight, + pub call_per_cloned_byte: RefTimeWeight, /// Weight of calling `seal_instantiate`. - pub instantiate: Weight, + pub instantiate: RefTimeWeight, /// Weight surcharge that is claimed if `seal_instantiate` does a balance transfer. - pub instantiate_transfer_surcharge: Weight, + pub instantiate_transfer_surcharge: RefTimeWeight, /// Weight per salt byte supplied to `seal_instantiate`. - pub instantiate_per_salt_byte: Weight, + pub instantiate_per_salt_byte: RefTimeWeight, /// Weight of calling `seal_hash_sha_256`. - pub hash_sha2_256: Weight, + pub hash_sha2_256: RefTimeWeight, /// Weight per byte hashed by `seal_hash_sha_256`. - pub hash_sha2_256_per_byte: Weight, + pub hash_sha2_256_per_byte: RefTimeWeight, /// Weight of calling `seal_hash_keccak_256`. - pub hash_keccak_256: Weight, + pub hash_keccak_256: RefTimeWeight, /// Weight per byte hashed by `seal_hash_keccak_256`. 
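Editor's note: the HostFnWeights fields above now store bare RefTimeWeight (u64) values; callers wrap them back into a struct Weight when charging (see the runtime.rs hunk further down). A hedged sketch with an invented two-field schedule, not the pallet's struct:

use frame_support::weights::{RefTimeWeight, Weight};

// Invented miniature schedule for illustration.
struct MiniHostFnWeights { input: RefTimeWeight, input_per_byte: RefTimeWeight }

fn input_cost(s: &MiniHostFnWeights, len: u64) -> Weight {
    // Raw ref-time values are combined as u64s, then wrapped for the gas meter.
    Weight::from_ref_time(s.input.saturating_add(s.input_per_byte.saturating_mul(len)))
}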
- pub hash_keccak_256_per_byte: Weight, + pub hash_keccak_256_per_byte: RefTimeWeight, /// Weight of calling `seal_hash_blake2_256`. - pub hash_blake2_256: Weight, + pub hash_blake2_256: RefTimeWeight, /// Weight per byte hashed by `seal_hash_blake2_256`. - pub hash_blake2_256_per_byte: Weight, + pub hash_blake2_256_per_byte: RefTimeWeight, /// Weight of calling `seal_hash_blake2_128`. - pub hash_blake2_128: Weight, + pub hash_blake2_128: RefTimeWeight, /// Weight per byte hashed by `seal_hash_blake2_128`. - pub hash_blake2_128_per_byte: Weight, + pub hash_blake2_128_per_byte: RefTimeWeight, /// Weight of calling `seal_ecdsa_recover`. - pub ecdsa_recover: Weight, + pub ecdsa_recover: RefTimeWeight, /// Weight of calling `seal_ecdsa_to_eth_address`. - pub ecdsa_to_eth_address: Weight, + pub ecdsa_to_eth_address: RefTimeWeight, /// The type parameter is used in the default implementation. #[codec(skip)] @@ -435,19 +435,19 @@ macro_rules! call_zero { macro_rules! cost_args { ($name:ident, $( $arg: expr ),+) => { - (T::WeightInfo::$name($( $arg ),+).saturating_sub(call_zero!($name, $( $arg ),+))) + (T::WeightInfo::$name($( $arg ),+).saturating_sub(call_zero!($name, $( $arg ),+))).ref_time() } } macro_rules! cost_batched_args { ($name:ident, $( $arg: expr ),+) => { - cost_args!($name, $( $arg ),+) / Weight::from(API_BENCHMARK_BATCH_SIZE) + cost_args!($name, $( $arg ),+) / RefTimeWeight::from(API_BENCHMARK_BATCH_SIZE) } } macro_rules! cost_instr_no_params_with_batch_size { ($name:ident, $batch_size:expr) => { - (cost_args!($name, 1) / Weight::from($batch_size)) as u32 + (cost_args!($name, 1) / RefTimeWeight::from($batch_size)) as u32 }; } diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 01c809da8675e..76e2d74f3d887 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -28,7 +28,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{DispatchError, DispatchResult}, storage::child::{self, ChildInfo}, - weights::Weight, + weights::{RefTimeWeight, Weight}, }; use scale_info::TypeInfo; use sp_core::crypto::UncheckedFrom; @@ -227,9 +227,11 @@ where let base_weight = T::WeightInfo::on_process_deletion_queue_batch(); let weight_per_queue_item = T::WeightInfo::on_initialize_per_queue_item(1) - T::WeightInfo::on_initialize_per_queue_item(0); - let weight_per_key = T::WeightInfo::on_initialize_per_trie_key(1) - - T::WeightInfo::on_initialize_per_trie_key(0); - let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as Weight); + let weight_per_key = (T::WeightInfo::on_initialize_per_trie_key(1) - + T::WeightInfo::on_initialize_per_trie_key(0)) + .ref_time(); + let decoding_weight = + weight_per_queue_item.scalar_saturating_mul(queue_len as RefTimeWeight); // `weight_per_key` being zero makes no sense and would constitute a failure to // benchmark properly. We opt for not removing any keys at all in this case. 
@@ -237,7 +239,8 @@ where .saturating_sub(base_weight) .saturating_sub(decoding_weight) .checked_div(weight_per_key) - .unwrap_or(0) as u32; + .unwrap_or(Weight::zero()) + .ref_time() as u32; (weight_per_key, key_budget) } @@ -248,7 +251,7 @@ where pub fn process_deletion_queue_batch(weight_limit: Weight) -> Weight { let queue_len = >::decode_len().unwrap_or(0); if queue_len == 0 { - return 0 + return Weight::zero() } let (weight_per_key, mut remaining_key_budget) = @@ -282,7 +285,10 @@ where } >::put(queue); - weight_limit.saturating_sub(weight_per_key.saturating_mul(remaining_key_budget as Weight)) + let ref_time_weight = weight_limit + .ref_time() + .saturating_sub(weight_per_key.saturating_mul(remaining_key_budget as RefTimeWeight)); + Weight::from_ref_time(ref_time_weight) } /// Generates a unique trie id by returning `hash(account_id ++ nonce)`. diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 204908cc4a989..3571434bb823b 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -175,7 +175,7 @@ impl ChainExtension for TestExtension { }, 0x8002 => { let mut env = env.buf_in_buf_out(); - let weight = env.read(5)?[4].into(); + let weight = Weight::from_ref_time(env.read(5)?[4].into()); env.charge_weight(weight)?; Ok(RetVal::Converging(id)) }, @@ -332,7 +332,7 @@ parameter_types! { impl Convert> for Test { fn convert(w: Weight) -> BalanceOf { - w + w.ref_time() } } @@ -355,6 +355,10 @@ impl Contains for TestFilter { } } +parameter_types! { + pub const DeletionWeightLimit: Weight = Weight::from_ref_time(500_000_000_000); +} + impl Config for Test { type Time = Timestamp; type Randomness = Randomness; @@ -368,7 +372,7 @@ impl Config for Test { type ChainExtension = (TestExtension, DisabledExtension, RevertingExtension, TempStorageExtension); type DeletionQueueDepth = ConstU32<1024>; - type DeletionWeightLimit = ConstU64<500_000_000_000>; + type DeletionWeightLimit = DeletionWeightLimit; type Schedule = MySchedule; type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; @@ -384,7 +388,7 @@ pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -pub const GAS_LIMIT: Weight = 100_000_000_000; +pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000); pub struct ExtBuilder { existential_deposit: u64, @@ -642,7 +646,7 @@ fn run_out_of_gas() { Origin::signed(ALICE), addr, // newly created account 0, - 1_000_000_000_000, + Weight::from_ref_time(1_000_000_000_000), None, vec![], ), @@ -1826,7 +1830,7 @@ fn lazy_removal_works() { assert_matches!(child::get(trie, &[99]), Some(42)); // Run the lazy removal - Contracts::on_idle(System::block_number(), Weight::max_value()); + Contracts::on_idle(System::block_number(), Weight::MAX); // Value should be gone now assert_matches!(child::get::(trie, &[99]), None); @@ -1896,7 +1900,7 @@ fn lazy_batch_removal_works() { } // Run single lazy removal - Contracts::on_idle(System::block_number(), Weight::max_value()); + Contracts::on_idle(System::block_number(), Weight::MAX); // The single lazy removal should have removed all queued tries for trie in tries.iter() { @@ -1911,7 +1915,7 @@ fn lazy_removal_partial_remove_works() { // We create a contract with some extra keys above the weight limit let extra_keys = 7u32; - let weight_limit = 5_000_000_000; + let weight_limit = Weight::from_ref_time(5_000_000_000); let (_, max_keys) = Storage::::deletion_budget(1, 
weight_limit); let vals: Vec<_> = (0..max_keys + extra_keys) .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) @@ -2085,7 +2089,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { assert_matches!(child::get::(trie, &[99]), Some(42)); // Run on_idle with max remaining weight, this should remove the value - Contracts::on_idle(System::block_number(), Weight::max_value()); + Contracts::on_idle(System::block_number(), Weight::MAX); // Value should be gone assert_matches!(child::get::(trie, &[99]), None); @@ -2096,7 +2100,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { fn lazy_removal_does_not_use_all_weight() { let (code, hash) = compile_module::("self_destruct").unwrap(); - let weight_limit = 5_000_000_000; + let weight_limit = Weight::from_ref_time(5_000_000_000); let mut ext = ExtBuilder::default().existential_deposit(50).build(); let (trie, vals, weight_per_key) = ext.execute_with(|| { @@ -2167,7 +2171,7 @@ fn lazy_removal_does_not_use_all_weight() { let weight_used = Storage::::process_deletion_queue_batch(weight_limit); // We have one less key in our trie than our weight limit suffices for - assert_eq!(weight_used, weight_limit - weight_per_key); + assert_eq!(weight_used, weight_limit - Weight::from_ref_time(weight_per_key)); // All the keys are removed for val in vals { @@ -2322,7 +2326,7 @@ fn reinstrument_does_charge() { assert!(result2.gas_consumed > result1.gas_consumed); assert_eq!( result2.gas_consumed, - result1.gas_consumed + ::WeightInfo::reinstrument(code_len), + result1.gas_consumed + ::WeightInfo::reinstrument(code_len).ref_time(), ); }); } @@ -2430,7 +2434,7 @@ fn gas_estimation_nested_call_fixed_limit() { let input: Vec = AsRef::<[u8]>::as_ref(&addr_callee) .iter() .cloned() - .chain((GAS_LIMIT / 5).to_le_bytes()) + .chain((GAS_LIMIT / 5).ref_time().to_le_bytes()) .collect(); // Call in order to determine the gas that is required for this call @@ -2454,7 +2458,7 @@ fn gas_estimation_nested_call_fixed_limit() { ALICE, addr_caller, 0, - result.gas_required, + Weight::from_ref_time(result.gas_required), Some(result.storage_deposit.charge_or_zero()), input, false, @@ -2524,7 +2528,7 @@ fn gas_estimation_call_runtime() { ALICE, addr_caller, 0, - result.gas_required, + Weight::from_ref_time(result.gas_required), None, call.encode(), false, diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 61826c7c323aa..659caafa7f0b0 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -218,14 +218,16 @@ impl Token for CodeToken { // contract code. This is why we subtract `T::*::(0)`. We need to do this at this // point because when charging the general weight for calling the contract we not know the // size of the contract. 
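Editor's note: stepping back to the storage.rs deletion_budget hunk a few hunks above, the division now happens on the ref-time component and the quotient becomes the u32 key budget. A plain-u64 restatement of that arithmetic; all numbers are placeholders, not benchmark output:

fn key_budget(
    weight_limit: u64,
    base: u64,
    per_queue_item: u64,
    per_key: u64,
    queue_len: u64,
) -> u32 {
    // Cost of decoding the queue itself is paid up front.
    let decoding = per_queue_item.saturating_mul(queue_len);
    weight_limit
        .saturating_sub(base)
        .saturating_sub(decoding)
        .checked_div(per_key)
        .unwrap_or(0) as u32
}

fn main() {
    // A 5_000_000_000 limit with ~1_000_000 per key leaves room for 4_998 keys
    // once the base and queue-decoding costs are paid.
    assert_eq!(key_budget(5_000_000_000, 1_000_000, 200_000, 1_000_000, 3), 4_998);
}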
- match *self { + let ref_time_weight = match *self { Reinstrument(len) => T::WeightInfo::reinstrument(len), Load(len) => { let computation = T::WeightInfo::call_with_code_per_byte(len) .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)); - let bandwidth = T::ContractAccessWeight::get().saturating_mul(len.into()); + let bandwidth = T::ContractAccessWeight::get().scalar_saturating_mul(len as u64); computation.max(bandwidth) }, - } + }; + + ref_time_weight } } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 02a360fe86b45..f989c21b00ffc 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -365,7 +365,7 @@ mod tests { events: Default::default(), runtime_calls: Default::default(), schedule: Default::default(), - gas_meter: GasMeter::new(10_000_000_000), + gas_meter: GasMeter::new(Weight::from_ref_time(10_000_000_000)), debug_buffer: Default::default(), ecdsa_recover: Default::default(), } @@ -406,7 +406,7 @@ mod tests { code_hash, value, data: data.to_vec(), - gas_left: gas_limit, + gas_left: gas_limit.ref_time(), salt: salt.to_vec(), }); Ok(( @@ -520,7 +520,7 @@ mod tests { 16_384 } fn get_weight_price(&self, weight: Weight) -> BalanceOf { - BalanceOf::::from(1312_u32).saturating_mul(weight.into()) + BalanceOf::::from(1312_u32).saturating_mul(weight.ref_time().into()) } fn schedule(&self) -> &Schedule { &self.schedule @@ -1911,7 +1911,7 @@ mod tests { )] ); - assert!(mock_ext.gas_meter.gas_left() > 0); + assert!(mock_ext.gas_meter.gas_left() > Weight::zero()); } const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 1d5478a5277cd..296f322f494d0 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -327,14 +327,14 @@ impl RuntimeCosts { EcdsaRecovery => s.ecdsa_recover, ChainExtension(amount) => amount, #[cfg(feature = "unstable-interface")] - CallRuntime(weight) => weight, + CallRuntime(weight) => weight.ref_time(), SetCodeHash => s.set_code_hash, EcdsaToEthAddress => s.ecdsa_to_eth_address, }; RuntimeToken { #[cfg(test)] _created_from: *self, - weight, + weight: Weight::from_ref_time(weight), } } } @@ -857,7 +857,7 @@ where self.charge_gas(RuntimeCosts::CallSurchargeTransfer)?; } self.ext.call( - gas, + Weight::from_ref_time(gas), callee, value, input_data, @@ -906,6 +906,7 @@ where salt_ptr: u32, salt_len: u32, ) -> Result { + let gas = Weight::from_ref_time(gas); self.charge_gas(RuntimeCosts::InstantiateBase { input_data_len, salt_len })?; let value: BalanceOf<::T> = self.read_sandbox_memory_as(value_ptr)?; if value > 0u32.into() { @@ -1704,6 +1705,7 @@ pub mod env { out_ptr: u32, out_len_ptr: u32, ) -> Result<(), TrapReason> { + let gas = Weight::from_ref_time(gas); ctx.charge_gas(RuntimeCosts::WeightToFee)?; Ok(ctx.write_sandbox_output( out_ptr, diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 3c90579e65d53..17be3c3da0138 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_contracts. 
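Editor's note: the wasm/mod.rs mock above prices a weight by multiplying its ref-time component (1312 is the mock's arbitrary per-unit price). A standalone restatement of that conversion, hedged as a sketch rather than the pallet's fee logic:

use frame_support::weights::Weight;

// Flat price of 1312 balance units per ref-time unit, as in the mock Ext above.
fn weight_to_fee(weight: Weight) -> u128 {
    1312u128.saturating_mul(weight.ref_time() as u128)
}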
@@ -166,37 +166,37 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - (1_654_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(1_654_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - (8_564_000 as Weight) + Weight::from_ref_time(8_564_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((868_000 as Weight).saturating_mul(k as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) } // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 1024]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((1_944_000 as Weight).saturating_mul(q as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_944_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - (19_016_000 as Weight) + Weight::from_ref_time(19_016_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) @@ -204,11 +204,11 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) /// The range of component `c` is `[0, 131072]`. 
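Editor's note: every generated weight function below is rewritten into the same shape: a base cost and per-component slopes built with Weight::from_ref_time, scaled via scalar_saturating_mul, plus the database read/write weights. A shape-only sketch, reusing the constants from on_initialize_per_trie_key above and taking the db weights as parameters instead of reading T::DbWeight:

use frame_support::weights::{RefTimeWeight, Weight};

// Shape-only sketch of a rewritten weight function; constants copied from
// on_initialize_per_trie_key above, db weights passed in for simplicity.
fn per_trie_key_shape(k: u32, db_read: Weight, db_write: Weight) -> Weight {
    Weight::from_ref_time(8_564_000 as RefTimeWeight)
        .saturating_add(
            Weight::from_ref_time(868_000 as RefTimeWeight)
                .scalar_saturating_mul(k as RefTimeWeight),
        )
        // one read, plus one write and one extra write per removed key
        .saturating_add(db_read)
        .saturating_add(db_write.scalar_saturating_mul(1u64.saturating_add(k as u64)))
}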
fn call_with_code_per_byte(c: u32, ) -> Weight { - (205_194_000 as Weight) + Weight::from_ref_time(205_194_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((53_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(53_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -220,13 +220,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (288_487_000 as Weight) + Weight::from_ref_time(288_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((124_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -236,46 +236,46 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - (186_136_000 as Weight) + Weight::from_ref_time(186_136_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) fn call() -> Weight { - (149_232_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(149_232_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. 
fn upload_code(c: u32, ) -> Weight { - (51_721_000 as Weight) + Weight::from_ref_time(51_721_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((48_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Contracts OwnerInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - (30_016_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(30_016_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) fn set_code() -> Weight { - (27_192_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(27_192_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -283,11 +283,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - (206_405_000 as Weight) + Weight::from_ref_time(206_405_000 as RefTimeWeight) // Standard Error: 112_000 - .saturating_add((40_987_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_987_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -295,12 +295,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - (106_220_000 as Weight) + Weight::from_ref_time(106_220_000 as RefTimeWeight) // Standard Error: 710_000 - .saturating_add((307_648_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(307_648_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -308,12 +308,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_code_hash(r: u32, ) -> Weight { - (104_498_000 as Weight) + Weight::from_ref_time(104_498_000 as RefTimeWeight) // Standard Error: 633_000 - .saturating_add((368_901_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(368_901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -321,11 +321,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - (208_696_000 as Weight) + Weight::from_ref_time(208_696_000 as RefTimeWeight) // Standard Error: 101_000 - .saturating_add((44_445_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(44_445_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -333,11 +333,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - (205_612_000 as Weight) + Weight::from_ref_time(205_612_000 as RefTimeWeight) // Standard Error: 68_000 - .saturating_add((17_145_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -345,11 +345,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - (206_947_000 as Weight) + Weight::from_ref_time(206_947_000 as RefTimeWeight) // Standard Error: 107_000 - .saturating_add((40_789_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_789_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -357,11 +357,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_gas_left(r: u32, ) -> Weight { - (208_692_000 as Weight) + Weight::from_ref_time(208_692_000 as RefTimeWeight) // Standard Error: 109_000 - .saturating_add((40_600_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_600_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -369,11 +369,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - (209_811_000 as Weight) + Weight::from_ref_time(209_811_000 as RefTimeWeight) // Standard Error: 208_000 - .saturating_add((116_831_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(116_831_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -381,11 +381,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - (207_406_000 as Weight) + Weight::from_ref_time(207_406_000 as RefTimeWeight) // Standard Error: 117_000 - .saturating_add((40_702_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_702_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -393,11 +393,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - (209_260_000 as Weight) + Weight::from_ref_time(209_260_000 as RefTimeWeight) // Standard Error: 130_000 - .saturating_add((40_479_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_479_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -405,11 +405,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_block_number(r: u32, ) -> Weight { - (206_448_000 as Weight) + Weight::from_ref_time(206_448_000 as RefTimeWeight) // Standard Error: 95_000 - .saturating_add((40_134_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_134_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -417,11 +417,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - (206_969_000 as Weight) + Weight::from_ref_time(206_969_000 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add((40_251_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -430,11 +430,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - (211_611_000 as Weight) + Weight::from_ref_time(211_611_000 as RefTimeWeight) // Standard Error: 175_000 - .saturating_add((98_675_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(98_675_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -442,11 +442,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - (134_484_000 as Weight) + Weight::from_ref_time(134_484_000 as RefTimeWeight) // Standard Error: 57_000 - .saturating_add((19_329_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(19_329_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -454,11 +454,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_input(r: u32, ) -> Weight { - (208_556_000 as Weight) + Weight::from_ref_time(208_556_000 as RefTimeWeight) // Standard Error: 125_000 - .saturating_add((40_328_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -466,11 +466,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - (268_886_000 as Weight) + Weight::from_ref_time(268_886_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((9_627_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(9_627_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -478,9 +478,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 1]`. fn seal_return(_r: u32, ) -> Weight { - (203_591_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(203_591_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -488,11 +488,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - (204_258_000 as Weight) + Weight::from_ref_time(204_258_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((183_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(183_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -502,13 +502,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. 
fn seal_terminate(r: u32, ) -> Weight { - (206_625_000 as Weight) + Weight::from_ref_time(206_625_000 as RefTimeWeight) // Standard Error: 672_000 - .saturating_add((59_377_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(59_377_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -517,11 +517,11 @@ impl WeightInfo for SubstrateWeight { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - (208_866_000 as Weight) + Weight::from_ref_time(208_866_000 as RefTimeWeight) // Standard Error: 164_000 - .saturating_add((133_438_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(133_438_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -529,11 +529,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - (220_860_000 as Weight) + Weight::from_ref_time(220_860_000 as RefTimeWeight) // Standard Error: 209_000 - .saturating_add((239_951_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(239_951_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -543,15 +543,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. 
fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (439_782_000 as Weight) + Weight::from_ref_time(439_782_000 as RefTimeWeight) // Standard Error: 1_643_000 - .saturating_add((264_687_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(264_687_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 323_000 - .saturating_add((67_636_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(t as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(t as Weight))) + .saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -559,128 +559,128 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - (140_280_000 as Weight) + Weight::from_ref_time(140_280_000 as RefTimeWeight) // Standard Error: 82_000 - .saturating_add((32_717_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(32_717_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - (161_247_000 as Weight) + Weight::from_ref_time(161_247_000 as RefTimeWeight) // Standard Error: 883_000 - .saturating_add((423_997_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(423_997_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - (529_247_000 as Weight) + Weight::from_ref_time(529_247_000 as RefTimeWeight) // Standard Error: 2_745_000 - .saturating_add((85_282_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(55 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(53 as Weight)) - .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(85_282_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - (529_812_000 as Weight) + Weight::from_ref_time(529_812_000 as RefTimeWeight) // Standard Error: 2_513_000 - .saturating_add((74_554_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(55 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(53 as Weight)) - .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(74_554_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - (184_803_000 as Weight) + Weight::from_ref_time(184_803_000 as RefTimeWeight) // Standard Error: 733_000 - .saturating_add((404_933_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(404_933_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - (500_958_000 as Weight) + Weight::from_ref_time(500_958_000 as RefTimeWeight) // Standard Error: 2_980_000 - .saturating_add((75_996_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(55 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(52 as Weight)) - .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(75_996_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(52 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - (177_682_000 as Weight) + Weight::from_ref_time(177_682_000 as RefTimeWeight) // Standard Error: 743_000 - .saturating_add((338_172_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(338_172_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (465_285_000 as Weight) + Weight::from_ref_time(465_285_000 as RefTimeWeight) // Standard Error: 2_599_000 - .saturating_add((155_106_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(55 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(155_106_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_contains_storage(r: u32, ) -> Weight { - (179_118_000 as Weight) + Weight::from_ref_time(179_118_000 as RefTimeWeight) // Standard Error: 572_000 - .saturating_add((311_083_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(311_083_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - (423_056_000 as Weight) + Weight::from_ref_time(423_056_000 as RefTimeWeight) // Standard Error: 2_037_000 - .saturating_add((69_665_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(54 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(69_665_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(54 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - (188_884_000 as Weight) + Weight::from_ref_time(188_884_000 as RefTimeWeight) // Standard Error: 761_000 - .saturating_add((432_781_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(432_781_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_take_storage_per_kb(n: u32, ) -> Weight { - (532_408_000 as Weight) + Weight::from_ref_time(532_408_000 as RefTimeWeight) // Standard Error: 3_348_000 - .saturating_add((164_943_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(55 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(53 as Weight)) - .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(164_943_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -688,13 +688,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - (127_181_000 as Weight) + Weight::from_ref_time(127_181_000 as RefTimeWeight) // Standard Error: 1_495_000 - .saturating_add((1_500_589_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(1_500_589_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -702,13 +702,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_803_000 - .saturating_add((14_860_909_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(14_860_909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -716,11 +716,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_delegate_call(r: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 6_045_000 - .saturating_add((14_797_140_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(14_797_140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -729,15 +729,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - (9_196_444_000 as Weight) + Weight::from_ref_time(9_196_444_000 as RefTimeWeight) // Standard Error: 20_486_000 - .saturating_add((1_458_153_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(1_458_153_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 8_000 - .saturating_add((9_718_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(85 as Weight)) - .saturating_add(T::DbWeight::get().reads((81 as Weight).saturating_mul(t as Weight))) - .saturating_add(T::DbWeight::get().writes(81 as Weight)) - .saturating_add(T::DbWeight::get().writes((81 as Weight).saturating_mul(t as Weight))) + .saturating_add(Weight::from_ref_time(9_718_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(85 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(81 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -747,13 +747,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 36_253_000 - .saturating_add((21_201_529_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().reads((320 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((320 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(21_201_529_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:81 w:81) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -764,15 +764,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. 
fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - (12_282_498_000 as Weight) + Weight::from_ref_time(12_282_498_000 as RefTimeWeight) // Standard Error: 48_112_000 - .saturating_add((720_795_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(720_795_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 22_000 - .saturating_add((124_274_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(167 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(t as Weight))) - .saturating_add(T::DbWeight::get().writes(165 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(t as Weight))) + .saturating_add(Weight::from_ref_time(124_274_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(167 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(165 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -780,11 +780,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - (203_959_000 as Weight) + Weight::from_ref_time(203_959_000 as RefTimeWeight) // Standard Error: 142_000 - .saturating_add((61_311_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(61_311_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -792,11 +792,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (349_915_000 as Weight) + Weight::from_ref_time(349_915_000 as RefTimeWeight) // Standard Error: 40_000 - .saturating_add((320_652_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(320_652_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -804,11 +804,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_hash_keccak_256(r: u32, ) -> Weight { - (209_219_000 as Weight) + Weight::from_ref_time(209_219_000 as RefTimeWeight) // Standard Error: 157_000 - .saturating_add((73_728_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(73_728_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -816,11 +816,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (208_860_000 as Weight) + Weight::from_ref_time(208_860_000 as RefTimeWeight) // Standard Error: 25_000 - .saturating_add((245_718_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(245_718_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -828,11 +828,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - (206_165_000 as Weight) + Weight::from_ref_time(206_165_000 as RefTimeWeight) // Standard Error: 138_000 - .saturating_add((51_644_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(51_644_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -840,11 +840,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (255_955_000 as Weight) + Weight::from_ref_time(255_955_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((95_090_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(95_090_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -852,11 +852,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_hash_blake2_128(r: u32, ) -> Weight { - (208_153_000 as Weight) + Weight::from_ref_time(208_153_000 as RefTimeWeight) // Standard Error: 140_000 - .saturating_add((51_264_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(51_264_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -864,11 +864,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (278_368_000 as Weight) + Weight::from_ref_time(278_368_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((95_006_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(95_006_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -876,11 +876,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - (331_955_000 as Weight) + Weight::from_ref_time(331_955_000 as RefTimeWeight) // Standard Error: 1_155_000 - .saturating_add((3_069_955_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(3_069_955_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -888,11 +888,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - (207_838_000 as Weight) + Weight::from_ref_time(207_838_000 as RefTimeWeight) // Standard Error: 783_000 - .saturating_add((2_058_503_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_058_503_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -901,317 +901,317 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. 
fn seal_set_code_hash(r: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_567_000 - .saturating_add((774_380_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes((79 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(774_380_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - (73_955_000 as Weight) + Weight::from_ref_time(73_955_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((612_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(612_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - (74_057_000 as Weight) + Weight::from_ref_time(74_057_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - (74_137_000 as Weight) + Weight::from_ref_time(74_137_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((1_427_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - (73_844_000 as Weight) + Weight::from_ref_time(73_844_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_773_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_773_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - (73_979_000 as Weight) + Weight::from_ref_time(73_979_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((1_952_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_952_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - (73_924_000 as Weight) + Weight::from_ref_time(73_924_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((941_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(941_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - (73_574_000 as Weight) + Weight::from_ref_time(73_574_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((1_439_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_439_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_br_table(r: u32, ) -> Weight { - (73_343_000 as Weight) + Weight::from_ref_time(73_343_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((1_603_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_603_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - (76_267_000 as Weight) + Weight::from_ref_time(76_267_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - (74_877_000 as Weight) + Weight::from_ref_time(74_877_000 as RefTimeWeight) // Standard Error: 12_000 - .saturating_add((7_144_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(7_144_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - (88_665_000 as Weight) + Weight::from_ref_time(88_665_000 as RefTimeWeight) // Standard Error: 20_000 - .saturating_add((9_142_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(9_142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (98_600_000 as Weight) + Weight::from_ref_time(98_600_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((469_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(Weight::from_ref_time(469_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - (74_555_000 as Weight) + Weight::from_ref_time(74_555_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((624_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(624_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - (74_329_000 as Weight) + Weight::from_ref_time(74_329_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((688_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(688_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - (74_612_000 as Weight) + Weight::from_ref_time(74_612_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((909_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - (76_906_000 as Weight) + Weight::from_ref_time(76_906_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_192_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_192_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_global_set(r: u32, ) -> Weight { - (76_979_000 as Weight) + Weight::from_ref_time(76_979_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_361_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_361_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - (74_370_000 as Weight) + Weight::from_ref_time(74_370_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((661_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(661_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - (73_584_000 as Weight) + Weight::from_ref_time(73_584_000 as RefTimeWeight) // Standard Error: 353_000 - .saturating_add((187_114_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(187_114_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - (74_206_000 as Weight) + Weight::from_ref_time(74_206_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((884_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(884_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - (73_992_000 as Weight) + Weight::from_ref_time(73_992_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((893_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(893_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - (73_985_000 as Weight) + Weight::from_ref_time(73_985_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((891_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(891_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - (74_117_000 as Weight) + Weight::from_ref_time(74_117_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((901_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - (73_981_000 as Weight) + Weight::from_ref_time(73_981_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((866_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(866_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - (74_104_000 as Weight) + Weight::from_ref_time(74_104_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((868_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i32wrapi64(r: u32, ) -> Weight { - (74_293_000 as Weight) + Weight::from_ref_time(74_293_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((878_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(878_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - (74_055_000 as Weight) + Weight::from_ref_time(74_055_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - (73_710_000 as Weight) + Weight::from_ref_time(73_710_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - (73_917_000 as Weight) + Weight::from_ref_time(73_917_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_355_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_355_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - (74_048_000 as Weight) + Weight::from_ref_time(74_048_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - (74_029_000 as Weight) + Weight::from_ref_time(74_029_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_349_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_349_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - (74_267_000 as Weight) + Weight::from_ref_time(74_267_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - (73_952_000 as Weight) + Weight::from_ref_time(73_952_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - (73_851_000 as Weight) + Weight::from_ref_time(73_851_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_368_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_368_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ges(r: u32, ) -> Weight { - (74_034_000 as Weight) + Weight::from_ref_time(74_034_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_348_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_348_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - (73_979_000 as Weight) + Weight::from_ref_time(73_979_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - (74_000_000 as Weight) + Weight::from_ref_time(74_000_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_328_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - (73_883_000 as Weight) + Weight::from_ref_time(73_883_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_331_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_331_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - (74_216_000 as Weight) + Weight::from_ref_time(74_216_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - (73_989_000 as Weight) + Weight::from_ref_time(73_989_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_998_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_998_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - (73_857_000 as Weight) + Weight::from_ref_time(73_857_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((2_073_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(2_073_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - (73_801_000 as Weight) + Weight::from_ref_time(73_801_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((2_027_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(2_027_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - (74_130_000 as Weight) + Weight::from_ref_time(74_130_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((2_064_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(2_064_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64and(r: u32, ) -> Weight { - (74_071_000 as Weight) + Weight::from_ref_time(74_071_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_327_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_327_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - (74_201_000 as Weight) + Weight::from_ref_time(74_201_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((1_330_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_330_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - (74_241_000 as Weight) + Weight::from_ref_time(74_241_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_321_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_321_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - (74_331_000 as Weight) + Weight::from_ref_time(74_331_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add((1_347_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_347_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - (73_674_000 as Weight) + Weight::from_ref_time(73_674_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_359_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_359_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - (73_807_000 as Weight) + Weight::from_ref_time(73_807_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - (73_725_000 as Weight) + Weight::from_ref_time(73_725_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - (73_755_000 as Weight) + Weight::from_ref_time(73_755_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } } @@ -1219,37 +1219,37 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - (1_654_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(1_654_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. 
fn on_initialize_per_trie_key(k: u32, ) -> Weight { - (8_564_000 as Weight) + Weight::from_ref_time(8_564_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((868_000 as Weight).saturating_mul(k as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) } // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 1024]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((1_944_000 as Weight).saturating_mul(q as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_944_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - (19_016_000 as Weight) + Weight::from_ref_time(19_016_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((49_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) @@ -1257,11 +1257,11 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - (205_194_000 as Weight) + Weight::from_ref_time(205_194_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((53_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(53_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -1273,13 +1273,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. 
fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (288_487_000 as Weight) + Weight::from_ref_time(288_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((124_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -1289,46 +1289,46 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - (186_136_000 as Weight) + Weight::from_ref_time(186_136_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) fn call() -> Weight { - (149_232_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(149_232_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. 
fn upload_code(c: u32, ) -> Weight { - (51_721_000 as Weight) + Weight::from_ref_time(51_721_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((48_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Contracts OwnerInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - (30_016_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(30_016_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) fn set_code() -> Weight { - (27_192_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(27_192_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1336,11 +1336,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - (206_405_000 as Weight) + Weight::from_ref_time(206_405_000 as RefTimeWeight) // Standard Error: 112_000 - .saturating_add((40_987_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_987_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1348,12 +1348,12 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - (106_220_000 as Weight) + Weight::from_ref_time(106_220_000 as RefTimeWeight) // Standard Error: 710_000 - .saturating_add((307_648_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(307_648_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1361,12 +1361,12 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_code_hash(r: u32, ) -> Weight { - (104_498_000 as Weight) + Weight::from_ref_time(104_498_000 as RefTimeWeight) // Standard Error: 633_000 - .saturating_add((368_901_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(368_901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1374,11 +1374,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - (208_696_000 as Weight) + Weight::from_ref_time(208_696_000 as RefTimeWeight) // Standard Error: 101_000 - .saturating_add((44_445_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(44_445_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1386,11 +1386,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - (205_612_000 as Weight) + Weight::from_ref_time(205_612_000 as RefTimeWeight) // Standard Error: 68_000 - .saturating_add((17_145_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1398,11 +1398,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - (206_947_000 as Weight) + Weight::from_ref_time(206_947_000 as RefTimeWeight) // Standard Error: 107_000 - .saturating_add((40_789_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_789_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1410,11 +1410,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_gas_left(r: u32, ) -> Weight { - (208_692_000 as Weight) + Weight::from_ref_time(208_692_000 as RefTimeWeight) // Standard Error: 109_000 - .saturating_add((40_600_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_600_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1422,11 +1422,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - (209_811_000 as Weight) + Weight::from_ref_time(209_811_000 as RefTimeWeight) // Standard Error: 208_000 - .saturating_add((116_831_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(116_831_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1434,11 +1434,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - (207_406_000 as Weight) + Weight::from_ref_time(207_406_000 as RefTimeWeight) // Standard Error: 117_000 - .saturating_add((40_702_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_702_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1446,11 +1446,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - (209_260_000 as Weight) + Weight::from_ref_time(209_260_000 as RefTimeWeight) // Standard Error: 130_000 - .saturating_add((40_479_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_479_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1458,11 +1458,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_block_number(r: u32, ) -> Weight { - (206_448_000 as Weight) + Weight::from_ref_time(206_448_000 as RefTimeWeight) // Standard Error: 95_000 - .saturating_add((40_134_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_134_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1470,11 +1470,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - (206_969_000 as Weight) + Weight::from_ref_time(206_969_000 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add((40_251_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1483,11 +1483,11 @@ impl WeightInfo for () { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - (211_611_000 as Weight) + Weight::from_ref_time(211_611_000 as RefTimeWeight) // Standard Error: 175_000 - .saturating_add((98_675_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(98_675_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1495,11 +1495,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - (134_484_000 as Weight) + Weight::from_ref_time(134_484_000 as RefTimeWeight) // Standard Error: 57_000 - .saturating_add((19_329_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(19_329_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1507,11 +1507,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_input(r: u32, ) -> Weight { - (208_556_000 as Weight) + Weight::from_ref_time(208_556_000 as RefTimeWeight) // Standard Error: 125_000 - .saturating_add((40_328_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(40_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1519,11 +1519,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - (268_886_000 as Weight) + Weight::from_ref_time(268_886_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((9_627_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(9_627_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1531,9 +1531,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 1]`. fn seal_return(_r: u32, ) -> Weight { - (203_591_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(203_591_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1541,11 +1541,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - (204_258_000 as Weight) + Weight::from_ref_time(204_258_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((183_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(183_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1555,13 +1555,13 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. 
fn seal_terminate(r: u32, ) -> Weight { - (206_625_000 as Weight) + Weight::from_ref_time(206_625_000 as RefTimeWeight) // Standard Error: 672_000 - .saturating_add((59_377_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(59_377_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1570,11 +1570,11 @@ impl WeightInfo for () { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - (208_866_000 as Weight) + Weight::from_ref_time(208_866_000 as RefTimeWeight) // Standard Error: 164_000 - .saturating_add((133_438_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(133_438_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1582,11 +1582,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - (220_860_000 as Weight) + Weight::from_ref_time(220_860_000 as RefTimeWeight) // Standard Error: 209_000 - .saturating_add((239_951_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(239_951_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1596,15 +1596,15 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. 
fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (439_782_000 as Weight) + Weight::from_ref_time(439_782_000 as RefTimeWeight) // Standard Error: 1_643_000 - .saturating_add((264_687_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(264_687_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 323_000 - .saturating_add((67_636_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(t as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(t as Weight))) + .saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1612,128 +1612,128 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - (140_280_000 as Weight) + Weight::from_ref_time(140_280_000 as RefTimeWeight) // Standard Error: 82_000 - .saturating_add((32_717_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(32_717_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - (161_247_000 as Weight) + Weight::from_ref_time(161_247_000 as RefTimeWeight) // Standard Error: 883_000 - .saturating_add((423_997_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(423_997_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
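Editor's note: `seal_deposit_event_per_topic_and_kb` above is a two-component formula (topics `t`, event size `n` in KiB). A sketch of how such a formula evaluates, assuming the same `Weight` API; the standalone function and the chosen inputs are illustrative only:

use frame_support::weights::{constants::RocksDbWeight, RefTimeWeight, Weight};

// Mirrors the generated two-component formula: base cost, a per-topic term,
// a per-KiB term, fixed database accesses, and t-scaled database accesses.
fn deposit_event_weight_example(t: u32, n: u32) -> Weight {
    Weight::from_ref_time(439_782_000 as RefTimeWeight)
        .saturating_add(Weight::from_ref_time(264_687_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight))
        .saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight))
        .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
        .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
        .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
        .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight)))
}

// For t = 2 and n = 4 the ref-time part is
// 439_782_000 + 2 * 264_687_000 + 4 * 67_636_000 = 1_239_700_000,
// on top of the database weight terms.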
fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - (529_247_000 as Weight) + Weight::from_ref_time(529_247_000 as RefTimeWeight) // Standard Error: 2_745_000 - .saturating_add((85_282_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(55 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(53 as Weight)) - .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(85_282_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - (529_812_000 as Weight) + Weight::from_ref_time(529_812_000 as RefTimeWeight) // Standard Error: 2_513_000 - .saturating_add((74_554_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(55 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(53 as Weight)) - .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(74_554_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - (184_803_000 as Weight) + Weight::from_ref_time(184_803_000 as RefTimeWeight) // Standard Error: 733_000 - .saturating_add((404_933_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(404_933_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - (500_958_000 as Weight) + Weight::from_ref_time(500_958_000 as RefTimeWeight) // Standard Error: 2_980_000 - .saturating_add((75_996_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(55 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(52 as Weight)) - .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(75_996_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(52 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - (177_682_000 as Weight) + Weight::from_ref_time(177_682_000 as RefTimeWeight) // Standard Error: 743_000 - .saturating_add((338_172_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(338_172_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (465_285_000 as Weight) + Weight::from_ref_time(465_285_000 as RefTimeWeight) // Standard Error: 2_599_000 - .saturating_add((155_106_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(55 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(155_106_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_contains_storage(r: u32, ) -> Weight { - (179_118_000 as Weight) + Weight::from_ref_time(179_118_000 as RefTimeWeight) // Standard Error: 572_000 - .saturating_add((311_083_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(311_083_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - (423_056_000 as Weight) + Weight::from_ref_time(423_056_000 as RefTimeWeight) // Standard Error: 2_037_000 - .saturating_add((69_665_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(54 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(69_665_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(54 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - (188_884_000 as Weight) + Weight::from_ref_time(188_884_000 as RefTimeWeight) // Standard Error: 761_000 - .saturating_add((432_781_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(432_781_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_take_storage_per_kb(n: u32, ) -> Weight { - (532_408_000 as Weight) + Weight::from_ref_time(532_408_000 as RefTimeWeight) // Standard Error: 3_348_000 - .saturating_add((164_943_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(55 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(53 as Weight)) - .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(164_943_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1741,13 +1741,13 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - (127_181_000 as Weight) + Weight::from_ref_time(127_181_000 as RefTimeWeight) // Standard Error: 1_495_000 - .saturating_add((1_500_589_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(1_500_589_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1755,13 +1755,13 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
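Editor's note: host functions whose storage traffic grows with the number of calls, such as `seal_transfer` above and `seal_call` below, add r-scaled read/write counts on top of the fixed ones. A generic sketch of that shape, assuming the same `Weight` API; `transfer_like_weight` is a hypothetical helper, not part of the patch:

use frame_support::weights::{constants::RocksDbWeight, RefTimeWeight, Weight};

fn transfer_like_weight(base: u64, per_call: u64, r: u32) -> Weight {
    Weight::from_ref_time(base as RefTimeWeight)
        .saturating_add(Weight::from_ref_time(per_call as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight))
        // Fixed database accesses for the call harness...
        .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight))
        .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
        // ...plus 80 additional reads and writes per repetition, as in the
        // generated `seal_transfer` weight.
        .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
        .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight)))
}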
fn seal_call(r: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_803_000 - .saturating_add((14_860_909_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().reads((80 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((80 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(14_860_909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1769,11 +1769,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 6_045_000 - .saturating_add((14_797_140_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(14_797_140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -1782,15 +1782,15 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - (9_196_444_000 as Weight) + Weight::from_ref_time(9_196_444_000 as RefTimeWeight) // Standard Error: 20_486_000 - .saturating_add((1_458_153_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(1_458_153_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 8_000 - .saturating_add((9_718_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(RocksDbWeight::get().reads(85 as Weight)) - .saturating_add(RocksDbWeight::get().reads((81 as Weight).saturating_mul(t as Weight))) - .saturating_add(RocksDbWeight::get().writes(81 as Weight)) - .saturating_add(RocksDbWeight::get().writes((81 as Weight).saturating_mul(t as Weight))) + .saturating_add(Weight::from_ref_time(9_718_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(85 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(81 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1800,13 +1800,13 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. 
fn seal_instantiate(r: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 36_253_000 - .saturating_add((21_201_529_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().reads((320 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes((320 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(21_201_529_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: System Account (r:81 w:81) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -1817,15 +1817,15 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - (12_282_498_000 as Weight) + Weight::from_ref_time(12_282_498_000 as RefTimeWeight) // Standard Error: 48_112_000 - .saturating_add((720_795_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(720_795_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 22_000 - .saturating_add((124_274_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(167 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(t as Weight))) - .saturating_add(RocksDbWeight::get().writes(165 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(t as Weight))) + .saturating_add(Weight::from_ref_time(124_274_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(167 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(165 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1833,11 +1833,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - (203_959_000 as Weight) + Weight::from_ref_time(203_959_000 as RefTimeWeight) // Standard Error: 142_000 - .saturating_add((61_311_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(61_311_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1845,11 +1845,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (349_915_000 as Weight) + Weight::from_ref_time(349_915_000 as RefTimeWeight) // Standard Error: 40_000 - .saturating_add((320_652_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(320_652_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1857,11 +1857,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - (209_219_000 as Weight) + Weight::from_ref_time(209_219_000 as RefTimeWeight) // Standard Error: 157_000 - .saturating_add((73_728_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(73_728_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1869,11 +1869,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (208_860_000 as Weight) + Weight::from_ref_time(208_860_000 as RefTimeWeight) // Standard Error: 25_000 - .saturating_add((245_718_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(245_718_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1881,11 +1881,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - (206_165_000 as Weight) + Weight::from_ref_time(206_165_000 as RefTimeWeight) // Standard Error: 138_000 - .saturating_add((51_644_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(51_644_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1893,11 +1893,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (255_955_000 as Weight) + Weight::from_ref_time(255_955_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((95_090_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(95_090_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1905,11 +1905,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - (208_153_000 as Weight) + Weight::from_ref_time(208_153_000 as RefTimeWeight) // Standard Error: 140_000 - .saturating_add((51_264_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(51_264_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1917,11 +1917,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (278_368_000 as Weight) + Weight::from_ref_time(278_368_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((95_006_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(95_006_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1929,11 +1929,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - (331_955_000 as Weight) + Weight::from_ref_time(331_955_000 as RefTimeWeight) // Standard Error: 1_155_000 - .saturating_add((3_069_955_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(3_069_955_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1941,11 +1941,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - (207_838_000 as Weight) + Weight::from_ref_time(207_838_000 as RefTimeWeight) // Standard Error: 783_000 - .saturating_add((2_058_503_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_058_503_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1954,316 +1954,316 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_567_000 - .saturating_add((774_380_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads((79 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes((79 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(774_380_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - (73_955_000 as Weight) + Weight::from_ref_time(73_955_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((612_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(612_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - (74_057_000 as Weight) + Weight::from_ref_time(74_057_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - (74_137_000 as Weight) + Weight::from_ref_time(74_137_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((1_427_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - (73_844_000 as Weight) + Weight::from_ref_time(73_844_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_773_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_773_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - (73_979_000 as Weight) + Weight::from_ref_time(73_979_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((1_952_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_952_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
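Editor's note: the `instr_*` weights starting above (`instr_i64const`, `instr_i64load`, ...) share a simpler shape: a fixed harness cost plus a per-repetition cost, with no database weight because they only exercise the Wasm interpreter. A sketch under the same `Weight` API; the helper name and the worked numbers are illustrative:

use frame_support::weights::{RefTimeWeight, Weight};

fn instr_weight_example(base: u64, per_repetition: u64, r: u32) -> Weight {
    Weight::from_ref_time(base as RefTimeWeight)
        .saturating_add(
            Weight::from_ref_time(per_repetition as RefTimeWeight)
                .scalar_saturating_mul(r as RefTimeWeight),
        )
}

// E.g. plugging in the `instr_i64const` coefficients above with r = 50:
// 73_955_000 + 50 * 612_000 = 104_555_000 ref-time units.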
fn instr_br(r: u32, ) -> Weight { - (73_924_000 as Weight) + Weight::from_ref_time(73_924_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((941_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(941_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - (73_574_000 as Weight) + Weight::from_ref_time(73_574_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((1_439_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_439_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - (73_343_000 as Weight) + Weight::from_ref_time(73_343_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((1_603_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_603_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - (76_267_000 as Weight) + Weight::from_ref_time(76_267_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(e as Weight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - (74_877_000 as Weight) + Weight::from_ref_time(74_877_000 as RefTimeWeight) // Standard Error: 12_000 - .saturating_add((7_144_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(7_144_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - (88_665_000 as Weight) + Weight::from_ref_time(88_665_000 as RefTimeWeight) // Standard Error: 20_000 - .saturating_add((9_142_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(9_142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (98_600_000 as Weight) + Weight::from_ref_time(98_600_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((469_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(Weight::from_ref_time(469_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - (74_555_000 as Weight) + Weight::from_ref_time(74_555_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((624_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(624_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - (74_329_000 as Weight) + Weight::from_ref_time(74_329_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((688_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(688_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_local_tee(r: u32, ) -> Weight { - (74_612_000 as Weight) + Weight::from_ref_time(74_612_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((909_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - (76_906_000 as Weight) + Weight::from_ref_time(76_906_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_192_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_192_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - (76_979_000 as Weight) + Weight::from_ref_time(76_979_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_361_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_361_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - (74_370_000 as Weight) + Weight::from_ref_time(74_370_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((661_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(661_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - (73_584_000 as Weight) + Weight::from_ref_time(73_584_000 as RefTimeWeight) // Standard Error: 353_000 - .saturating_add((187_114_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(187_114_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - (74_206_000 as Weight) + Weight::from_ref_time(74_206_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((884_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(884_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - (73_992_000 as Weight) + Weight::from_ref_time(73_992_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((893_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(893_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - (73_985_000 as Weight) + Weight::from_ref_time(73_985_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((891_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(891_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - (74_117_000 as Weight) + Weight::from_ref_time(74_117_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((901_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64extendsi32(r: u32, ) -> Weight { - (73_981_000 as Weight) + Weight::from_ref_time(73_981_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((866_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(866_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - (74_104_000 as Weight) + Weight::from_ref_time(74_104_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((868_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - (74_293_000 as Weight) + Weight::from_ref_time(74_293_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((878_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(878_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - (74_055_000 as Weight) + Weight::from_ref_time(74_055_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - (73_710_000 as Weight) + Weight::from_ref_time(73_710_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - (73_917_000 as Weight) + Weight::from_ref_time(73_917_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_355_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_355_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - (74_048_000 as Weight) + Weight::from_ref_time(74_048_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - (74_029_000 as Weight) + Weight::from_ref_time(74_029_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_349_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_349_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - (74_267_000 as Weight) + Weight::from_ref_time(74_267_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64les(r: u32, ) -> Weight { - (73_952_000 as Weight) + Weight::from_ref_time(73_952_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_350_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - (73_851_000 as Weight) + Weight::from_ref_time(73_851_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_368_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_368_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - (74_034_000 as Weight) + Weight::from_ref_time(74_034_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_348_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_348_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - (73_979_000 as Weight) + Weight::from_ref_time(73_979_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((1_353_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - (74_000_000 as Weight) + Weight::from_ref_time(74_000_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_328_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - (73_883_000 as Weight) + Weight::from_ref_time(73_883_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_331_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_331_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - (74_216_000 as Weight) + Weight::from_ref_time(74_216_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((1_324_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - (73_989_000 as Weight) + Weight::from_ref_time(73_989_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_998_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_998_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - (73_857_000 as Weight) + Weight::from_ref_time(73_857_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((2_073_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(2_073_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rems(r: u32, ) -> Weight { - (73_801_000 as Weight) + Weight::from_ref_time(73_801_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((2_027_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(2_027_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - (74_130_000 as Weight) + Weight::from_ref_time(74_130_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((2_064_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(2_064_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - (74_071_000 as Weight) + Weight::from_ref_time(74_071_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_327_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_327_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - (74_201_000 as Weight) + Weight::from_ref_time(74_201_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((1_330_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_330_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - (74_241_000 as Weight) + Weight::from_ref_time(74_241_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_321_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_321_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - (74_331_000 as Weight) + Weight::from_ref_time(74_331_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add((1_347_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_347_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - (73_674_000 as Weight) + Weight::from_ref_time(73_674_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_359_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_359_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - (73_807_000 as Weight) + Weight::from_ref_time(73_807_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - (73_725_000 as Weight) + Weight::from_ref_time(73_725_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_358_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rotr(r: u32, ) -> Weight { - (73_755_000 as Weight) + Weight::from_ref_time(73_755_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((1_360_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) } } diff --git a/frame/conviction-voting/src/tests.rs b/frame/conviction-voting/src/tests.rs index 9eb7f679efca3..cbd2b0619ac2b 100644 --- a/frame/conviction-voting/src/tests.rs +++ b/frame/conviction-voting/src/tests.rs @@ -22,6 +22,7 @@ use std::collections::BTreeMap; use frame_support::{ assert_noop, assert_ok, parameter_types, traits::{ConstU32, ConstU64, Contains, Polling, VoteTally}, + weights::Weight, }; use sp_core::H256; use sp_runtime::{ @@ -57,7 +58,7 @@ impl Contains for BaseFilter { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1_000_000); + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1_000_000)); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/conviction-voting/src/weights.rs b/frame/conviction-voting/src/weights.rs index 330d02755cb8b..10c5c975a81f1 100644 --- a/frame/conviction-voting/src/weights.rs +++ b/frame/conviction-voting/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_conviction_voting. @@ -62,9 +62,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_new() -> Weight { - (148_804_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(148_804_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: ConvictionVoting VotingFor (r:1 w:1) @@ -72,24 +72,24 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_existing() -> Weight { - (313_333_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(313_333_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn remove_vote() -> Weight { - (300_591_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(300_591_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:0) fn remove_other_vote() -> Weight { - (53_887_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(53_887_000 as 
RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) @@ -97,33 +97,33 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn delegate(r: u32, ) -> Weight { - (51_518_000 as Weight) + Weight::from_ref_time(51_518_000 as RefTimeWeight) // Standard Error: 83_000 - .saturating_add((27_235_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(27_235_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn undelegate(r: u32, ) -> Weight { - (37_885_000 as Weight) + Weight::from_ref_time(37_885_000 as RefTimeWeight) // Standard Error: 75_000 - .saturating_add((24_395_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(24_395_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn unlock() -> Weight { - (67_703_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(67_703_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } } @@ -135,9 +135,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_new() -> Weight { - (148_804_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(148_804_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: ConvictionVoting VotingFor (r:1 w:1) @@ -145,24 +145,24 @@ impl WeightInfo for () { // Storage: 
Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_existing() -> Weight { - (313_333_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(313_333_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn remove_vote() -> Weight { - (300_591_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(300_591_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:0) fn remove_other_vote() -> Weight { - (53_887_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(53_887_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) @@ -170,32 +170,32 @@ impl WeightInfo for () { // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn delegate(r: u32, ) -> Weight { - (51_518_000 as Weight) + Weight::from_ref_time(51_518_000 as RefTimeWeight) // Standard Error: 83_000 - .saturating_add((27_235_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(27_235_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn undelegate(r: u32, ) -> Weight { - (37_885_000 as Weight) + Weight::from_ref_time(37_885_000 as RefTimeWeight) // Standard Error: 75_000 - .saturating_add((24_395_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(24_395_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + 
.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn unlock() -> Weight { - (67_703_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(67_703_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } } diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index a347f47efe121..5bbc97fa16e30 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -1765,7 +1765,7 @@ impl Pallet { /// # fn begin_block(now: T::BlockNumber) -> Weight { let max_block_weight = T::BlockWeights::get().max_block; - let mut weight = 0; + let mut weight = Weight::new(); let next = Self::lowest_unbaked(); let last = Self::referendum_count(); diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 0fe83a07610d1..def8f84067909 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -78,7 +78,7 @@ impl Contains for BaseFilter { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1_000_000); + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1_000_000)); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index 45686b43f7152..351ed42cca8e9 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_democracy. 
@@ -80,44 +80,44 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - (48_328_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(48_328_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy DepositOf (r:1 w:1) fn second(s: u32, ) -> Weight { - (30_923_000 as Weight) + Weight::from_ref_time(30_923_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((142_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_new(r: u32, ) -> Weight { - (40_345_000 as Weight) + Weight::from_ref_time(40_345_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((140_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_existing(r: u32, ) -> Weight { - (39_853_000 as Weight) + Weight::from_ref_time(39_853_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((150_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - (19_364_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(19_364_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) @@ -126,82 +126,82 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn blacklist(p: u32, ) -> Weight { - (57_708_000 as Weight) + Weight::from_ref_time(57_708_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((192_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(192_000 as 
RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) fn external_propose(v: u32, ) -> Weight { - (10_714_000 as Weight) + Weight::from_ref_time(10_714_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((33_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - (3_697_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_697_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - (3_831_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_831_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - (20_271_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(20_271_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) fn veto_external(v: u32, ) -> Weight { - (21_319_000 as Weight) + Weight::from_ref_time(21_319_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn cancel_proposal(p: u32, ) -> Weight { - (43_960_000 as Weight) + Weight::from_ref_time(43_960_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((184_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(184_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - (13_475_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_475_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Scheduler 
Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_queued(r: u32, ) -> Weight { - (24_320_000 as Weight) + Weight::from_ref_time(24_320_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((560_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(560_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base(r: u32, ) -> Weight { - (3_428_000 as Weight) + Weight::from_ref_time(3_428_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_171_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(3_171_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) @@ -210,103 +210,103 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy PublicProps (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - (7_867_000 as Weight) + Weight::from_ref_time(7_867_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_177_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(3_177_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:3 w:3) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn delegate(r: u32, ) -> Weight { - (37_902_000 as Weight) + Weight::from_ref_time(37_902_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((4_335_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(4_335_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + 
.saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Democracy VotingOf (r:2 w:2) // Storage: Democracy ReferendumInfoOf (r:1 w:1) fn undelegate(r: u32, ) -> Weight { - (21_272_000 as Weight) + Weight::from_ref_time(21_272_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((4_351_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(4_351_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - (4_913_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(4_913_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_preimage(b: u32, ) -> Weight { - (27_986_000 as Weight) + Weight::from_ref_time(27_986_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_imminent_preimage(b: u32, ) -> Weight { - (20_058_000 as Weight) + Weight::from_ref_time(20_058_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy Preimages (r:1 w:1) // Storage: System Account (r:1 w:0) fn reap_preimage(b: u32, ) -> Weight { - (28_619_000 as Weight) + Weight::from_ref_time(28_619_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_remove(r: u32, ) -> Weight { - (26_619_000 as Weight) + Weight::from_ref_time(26_619_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((56_000 as 
Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_set(r: u32, ) -> Weight { - (25_373_000 as Weight) + Weight::from_ref_time(25_373_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_vote(r: u32, ) -> Weight { - (15_961_000 as Weight) + Weight::from_ref_time(15_961_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_other_vote(r: u32, ) -> Weight { - (15_992_000 as Weight) + Weight::from_ref_time(15_992_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((113_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(113_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -317,44 +317,44 @@ impl WeightInfo for () { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - (48_328_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(48_328_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy DepositOf (r:1 w:1) fn second(s: u32, ) -> Weight { - (30_923_000 as Weight) + Weight::from_ref_time(30_923_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((142_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf 
(r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_new(r: u32, ) -> Weight { - (40_345_000 as Weight) + Weight::from_ref_time(40_345_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((140_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_existing(r: u32, ) -> Weight { - (39_853_000 as Weight) + Weight::from_ref_time(39_853_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((150_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - (19_364_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(19_364_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) @@ -363,82 +363,82 @@ impl WeightInfo for () { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn blacklist(p: u32, ) -> Weight { - (57_708_000 as Weight) + Weight::from_ref_time(57_708_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((192_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) fn external_propose(v: u32, ) -> Weight { - (10_714_000 as Weight) + Weight::from_ref_time(10_714_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((33_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - (3_697_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_697_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:0 w:1) fn 
external_propose_default() -> Weight { - (3_831_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_831_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - (20_271_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(20_271_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) fn veto_external(v: u32, ) -> Weight { - (21_319_000 as Weight) + Weight::from_ref_time(21_319_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn cancel_proposal(p: u32, ) -> Weight { - (43_960_000 as Weight) + Weight::from_ref_time(43_960_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((184_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(184_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - (13_475_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_475_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_queued(r: u32, ) -> Weight { - (24_320_000 as Weight) + Weight::from_ref_time(24_320_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((560_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(560_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base(r: u32, ) -> Weight { - (3_428_000 as Weight) + Weight::from_ref_time(3_428_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_171_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(3_171_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) @@ -447,102 +447,102 @@ impl WeightInfo for () { // Storage: Democracy PublicProps (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - (7_867_000 as Weight) + Weight::from_ref_time(7_867_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_177_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(3_177_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:3 w:3) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn delegate(r: u32, ) -> Weight { - (37_902_000 as Weight) + Weight::from_ref_time(37_902_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((4_335_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(4_335_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: Democracy VotingOf (r:2 w:2) // Storage: Democracy ReferendumInfoOf (r:1 w:1) fn undelegate(r: u32, ) -> Weight { - (21_272_000 as Weight) + Weight::from_ref_time(21_272_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((4_351_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(4_351_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as 
RefTimeWeight))) } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - (4_913_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(4_913_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_preimage(b: u32, ) -> Weight { - (27_986_000 as Weight) + Weight::from_ref_time(27_986_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_imminent_preimage(b: u32, ) -> Weight { - (20_058_000 as Weight) + Weight::from_ref_time(20_058_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy Preimages (r:1 w:1) // Storage: System Account (r:1 w:0) fn reap_preimage(b: u32, ) -> Weight { - (28_619_000 as Weight) + Weight::from_ref_time(28_619_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_remove(r: u32, ) -> Weight { - (26_619_000 as Weight) + Weight::from_ref_time(26_619_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((56_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_set(r: u32, ) -> Weight { - (25_373_000 as Weight) + Weight::from_ref_time(25_373_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: 
Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_vote(r: u32, ) -> Weight { - (15_961_000 as Weight) + Weight::from_ref_time(15_961_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_other_vote(r: u32, ) -> Weight { - (15_992_000 as Weight) + Weight::from_ref_time(15_992_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((113_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(113_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index e1d3cb8ed5dee..906de8a6c9d20 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -234,7 +234,6 @@ use frame_election_provider_support::{ ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolution, }; use frame_support::{ - dispatch::DispatchResultWithPostInfo, ensure, traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, weights::{DispatchClass, Weight}, @@ -877,7 +876,7 @@ pub mod pallet { origin: OriginFor, raw_solution: Box>>, witness: SolutionOrSnapshotSize, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { ensure_none(origin)?; let error_message = "Invalid unsigned submission must produce invalid block and \ deprive validator from their authoring reward."; @@ -905,7 +904,7 @@ pub mod pallet { prev_ejected: ejected_a_solution, }); - Ok(None.into()) + Ok(()) } /// Set a new value for `MinimumUntrustedScore`. 
@@ -992,7 +991,7 @@ pub mod pallet { let deposit = Self::deposit_for(&raw_solution, size); let call_fee = { let call = Call::submit { raw_solution: raw_solution.clone() }; - T::EstimateCallFee::estimate_call_fee(&call, None.into()) + T::EstimateCallFee::estimate_call_fee(&call, None::<Weight>.into()) }; let submission = SignedSubmission { diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 7eff70b47eba5..72b3ec9764079 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -349,9 +349,11 @@ impl MinerConfig for Runtime { fn solution_weight(v: u32, t: u32, a: u32, d: u32) -> Weight { match MockWeightInfo::get() { - MockedWeightInfo::Basic => - (10 as Weight).saturating_add((5 as Weight).saturating_mul(a as Weight)), - MockedWeightInfo::Complex => (0 * v + 0 * t + 1000 * a + 0 * d) as Weight, + MockedWeightInfo::Basic => Weight::from_ref_time( + (10 as u64).saturating_add((5 as u64).saturating_mul(a as u64)), + ), + MockedWeightInfo::Complex => + Weight::from_ref_time((0 * v + 0 * t + 1000 * a + 0 * d) as u64), MockedWeightInfo::Real => <() as multi_phase::weights::WeightInfo>::feasibility_check(v, t, a, d), } diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index eca75139f925a..b9abfdfba14fb 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -514,8 +514,8 @@ impl<T: Config> Pallet<T> { let feasibility_weight = Self::solution_weight_of(raw_solution, size); let len_deposit = T::SignedDepositByte::get().saturating_mul(encoded_len); - let weight_deposit = - T::SignedDepositWeight::get().saturating_mul(feasibility_weight.saturated_into()); + let weight_deposit = T::SignedDepositWeight::get() + .saturating_mul(feasibility_weight.ref_time().saturated_into()); T::SignedDepositBase::get() .saturating_add(len_deposit) @@ -957,7 +957,7 @@ mod tests { #[test] fn cannot_consume_too_much_future_weight() { ExtBuilder::default() - .signed_weight(40) + .signed_weight(Weight::from_ref_time(40)) .mock_weight_info(MockedWeightInfo::Basic) .build_and_execute(|| { roll_to(15); @@ -971,13 +971,13 @@ mod tests { raw.solution.unique_targets().len() as u32, ); // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, 35); + assert_eq!(solution_weight, Weight::from_ref_time(35)); assert_eq!(raw.solution.voter_count(), 5); - assert_eq!(::SignedMaxWeight::get(), 40); + assert_eq!(::SignedMaxWeight::get(), Weight::from_ref_time(40)); assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(raw.clone()))); - ::set(30); + ::set(Weight::from_ref_time(30)); // note: resubmitting the same solution is technically okay as long as the queue has // space.
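Note on the pattern above: the weight hunks in this series all apply the same mechanical rewrite from the old `u64`-based `Weight` arithmetic to the new struct-based API; the names `Weight::from_ref_time`, `saturating_add`, `scalar_saturating_mul` and `ref_time()` are taken from the hunks themselves. The following is only a minimal stand-alone sketch of that rewrite, using a simplified stand-in for the real `frame_support::weights::Weight` type, not the actual implementation:

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Weight {
    // The new Weight wraps its computational component instead of being a bare u64.
    ref_time: u64,
}

impl Weight {
    const fn from_ref_time(ref_time: u64) -> Self { Self { ref_time } }
    const fn ref_time(self) -> u64 { self.ref_time }
    fn saturating_add(self, rhs: Self) -> Self {
        Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) }
    }
    // Multiplying by a benchmark component is now an explicit scalar operation.
    fn scalar_saturating_mul(self, rhs: u64) -> Self {
        Self { ref_time: self.ref_time.saturating_mul(rhs) }
    }
}

fn main() {
    let r: u64 = 3; // a benchmark component, as `r` in the hunks above

    // Old style: Weight was a plain u64, so ordinary integer arithmetic sufficed.
    let old: u64 = 51_518_000u64.saturating_add(27_235_000u64.saturating_mul(r));

    // New style, as in the hunks above: explicit constructor plus scalar multiplication.
    let new = Weight::from_ref_time(51_518_000)
        .saturating_add(Weight::from_ref_time(27_235_000).scalar_saturating_mul(r));

    // The computational part can still be read back out where a raw number is needed,
    // e.g. for the deposit calculation in signed.rs above.
    assert_eq!(new.ref_time(), old);
}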
diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index de25355f0ca5b..8ef7d87473159 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -699,54 +699,153 @@ mod max_weight { fn find_max_voter_binary_search_works() { let w = SolutionOrSnapshotSize { voters: 10, targets: 0 }; MockWeightInfo::set(crate::mock::MockedWeightInfo::Complex); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 0), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 999), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1000), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1001), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1990), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1999), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2000), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2001), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2010), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2990), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2999), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3000), 3); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3333), 3); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 5500), 5); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 7777), 7); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 9999), 9); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 10_000), 10); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 10_999), 10); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 11_000), 10); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 22_000), 10); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::zero()), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1990)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2990)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2999)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3000)), + 3 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + 3 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(5500)), + 5 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(7777)), + 7 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(9999)), + 9 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(10_000)), + 10 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, 
w, Weight::from_ref_time(10_999)), + 10 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(11_000)), + 10 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(22_000)), + 10 + ); let w = SolutionOrSnapshotSize { voters: 1, targets: 0 }; - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 0), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 999), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1000), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1001), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1990), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1999), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2000), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2001), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2010), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3333), 1); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(0)), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1990)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + 1 + ); let w = SolutionOrSnapshotSize { voters: 2, targets: 0 }; - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 0), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 999), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1000), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1001), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 1999), 1); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2000), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2001), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 2010), 2); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, 3333), 2); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(0)), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); + assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + 1 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + 2 + ); + assert_eq!( + Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + 2 + ); + assert_eq!( + 
Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + 2 + ); } } @@ -1024,7 +1123,7 @@ mod tests { #[test] fn miner_trims_weight() { ExtBuilder::default() - .miner_weight(100) + .miner_weight(Weight::from_ref_time(100)) .mock_weight_info(crate::mock::MockedWeightInfo::Basic) .build_and_execute(|| { roll_to(25); @@ -1038,11 +1137,11 @@ mod tests { raw.solution.unique_targets().len() as u32, ); // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, 35); + assert_eq!(solution_weight, Weight::from_ref_time(35)); assert_eq!(raw.solution.voter_count(), 5); // now reduce the max weight - ::set(25); + ::set(Weight::from_ref_time(25)); let (raw, witness) = MultiPhase::mine_solution().unwrap(); let solution_weight = ::solution_weight( @@ -1052,7 +1151,7 @@ mod tests { raw.solution.unique_targets().len() as u32, ); // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, 25); + assert_eq!(solution_weight, Weight::from_ref_time(25)); assert_eq!(raw.solution.voter_count(), 3); }) } diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 68ce00dd0de32..7ceb4a20e042a 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_election_provider_multi_phase. @@ -68,46 +68,46 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - (13_495_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) + Weight::from_ref_time(13_495_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - (14_114_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(14_114_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - (13_756_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_756_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - (28_467_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_467_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: System Account (r:1 
w:1) fn finalize_signed_phase_reject_solution() -> Weight { - (21_991_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_991_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { - (3_186_000 as Weight) + Weight::from_ref_time(3_186_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((202_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(202_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((60_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(60_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) @@ -119,13 +119,13 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn elect_queued(a: u32, d: u32, ) -> Weight { - (137_653_000 as Weight) + Weight::from_ref_time(137_653_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((640_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(640_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 6_000 - .saturating_add((48_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(8 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) @@ -134,9 +134,9 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit() -> Weight { - (49_313_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(49_313_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -146,33 +146,33 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((867_000 as Weight).saturating_mul(v as Weight)) + 
.saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 7_000 - .saturating_add((107_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((6_907_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(6_907_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 18_000 - .saturating_add((1_427_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((844_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(844_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 5_000 - .saturating_add((150_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 8_000 - .saturating_add((5_421_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(5_421_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 13_000 - .saturating_add((1_167_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(Weight::from_ref_time(1_167_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) } } @@ -187,46 +187,46 @@ impl WeightInfo for () { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - (13_495_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + Weight::from_ref_time(13_495_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - (14_114_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(14_114_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - (13_756_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_756_000 as RefTimeWeight) + 
.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - (28_467_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_467_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - (21_991_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(21_991_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { - (3_186_000 as Weight) + Weight::from_ref_time(3_186_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((202_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(202_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((60_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(60_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) @@ -238,13 +238,13 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn elect_queued(a: u32, d: u32, ) -> Weight { - (137_653_000 as Weight) + Weight::from_ref_time(137_653_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((640_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(640_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 6_000 - .saturating_add((48_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) @@ -253,9 +253,9 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit() -> Weight { - (49_313_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(49_313_000 as RefTimeWeight) + 
.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -265,32 +265,32 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((867_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 7_000 - .saturating_add((107_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add((6_907_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(6_907_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 18_000 - .saturating_add((1_427_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((844_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(844_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 5_000 - .saturating_add((150_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 8_000 - .saturating_add((5_421_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(5_421_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 13_000 - .saturating_add((1_167_000 as Weight).saturating_mul(d as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(Weight::from_ref_time(1_167_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) } } diff --git a/frame/election-provider-support/src/weights.rs b/frame/election-provider-support/src/weights.rs index c603b196519b5..4f9e47b09a4da 100644 --- a/frame/election-provider-support/src/weights.rs +++ b/frame/election-provider-support/src/weights.rs @@ -40,7 +40,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_election_provider_support_benchmarking. 
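The weight-file changes above all follow one mechanical pattern: constants that used to be bare integer `Weight` values are wrapped in `Weight::from_ref_time`, and per-component multiplications go through `scalar_saturating_mul`. A minimal sketch of the before/after shape, using only the APIs that appear in this diff (the function name and constants are illustrative, not taken from any pallet):

// Sketch of the translation applied across the weights.rs files in this patch.
use frame_support::weights::{RefTimeWeight, Weight, constants::RocksDbWeight};

// Old shape (for comparison):
// fn example_call(v: u32) -> Weight {
//     (13_495_000 as Weight)
//         .saturating_add((202_000 as Weight).saturating_mul(v as Weight))
//         .saturating_add(RocksDbWeight::get().reads(8 as Weight))
// }

// New shape: base weight built from a reference-time value, component terms
// scaled with `scalar_saturating_mul`, database accesses counted as RefTimeWeight.
fn example_call(v: u32) -> Weight {
    Weight::from_ref_time(13_495_000 as RefTimeWeight)
        .saturating_add(
            Weight::from_ref_time(202_000 as RefTimeWeight)
                .scalar_saturating_mul(v as RefTimeWeight),
        )
        .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight))
}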
@@ -53,43 +53,43 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn phragmen(v: u32, t: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 667_000 - .saturating_add((32_973_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(32_973_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 1_334_000 - .saturating_add((1_334_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(1_334_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 60_644_000 - .saturating_add((2_636_364_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(Weight::from_ref_time(2_636_364_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) } fn phragmms(v: u32, t: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 73_000 - .saturating_add((21_073_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(21_073_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 146_000 - .saturating_add((65_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(65_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 6_649_000 - .saturating_add((1_711_424_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(Weight::from_ref_time(1_711_424_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn phragmen(v: u32, t: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 667_000 - .saturating_add((32_973_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(32_973_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 1_334_000 - .saturating_add((1_334_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(1_334_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 60_644_000 - .saturating_add((2_636_364_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(Weight::from_ref_time(2_636_364_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) } fn phragmms(v: u32, t: u32, d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 73_000 - .saturating_add((21_073_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(21_073_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 146_000 - .saturating_add((65_000 as Weight).saturating_mul(t as Weight)) + .saturating_add(Weight::from_ref_time(65_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) // Standard Error: 6_649_000 - .saturating_add((1_711_424_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(Weight::from_ref_time(1_711_424_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) } } diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 1076ae77fbdda..539e90d0179ff 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -279,7 +279,7 @@ pub mod pallet { if !term_duration.is_zero() && (n % term_duration).is_zero() { Self::do_phragmen() } else { - 0 + Weight::zero() } } } @@ -363,7 +363,7 @@ 
pub mod pallet { T::Currency::set_lock(T::PalletId::get(), &who, locked_stake, WithdrawReasons::all()); Voting::::insert(&who, Voter { votes, deposit: new_deposit, stake: locked_stake }); - Ok(None.into()) + Ok(None::.into()) } /// Remove `origin` as a voter. @@ -372,11 +372,11 @@ pub mod pallet { /// /// The dispatch origin of this call must be signed and be a voter. #[pallet::weight(T::WeightInfo::remove_voter())] - pub fn remove_voter(origin: OriginFor) -> DispatchResultWithPostInfo { + pub fn remove_voter(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(Self::is_voter(&who), Error::::MustBeVoter); Self::do_remove_voter(&who); - Ok(None.into()) + Ok(()) } /// Submit oneself for candidacy. A fixed amount of deposit is recorded. @@ -398,7 +398,7 @@ pub mod pallet { pub fn submit_candidacy( origin: OriginFor, #[pallet::compact] candidate_count: u32, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { let who = ensure_signed(origin)?; let actual_count = >::decode_len().unwrap_or(0) as u32; @@ -417,7 +417,7 @@ pub mod pallet { .map_err(|_| Error::::InsufficientCandidateFunds)?; >::mutate(|c| c.insert(index, (who, T::CandidacyBond::get()))); - Ok(None.into()) + Ok(()) } /// Renounce one's intention to be a candidate for the next election round. 3 potential @@ -443,10 +443,7 @@ pub mod pallet { Renouncing::Member => T::WeightInfo::renounce_candidacy_members(), Renouncing::RunnerUp => T::WeightInfo::renounce_candidacy_runners_up(), })] - pub fn renounce_candidacy( - origin: OriginFor, - renouncing: Renouncing, - ) -> DispatchResultWithPostInfo { + pub fn renounce_candidacy(origin: OriginFor, renouncing: Renouncing) -> DispatchResult { let who = ensure_signed(origin)?; match renouncing { Renouncing::Member => { @@ -482,7 +479,7 @@ pub mod pallet { })?; }, }; - Ok(None.into()) + Ok(()) } /// Remove a particular member from the set. This is effective immediately and the bond of @@ -513,7 +510,7 @@ pub mod pallet { who: AccountIdLookupOf, slash_bond: bool, rerun_election: bool, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { ensure_root(origin)?; let who = T::Lookup::lookup(who)?; @@ -525,7 +522,7 @@ pub mod pallet { } // no refund needed. - Ok(None.into()) + Ok(()) } /// Clean all voters who are defunct (i.e. they do not serve any purpose at all). The @@ -543,13 +540,13 @@ pub mod pallet { origin: OriginFor, _num_voters: u32, _num_defunct: u32, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { let _ = ensure_root(origin)?; >::iter() .filter(|(_, x)| Self::is_defunct_voter(&x.votes)) .for_each(|(dv, _)| Self::do_remove_voter(&dv)); - Ok(None.into()) + Ok(()) } } @@ -1177,7 +1174,7 @@ mod tests { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { @@ -1488,7 +1485,7 @@ mod tests { ensure_members_has_approval_stake(); } - fn submit_candidacy(origin: Origin) -> DispatchResultWithPostInfo { + fn submit_candidacy(origin: Origin) -> sp_runtime::DispatchResult { Elections::submit_candidacy(origin, Elections::candidates().len() as u32) } diff --git a/frame/elections-phragmen/src/migrations/v3.rs b/frame/elections-phragmen/src/migrations/v3.rs index b1cdd4be98541..9ec9c6e7eea6c 100644 --- a/frame/elections-phragmen/src/migrations/v3.rs +++ b/frame/elections-phragmen/src/migrations/v3.rs @@ -101,14 +101,14 @@ pub fn apply( StorageVersion::new(3).put::>(); - Weight::max_value() + Weight::MAX } else { log::warn!( target: "runtime::elections-phragmen", "Attempted to apply migration to V3 but failed because storage version is {:?}", storage_version, ); - 0 + Weight::zero() } } diff --git a/frame/elections-phragmen/src/migrations/v4.rs b/frame/elections-phragmen/src/migrations/v4.rs index e0fc17ec2a12d..76ef630706c50 100644 --- a/frame/elections-phragmen/src/migrations/v4.rs +++ b/frame/elections-phragmen/src/migrations/v4.rs @@ -38,7 +38,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { target: "runtime::elections-phragmen", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return 0 + return Weight::zero() } let storage_version = StorageVersion::get::>(); log::info!( @@ -63,7 +63,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { "Attempted to apply migration to v4 but failed because storage version is {:?}", storage_version, ); - 0 + Weight::zero() } } diff --git a/frame/elections-phragmen/src/migrations/v5.rs b/frame/elections-phragmen/src/migrations/v5.rs index a9fb018ba0219..eb96d7ddf6a99 100644 --- a/frame/elections-phragmen/src/migrations/v5.rs +++ b/frame/elections-phragmen/src/migrations/v5.rs @@ -8,7 +8,7 @@ use super::super::*; /// situation where they could increase their free balance but still not be able to use their funds /// because they were less than the lock. pub fn migrate(to_migrate: Vec) -> Weight { - let mut weight = 0; + let mut weight = Weight::new(); for who in to_migrate.iter() { if let Ok(mut voter) = Voting::::try_get(who) { diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index 0e067699e5fac..14c69bf16f7f1 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_elections_phragmen. @@ -70,11 +70,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. 
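Outside the weights files, two further patterns recur in the hunks above: runtime hooks and storage migrations now return a typed `Weight` (`Weight::zero()`, `Weight::MAX`, or an accumulator started from `Weight::new()`) instead of a bare integer, and the elections-phragmen dispatchables that never refund weight now return `DispatchResult` with `Ok(())` rather than `DispatchResultWithPostInfo` with `Ok(None.into())`. A rough sketch under those assumptions (all names below are stand-ins, not code from the pallets):

use frame_support::weights::Weight;
use sp_runtime::DispatchResult;

// Migration-style hook: return a typed weight instead of `0`.
fn migrate_sketch(already_migrated: bool) -> Weight {
    if already_migrated {
        return Weight::zero()
    }
    // Accumulate instead of `let mut weight = 0;`.
    let mut weight = Weight::new();
    weight = weight.saturating_add(Weight::from_ref_time(10_000));
    weight
}

// Dispatchable that previously returned `Ok(None.into())`.
fn remove_voter_sketch() -> DispatchResult {
    // ... perform the removal ...
    Ok(())
}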
fn vote_equal(v: u32, ) -> Weight { - (27_011_000 as Weight) + Weight::from_ref_time(27_011_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((214_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(214_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -83,11 +83,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - (40_240_000 as Weight) + Weight::from_ref_time(40_240_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((244_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(244_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -96,38 +96,38 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - (40_394_000 as Weight) + Weight::from_ref_time(40_394_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((217_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(217_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - (37_651_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_651_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - (42_217_000 as Weight) + Weight::from_ref_time(42_217_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((50_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (46_459_000 as Weight) + Weight::from_ref_time(46_459_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((26_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(26_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) @@ -135,19 +135,19 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - (45_189_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(45_189_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - (34_516_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(34_516_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Benchmark Override (r:0 w:0) fn remove_member_without_replacement() -> Weight { - (2_000_000_000_000 as Weight) + Weight::from_ref_time(2_000_000_000_000 as RefTimeWeight) } // Storage: Elections Members (r:1 w:1) // Storage: System Account (r:1 w:1) @@ -156,9 +156,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - (51_838_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(51_838_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Elections Voting (r:5001 w:5000) // Storage: Elections Members (r:1 w:0) @@ -169,12 +169,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[5000, 10000]`. /// The range of component `d` is `[1, 5000]`. 
fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 76_000 - .saturating_add((63_721_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(Weight::from_ref_time(63_721_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:1) @@ -189,15 +189,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[1, 10000]`. /// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 773_000 - .saturating_add((81_534_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(81_534_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 51_000 - .saturating_add((4_453_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(T::DbWeight::get().reads(280 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(c as Weight))) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) + .saturating_add(Weight::from_ref_time(4_453_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(280 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) } } @@ -210,11 +210,11 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - (27_011_000 as Weight) + Weight::from_ref_time(27_011_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((214_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(214_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -223,11 +223,11 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. 
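Where a call charges database accesses per component, as in `clean_defunct_voters` above, the per-item count stays plain integer arithmetic and only the resulting total is handed to `reads`/`writes` as a `RefTimeWeight`. A sketch of that shape, with the constants copied from the hunk above purely for illustration:

use frame_support::weights::{RefTimeWeight, Weight, constants::RocksDbWeight};

fn clean_defunct_voters_sketch(v: u32) -> Weight {
    Weight::from_ref_time(0 as RefTimeWeight)
        .saturating_add(
            Weight::from_ref_time(63_721_000 as RefTimeWeight)
                .scalar_saturating_mul(v as RefTimeWeight),
        )
        // Four fixed reads, plus three reads and three writes per voter.
        .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight))
        .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight)))
        .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight)))
}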
fn vote_more(v: u32, ) -> Weight { - (40_240_000 as Weight) + Weight::from_ref_time(40_240_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((244_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(244_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -236,38 +236,38 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - (40_394_000 as Weight) + Weight::from_ref_time(40_394_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((217_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(217_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - (37_651_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_651_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - (42_217_000 as Weight) + Weight::from_ref_time(42_217_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((50_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (46_459_000 as Weight) + Weight::from_ref_time(46_459_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((26_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(26_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) @@ -275,19 +275,19 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - (45_189_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(45_189_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - (34_516_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(34_516_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Benchmark Override (r:0 w:0) fn remove_member_without_replacement() -> Weight { - (2_000_000_000_000 as Weight) + Weight::from_ref_time(2_000_000_000_000 as RefTimeWeight) } // Storage: Elections Members (r:1 w:1) // Storage: System Account (r:1 w:1) @@ -296,9 +296,9 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - (51_838_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(51_838_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Elections Voting (r:5001 w:5000) // Storage: Elections Members (r:1 w:0) @@ -309,12 +309,12 @@ impl WeightInfo for () { /// The range of component `v` is `[5000, 10000]`. /// The range of component `d` is `[1, 5000]`. 
fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 76_000 - .saturating_add((63_721_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(Weight::from_ref_time(63_721_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:1) @@ -329,14 +329,14 @@ impl WeightInfo for () { /// The range of component `v` is `[1, 10000]`. /// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 773_000 - .saturating_add((81_534_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(81_534_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 51_000 - .saturating_add((4_453_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(RocksDbWeight::get().reads(280 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(c as Weight))) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(c as Weight))) + .saturating_add(Weight::from_ref_time(4_453_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(280 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) } } diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index 03dc2c613c01e..ad46bdc4185bd 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -329,7 +329,7 @@ impl WeighData<(&BalanceOf,)> for WeightForSetDum let multiplier = self.0; // *target.0 is the amount passed into the extrinsic let cents = *target.0 / >::from(MILLICENTS); - (cents * multiplier).saturated_into::() + Weight::from_ref_time((cents * multiplier).saturated_into::()) } } @@ -392,7 +392,7 @@ pub mod pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { // Anything that needs to be done at the start of the block. // We don't do anything here. - 0 + Weight::zero() } // `on_finalize` is executed at the end of block after all extrinsic are dispatched. diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index 0f659e12fb443..f6afb7a0c77f1 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -52,7 +52,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; @@ -190,7 +190,7 @@ fn weights_work() { let default_call = pallet_example_basic::Call::::accumulate_dummy { increase_by: 10 }; let info1 = default_call.get_dispatch_info(); // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - assert!(info1.weight > 0); + assert!(info1.weight > Weight::zero()); // `set_dummy` is simpler than `accumulate_dummy`, and the weight // should be less. diff --git a/frame/examples/basic/src/weights.rs b/frame/examples/basic/src/weights.rs index 5fc6434e396eb..e8fc44bc4b050 100644 --- a/frame/examples/basic/src/weights.rs +++ b/frame/examples/basic/src/weights.rs @@ -49,7 +49,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_example_basic. @@ -63,39 +63,39 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn set_dummy_benchmark(b: u32, ) -> Weight { - (5_834_000 as Weight) - .saturating_add((24_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(5_834_000 as RefTimeWeight) + .saturating_add(Weight::from_ref_time(24_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } fn accumulate_dummy(b: u32, ) -> Weight { - (51_353_000 as Weight) - .saturating_add((14_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(51_353_000 as RefTimeWeight) + .saturating_add(Weight::from_ref_time(14_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } fn sort_vector(x: u32, ) -> Weight { - (2_569_000 as Weight) + Weight::from_ref_time(2_569_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn set_dummy_benchmark(b: u32, ) -> Weight { - (5_834_000 as Weight) - .saturating_add((24_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(5_834_000 as RefTimeWeight) + .saturating_add(Weight::from_ref_time(24_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } fn accumulate_dummy(b: u32, ) -> Weight { - (51_353_000 as Weight) - .saturating_add((14_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(51_353_000 as RefTimeWeight) + .saturating_add(Weight::from_ref_time(14_000 as RefTimeWeight).scalar_saturating_mul(b as 
RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } fn sort_vector(x: u32, ) -> Weight { - (2_569_000 as Weight) + Weight::from_ref_time(2_569_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(x as Weight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) } } diff --git a/frame/examples/offchain-worker/src/tests.rs b/frame/examples/offchain-worker/src/tests.rs index 703220e64fa8a..5b03614333cc9 100644 --- a/frame/examples/offchain-worker/src/tests.rs +++ b/frame/examples/offchain-worker/src/tests.rs @@ -53,7 +53,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; diff --git a/frame/executive/README.md b/frame/executive/README.md index e96d07b0843f2..c14c3912b082d 100644 --- a/frame/executive/README.md +++ b/frame/executive/README.md @@ -56,7 +56,7 @@ struct CustomOnRuntimeUpgrade; impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { fn on_runtime_upgrade() -> frame_support::weights::Weight { // Do whatever you want. - 0 + frame_support::weights::Weight::zero() } } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index cd3e1c500db26..45361084f2f42 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -107,7 +107,7 @@ //! impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { //! fn on_runtime_upgrade() -> frame_support::weights::Weight { //! // Do whatever you want. -//! 0 +//! frame_support::weights::Weight::zero() //! } //! } //! @@ -123,12 +123,12 @@ use frame_support::{ EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, }, - weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Weight}, }; use sp_runtime::{ generic::Digest, traits::{ - self, Applyable, CheckEqual, Checkable, Dispatchable, Header, NumberFor, One, Saturating, + self, Applyable, CheckEqual, Checkable, Dispatchable, Header, NumberFor, One, ValidateUnsigned, Zero, }, transaction_validity::{TransactionSource, TransactionValidity}, @@ -299,7 +299,7 @@ where // This means the format of all the event related storages must always be compatible. 
>::reset_events(); - let mut weight = 0; + let mut weight = Weight::new(); if Self::runtime_upgraded() { weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); } @@ -413,7 +413,7 @@ where let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); - if remaining_weight > 0 { + if remaining_weight > Weight::zero() { let used_weight = >::on_idle( block_number, remaining_weight, @@ -593,12 +593,12 @@ mod tests { // one with block number arg and one without fn on_initialize(n: T::BlockNumber) -> Weight { println!("on_initialize({})", n); - 175 + Weight::from_ref_time(175) } fn on_idle(n: T::BlockNumber, remaining_weight: Weight) -> Weight { println!("on_idle{}, {})", n, remaining_weight); - 175 + Weight::from_ref_time(175) } fn on_finalize(n: T::BlockNumber) { @@ -607,7 +607,7 @@ mod tests { fn on_runtime_upgrade() -> Weight { sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); - 200 + Weight::from_ref_time(200) } fn offchain_worker(n: T::BlockNumber) { @@ -721,9 +721,9 @@ mod tests { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::builder() - .base_block(10) - .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = 5) - .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = 1024.into()) + .base_block(Weight::from_ref_time(10)) + .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_ref_time(5)) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_ref_time(1024).into()) .build_or_panic(); pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, @@ -814,7 +814,7 @@ mod tests { sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); System::deposit_event(frame_system::Event::CodeUpdated); - 100 + Weight::from_ref_time(100) } } @@ -988,12 +988,12 @@ mod tests { sign_extra(1, 0, 0), ); let encoded = xt.encode(); - let encoded_len = encoded.len() as Weight; + let encoded_len = encoded.len() as u64; // on_initialize weight + base block execution weight let block_weights = ::BlockWeights::get(); - let base_block_weight = 175 + block_weights.base_block; + let base_block_weight = Weight::from_ref_time(175) + block_weights.base_block; let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight; - let num_to_exhaust_block = limit / (encoded_len + 5); + let num_to_exhaust_block = limit.ref_time() / (encoded_len + 5); t.execute_with(|| { Executive::initialize_block(&Header::new( 1, @@ -1016,7 +1016,7 @@ mod tests { assert_eq!( >::block_weight().total(), //--------------------- on_initialize + block_execution + extrinsic_base weight - (encoded_len + 5) * (nonce + 1) + base_block_weight, + Weight::from_ref_time((encoded_len + 5) * (nonce + 1)) + base_block_weight, ); assert_eq!( >::extrinsic_index(), @@ -1047,8 +1047,8 @@ mod tests { let mut t = new_test_ext(1); t.execute_with(|| { // Block execution weight + on_initialize weight from custom module - let base_block_weight = - 175 + ::BlockWeights::get().base_block; + let base_block_weight = Weight::from_ref_time(175) + + ::BlockWeights::get().base_block; Executive::initialize_block(&Header::new( 1, @@ -1066,7 +1066,7 @@ mod tests { assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); // default weight for `TestXt` == encoded length. 
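The executive changes above replace integer block-weight bookkeeping with `Weight` arithmetic end to end: accumulation via `saturating_add`, subtraction via `saturating_sub`, and comparisons against `Weight::zero()`. A simplified sketch of the `on_idle` arithmetic (the signature and values are illustrative only):

use frame_support::weights::Weight;

// Hooks now return a constructed weight rather than a bare `175`.
fn on_initialize_sketch() -> Weight {
    Weight::from_ref_time(175)
}

// Remaining-weight check of the kind done before dispatching `on_idle`.
fn remaining_for_on_idle(max_block: Weight, used_so_far: Weight) -> Option<Weight> {
    let remaining = max_block.saturating_sub(used_so_far);
    if remaining > Weight::zero() {
        Some(remaining)
    } else {
        None
    }
}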
- let extrinsic_weight = len as Weight + + let extrinsic_weight = Weight::from_ref_time(len as u64) + ::BlockWeights::get() .get(DispatchClass::Normal) .base_extrinsic; @@ -1180,7 +1180,10 @@ mod tests { // NOTE: might need updates over time if new weights are introduced. // For now it only accounts for the base block execution weight and // the `on_initialize` weight defined in the custom test module. - assert_eq!(>::block_weight().total(), 175 + 175 + 10); + assert_eq!( + >::block_weight().total(), + Weight::from_ref_time(175 + 175 + 10) + ); }) } diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index 59522f9a106f2..b94b7d164f04c 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -335,7 +335,7 @@ pub mod pallet { if (n % T::IntakePeriod::get()).is_zero() { Self::pursue_target(T::MaxIntakeBids::get()) } else { - 0 + Weight::zero() } } } diff --git a/frame/gilt/src/weights.rs b/frame/gilt/src/weights.rs index 952080a2d030b..3d2b629e8b16b 100644 --- a/frame/gilt/src/weights.rs +++ b/frame/gilt/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_gilt. @@ -60,70 +60,70 @@ impl WeightInfo for SubstrateWeight { // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid(l: u32, ) -> Weight { - (41_605_000 as Weight) + Weight::from_ref_time(41_605_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((62_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(62_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid_max() -> Weight { - (97_715_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(97_715_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn retract_bid(l: u32, ) -> Weight { - (42_061_000 as Weight) + Weight::from_ref_time(42_061_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:1) fn set_target() -> Weight { - (5_026_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(5_026_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Gilt Active (r:1 w:1) // Storage: Gilt 
ActiveTotal (r:1 w:1) fn thaw() -> Weight { - (47_753_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(47_753_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:0) fn pursue_target_noop() -> Weight { - (1_663_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(1_663_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_item(b: u32, ) -> Weight { - (40_797_000 as Weight) + Weight::from_ref_time(40_797_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((4_122_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) + .saturating_add(Weight::from_ref_time(4_122_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_queue(q: u32, ) -> Weight { - (14_944_000 as Weight) + Weight::from_ref_time(14_944_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add((8_135_000 as Weight).saturating_mul(q as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(q as Weight))) + .saturating_add(Weight::from_ref_time(8_135_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) } } @@ -132,69 +132,69 @@ impl WeightInfo for () { // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid(l: u32, ) -> Weight { - (41_605_000 as Weight) + Weight::from_ref_time(41_605_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((62_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(62_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid_max() -> Weight { - (97_715_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - 
.saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(97_715_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn retract_bid(l: u32, ) -> Weight { - (42_061_000 as Weight) + Weight::from_ref_time(42_061_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:1) fn set_target() -> Weight { - (5_026_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(5_026_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Gilt Active (r:1 w:1) // Storage: Gilt ActiveTotal (r:1 w:1) fn thaw() -> Weight { - (47_753_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(47_753_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:0) fn pursue_target_noop() -> Weight { - (1_663_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(1_663_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_item(b: u32, ) -> Weight { - (40_797_000 as Weight) + Weight::from_ref_time(40_797_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((4_122_000 as Weight).saturating_mul(b as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) + .saturating_add(Weight::from_ref_time(4_122_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_queue(q: u32, ) -> Weight { - (14_944_000 as Weight) + Weight::from_ref_time(14_944_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add((8_135_000 as Weight).saturating_mul(q as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((2 as 
Weight).saturating_mul(q as Weight)))
+ .saturating_add(Weight::from_ref_time(8_135_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(q as RefTimeWeight)))
+ .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(q as RefTimeWeight)))
 }
 }
diff --git a/frame/grandpa/src/default_weights.rs b/frame/grandpa/src/default_weights.rs
index 330e9bb255177..f21c3ddc101f7 100644
--- a/frame/grandpa/src/default_weights.rs
+++ b/frame/grandpa/src/default_weights.rs
@@ -35,7 +35,7 @@ impl crate::WeightInfo for () {
 // checking membership proof
 (35 * WEIGHT_PER_MICROS)
- .saturating_add((175 * WEIGHT_PER_NANOS).saturating_mul(validator_count))
+ .saturating_add((175 * WEIGHT_PER_NANOS).scalar_saturating_mul(validator_count))
 .saturating_add(DbWeight::get().reads(5))
 // check equivocation proof
 .saturating_add(95 * WEIGHT_PER_MICROS)
diff --git a/frame/grandpa/src/migrations/v4.rs b/frame/grandpa/src/migrations/v4.rs
index ab43f7baef4e9..81dbd3bab4b67 100644
--- a/frame/grandpa/src/migrations/v4.rs
+++ b/frame/grandpa/src/migrations/v4.rs
@@ -37,7 +37,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight {
 target: "runtime::afg",
 "New pallet name is equal to the old prefix. No migration needs to be done.",
 );
- return 0
+ return Weight::zero()
 }
 let storage_version = StorageVersion::get::>();
 log::info!(
@@ -57,7 +57,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight {
 ::BlockWeights::get().max_block
 } else {
- 0
+ Weight::zero()
 }
 }
diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs
index 5e6c955c441c5..d246466cf0db4 100644
--- a/frame/grandpa/src/mock.rs
+++ b/frame/grandpa/src/mock.rs
@@ -71,7 +71,7 @@ impl_opaque_keys! {
 parameter_types! {
 pub BlockWeights: frame_system::limits::BlockWeights =
- frame_system::limits::BlockWeights::simple_max(1024);
+ frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024));
 }
 impl frame_system::Config for Test {
diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs
index ab0a9c677b00e..9c39069bf9538 100644
--- a/frame/grandpa/src/tests.rs
+++ b/frame/grandpa/src/tests.rs
@@ -856,7 +856,7 @@ fn valid_equivocation_reports_dont_pay_fees() {
 .get_dispatch_info();
 // it should have non-zero weight and the fee has to be paid.
- assert!(info.weight > 0);
+ assert!(info.weight > Weight::zero());
 assert_eq!(info.pays_fee, Pays::Yes);
 // report the equivocation.
diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs
index 6066f176a6106..a0773e9904a1c 100644
--- a/frame/identity/src/tests.rs
+++ b/frame/identity/src/tests.rs
@@ -50,7 +50,7 @@ frame_support::construct_runtime!(
 parameter_types!
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs index 7d3371c31b03b..780abf1bb01df 100644 --- a/frame/identity/src/weights.rs +++ b/frame/identity/src/weights.rs @@ -40,7 +40,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_identity. @@ -69,48 +69,48 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { - (16_649_000 as Weight) + Weight::from_ref_time(16_649_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((241_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(241_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn set_identity(r: u32, x: u32, ) -> Weight { - (31_322_000 as Weight) + Weight::from_ref_time(31_322_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((252_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(252_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((312_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(312_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. 
fn set_subs_new(s: u32, ) -> Weight { - (30_012_000 as Weight) + Weight::from_ref_time(30_012_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_005_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(3_005_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn set_subs_old(p: u32, ) -> Weight { - (29_623_000 as Weight) + Weight::from_ref_time(29_623_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_100_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(1_100_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -119,81 +119,81 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (34_370_000 as Weight) + Weight::from_ref_time(34_370_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((186_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((1_114_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(1_114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((189_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(189_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. 
fn request_judgement(r: u32, x: u32, ) -> Weight { - (34_759_000 as Weight) + Weight::from_ref_time(34_759_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((251_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((340_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(340_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { - (32_254_000 as Weight) + Weight::from_ref_time(32_254_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add((159_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(159_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((347_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(347_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { - (7_858_000 as Weight) + Weight::from_ref_time(7_858_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((190_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(190_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { - (8_011_000 as Weight) + Weight::from_ref_time(8_011_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((187_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(187_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. 
fn set_fields(r: u32, ) -> Weight { - (7_970_000 as Weight) + Weight::from_ref_time(7_970_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((175_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 19]`. /// The range of component `x` is `[1, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { - (24_730_000 as Weight) + Weight::from_ref_time(24_730_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((196_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(196_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((341_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(341_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -203,58 +203,58 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - (44_988_000 as Weight) + Weight::from_ref_time(44_988_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((201_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((1_126_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(1_126_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. 
fn add_sub(s: u32, ) -> Weight { - (36_768_000 as Weight) + Weight::from_ref_time(36_768_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { - (13_474_000 as Weight) + Weight::from_ref_time(13_474_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { - (37_720_000 as Weight) + Weight::from_ref_time(37_720_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. fn quit_sub(s: u32, ) -> Weight { - (26_848_000 as Weight) + Weight::from_ref_time(26_848_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -263,48 +263,48 @@ impl WeightInfo for () { // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { - (16_649_000 as Weight) + Weight::from_ref_time(16_649_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((241_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(241_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. 
fn set_identity(r: u32, x: u32, ) -> Weight { - (31_322_000 as Weight) + Weight::from_ref_time(31_322_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((252_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(252_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((312_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(312_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn set_subs_new(s: u32, ) -> Weight { - (30_012_000 as Weight) + Weight::from_ref_time(30_012_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((3_005_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(3_005_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn set_subs_old(p: u32, ) -> Weight { - (29_623_000 as Weight) + Weight::from_ref_time(29_623_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_100_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(1_100_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -313,81 +313,81 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. 
fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (34_370_000 as Weight) + Weight::from_ref_time(34_370_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((186_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((1_114_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(1_114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((189_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(189_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { - (34_759_000 as Weight) + Weight::from_ref_time(34_759_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((251_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((340_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(340_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { - (32_254_000 as Weight) + Weight::from_ref_time(32_254_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add((159_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(159_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((347_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(347_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. 
fn set_fee(r: u32, ) -> Weight { - (7_858_000 as Weight) + Weight::from_ref_time(7_858_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((190_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(190_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { - (8_011_000 as Weight) + Weight::from_ref_time(8_011_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((187_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(187_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { - (7_970_000 as Weight) + Weight::from_ref_time(7_970_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((175_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 19]`. /// The range of component `x` is `[1, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { - (24_730_000 as Weight) + Weight::from_ref_time(24_730_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((196_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(196_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add((341_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(341_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -397,57 +397,57 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. 
fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - (44_988_000 as Weight) + Weight::from_ref_time(44_988_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((201_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((1_126_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(1_126_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. fn add_sub(s: u32, ) -> Weight { - (36_768_000 as Weight) + Weight::from_ref_time(36_768_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { - (13_474_000 as Weight) + Weight::from_ref_time(13_474_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { - (37_720_000 as Weight) + Weight::from_ref_time(37_720_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. 
fn quit_sub(s: u32, ) -> Weight { - (26_848_000 as Weight) + Weight::from_ref_time(26_848_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((115_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 2459f7e748941..b734bd37b6fd4 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -123,7 +123,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Runtime { diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs index 34762e66ec301..09fbc55854288 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_im_online. @@ -56,13 +56,13 @@ impl WeightInfo for SubstrateWeight { // Storage: ImOnline AuthoredBlocks (r:1 w:0) // Storage: ImOnline Keys (r:1 w:0) fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - (79_225_000 as Weight) + Weight::from_ref_time(79_225_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((41_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(Weight::from_ref_time(41_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) // Standard Error: 0 - .saturating_add((293_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(293_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -74,12 +74,12 @@ impl WeightInfo for () { // Storage: ImOnline AuthoredBlocks (r:1 w:0) // Storage: ImOnline Keys (r:1 w:0) fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - (79_225_000 as Weight) + Weight::from_ref_time(79_225_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((41_000 as Weight).saturating_mul(k as Weight)) + .saturating_add(Weight::from_ref_time(41_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) // Standard Error: 0 - .saturating_add((293_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(293_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index 
6bd79708c3dd2..693296a3b1064 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -44,7 +44,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs index 6635d45272048..2475c86fd499b 100644 --- a/frame/indices/src/weights.rs +++ b/frame/indices/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_indices. @@ -56,35 +56,35 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Indices Accounts (r:1 w:1) fn claim() -> Weight { - (25_929_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_929_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (32_627_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(32_627_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) fn free() -> Weight { - (26_804_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_804_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - (27_390_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(27_390_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) fn freeze() -> Weight { - (30_973_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(30_973_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -92,34 +92,34 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Indices Accounts (r:1 w:1) fn claim() -> Weight { - (25_929_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_929_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Indices 
Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (32_627_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(32_627_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) fn free() -> Weight { - (26_804_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_804_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - (27_390_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(27_390_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Indices Accounts (r:1 w:1) fn freeze() -> Weight { - (30_973_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(30_973_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs index 193958cfd41aa..f646ca02a0377 100644 --- a/frame/lottery/src/weights.rs +++ b/frame/lottery/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_lottery. 
@@ -63,30 +63,30 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - (44_706_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(44_706_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Lottery CallIndices (r:0 w:1) fn set_calls(n: u32, ) -> Weight { - (12_556_000 as Weight) + Weight::from_ref_time(12_556_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add((295_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(295_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - (38_051_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(38_051_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - (6_910_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(6_910_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -94,9 +94,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - (53_732_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(53_732_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -105,9 +105,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - (55_868_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(55_868_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } } @@ -121,30 +121,30 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - (44_706_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(44_706_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Lottery CallIndices (r:0 w:1) fn 
set_calls(n: u32, ) -> Weight { - (12_556_000 as Weight) + Weight::from_ref_time(12_556_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add((295_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(295_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - (38_051_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(38_051_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - (6_910_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(6_910_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -152,9 +152,9 @@ impl WeightInfo for () { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - (53_732_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(53_732_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -163,8 +163,8 @@ impl WeightInfo for () { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - (55_868_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(55_868_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } } diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 32e1130f3d944..b4c9c3b38f1da 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -532,7 +532,7 @@ mod tests { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); pub static Members: Vec = vec![]; pub static Prime: Option = None; } diff --git a/frame/membership/src/migrations/v4.rs b/frame/membership/src/migrations/v4.rs index b3b52751d9598..5b8735aa2bac9 100644 --- a/frame/membership/src/migrations/v4.rs +++ b/frame/membership/src/migrations/v4.rs @@ -46,7 +46,7 @@ pub fn migrate::on_chain_storage_version(); @@ -71,7 +71,7 @@ pub fn migrate WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn add_member(m: u32, ) -> Weight { - (15_318_000 as Weight) + Weight::from_ref_time(15_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((51_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(51_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -73,11 +73,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn remove_member(m: u32, ) -> Weight { - (18_005_000 as Weight) + Weight::from_ref_time(18_005_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((45_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(45_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -85,11 +85,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn swap_member(m: u32, ) -> Weight { - (18_029_000 as Weight) + Weight::from_ref_time(18_029_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((55_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -97,11 +97,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn reset_member(m: u32, ) -> Weight { - (18_105_000 as Weight) + Weight::from_ref_time(18_105_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((158_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as 
RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -109,29 +109,29 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn change_key(m: u32, ) -> Weight { - (18_852_000 as Weight) + Weight::from_ref_time(18_852_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((55_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:0) // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn set_prime(m: u32, ) -> Weight { - (4_869_000 as Weight) + Weight::from_ref_time(4_869_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((28_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(28_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn clear_prime(m: u32, ) -> Weight { - (1_593_000 as Weight) + Weight::from_ref_time(1_593_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -142,11 +142,11 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn add_member(m: u32, ) -> Weight { - (15_318_000 as Weight) + Weight::from_ref_time(15_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((51_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(51_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -154,11 +154,11 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn remove_member(m: u32, ) -> Weight { - (18_005_000 as Weight) + Weight::from_ref_time(18_005_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((45_000 as Weight).saturating_mul(m as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(45_000 as 
RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
}
// Storage: TechnicalMembership Members (r:1 w:1)
// Storage: TechnicalCommittee Proposals (r:1 w:0)
@@ -166,11 +166,11 @@ impl WeightInfo for () {
// Storage: TechnicalCommittee Members (r:0 w:1)
// Storage: TechnicalCommittee Prime (r:0 w:1)
fn swap_member(m: u32, ) -> Weight {
- (18_029_000 as Weight)
+ Weight::from_ref_time(18_029_000 as RefTimeWeight)
// Standard Error: 0
- .saturating_add((55_000 as Weight).saturating_mul(m as Weight))
- .saturating_add(RocksDbWeight::get().reads(3 as Weight))
- .saturating_add(RocksDbWeight::get().writes(3 as Weight))
+ .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
}
// Storage: TechnicalMembership Members (r:1 w:1)
// Storage: TechnicalCommittee Proposals (r:1 w:0)
@@ -178,11 +178,11 @@ impl WeightInfo for () {
// Storage: TechnicalCommittee Members (r:0 w:1)
// Storage: TechnicalCommittee Prime (r:0 w:1)
fn reset_member(m: u32, ) -> Weight {
- (18_105_000 as Weight)
+ Weight::from_ref_time(18_105_000 as RefTimeWeight)
// Standard Error: 0
- .saturating_add((158_000 as Weight).saturating_mul(m as Weight))
- .saturating_add(RocksDbWeight::get().reads(3 as Weight))
- .saturating_add(RocksDbWeight::get().writes(3 as Weight))
+ .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight))
}
// Storage: TechnicalMembership Members (r:1 w:1)
// Storage: TechnicalCommittee Proposals (r:1 w:0)
@@ -190,28 +190,28 @@ impl WeightInfo for () {
// Storage: TechnicalCommittee Members (r:0 w:1)
// Storage: TechnicalCommittee Prime (r:0 w:1)
fn change_key(m: u32, ) -> Weight {
- (18_852_000 as Weight)
+ Weight::from_ref_time(18_852_000 as RefTimeWeight)
// Standard Error: 0
- .saturating_add((55_000 as Weight).saturating_mul(m as Weight))
- .saturating_add(RocksDbWeight::get().reads(3 as Weight))
- .saturating_add(RocksDbWeight::get().writes(4 as Weight))
+ .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight))
}
// Storage: TechnicalMembership Members (r:1 w:0)
// Storage: TechnicalMembership Prime (r:0 w:1)
// Storage: TechnicalCommittee Prime (r:0 w:1)
fn set_prime(m: u32, ) -> Weight {
- (4_869_000 as Weight)
+ Weight::from_ref_time(4_869_000 as RefTimeWeight)
// Standard Error: 0
- .saturating_add((28_000 as Weight).saturating_mul(m as Weight))
- .saturating_add(RocksDbWeight::get().reads(1 as Weight))
- .saturating_add(RocksDbWeight::get().writes(2 as Weight))
+ .saturating_add(Weight::from_ref_time(28_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
}
// Storage: TechnicalMembership Prime (r:0 w:1)
// Storage: TechnicalCommittee Prime (r:0 w:1)
fn clear_prime(m: u32, ) -> Weight {
- (1_593_000 as Weight)
+ Weight::from_ref_time(1_593_000 as RefTimeWeight)
// Standard Error: 0
- .saturating_add((2_000 as Weight).saturating_mul(m as Weight))
- .saturating_add(RocksDbWeight::get().writes(2 as Weight))
+ .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
}
}
diff --git a/frame/merkle-mountain-range/src/default_weights.rs b/frame/merkle-mountain-range/src/default_weights.rs
index 73d1963a42964..e7a4b6ab31c4a 100644
--- a/frame/merkle-mountain-range/src/default_weights.rs
+++ b/frame/merkle-mountain-range/src/default_weights.rs
@@ -30,7 +30,7 @@ impl crate::WeightInfo for () {
// Blake2 hash cost.
let hash_weight = 2 * WEIGHT_PER_NANOS;
// No-op hook.
- let hook_weight = 0;
+ let hook_weight = Weight::zero();
leaf_weight
.saturating_add(hash_weight)
diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs
index 566a051823d5e..e13f89617bb9a 100644
--- a/frame/merkle-mountain-range/src/tests.rs
+++ b/frame/merkle-mountain-range/src/tests.rs
@@ -39,7 +39,7 @@ fn register_offchain_ext(ext: &mut sp_io::TestExternalities) {
ext.register_extension(OffchainWorkerExt::new(offchain));
}
-fn new_block() -> u64 {
+fn new_block() -> Weight {
let number = frame_system::Pallet::<Test>::block_number() + 1;
let hash = H256::repeat_byte(number as u8);
LEAF_DATA.with(|r| r.borrow_mut().a = number);
@@ -110,7 +110,7 @@ fn should_start_empty() {
crate::RootHash::<Test>::get(),
hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")
);
- assert!(weight != 0);
+ assert!(weight != Weight::zero());
});
}
diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs
index 8201426f5330f..dafc421a7b72d 100644
--- a/frame/multisig/src/benchmarking.rs
+++ b/frame/multisig/src/benchmarking.rs
@@ -80,7 +80,7 @@ benchmarks! {
// Whitelist caller account from further DB operations.
let caller_key = frame_system::Account::<T>::hashed_key_for(&caller);
frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into());
- }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, false, 0)
+ }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, false, Weight::zero())
verify {
assert!(Multisigs::<T>::contains_key(multi_account_id, call_hash));
assert!(!Calls::<T>::contains_key(call_hash));
@@ -99,7 +99,7 @@ benchmarks! {
// Whitelist caller account from further DB operations.
let caller_key = frame_system::Account::<T>::hashed_key_for(&caller);
frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into());
- }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, true, 0)
+ }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, true, Weight::zero())
verify {
assert!(Multisigs::<T>::contains_key(multi_account_id, call_hash));
assert!(Calls::<T>::contains_key(call_hash));
@@ -118,13 +118,13 @@ benchmarks! {
// before the call, get the timepoint
let timepoint = Multisig::<T>::timepoint();
// Create the multi, storing for worst case
- Multisig::<T>::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?;
+ Multisig::<T>::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?;
assert!(Calls::<T>::contains_key(call_hash));
let caller2 = signatories2.remove(0);
// Whitelist caller account from further DB operations.
let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, 0) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::zero()) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); @@ -143,13 +143,13 @@ benchmarks! { // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi, not storing - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), false, 0)?; + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), false, Weight::zero())?; assert!(!Calls::::contains_key(call_hash)); let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, true, 0) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, true, Weight::zero()) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); @@ -169,20 +169,20 @@ benchmarks! { // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi, storing it for worst case - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; // Everyone except the first person approves for i in 1 .. s - 1 { let mut signatories_loop = signatories2.clone(); let caller_loop = signatories_loop.remove(i as usize); let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, 0)?; + Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, Weight::zero())?; } let caller2 = signatories2.remove(0); assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::max_value()) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::MAX) verify { assert!(!Multisigs::::contains_key(&multi_account_id, call_hash)); } @@ -200,7 +200,7 @@ benchmarks! { let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); // Create the multi - }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash, 0) + }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash, Weight::zero()) verify { assert!(Multisigs::::contains_key(multi_account_id, call_hash)); } @@ -225,13 +225,13 @@ benchmarks! 
{ None, call, false, - 0 + Weight::zero() )?; let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash, 0) + }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash, Weight::zero()) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); @@ -251,13 +251,13 @@ benchmarks! { // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, 0)?; + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; // Everyone except the first person approves for i in 1 .. s - 1 { let mut signatories_loop = signatories2.clone(); let caller_loop = signatories_loop.remove(i as usize); let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, 0)?; + Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, Weight::zero())?; } let caller2 = signatories2.remove(0); assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); @@ -270,7 +270,7 @@ benchmarks! { signatories2, Some(timepoint), call_hash, - Weight::max_value() + Weight::MAX ) verify { assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); @@ -288,7 +288,7 @@ benchmarks! { let timepoint = Multisig::::timepoint(); // Create the multi let o = RawOrigin::Signed(caller.clone()).into(); - Multisig::::as_multi(o, s as u16, signatories.clone(), None, call, true, 0)?; + Multisig::::as_multi(o, s as u16, signatories.clone(), None, call, true, Weight::zero())?; assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); assert!(Calls::::contains_key(call_hash)); // Whitelist caller account from further DB operations. diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index d4ea041e5820e..10184ce84a9a8 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -257,9 +257,9 @@ pub mod pallet { let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::as_multi_threshold_1(call.using_encoded(|c| c.len() as u32)) - .saturating_add(dispatch_info.weight) // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(dispatch_info.weight), dispatch_info.class, ) })] diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index 5d37e32e4d8a4..ddf5197ab734c 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -50,7 +50,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; @@ -152,7 +152,7 @@ fn multisig_deposit_is_taken_and_returned() { None, OpaqueCall::from_encoded(data.clone()), false, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(1), 2); assert_eq!(Balances::reserved_balance(1), 3); @@ -190,7 +190,7 @@ fn multisig_deposit_is_taken_and_returned_with_call_storage() { None, OpaqueCall::from_encoded(data), true, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::reserved_balance(1), 5); @@ -221,7 +221,14 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash, + Weight::zero() + )); assert_eq!(Balances::free_balance(1), 1); assert_eq!(Balances::reserved_balance(1), 4); @@ -232,7 +239,7 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { Some(now()), OpaqueCall::from_encoded(data), true, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(2), 3); assert_eq!(Balances::reserved_balance(2), 2); @@ -259,18 +266,25 @@ fn cancel_multisig_returns_deposit() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash, + Weight::zero() + )); assert_ok!(Multisig::approve_as_multi( Origin::signed(2), 3, vec![1, 3], Some(now()), hash, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(1), 6); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash),); + assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash)); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -288,11 +302,25 @@ fn timepoint_checking_works() { let hash = blake2_256(&call); assert_noop!( - Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash, 0), + Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash, + Weight::zero() + ), Error::::UnexpectedTimepoint, ); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + hash, + Weight::zero() + )); assert_noop!( Multisig::as_multi( @@ -302,7 +330,7 @@ fn timepoint_checking_works() { None, OpaqueCall::from_encoded(call.clone()), false, - 0 + Weight::zero() ), Error::::NoTimepoint, ); @@ -315,7 +343,7 @@ fn timepoint_checking_works() { Some(later), OpaqueCall::from_encoded(call), false, - 0 + Weight::zero() ), Error::::WrongTimepoint, ); @@ -341,7 +369,7 @@ fn multisig_2_of_3_works_with_call_storing() { None, OpaqueCall::from_encoded(data), true, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(6), 0); @@ -369,7 +397,14 @@ fn multisig_2_of_3_works() { let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - 
assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + hash, + Weight::zero() + )); assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::as_multi( @@ -397,14 +432,21 @@ fn multisig_3_of_3_works() { let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash, + Weight::zero() + )); assert_ok!(Multisig::approve_as_multi( Origin::signed(2), 3, vec![1, 3], Some(now()), hash, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(6), 0); @@ -426,14 +468,21 @@ fn cancel_multisig_works() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash, + Weight::zero() + )); assert_ok!(Multisig::approve_as_multi( Origin::signed(2), 3, vec![1, 3], Some(now()), hash, - 0 + Weight::zero() )); assert_noop!( Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash), @@ -455,7 +504,7 @@ fn cancel_multisig_with_call_storage_works() { None, OpaqueCall::from_encoded(call), true, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(1), 4); assert_ok!(Multisig::approve_as_multi( @@ -464,7 +513,7 @@ fn cancel_multisig_with_call_storage_works() { vec![1, 3], Some(now()), hash, - 0 + Weight::zero() )); assert_noop!( Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash), @@ -480,7 +529,14 @@ fn cancel_multisig_with_alt_call_storage_works() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash, + Weight::zero() + )); assert_eq!(Balances::free_balance(1), 6); assert_ok!(Multisig::as_multi( Origin::signed(2), @@ -489,7 +545,7 @@ fn cancel_multisig_with_alt_call_storage_works() { Some(now()), OpaqueCall::from_encoded(call), true, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(2), 8); assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash)); @@ -516,7 +572,7 @@ fn multisig_2_of_3_as_multi_works() { None, OpaqueCall::from_encoded(data.clone()), false, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(6), 0); @@ -555,7 +611,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { None, OpaqueCall::from_encoded(data1.clone()), false, - 0 + Weight::zero() )); assert_ok!(Multisig::as_multi( Origin::signed(2), @@ -564,7 +620,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { None, OpaqueCall::from_encoded(data2.clone()), false, - 0 + Weight::zero() )); assert_ok!(Multisig::as_multi( Origin::signed(3), @@ -609,7 +665,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { None, OpaqueCall::from_encoded(data.clone()), false, - 0 + Weight::zero() )); assert_ok!(Multisig::as_multi( Origin::signed(2), @@ -629,7 +685,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { None, OpaqueCall::from_encoded(data.clone()), false, - 0 + Weight::zero() )); assert_ok!(Multisig::as_multi( Origin::signed(3), @@ -667,7 
+723,7 @@ fn minimum_threshold_check_works() { None, OpaqueCall::from_encoded(call.clone()), false, - 0 + Weight::zero() ), Error::::MinimumThreshold, ); @@ -679,7 +735,7 @@ fn minimum_threshold_check_works() { None, OpaqueCall::from_encoded(call.clone()), false, - 0 + Weight::zero() ), Error::::MinimumThreshold, ); @@ -698,7 +754,7 @@ fn too_many_signatories_fails() { None, OpaqueCall::from_encoded(call), false, - 0 + Weight::zero() ), Error::::TooManySignatories, ); @@ -710,9 +766,23 @@ fn duplicate_approvals_are_ignored() { new_test_ext().execute_with(|| { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + hash, + Weight::zero() + )); assert_noop!( - Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], Some(now()), hash, 0), + Multisig::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + Some(now()), + hash, + Weight::zero() + ), Error::::AlreadyApproved, ); assert_ok!(Multisig::approve_as_multi( @@ -721,10 +791,17 @@ fn duplicate_approvals_are_ignored() { vec![1, 3], Some(now()), hash, - 0 + Weight::zero() )); assert_noop!( - Multisig::approve_as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), hash, 0), + Multisig::approve_as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + hash, + Weight::zero() + ), Error::::AlreadyApproved, ); }); @@ -741,7 +818,14 @@ fn multisig_1_of_3_works() { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_noop!( - Multisig::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash, 0), + Multisig::approve_as_multi( + Origin::signed(1), + 1, + vec![2, 3], + None, + hash, + Weight::zero() + ), Error::::MinimumThreshold, ); assert_noop!( @@ -752,7 +836,7 @@ fn multisig_1_of_3_works() { None, OpaqueCall::from_encoded(call), false, - 0 + Weight::zero() ), Error::::MinimumThreshold, ); @@ -791,7 +875,7 @@ fn weight_check_works() { None, OpaqueCall::from_encoded(data.clone()), false, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(6), 0); @@ -803,7 +887,7 @@ fn weight_check_works() { Some(now()), OpaqueCall::from_encoded(data), false, - 0 + Weight::zero() ), Error::::MaxWeightTooLow, ); @@ -825,14 +909,21 @@ fn multisig_handles_no_preimage_after_all_approve() { let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash, 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash, + Weight::zero() + )); assert_ok!(Multisig::approve_as_multi( Origin::signed(2), 3, vec![1, 3], Some(now()), hash, - 0 + Weight::zero() )); assert_ok!(Multisig::approve_as_multi( Origin::signed(3), @@ -840,7 +931,7 @@ fn multisig_handles_no_preimage_after_all_approve() { vec![1, 2], Some(now()), hash, - 0 + Weight::zero() )); assert_eq!(Balances::free_balance(6), 0); diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index 7946b96546768..0b580ec82b640 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight 
functions needed for pallet_multisig. @@ -60,199 +60,199 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn as_multi_threshold_1(_z: u32, ) -> Weight { - (17_537_000 as Weight) + Weight::from_ref_time(17_537_000 as RefTimeWeight) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn as_multi_create(s: u32, z: u32, ) -> Weight { - (36_535_000 as Weight) + Weight::from_ref_time(36_535_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((99_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(99_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - (39_918_000 as Weight) + Weight::from_ref_time(39_918_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((95_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(95_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) fn as_multi_approve(s: u32, z: u32, ) -> Weight { - (25_524_000 as Weight) + Weight::from_ref_time(25_524_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((94_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(94_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - (39_923_000 as Weight) + Weight::from_ref_time(39_923_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((91_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(91_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as 
RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) fn as_multi_complete(s: u32, z: u32, ) -> Weight { - (45_877_000 as Weight) + Weight::from_ref_time(45_877_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((146_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(146_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn approve_as_multi_create(s: u32, ) -> Weight { - (34_309_000 as Weight) + Weight::from_ref_time(34_309_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:0) fn approve_as_multi_approve(s: u32, ) -> Weight { - (22_848_000 as Weight) + Weight::from_ref_time(22_848_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) fn approve_as_multi_complete(s: u32, ) -> Weight { - (63_239_000 as Weight) + Weight::from_ref_time(63_239_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((161_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) fn cancel_as_multi(s: u32, ) -> Weight { - (51_254_000 as Weight) + Weight::from_ref_time(51_254_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((118_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(118_000 as 
RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn as_multi_threshold_1(_z: u32, ) -> Weight { - (17_537_000 as Weight) + Weight::from_ref_time(17_537_000 as RefTimeWeight) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn as_multi_create(s: u32, z: u32, ) -> Weight { - (36_535_000 as Weight) + Weight::from_ref_time(36_535_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((99_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(99_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - (39_918_000 as Weight) + Weight::from_ref_time(39_918_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((95_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(95_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) fn as_multi_approve(s: u32, z: u32, ) -> Weight { - (25_524_000 as Weight) + Weight::from_ref_time(25_524_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((94_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(94_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - (39_923_000 as Weight) + Weight::from_ref_time(39_923_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((91_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(91_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - 
.saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) fn as_multi_complete(s: u32, z: u32, ) -> Weight { - (45_877_000 as Weight) + Weight::from_ref_time(45_877_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((146_000 as Weight).saturating_mul(s as Weight)) + .saturating_add(Weight::from_ref_time(146_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(z as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn approve_as_multi_create(s: u32, ) -> Weight { - (34_309_000 as Weight) + Weight::from_ref_time(34_309_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:0) fn approve_as_multi_approve(s: u32, ) -> Weight { - (22_848_000 as Weight) + Weight::from_ref_time(22_848_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((114_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) fn approve_as_multi_complete(s: u32, ) -> Weight { - (63_239_000 as Weight) + Weight::from_ref_time(63_239_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((161_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) fn cancel_as_multi(s: u32, ) -> Weight { - (51_254_000 as Weight) + Weight::from_ref_time(51_254_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((118_000 as Weight).saturating_mul(s as Weight)) - 
.saturating_add(RocksDbWeight::get().reads(2 as Weight))
- .saturating_add(RocksDbWeight::get().writes(2 as Weight))
+ .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
+ .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
}
}
diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs
index 8eb6936ec0450..cf5af649b7843 100644
--- a/frame/nicks/src/lib.rs
+++ b/frame/nicks/src/lib.rs
@@ -273,7 +273,7 @@ mod tests {
parameter_types! {
pub BlockWeights: frame_system::limits::BlockWeights =
- frame_system::limits::BlockWeights::simple_max(1024);
+ frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024));
}
impl frame_system::Config for Test {
type BaseCallFilter = frame_support::traits::Everything;
diff --git a/frame/node-authorization/src/weights.rs b/frame/node-authorization/src/weights.rs
index cf182f94273ce..f5a816ddaecee 100644
--- a/frame/node-authorization/src/weights.rs
+++ b/frame/node-authorization/src/weights.rs
@@ -21,7 +21,7 @@
#![allow(unused_parens)]
#![allow(unused_imports)]
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
use sp_std::marker::PhantomData;
pub trait WeightInfo {
@@ -37,13 +37,13 @@ pub trait WeightInfo {
}
impl WeightInfo for () {
- fn add_well_known_node() -> Weight { 50_000_000 }
- fn remove_well_known_node() -> Weight { 50_000_000 }
- fn swap_well_known_node() -> Weight { 50_000_000 }
- fn reset_well_known_nodes() -> Weight { 50_000_000 }
- fn claim_node() -> Weight { 50_000_000 }
- fn remove_claim() -> Weight { 50_000_000 }
- fn transfer_node() -> Weight { 50_000_000 }
- fn add_connections() -> Weight { 50_000_000 }
- fn remove_connections() -> Weight { 50_000_000 }
+ fn add_well_known_node() -> Weight { Weight::from_ref_time(50_000_000) }
+ fn remove_well_known_node() -> Weight { Weight::from_ref_time(50_000_000) }
+ fn swap_well_known_node() -> Weight { Weight::from_ref_time(50_000_000) }
+ fn reset_well_known_nodes() -> Weight { Weight::from_ref_time(50_000_000) }
+ fn claim_node() -> Weight { Weight::from_ref_time(50_000_000) }
+ fn remove_claim() -> Weight { Weight::from_ref_time(50_000_000) }
+ fn transfer_node() -> Weight { Weight::from_ref_time(50_000_000) }
+ fn add_connections() -> Weight { Weight::from_ref_time(50_000_000) }
+ fn remove_connections() -> Weight { Weight::from_ref_time(50_000_000) }
}
diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs
index 243e5489b5445..412c954a2bbf3 100644
--- a/frame/nomination-pools/src/migration.rs
+++ b/frame/nomination-pools/src/migration.rs
@@ -320,6 +320,7 @@ pub mod v2 {
current
);
current.put::<Pallet<T>>();
+ T::DbWeight::get().reads_writes(members_translated + 1, reward_pools_translated + 1)
}
}
diff --git a/frame/nomination-pools/src/weights.rs b/frame/nomination-pools/src/weights.rs
index a9003ffd3fb4c..1f0d2ce8cddc4 100644
--- a/frame/nomination-pools/src/weights.rs
+++ b/frame/nomination-pools/src/weights.rs
@@ -41,7 +41,7 @@
#![allow(unused_parens)]
#![allow(unused_imports)]
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
use sp_std::marker::PhantomData;
/// Weight functions needed for pallet_nomination_pools.
@@ -80,9 +80,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn join() -> Weight { - (123_947_000 as Weight) - .saturating_add(T::DbWeight::get().reads(17 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) + Weight::from_ref_time(123_947_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(17 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -94,9 +94,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - (118_236_000 as Weight) - .saturating_add(T::DbWeight::get().reads(14 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) + Weight::from_ref_time(118_236_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(14 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -108,18 +108,18 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - (132_475_000 as Weight) - .saturating_add(T::DbWeight::get().reads(14 as Weight)) - .saturating_add(T::DbWeight::get().writes(13 as Weight)) + Weight::from_ref_time(132_475_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(14 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(13 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - (50_299_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(50_299_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -136,9 +136,9 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - (121_254_000 as Weight) - .saturating_add(T::DbWeight::get().reads(18 as Weight)) - .saturating_add(T::DbWeight::get().writes(13 as Weight)) + Weight::from_ref_time(121_254_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(18 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(13 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -146,11 +146,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. 
fn pool_withdraw_unbonded(s: u32, ) -> Weight { - (41_928_000 as Weight) + Weight::from_ref_time(41_928_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -162,11 +162,11 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - (81_611_000 as Weight) + Weight::from_ref_time(81_611_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(7 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(7 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -189,9 +189,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - (139_849_000 as Weight) - .saturating_add(T::DbWeight::get().reads(19 as Weight)) - .saturating_add(T::DbWeight::get().writes(16 as Weight)) + Weight::from_ref_time(139_849_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(19 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(16 as RefTimeWeight)) } // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: NominationPools MinCreateBond (r:1 w:0) @@ -216,9 +216,9 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - (126_246_000 as Weight) - .saturating_add(T::DbWeight::get().reads(22 as Weight)) - .saturating_add(T::DbWeight::get().writes(15 as Weight)) + Weight::from_ref_time(126_246_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(22 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(15 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -234,30 +234,30 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. 
fn nominate(n: u32, ) -> Weight { - (48_829_000 as Weight) + Weight::from_ref_time(48_829_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((2_204_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(12 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + .saturating_add(Weight::from_ref_time(2_204_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - (26_761_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_761_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { - (14_519_000 as Weight) + Weight::from_ref_time(14_519_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: NominationPools MinJoinBond (r:0 w:1) // Storage: NominationPools MaxPoolMembers (r:0 w:1) @@ -265,14 +265,14 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - (6_173_000 as Weight) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(6_173_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - (22_261_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_261_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -283,9 +283,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (47_959_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(47_959_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } } @@ -305,9 +305,9 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) 
fn join() -> Weight { - (123_947_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(17 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) + Weight::from_ref_time(123_947_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(17 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -319,9 +319,9 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - (118_236_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(14 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) + Weight::from_ref_time(118_236_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(14 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -333,18 +333,18 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - (132_475_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(14 as Weight)) - .saturating_add(RocksDbWeight::get().writes(13 as Weight)) + Weight::from_ref_time(132_475_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(14 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(13 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - (50_299_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(50_299_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -361,9 +361,9 @@ impl WeightInfo for () { // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - (121_254_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(18 as Weight)) - .saturating_add(RocksDbWeight::get().writes(13 as Weight)) + Weight::from_ref_time(121_254_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(18 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(13 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -371,11 +371,11 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. 
fn pool_withdraw_unbonded(s: u32, ) -> Weight { - (41_928_000 as Weight) + Weight::from_ref_time(41_928_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -387,11 +387,11 @@ impl WeightInfo for () { // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - (81_611_000 as Weight) + Weight::from_ref_time(81_611_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(7 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(7 as RefTimeWeight)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -414,9 +414,9 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - (139_849_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(19 as Weight)) - .saturating_add(RocksDbWeight::get().writes(16 as Weight)) + Weight::from_ref_time(139_849_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(19 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(16 as RefTimeWeight)) } // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: NominationPools MinCreateBond (r:1 w:0) @@ -441,9 +441,9 @@ impl WeightInfo for () { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - (126_246_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(22 as Weight)) - .saturating_add(RocksDbWeight::get().writes(15 as Weight)) + Weight::from_ref_time(126_246_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(22 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(15 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -459,30 +459,30 @@ impl WeightInfo for () { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. 
fn nominate(n: u32, ) -> Weight { - (48_829_000 as Weight) + Weight::from_ref_time(48_829_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add((2_204_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(12 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + .saturating_add(Weight::from_ref_time(2_204_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - (26_761_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_761_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { - (14_519_000 as Weight) + Weight::from_ref_time(14_519_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: NominationPools MinJoinBond (r:0 w:1) // Storage: NominationPools MaxPoolMembers (r:0 w:1) @@ -490,14 +490,14 @@ impl WeightInfo for () { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - (6_173_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(6_173_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - (22_261_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_261_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -508,8 +508,8 @@ impl WeightInfo for () { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (47_959_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(47_959_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } } diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 
d51a81b1212c0..312bc4e18f413 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -40,7 +40,9 @@ type Balance = u64; parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max( + 2 * WEIGHT_PER_SECOND + ); } impl frame_system::Config for Test { diff --git a/frame/preimage/src/mock.rs b/frame/preimage/src/mock.rs index 109806049a0fd..1a1f05cb9c088 100644 --- a/frame/preimage/src/mock.rs +++ b/frame/preimage/src/mock.rs @@ -50,7 +50,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2_000_000_000_000); + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(2_000_000_000_000)); } impl frame_system::Config for Test { type BaseCallFilter = Everything; diff --git a/frame/preimage/src/weights.rs b/frame/preimage/src/weights.rs index de3eb6607fe8c..183b704ec705d 100644 --- a/frame/preimage/src/weights.rs +++ b/frame/preimage/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_preimage. @@ -64,87 +64,87 @@ impl WeightInfo for SubstrateWeight { // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn note_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_requested_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_no_deposit_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - 
(44_380_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(44_380_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - (30_280_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(30_280_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - (42_809_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(42_809_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - (28_964_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(28_964_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - (17_555_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(17_555_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - (7_745_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(7_745_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - (29_758_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_758_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - (18_360_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(18_360_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - (7_439_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + 
Weight::from_ref_time(7_439_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -153,86 +153,86 @@ impl WeightInfo for () { // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn note_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_requested_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_no_deposit_preimage(s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - (44_380_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(44_380_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - (30_280_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(30_280_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - (42_809_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(42_809_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - 
(28_964_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(28_964_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - (17_555_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(17_555_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - (7_745_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(7_745_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - (29_758_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_758_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - (18_360_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(18_360_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - (7_439_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(7_439_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 761d530954100..2d8dfc238bcd0 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -199,9 +199,9 @@ pub mod pallet { #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy(T::MaxProxies::get()) - .saturating_add(di.weight) // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(di.weight), di.class) })] pub fn proxy( @@ -529,9 +529,9 @@ pub mod pallet { #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get()) - .saturating_add(di.weight) // AccountData for inner call origin accountdata. 
- .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(di.weight), di.class) })] pub fn proxy_announced( diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index 72acee1e0347e..b8d5a55705efa 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -54,7 +54,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 19beaf4d1401b..119df271e0d55 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_proxy. @@ -61,97 +61,97 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Proxy Proxies (r:1 w:0) fn proxy(p: u32, ) -> Weight { - (17_768_000 as Weight) + Weight::from_ref_time(17_768_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((76_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(Weight::from_ref_time(76_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn proxy_announced(a: u32, p: u32, ) -> Weight { - (35_682_000 as Weight) + Weight::from_ref_time(35_682_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((158_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((73_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn remove_announcement(a: u32, p: u32, ) -> Weight { - (25_586_000 as Weight) + Weight::from_ref_time(25_586_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((175_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((18_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(18_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Announcements 
(r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_announcement(a: u32, p: u32, ) -> Weight { - (25_794_000 as Weight) + Weight::from_ref_time(25_794_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((173_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((13_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(13_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn announce(a: u32, p: u32, ) -> Weight { - (33_002_000 as Weight) + Weight::from_ref_time(33_002_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((163_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(163_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((79_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn add_proxy(p: u32, ) -> Weight { - (28_166_000 as Weight) + Weight::from_ref_time(28_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((105_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(105_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxy(p: u32, ) -> Weight { - (28_128_000 as Weight) + Weight::from_ref_time(28_128_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((118_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxies(p: u32, ) -> Weight { - (24_066_000 as Weight) + Weight::from_ref_time(24_066_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((81_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(81_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: unknown 
[0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) fn anonymous(p: u32, ) -> Weight { - (31_077_000 as Weight) + Weight::from_ref_time(31_077_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((37_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn kill_anonymous(p: u32, ) -> Weight { - (24_657_000 as Weight) + Weight::from_ref_time(24_657_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((87_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -159,96 +159,96 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Proxy Proxies (r:1 w:0) fn proxy(p: u32, ) -> Weight { - (17_768_000 as Weight) + Weight::from_ref_time(17_768_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((76_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(Weight::from_ref_time(76_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn proxy_announced(a: u32, p: u32, ) -> Weight { - (35_682_000 as Weight) + Weight::from_ref_time(35_682_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((158_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((73_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn remove_announcement(a: u32, p: u32, ) -> Weight { - (25_586_000 as Weight) + Weight::from_ref_time(25_586_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((175_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((18_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(18_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as 
RefTimeWeight)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_announcement(a: u32, p: u32, ) -> Weight { - (25_794_000 as Weight) + Weight::from_ref_time(25_794_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((173_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add((13_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(13_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn announce(a: u32, p: u32, ) -> Weight { - (33_002_000 as Weight) + Weight::from_ref_time(33_002_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((163_000 as Weight).saturating_mul(a as Weight)) + .saturating_add(Weight::from_ref_time(163_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((79_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn add_proxy(p: u32, ) -> Weight { - (28_166_000 as Weight) + Weight::from_ref_time(28_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((105_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(105_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxy(p: u32, ) -> Weight { - (28_128_000 as Weight) + Weight::from_ref_time(28_128_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((118_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxies(p: u32, ) -> Weight { - (24_066_000 as Weight) + Weight::from_ref_time(24_066_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((81_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(81_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + 
.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) fn anonymous(p: u32, ) -> Weight { - (31_077_000 as Weight) + Weight::from_ref_time(31_077_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((37_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:1) fn kill_anonymous(p: u32, ) -> Weight { - (24_657_000 as Weight) + Weight::from_ref_time(24_657_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((87_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index f709578f6941a..467cae9728fae 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -69,7 +69,7 @@ use safe_mix::TripletMix; use codec::Encode; -use frame_support::traits::Randomness; +use frame_support::{pallet_prelude::Weight, traits::Randomness}; use sp_runtime::traits::{Hash, Saturating}; const RANDOM_MATERIAL_LEN: u32 = 81; @@ -187,7 +187,7 @@ mod tests { parameter_types! { pub BlockWeights: limits::BlockWeights = limits::BlockWeights - ::simple_max(1024); + ::simple_max(Weight::from_ref_time(1024)); pub BlockLength: limits::BlockLength = limits::BlockLength ::max(2 * 1024); } diff --git a/frame/ranked-collective/src/tests.rs b/frame/ranked-collective/src/tests.rs index 530388d83f3c4..b4173b30b0c2e 100644 --- a/frame/ranked-collective/src/tests.rs +++ b/frame/ranked-collective/src/tests.rs @@ -22,6 +22,7 @@ use std::collections::BTreeMap; use frame_support::{ assert_noop, assert_ok, error::BadOrigin, + pallet_prelude::Weight, parameter_types, traits::{ConstU16, ConstU32, ConstU64, EitherOf, Everything, MapSuccess, Polling}, }; @@ -50,7 +51,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1_000_000); + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1_000_000)); } impl frame_system::Config for Test { type BaseCallFilter = Everything; diff --git a/frame/ranked-collective/src/weights.rs b/frame/ranked-collective/src/weights.rs index 3048dd804a5e2..a0309daea2263 100644 --- a/frame/ranked-collective/src/weights.rs +++ b/frame/ranked-collective/src/weights.rs @@ -40,7 +40,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_ranked_collective. 
@@ -61,62 +61,62 @@ impl WeightInfo for SubstrateWeight { // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn add_member() -> Weight { - (11_000_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(11_000_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn remove_member(r: u32, ) -> Weight { - (16_855_000 as Weight) + Weight::from_ref_time(16_855_000 as RefTimeWeight) // Standard Error: 27_000 - .saturating_add((8_107_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(8_107_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn promote_member(r: u32, ) -> Weight { - (11_936_000 as Weight) + Weight::from_ref_time(11_936_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((9_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(9_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn demote_member(r: u32, ) -> Weight { - (17_582_000 as Weight) + Weight::from_ref_time(17_582_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:0) // Storage: RankedPolls ReferendumInfoFor (r:1 w:1) // Storage: RankedCollective Voting (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote() -> Weight { - (22_000_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(22_000_000 as 
RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedPolls ReferendumInfoFor (r:1 w:0) // Storage: RankedCollective Voting (r:0 w:1) fn cleanup_poll(n: u32, ) -> Weight { - (6_188_000 as Weight) + Weight::from_ref_time(6_188_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((867_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } } @@ -127,61 +127,61 @@ impl WeightInfo for () { // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn add_member() -> Weight { - (11_000_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(11_000_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn remove_member(r: u32, ) -> Weight { - (16_855_000 as Weight) + Weight::from_ref_time(16_855_000 as RefTimeWeight) // Standard Error: 27_000 - .saturating_add((8_107_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(r as Weight))) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(r as Weight))) + .saturating_add(Weight::from_ref_time(8_107_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn promote_member(r: u32, ) -> Weight { - (11_936_000 as Weight) + Weight::from_ref_time(11_936_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((9_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(9_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn demote_member(r: u32, ) -> Weight { - (17_582_000 as 
Weight) + Weight::from_ref_time(17_582_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((142_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedCollective Members (r:1 w:0) // Storage: RankedPolls ReferendumInfoFor (r:1 w:1) // Storage: RankedCollective Voting (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote() -> Weight { - (22_000_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(22_000_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: RankedPolls ReferendumInfoFor (r:1 w:0) // Storage: RankedCollective Voting (r:0 w:1) fn cleanup_poll(n: u32, ) -> Weight { - (6_188_000 as Weight) + Weight::from_ref_time(6_188_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((867_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } } diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 44fc4d72a4a5f..5dc49fff09b32 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -47,7 +47,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/recovery/src/weights.rs b/frame/recovery/src/weights.rs index 0887180a533fc..8b82454d5849d 100644 --- a/frame/recovery/src/weights.rs +++ b/frame/recovery/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_recovery. 
@@ -60,71 +60,71 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Recovery Proxy (r:1 w:0) fn as_recovered() -> Weight { - (6_579_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(6_579_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Recovery Proxy (r:0 w:1) fn set_recovered() -> Weight { - (13_402_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_402_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:1) fn create_recovery(n: u32, ) -> Weight { - (28_217_000 as Weight) + Weight::from_ref_time(28_217_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((172_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn initiate_recovery() -> Weight { - (34_082_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(34_082_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn vouch_recovery(n: u32, ) -> Weight { - (22_038_000 as Weight) + Weight::from_ref_time(22_038_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add((307_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(307_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Proxy (r:1 w:1) fn claim_recovery(n: u32, ) -> Weight { - (28_621_000 as Weight) + Weight::from_ref_time(28_621_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((353_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(353_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery ActiveRecoveries (r:1 w:1) // Storage: System Account (r:1 w:1) fn close_recovery(n: u32, ) -> Weight { - (33_287_000 as Weight) + Weight::from_ref_time(33_287_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add((264_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(264_000 as RefTimeWeight).scalar_saturating_mul(n as 
RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Recoverable (r:1 w:1) fn remove_recovery(n: u32, ) -> Weight { - (31_964_000 as Weight) + Weight::from_ref_time(31_964_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((222_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(222_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Proxy (r:1 w:1) fn cancel_recovered() -> Weight { - (12_702_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(12_702_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -132,70 +132,70 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Recovery Proxy (r:1 w:0) fn as_recovered() -> Weight { - (6_579_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(6_579_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Recovery Proxy (r:0 w:1) fn set_recovered() -> Weight { - (13_402_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(13_402_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:1) fn create_recovery(n: u32, ) -> Weight { - (28_217_000 as Weight) + Weight::from_ref_time(28_217_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((172_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn initiate_recovery() -> Weight { - (34_082_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(34_082_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn vouch_recovery(n: u32, ) -> Weight { - (22_038_000 as Weight) + Weight::from_ref_time(22_038_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add((307_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(307_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as 
RefTimeWeight)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Proxy (r:1 w:1) fn claim_recovery(n: u32, ) -> Weight { - (28_621_000 as Weight) + Weight::from_ref_time(28_621_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((353_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(353_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery ActiveRecoveries (r:1 w:1) // Storage: System Account (r:1 w:1) fn close_recovery(n: u32, ) -> Weight { - (33_287_000 as Weight) + Weight::from_ref_time(33_287_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add((264_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(264_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Recoverable (r:1 w:1) fn remove_recovery(n: u32, ) -> Weight { - (31_964_000 as Weight) + Weight::from_ref_time(31_964_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add((222_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(222_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Recovery Proxy (r:1 w:1) fn cancel_recovered() -> Weight { - (12_702_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(12_702_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/referenda/src/branch.rs b/frame/referenda/src/branch.rs index f381f5fe5b709..172b5af999df5 100644 --- a/frame/referenda/src/branch.rs +++ b/frame/referenda/src/branch.rs @@ -19,6 +19,7 @@ use super::Config; use crate::weights::WeightInfo; +use frame_support::weights::Weight; /// Branches within the `begin_deciding` function. pub enum BeginDecidingBranch { @@ -82,7 +83,8 @@ impl ServiceBranch { /// Return the maximum possible weight of the `nudge` function. 
 	pub fn max_weight_of_nudge<T: Config<I>, I: 'static>() -> frame_support::weights::Weight {
-		0.max(T::WeightInfo::nudge_referendum_no_deposit())
+		Weight::new()
+			.max(T::WeightInfo::nudge_referendum_no_deposit())
 			.max(T::WeightInfo::nudge_referendum_preparing())
 			.max(T::WeightInfo::nudge_referendum_queued())
 			.max(T::WeightInfo::nudge_referendum_not_queued())
@@ -105,7 +107,7 @@ impl ServiceBranch {
 		self,
 	) -> Option<Weight> {
 		use ServiceBranch::*;
-		Some(match self {
+		let ref_time_weight = match self {
 			Preparing => T::WeightInfo::place_decision_deposit_preparing(),
 			Queued => T::WeightInfo::place_decision_deposit_queued(),
 			NotQueued => T::WeightInfo::place_decision_deposit_not_queued(),
@@ -122,12 +124,15 @@ impl ServiceBranch {
 			TimedOut | Fail | NoDeposit => return None,
-		})
+		};
+
+		Some(ref_time_weight)
 	}
 	/// Return the maximum possible weight of the `place_decision_deposit` function.
 	pub fn max_weight_of_deposit<T: Config<I>, I: 'static>() -> frame_support::weights::Weight {
-		0.max(T::WeightInfo::place_decision_deposit_preparing())
+		Weight::new()
+			.max(T::WeightInfo::place_decision_deposit_preparing())
 			.max(T::WeightInfo::place_decision_deposit_queued())
 			.max(T::WeightInfo::place_decision_deposit_not_queued())
 			.max(T::WeightInfo::place_decision_deposit_passing())
@@ -167,7 +172,8 @@ impl OneFewerDecidingBranch {
 	/// Return the maximum possible weight of the `one_fewer_deciding` function.
 	pub fn max_weight<T: Config<I>, I: 'static>() -> frame_support::weights::Weight {
-		0.max(T::WeightInfo::one_fewer_deciding_queue_empty())
+		Weight::new()
+			.max(T::WeightInfo::one_fewer_deciding_queue_empty())
 			.max(T::WeightInfo::one_fewer_deciding_passing())
 			.max(T::WeightInfo::one_fewer_deciding_failing())
 	}
diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs
index dbe22cd562349..698bea8cc9f67 100644
--- a/frame/referenda/src/mock.rs
+++ b/frame/referenda/src/mock.rs
@@ -26,6 +26,7 @@ use frame_support::{
 		ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, OriginTrait, Polling,
 		PreimageRecipient, SortedMembers,
 	},
+	weights::Weight,
 };
 use frame_system::{EnsureRoot, EnsureSignedBy};
 use sp_core::H256;
@@ -61,8 +62,9 @@ impl Contains for BaseFilter {
 }
 parameter_types! {
+	pub MaxWeight: Weight = Weight::from_ref_time(2_000_000_000_000);
 	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(1_000_000);
+		frame_system::limits::BlockWeights::simple_max(MaxWeight::get());
 }
 impl frame_system::Config for Test {
 	type BaseCallFilter = BaseFilter;
@@ -104,7 +106,7 @@ impl pallet_scheduler::Config for Test {
 	type Origin = Origin;
 	type PalletsOrigin = OriginCaller;
 	type Call = Call;
-	type MaximumWeight = ConstU64<2_000_000_000_000>;
+	type MaximumWeight = MaxWeight;
 	type ScheduleOrigin = EnsureRoot;
 	type MaxScheduledPerBlock = ConstU32<100>;
 	type WeightInfo = ();
diff --git a/frame/referenda/src/weights.rs b/frame/referenda/src/weights.rs
index d48ebb1014d48..84a726d9e4fbe 100644
--- a/frame/referenda/src/weights.rs
+++ b/frame/referenda/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]
-use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 /// Weight functions needed for pallet_referenda.
@@ -80,205 +80,205 @@ impl WeightInfo for SubstrateWeight { // Storage: Scheduler Agenda (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:0 w:1) fn submit() -> Weight { - (34_640_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(34_640_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_preparing() -> Weight { - (44_290_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(44_290_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_queued() -> Weight { - (49_428_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(49_428_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_not_queued() -> Weight { - (50_076_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(50_076_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_passing() -> Weight { - (55_935_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(55_935_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_failing() -> Weight { - (52_921_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(52_921_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn refund_decision_deposit() -> Weight { - (29_160_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(29_160_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn cancel() -> Weight { - (34_972_000 as Weight) - 
.saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(34_972_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn kill() -> Weight { - (60_620_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(60_620_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda TrackQueue (r:1 w:0) // Storage: Referenda DecidingCount (r:1 w:1) fn one_fewer_deciding_queue_empty() -> Weight { - (9_615_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(9_615_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_failing() -> Weight { - (113_077_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(113_077_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_passing() -> Weight { - (114_376_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(114_376_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_insertion() -> Weight { - (43_901_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(43_901_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_slide() -> Weight { - (43_279_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(43_279_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_queued() -> Weight { - (45_564_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - 
.saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(45_564_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_not_queued() -> Weight { - (45_061_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(45_061_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_no_deposit() -> Weight { - (23_757_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(23_757_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_preparing() -> Weight { - (24_781_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(24_781_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn nudge_referendum_timed_out() -> Weight { - (18_344_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_344_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_failing() -> Weight { - (34_752_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(34_752_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_passing() -> Weight { - (37_055_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(37_055_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_confirming() -> Weight { - (31_442_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(31_442_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + 
.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_end_confirming() -> Weight { - (33_201_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(33_201_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_not_confirming() -> Weight { - (30_047_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(30_047_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_confirming() -> Weight { - (29_195_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_195_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn nudge_referendum_approved() -> Weight { - (50_119_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(50_119_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_rejected() -> Weight { - (32_203_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(32_203_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -288,204 +288,204 @@ impl WeightInfo for () { // Storage: Scheduler Agenda (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:0 w:1) fn submit() -> Weight { - (34_640_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(34_640_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_preparing() -> Weight { - (44_290_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(44_290_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue 
(r:1 w:1) fn place_decision_deposit_queued() -> Weight { - (49_428_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(49_428_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_not_queued() -> Weight { - (50_076_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(50_076_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_passing() -> Weight { - (55_935_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(55_935_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_failing() -> Weight { - (52_921_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(52_921_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn refund_decision_deposit() -> Weight { - (29_160_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(29_160_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn cancel() -> Weight { - (34_972_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(34_972_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn kill() -> Weight { - (60_620_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(60_620_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda TrackQueue (r:1 w:0) // Storage: Referenda DecidingCount (r:1 w:1) fn one_fewer_deciding_queue_empty() -> Weight { - (9_615_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + 
Weight::from_ref_time(9_615_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_failing() -> Weight { - (113_077_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(113_077_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_passing() -> Weight { - (114_376_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(114_376_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_insertion() -> Weight { - (43_901_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(43_901_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_slide() -> Weight { - (43_279_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(43_279_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_queued() -> Weight { - (45_564_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(45_564_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_not_queued() -> Weight { - (45_061_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(45_061_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_no_deposit() -> Weight { - (23_757_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as 
Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(23_757_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_preparing() -> Weight { - (24_781_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(24_781_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn nudge_referendum_timed_out() -> Weight { - (18_344_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(18_344_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_failing() -> Weight { - (34_752_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(34_752_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_passing() -> Weight { - (37_055_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(37_055_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_confirming() -> Weight { - (31_442_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(31_442_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_end_confirming() -> Weight { - (33_201_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(33_201_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_not_confirming() -> Weight { - (30_047_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(30_047_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + 
.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_confirming() -> Weight { - (29_195_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_195_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn nudge_referendum_approved() -> Weight { - (50_119_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(50_119_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_rejected() -> Weight { - (32_203_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(32_203_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/remark/src/weights.rs b/frame/remark/src/weights.rs index b8bd4618f8def..a098670ccf100 100644 --- a/frame/remark/src/weights.rs +++ b/frame/remark/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_remark. @@ -52,10 +52,10 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn store(l: u32, ) -> Weight { - (13_140_000 as Weight) + Weight::from_ref_time(13_140_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } } @@ -63,9 +63,9 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn store(l: u32, ) -> Weight { - (13_140_000 as Weight) + Weight::from_ref_time(13_140_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } } diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 008105dc737ea..7bbc628604bc6 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -119,7 +119,7 @@ impl Contains for BaseFilter { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2_000_000_000_000); + frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(2_000_000_000_000)); } impl system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index d03e13da4747a..56f9298dd4b0c 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -30,7 +30,7 @@ use substrate_test_utils::assert_eq_uvec; #[test] fn basic_scheduling_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.into())); run_to_block(3); @@ -45,7 +45,7 @@ fn basic_scheduling_works() { #[test] fn scheduling_with_preimages_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); let hash = ::Hashing::hash_of(&call); let hashed = MaybeHashed::Hash(hash); assert_ok!(Preimage::note_preimage(Origin::signed(0), call.encode())); @@ -65,7 +65,7 @@ fn scheduling_with_preimages_works() { #[test] fn scheduling_with_preimage_postpones_correctly() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); let hash = ::Hashing::hash_of(&call); let hashed = MaybeHashed::Hash(hash); @@ -98,7 +98,7 @@ fn scheduling_with_preimage_postpones_correctly() { fn schedule_after_works() { new_test_ext().execute_with(|| { run_to_block(2); - let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); // This will schedule the call 3 blocks after the next block... so block 3 + 3 = 6 assert_ok!(Scheduler::do_schedule(DispatchTime::After(3), None, 127, root(), call.into())); @@ -115,7 +115,7 @@ fn schedule_after_works() { fn schedule_after_zero_works() { new_test_ext().execute_with(|| { run_to_block(2); - let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::After(0), None, 127, root(), call.into())); // Will trigger on the next block. 
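The scheduler hunks above change two things in the same direction: the mock's block-weight limit is now built from a `Weight` via `BlockWeights::simple_max`, and every `LoggerCall::log` in the tests wraps its weight in `Weight::from_ref_time`. A minimal sketch of the first of these, with an illustrative value, assuming the post-migration `frame_system` API shown in the hunk:

use frame_support::weights::Weight;
use frame_system::limits::BlockWeights;

// Illustrative limit only; the point is that `simple_max` now takes a `Weight`.
fn example_block_weights() -> BlockWeights {
	BlockWeights::simple_max(Weight::from_ref_time(2_000_000_000_000))
}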
@@ -135,7 +135,7 @@ fn periodic_scheduling_works() { Some((3, 3)), 127, root(), - Call::Logger(logger::Call::log { i: 42, weight: 1000 }).into() + Call::Logger(logger::Call::log { i: 42, weight: Weight::from_ref_time(1000) }).into() )); run_to_block(3); assert!(logger::log().is_empty()); @@ -157,7 +157,7 @@ fn periodic_scheduling_works() { #[test] fn reschedule_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.into()).unwrap(), @@ -188,7 +188,7 @@ fn reschedule_works() { #[test] fn reschedule_named_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( @@ -230,7 +230,7 @@ fn reschedule_named_works() { #[test] fn reschedule_named_perodic_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: 1000 }); + let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( @@ -292,7 +292,7 @@ fn cancel_named_scheduling_works_with_normal_cancel() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), ) .unwrap(); let i = Scheduler::do_schedule( @@ -300,7 +300,7 @@ fn cancel_named_scheduling_works_with_normal_cancel() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into(), + Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), ) .unwrap(); run_to_block(3); @@ -322,7 +322,7 @@ fn cancel_named_periodic_scheduling_works() { Some((3, 3)), 127, root(), - Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into(), + Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), ) .unwrap(); // same id results in error. @@ -332,7 +332,7 @@ fn cancel_named_periodic_scheduling_works() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), ) .is_err()); // different id is ok. 
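The `on_initialize_weight_is_correct` hunks that follow replace bare-integer arithmetic on weights with operations on the `Weight` struct (`Weight::one()`, `Weight::from_ref_time`, scalar division). A minimal sketch of that arithmetic with made-up values, assuming only the `Weight` operators that appear elsewhere in this patch:

use frame_support::weights::Weight;

fn example_weight_arithmetic() {
	let call_weight = Weight::from_ref_time(25);
	let item_weight = Weight::from_ref_time(10);
	let max_weight = Weight::from_ref_time(100);

	// Previously `call_weight + 1` and `call_weight + 4` on bare integers.
	let a = call_weight + Weight::one();
	let b = call_weight + Weight::from_ref_time(4);

	// Previously `max_weight / 2 - item_weight + 1`.
	let _soft_limit = max_weight / 2 - item_weight + Weight::one();

	// `max` behaves as in the `max_weight_of_*` helpers earlier in the patch.
	assert_eq!(a.max(b), b);
}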
@@ -342,7 +342,7 @@ fn cancel_named_periodic_scheduling_works() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), ) .unwrap(); run_to_block(3); @@ -458,8 +458,11 @@ fn scheduler_respects_priority_ordering_with_soft_deadlines() { None, 126, root(), - Call::Logger(LoggerCall::log { i: 2600, weight: max_weight / 2 - item_weight + 1 }) - .into(), + Call::Logger(LoggerCall::log { + i: 2600, + weight: max_weight / 2 - item_weight + Weight::one() + }) + .into(), )); // 2600 does not fit with 69 or 42, but has higher priority, so will go through @@ -484,7 +487,7 @@ fn on_initialize_weight_is_correct() { None, 255, root(), - Call::Logger(LoggerCall::log { i: 3, weight: call_weight + 1 }).into(), + Call::Logger(LoggerCall::log { i: 3, weight: call_weight + Weight::one() }).into(), )); // Anon Periodic assert_ok!(Scheduler::do_schedule( @@ -492,7 +495,8 @@ fn on_initialize_weight_is_correct() { Some((1000, 3)), 128, root(), - Call::Logger(LoggerCall::log { i: 42, weight: call_weight + 2 }).into(), + Call::Logger(LoggerCall::log { i: 42, weight: call_weight + Weight::from_ref_time(2) }) + .into(), )); // Anon assert_ok!(Scheduler::do_schedule( @@ -500,7 +504,8 @@ fn on_initialize_weight_is_correct() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 69, weight: call_weight + 3 }).into(), + Call::Logger(LoggerCall::log { i: 69, weight: call_weight + Weight::from_ref_time(3) }) + .into(), )); // Named Periodic assert_ok!(Scheduler::do_schedule_named( @@ -509,7 +514,11 @@ fn on_initialize_weight_is_correct() { Some((1000, 3)), 126, root(), - Call::Logger(LoggerCall::log { i: 2600, weight: call_weight + 4 }).into(), + Call::Logger(LoggerCall::log { + i: 2600, + weight: call_weight + Weight::from_ref_time(4) + }) + .into(), )); // Will include the named periodic only @@ -517,7 +526,8 @@ fn on_initialize_weight_is_correct() { assert_eq!( actual_weight, base_weight + - call_weight + 4 + <() as MarginalWeightInfo>::item(true, true, Some(false)) + call_weight + Weight::from_ref_time(4) + + <() as MarginalWeightInfo>::item(true, true, Some(false)) ); assert_eq!(logger::log(), vec![(root(), 2600u32)]); @@ -526,8 +536,10 @@ fn on_initialize_weight_is_correct() { assert_eq!( actual_weight, base_weight + - call_weight + 2 + <() as MarginalWeightInfo>::item(false, false, Some(false)) + - call_weight + 3 + <() as MarginalWeightInfo>::item(true, false, Some(false)) + call_weight + Weight::from_ref_time(2) + + <() as MarginalWeightInfo>::item(false, false, Some(false)) + + call_weight + Weight::from_ref_time(3) + + <() as MarginalWeightInfo>::item(true, false, Some(false)) ); assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); @@ -536,7 +548,8 @@ fn on_initialize_weight_is_correct() { assert_eq!( actual_weight, base_weight + - call_weight + 1 + <() as MarginalWeightInfo>::item(false, true, Some(false)) + call_weight + Weight::from_ref_time(1) + + <() as MarginalWeightInfo>::item(false, true, Some(false)) ); assert_eq!( logger::log(), @@ -552,8 +565,12 @@ fn on_initialize_weight_is_correct() { #[test] fn root_calls_works() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into()); - let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into()); + let call = Box::new( + Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + ); + let call2 = Box::new( 
+ Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + ); assert_ok!(Scheduler::schedule_named(Origin::root(), 1u32.encode(), 4, None, 127, call,)); assert_ok!(Scheduler::schedule(Origin::root(), 4, None, 127, call2)); run_to_block(3); @@ -573,9 +590,15 @@ fn fails_to_schedule_task_in_the_past() { new_test_ext().execute_with(|| { run_to_block(3); - let call1 = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into()); - let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into()); - let call3 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into()); + let call1 = Box::new( + Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + ); + let call2 = Box::new( + Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + ); + let call3 = Box::new( + Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + ); assert_err!( Scheduler::schedule_named(Origin::root(), 1u32.encode(), 2, None, 127, call1), @@ -597,8 +620,12 @@ fn fails_to_schedule_task_in_the_past() { #[test] fn should_use_orign() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into()); - let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into()); + let call = Box::new( + Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + ); + let call2 = Box::new( + Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + ); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), 1u32.encode(), @@ -623,8 +650,12 @@ fn should_use_orign() { #[test] fn should_check_orign() { new_test_ext().execute_with(|| { - let call = Box::new(Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into()); - let call2 = Box::new(Call::Logger(LoggerCall::log { i: 42, weight: 1000 }).into()); + let call = Box::new( + Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + ); + let call2 = Box::new( + Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + ); assert_noop!( Scheduler::schedule_named( system::RawOrigin::Signed(2).into(), @@ -646,10 +677,20 @@ fn should_check_orign() { #[test] fn should_check_orign_for_cancel() { new_test_ext().execute_with(|| { - let call = - Box::new(Call::Logger(LoggerCall::log_without_filter { i: 69, weight: 1000 }).into()); - let call2 = - Box::new(Call::Logger(LoggerCall::log_without_filter { i: 42, weight: 1000 }).into()); + let call = Box::new( + Call::Logger(LoggerCall::log_without_filter { + i: 69, + weight: Weight::from_ref_time(1000), + }) + .into(), + ); + let call2 = Box::new( + Call::Logger(LoggerCall::log_without_filter { + i: 42, + weight: Weight::from_ref_time(1000), + }) + .into(), + ); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), 1u32.encode(), @@ -693,14 +734,20 @@ fn migration_to_v3_works() { Some(ScheduledV1 { maybe_id: None, priority: i as u8 + 10, - call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }), + call: Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + }), maybe_periodic: None, }), None, Some(ScheduledV1 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }), + call: Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(1000), + }), maybe_periodic: Some((456u64, 10)), }), 
]; @@ -718,7 +765,11 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: None, priority: 10, - call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), + call: Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100) + }) + .into(), maybe_periodic: None, origin: root(), _phantom: PhantomData::::default(), @@ -727,7 +778,11 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + call: Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(1000) + }) + .into(), maybe_periodic: Some((456u64, 10)), origin: root(), _phantom: PhantomData::::default(), @@ -740,7 +795,11 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: None, priority: 11, - call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), + call: Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100) + }) + .into(), maybe_periodic: None, origin: root(), _phantom: PhantomData::::default(), @@ -749,7 +808,11 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + call: Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(1000) + }) + .into(), maybe_periodic: Some((456u64, 10)), origin: root(), _phantom: PhantomData::::default(), @@ -762,7 +825,11 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: None, priority: 12, - call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), + call: Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100) + }) + .into(), maybe_periodic: None, origin: root(), _phantom: PhantomData::::default(), @@ -771,7 +838,11 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + call: Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(1000) + }) + .into(), maybe_periodic: Some((456u64, 10)), origin: root(), _phantom: PhantomData::::default(), @@ -794,7 +865,11 @@ fn test_migrate_origin() { Some(Scheduled { maybe_id: None, priority: i as u8 + 10, - call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), + call: Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + }) + .into(), origin: 3u32, maybe_periodic: None, _phantom: Default::default(), @@ -804,7 +879,11 @@ fn test_migrate_origin() { maybe_id: Some(b"test".to_vec()), priority: 123, origin: 2u32, - call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + call: Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(1000), + }) + .into(), maybe_periodic: Some((456u64, 10)), _phantom: Default::default(), }), @@ -833,7 +912,11 @@ fn test_migrate_origin() { Some(ScheduledV2::, u64, OriginCaller, u64> { maybe_id: None, priority: 10, - call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), + call: Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100) + }) + .into(), maybe_periodic: None, origin: system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), @@ -842,7 +925,11 @@ fn test_migrate_origin() { Some(ScheduledV2 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + call: Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(1000) + }) 
+ .into(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -855,7 +942,11 @@ fn test_migrate_origin() { Some(ScheduledV2 { maybe_id: None, priority: 11, - call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), + call: Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100) + }) + .into(), maybe_periodic: None, origin: system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), @@ -864,7 +955,11 @@ fn test_migrate_origin() { Some(ScheduledV2 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + call: Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(1000) + }) + .into(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -877,7 +972,11 @@ fn test_migrate_origin() { Some(ScheduledV2 { maybe_id: None, priority: 12, - call: Call::Logger(LoggerCall::log { i: 96, weight: 100 }).into(), + call: Call::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100) + }) + .into(), maybe_periodic: None, origin: system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), @@ -886,7 +985,11 @@ fn test_migrate_origin() { Some(ScheduledV2 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { i: 69, weight: 1000 }).into(), + call: Call::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(1000) + }) + .into(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index dd7ed8104420d..f201c89eaf278 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_scheduler. 
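The scheduler weights.rs hunks below apply the same mechanical rewrite as the referenda and remark weight files: the benchmarked base cost becomes `Weight::from_ref_time(...)`, per-item slopes are scaled with `scalar_saturating_mul`, and database read/write counts are cast to `RefTimeWeight`. A sketch of the rewritten shape of one such function; the name `example_on_initialize` and the constants are illustrative, not from the patch:

use frame_support::{traits::Get, weights::{constants::RocksDbWeight, RefTimeWeight, Weight}};

fn example_on_initialize(s: u32) -> Weight {
	// Base cost measured by the benchmark (illustrative number).
	Weight::from_ref_time(10_000_000 as RefTimeWeight)
		// Per-item slope: `scalar_saturating_mul` replaces the old integer multiply.
		.saturating_add(
			Weight::from_ref_time(5_000_000 as RefTimeWeight)
				.scalar_saturating_mul(s as RefTimeWeight),
		)
		// One fixed read, plus one write per scheduled item.
		.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
		.saturating_add(
			RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight)),
		)
}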
@@ -68,146 +68,146 @@ impl WeightInfo for SubstrateWeight { // Storage: Preimage StatusFor (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { - (9_994_000 as Weight) + Weight::from_ref_time(9_994_000 as RefTimeWeight) // Standard Error: 20_000 - .saturating_add((19_843_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((4 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(19_843_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((4 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named_resolved(s: u32, ) -> Weight { - (10_318_000 as Weight) + Weight::from_ref_time(10_318_000 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add((15_451_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(15_451_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn on_initialize_periodic_resolved(s: u32, ) -> Weight { - (11_675_000 as Weight) + Weight::from_ref_time(11_675_000 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add((17_019_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(17_019_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn on_initialize_resolved(s: u32, ) -> Weight { - (11_934_000 as Weight) + 
Weight::from_ref_time(11_934_000 as RefTimeWeight) // Standard Error: 11_000 - .saturating_add((14_134_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(14_134_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:0) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named_aborted(s: u32, ) -> Weight { - (7_279_000 as Weight) + Weight::from_ref_time(7_279_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((5_388_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(5_388_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:0) fn on_initialize_aborted(s: u32, ) -> Weight { - (8_619_000 as Weight) + Weight::from_ref_time(8_619_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((2_969_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_969_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_periodic_named(s: u32, ) -> Weight { - (16_129_000 as Weight) + Weight::from_ref_time(16_129_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add((9_772_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(9_772_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + 
.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:2 w:2) fn on_initialize_periodic(s: u32, ) -> Weight { - (15_785_000 as Weight) + Weight::from_ref_time(15_785_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((7_208_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(7_208_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named(s: u32, ) -> Weight { - (15_778_000 as Weight) + Weight::from_ref_time(15_778_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((5_597_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(5_597_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:1 w:1) fn on_initialize(s: u32, ) -> Weight { - (15_912_000 as Weight) + Weight::from_ref_time(15_912_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((4_530_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(4_530_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Scheduler Agenda (r:1 w:1) fn schedule(s: u32, ) -> Weight { - (18_013_000 as Weight) + Weight::from_ref_time(18_013_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((87_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn cancel(s: u32, ) -> Weight { - (18_131_000 as Weight) + Weight::from_ref_time(18_131_000 as 
RefTimeWeight) // Standard Error: 1_000 - .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn schedule_named(s: u32, ) -> Weight { - (21_230_000 as Weight) + Weight::from_ref_time(21_230_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((98_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(98_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_named(s: u32, ) -> Weight { - (20_139_000 as Weight) + Weight::from_ref_time(20_139_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -218,145 +218,145 @@ impl WeightInfo for () { // Storage: Preimage StatusFor (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { - (9_994_000 as Weight) + Weight::from_ref_time(9_994_000 as RefTimeWeight) // Standard Error: 20_000 - .saturating_add((19_843_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((4 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(19_843_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((4 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named_resolved(s: u32, ) -> Weight { - (10_318_000 as Weight) + Weight::from_ref_time(10_318_000 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add((15_451_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(s as Weight))) + 
.saturating_add(Weight::from_ref_time(15_451_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn on_initialize_periodic_resolved(s: u32, ) -> Weight { - (11_675_000 as Weight) + Weight::from_ref_time(11_675_000 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add((17_019_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(17_019_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn on_initialize_resolved(s: u32, ) -> Weight { - (11_934_000 as Weight) + Weight::from_ref_time(11_934_000 as RefTimeWeight) // Standard Error: 11_000 - .saturating_add((14_134_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(14_134_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:0) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named_aborted(s: u32, ) -> Weight { - (7_279_000 as Weight) + Weight::from_ref_time(7_279_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((5_388_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(5_388_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as 
RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:0) fn on_initialize_aborted(s: u32, ) -> Weight { - (8_619_000 as Weight) + Weight::from_ref_time(8_619_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add((2_969_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(2_969_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_periodic_named(s: u32, ) -> Weight { - (16_129_000 as Weight) + Weight::from_ref_time(16_129_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add((9_772_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(9_772_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:2 w:2) fn on_initialize_periodic(s: u32, ) -> Weight { - (15_785_000 as Weight) + Weight::from_ref_time(15_785_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((7_208_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(7_208_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named(s: u32, ) -> Weight { - (15_778_000 as Weight) + Weight::from_ref_time(15_778_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((5_597_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as 
Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(5_597_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Scheduler Agenda (r:1 w:1) fn on_initialize(s: u32, ) -> Weight { - (15_912_000 as Weight) + Weight::from_ref_time(15_912_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((4_530_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(4_530_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Scheduler Agenda (r:1 w:1) fn schedule(s: u32, ) -> Weight { - (18_013_000 as Weight) + Weight::from_ref_time(18_013_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((87_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn cancel(s: u32, ) -> Weight { - (18_131_000 as Weight) + Weight::from_ref_time(18_131_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn schedule_named(s: u32, ) -> Weight { - (21_230_000 as Weight) + Weight::from_ref_time(21_230_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((98_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(98_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_named(s: u32, ) -> Weight { - (20_139_000 as Weight) + Weight::from_ref_time(20_139_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((595_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + 
.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index aa4f75255b120..dd96a5df2baf9 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -281,7 +281,7 @@ pub mod pallet { let pool = >::get(); >::refresh_members(pool, ChangeReceiver::MembershipChanged); } - 0 + Weight::zero() } } diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 4fef5385eb2c5..e38e0a18b99c8 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -50,7 +50,7 @@ frame_support::construct_runtime!( parameter_types! { pub const CandidateDeposit: u64 = 25; pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } ord_parameter_types! { pub const KickOrigin: u64 = 2; diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 71ee9d1e0758a..34c560984661d 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -574,7 +574,7 @@ pub mod pallet { // NOTE: the non-database part of the weight for `should_end_session(n)` is // included as weight for empty block, the database part is expected to be in // cache. - 0 + Weight::zero() } } } diff --git a/frame/session/src/migrations/v1.rs b/frame/session/src/migrations/v1.rs index 3c687ea7d9d66..c0dce422fe8b5 100644 --- a/frame/session/src/migrations/v1.rs +++ b/frame/session/src/migrations/v1.rs @@ -47,7 +47,7 @@ pub fn migrate::on_chain_storage_version(); @@ -82,7 +82,7 @@ pub fn migrate sp_io::TestExternalities { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs index 40ae7f1be4265..5208a679c6bb7 100644 --- a/frame/session/src/weights.rs +++ b/frame/session/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_session. 
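// A minimal illustrative sketch, not part of the patch: the conversion pattern this
// change applies across all of these weight files. The function name
// `example_on_initialize_weight` and the choice of `RocksDbWeight` are assumptions made
// for the example; the constants are taken from the scheduler figures above, and
// `Weight::from_ref_time`, `RefTimeWeight` and `scalar_saturating_mul` are the APIs the
// patch itself introduces.
use frame_support::{traits::Get, weights::{constants::RocksDbWeight, RefTimeWeight, Weight}};

// Old style: `Weight` was a plain `u64`, so a weight was written as
//     (15_785_000 as Weight)
//         .saturating_add((7_208_000 as Weight).saturating_mul(s as Weight))
// New style: `Weight` is an opaque struct built from a reference-time value.
fn example_on_initialize_weight(s: u32) -> Weight {
    Weight::from_ref_time(15_785_000 as RefTimeWeight)
        // per-item component, scaled by the benchmark parameter `s`
        .saturating_add(
            Weight::from_ref_time(7_208_000 as RefTimeWeight)
                .scalar_saturating_mul(s as RefTimeWeight),
        )
        // database components are still expressed as read/write counts
        .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
        .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight))
}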
@@ -55,17 +55,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:4 w:4) fn set_keys() -> Weight { - (48_484_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(48_484_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:0 w:4) fn purge_keys() -> Weight { - (38_003_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(38_003_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } } @@ -75,16 +75,16 @@ impl WeightInfo for () { // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:4 w:4) fn set_keys() -> Weight { - (48_484_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(48_484_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:0 w:4) fn purge_keys() -> Weight { - (38_003_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(38_003_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } } diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index ec4cca1813ec6..626562f7a45f8 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -614,7 +614,7 @@ pub mod pallet { fn on_initialize(n: T::BlockNumber) -> Weight { let mut members = vec![]; - let mut weight = 0; + let mut weight = Weight::new(); let weights = T::BlockWeights::get(); // Run a candidate/membership rotation diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 04ea705eed556..ed668e79269fd 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -50,7 +50,7 @@ frame_support::construct_runtime!( parameter_types! { pub const SocietyPalletId: PalletId = PalletId(*b"py/socie"); pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } ord_parameter_types! 
{ diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 68aa97db8a324..2a55d3baea2e6 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -1167,7 +1167,7 @@ where disable_strategy: DisableStrategy, ) -> Weight { let reward_proportion = SlashRewardFraction::::get(); - let mut consumed_weight: Weight = 0; + let mut consumed_weight = Weight::from_ref_time(0); let mut add_db_reads_writes = |reads, writes| { consumed_weight += T::DbWeight::get().reads_writes(reads, writes); }; diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 485a9dc3ae66a..cda606008a9cc 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -3759,7 +3759,7 @@ fn payout_stakers_handles_weight_refund() { let half_max_nom_rewarded_weight = ::WeightInfo::payout_stakers_alive_staked(half_max_nom_rewarded); let zero_nom_payouts_weight = ::WeightInfo::payout_stakers_alive_staked(0); - assert!(zero_nom_payouts_weight > 0); + assert!(zero_nom_payouts_weight > Weight::zero()); assert!(half_max_nom_rewarded_weight > zero_nom_payouts_weight); assert!(max_nom_rewarded_weight > half_max_nom_rewarded_weight); @@ -3898,42 +3898,68 @@ fn bond_during_era_correctly_populates_claimed_rewards() { fn offences_weight_calculated_correctly() { ExtBuilder::default().nominate(true).build_and_execute(|| { // On offence with zero offenders: 4 Reads, 1 Write - let zero_offence_weight = ::DbWeight::get().reads_writes(4, 1); - assert_eq!(Staking::on_offence(&[], &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), zero_offence_weight); + let zero_offence_weight = + ::DbWeight::get().reads_writes(4, 1); + assert_eq!( + Staking::on_offence(&[], &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), + zero_offence_weight + ); // On Offence with N offenders, Unapplied: 4 Reads, 1 Write + 4 Reads, 5 Writes - let n_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) - + ::DbWeight::get().reads_writes(4, 5); - - let offenders: Vec::AccountId, pallet_session::historical::IdentificationTuple>> - = (1..10).map(|i| - OffenceDetails { - offender: (i, Staking::eras_stakers(active_era(), i)), - reporters: vec![], - } - ).collect(); - assert_eq!(Staking::on_offence(&offenders, &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), n_offence_unapplied_weight); + let n_offence_unapplied_weight = ::DbWeight::get() + .reads_writes(4, 1) + + ::DbWeight::get().reads_writes(4, 5); + + let offenders: Vec< + OffenceDetails< + ::AccountId, + pallet_session::historical::IdentificationTuple, + >, + > = (1..10) + .map(|i| OffenceDetails { + offender: (i, Staking::eras_stakers(active_era(), i)), + reporters: vec![], + }) + .collect(); + assert_eq!( + Staking::on_offence( + &offenders, + &[Perbill::from_percent(50)], + 0, + DisableStrategy::WhenSlashed + ), + n_offence_unapplied_weight + ); // On Offence with one offenders, Applied - let one_offender = [ - OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), 11)), - reporters: vec![1], - }, - ]; + let one_offender = [OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), 11)), + reporters: vec![1], + }]; let n = 1; // Number of offenders let rw = 3 + 3 * n; // rw reads and writes - let one_offence_unapplied_weight = ::DbWeight::get().reads_writes(4, 1) - + ::DbWeight::get().reads_writes(rw, rw) + let one_offence_unapplied_weight = + ::DbWeight::get().reads_writes(4, 1) + + + ::DbWeight::get().reads_writes(rw, rw) // One `slash_cost` + 
::DbWeight::get().reads_writes(6, 5) // `slash_cost` * nominators (1) + ::DbWeight::get().reads_writes(6, 5) // `reward_cost` * reporters (1) - + ::DbWeight::get().reads_writes(2, 2); + + ::DbWeight::get().reads_writes(2, 2) + ; - assert_eq!(Staking::on_offence(&one_offender, &[Perbill::from_percent(50)], 0, DisableStrategy::WhenSlashed), one_offence_unapplied_weight); + assert_eq!( + Staking::on_offence( + &one_offender, + &[Perbill::from_percent(50)], + 0, + DisableStrategy::WhenSlashed + ), + one_offence_unapplied_weight + ); }); } diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 1bdfb01bddc86..c5f584054d1f4 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_staking. @@ -86,9 +86,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - (43_992_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(43_992_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -96,9 +96,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - (75_827_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(7 as Weight)) + Weight::from_ref_time(75_827_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(7 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) @@ -110,20 +110,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn unbond() -> Weight { - (81_075_000 as Weight) - .saturating_add(T::DbWeight::get().reads(12 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + Weight::from_ref_time(81_075_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(8 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn withdraw_unbonded_update(s: u32, ) -> Weight { - (35_763_000 as Weight) + Weight::from_ref_time(35_763_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((57_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(57_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -139,9 +139,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // 
Storage: Staking Payee (r:0 w:1) fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - (66_938_000 as Weight) - .saturating_add(T::DbWeight::get().reads(13 as Weight)) - .saturating_add(T::DbWeight::get().writes(11 as Weight)) + Weight::from_ref_time(66_938_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(13 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(11 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) @@ -155,19 +155,19 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - (52_943_000 as Weight) - .saturating_add(T::DbWeight::get().reads(11 as Weight)) - .saturating_add(T::DbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(52_943_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) fn kick(k: u32, ) -> Weight { - (23_264_000 as Weight) + Weight::from_ref_time(23_264_000 as RefTimeWeight) // Standard Error: 11_000 - .saturating_add((8_006_000 as Weight).saturating_mul(k as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(Weight::from_ref_time(8_006_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) @@ -181,12 +181,12 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) fn nominate(n: u32, ) -> Weight { - (56_596_000 as Weight) + Weight::from_ref_time(56_596_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((3_644_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(12 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(3_644_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) @@ -196,50 +196,50 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (51_117_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(51_117_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn 
set_payee() -> Weight { - (11_223_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(11_223_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - (19_826_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(19_826_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - (3_789_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_789_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - (3_793_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_793_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - (3_802_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_802_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - (3_762_000 as Weight) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_762_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Invulnerables (r:0 w:1) fn set_invulnerables(v: u32, ) -> Weight { - (4_318_000 as Weight) + Weight::from_ref_time(4_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((10_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(10_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) @@ -255,20 +255,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) fn force_unstake(s: u32, ) -> Weight { - (65_265_000 as Weight) + Weight::from_ref_time(65_265_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_029_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(11 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(1_029_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Staking UnappliedSlashes (r:1 w:1) fn cancel_deferred_slash(s: u32, ) -> Weight { - (903_312_000 as Weight) + Weight::from_ref_time(903_312_000 as RefTimeWeight) // Standard Error: 56_000 - 
.saturating_add((4_968_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(4_968_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -281,13 +281,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (87_569_000 as Weight) + Weight::from_ref_time(87_569_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((24_232_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(10 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(24_232_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(10 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -301,13 +301,13 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:2 w:2) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (98_839_000 as Weight) + Weight::from_ref_time(98_839_000 as RefTimeWeight) // Standard Error: 21_000 - .saturating_add((34_480_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(11 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(34_480_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) @@ -316,11 +316,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn rebond(l: u32, ) -> Weight { - (74_865_000 as Weight) + Weight::from_ref_time(74_865_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(9 as Weight)) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + .saturating_add(Weight::from_ref_time(64_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(9 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(8 as 
RefTimeWeight)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:1) @@ -332,12 +332,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn set_history_depth(e: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 62_000 - .saturating_add((22_829_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) + .saturating_add(Weight::from_ref_time(22_829_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((7 as RefTimeWeight).saturating_mul(e as RefTimeWeight))) } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) @@ -353,12 +353,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) fn reap_stash(s: u32, ) -> Weight { - (70_933_000 as Weight) + Weight::from_ref_time(70_933_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_021_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(12 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(1_021_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) @@ -379,16 +379,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn new_era(v: u32, n: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 897_000 - .saturating_add((213_100_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(213_100_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 45_000 - .saturating_add((31_123_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(208 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(Weight::from_ref_time(31_123_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(208 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as 
RefTimeWeight))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) @@ -399,25 +399,25 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add((23_745_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(23_745_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 116_000 - .saturating_add((22_497_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(22_497_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 3_968_000 - .saturating_add((20_676_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(202 as Weight)) - .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) - .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(20_676_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(202 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Staking Validators (r:501 w:0) fn get_npos_targets(v: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 36_000 - .saturating_add((8_097_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + .saturating_add(Weight::from_ref_time(8_097_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -426,8 +426,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - (7_041_000 as Weight) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(7_041_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -436,8 +436,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - (6_495_000 as Weight) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(6_495_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) @@ -450,16 +450,16 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList 
CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - (62_014_000 as Weight) - .saturating_add(T::DbWeight::get().reads(11 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(62_014_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - (12_814_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(12_814_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -472,9 +472,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - (43_992_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(43_992_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -482,9 +482,9 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - (75_827_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(7 as Weight)) + Weight::from_ref_time(75_827_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(7 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) @@ -496,20 +496,20 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn unbond() -> Weight { - (81_075_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(12 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + Weight::from_ref_time(81_075_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn withdraw_unbonded_update(s: u32, ) -> Weight { - (35_763_000 as Weight) + Weight::from_ref_time(35_763_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((57_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(57_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -525,9 +525,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - (66_938_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(13 as Weight)) - .saturating_add(RocksDbWeight::get().writes(11 as Weight)) + 
Weight::from_ref_time(66_938_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(13 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(11 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) @@ -541,19 +541,19 @@ impl WeightInfo for () { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - (52_943_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(11 as Weight)) - .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + Weight::from_ref_time(52_943_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) fn kick(k: u32, ) -> Weight { - (23_264_000 as Weight) + Weight::from_ref_time(23_264_000 as RefTimeWeight) // Standard Error: 11_000 - .saturating_add((8_006_000 as Weight).saturating_mul(k as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) + .saturating_add(Weight::from_ref_time(8_006_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) @@ -567,12 +567,12 @@ impl WeightInfo for () { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) fn nominate(n: u32, ) -> Weight { - (56_596_000 as Weight) + Weight::from_ref_time(56_596_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((3_644_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(12 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + .saturating_add(Weight::from_ref_time(3_644_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) @@ -582,50 +582,50 @@ impl WeightInfo for () { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - (51_117_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(51_117_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - (11_223_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(11_223_000 as RefTimeWeight) + 
.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - (19_826_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(19_826_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - (3_789_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_789_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - (3_793_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_793_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - (3_802_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_802_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - (3_762_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(3_762_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Invulnerables (r:0 w:1) fn set_invulnerables(v: u32, ) -> Weight { - (4_318_000 as Weight) + Weight::from_ref_time(4_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((10_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(10_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) @@ -641,20 +641,20 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) fn force_unstake(s: u32, ) -> Weight { - (65_265_000 as Weight) + Weight::from_ref_time(65_265_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_029_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(11 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(1_029_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Staking UnappliedSlashes (r:1 w:1) fn cancel_deferred_slash(s: u32, ) -> Weight { - (903_312_000 as Weight) + Weight::from_ref_time(903_312_000 as RefTimeWeight) // Standard Error: 56_000 - .saturating_add((4_968_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + 
.saturating_add(Weight::from_ref_time(4_968_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -667,13 +667,13 @@ impl WeightInfo for () { // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (87_569_000 as Weight) + Weight::from_ref_time(87_569_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((24_232_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(10 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(24_232_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(10 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -687,13 +687,13 @@ impl WeightInfo for () { // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:2 w:2) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (98_839_000 as Weight) + Weight::from_ref_time(98_839_000 as RefTimeWeight) // Standard Error: 21_000 - .saturating_add((34_480_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(11 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) + .saturating_add(Weight::from_ref_time(34_480_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) @@ -702,11 +702,11 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn rebond(l: u32, ) -> Weight { - (74_865_000 as Weight) + Weight::from_ref_time(74_865_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add((64_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(9 as Weight)) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + .saturating_add(Weight::from_ref_time(64_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(9 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:1) @@ -718,12 +718,12 @@ impl WeightInfo for () { // Storage: Staking ErasTotalStake (r:0 w:1) 
// Storage: Staking ErasStartSessionIndex (r:0 w:1) fn set_history_depth(e: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 62_000 - .saturating_add((22_829_000 as Weight).saturating_mul(e as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) + .saturating_add(Weight::from_ref_time(22_829_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((7 as RefTimeWeight).saturating_mul(e as RefTimeWeight))) } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) @@ -739,12 +739,12 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) fn reap_stash(s: u32, ) -> Weight { - (70_933_000 as Weight) + Weight::from_ref_time(70_933_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_021_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(12 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(1_021_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) @@ -765,16 +765,16 @@ impl WeightInfo for () { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn new_era(v: u32, n: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 897_000 - .saturating_add((213_100_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(213_100_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 45_000 - .saturating_add((31_123_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(208 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) - .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) + .saturating_add(Weight::from_ref_time(31_123_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(208 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) @@ -785,25 +785,25 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1500 
w:0) // Storage: Staking Ledger (r:1500 w:0) fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add((23_745_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(Weight::from_ref_time(23_745_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) // Standard Error: 116_000 - .saturating_add((22_497_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(22_497_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 3_968_000 - .saturating_add((20_676_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(202 as Weight)) - .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(v as Weight))) - .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) + .saturating_add(Weight::from_ref_time(20_676_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(202 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) } // Storage: Staking Validators (r:501 w:0) fn get_npos_targets(v: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 36_000 - .saturating_add((8_097_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) + .saturating_add(Weight::from_ref_time(8_097_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -812,8 +812,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - (7_041_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(7_041_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -822,8 +822,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - (6_495_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + Weight::from_ref_time(6_495_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) @@ -836,15 +836,15 @@ impl WeightInfo for () { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - (62_014_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(11 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) + 
Weight::from_ref_time(62_014_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - (12_814_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(12_814_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 9b0172bf97aa2..93b5bd3468f6b 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -839,9 +839,10 @@ pub mod pallet { impl Pallet { /// The real weight of a migration of the given number of `items` with total `size`. fn dynamic_weight(items: u32, size: u32) -> frame_support::pallet_prelude::Weight { - let items = items as Weight; - items - .saturating_mul(::DbWeight::get().reads_writes(1, 1)) + let items = items as u64; + ::DbWeight::get() + .reads_writes(1, 1) + .scalar_saturating_mul(items) // we assume that the read/write per-byte weight is the same for child and top tree. .saturating_add(T::WeightInfo::process_top_key(size)) } @@ -1129,25 +1130,25 @@ mod mock { impl WeightInfo for StateMigrationTestWeight { fn process_top_key(_: u32) -> Weight { - 1000000 + Weight::from_ref_time(1000000) } fn continue_migrate() -> Weight { - 1000000 + Weight::from_ref_time(1000000) } fn continue_migrate_wrong_witness() -> Weight { - 1000000 + Weight::from_ref_time(1000000) } fn migrate_custom_top_fail() -> Weight { - 1000000 + Weight::from_ref_time(1000000) } fn migrate_custom_top_success() -> Weight { - 1000000 + Weight::from_ref_time(1000000) } fn migrate_custom_child_fail() -> Weight { - 1000000 + Weight::from_ref_time(1000000) } fn migrate_custom_child_success() -> Weight { - 1000000 + Weight::from_ref_time(1000000) } } @@ -1243,9 +1244,9 @@ mod mock { (custom_storage, version).into() } - pub(crate) fn run_to_block(n: u32) -> (H256, u64) { + pub(crate) fn run_to_block(n: u32) -> (H256, Weight) { let mut root = Default::default(); - let mut weight_sum = 0; + let mut weight_sum = Weight::new(); log::trace!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n); while System::block_number() < n { System::set_block_number(System::block_number() + 1); @@ -1606,7 +1607,10 @@ pub(crate) mod remote_tests { use crate::{AutoLimits, MigrationLimits, Pallet as StateTrieMigration, LOG_TARGET}; use codec::Encode; use frame_benchmarking::Zero; - use frame_support::traits::{Get, Hooks}; + use frame_support::{ + traits::{Get, Hooks}, + weights::Weight, + }; use frame_system::Pallet as System; use remote_externalities::Mode; use sp_core::H256; @@ -1616,9 +1620,9 @@ pub(crate) mod remote_tests { #[allow(dead_code)] fn run_to_block>( n: ::BlockNumber, - ) -> (H256, u64) { + ) -> (H256, Weight) { let mut root = Default::default(); - let mut weight_sum = 0; + let mut weight_sum = Weight::new(); while System::::block_number() < n { System::::set_block_number(System::::block_number() + One::one()); System::::on_initialize(System::::block_number()); diff --git a/frame/state-trie-migration/src/weights.rs b/frame/state-trie-migration/src/weights.rs index f2566f949c058..6fccc0b68f3d7 100644 --- 
a/frame/state-trie-migration/src/weights.rs +++ b/frame/state-trie-migration/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_state_trie_migration. @@ -59,40 +59,40 @@ impl WeightInfo for SubstrateWeight { // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) // Storage: StateTrieMigration MigrationProcess (r:1 w:1) fn continue_migrate() -> Weight { - (19_019_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_019_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) fn continue_migrate_wrong_witness() -> Weight { - (1_874_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(1_874_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } fn migrate_custom_top_success() -> Weight { - (16_381_000 as Weight) + Weight::from_ref_time(16_381_000 as RefTimeWeight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_top_fail() -> Weight { - (25_966_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_966_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } fn migrate_custom_child_success() -> Weight { - (16_712_000 as Weight) + Weight::from_ref_time(16_712_000 as RefTimeWeight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_child_fail() -> Weight { - (29_885_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(29_885_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: unknown [0x6b6579] (r:1 w:1) fn process_top_key(v: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -101,39 +101,39 @@ impl WeightInfo for () { // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) // Storage: StateTrieMigration MigrationProcess (r:1 w:1) fn continue_migrate() -> Weight { - (19_019_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_019_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) fn 
continue_migrate_wrong_witness() -> Weight { - (1_874_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + Weight::from_ref_time(1_874_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } fn migrate_custom_top_success() -> Weight { - (16_381_000 as Weight) + Weight::from_ref_time(16_381_000 as RefTimeWeight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_top_fail() -> Weight { - (25_966_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_966_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } fn migrate_custom_child_success() -> Weight { - (16_712_000 as Weight) + Weight::from_ref_time(16_712_000 as RefTimeWeight) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_child_fail() -> Weight { - (29_885_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(29_885_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: unknown [0x6b6579] (r:1 w:1) fn process_top_key(v: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 07fdc56e82da6..bde69f11106dc 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -139,7 +139,7 @@ pub mod pallet { /// # #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); - (dispatch_info.weight.saturating_add(10_000), dispatch_info.class) + (dispatch_info.weight, dispatch_info.class) })] pub fn sudo( origin: OriginFor, @@ -222,7 +222,6 @@ pub mod pallet { let dispatch_info = call.get_dispatch_info(); ( dispatch_info.weight - .saturating_add(10_000) // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)), dispatch_info.class, diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 71f0f26b1a1d5..c895eaf830136 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -22,6 +22,7 @@ use crate as sudo; use frame_support::{ parameter_types, traits::{ConstU32, ConstU64, Contains, GenesisBuild}, + weights::Weight, }; use frame_system::limits; use sp_core::H256; @@ -109,7 +110,7 @@ frame_support::construct_runtime!( ); parameter_types! { - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::simple_max(1024); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::simple_max(Weight::from_ref_time(1024)); } pub struct BlockEverything; diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index 84c8e0c5c254e..502c6476935a2 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for the module. 
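Each of these weights.rs hunks has the same layout: a WeightInfo trait of per-dispatchable weight functions, a benchmarked implementation for production, and an `impl WeightInfo for ()` used by tests. A standalone sketch of that structure under the new type, with MiniWeight standing in for the real struct:

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct MiniWeight { ref_time: u64 }

    trait WeightInfo {
        fn continue_migrate() -> MiniWeight;
    }

    // Test/default implementation, mirroring the `impl WeightInfo for ()` above; the
    // production impl adds the DbWeight read/write terms on top of the base figure.
    impl WeightInfo for () {
        fn continue_migrate() -> MiniWeight {
            MiniWeight { ref_time: 19_019_000 }
        }
    }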
use super::*; -use frame_support::{assert_noop, assert_ok}; +use frame_support::{assert_noop, assert_ok, weights::Weight}; use mock::{ new_test_ext, Call, Event as TestEvent, Logger, LoggerCall, Origin, Sudo, SudoCall, System, Test, @@ -39,12 +39,18 @@ fn sudo_basics() { // Configure a default test environment and set the root `key` to 1. new_test_ext(1).execute_with(|| { // A privileged function should work when `sudo` is passed the root `key` as `origin`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1_000), + })); assert_ok!(Sudo::sudo(Origin::signed(1), call)); assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when `sudo` is passed a non-root `key` as `origin`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1_000), + })); assert_noop!(Sudo::sudo(Origin::signed(2), call), Error::::RequireSudo); }); } @@ -56,7 +62,8 @@ fn sudo_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); + let call = + Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::one() })); assert_ok!(Sudo::sudo(Origin::signed(1), call)); System::assert_has_event(TestEvent::Sudo(Event::Sudid { sudo_result: Ok(()) })); }) @@ -66,24 +73,36 @@ fn sudo_emits_events_correctly() { fn sudo_unchecked_weight_basics() { new_test_ext(1).execute_with(|| { // A privileged function should work when `sudo` is passed the root `key` as origin. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); - assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1_000), + })); + assert_ok!(Sudo::sudo_unchecked_weight( + Origin::signed(1), + call, + Weight::from_ref_time(1_000) + )); assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when called with a non-root `key`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1_000), + })); assert_noop!( - Sudo::sudo_unchecked_weight(Origin::signed(2), call, 1_000), + Sudo::sudo_unchecked_weight(Origin::signed(2), call, Weight::from_ref_time(1_000)), Error::::RequireSudo, ); // `I32Log` is unchanged after unsuccessful call. assert_eq!(Logger::i32_log(), vec![42i32]); // Controls the dispatched weight. 
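A purely hypothetical helper (not part of this patch) that test modules could use to keep churn down: wrap the literal once instead of at every call site. It assumes only the Weight::from_ref_time constructor introduced by this patch.

    // Hypothetical convenience for test code only; `Weight` is the new frame_support struct.
    fn w(ref_time: u64) -> Weight {
        Weight::from_ref_time(ref_time)
    }

    // e.g. LoggerCall::privileged_i32_log { i: 42, weight: w(1_000) }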
- let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); - let sudo_unchecked_weight_call = SudoCall::sudo_unchecked_weight { call, weight: 1_000 }; + let call = + Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::one() })); + let sudo_unchecked_weight_call = + SudoCall::sudo_unchecked_weight { call, weight: Weight::from_ref_time(1_000) }; let info = sudo_unchecked_weight_call.get_dispatch_info(); - assert_eq!(info.weight, 1_000); + assert_eq!(info.weight, Weight::from_ref_time(1_000)); }); } @@ -94,8 +113,13 @@ fn sudo_unchecked_weight_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1 })); - assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); + let call = + Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::one() })); + assert_ok!(Sudo::sudo_unchecked_weight( + Origin::signed(1), + call, + Weight::from_ref_time(1_000) + )); System::assert_has_event(TestEvent::Sudo(Event::Sudid { sudo_result: Ok(()) })); }) } @@ -134,17 +158,22 @@ fn set_key_emits_events_correctly() { fn sudo_as_basics() { new_test_ext(1).execute_with(|| { // A privileged function will not work when passed to `sudo_as`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: 1_000 })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1_000), + })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); assert!(Logger::i32_log().is_empty()); assert!(Logger::account_log().is_empty()); // A non-privileged function should not work when called with a non-root `key`. - let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); + let call = + Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::one() })); assert_noop!(Sudo::sudo_as(Origin::signed(3), 2, call), Error::::RequireSudo); // A non-privileged function will work when passed to `sudo_as` with the root `key`. - let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); + let call = + Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::one() })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); assert_eq!(Logger::i32_log(), vec![42i32]); // The correct user makes the call within `sudo_as`. @@ -159,7 +188,8 @@ fn sudo_as_emits_events_correctly() { System::set_block_number(1); // A non-privileged function will work when passed to `sudo_as` with the root `key`. 
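The tests above lean on the convenience constructors added later in this patch; a small sketch of their semantics, assuming the Weight API as introduced here:

    #[test]
    fn weight_constructors_match_literals() {
        assert_eq!(Weight::zero(), Weight::from_ref_time(0));
        assert_eq!(Weight::one(), Weight::from_ref_time(1));
        assert_eq!(Weight::one().saturating_add(Weight::one()), Weight::from_ref_time(2));
    }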
- let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: 1 })); + let call = + Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::one() })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); System::assert_has_event(TestEvent::Sudo(Event::SudoAsDone { sudo_result: Ok(()) })); }); diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index a658f01fa6854..97312ac3476b5 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -176,18 +176,18 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug + /// ``` /// # #[macro_use] /// # extern crate frame_support; -/// # use frame_support::dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}; +/// # use frame_support::{weights::Weight, dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}}; /// # use frame_system::{Config, ensure_signed}; /// decl_module! { /// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 1_000_000] /// fn my_long_function(origin, do_expensive_calc: bool) -> DispatchResultWithPostInfo { -/// ensure_signed(origin).map_err(|e| e.with_weight(100_000))?; +/// ensure_signed(origin).map_err(|e| e.with_weight(Weight::from_ref_time(100_000)))?; /// if do_expensive_calc { /// // do the expensive calculation /// // ... /// // return None to indicate that we are using all weight (the default) -/// return Ok(None.into()); +/// return Ok(None::.into()); /// } /// // expensive calculation not executed: use only a portion of the weight /// Ok(Some(100_000).into()) @@ -1614,7 +1614,7 @@ macro_rules! decl_module { pallet_name, ); - 0 + $crate::dispatch::Weight::new() } #[cfg(feature = "try-runtime")] @@ -2649,13 +2649,13 @@ mod tests { #[weight = (5, DispatchClass::Operational)] fn operational(_origin) { unreachable!() } - fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } 7 } + fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } Weight::from_ref_time(7) } fn on_idle(n: T::BlockNumber, remaining_weight: Weight,) -> Weight { - if n.into() == 42 || remaining_weight == 42 { panic!("on_idle") } - 7 + if n.into() == 42 || remaining_weight == Weight::from_ref_time(42) { panic!("on_idle") } + Weight::from_ref_time(7) } fn on_finalize(n: T::BlockNumber,) { if n.into() == 42 { panic!("on_finalize") } } - fn on_runtime_upgrade() -> Weight { 10 } + fn on_runtime_upgrade() -> Weight { Weight::from_ref_time(10) } fn offchain_worker() {} /// Some doc fn integrity_test() { panic!("integrity_test") } @@ -2814,24 +2814,30 @@ mod tests { #[test] fn on_initialize_should_work_2() { - assert_eq!( as OnInitialize>::on_initialize(10), 7); + assert_eq!( + as OnInitialize>::on_initialize(10), + Weight::from_ref_time(7) + ); } #[test] #[should_panic(expected = "on_idle")] fn on_idle_should_work_1() { - as OnIdle>::on_idle(42, 9); + as OnIdle>::on_idle(42, Weight::from_ref_time(9)); } #[test] #[should_panic(expected = "on_idle")] fn on_idle_should_work_2() { - as OnIdle>::on_idle(9, 42); + as OnIdle>::on_idle(9, Weight::from_ref_time(42)); } #[test] fn on_idle_should_work_3() { - assert_eq!( as OnIdle>::on_idle(10, 11), 7); + assert_eq!( + as OnIdle>::on_idle(10, Weight::from_ref_time(11)), + Weight::from_ref_time(7) + ); } #[test] @@ -2843,7 +2849,10 @@ mod tests { #[test] fn on_runtime_upgrade_should_work() { sp_io::TestExternalities::default().execute_with(|| { - assert_eq!( as OnRuntimeUpgrade>::on_runtime_upgrade(), 
10) + assert_eq!( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + Weight::from_ref_time(10) + ) }); } @@ -2852,12 +2861,20 @@ mod tests { // operational. assert_eq!( Call::::operational {}.get_dispatch_info(), - DispatchInfo { weight: 5, class: DispatchClass::Operational, pays_fee: Pays::Yes }, + DispatchInfo { + weight: Weight::from_ref_time(5), + class: DispatchClass::Operational, + pays_fee: Pays::Yes + }, ); // custom basic assert_eq!( Call::::aux_3 {}.get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes }, + DispatchInfo { + weight: Weight::from_ref_time(3), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + }, ); } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 72551980bc7e9..a81a266fb9a3d 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1374,7 +1374,7 @@ pub mod pallet_prelude { ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, TypedGet, }, - weights::{DispatchClass, Pays, Weight}, + weights::{DispatchClass, Pays, RefTimeWeight, Weight}, Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, }; diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs index f594b98ede4ff..b559d3aca615f 100644 --- a/frame/support/src/migrations.rs +++ b/frame/support/src/migrations.rs @@ -48,7 +48,7 @@ impl PalletVersionToStorageVersionHelpe #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl PalletVersionToStorageVersionHelper for T { fn migrate(db_weight: &RuntimeDbWeight) -> Weight { - let mut weight: Weight = 0; + let mut weight = Weight::new(); for_tuples!( #( weight = weight.saturating_add(T::migrate(db_weight)); )* ); diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index 2b7234006e0ff..f8e91886edf0c 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -17,8 +17,8 @@ //! Traits for hooking tasks to events in a blockchain's lifecycle. +use crate::weights::Weight; use impl_trait_for_tuples::impl_for_tuples; -use sp_arithmetic::traits::Saturating; use sp_runtime::traits::AtLeast32BitUnsigned; /// The block initialization trait. @@ -33,8 +33,8 @@ pub trait OnInitialize { /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, /// including inherent extrinsics. Hence for instance, if you runtime includes /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { - 0 + fn on_initialize(_n: BlockNumber) -> Weight { + Weight::new() } } @@ -42,8 +42,8 @@ pub trait OnInitialize { #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnInitialize for Tuple { - fn on_initialize(n: BlockNumber) -> crate::weights::Weight { - let mut weight = 0; + fn on_initialize(n: BlockNumber) -> Weight { + let mut weight = Weight::new(); for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(n.clone())); )* ); weight } @@ -75,11 +75,8 @@ pub trait OnIdle { /// /// NOTE: This function is called AFTER ALL extrinsics - including inherent extrinsics - /// in a block are applied but before `on_finalize` is executed. 
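The tuple implementation above folds every pallet's on_initialize weight together with saturating addition, so a single misreporting pallet cannot wrap the total. A standalone sketch of that accumulation, with MiniWeight as a stand-in for the real struct:

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct MiniWeight { ref_time: u64 }

    impl MiniWeight {
        const fn from_ref_time(ref_time: u64) -> Self { Self { ref_time } }
        const fn saturating_add(self, rhs: Self) -> Self {
            Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) }
        }
    }

    // Sum the weight reported by each pallet in the tuple; saturates instead of wrapping.
    fn on_initialize_all(per_pallet: &[MiniWeight]) -> MiniWeight {
        per_pallet
            .iter()
            .fold(MiniWeight::from_ref_time(0), |acc, w| acc.saturating_add(*w))
    }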
- fn on_idle( - _n: BlockNumber, - _remaining_weight: crate::weights::Weight, - ) -> crate::weights::Weight { - 0 + fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { + Weight::new() } } @@ -87,12 +84,10 @@ pub trait OnIdle { #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnIdle for Tuple { - fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { - let on_idle_functions: &[fn( - BlockNumber, - crate::weights::Weight, - ) -> crate::weights::Weight] = &[for_tuples!( #( Tuple::on_idle ),* )]; - let mut weight = 0; + fn on_idle(n: BlockNumber, remaining_weight: Weight) -> Weight { + let on_idle_functions: &[fn(BlockNumber, Weight) -> Weight] = + &[for_tuples!( #( Tuple::on_idle ),* )]; + let mut weight = Weight::new(); let len = on_idle_functions.len(); let start_index = n % (len as u32).into(); let start_index = start_index.try_into().ok().expect( @@ -174,8 +169,8 @@ pub trait OnRuntimeUpgrade { /// block local data are not accessible. /// /// Return the non-negotiable weight consumed for runtime upgrade. - fn on_runtime_upgrade() -> crate::weights::Weight { - 0 + fn on_runtime_upgrade() -> Weight { + Weight::new() } /// Execute some pre-checks prior to a runtime upgrade. @@ -199,8 +194,8 @@ pub trait OnRuntimeUpgrade { #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnRuntimeUpgrade for Tuple { - fn on_runtime_upgrade() -> crate::weights::Weight { - let mut weight = 0; + fn on_runtime_upgrade() -> Weight { + let mut weight = Weight::new(); for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); weight } @@ -243,18 +238,15 @@ pub trait Hooks { /// Will not fire if the remaining weight is 0. /// Return the weight used, the hook will subtract it from current weight used /// and pass the result to the next `on_idle` hook if it exists. - fn on_idle( - _n: BlockNumber, - _remaining_weight: crate::weights::Weight, - ) -> crate::weights::Weight { - 0 + fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { + Weight::new() } /// The block is being initialized. Implement to have something happen. /// /// Return the non-negotiable weight consumed in the block. - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { - 0 + fn on_initialize(_n: BlockNumber) -> Weight { + Weight::new() } /// Perform a module upgrade. @@ -276,8 +268,8 @@ pub trait Hooks { /// pallet is discouraged and might get deprecated in the future. Alternatively, export the same /// logic as a free-function from your pallet, and pass it to `type Executive` from the /// top-level runtime. - fn on_runtime_upgrade() -> crate::weights::Weight { - 0 + fn on_runtime_upgrade() -> Weight { + Weight::new() } /// Execute some pre-checks prior to a runtime upgrade. 
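With the trait defaults above now returning Weight::new(), a pallet hook that does report work looks like the following hedged sketch; the trait bounds, T::DbWeight and the single read/write shown here are placeholders for illustration, not part of this patch:

    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
        fn on_runtime_upgrade() -> Weight {
            let mut weight = Weight::new();
            // e.g. one read to check the on-chain storage version, one write to bump it
            weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1));
            weight
        }
    }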
@@ -360,18 +352,18 @@ mod tests { fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { struct Test; impl OnInitialize for Test { - fn on_initialize(_n: u8) -> crate::weights::Weight { - 10 + fn on_initialize(_n: u8) -> Weight { + Weight::from_ref_time(10) } } impl OnRuntimeUpgrade for Test { - fn on_runtime_upgrade() -> crate::weights::Weight { - 20 + fn on_runtime_upgrade() -> Weight { + Weight::from_ref_time(20) } } - assert_eq!(<(Test, Test)>::on_initialize(0), 20); - assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); + assert_eq!(<(Test, Test)>::on_initialize(0), Weight::from_ref_time(20)); + assert_eq!(<(Test, Test)>::on_runtime_upgrade(), Weight::from_ref_time(40)); } #[test] @@ -383,48 +375,48 @@ mod tests { struct Test3; type TestTuple = (Test1, Test2, Test3); impl OnIdle for Test1 { - fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight { + fn on_idle(_n: u32, _weight: Weight) -> Weight { unsafe { ON_IDLE_INVOCATION_ORDER.push("Test1"); } - 0 + Weight::zero() } } impl OnIdle for Test2 { - fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight { + fn on_idle(_n: u32, _weight: Weight) -> Weight { unsafe { ON_IDLE_INVOCATION_ORDER.push("Test2"); } - 0 + Weight::zero() } } impl OnIdle for Test3 { - fn on_idle(_n: u32, _weight: crate::weights::Weight) -> crate::weights::Weight { + fn on_idle(_n: u32, _weight: Weight) -> Weight { unsafe { ON_IDLE_INVOCATION_ORDER.push("Test3"); } - 0 + Weight::zero() } } unsafe { - TestTuple::on_idle(0, 0); + TestTuple::on_idle(0, Weight::zero()); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test1", "Test2", "Test3"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(1, 0); + TestTuple::on_idle(1, Weight::zero()); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test2", "Test3", "Test1"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(2, 0); + TestTuple::on_idle(2, Weight::zero()); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test3", "Test1", "Test2"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(3, 0); + TestTuple::on_idle(3, Weight::zero()); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test1", "Test2", "Test3"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); - TestTuple::on_idle(4, 0); + TestTuple::on_idle(4, Weight::zero()); assert_eq!(ON_IDLE_INVOCATION_ORDER, ["Test2", "Test3", "Test1"].to_vec()); ON_IDLE_INVOCATION_ORDER.clear(); } diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index fb34484416063..568cb5535ccda 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -123,6 +123,7 @@ mod block_weights; mod extrinsic_weights; mod paritydb_weights; mod rocksdb_weights; +mod weight_v2; use crate::{ dispatch::{DispatchError, DispatchErrorWithPostInfo, DispatchResultWithPostInfo}, @@ -146,18 +147,17 @@ use sp_runtime::{ /// Re-export priority as type pub use sp_runtime::transaction_validity::TransactionPriority; -/// Numeric range of a transaction weight. -pub type Weight = u64; +pub use weight_v2::*; /// These constants are specific to FRAME, and the current implementation of its various components. /// For example: FRAME System, FRAME Executive, our FRAME support libraries, etc... 
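The on_idle test above exercises the start-index rotation in the tuple implementation: which pallet is offered idle weight first depends on the block number, so no pallet is permanently favoured. A standalone sketch of just the index rotation:

    // Returns the order in which a tuple of `pallet_count` pallets (non-zero) would be
    // offered idle weight at `block_number`; the real implementation then calls each
    // pallet's on_idle in that order and sums the returned Weight.
    fn on_idle_order(block_number: u64, pallet_count: usize) -> Vec<usize> {
        let start = (block_number % pallet_count as u64) as usize;
        (0..pallet_count).map(|i| (start + i) % pallet_count).collect()
    }

    // block 0 -> [0, 1, 2], block 1 -> [1, 2, 0], block 2 -> [2, 0, 1], matching the test.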
pub mod constants { use super::Weight; - pub const WEIGHT_PER_SECOND: Weight = 1_000_000_000_000; - pub const WEIGHT_PER_MILLIS: Weight = WEIGHT_PER_SECOND / 1000; // 1_000_000_000 - pub const WEIGHT_PER_MICROS: Weight = WEIGHT_PER_MILLIS / 1000; // 1_000_000 - pub const WEIGHT_PER_NANOS: Weight = WEIGHT_PER_MICROS / 1000; // 1_000 + pub const WEIGHT_PER_SECOND: Weight = Weight::from_ref_time(1_000_000_000_000); + pub const WEIGHT_PER_MILLIS: Weight = Weight::from_ref_time(1_000_000_000); + pub const WEIGHT_PER_MICROS: Weight = Weight::from_ref_time(1_000_000); + pub const WEIGHT_PER_NANOS: Weight = Weight::from_ref_time(1_000); // Expose the Block and Extrinsic base weights. pub use super::{block_weights::BlockExecutionWeight, extrinsic_weights::ExtrinsicBaseWeight}; @@ -204,6 +204,12 @@ impl Default for Pays { } } +impl From for PostDispatchInfo { + fn from(pays_fee: Pays) -> Self { + Self { actual_weight: None, pays_fee } + } +} + /// A generalized group of dispatch types. /// /// NOTE whenever upgrading the enum make sure to also update @@ -359,25 +365,6 @@ pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &Dispa .pays_fee(info) } -impl From<(Option, Pays)> for PostDispatchInfo { - fn from(post_weight_info: (Option, Pays)) -> Self { - let (actual_weight, pays_fee) = post_weight_info; - Self { actual_weight, pays_fee } - } -} - -impl From for PostDispatchInfo { - fn from(pays_fee: Pays) -> Self { - Self { actual_weight: None, pays_fee } - } -} - -impl From> for PostDispatchInfo { - fn from(actual_weight: Option) -> Self { - Self { actual_weight, pays_fee: Default::default() } - } -} - impl From<()> for PostDispatchInfo { fn from(_: ()) -> Self { Self { actual_weight: None, pays_fee: Default::default() } @@ -407,7 +394,7 @@ pub trait WithPostDispatchInfo { /// # Example /// /// ```ignore - /// let who = ensure_signed(origin).map_err(|e| e.with_weight(100))?; + /// let who = ensure_signed(origin).map_err(|e| e.with_weight(Weight::from_ref_time(100)))?; /// ensure!(who == me, Error::::NotMe.with_weight(200_000)); /// ``` fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; @@ -428,78 +415,6 @@ where } } -impl WeighData for Weight { - fn weigh_data(&self, _: T) -> Weight { - *self - } -} - -impl ClassifyDispatch for Weight { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for Weight { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl WeighData for (Weight, DispatchClass, Pays) { - fn weigh_data(&self, _: T) -> Weight { - self.0 - } -} - -impl ClassifyDispatch for (Weight, DispatchClass, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -impl PaysFee for (Weight, DispatchClass, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.2 - } -} - -impl WeighData for (Weight, DispatchClass) { - fn weigh_data(&self, _: T) -> Weight { - self.0 - } -} - -impl ClassifyDispatch for (Weight, DispatchClass) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -impl PaysFee for (Weight, DispatchClass) { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl WeighData for (Weight, Pays) { - fn weigh_data(&self, _: T) -> Weight { - self.0 - } -} - -impl ClassifyDispatch for (Weight, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for (Weight, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.1 - } -} - /// Implementation for unchecked extrinsic. 
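With the constants in this hunk now being Weight values, converting a benchmarked duration into weight goes through scalar_saturating_mul rather than plain integer multiplication. A hedged usage sketch, assuming the frame_support paths as they appear in this patch:

    use frame_support::weights::{constants::WEIGHT_PER_NANOS, Weight};

    // A measured duration in nanoseconds becomes a Weight by scaling WEIGHT_PER_NANOS,
    // the same pattern the BlockExecutionWeight and ExtrinsicBaseWeight constants adopt
    // later in this patch.
    fn weight_from_nanos(nanos: u64) -> Weight {
        WEIGHT_PER_NANOS.scalar_saturating_mul(nanos)
    }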
impl GetDispatchInfo for UncheckedExtrinsic @@ -527,30 +442,37 @@ where impl GetDispatchInfo for sp_runtime::testing::TestXt { fn get_dispatch_info(&self) -> DispatchInfo { // for testing: weight == size. - DispatchInfo { weight: self.encode().len() as _, pays_fee: Pays::Yes, ..Default::default() } + DispatchInfo { + weight: Weight::from_ref_time(self.encode().len() as _), + pays_fee: Pays::Yes, + ..Default::default() + } } } /// The weight of database operations that the runtime can invoke. +/// +/// NOTE: This is currently only measured in computational time, and will probably +/// be updated all together once proof size is accounted for. #[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct RuntimeDbWeight { - pub read: Weight, - pub write: Weight, + pub read: RefTimeWeight, + pub write: RefTimeWeight, } impl RuntimeDbWeight { - pub fn reads(self, r: Weight) -> Weight { - self.read.saturating_mul(r) + pub fn reads(self, r: u64) -> Weight { + Weight::from_ref_time(self.read.saturating_mul(r)) } - pub fn writes(self, w: Weight) -> Weight { - self.write.saturating_mul(w) + pub fn writes(self, w: u64) -> Weight { + Weight::from_ref_time(self.write.saturating_mul(w)) } - pub fn reads_writes(self, r: Weight, w: Weight) -> Weight { + pub fn reads_writes(self, r: u64, w: u64) -> Weight { let read_weight = self.read.saturating_mul(r); let write_weight = self.write.saturating_mul(w); - read_weight.saturating_add(write_weight) + Weight::from_ref_time(read_weight.saturating_add(write_weight)) } } @@ -618,7 +540,8 @@ where Self::polynomial() .iter() .fold(Self::Balance::saturated_from(0u32), |mut acc, args| { - let w = Self::Balance::saturated_from(*weight).saturating_pow(args.degree.into()); + let w = Self::Balance::saturated_from(weight.ref_time()) + .saturating_pow(args.degree.into()); // The sum could get negative. Therefore we only sum with the accumulator. // The Perbill Mul implementation is non overflowing. @@ -648,7 +571,7 @@ where type Balance = T; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(*weight) + Self::Balance::saturated_from(weight.ref_time()) } } @@ -671,7 +594,7 @@ where type Balance = T; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(*weight).saturating_mul(M::get()) + Self::Balance::saturated_from(weight.ref_time()).saturating_mul(M::get()) } } @@ -727,7 +650,7 @@ impl PerDispatchClass { impl PerDispatchClass { /// Returns the total weight consumed by all extrinsics in the block. pub fn total(&self) -> Weight { - let mut sum = 0; + let mut sum = Weight::new(); for class in DispatchClass::all() { sum = sum.saturating_add(*self.get(*class)); } @@ -744,7 +667,7 @@ impl PerDispatchClass { /// occur. 
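The RuntimeDbWeight change above keeps the per-operation costs as plain reference time, takes the operation counts as u64, and only wraps the result into a Weight at the end. A standalone sketch of that arithmetic:

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct MiniWeight { ref_time: u64 }

    struct MiniDbWeight { read: u64, write: u64 }

    impl MiniDbWeight {
        fn reads_writes(&self, r: u64, w: u64) -> MiniWeight {
            let read = self.read.saturating_mul(r);
            let write = self.write.saturating_mul(w);
            MiniWeight { ref_time: read.saturating_add(write) }
        }
    }

    // e.g. a migration touching `items` keys charges reads_writes(1, 1) scaled by the item
    // count, as in the state-trie-migration dynamic_weight change earlier in this patch.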
pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { let value = self.get_mut(class); - *value = value.checked_add(weight).ok_or(())?; + *value = value.checked_add(&weight).ok_or(())?; Ok(()) } @@ -804,16 +727,16 @@ mod tests { fn f03(_origin) { unimplemented!(); } // weight = a x 10 + b - #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] + #[weight = ((_a * 10 + _eb * 1) as RefTimeWeight, DispatchClass::Normal, Pays::Yes)] fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); } #[weight = (0, DispatchClass::Operational, Pays::Yes)] fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } - #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] + #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + Weight::from_ref_time(10_000)] fn f20(_origin) { unimplemented!(); } - #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] + #[weight = T::DbWeight::get().reads_writes(6, 5) + Weight::from_ref_time(40_000)] fn f21(_origin) { unimplemented!(); } } @@ -823,80 +746,98 @@ mod tests { fn weights_are_correct() { // #[weight = 1000] let info = Call::::f00 {}.get_dispatch_info(); - assert_eq!(info.weight, 1000); + assert_eq!(info.weight, Weight::from_ref_time(1000)); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = (1000, DispatchClass::Mandatory)] let info = Call::::f01 {}.get_dispatch_info(); - assert_eq!(info.weight, 1000); + assert_eq!(info.weight, Weight::from_ref_time(1000)); assert_eq!(info.class, DispatchClass::Mandatory); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = (1000, Pays::No)] let info = Call::::f02 {}.get_dispatch_info(); - assert_eq!(info.weight, 1000); + assert_eq!(info.weight, Weight::from_ref_time(1000)); assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::No); // #[weight = (1000, DispatchClass::Operational, Pays::No)] let info = Call::::f03 {}.get_dispatch_info(); - assert_eq!(info.weight, 1000); + assert_eq!(info.weight, Weight::from_ref_time(1000)); assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::No); // #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] let info = Call::::f11 { _a: 13, _eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, 150); // 13*10 + 20 + assert_eq!(info.weight, Weight::from_ref_time(150)); // 13*10 + 20 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = (0, DispatchClass::Operational, Pays::Yes)] let info = Call::::f12 { _a: 10, _eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, 0); + assert_eq!(info.weight, Weight::from_ref_time(0)); assert_eq!(info.class, DispatchClass::Operational); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] let info = Call::::f20 {}.get_dispatch_info(); - assert_eq!(info.weight, 12300); // 100*3 + 1000*2 + 10_1000 + assert_eq!(info.weight, Weight::from_ref_time(12300)); // 100*3 + 1000*2 + 10_1000 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); // #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] let info = Call::::f21 {}.get_dispatch_info(); - assert_eq!(info.weight, 45600); // 100*6 + 1000*5 + 40_1000 + assert_eq!(info.weight, Weight::from_ref_time(45600)); // 100*6 + 1000*5 + 40_1000 assert_eq!(info.class, DispatchClass::Normal); assert_eq!(info.pays_fee, Pays::Yes); } #[test] fn 
extract_actual_weight_works() { - let pre = DispatchInfo { weight: 1000, ..Default::default() }; - assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), 7); - assert_eq!(extract_actual_weight(&Ok(Some(1000).into()), &pre), 1000); - assert_eq!(extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(9)), &pre), 9); + let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; + assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), Weight::from_ref_time(7)); + assert_eq!( + extract_actual_weight(&Ok(Some(1000).into()), &pre), + Weight::from_ref_time(1000) + ); + assert_eq!( + extract_actual_weight( + &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(9))), + &pre + ), + Weight::from_ref_time(9) + ); } #[test] fn extract_actual_weight_caps_at_pre_weight() { - let pre = DispatchInfo { weight: 1000, ..Default::default() }; - assert_eq!(extract_actual_weight(&Ok(Some(1250).into()), &pre), 1000); + let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; assert_eq!( - extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(1300)), &pre), - 1000 + extract_actual_weight(&Ok(Some(1250).into()), &pre), + Weight::from_ref_time(1000) + ); + assert_eq!( + extract_actual_weight( + &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(1300))), + &pre + ), + Weight::from_ref_time(1000), ); } #[test] fn extract_actual_pays_fee_works() { - let pre = DispatchInfo { weight: 1000, ..Default::default() }; + let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::Yes); assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::Yes); assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::Yes); assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::No).into()), &pre), Pays::No); assert_eq!( - extract_actual_pays_fee(&Err(DispatchError::BadOrigin.with_weight(9)), &pre), + extract_actual_pays_fee( + &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(9))), + &pre + ), Pays::Yes ); assert_eq!( @@ -910,7 +851,11 @@ mod tests { Pays::No ); - let pre = DispatchInfo { weight: 1000, pays_fee: Pays::No, ..Default::default() }; + let pre = DispatchInfo { + weight: Weight::from_ref_time(1000), + pays_fee: Pays::No, + ..Default::default() + }; assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::No); assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::No); assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::No); @@ -956,40 +901,52 @@ mod tests { #[test] fn polynomial_works() { // 100^3/2=500000 100^2*(2+1/3)=23333 700 -10000 - assert_eq!(Poly::weight_to_fee(&100), 514033); + assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(100)), 514033); // 10123^3/2=518677865433 10123^2*(2+1/3)=239108634 70861 -10000 - assert_eq!(Poly::weight_to_fee(&10_123), 518917034928); + assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(10_123)), 518917034928); } #[test] fn polynomial_does_not_underflow() { - assert_eq!(Poly::weight_to_fee(&0), 0); - assert_eq!(Poly::weight_to_fee(&10), 0); + assert_eq!(Poly::weight_to_fee(&Weight::zero()), 0); + assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(10)), 0); } #[test] fn polynomial_does_not_overflow() { - assert_eq!(Poly::weight_to_fee(&Weight::max_value()), Balance::max_value() - 10_000); + assert_eq!(Poly::weight_to_fee(&Weight::MAX), Balance::max_value() - 
10_000); } #[test] fn identity_fee_works() { - assert_eq!(IdentityFee::::weight_to_fee(&0), 0); - assert_eq!(IdentityFee::::weight_to_fee(&50), 50); - assert_eq!( - IdentityFee::::weight_to_fee(&Weight::max_value()), - Balance::max_value() - ); + assert_eq!(IdentityFee::::weight_to_fee(&Weight::zero()), 0); + assert_eq!(IdentityFee::::weight_to_fee(&Weight::from_ref_time(50)), 50); + assert_eq!(IdentityFee::::weight_to_fee(&Weight::MAX), Balance::max_value()); } #[test] fn constant_fee_works() { use crate::traits::ConstU128; - assert_eq!(ConstantMultiplier::>::weight_to_fee(&0), 0); - assert_eq!(ConstantMultiplier::>::weight_to_fee(&50), 500); - assert_eq!(ConstantMultiplier::>::weight_to_fee(&16), 16384); assert_eq!( - ConstantMultiplier::>::weight_to_fee(&2), + ConstantMultiplier::>::weight_to_fee(&Weight::zero()), + 0 + ); + assert_eq!( + ConstantMultiplier::>::weight_to_fee(&Weight::from_ref_time( + 50 + )), + 500 + ); + assert_eq!( + ConstantMultiplier::>::weight_to_fee(&Weight::from_ref_time( + 16 + )), + 16384 + ); + assert_eq!( + ConstantMultiplier::>::weight_to_fee( + &Weight::from_ref_time(2) + ), u128::MAX ); } diff --git a/frame/support/src/weights/block_weights.rs b/frame/support/src/weights/block_weights.rs index b86334514af2f..93a80d12db96b 100644 --- a/frame/support/src/weights/block_weights.rs +++ b/frame/support/src/weights/block_weights.rs @@ -53,7 +53,7 @@ parameter_types! { /// 99th: 5_489_273 /// 95th: 5_433_314 /// 75th: 5_354_812 - pub const BlockExecutionWeight: Weight = 5_346_284 * WEIGHT_PER_NANOS; + pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul(5_346_284); } #[cfg(test)] diff --git a/frame/support/src/weights/extrinsic_weights.rs b/frame/support/src/weights/extrinsic_weights.rs index b8a52c164d8fe..d223eb7c10efc 100644 --- a/frame/support/src/weights/extrinsic_weights.rs +++ b/frame/support/src/weights/extrinsic_weights.rs @@ -53,7 +53,7 @@ parameter_types! { /// 99th: 86_924 /// 95th: 86_828 /// 75th: 86_347 - pub const ExtrinsicBaseWeight: Weight = 86_298 * WEIGHT_PER_NANOS; + pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul(86_298); } #[cfg(test)] diff --git a/frame/support/src/weights/paritydb_weights.rs b/frame/support/src/weights/paritydb_weights.rs index 572187ba78a92..f498991729d7a 100644 --- a/frame/support/src/weights/paritydb_weights.rs +++ b/frame/support/src/weights/paritydb_weights.rs @@ -25,8 +25,8 @@ pub mod constants { /// ParityDB can be enabled with a feature flag, but is still experimental. These weights /// are available for brave runtime engineers who may want to try this out as default. pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_PER_NANOS, - write: 50_000 * constants::WEIGHT_PER_NANOS, + read: 8_000 * constants::WEIGHT_PER_NANOS.ref_time(), + write: 50_000 * constants::WEIGHT_PER_NANOS.ref_time(), }; } diff --git a/frame/support/src/weights/rocksdb_weights.rs b/frame/support/src/weights/rocksdb_weights.rs index f37964dcbd825..67571c4723f94 100644 --- a/frame/support/src/weights/rocksdb_weights.rs +++ b/frame/support/src/weights/rocksdb_weights.rs @@ -25,8 +25,8 @@ pub mod constants { /// By default, Substrate uses RocksDB, so this will be the weight used throughout /// the runtime. 
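The fee tests earlier in this hunk show the other side of the migration: fee formulas no longer treat the weight itself as a number, they read the scalar out with ref_time() first. A standalone sketch with plain integer stand-ins:

    fn identity_fee(weight_ref_time: u64) -> u128 {
        weight_ref_time as u128
    }

    fn constant_multiplier_fee(weight_ref_time: u64, multiplier: u128) -> u128 {
        (weight_ref_time as u128).saturating_mul(multiplier)
    }

    // constant_multiplier_fee(50, 10) == 500 and constant_multiplier_fee(16, 1024) == 16_384,
    // matching the constant_fee_works assertions above.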
pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 25_000 * constants::WEIGHT_PER_NANOS, - write: 100_000 * constants::WEIGHT_PER_NANOS, + read: 25_000 * constants::WEIGHT_PER_NANOS.ref_time(), + write: 100_000 * constants::WEIGHT_PER_NANOS.ref_time(), }; } diff --git a/frame/support/src/weights/weight_v2.rs b/frame/support/src/weights/weight_v2.rs new file mode 100644 index 0000000000000..d02aac3268838 --- /dev/null +++ b/frame/support/src/weights/weight_v2.rs @@ -0,0 +1,483 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; +use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign}; +use sp_runtime::{ + traits::{Bounded, CheckedAdd, CheckedSub, One, Zero}, + Perquintill, RuntimeDebug, +}; + +use super::*; + +/// The unit of measurement for computational time spent when executing runtime logic on reference +/// hardware. +pub type RefTimeWeight = u64; + +#[derive( + Encode, + Decode, + MaxEncodedLen, + TypeInfo, + Eq, + PartialEq, + Copy, + Clone, + RuntimeDebug, + Default, + Ord, + PartialOrd, + CompactAs, +)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct Weight { + /// The weight of computational time used based on some reference hardware. + ref_time: RefTimeWeight, +} + +impl Weight { + /// Create a new Weight with zero. + pub const fn new() -> Self { + Self { ref_time: 0 } + } + + /// Set the reference time part of the weight. + pub const fn set_ref_time(mut self, c: RefTimeWeight) -> Self { + self.ref_time = c; + self + } + + /// Return the reference time part of the weight. + pub const fn ref_time(&self) -> RefTimeWeight { + self.ref_time + } + + /// Return a mutable reference time part of the weight. + pub fn ref_time_mut(&mut self) -> &mut RefTimeWeight { + &mut self.ref_time + } + + pub const MAX: Self = Self { ref_time: RefTimeWeight::MAX }; + + /// Get the conservative min of `self` and `other` weight. + pub fn min(&self, other: Self) -> Self { + Self { ref_time: self.ref_time.min(other.ref_time) } + } + + /// Get the aggressive max of `self` and `other` weight. + pub fn max(&self, other: Self) -> Self { + Self { ref_time: self.ref_time.max(other.ref_time) } + } + + /// Try to add some `other` weight while upholding the `limit`. + pub fn try_add(&self, other: &Self, limit: &Self) -> Option { + let total = self.checked_add(other)?; + if total.ref_time > limit.ref_time { + None + } else { + Some(total) + } + } + + /// Construct with reference time weight. 
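try_add above is the piece block authors care about: it only returns a new total if the addition stays under the limit. A hedged usage sketch, assuming the Weight API defined in this new file:

    use frame_support::weights::Weight;

    // Keep adding `step` until the next addition would exceed `limit`; `step` must be
    // non-zero or this never terminates.
    fn fill_up_to(limit: Weight, step: Weight) -> Weight {
        let mut used = Weight::new();
        while let Some(next) = used.try_add(&step, &limit) {
            used = next;
        }
        used
    }

    // fill_up_to(Weight::from_ref_time(1_000_000), Weight::from_ref_time(250_000))
    //     .ref_time() == 1_000_000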
+ pub const fn from_ref_time(ref_time: RefTimeWeight) -> Self { + Self { ref_time } + } + + pub fn checked_mul(self, rhs: u64) -> Option { + let ref_time = self.ref_time.checked_mul(rhs)?; + Some(Self { ref_time }) + } + + pub fn checked_div(self, rhs: u64) -> Option { + let ref_time = self.ref_time.checked_div(rhs)?; + Some(Self { ref_time }) + } + + pub const fn scalar_saturating_mul(self, rhs: u64) -> Self { + Self { ref_time: self.ref_time.saturating_mul(rhs) } + } + + pub const fn scalar_div(self, rhs: u64) -> Self { + Self { ref_time: self.ref_time / rhs } + } +} + +impl Zero for Weight { + fn zero() -> Self { + Self::zero() + } + + fn is_zero(&self) -> bool { + self.ref_time == 0 + } +} + +impl One for Weight { + fn one() -> Self { + Self::one() + } +} + +impl Add for Weight { + type Output = Self; + fn add(self, rhs: Self) -> Self { + Self { ref_time: self.ref_time + rhs.ref_time } + } +} + +impl Sub for Weight { + type Output = Self; + fn sub(self, rhs: Self) -> Self { + Self { ref_time: self.ref_time - rhs.ref_time } + } +} + +impl Mul for Weight { + type Output = Self; + fn mul(self, b: Self) -> Self { + Self { ref_time: b.ref_time * self.ref_time } + } +} + +impl Mul for Weight +where + T: Mul + Copy, +{ + type Output = Self; + fn mul(self, b: T) -> Self { + Self { ref_time: b * self.ref_time } + } +} + +impl Mul for Perbill { + type Output = Weight; + fn mul(self, b: Weight) -> Weight { + Weight { ref_time: self * b.ref_time } + } +} + +impl Mul for Perquintill { + type Output = Weight; + fn mul(self, b: Weight) -> Weight { + Weight { ref_time: self * b.ref_time } + } +} + +impl Mul for u64 { + type Output = Weight; + fn mul(self, b: Weight) -> Weight { + Weight { ref_time: self * b.ref_time } + } +} + +impl Div for Weight +where + u64: Div, + T: Copy, +{ + type Output = Self; + fn div(self, b: T) -> Self { + Self { ref_time: self.ref_time / b } + } +} + +impl Saturating for Weight { + fn saturating_add(self, rhs: Self) -> Self { + self.saturating_add(rhs) + } + + fn saturating_sub(self, rhs: Self) -> Self { + self.saturating_sub(rhs) + } + + fn saturating_mul(self, rhs: Self) -> Self { + self.saturating_mul(rhs) + } + + fn saturating_pow(self, exp: usize) -> Self { + self.saturating_pow(exp) + } +} + +impl CheckedAdd for Weight { + fn checked_add(&self, rhs: &Self) -> Option { + self.checked_add(rhs) + } +} + +impl CheckedSub for Weight { + fn checked_sub(&self, rhs: &Self) -> Option { + self.checked_sub(rhs) + } +} + +impl PaysFee for (Weight, DispatchClass, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.2 + } +} + +impl WeighData for (Weight, DispatchClass) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl WeighData for (Weight, DispatchClass, Pays) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (Weight, DispatchClass) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (Weight, DispatchClass) { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for (Weight, Pays) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (Weight, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for (Weight, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.1 + } +} + +impl From<(Option, Pays)> for PostDispatchInfo { + fn from(post_weight_info: (Option, Pays)) -> Self { + let (actual_weight, 
pays_fee) = post_weight_info; + Self { actual_weight, pays_fee } + } +} + +impl From> for PostDispatchInfo { + fn from(actual_weight: Option) -> Self { + Self { actual_weight, pays_fee: Default::default() } + } +} + +impl WeighData for Weight { + fn weigh_data(&self, _: T) -> Weight { + return *self + } +} + +impl ClassifyDispatch for Weight { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for Weight { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl ClassifyDispatch for (Weight, DispatchClass, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl core::fmt::Display for Weight { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "Weight(ref_time: {})", self.ref_time) + } +} + +impl Bounded for Weight { + fn min_value() -> Self { + Zero::zero() + } + fn max_value() -> Self { + Self::MAX + } +} + +impl AddAssign for Weight { + fn add_assign(&mut self, other: Self) { + *self = Self { ref_time: self.ref_time + other.ref_time }; + } +} + +impl SubAssign for Weight { + fn sub_assign(&mut self, other: Self) { + *self = Self { ref_time: self.ref_time - other.ref_time }; + } +} + +impl sp_runtime::traits::Printable for Weight { + fn print(&self) { + self.ref_time().print() + } +} + +// Re-export common functions so you do not need to import trait. +impl Weight { + pub const fn zero() -> Self { + Self { ref_time: 0 } + } + + pub const fn one() -> Self { + Self { ref_time: 1 } + } + + pub const fn saturating_add(self, rhs: Self) -> Self { + Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) } + } + + pub const fn saturating_sub(self, rhs: Self) -> Self { + Self { ref_time: self.ref_time.saturating_sub(rhs.ref_time) } + } + + pub const fn saturating_mul(self, rhs: Self) -> Self { + Self { ref_time: self.ref_time.saturating_mul(rhs.ref_time) } + } + + pub const fn saturating_pow(self, exp: usize) -> Self { + Self { ref_time: self.ref_time.saturating_pow(exp as u32) } + } + + pub const fn checked_add(&self, rhs: &Self) -> Option { + match self.ref_time.checked_add(rhs.ref_time) { + Some(ref_time) => Some(Self { ref_time }), + None => None, + } + } + + pub const fn checked_sub(&self, rhs: &Self) -> Option { + match self.ref_time.checked_sub(rhs.ref_time) { + Some(ref_time) => Some(Self { ref_time }), + None => None, + } + } +} + +// TODO: Eventually remove these + +impl From> for PostDispatchInfo { + fn from(maybe_actual_computation: Option) -> Self { + let actual_weight = match maybe_actual_computation { + Some(actual_computation) => Some(Weight::new().set_ref_time(actual_computation)), + None => None, + }; + Self { actual_weight, pays_fee: Default::default() } + } +} + +impl From<(Option, Pays)> for PostDispatchInfo { + fn from(post_weight_info: (Option, Pays)) -> Self { + let (maybe_actual_time, pays_fee) = post_weight_info; + let actual_weight = match maybe_actual_time { + Some(actual_time) => Some(Weight::new().set_ref_time(actual_time)), + None => None, + }; + Self { actual_weight, pays_fee } + } +} + +impl WeighData for RefTimeWeight { + fn weigh_data(&self, _: T) -> Weight { + return Weight::new().set_ref_time(*self) + } +} + +impl ClassifyDispatch for RefTimeWeight { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for RefTimeWeight { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for (RefTimeWeight, DispatchClass, Pays) { + fn weigh_data(&self, args: T) -> Weight { + 
return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (RefTimeWeight, DispatchClass, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (RefTimeWeight, DispatchClass, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.2 + } +} + +impl WeighData for (RefTimeWeight, DispatchClass) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (RefTimeWeight, DispatchClass) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (RefTimeWeight, DispatchClass) { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for (RefTimeWeight, Pays) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (RefTimeWeight, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for (RefTimeWeight, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.1 + } +} + +// END TODO diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index d388127f29abd..aed98579a0fd8 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -503,17 +503,25 @@ fn call_encode_is_correct_and_decode_works() { fn call_weight_should_attach_to_call_enum() { use frame_support::{ dispatch::{DispatchInfo, GetDispatchInfo}, - weights::{DispatchClass, Pays}, + weights::{DispatchClass, Pays, Weight}, }; // operational. assert_eq!( module3::Call::::operational {}.get_dispatch_info(), - DispatchInfo { weight: 5, class: DispatchClass::Operational, pays_fee: Pays::Yes }, + DispatchInfo { + weight: Weight::from_ref_time(5), + class: DispatchClass::Operational, + pays_fee: Pays::Yes + }, ); // custom basic assert_eq!( module3::Call::::aux_4 {}.get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes }, + DispatchInfo { + weight: Weight::from_ref_time(3), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + }, ); } diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 497b7bb04c36a..907a52f1184cc 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -23,7 +23,7 @@ use frame_support::{ ConstU32, GetCallName, GetStorageVersion, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade, PalletError, PalletInfoAccess, StorageVersion, }, - weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays, RuntimeDbWeight}, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays, RuntimeDbWeight, Weight}, }; use scale_info::{meta_type, TypeInfo}; use sp_io::{ @@ -165,7 +165,7 @@ pub mod pallet { let _ = T::AccountId::from(SomeType1); // Test for where clause let _ = T::AccountId::from(SomeType2); // Test for where clause Self::deposit_event(Event::Something(10)); - 10 + Weight::from_ref_time(10) } fn on_finalize(_: BlockNumberFor) { let _ = T::AccountId::from(SomeType1); // Test for where clause @@ -176,7 +176,7 @@ pub mod pallet { let _ = T::AccountId::from(SomeType1); // Test for where clause let _ = T::AccountId::from(SomeType2); // Test for where clause Self::deposit_event(Event::Something(30)); - 30 + Weight::from_ref_time(30) } fn integrity_test() { let _ = T::AccountId::from(SomeType1); // Test for where clause @@ -190,7 +190,7 @@ pub mod pallet { T::AccountId: From + From + SomeAssociation1, { /// Doc comment put in metadata - 
#[pallet::weight(Weight::from(*_foo))] + #[pallet::weight(Weight::from_ref_time(*_foo as u64))] pub fn foo( origin: OriginFor, #[pallet::compact] _foo: u32, @@ -492,14 +492,14 @@ pub mod pallet2 { { fn on_initialize(_: BlockNumberFor) -> Weight { Self::deposit_event(Event::Something(11)); - 0 + Weight::zero() } fn on_finalize(_: BlockNumberFor) { Self::deposit_event(Event::Something(21)); } fn on_runtime_upgrade() -> Weight { Self::deposit_event(Event::Something(31)); - 0 + Weight::zero() } } @@ -668,7 +668,11 @@ fn call_expand() { let call_foo = pallet::Call::::foo { foo: 3, bar: 0 }; assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } + DispatchInfo { + weight: frame_support::weights::Weight::from_ref_time(3), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!( @@ -1046,10 +1050,10 @@ fn pallet_hooks_expand() { TestExternalities::default().execute_with(|| { frame_system::Pallet::::set_block_number(1); - assert_eq!(AllPalletsWithoutSystem::on_initialize(1), 10); + assert_eq!(AllPalletsWithoutSystem::on_initialize(1), Weight::from_ref_time(10)); AllPalletsWithoutSystem::on_finalize(1); - assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), 30); + assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), Weight::from_ref_time(30)); assert_eq!( frame_system::Pallet::::events()[0].event, @@ -1085,10 +1089,16 @@ fn all_pallets_type_reversed_order_is_correct() { #[allow(deprecated)] { - assert_eq!(AllPalletsWithoutSystemReversed::on_initialize(1), 10); + assert_eq!( + AllPalletsWithoutSystemReversed::on_initialize(1), + Weight::from_ref_time(10) + ); AllPalletsWithoutSystemReversed::on_finalize(1); - assert_eq!(AllPalletsWithoutSystemReversed::on_runtime_upgrade(), 30); + assert_eq!( + AllPalletsWithoutSystemReversed::on_runtime_upgrade(), + Weight::from_ref_time(30) + ); } assert_eq!( @@ -1155,7 +1165,7 @@ fn migrate_from_pallet_version_to_storage_version() { >(&db_weight); // 4 pallets, 2 writes and every write costs 5 weight. 
- assert_eq!(4 * 2 * 5, weight); + assert_eq!(Weight::from_ref_time(4 * 2 * 5), weight); // All pallet versions should be removed assert!(sp_io::storage::get(&pallet_version_key(Example::name())).is_none()); diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 9327f5b6a3304..4d597e24356c7 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -31,7 +31,10 @@ impl SomeAssociation for u64 { mod pallet_old { use super::SomeAssociation; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, traits::Get, weights::Weight, Parameter, + decl_error, decl_event, decl_module, decl_storage, + traits::Get, + weights::{RefTimeWeight, Weight}, + Parameter, }; use frame_system::ensure_root; @@ -40,7 +43,7 @@ mod pallet_old { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + SomeAssociation; type Event: From> + Into<::Event>; @@ -75,7 +78,7 @@ mod pallet_old { fn deposit_event() = default; const SomeConst: T::Balance = T::SomeConst::get(); - #[weight = >::into(new_value.clone())] + #[weight = >::into(new_value.clone())] fn set_dummy(origin, #[compact] new_value: T::Balance) { ensure_root(origin)?; @@ -85,7 +88,7 @@ mod pallet_old { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - 10 + Weight::from_ref_time(10) } fn on_finalize(_n: T::BlockNumber) { @@ -113,7 +116,7 @@ pub mod pallet { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + MaybeSerializeDeserialize + SomeAssociation @@ -131,7 +134,7 @@ pub mod pallet { impl Hooks for Pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - 10 + Weight::from_ref_time(10) } fn on_finalize(_n: T::BlockNumber) { @@ -141,7 +144,7 @@ pub mod pallet { #[pallet::call] impl Pallet { - #[pallet::weight(>::into(new_value.clone()))] + #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index 3de45df223674..2fd6833eaa428 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -23,13 +23,16 @@ use frame_support::traits::{ConstU32, ConstU64}; mod pallet_old { use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, traits::Get, weights::Weight, Parameter, + decl_error, decl_event, decl_module, decl_storage, + traits::Get, + weights::{RefTimeWeight, Weight}, + Parameter, }; use frame_system::ensure_root; pub trait Config: frame_system::Config { type SomeConst: Get; - type Balance: Parameter + codec::HasCompact + From + Into + Default; + type Balance: Parameter + codec::HasCompact + From + Into + Default; type Event: From> + Into<::Event>; } @@ -62,7 +65,7 @@ mod pallet_old { fn deposit_event() = default; const SomeConst: T::Balance = T::SomeConst::get(); - #[weight = >::into(new_value.clone())] + #[weight = >::into(new_value.clone())] fn set_dummy(origin, #[compact] new_value: T::Balance) { ensure_root(origin)?; @@ -72,7 +75,7 @@ mod pallet_old { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - 10 + Weight::from_ref_time(10) } fn on_finalize(_n: T::BlockNumber) { @@ -99,7 +102,7 @@ pub mod pallet { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + 
MaybeSerializeDeserialize + scale_info::StaticTypeInfo; @@ -116,7 +119,7 @@ pub mod pallet { impl, I: 'static> Hooks for Pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { >::put(T::Balance::from(10)); - 10 + Weight::from_ref_time(10) } fn on_finalize(_n: T::BlockNumber) { @@ -126,7 +129,7 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { - #[pallet::weight(>::into(new_value.clone()))] + #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index f4ef5f802c44f..2ae910e73d87e 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -54,10 +54,10 @@ pub mod pallet { fn on_initialize(_: BlockNumberFor) -> Weight { if TypeId::of::() == TypeId::of::<()>() { Self::deposit_event(Event::Something(10)); - 10 + Weight::from_ref_time(10) } else { Self::deposit_event(Event::Something(11)); - 11 + Weight::from_ref_time(11) } } fn on_finalize(_: BlockNumberFor) { @@ -70,10 +70,10 @@ pub mod pallet { fn on_runtime_upgrade() -> Weight { if TypeId::of::() == TypeId::of::<()>() { Self::deposit_event(Event::Something(30)); - 30 + Weight::from_ref_time(30) } else { Self::deposit_event(Event::Something(31)); - 31 + Weight::from_ref_time(31) } } fn integrity_test() {} @@ -82,7 +82,7 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { /// Doc comment put in metadata - #[pallet::weight(Weight::from(*_foo))] + #[pallet::weight(Weight::from_ref_time(*_foo as u64))] pub fn foo( origin: OriginFor, #[pallet::compact] _foo: u32, @@ -345,12 +345,18 @@ frame_support::construct_runtime!( } ); +use frame_support::weights::Weight; + #[test] fn call_expand() { let call_foo = pallet::Call::::foo { foo: 3 }; assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } + DispatchInfo { + weight: Weight::from_ref_time(3), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!(pallet::Call::::get_call_names(), &["foo", "foo_storage_layer"]); @@ -358,7 +364,11 @@ fn call_expand() { let call_foo = pallet::Call::::foo { foo: 3 }; assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } + DispatchInfo { + weight: Weight::from_ref_time(3), + class: DispatchClass::Normal, + pays_fee: Pays::Yes + } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!( @@ -651,10 +661,10 @@ fn pallet_hooks_expand() { TestExternalities::default().execute_with(|| { frame_system::Pallet::::set_block_number(1); - assert_eq!(AllPalletsWithoutSystem::on_initialize(1), 21); + assert_eq!(AllPalletsWithoutSystem::on_initialize(1), Weight::from_ref_time(21)); AllPalletsWithoutSystem::on_finalize(1); - assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), 61); + assert_eq!(AllPalletsWithoutSystem::on_runtime_upgrade(), Weight::from_ref_time(61)); assert_eq!( frame_system::Pallet::::events()[0].event, diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 45cdfad67b8ae..e674e49eddbe5 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ 
error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 158 others + and 159 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index d7441e8b18562..ecdc18263432e 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 158 others + and 159 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index 
6c49e1220a3a5..d9cd20711403d 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 75 others + and 76 others = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 9a0731683a953..9a4e8d740cb2c 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 75 others + and 76 others = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs index 7ed8454668327..6415c3c0d2c07 100644 --- a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -54,7 +54,7 @@ frame_support::decl_module! { } fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { - 0 + frame_support::weights::Weight::zero() } } } diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 0bc34fcbc5be2..017298dab3928 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -16,7 +16,10 @@ // limitations under the License. use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + traits::{ConstU32, ConstU64}, + weights::Weight, +}; use sp_core::H256; use sp_runtime::{ testing::Header, @@ -61,7 +64,7 @@ frame_support::construct_runtime!( frame_support::parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::with_sensible_defaults( - 4 * 1024 * 1024, Perbill::from_percent(75), + Weight::from_ref_time(4 * 1024 * 1024), Perbill::from_percent(75), ); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength::max_with_normal_ratio( diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index 5090093fe168f..59a6e14aca175 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -101,7 +101,7 @@ impl SignedExtension for CheckMortality { mod tests { use super::*; use crate::mock::{new_test_ext, System, Test, CALL}; - use frame_support::weights::{DispatchClass, DispatchInfo, Pays}; + use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; use sp_core::H256; #[test] @@ -126,8 +126,11 @@ mod tests { #[test] fn signed_ext_check_era_should_change_longevity() { new_test_ext().execute_with(|| { - let normal = - DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let normal = DispatchInfo { + weight: Weight::from_ref_time(100), + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }; let len = 0_usize; let ext = ( crate::CheckWeight::::new(), diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index b59c36ecb53b5..b7232c430696d 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -19,7 +19,7 @@ use crate::{limits::BlockWeights, Config, Pallet}; use codec::{Decode, Encode}; use frame_support::{ traits::Get, - weights::{DispatchClass, DispatchInfo, PostDispatchInfo}, + weights::{DispatchClass, DispatchInfo, PostDispatchInfo, Weight}, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -238,7 +238,7 @@ where } let unspent = post_info.calc_unspent(info); - if unspent > 0 { + if unspent > Weight::zero() { crate::BlockWeight::::mutate(|current_weight| { current_weight.sub(unspent, info.class); }) @@ -297,7 +297,7 @@ mod tests { fn check(call: impl FnOnce(&DispatchInfo, usize)) { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: Weight::max_value(), + weight: Weight::MAX, class: DispatchClass::Mandatory, ..Default::default() }; @@ -309,7 +309,7 @@ mod tests { check(|max, len| { assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); - assert_eq!(System::block_weight().total(), Weight::max_value()); + assert_eq!(System::block_weight().total(), Weight::MAX); assert!(System::block_weight().total() > block_weight_limit()); }); check(|max, len| { @@ -321,7 +321,8 @@ mod tests { fn normal_extrinsic_limited_by_maximum_extrinsic_weight() { new_test_ext().execute_with(|| { let max = DispatchInfo { - weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + 1, + weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + + Weight::one(), class: DispatchClass::Normal, ..Default::default() }; @@ -347,7 +348,7 @@ mod tests { let okay = DispatchInfo { weight, class: DispatchClass::Operational, ..Default::default() }; let max = DispatchInfo { - weight: weight + 1, + weight: weight + Weight::one(), class: DispatchClass::Operational, ..Default::default() }; @@ -364,8 +365,8 @@ mod tests { #[test] fn register_extra_weight_unchecked_doesnt_care_about_limits() { new_test_ext().execute_with(|| { - System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Normal); - 
assert_eq!(System::block_weight().total(), Weight::max_value()); + System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Normal); + assert_eq!(System::block_weight().total(), Weight::MAX); assert!(System::block_weight().total() > block_weight_limit()); }); } @@ -378,9 +379,10 @@ mod tests { // 10 is taken for block execution weight // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) // And Operational can be 256 to produce a full block (-5 for base) - let max_normal = DispatchInfo { weight: 753, ..Default::default() }; + let max_normal = + DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let rest_operational = DispatchInfo { - weight: 251, + weight: Weight::from_ref_time(251), class: DispatchClass::Operational, ..Default::default() }; @@ -388,9 +390,9 @@ mod tests { let len = 0_usize; assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(System::block_weight().total(), 768); + assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - assert_eq!(block_weight_limit(), 1024); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); assert_eq!(System::block_weight().total(), block_weight_limit()); // Checking single extrinsic should not take current block weight into account. assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); @@ -401,9 +403,10 @@ mod tests { fn dispatch_order_does_not_effect_weight_logic() { new_test_ext().execute_with(|| { // We switch the order of `full_block_with_normal_and_operational` - let max_normal = DispatchInfo { weight: 753, ..Default::default() }; + let max_normal = + DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let rest_operational = DispatchInfo { - weight: 251, + weight: Weight::from_ref_time(251), class: DispatchClass::Operational, ..Default::default() }; @@ -412,9 +415,9 @@ mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); // Extra 15 here from block execution + base extrinsic weight - assert_eq!(System::block_weight().total(), 266); + assert_eq!(System::block_weight().total(), Weight::from_ref_time(266)); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(block_weight_limit(), 1024); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); assert_eq!(System::block_weight().total(), block_weight_limit()); }); } @@ -423,11 +426,14 @@ mod tests { fn operational_works_on_full_block() { new_test_ext().execute_with(|| { // An on_initialize takes up the whole block! (Every time!) 
- System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Mandatory); - let dispatch_normal = - DispatchInfo { weight: 251, class: DispatchClass::Normal, ..Default::default() }; + System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Mandatory); + let dispatch_normal = DispatchInfo { + weight: Weight::from_ref_time(251), + class: DispatchClass::Normal, + ..Default::default() + }; let dispatch_operational = DispatchInfo { - weight: 251, + weight: Weight::from_ref_time(251), class: DispatchClass::Operational, ..Default::default() }; @@ -453,9 +459,9 @@ mod tests { #[test] fn signed_ext_check_weight_works_operational_tx() { new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: 100, ..Default::default() }; + let normal = DispatchInfo { weight: Weight::from_ref_time(100), ..Default::default() }; let op = DispatchInfo { - weight: 100, + weight: Weight::from_ref_time(100), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -489,7 +495,7 @@ mod tests { fn signed_ext_check_weight_block_size_works() { new_test_ext().execute_with(|| { let normal = DispatchInfo::default(); - let normal_limit = normal_weight_limit() as usize; + let normal_limit = normal_weight_limit().ref_time() as usize; let reset_check_weight = |tx, s, f| { AllExtrinsicsLen::::put(0); let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); @@ -505,8 +511,11 @@ mod tests { reset_check_weight(&normal, normal_limit + 1, true); // Operational ones don't have this limit. - let op = - DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes }; + let op = DispatchInfo { + weight: Weight::zero(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; reset_check_weight(&op, normal_limit, false); reset_check_weight(&op, normal_limit + 100, false); reset_check_weight(&op, 1024, false); @@ -518,12 +527,14 @@ mod tests { fn signed_ext_check_weight_works_normal_tx() { new_test_ext().execute_with(|| { let normal_limit = normal_weight_limit(); - let small = DispatchInfo { weight: 100, ..Default::default() }; + let small = DispatchInfo { weight: Weight::from_ref_time(100), ..Default::default() }; let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; let medium = DispatchInfo { weight: normal_limit - base_extrinsic, ..Default::default() }; - let big = - DispatchInfo { weight: normal_limit - base_extrinsic + 1, ..Default::default() }; + let big = DispatchInfo { + weight: normal_limit - base_extrinsic + Weight::one(), + ..Default::default() + }; let len = 0_usize; let reset_check_weight = |i, f, s| { @@ -538,9 +549,9 @@ mod tests { } }; - reset_check_weight(&small, false, 0); - reset_check_weight(&medium, false, 0); - reset_check_weight(&big, true, 1); + reset_check_weight(&small, false, Weight::zero()); + reset_check_weight(&medium, false, Weight::zero()); + reset_check_weight(&big, true, Weight::one()); }) } @@ -548,20 +559,26 @@ mod tests { fn signed_ext_check_weight_refund_works() { new_test_ext().execute_with(|| { // This is half of the max block weight - let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = - PostDispatchInfo { actual_weight: Some(128), pays_fee: Default::default() }; + let info = DispatchInfo { weight: Weight::from_ref_time(512), ..Default::default() }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_ref_time(128)), + pays_fee: Default::default(), + }; let len = 0_usize; let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; 
// We allow 75% for normal transaction, so we put 25% - extrinsic base weight BlockWeight::::mutate(|current_weight| { - current_weight.set(0, DispatchClass::Mandatory); - current_weight.set(256 - base_extrinsic, DispatchClass::Normal); + current_weight.set(Weight::zero(), DispatchClass::Mandatory); + current_weight + .set(Weight::from_ref_time(256) - base_extrinsic, DispatchClass::Normal); }); let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); - assert_eq!(BlockWeight::::get().total(), info.weight + 256); + assert_eq!( + BlockWeight::::get().total(), + info.weight + Weight::from_ref_time(256) + ); assert_ok!(CheckWeight::::post_dispatch( Some(pre), @@ -570,27 +587,34 @@ mod tests { len, &Ok(()) )); - assert_eq!(BlockWeight::::get().total(), post_info.actual_weight.unwrap() + 256); + assert_eq!( + BlockWeight::::get().total(), + post_info.actual_weight.unwrap() + Weight::from_ref_time(256) + ); }) } #[test] fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { new_test_ext().execute_with(|| { - let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = - PostDispatchInfo { actual_weight: Some(700), pays_fee: Default::default() }; + let info = DispatchInfo { weight: Weight::from_ref_time(512), ..Default::default() }; + let post_info = PostDispatchInfo { + actual_weight: Some(Weight::from_ref_time(700)), + pays_fee: Default::default(), + }; let len = 0_usize; BlockWeight::::mutate(|current_weight| { - current_weight.set(0, DispatchClass::Mandatory); - current_weight.set(128, DispatchClass::Normal); + current_weight.set(Weight::zero(), DispatchClass::Mandatory); + current_weight.set(Weight::from_ref_time(128), DispatchClass::Normal); }); let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); assert_eq!( BlockWeight::::get().total(), - info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, + info.weight + + Weight::from_ref_time(128) + + block_weights().get(DispatchClass::Normal).base_extrinsic, ); assert_ok!(CheckWeight::::post_dispatch( @@ -602,7 +626,9 @@ mod tests { )); assert_eq!( BlockWeight::::get().total(), - info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, + info.weight + + Weight::from_ref_time(128) + + block_weights().get(DispatchClass::Normal).base_extrinsic, ); }) } @@ -611,7 +637,7 @@ mod tests { fn zero_weight_extrinsic_still_has_base_weight() { new_test_ext().execute_with(|| { let weights = block_weights(); - let free = DispatchInfo { weight: 0, ..Default::default() }; + let free = DispatchInfo { weight: Weight::zero(), ..Default::default() }; let len = 0_usize; // Initial weight from `weights.base_block` @@ -630,9 +656,10 @@ mod tests { // Max block is 1024 // Max normal is 768 (75%) // Max mandatory is unlimited - let max_normal = DispatchInfo { weight: 753, ..Default::default() }; + let max_normal = + DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let mandatory = DispatchInfo { - weight: 1019, + weight: Weight::from_ref_time(1019), class: DispatchClass::Mandatory, ..Default::default() }; @@ -640,10 +667,10 @@ mod tests { let len = 0_usize; assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(System::block_weight().total(), 768); + assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); - assert_eq!(block_weight_limit(), 1024); - assert_eq!(System::block_weight().total(), 1024 + 768); + 
assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); + assert_eq!(System::block_weight().total(), Weight::from_ref_time(1024 + 768)); assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); }); } @@ -652,30 +679,36 @@ mod tests { fn no_max_total_should_still_be_limited_by_max_block() { // given let maximum_weight = BlockWeights::builder() - .base_block(0) + .base_block(Weight::zero()) .for_class(DispatchClass::non_mandatory(), |w| { - w.base_extrinsic = 0; - w.max_total = Some(20); + w.base_extrinsic = Weight::zero(); + w.max_total = Some(Weight::from_ref_time(20)); }) .for_class(DispatchClass::Mandatory, |w| { - w.base_extrinsic = 0; - w.reserved = Some(5); + w.base_extrinsic = Weight::zero(); + w.reserved = Some(Weight::from_ref_time(5)); w.max_total = None; }) .build_or_panic(); let all_weight = crate::ConsumedWeight::new(|class| match class { - DispatchClass::Normal => 10, - DispatchClass::Operational => 10, - DispatchClass::Mandatory => 0, + DispatchClass::Normal => Weight::from_ref_time(10), + DispatchClass::Operational => Weight::from_ref_time(10), + DispatchClass::Mandatory => Weight::zero(), }); assert_eq!(maximum_weight.max_block, all_weight.total()); // fits into reserved - let mandatory1 = - DispatchInfo { weight: 5, class: DispatchClass::Mandatory, ..Default::default() }; + let mandatory1 = DispatchInfo { + weight: Weight::from_ref_time(5), + class: DispatchClass::Mandatory, + ..Default::default() + }; // does not fit into reserved and the block is full. - let mandatory2 = - DispatchInfo { weight: 6, class: DispatchClass::Mandatory, ..Default::default() }; + let mandatory2 = DispatchInfo { + weight: Weight::from_ref_time(6), + class: DispatchClass::Mandatory, + ..Default::default() + }; // when assert_ok!(calculate_consumed_weight::<::Call>( diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 990e3cdd14de1..d9afa17eec959 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1327,18 +1327,18 @@ impl Pallet { ).deconstruct(), Self::block_weight().get(DispatchClass::Normal), sp_runtime::Percent::from_rational( - *Self::block_weight().get(DispatchClass::Normal), - T::BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap_or(Bounded::max_value()) + Self::block_weight().get(DispatchClass::Normal).ref_time(), + T::BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap_or(Bounded::max_value()).ref_time() ).deconstruct(), Self::block_weight().get(DispatchClass::Operational), sp_runtime::Percent::from_rational( - *Self::block_weight().get(DispatchClass::Operational), - T::BlockWeights::get().get(DispatchClass::Operational).max_total.unwrap_or(Bounded::max_value()) + Self::block_weight().get(DispatchClass::Operational).ref_time(), + T::BlockWeights::get().get(DispatchClass::Operational).max_total.unwrap_or(Bounded::max_value()).ref_time() ).deconstruct(), Self::block_weight().get(DispatchClass::Mandatory), sp_runtime::Percent::from_rational( - *Self::block_weight().get(DispatchClass::Mandatory), - T::BlockWeights::get().get(DispatchClass::Mandatory).max_total.unwrap_or(Bounded::max_value()) + Self::block_weight().get(DispatchClass::Mandatory).ref_time(), + T::BlockWeights::get().get(DispatchClass::Mandatory).max_total.unwrap_or(Bounded::max_value()).ref_time() ).deconstruct(), ); ExecutionPhase::::kill(); diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index 6076414ba6bcb..d9be460b3a4fb 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -27,7 +27,7 @@ use 
frame_support::weights::{constants, DispatchClass, OneOrMany, PerDispatchClass, Weight}; use scale_info::TypeInfo; -use sp_runtime::{Perbill, RuntimeDebug}; +use sp_runtime::{traits::Bounded, Perbill, RuntimeDebug}; /// Block length limit configuration. #[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode, TypeInfo)] @@ -230,14 +230,15 @@ impl BlockWeights { // base_for_class error_assert!( (max_for_class > self.base_block && max_for_class > base_for_class) - || max_for_class == 0, + || max_for_class == Weight::zero(), &mut error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", class, max_for_class, self.base_block, base_for_class, ); // Max extrinsic can't be greater than max_for_class. error_assert!( - weights.max_extrinsic.unwrap_or(0) <= max_for_class.saturating_sub(base_for_class), + weights.max_extrinsic.unwrap_or(Weight::zero()) <= + max_for_class.saturating_sub(base_for_class), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, @@ -246,14 +247,14 @@ impl BlockWeights { ); // Max extrinsic should not be 0 error_assert!( - weights.max_extrinsic.unwrap_or_else(Weight::max_value) > 0, + weights.max_extrinsic.unwrap_or_else(Weight::max_value) > Weight::zero(), &mut error, "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. error_assert!( - reserved > base_for_class || reserved == 0, + reserved > base_for_class || reserved == Weight::zero(), &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, @@ -262,7 +263,7 @@ impl BlockWeights { ); // Make sure max block is greater than max_total if it's set. error_assert!( - self.max_block >= weights.max_total.unwrap_or(0), + self.max_block >= weights.max_total.unwrap_or(Weight::zero()), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, @@ -294,9 +295,9 @@ impl BlockWeights { /// is not suitable for production deployments. pub fn simple_max(block_weight: Weight) -> Self { Self::builder() - .base_block(0) + .base_block(Weight::new()) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = 0; + weights.base_extrinsic = Weight::new(); }) .for_class(DispatchClass::non_mandatory(), |weights| { weights.max_total = block_weight.into(); @@ -333,9 +334,10 @@ impl BlockWeights { BlockWeightsBuilder { weights: BlockWeights { base_block: constants::BlockExecutionWeight::get(), - max_block: 0, + max_block: Weight::zero(), per_class: PerDispatchClass::new(|class| { - let initial = if class == DispatchClass::Mandatory { None } else { Some(0) }; + let initial = + if class == DispatchClass::Mandatory { None } else { Some(Weight::zero()) }; WeightsPerClass { base_extrinsic: constants::ExtrinsicBaseWeight::get(), max_extrinsic: None, diff --git a/frame/system/src/migrations/mod.rs b/frame/system/src/migrations/mod.rs index f02af7a316fe1..15746d7376ac5 100644 --- a/frame/system/src/migrations/mod.rs +++ b/frame/system/src/migrations/mod.rs @@ -81,7 +81,7 @@ pub fn migrate_from_single_u8_to_triple_ref_count() -> Wei ); >::put(true); >::put(true); - Weight::max_value() + Weight::MAX } /// Migrate from unique `u32` reference counting to triple `u32` reference counting. 
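The hunks above and below follow one mechanical pattern: `Weight` is no longer a bare `u64`, so integer literals become `Weight::from_ref_time(..)`, `Weight::max_value()` becomes `Weight::MAX`, and call sites that need the scalar part (for example `Percent::from_rational`) go through `.ref_time()`. The following standalone sketch is not part of the patch; it uses a simplified stand-in for `frame_support::weights::Weight`, mirroring only the constructors and accessor shown in this series, purely to illustrate the call-site changes.

// --- illustrative sketch, not part of the patch ---
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Weight { ref_time: u64 }

impl Weight {
    const MAX: Self = Self { ref_time: u64::MAX };
    const fn from_ref_time(ref_time: u64) -> Self { Self { ref_time } }
    const fn zero() -> Self { Self { ref_time: 0 } }
    const fn ref_time(self) -> u64 { self.ref_time }
}

fn main() {
    let used = Weight::from_ref_time(768);
    let max_total = Weight::from_ref_time(1024);

    // Before: ratios were computed directly on bare u64 weights (`*used`, `max_total`).
    // After: the scalar component is extracted explicitly.
    let percent_used = used.ref_time() * 100 / max_total.ref_time();
    assert_eq!(percent_used, 75);

    // Literal comparisons become comparisons against constructors and constants.
    assert!(used > Weight::zero());
    assert!(used < Weight::MAX);
}
// --- end of sketch ---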
@@ -99,7 +99,7 @@ pub fn migrate_from_single_to_triple_ref_count() -> Weight translated ); >::put(true); - Weight::max_value() + Weight::MAX } /// Migrate from dual `u32` reference counting to triple `u32` reference counting. @@ -117,5 +117,5 @@ pub fn migrate_from_dual_to_triple_ref_count() -> Weight { translated ); >::put(true); - Weight::max_value() + Weight::MAX } diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index f3f542aa83a9a..23ab3c2af20b0 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -42,7 +42,7 @@ frame_support::construct_runtime!( ); const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -const MAX_BLOCK_WEIGHT: Weight = 1024; +const MAX_BLOCK_WEIGHT: Weight = Weight::from_ref_time(1024); parameter_types! { pub Version: RuntimeVersion = RuntimeVersion { @@ -60,9 +60,9 @@ parameter_types! { write: 100, }; pub RuntimeBlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - .base_block(10) + .base_block(Weight::from_ref_time(10)) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = 5; + weights.base_extrinsic = Weight::from_ref_time(5); }) .for_class(DispatchClass::Normal, |weights| { weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT); diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 417dca12045ee..f82ea338bd146 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -224,7 +224,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { System::initialize(&1, &[0u8; 32].into(), &Default::default()); System::note_finished_initialize(); - let pre_info = DispatchInfo { weight: 1000, ..Default::default() }; + let pre_info = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); System::note_applied_extrinsic(&Ok(Some(1000).into()), pre_info); System::note_applied_extrinsic( @@ -236,7 +236,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { System::note_applied_extrinsic(&Ok(Pays::No.into()), pre_info); System::note_applied_extrinsic(&Ok((Some(2_500_000), Pays::No).into()), pre_info); System::note_applied_extrinsic(&Ok((Some(500), Pays::No).into()), pre_info); - System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.with_weight(999)), pre_info); + System::note_applied_extrinsic( + &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(999))), + pre_info, + ); System::note_applied_extrinsic( &Err(DispatchErrorWithPostInfo { @@ -247,14 +250,20 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { ); System::note_applied_extrinsic( &Err(DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { actual_weight: Some(800), pays_fee: Pays::Yes }, + post_info: PostDispatchInfo { + actual_weight: Some(Weight::from_ref_time(800)), + pays_fee: Pays::Yes, + }, error: DispatchError::BadOrigin, }), pre_info, ); System::note_applied_extrinsic( &Err(DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { actual_weight: Some(800), pays_fee: Pays::No }, + post_info: PostDispatchInfo { + actual_weight: Some(Weight::from_ref_time(800)), + pays_fee: Pays::No, + }, error: DispatchError::BadOrigin, }), pre_info, @@ -266,7 +275,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(0), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: 300, ..Default::default() }, + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(300), + ..Default::default() + }, } .into(), topics: 
vec![] @@ -274,7 +286,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: 1000, ..Default::default() }, + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000), + ..Default::default() + }, } .into(), topics: vec![] @@ -282,7 +297,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { EventRecord { phase: Phase::ApplyExtrinsic(2), event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { weight: 1000, ..Default::default() }, + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000), + ..Default::default() + }, } .into(), topics: vec![] @@ -291,7 +309,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(3), event: SysEvent::ExtrinsicSuccess { dispatch_info: DispatchInfo { - weight: 1000, + weight: Weight::from_ref_time(1000), pays_fee: Pays::Yes, ..Default::default() }, @@ -303,7 +321,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(4), event: SysEvent::ExtrinsicSuccess { dispatch_info: DispatchInfo { - weight: 1000, + weight: Weight::from_ref_time(1000), pays_fee: Pays::No, ..Default::default() }, @@ -315,7 +333,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(5), event: SysEvent::ExtrinsicSuccess { dispatch_info: DispatchInfo { - weight: 1000, + weight: Weight::from_ref_time(1000), pays_fee: Pays::No, ..Default::default() }, @@ -327,7 +345,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(6), event: SysEvent::ExtrinsicSuccess { dispatch_info: DispatchInfo { - weight: 500, + weight: Weight::from_ref_time(500), pays_fee: Pays::No, ..Default::default() }, @@ -339,7 +357,10 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { phase: Phase::ApplyExtrinsic(7), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { weight: 999, ..Default::default() }, + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(999), + ..Default::default() + }, } .into(), topics: vec![] @@ -349,7 +370,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), dispatch_info: DispatchInfo { - weight: 1000, + weight: Weight::from_ref_time(1000), pays_fee: Pays::Yes, ..Default::default() }, @@ -362,7 +383,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), dispatch_info: DispatchInfo { - weight: 800, + weight: Weight::from_ref_time(800), pays_fee: Pays::Yes, ..Default::default() }, @@ -375,7 +396,7 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), dispatch_info: DispatchInfo { - weight: 800, + weight: Weight::from_ref_time(800), pays_fee: Pays::No, ..Default::default() }, diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 19719032587ef..4f7f168eb55ab 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for frame_system. 
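The weight functions below are generated by the benchmarking tooling; after this change they build a `Weight` by composition (`saturating_add`, `scalar_saturating_mul`) instead of casting integers. A hedged, self-contained sketch of that composition pattern follows; the `Weight` stand-in, the `DbWeight` helper, and its per-write cost are illustrative assumptions, not the real `frame_support` definitions.

// --- illustrative sketch, not part of the patch ---
type RefTimeWeight = u64;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Weight { ref_time: RefTimeWeight }

impl Weight {
    const fn from_ref_time(ref_time: RefTimeWeight) -> Self { Self { ref_time } }
    const fn saturating_add(self, rhs: Self) -> Self {
        Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) }
    }
    const fn scalar_saturating_mul(self, rhs: u64) -> Self {
        Self { ref_time: self.ref_time.saturating_mul(rhs) }
    }
}

// Stand-in for `T::DbWeight::get()`; the per-write cost here is made up for the example.
struct DbWeight;
impl DbWeight {
    const fn writes(&self, n: u64) -> Weight { Weight::from_ref_time(100_000 * n) }
}

// Shape of a generated function such as `set_storage(i)`: a base term, a per-item
// term scaled with `scalar_saturating_mul`, plus the database writes.
fn set_storage_weight(i: u32) -> Weight {
    Weight::from_ref_time(0)
        .saturating_add(Weight::from_ref_time(603_000).scalar_saturating_mul(i as u64))
        .saturating_add(DbWeight.writes(i as u64))
}

fn main() {
    assert_eq!(set_storage_weight(2), Weight::from_ref_time(2 * 603_000 + 2 * 100_000));
}
// --- end of sketch ---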
@@ -59,44 +59,44 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 3932160]`. fn remark(_b: u32, ) -> Weight { - (1_000_000 as Weight) + Weight::from_ref_time(1_000_000 as RefTimeWeight) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) fn set_heap_pages() -> Weight { - (5_367_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(5_367_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `i` is `[1, 1000]`. fn set_storage(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((603_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(603_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `i` is `[1, 1000]`. fn kill_storage(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((513_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(513_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `p` is `[1, 1000]`. fn kill_prefix(p: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_026_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(1_026_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } } @@ -104,43 +104,43 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// The range of component `b` is `[0, 3932160]`. fn remark(_b: u32, ) -> Weight { - (1_000_000 as Weight) + Weight::from_ref_time(1_000_000 as RefTimeWeight) } /// The range of component `b` is `[0, 3932160]`. 
fn remark_with_event(b: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) fn set_heap_pages() -> Weight { - (5_367_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(5_367_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `i` is `[1, 1000]`. fn set_storage(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((603_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(603_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `i` is `[1, 1000]`. fn kill_storage(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((513_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(513_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `p` is `[1, 1000]`. fn kill_prefix(p: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((1_026_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(1_026_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } } diff --git a/frame/timestamp/src/mock.rs b/frame/timestamp/src/mock.rs index 0da94deb7112a..b4c377cfa30ef 100644 --- a/frame/timestamp/src/mock.rs +++ b/frame/timestamp/src/mock.rs @@ -49,7 +49,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs index 6b4ebfa74dd87..f71a0f753a43e 100644 --- a/frame/timestamp/src/weights.rs +++ b/frame/timestamp/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_timestamp. @@ -54,12 +54,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:1) // Storage: Babe CurrentSlot (r:1 w:0) fn set() -> Weight { - (8_080_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(8_080_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } fn on_finalize() -> Weight { - (2_681_000 as Weight) + Weight::from_ref_time(2_681_000 as RefTimeWeight) } } @@ -68,11 +68,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:1) // Storage: Babe CurrentSlot (r:1 w:0) fn set() -> Weight { - (8_080_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(8_080_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } fn on_finalize() -> Weight { - (2_681_000 as Weight) + Weight::from_ref_time(2_681_000 as RefTimeWeight) } } diff --git a/frame/tips/src/migrations/v4.rs b/frame/tips/src/migrations/v4.rs index 34f7a43ec12de..5e10fa7dd2c6d 100644 --- a/frame/tips/src/migrations/v4.rs +++ b/frame/tips/src/migrations/v4.rs @@ -49,7 +49,7 @@ pub fn migrate::on_chain_storage_version(); @@ -84,7 +84,7 @@ pub fn migrate WeightInfo for SubstrateWeight { // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:1 w:1) fn report_awesome(r: u32, ) -> Weight { - (30_669_000 as Weight) + Weight::from_ref_time(30_669_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn retract_tip() -> Weight { - (28_768_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_768_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:0 w:1) fn tip_new(r: u32, t: u32, ) -> Weight { - (20_385_000 as Weight) + 
Weight::from_ref_time(20_385_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((166_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(166_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Tips (r:1 w:1) fn tip(t: u32, ) -> Weight { - (12_287_000 as Weight) + Weight::from_ref_time(12_287_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add((363_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(363_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn close_tip(t: u32, ) -> Weight { - (45_656_000 as Weight) + Weight::from_ref_time(45_656_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((276_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(276_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn slash_tip(t: u32, ) -> Weight { - (18_525_000 as Weight) + Weight::from_ref_time(18_525_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((37_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -119,58 +119,58 @@ impl WeightInfo for () { // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:1 w:1) fn report_awesome(r: u32, ) -> Weight { - (30_669_000 as Weight) + Weight::from_ref_time(30_669_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((4_000 as Weight).saturating_mul(r as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn retract_tip() -> Weight { - (28_768_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - 
.saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(28_768_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:0 w:1) fn tip_new(r: u32, t: u32, ) -> Weight { - (20_385_000 as Weight) + Weight::from_ref_time(20_385_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((166_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(166_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Tips (r:1 w:1) fn tip(t: u32, ) -> Weight { - (12_287_000 as Weight) + Weight::from_ref_time(12_287_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add((363_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(363_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn close_tip(t: u32, ) -> Weight { - (45_656_000 as Weight) + Weight::from_ref_time(45_656_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add((276_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(276_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn slash_tip(t: u32, ) -> Weight { - (18_525_000 as Weight) + Weight::from_ref_time(18_525_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add((37_000 as Weight).saturating_mul(t as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index 08b17a6bf459c..a296a52b5e840 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -59,19 +59,19 @@ const CALL: &::Call = &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); thread_local! 
{ - static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(0); + static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(Weight::new()); } pub struct BlockWeights; impl Get for BlockWeights { fn get() -> frame_system::limits::BlockWeights { frame_system::limits::BlockWeights::builder() - .base_block(0) + .base_block(Weight::zero()) .for_class(DispatchClass::all(), |weights| { weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = 1024.into(); + weights.max_total = Weight::from_ref_time(1024).into(); }) .build_or_panic() } @@ -129,7 +129,8 @@ impl WeightToFeeT for WeightToFee { type Balance = u64; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(*weight).saturating_mul(WEIGHT_TO_FEE.with(|v| *v.borrow())) + Self::Balance::saturated_from(weight.ref_time()) + .saturating_mul(WEIGHT_TO_FEE.with(|v| *v.borrow())) } } @@ -137,7 +138,7 @@ impl WeightToFeeT for TransactionByteFee { type Balance = u64; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(*weight) + Self::Balance::saturated_from(weight.ref_time()) .saturating_mul(TRANSACTION_BYTE_FEE.with(|v| *v.borrow())) } } @@ -208,19 +209,24 @@ impl Config for Runtime { pub struct ExtBuilder { balance_factor: u64, - base_weight: u64, + base_weight: Weight, byte_fee: u64, weight_to_fee: u64, } impl Default for ExtBuilder { fn default() -> Self { - Self { balance_factor: 1, base_weight: 0, byte_fee: 1, weight_to_fee: 1 } + Self { + balance_factor: 1, + base_weight: Weight::from_ref_time(0), + byte_fee: 1, + weight_to_fee: 1, + } } } impl ExtBuilder { - pub fn base_weight(mut self, base_weight: u64) -> Self { + pub fn base_weight(mut self, base_weight: Weight) -> Self { self.base_weight = base_weight; self } @@ -283,19 +289,19 @@ fn transaction_payment_in_native_possible() { let balance_factor = 100; ExtBuilder::default() .balance_factor(balance_factor) - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { let len = 10; let pre = ChargeAssetTxPayment::::from(0, None) - .pre_dispatch(&1, CALL, &info_from_weight(5), len) + .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_ref_time(5)), len) .unwrap(); let initial_balance = 10 * balance_factor; assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(5), + &info_from_weight(Weight::from_ref_time(5)), &default_post_info(), len, &Ok(()) @@ -303,15 +309,15 @@ fn transaction_payment_in_native_possible() { assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 10); let pre = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) .unwrap(); let initial_balance_for_2 = 20 * balance_factor; assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 5); assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(100), - &post_info_from_weight(50), + &info_from_weight(Weight::from_ref_time(100)), + &post_info_from_weight(Weight::from_ref_time(50)), len, &Ok(()) )); @@ -325,7 +331,7 @@ fn transaction_payment_in_asset_possible() { let balance_factor = 100; ExtBuilder::default() .balance_factor(balance_factor) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -351,7 
+357,7 @@ fn transaction_payment_in_asset_possible() { // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -361,7 +367,7 @@ fn transaction_payment_in_asset_possible() { assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), &default_post_info(), len, &Ok(()) @@ -378,7 +384,7 @@ fn transaction_payment_without_fee() { let balance_factor = 100; ExtBuilder::default() .balance_factor(balance_factor) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -404,7 +410,7 @@ fn transaction_payment_without_fee() { // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .unwrap(); // assert that native balance is not used assert_eq!(Balances::free_balance(caller), 10 * balance_factor); @@ -414,7 +420,7 @@ fn transaction_payment_without_fee() { assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), &post_info_from_pays(Pays::No), len, &Ok(()) @@ -431,7 +437,7 @@ fn asset_transaction_payment_with_tip_and_refund() { let base_weight = 5; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -459,15 +465,15 @@ fn asset_transaction_payment_with_tip_and_refund() { let fee_with_tip = (base_weight + weight + len as u64 + tip) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(tip, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); let final_weight = 50; assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(weight), - &post_info_from_weight(final_weight), + &info_from_weight(Weight::from_ref_time(weight)), + &post_info_from_weight(Weight::from_ref_time(final_weight)), len, &Ok(()) )); @@ -483,7 +489,7 @@ fn payment_from_account_with_only_assets() { let base_weight = 5; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -511,7 +517,7 @@ fn payment_from_account_with_only_assets() { // existential deposit let fee = (base_weight + weight + len as u64) * min_balance / ExistentialDeposit::get(); let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .unwrap(); assert_eq!(Balances::free_balance(caller), 0); // check that fee was charged in the given asset @@ -519,7 +525,7 @@ fn 
payment_from_account_with_only_assets() { assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), &default_post_info(), len, &Ok(()) @@ -534,7 +540,7 @@ fn payment_only_with_existing_sufficient_asset() { let base_weight = 5; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { let asset_id = 1; @@ -543,7 +549,7 @@ fn payment_only_with_existing_sufficient_asset() { let len = 10; // pre_dispatch fails for non-existent asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .is_err()); // create the non-sufficient asset @@ -557,7 +563,7 @@ fn payment_only_with_existing_sufficient_asset() { )); // pre_dispatch fails for non-sufficient asset assert!(ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .is_err()); }); } @@ -567,7 +573,7 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { let base_weight = 1; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -611,14 +617,14 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { assert_eq!(Assets::balance(asset_id, caller), balance); } let pre = ChargeAssetTxPayment::::from(0, Some(asset_id)) - .pre_dispatch(&caller, CALL, &info_from_weight(weight), len) + .pre_dispatch(&caller, CALL, &info_from_weight(Weight::from_ref_time(weight)), len) .unwrap(); // check that at least one coin was charged in the given asset assert_eq!(Assets::balance(asset_id, caller), balance - 1); assert_ok!(ChargeAssetTxPayment::::post_dispatch( Some(pre), - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), &default_post_info(), len, &Ok(()) @@ -632,7 +638,7 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { let base_weight = 1; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -689,7 +695,7 @@ fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { let base_weight = 1; ExtBuilder::default() .balance_factor(100) - .base_weight(base_weight) + .base_weight(Weight::from_ref_time(base_weight)) .build() .execute_with(|| { // create the asset @@ -713,7 +719,7 @@ fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { let len = 1; ChargeAssetTxPayment::::pre_dispatch_unsigned( CALL, - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), len, ) .unwrap(); @@ -724,7 +730,7 @@ fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { // initial fee) assert_ok!(ChargeAssetTxPayment::::post_dispatch( None, - &info_from_weight(weight), + &info_from_weight(Weight::from_ref_time(weight)), &post_info_from_pays(Pays::Yes), len, &Ok(()) diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 707f7d8a22065..9777d7d240491 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -66,7 +66,8 @@ use frame_support::{ dispatch::DispatchResult, traits::{EstimateCallFee, Get}, weights::{ - 
DispatchClass, DispatchInfo, GetDispatchInfo, Pays, PostDispatchInfo, Weight, WeightToFee, + DispatchClass, DispatchInfo, GetDispatchInfo, Pays, PostDispatchInfo, RefTimeWeight, + Weight, WeightToFee, }, }; @@ -189,7 +190,11 @@ where weights.get(DispatchClass::Normal).max_total.unwrap_or(weights.max_block); let current_block_weight = >::block_weight(); let normal_block_weight = - *current_block_weight.get(DispatchClass::Normal).min(&normal_max_weight); + current_block_weight.get(DispatchClass::Normal).min(normal_max_weight); + + // TODO: Handle all weight dimensions + let normal_max_weight = normal_max_weight.ref_time(); + let normal_block_weight = normal_block_weight.ref_time(); let s = S::get(); let v = V::get(); @@ -347,7 +352,7 @@ pub mod pallet { assert!( ::max_value() >= Multiplier::checked_from_integer::( - T::BlockWeights::get().max_block.try_into().unwrap() + T::BlockWeights::get().max_block.ref_time().try_into().unwrap() ) .unwrap(), ); @@ -359,7 +364,7 @@ pub mod pallet { ); // add 1 percent; let addition = target / 100; - if addition == 0 { + if addition == Weight::zero() { // this is most likely because in a test setup we set everything to (). return } @@ -554,7 +559,7 @@ where } fn length_to_fee(length: u32) -> BalanceOf { - T::LengthToFee::weight_to_fee(&(length as Weight)) + T::LengthToFee::weight_to_fee(&Weight::from_ref_time(length as RefTimeWeight)) } fn weight_to_fee(weight: Weight) -> BalanceOf { @@ -655,7 +660,11 @@ where let max_block_weight = T::BlockWeights::get().max_block; let max_block_length = *T::BlockLength::get().max.get(info.class) as u64; - let bounded_weight = info.weight.max(1).min(max_block_weight); + // TODO: Take into account all dimensions of weight + let max_block_weight = max_block_weight.ref_time(); + let info_weight = info.weight.ref_time(); + + let bounded_weight = info_weight.max(1).min(max_block_weight); let bounded_length = (len as u64).max(1).min(max_block_length); let max_tx_per_block_weight = max_block_weight / bounded_weight; @@ -836,19 +845,19 @@ mod tests { &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); thread_local! 
{ - static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(0); + static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(Weight::new()); } pub struct BlockWeights; impl Get for BlockWeights { fn get() -> frame_system::limits::BlockWeights { frame_system::limits::BlockWeights::builder() - .base_block(0) + .base_block(Weight::zero()) .for_class(DispatchClass::all(), |weights| { weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = 1024.into(); + weights.max_total = Weight::from_ref_time(1024).into(); }) .build_or_panic() } @@ -903,7 +912,7 @@ mod tests { type Balance = u64; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(*weight) + Self::Balance::saturated_from(weight.ref_time()) .saturating_mul(WEIGHT_TO_FEE.with(|v| *v.borrow())) } } @@ -912,7 +921,7 @@ mod tests { type Balance = u64; fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(*weight) + Self::Balance::saturated_from(weight.ref_time()) .saturating_mul(TRANSACTION_BYTE_FEE.with(|v| *v.borrow())) } } @@ -947,19 +956,19 @@ mod tests { pub struct ExtBuilder { balance_factor: u64, - base_weight: u64, + base_weight: Weight, byte_fee: u64, weight_to_fee: u64, } impl Default for ExtBuilder { fn default() -> Self { - Self { balance_factor: 1, base_weight: 0, byte_fee: 1, weight_to_fee: 1 } + Self { balance_factor: 1, base_weight: Weight::zero(), byte_fee: 1, weight_to_fee: 1 } } } impl ExtBuilder { - pub fn base_weight(mut self, base_weight: u64) -> Self { + pub fn base_weight(mut self, base_weight: Weight) -> Self { self.base_weight = base_weight; self } @@ -1025,18 +1034,18 @@ mod tests { fn signed_extension_transaction_payment_work() { ExtBuilder::default() .balance_factor(10) - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { let len = 10; let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(5), len) + .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_ref_time(5)), len) .unwrap(); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(5), + &info_from_weight(Weight::from_ref_time(5)), &default_post_info(), len, &Ok(()) @@ -1048,14 +1057,14 @@ mod tests { FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() = 0); let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(100), - &post_info_from_weight(50), + &info_from_weight(Weight::from_ref_time(100)), + &post_info_from_weight(Weight::from_ref_time(50)), len, &Ok(()) )); @@ -1069,22 +1078,22 @@ mod tests { fn signed_extension_transaction_payment_multiplied_refund_works() { ExtBuilder::default() .balance_factor(10) - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { let len = 10; >::put(Multiplier::saturating_from_rational(3, 2)); let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) .unwrap(); // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 
5); assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(100), - &post_info_from_weight(50), + &info_from_weight(Weight::from_ref_time(100)), + &post_info_from_weight(Weight::from_ref_time(50)), len, &Ok(()) )); @@ -1100,13 +1109,14 @@ mod tests { assert_ok!(ChargeTransactionPayment::::from(0).pre_dispatch( &1, CALL, - &info_from_weight(Weight::max_value()), + &info_from_weight(Weight::MAX), 10 )); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), - (10000 - ::BlockWeights::get().max_block) as u64 + (10000 - + ::BlockWeights::get().max_block.ref_time()) as u64 ); }); } @@ -1114,7 +1124,7 @@ mod tests { #[test] fn signed_extension_allows_free_transactions() { ExtBuilder::default() - .base_weight(100) + .base_weight(Weight::from_ref_time(100)) .balance_factor(0) .build() .execute_with(|| { @@ -1125,7 +1135,7 @@ mod tests { // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. let operational_transaction = DispatchInfo { - weight: 0, + weight: Weight::from_ref_time(0), class: DispatchClass::Operational, pays_fee: Pays::No, }; @@ -1137,8 +1147,11 @@ mod tests { )); // like a InsecureFreeNormal - let free_transaction = - DispatchInfo { weight: 0, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let free_transaction = DispatchInfo { + weight: Weight::from_ref_time(0), + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }; assert_noop!( ChargeTransactionPayment::::from(0).validate( &1, @@ -1154,7 +1167,7 @@ mod tests { #[test] fn signed_ext_length_fee_is_also_updated_per_congestion() { ExtBuilder::default() - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .balance_factor(10) .build() .execute_with(|| { @@ -1162,8 +1175,10 @@ mod tests { >::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; - assert_ok!(ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(3), len)); + assert_ok!( + ChargeTransactionPayment::::from(10) // tipped + .pre_dispatch(&1, CALL, &info_from_weight(Weight::from_ref_time(3)), len) + ); assert_eq!( Balances::free_balance(1), 100 // original @@ -1188,48 +1203,54 @@ mod tests { let unsigned_xt = TestXt::<_, ()>::new(call, None); let unsigned_xt_info = unsigned_xt.get_dispatch_info(); - ExtBuilder::default().base_weight(5).weight_fee(2).build().execute_with(|| { - // all fees should be x1.5 - >::put(Multiplier::saturating_from_rational(3, 2)); + ExtBuilder::default() + .base_weight(Weight::from_ref_time(5)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + >::put(Multiplier::saturating_from_rational(3, 2)); - assert_eq!( - TransactionPayment::query_info(xt.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ + assert_eq!( + TransactionPayment::query_info(xt.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ - }, - ); + + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); - assert_eq!( - TransactionPayment::query_info(unsigned_xt.clone(), len), - RuntimeDispatchInfo { - weight: unsigned_xt_info.weight, - class: unsigned_xt_info.class, - partial_fee: 0, - }, - ); + assert_eq!( + TransactionPayment::query_info(unsigned_xt.clone(), len), + 
RuntimeDispatchInfo { + weight: unsigned_xt_info.weight, + class: unsigned_xt_info.class, + partial_fee: 0, + }, + ); - assert_eq!( - TransactionPayment::query_fee_details(xt, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, - len_fee: len as u64, - adjusted_weight_fee: info.weight.min(BlockWeights::get().max_block) as u64 * - 2 * 3 / 2 - }), - tip: 0, - }, - ); + assert_eq!( + TransactionPayment::query_fee_details(xt, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, + len_fee: len as u64, + adjusted_weight_fee: info + .weight + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 + }), + tip: 0, + }, + ); - assert_eq!( - TransactionPayment::query_fee_details(unsigned_xt, len), - FeeDetails { inclusion_fee: None, tip: 0 }, - ); - }); + assert_eq!( + TransactionPayment::query_fee_details(unsigned_xt, len), + FeeDetails { inclusion_fee: None, tip: 0 }, + ); + }); } #[test] @@ -1239,40 +1260,46 @@ mod tests { let encoded_call = call.encode(); let len = encoded_call.len() as u32; - ExtBuilder::default().base_weight(5).weight_fee(2).build().execute_with(|| { - // all fees should be x1.5 - >::put(Multiplier::saturating_from_rational(3, 2)); + ExtBuilder::default() + .base_weight(Weight::from_ref_time(5)) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + >::put(Multiplier::saturating_from_rational(3, 2)); - assert_eq!( - TransactionPayment::query_call_info(call.clone(), len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: 5 * 2 /* base * weight_fee */ + assert_eq!( + TransactionPayment::query_call_info(call.clone(), len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 * 2 /* base * weight_fee */ + len as u64 /* len * 1 */ - + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ - }, - ); + + info.weight.min(BlockWeights::get().max_block).ref_time() as u64 * 2 * 3 / 2 /* weight */ + }, + ); - assert_eq!( - TransactionPayment::query_call_fee_details(call, len), - FeeDetails { - inclusion_fee: Some(InclusionFee { - base_fee: 5 * 2, /* base * weight_fee */ - len_fee: len as u64, /* len * 1 */ - adjusted_weight_fee: info.weight.min(BlockWeights::get().max_block) as u64 * - 2 * 3 / 2 /* weight * weight_fee * multipler */ - }), - tip: 0, - }, - ); - }); + assert_eq!( + TransactionPayment::query_call_fee_details(call, len), + FeeDetails { + inclusion_fee: Some(InclusionFee { + base_fee: 5 * 2, /* base * weight_fee */ + len_fee: len as u64, /* len * 1 */ + adjusted_weight_fee: info + .weight + .min(BlockWeights::get().max_block) + .ref_time() as u64 * 2 * 3 / 2 /* weight * weight_fee * multipler */ + }), + tip: 0, + }, + ); + }); } #[test] fn compute_fee_works_without_multiplier() { ExtBuilder::default() - .base_weight(100) + .base_weight(Weight::from_ref_time(100)) .byte_fee(10) .balance_factor(0) .build() @@ -1282,14 +1309,14 @@ mod tests { // Tip only, no fees works let dispatch_info = DispatchInfo { - weight: 0, + weight: Weight::from_ref_time(0), class: DispatchClass::Operational, pays_fee: Pays::No, }; assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 10), 10); // No tip, only base fee works let dispatch_info = DispatchInfo { - weight: 0, + weight: Weight::from_ref_time(0), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1300,7 +1327,7 @@ mod tests { assert_eq!(Pallet::::compute_fee(42, &dispatch_info, 0), 520); // Weight fee + base fee works let dispatch_info = DispatchInfo { - 
weight: 1000, + weight: Weight::from_ref_time(1000), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1311,7 +1338,7 @@ mod tests { #[test] fn compute_fee_works_with_multiplier() { ExtBuilder::default() - .base_weight(100) + .base_weight(Weight::from_ref_time(100)) .byte_fee(10) .balance_factor(0) .build() @@ -1320,7 +1347,7 @@ mod tests { >::put(Multiplier::saturating_from_rational(3, 2)); // Base fee is unaffected by multiplier let dispatch_info = DispatchInfo { - weight: 0, + weight: Weight::from_ref_time(0), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1328,7 +1355,7 @@ mod tests { // Everything works together :) let dispatch_info = DispatchInfo { - weight: 123, + weight: Weight::from_ref_time(123), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1343,7 +1370,7 @@ mod tests { #[test] fn compute_fee_works_with_negative_multiplier() { ExtBuilder::default() - .base_weight(100) + .base_weight(Weight::from_ref_time(100)) .byte_fee(10) .balance_factor(0) .build() @@ -1353,7 +1380,7 @@ mod tests { // Base fee is unaffected by multiplier. let dispatch_info = DispatchInfo { - weight: 0, + weight: Weight::from_ref_time(0), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1361,7 +1388,7 @@ mod tests { // Everything works together. let dispatch_info = DispatchInfo { - weight: 123, + weight: Weight::from_ref_time(123), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1376,14 +1403,14 @@ mod tests { #[test] fn compute_fee_does_not_overflow() { ExtBuilder::default() - .base_weight(100) + .base_weight(Weight::from_ref_time(100)) .byte_fee(10) .balance_factor(0) .build() .execute_with(|| { // Overflow is handled let dispatch_info = DispatchInfo { - weight: Weight::max_value(), + weight: Weight::MAX, class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1398,14 +1425,14 @@ mod tests { fn refund_does_not_recreate_account() { ExtBuilder::default() .balance_factor(10) - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { // So events are emitted System::set_block_number(10); let len = 10; let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); @@ -1415,8 +1442,8 @@ mod tests { assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(100), - &post_info_from_weight(50), + &info_from_weight(Weight::from_ref_time(100)), + &post_info_from_weight(Weight::from_ref_time(50)), len, &Ok(()) )); @@ -1438,19 +1465,19 @@ mod tests { fn actual_weight_higher_than_max_refunds_nothing() { ExtBuilder::default() .balance_factor(10) - .base_weight(5) + .base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { let len = 10; let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); assert_ok!(ChargeTransactionPayment::::post_dispatch( Some(pre), - &info_from_weight(100), - &post_info_from_weight(101), + &info_from_weight(Weight::from_ref_time(100)), + &post_info_from_weight(Weight::from_ref_time(101)), len, &Ok(()) )); @@ -1462,14 +1489,17 @@ mod tests { fn zero_transfer_on_free_transaction() { ExtBuilder::default() .balance_factor(10) - .base_weight(5) + 
.base_weight(Weight::from_ref_time(5)) .build() .execute_with(|| { // So events are emitted System::set_block_number(10); let len = 10; - let dispatch_info = - DispatchInfo { weight: 100, pays_fee: Pays::No, class: DispatchClass::Normal }; + let dispatch_info = DispatchInfo { + weight: Weight::from_ref_time(100), + pays_fee: Pays::No, + class: DispatchClass::Normal, + }; let user = 69; let pre = ChargeTransactionPayment::::from(0) .pre_dispatch(&user, CALL, &dispatch_info, len) @@ -1498,11 +1528,11 @@ mod tests { fn refund_consistent_with_actual_weight() { ExtBuilder::default() .balance_factor(10) - .base_weight(7) + .base_weight(Weight::from_ref_time(7)) .build() .execute_with(|| { - let info = info_from_weight(100); - let post_info = post_info_from_weight(33); + let info = info_from_weight(Weight::from_ref_time(100)); + let post_info = post_info_from_weight(Weight::from_ref_time(33)); let prev_balance = Balances::free_balance(2); let len = 10; let tip = 5; @@ -1538,8 +1568,11 @@ mod tests { let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { - let normal = - DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let normal = DispatchInfo { + weight: Weight::from_ref_time(100), + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }; let priority = ChargeTransactionPayment::(tip) .validate(&2, CALL, &normal, len) .unwrap() @@ -1557,7 +1590,7 @@ mod tests { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: 100, + weight: Weight::from_ref_time(100), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1581,8 +1614,11 @@ mod tests { let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { - let normal = - DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let normal = DispatchInfo { + weight: Weight::from_ref_time(100), + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }; let priority = ChargeTransactionPayment::(tip) .validate(&2, CALL, &normal, len) .unwrap() @@ -1593,7 +1629,7 @@ mod tests { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: 100, + weight: Weight::from_ref_time(100), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1612,8 +1648,11 @@ mod tests { let mut priority2 = 0; let len = 10; ExtBuilder::default().balance_factor(100).build().execute_with(|| { - let normal = - DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let normal = DispatchInfo { + weight: Weight::from_ref_time(100), + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }; priority1 = ChargeTransactionPayment::(tip) .validate(&2, CALL, &normal, len) .unwrap() @@ -1622,7 +1661,7 @@ mod tests { ExtBuilder::default().balance_factor(100).build().execute_with(|| { let op = DispatchInfo { - weight: 100, + weight: Weight::from_ref_time(100), class: DispatchClass::Operational, pays_fee: Pays::Yes, }; @@ -1649,10 +1688,10 @@ mod tests { fn post_info_can_change_pays_fee() { ExtBuilder::default() .balance_factor(10) - .base_weight(7) + .base_weight(Weight::from_ref_time(7)) .build() .execute_with(|| { - let info = info_from_weight(100); + let info = info_from_weight(Weight::from_ref_time(100)); let post_info = post_info_from_pays(Pays::No); let prev_balance = Balances::free_balance(2); let len = 10; diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs index 3faebfed48946..5e915d62a26d4 100644 --- 
a/frame/transaction-payment/src/types.rs +++ b/frame/transaction-payment/src/types.rs @@ -135,12 +135,12 @@ mod tests { #[test] fn should_serialize_and_deserialize_properly_with_string() { let info = RuntimeDispatchInfo { - weight: 5, + weight: Weight::from_ref_time(5), class: DispatchClass::Normal, partial_fee: 1_000_000_u64, }; - let json_str = r#"{"weight":5,"class":"normal","partialFee":"1000000"}"#; + let json_str = r#"{"weight":{"ref_time":5},"class":"normal","partialFee":"1000000"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); @@ -152,12 +152,12 @@ mod tests { #[test] fn should_serialize_and_deserialize_properly_large_value() { let info = RuntimeDispatchInfo { - weight: 5, + weight: Weight::from_ref_time(5), class: DispatchClass::Normal, partial_fee: u128::max_value(), }; - let json_str = r#"{"weight":5,"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; + let json_str = r#"{"weight":{"ref_time":5},"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); diff --git a/frame/transaction-storage/src/weights.rs b/frame/transaction-storage/src/weights.rs index b8bc4890a416e..54d5b0723aad6 100644 --- a/frame/transaction-storage/src/weights.rs +++ b/frame/transaction-storage/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_transaction_storage. 
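Note: the weight hunks in this file and the surrounding ones all apply the same mechanical rewrite: a bare integer weight becomes Weight::from_ref_time(..), and per-component multipliers move from saturating_mul on the integer to scalar_saturating_mul on the Weight. As a rough sketch of the resulting shape (illustrative only, not part of the patch; the name example_store_weight is made up, and the sketch assumes only the RefTimeWeight alias and Weight constructors imported just above):

    use frame_support::weights::{RefTimeWeight, Weight};

    // Build a total weight from a benchmarked base value plus a per-item
    // component, mirroring the shape of the generated `store(l)` below.
    fn example_store_weight(l: u32) -> Weight {
        Weight::from_ref_time(0 as RefTimeWeight)
            // per-item cost, scaled by the number of items
            .saturating_add(
                Weight::from_ref_time(5_000 as RefTimeWeight)
                    .scalar_saturating_mul(l as RefTimeWeight),
            )
    }
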
@@ -59,11 +59,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn store(l: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: TransactionStorage Transactions (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) @@ -72,9 +72,9 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn renew() -> Weight { - (50_978_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(50_978_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: TransactionStorage ProofChecked (r:1 w:1) // Storage: TransactionStorage StoragePeriod (r:1 w:0) @@ -82,9 +82,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System ParentHash (r:1 w:0) // Storage: TransactionStorage Transactions (r:1 w:0) fn check_proof_max() -> Weight { - (106_990_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(106_990_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } } @@ -97,11 +97,11 @@ impl WeightInfo for () { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn store(l: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(l as Weight)) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: TransactionStorage Transactions (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) @@ -110,9 +110,9 @@ impl WeightInfo for () { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn renew() -> Weight { - (50_978_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(50_978_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: TransactionStorage ProofChecked (r:1 w:1) // Storage: TransactionStorage StoragePeriod (r:1 w:0) @@ -120,8 +120,8 @@ impl WeightInfo for () { // Storage: System ParentHash (r:1 w:0) // Storage: TransactionStorage Transactions (r:1 w:0) fn check_proof_max() 
-> Weight { - (106_990_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(106_990_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } } diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index ed38177e1c499..eecf225beea9b 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -319,7 +319,7 @@ pub mod pallet { if (n % T::SpendPeriod::get()).is_zero() { Self::spend_funds() } else { - 0 + Weight::zero() } } } @@ -500,7 +500,7 @@ impl, I: 'static> Pallet { /// Spend some money! returns number of approvals before spend. pub fn spend_funds() -> Weight { - let mut total_weight: Weight = Zero::zero(); + let mut total_weight = Weight::new(); let mut budget_remaining = Self::pot(); Self::deposit_event(Event::Spending { budget_remaining }); diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 61eafb652427b..bec96daf576e3 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -55,7 +55,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index f6b5414a05652..74e6e9779000e 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_treasury. 
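Note: the runtime changes above in frame/treasury/src/lib.rs show the non-benchmark side of the same migration: code that used to return or accumulate a bare 0 now works with Weight values via Weight::zero() and Weight::new(). A minimal sketch of that accumulation pattern (illustrative only; accumulate_weight is a made-up helper, and the sketch assumes only the constructors already used in this patch):

    use frame_support::weights::Weight;

    // Sum a list of step weights, starting from a zero-valued Weight as
    // `spend_funds` does, instead of starting from the integer 0.
    fn accumulate_weight(steps: &[Weight]) -> Weight {
        let mut total_weight = Weight::new();
        for step in steps {
            total_weight = total_weight.saturating_add(*step);
        }
        total_weight
    }
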
@@ -58,51 +58,51 @@ impl WeightInfo for SubstrateWeight { // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn spend() -> Weight { - (22_063_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(22_063_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn propose_spend() -> Weight { - (26_473_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(26_473_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury Proposals (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_proposal() -> Weight { - (29_955_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_955_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury Proposals (r:1 w:0) // Storage: Treasury Approvals (r:1 w:1) fn approve_proposal(p: u32, ) -> Weight { - (10_786_000 as Weight) + Weight::from_ref_time(10_786_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((110_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(110_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Treasury Approvals (r:1 w:1) fn remove_approval() -> Weight { - (6_647_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(6_647_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Treasury Approvals (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Treasury Proposals (r:2 w:2) // Storage: System Account (r:4 w:4) fn on_initialize_proposals(p: u32, ) -> Weight { - (25_805_000 as Weight) + Weight::from_ref_time(25_805_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add((28_473_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) - .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(28_473_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } } @@ -111,50 +111,50 
@@ impl WeightInfo for () { // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn spend() -> Weight { - (22_063_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(22_063_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn propose_spend() -> Weight { - (26_473_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(26_473_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury Proposals (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_proposal() -> Weight { - (29_955_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(29_955_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Treasury Proposals (r:1 w:0) // Storage: Treasury Approvals (r:1 w:1) fn approve_proposal(p: u32, ) -> Weight { - (10_786_000 as Weight) + Weight::from_ref_time(10_786_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((110_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + .saturating_add(Weight::from_ref_time(110_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Treasury Approvals (r:1 w:1) fn remove_approval() -> Weight { - (6_647_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(6_647_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Treasury Approvals (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Treasury Proposals (r:2 w:2) // Storage: System Account (r:4 w:4) fn on_initialize_proposals(p: u32, ) -> Weight { - (25_805_000 as Weight) + Weight::from_ref_time(25_805_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add((28_473_000 as Weight).saturating_mul(p as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) + .saturating_add(Weight::from_ref_time(28_473_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(p as 
RefTimeWeight))) } } diff --git a/frame/uniques/src/migration.rs b/frame/uniques/src/migration.rs index d301f0a3d1eb1..8a2a0ef808d90 100644 --- a/frame/uniques/src/migration.rs +++ b/frame/uniques/src/migration.rs @@ -17,10 +17,7 @@ //! Various pieces of common functionality. use super::*; -use frame_support::{ - traits::{Get, GetStorageVersion, PalletInfoAccess, StorageVersion}, - weights::Weight, -}; +use frame_support::traits::{Get, GetStorageVersion, PalletInfoAccess, StorageVersion}; /// Migrate the pallet storage to v1. pub fn migrate_to_v1, I: 'static, P: GetStorageVersion + PalletInfoAccess>( @@ -45,7 +42,7 @@ pub fn migrate_to_v1, I: 'static, P: GetStorageVersion + PalletInfo on_chain_storage_version, ); // calculate and return migration weights - T::DbWeight::get().reads_writes(count as Weight + 1, count as Weight + 1) + T::DbWeight::get().reads_writes(count as u64 + 1, count as u64 + 1) } else { log::warn!( target: "runtime::uniques", diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index 7c8cb170b1b1d..4ed01e463cc86 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_uniques. @@ -80,16 +80,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (33_075_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(33_075_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (19_528_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(19_528_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -103,192 +103,192 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 25_000 - .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(13_639_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 25_000 - .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(2_393_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 25_000 - .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(Weight::from_ref_time(2_217_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (42_146_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(42_146_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (42_960_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(42_960_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (33_025_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(33_025_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 24_000 - .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(15_540_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (25_194_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_194_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (25_397_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_397_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (19_278_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_278_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (19_304_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_304_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (28_615_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(28_615_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (19_943_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_943_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - 
(22_583_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(22_583_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (47_520_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(47_520_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (45_316_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(45_316_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (38_391_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_391_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (38_023_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_023_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (37_398_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_398_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (35_621_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(35_621_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (25_856_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_856_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset 
(r:1 w:1) fn cancel_approval() -> Weight { - (26_098_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_098_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (24_076_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(24_076_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (22_035_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_035_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - (22_534_000 as Weight) - .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_534_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques ItemPriceOf (r:1 w:1) // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - (45_272_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(45_272_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } } @@ -297,16 +297,16 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - (33_075_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(33_075_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - (19_528_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(19_528_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -320,191 +320,191 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 25_000 - .saturating_add((13_639_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(Weight::from_ref_time(13_639_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 25_000 - .saturating_add((2_393_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(Weight::from_ref_time(2_393_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) // Standard Error: 25_000 - .saturating_add((2_217_000 as Weight).saturating_mul(a as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(n as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(m as Weight))) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(a as Weight))) + .saturating_add(Weight::from_ref_time(2_217_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - (42_146_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(42_146_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - (42_960_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(42_960_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - (33_025_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(33_025_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
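Both copies of the file change in lockstep: `SubstrateWeight<T>` prices storage access through the runtime's configured `T::DbWeight`, while the `impl WeightInfo for ()` above hard-codes the `RocksDbWeight` constants that tests and mocks fall back to. As a rough sketch (assuming the `frame_support` API on this branch and using only calls that appear in the hunks; the function name is made up), the generated `mint()` above boils down to:

```rust
use frame_support::weights::{constants::RocksDbWeight, Weight};

/// Same shape as the generated `mint()` above: a measured base term plus the cost
/// of the 3 storage reads and 3 writes listed in the `// Storage:` comments.
fn mint_like_weight() -> Weight {
    Weight::from_ref_time(42_146_000)
        .saturating_add(RocksDbWeight::get().reads(3))
        .saturating_add(RocksDbWeight::get().writes(3))
}

fn main() {
    // The total must at least cover the measured base term.
    assert!(mint_like_weight().ref_time() >= 42_146_000);
}
```

Production runtimes typically wire `SubstrateWeight<Runtime>` into their `Config`, so the identical formula is then priced with the chain's own database weights instead of the RocksDB defaults.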
fn redeposit(i: u32, ) -> Weight { - (0 as Weight) + Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 24_000 - .saturating_add((15_540_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(Weight::from_ref_time(15_540_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - (25_194_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_194_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - (25_397_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_397_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - (19_278_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_278_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - (19_304_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_304_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - (28_615_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(28_615_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - (19_943_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(19_943_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques 
ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - (22_583_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(22_583_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - (47_520_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(47_520_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - (45_316_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(45_316_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - (38_391_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_391_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - (38_023_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(38_023_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - (37_398_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(37_398_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - (35_621_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(35_621_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - (25_856_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(25_856_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + 
.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - (26_098_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(26_098_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - (24_076_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(24_076_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - (22_035_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_035_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - (22_534_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(1 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + Weight::from_ref_time(22_534_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques ItemPriceOf (r:1 w:1) // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - (45_272_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + Weight::from_ref_time(45_272_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } } diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 0aae2615702dd..3df12d69e84eb 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -185,7 +185,7 @@ pub mod pallet { let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) - .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) + .fold(Weight::new(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() @@ -208,7 +208,7 @@ pub mod pallet { ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); // Track the actual weight of each of the batch calls. - let mut weight: Weight = 0; + let mut weight = Weight::new(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. 
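With `Weight` now a struct, the batch logic above can no longer seed its accumulator with a bare `0`: the hunks switch the fold seed and the running `weight` variable to `Weight::new()` and keep every addition on `saturating_add`. A minimal sketch of that accumulation pattern, illustrative only and assuming the `frame_support::weights::Weight` API used in this patch:

```rust
use frame_support::weights::Weight;

/// Fold per-call dispatch weights into a batch total, then add the batch's own
/// base weight, mirroring the `fold(Weight::new(), ..)` pattern above.
fn batch_weight(call_weights: &[Weight], base_weight: Weight) -> Weight {
    call_weights
        .iter()
        .fold(Weight::new(), |total, weight| total.saturating_add(*weight))
        .saturating_add(base_weight)
}

fn main() {
    // Two illustrative inner-call weights plus a base term; 23_113_000 is the
    // `batch` base weight from this patch's utility weights file.
    let calls = [Weight::from_ref_time(10_000), Weight::from_ref_time(25_000)];
    let total = batch_weight(&calls, Weight::from_ref_time(23_113_000));
    assert_eq!(total, Weight::from_ref_time(23_148_000));
}
```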
@@ -253,9 +253,9 @@ pub mod pallet { let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::as_derivative() - .saturating_add(dispatch_info.weight) // AccountData for inner call origin accountdata. - .saturating_add(T::DbWeight::get().reads_writes(1, 1)), + .saturating_add(T::DbWeight::get().reads_writes(1, 1)) + .saturating_add(dispatch_info.weight), dispatch_info.class, ) })] @@ -301,7 +301,7 @@ pub mod pallet { let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) - .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) + .fold(Weight::new(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() @@ -324,7 +324,7 @@ pub mod pallet { ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); // Track the actual weight of each of the batch calls. - let mut weight: Weight = 0; + let mut weight = Weight::new(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, bypass any dispatch filter; root can call anything. @@ -352,7 +352,7 @@ pub mod pallet { } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch_all(calls_len as u32); - Ok(Some(base_weight + weight).into()) + Ok(Some(base_weight.saturating_add(weight)).into()) } /// Dispatches a function call with a provided origin. @@ -406,7 +406,7 @@ pub mod pallet { let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) - .fold(0, |total: Weight, weight: Weight| total.saturating_add(weight)) + .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() @@ -429,7 +429,7 @@ pub mod pallet { ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); // Track the actual weight of each of the batch calls. - let mut weight: Weight = 0; + let mut weight = Weight::new(); // Track failed dispatch occur. let mut has_error: bool = false; for call in calls.into_iter() { @@ -455,7 +455,7 @@ pub mod pallet { Self::deposit_event(Event::BatchCompleted); } let base_weight = T::WeightInfo::batch(calls_len as u32); - Ok(Some(base_weight + weight).into()) + Ok(Some(base_weight.saturating_add(weight)).into()) } } } diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 6368473ac8708..e3b16b5244fc0 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -99,7 +99,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::max_value()); + frame_system::limits::BlockWeights::simple_max(Weight::MAX); } impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; @@ -191,7 +191,7 @@ fn call_transfer(dest: u64, value: u64) -> Call { Call::Balances(BalancesCall::transfer { dest, value }) } -fn call_foobar(err: bool, start_weight: u64, end_weight: Option) -> Call { +fn call_foobar(err: bool, start_weight: Weight, end_weight: Option) -> Call { Call::Example(ExampleCall::foobar { err, start_weight, end_weight }) } @@ -213,8 +213,8 @@ fn as_derivative_works() { #[test] fn as_derivative_handles_weight_refund() { new_test_ext().execute_with(|| { - let start_weight = 100; - let end_weight = 75; + let start_weight = Weight::from_ref_time(100); + let end_weight = Weight::from_ref_time(75); let diff = start_weight - end_weight; // Full weight when ok @@ -364,24 +364,24 @@ fn batch_weight_calculation_doesnt_overflow() { use sp_runtime::Perbill; new_test_ext().execute_with(|| { let big_call = Call::System(SystemCall::fill_block { ratio: Perbill::from_percent(50) }); - assert_eq!(big_call.get_dispatch_info().weight, Weight::max_value() / 2); + assert_eq!(big_call.get_dispatch_info().weight, Weight::MAX / 2); // 3 * 50% saturates to 100% let batch_call = Call::Utility(crate::Call::batch { calls: vec![big_call.clone(), big_call.clone(), big_call.clone()], }); - assert_eq!(batch_call.get_dispatch_info().weight, Weight::max_value()); + assert_eq!(batch_call.get_dispatch_info().weight, Weight::MAX); }); } #[test] fn batch_handles_weight_refund() { new_test_ext().execute_with(|| { - let start_weight = 100; - let end_weight = 75; + let start_weight = Weight::from_ref_time(100); + let end_weight = Weight::from_ref_time(75); let diff = start_weight - end_weight; - let batch_len: Weight = 4; + let batch_len = 4; // Full weight when ok let inner_call = call_foobar(false, start_weight, None); @@ -420,7 +420,7 @@ fn batch_handles_weight_refund() { let good_call = call_foobar(false, start_weight, Some(end_weight)); let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call]; - let batch_len = batch_calls.len() as Weight; + let batch_len = Weight::from_ref_time(batch_calls.len() as u64); let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); @@ -494,10 +494,10 @@ fn batch_all_revert() { #[test] fn batch_all_handles_weight_refund() { new_test_ext().execute_with(|| { - let start_weight = 100; - let end_weight = 75; + let start_weight = Weight::from_ref_time(100); + let end_weight = Weight::from_ref_time(75); let diff = start_weight - end_weight; - let batch_len: Weight = 4; + let batch_len = 4; // Full weight when ok let inner_call = call_foobar(false, start_weight, None); @@ -533,7 +533,7 @@ fn batch_all_handles_weight_refund() { let good_call = call_foobar(false, start_weight, Some(end_weight)); let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call]; - let batch_len = batch_calls.len() as Weight; + let batch_len = Weight::from_ref_time(batch_calls.len() as u64); let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); @@ -616,7 +616,7 @@ fn force_batch_works() { Origin::signed(1), vec![ call_transfer(2, 5), - 
call_foobar(true, 75, None), + call_foobar(true, Weight::from_ref_time(75), None), call_transfer(2, 10), call_transfer(2, 5), ] diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs index 3660a54fb6a8f..0f0d171d8d4ee 100644 --- a/frame/utility/src/weights.rs +++ b/frame/utility/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_utility. @@ -58,27 +58,27 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { - (23_113_000 as Weight) + Weight::from_ref_time(23_113_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((2_701_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(2_701_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) } fn as_derivative() -> Weight { - (4_182_000 as Weight) + Weight::from_ref_time(4_182_000 as RefTimeWeight) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { - (18_682_000 as Weight) + Weight::from_ref_time(18_682_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((2_794_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(2_794_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) } fn dispatch_as() -> Weight { - (12_049_000 as Weight) + Weight::from_ref_time(12_049_000 as RefTimeWeight) } /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { - (19_136_000 as Weight) + Weight::from_ref_time(19_136_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((2_697_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(2_697_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) } } @@ -86,26 +86,26 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { - (23_113_000 as Weight) + Weight::from_ref_time(23_113_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((2_701_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(2_701_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) } fn as_derivative() -> Weight { - (4_182_000 as Weight) + Weight::from_ref_time(4_182_000 as RefTimeWeight) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { - (18_682_000 as Weight) + Weight::from_ref_time(18_682_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((2_794_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(2_794_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) } fn dispatch_as() -> Weight { - (12_049_000 as Weight) + Weight::from_ref_time(12_049_000 as RefTimeWeight) } /// The range of component `c` is `[0, 1000]`. 
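The test hunks above show the other side of the migration: fixtures that used to be bare integers (`start_weight = 100`, `batch_len`) are now built with `Weight::from_ref_time`, while subtraction, comparison and saturating addition still behave like the old integer arithmetic. A small illustrative test in the same spirit, assuming the `frame_support::weights::Weight` API from this branch:

```rust
use frame_support::weights::Weight;

#[test]
fn weight_fixtures_behave_like_the_old_integers() {
    // Refund math keeps its old shape because `Sub` is still implemented.
    let start_weight = Weight::from_ref_time(100);
    let end_weight = Weight::from_ref_time(75);
    assert_eq!(start_weight - end_weight, Weight::from_ref_time(25));

    // Saturating addition: three "half of max" weights cap out at `Weight::MAX`,
    // the same behaviour the `batch_weight_calculation_doesnt_overflow` test above
    // relies on.
    let half = Weight::MAX / 2;
    assert_eq!(half.saturating_add(half).saturating_add(half), Weight::MAX);
}
```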
fn force_batch(c: u32, ) -> Weight { - (19_136_000 as Weight) + Weight::from_ref_time(19_136_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((2_697_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(Weight::from_ref_time(2_697_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) } } diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs index 9ad8e57500e89..8875404f03fa2 100644 --- a/frame/vesting/src/mock.rs +++ b/frame/vesting/src/mock.rs @@ -45,7 +45,7 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index 4596157e63b7b..bd3af72cdd182 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_vesting. @@ -60,96 +60,96 @@ impl WeightInfo for SubstrateWeight { // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vest_locked(l: u32, s: u32, ) -> Weight { - (32_978_000 as Weight) + Weight::from_ref_time(32_978_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((82_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(82_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((88_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(88_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vest_unlocked(l: u32, s: u32, ) -> Weight { - (32_856_000 as Weight) + Weight::from_ref_time(32_856_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn vest_other_locked(l: u32, s: u32, ) -> Weight { - (33_522_000 as Weight) + Weight::from_ref_time(33_522_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((74_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(74_000 as RefTimeWeight).scalar_saturating_mul(l as 
RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((72_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(72_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { - (32_558_000 as Weight) + Weight::from_ref_time(32_558_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((61_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(61_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vested_transfer(l: u32, s: u32, ) -> Weight { - (49_260_000 as Weight) + Weight::from_ref_time(49_260_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((80_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((55_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) fn force_vested_transfer(l: u32, s: u32, ) -> Weight { - (49_166_000 as Weight) + Weight::from_ref_time(49_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((77_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 4_000 - .saturating_add((43_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(43_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - (34_042_000 as Weight) + Weight::from_ref_time(34_042_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((83_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(83_000 as RefTimeWeight).scalar_saturating_mul(l 
as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((80_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - (33_937_000 as Weight) + Weight::from_ref_time(33_937_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((73_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } } @@ -158,95 +158,95 @@ impl WeightInfo for () { // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vest_locked(l: u32, s: u32, ) -> Weight { - (32_978_000 as Weight) + Weight::from_ref_time(32_978_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((82_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(82_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((88_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(88_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vest_unlocked(l: u32, s: u32, ) -> Weight { - (32_856_000 as Weight) + Weight::from_ref_time(32_856_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((79_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((56_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn vest_other_locked(l: u32, s: u32, ) -> Weight { - (33_522_000 as Weight) + Weight::from_ref_time(33_522_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((74_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(74_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // 
Standard Error: 2_000 - .saturating_add((72_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(72_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { - (32_558_000 as Weight) + Weight::from_ref_time(32_558_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((61_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(61_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vested_transfer(l: u32, s: u32, ) -> Weight { - (49_260_000 as Weight) + Weight::from_ref_time(49_260_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((80_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add((55_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) fn force_vested_transfer(l: u32, s: u32, ) -> Weight { - (49_166_000 as Weight) + Weight::from_ref_time(49_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add((77_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 4_000 - .saturating_add((43_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + .saturating_add(Weight::from_ref_time(43_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - (34_042_000 as Weight) + Weight::from_ref_time(34_042_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((83_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(83_000 as 
RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((80_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - (33_937_000 as Weight) + Weight::from_ref_time(33_937_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add((73_000 as Weight).saturating_mul(s as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } } diff --git a/frame/whitelist/src/mock.rs b/frame/whitelist/src/mock.rs index 634db53a09a4e..b18236099d445 100644 --- a/frame/whitelist/src/mock.rs +++ b/frame/whitelist/src/mock.rs @@ -51,7 +51,7 @@ construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); } impl frame_system::Config for Test { type BaseCallFilter = Nothing; diff --git a/frame/whitelist/src/tests.rs b/frame/whitelist/src/tests.rs index 67bccaeaeebe1..3e20cd29efb4f 100644 --- a/frame/whitelist/src/tests.rs +++ b/frame/whitelist/src/tests.rs @@ -19,7 +19,9 @@ use crate::mock::*; use codec::Encode; -use frame_support::{assert_noop, assert_ok, dispatch::GetDispatchInfo, traits::PreimageProvider}; +use frame_support::{ + assert_noop, assert_ok, dispatch::GetDispatchInfo, traits::PreimageProvider, weights::Weight, +}; use sp_runtime::{traits::Hash, DispatchError}; #[test] @@ -94,7 +96,11 @@ fn test_whitelist_call_and_execute() { assert!(Preimage::preimage_requested(&call_hash)); assert_noop!( - Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight - 1), + Whitelist::dispatch_whitelisted_call( + Origin::root(), + call_hash, + call_weight - Weight::one() + ), crate::Error::::InvalidCallWeightWitness, ); @@ -114,7 +120,7 @@ fn test_whitelist_call_and_execute_failing_call() { new_test_ext().execute_with(|| { let call = Call::Whitelist(crate::Call::dispatch_whitelisted_call { call_hash: Default::default(), - call_weight_witness: 0, + call_weight_witness: Weight::zero(), }); let call_weight = call.get_dispatch_info().weight; let encoded_call = call.encode(); diff --git a/frame/whitelist/src/weights.rs b/frame/whitelist/src/weights.rs index 81482c35e3de8..bb2ed9700c833 100644 --- a/frame/whitelist/src/weights.rs +++ b/frame/whitelist/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, 
weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_whitelist. @@ -56,35 +56,35 @@ impl WeightInfo for SubstrateWeight { // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn whitelist_call() -> Weight { - (20_938_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(20_938_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn remove_whitelisted_call() -> Weight { - (22_332_000 as Weight) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(22_332_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn dispatch_whitelisted_call() -> Weight { - (5_989_917_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(5_989_917_000 as RefTimeWeight) + .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { - (25_325_000 as Weight) + Weight::from_ref_time(25_325_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } } @@ -93,34 +93,34 @@ impl WeightInfo for () { // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn whitelist_call() -> Weight { - (20_938_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + Weight::from_ref_time(20_938_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn remove_whitelisted_call() -> Weight { - (22_332_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(22_332_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn dispatch_whitelisted_call() -> Weight { - 
(5_989_917_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + Weight::from_ref_time(5_989_917_000 as RefTimeWeight) + .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { - (25_325_000 as Weight) + Weight::from_ref_time(25_325_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add((1_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(2 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index aa436f1ad2a91..181e3fec62b24 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -39,7 +39,7 @@ use cfg_if::cfg_if; use frame_support::{ parameter_types, traits::{ConstU32, ConstU64, CrateVersion, KeyOwnerProofSystem}, - weights::RuntimeDbWeight, + weights::{RuntimeDbWeight, Weight}, }; use frame_system::limits::{BlockLength, BlockWeights}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; @@ -577,7 +577,7 @@ parameter_types! { pub RuntimeBlockLength: BlockLength = BlockLength::max(4 * 1024 * 1024); pub RuntimeBlockWeights: BlockWeights = - BlockWeights::with_sensible_defaults(4 * 1024 * 1024, Perbill::from_percent(75)); + BlockWeights::with_sensible_defaults(Weight::from_ref_time(4 * 1024 * 1024), Perbill::from_percent(75)); } impl frame_system::Config for Runtime { diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index e48a7e8b3c6f5..36215c8a0586d 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -142,7 +142,8 @@ where let weight = ConsumedWeight::decode_all(&mut raw_weight)?; // Should be divisible, but still use floats in case we ever change that. - Ok((weight.total() as f64 / WEIGHT_PER_NANOS as f64).floor() as NanoSeconds) + Ok((weight.total().ref_time() as f64 / WEIGHT_PER_NANOS.ref_time() as f64).floor() + as NanoSeconds) } /// Prints the weight info of a block to the console. diff --git a/utils/frame/benchmarking-cli/src/overhead/README.md b/utils/frame/benchmarking-cli/src/overhead/README.md index 6f41e881d0572..a1da5da0d0792 100644 --- a/utils/frame/benchmarking-cli/src/overhead/README.md +++ b/utils/frame/benchmarking-cli/src/overhead/README.md @@ -1,21 +1,21 @@ # The `benchmark overhead` command -Each time an extrinsic or a block is executed, a fixed weight is charged as "execution overhead". -This is necessary since the weight that is calculated by the pallet benchmarks does not include this overhead. -The exact overhead to can vary per Substrate chain and needs to be calculated per chain. +Each time an extrinsic or a block is executed, a fixed weight is charged as "execution overhead". +This is necessary since the weight that is calculated by the pallet benchmarks does not include this overhead. +The exact overhead can vary per Substrate chain and needs to be calculated per chain. 
This command calculates the exact values of these overhead weights for any Substrate chain that supports it. ## How does it work? -The benchmark consists of two parts; the [`BlockExecutionWeight`] and the [`ExtrinsicBaseWeight`]. +The benchmark consists of two parts; the [`BlockExecutionWeight`] and the [`ExtrinsicBaseWeight`]. Both are executed sequentially when invoking the command. ## BlockExecutionWeight -The block execution weight is defined as the weight that it takes to execute an *empty block*. -It is measured by constructing an empty block and measuring its executing time. -The result are written to a `block_weights.rs` file which is created from a template. -The file will contain the concrete weight value and various statistics about the measurements. For example: +The block execution weight is defined as the weight that it takes to execute an *empty block*. +It is measured by constructing an empty block and measuring its execution time. +The results are written to a `block_weights.rs` file which is created from a template. +The file will contain the concrete weight value and various statistics about the measurements. For example: ```rust /// Time to execute an empty block. /// Calculated by multiplying the *Average* with `1` and adding `0`. @@ -30,21 +30,21 @@ The file will contain the concrete weight value and various statistics about the /// 99th: 3_631_863 /// 95th: 3_595_674 /// 75th: 3_526_435 -pub const BlockExecutionWeight: Weight = 3_532_484 * WEIGHT_PER_NANOS; +pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul(3_532_484); ``` -In this example it takes 3.5 ms to execute an empty block. That means that it always takes at least 3.5 ms to execute *any* block. +In this example it takes 3.5 ms to execute an empty block. That means that it always takes at least 3.5 ms to execute *any* block. This constant weight is therefore added to each block to ensure that Substrate budgets enough time to execute it. ## ExtrinsicBaseWeight -The extrinsic base weight is defined as the weight that it takes to execute an *empty* extrinsic. -An *empty* extrinsic is also called a *NO-OP*. It does nothing and is the equivalent to the empty block form above. +The extrinsic base weight is defined as the weight that it takes to execute an *empty* extrinsic. +An *empty* extrinsic is also called a *NO-OP*. It does nothing and is the equivalent of the empty block from above. The benchmark now constructs a block which is filled with only NO-OP extrinsics. -This block is then executed many times and the weights are measured. -The result is divided by the number of extrinsics in that block and the results are written to `extrinsic_weights.rs`. +This block is then executed many times and the weights are measured. +The result is divided by the number of extrinsics in that block and the results are written to `extrinsic_weights.rs`. -The relevant section in the output file looks like this: +The relevant section in the output file looks like this: ```rust /// Time to execute a NO-OP extrinsic, for example `System::remark`. /// Calculated by multiplying the *Average* with `1` and adding `0`. @@ -59,10 +59,10 @@ The relevant section in the output file looks like this: /// 99th: 68_758 /// 95th: 67_843 /// 75th: 67_749 -pub const ExtrinsicBaseWeight: Weight = 67_745 * WEIGHT_PER_NANOS; +pub const ExtrinsicBaseWeight: Weight = Weight::from_ref_time(67_745 * WEIGHT_PER_NANOS); ``` -In this example it takes 67.7 µs to execute a NO-OP extrinsic. 
That means that it always takes at least 67.7 µs to execute *any* extrinsic. +In this example it takes 67.7 µs to execute a NO-OP extrinsic. That means that it always takes at least 67.7 µs to execute *any* extrinsic. This constant weight is therefore added to each extrinsic to ensure that Substrate budgets enough time to execute it. ## Invocation @@ -76,48 +76,48 @@ Output: ```pre # BlockExecutionWeight Running 10 warmups... -Executing block 100 times +Executing block 100 times Per-block execution overhead [ns]: Total: 353248430 Min: 3508416, Max: 3680498 Average: 3532484, Median: 3522111, Stddev: 27070.23 -Percentiles 99th, 95th, 75th: 3631863, 3595674, 3526435 +Percentiles 99th, 95th, 75th: 3631863, 3595674, 3526435 Writing weights to "block_weights.rs" # Setup -Building block, this takes some time... +Building block, this takes some time... Extrinsics per block: 12000 # ExtrinsicBaseWeight Running 10 warmups... -Executing block 100 times +Executing block 100 times Per-extrinsic execution overhead [ns]: Total: 6774590 Min: 67561, Max: 69855 Average: 67745, Median: 67701, Stddev: 264.68 -Percentiles 99th, 95th, 75th: 68758, 67843, 67749 +Percentiles 99th, 95th, 75th: 68758, 67843, 67749 Writing weights to "extrinsic_weights.rs" ``` -The complete command for Polkadot looks like this: +The complete command for Polkadot looks like this: ```sh cargo run --profile=production -- benchmark overhead --chain=polkadot-dev --execution=wasm --wasm-execution=compiled --weight-path=runtime/polkadot/constants/src/weights/ ``` -This will overwrite the the [block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs) and [extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs) files in the Polkadot runtime directory. -You can try the same for *Rococo* and to see that the results slightly differ. +This will overwrite the the [block_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/block_weights.rs) and [extrinsic_weights.rs](https://github.com/paritytech/polkadot/blob/c254e5975711a6497af256f6831e9a6c752d28f5/runtime/polkadot/constants/src/weights/extrinsic_weights.rs) files in the Polkadot runtime directory. +You can try the same for *Rococo* and to see that the results slightly differ. 👉 It is paramount to use `--profile=production`, `--execution=wasm` and `--wasm-execution=compiled` as the results are otherwise useless. ## Output Interpretation -Lower is better. The less weight the execution overhead needs, the better. -Since the weights of the overhead is charged per extrinsic and per block, a larger weight results in less extrinsics per block. +Lower is better. The less weight the execution overhead needs, the better. +Since the weights of the overhead is charged per extrinsic and per block, a larger weight results in less extrinsics per block. Minimizing this is important to have a large transaction throughput. ## Arguments -- `--chain` / `--dev` Set the chain specification. -- `--weight-path` Set the output directory or file to write the weights to. +- `--chain` / `--dev` Set the chain specification. +- `--weight-path` Set the output directory or file to write the weights to. - `--repeat` Set the repetitions of both benchmarks. 
- `--warmup` Set the rounds of warmup before measuring. - `--execution` Should be set to `wasm` for correct results. diff --git a/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/utils/frame/benchmarking-cli/src/overhead/weights.hbs index f8312b0052592..d07533e9dbaa8 100644 --- a/utils/frame/benchmarking-cli/src/overhead/weights.hbs +++ b/utils/frame/benchmarking-cli/src/overhead/weights.hbs @@ -52,11 +52,12 @@ parameter_types! { /// 99th: {{underscore stats.p99}} /// 95th: {{underscore stats.p95}} /// 75th: {{underscore stats.p75}} - pub const {{long_name}}Weight: Weight = {{underscore weight}} * WEIGHT_PER_NANOS; + pub const {{long_name}}Weight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul({{underscore weight}}); } #[cfg(test)] mod test_weights { + use super::*; use frame_support::weights::constants; /// Checks that the weight exists and is sane. @@ -68,14 +69,14 @@ mod test_weights { {{#if (eq short_name "block")}} // At least 100 µs. - assert!(w >= 100 * constants::WEIGHT_PER_MICROS, "Weight should be at least 100 µs."); + assert!(w >= Weight::from_ref_time(100 * constants::WEIGHT_PER_MICROS), "Weight should be at least 100 µs."); // At most 50 ms. - assert!(w <= 50 * constants::WEIGHT_PER_MILLIS, "Weight should be at most 50 ms."); + assert!(w <= Weight::from_ref_time(50 * constants::WEIGHT_PER_MILLIS), "Weight should be at most 50 ms."); {{else}} // At least 10 µs. - assert!(w >= 10 * constants::WEIGHT_PER_MICROS, "Weight should be at least 10 µs."); + assert!(w >= Weight::from_ref_time(10 * constants::WEIGHT_PER_MICROS), "Weight should be at least 10 µs."); // At most 1 ms. - assert!(w <= constants::WEIGHT_PER_MILLIS, "Weight should be at most 1 ms."); + assert!(w <= Weight::from_ref_time(constants::WEIGHT_PER_MILLIS), "Weight should be at most 1 ms."); {{/if}} } } diff --git a/utils/frame/benchmarking-cli/src/pallet/template.hbs b/utils/frame/benchmarking-cli/src/pallet/template.hbs index 688ad4d3934f5..bf18e23367bc9 100644 --- a/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -15,7 +15,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::Weight}; +use frame_support::{traits::Get, weights::{RefTimeWeight, Weight}}; use sp_std::marker::PhantomData; /// Weight functions for `{{pallet}}`. 
@@ -33,22 +33,22 @@ impl {{pallet}}::WeightInfo for WeightInfo { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) + Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).scalar_saturating_mul({{cw.name}} as RefTimeWeight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as RefTimeWeight).saturating_mul({{cr.name}} as RefTimeWeight))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as RefTimeWeight)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight))) {{/each}} } {{/each}} From 7c1a39f0e526a181c7e454322723768b83ab5ef8 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 31 Aug 2022 14:55:46 +0200 Subject: [PATCH 076/348] Simplify trait bounds in network to prepare for collator-rpc (#12082) * Hack towards PoC * Abstract away runtime requirement * blockchainevents * Remove bitswap * Remove unused sync more * Remove unused features in network * Re-enable bitswap change * Remove `Chain` trait bound * Reimplement blockchain-rpc-events * Move network to cumulus * Make AuthorityDiscovery async * Remove `ProofProvider` requirement from network behaviour * Extract bitswap * Adjustments after merge * Remove HeaderMetadata trait from network * Introduce NetworkHeaderBackend * Add comments * Improve comments * Move NetworkHeaderBackend to new module * Improve naming, remove redundand send + sync * Clean up generics * Fix CI * Improve comment and readability * Remove NetworkHeaderBackend * Fix Cargo.lock Co-authored-by: Sebastian Kunert --- Cargo.lock | 2 + client/authority-discovery/Cargo.toml | 1 + client/authority-discovery/src/lib.rs | 13 +-- client/authority-discovery/src/worker.rs | 44 +++++++-- .../authority-discovery/src/worker/tests.rs | 1 + client/network/common/Cargo.toml | 1 + client/network/src/behaviour.rs | 71 +++----------- client/network/src/bitswap.rs | 92 +++++++++++++++++-- client/network/src/config.rs | 5 +- client/network/src/protocol.rs | 21 +---- client/network/src/service.rs | 58 ++---------- client/network/src/service/tests.rs | 1 + client/network/test/src/lib.rs | 1 + client/service/src/builder.rs | 9 +- 14 files changed, 165 insertions(+), 155 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d90de9da4f579..b0d0206a35300 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7742,6 +7742,7 @@ dependencies = [ name = "sc-authority-discovery" version = "0.10.0-dev" dependencies = [ + "async-trait", "futures", "futures-timer", 
"ip_network", @@ -8451,6 +8452,7 @@ dependencies = [ "sc-peerset", "serde", "smallvec", + "sp-blockchain", "sp-consensus", "sp-finality-grandpa", "sp-runtime", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index b7a140f24a111..d9e9df4f2a97c 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -35,6 +35,7 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-keystore = { version = "0.12.0", path = "../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } +async-trait = "0.1.56" [dev-dependencies] quickcheck = { version = "1.0.3", default-features = false } diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index f0ef374551617..db3802b168fe5 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -29,7 +29,7 @@ pub use crate::{ service::Service, - worker::{NetworkProvider, Role, Worker}, + worker::{AuthorityDiscovery, NetworkProvider, Role, Worker}, }; use std::{collections::HashSet, sync::Arc, time::Duration}; @@ -40,10 +40,9 @@ use futures::{ }; use libp2p::{Multiaddr, PeerId}; -use sc_client_api::blockchain::HeaderBackend; use sc_network_common::protocol::event::DhtEvent; -use sp_api::ProvideRuntimeApi; -use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId}; +use sp_authority_discovery::AuthorityId; +use sp_blockchain::HeaderBackend; use sp_runtime::traits::Block as BlockT; mod error; @@ -122,8 +121,7 @@ pub fn new_worker_and_service( where Block: BlockT + Unpin + 'static, Network: NetworkProvider, - Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: AuthorityDiscoveryApi, + Client: AuthorityDiscovery + Send + Sync + 'static + HeaderBackend, DhtEventStream: Stream + Unpin, { new_worker_and_service_with_config( @@ -150,8 +148,7 @@ pub fn new_worker_and_service_with_config + Send + Sync + 'static + HeaderBackend, - >::Api: AuthorityDiscoveryApi, + Client: AuthorityDiscovery + HeaderBackend + 'static, DhtEventStream: Stream + Unpin, { let (to_worker, from_service) = mpsc::channel(0); diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index f13a1cf33581b..4121b64e00b9b 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -43,15 +43,16 @@ use log::{debug, error, log_enabled}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; use prost::Message; use rand::{seq::SliceRandom, thread_rng}; -use sc_client_api::blockchain::HeaderBackend; use sc_network_common::{ protocol::event::DhtEvent, service::{KademliaKey, NetworkDHTProvider, NetworkSigner, NetworkStateInfo, Signature}, }; -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiError, ProvideRuntimeApi}; use sp_authority_discovery::{ AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature, }; +use sp_blockchain::HeaderBackend; + use sp_core::crypto::{key_types, CryptoTypePublicPair, Pair}; use sp_keystore::CryptoStore; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; @@ -150,12 +151,35 @@ pub struct Worker { phantom: PhantomData, } +/// Wrapper for [`AuthorityDiscoveryApi`](sp_authority_discovery::AuthorityDiscoveryApi). Can be +/// be implemented by any struct without dependency on the runtime. 
+#[async_trait::async_trait] +pub trait AuthorityDiscovery { + /// Retrieve authority identifiers of the current and next authority set. + async fn authorities(&self, at: Block::Hash) + -> std::result::Result, ApiError>; +} + +#[async_trait::async_trait] +impl AuthorityDiscovery for T +where + T: ProvideRuntimeApi + Send + Sync, + T::Api: AuthorityDiscoveryApi, + Block: BlockT, +{ + async fn authorities( + &self, + at: Block::Hash, + ) -> std::result::Result, ApiError> { + self.runtime_api().authorities(&BlockId::Hash(at)) + } +} + impl Worker where Block: BlockT + Unpin + 'static, Network: NetworkProvider, - Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: AuthorityDiscoveryApi, + Client: AuthorityDiscovery + HeaderBackend + 'static, DhtEventStream: Stream + Unpin, { /// Construct a [`Worker`]. @@ -354,7 +378,7 @@ where } async fn refill_pending_lookups_queue(&mut self) -> Result<()> { - let id = BlockId::hash(self.client.info().best_hash); + let best_hash = self.client.info().best_hash; let local_keys = match &self.role { Role::PublishAndDiscover(key_store) => key_store @@ -367,8 +391,8 @@ where let mut authorities = self .client - .runtime_api() - .authorities(&id) + .authorities(best_hash) + .await .map_err(|e| Error::CallingRuntime(e.into()))? .into_iter() .filter(|id| !local_keys.contains(id.as_ref())) @@ -574,10 +598,10 @@ where .into_iter() .collect::>(); - let id = BlockId::hash(client.info().best_hash); + let best_hash = client.info().best_hash; let authorities = client - .runtime_api() - .authorities(&id) + .authorities(best_hash) + .await .map_err(|e| Error::CallingRuntime(e.into()))? .into_iter() .map(Into::into) diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 7b0ee45833e19..8e7a221877574 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -32,6 +32,7 @@ use futures::{ use libp2p::{core::multiaddr, identity::Keypair, PeerId}; use prometheus_endpoint::prometheus::default_registry; +use sc_client_api::HeaderBackend; use sc_network_common::service::{KademliaKey, Signature, SigningError}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_keystore::{testing::KeyStore, CryptoStore}; diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index ea50c8b794ccb..47d43e8b4b03f 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -32,4 +32,5 @@ serde = { version = "1.0.136", features = ["derive"] } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } thiserror = "1.0" diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 4ff415788f4ea..4177c2452c7f7 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -39,7 +39,6 @@ use libp2p::{ }; use log::debug; -use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::import_queue::{IncomingBlock, Origin}; use sc_network_common::{ config::ProtocolId, @@ -47,7 +46,7 @@ use sc_network_common::{ request_responses::{IfDisconnected, ProtocolConfig, RequestFailure}, }; use sc_peerset::PeersetHandle; -use sp_blockchain::{HeaderBackend, HeaderMetadata}; +use sp_blockchain::HeaderBackend; 
use sp_consensus::BlockOrigin; use sp_runtime::{ traits::{Block as BlockT, NumberFor}, @@ -69,13 +68,7 @@ pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, R pub struct Behaviour where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { /// All the substrate-specific protocols. substrate: Protocol, @@ -85,7 +78,7 @@ where /// Discovers nodes of the network. discovery: DiscoveryBehaviour, /// Bitswap server for blockchain data. - bitswap: Toggle>, + bitswap: Toggle>, /// Generic request-response protocols. request_responses: request_responses::RequestResponsesBehaviour, @@ -208,13 +201,7 @@ pub enum BehaviourOut { impl Behaviour where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { /// Builds a new `Behaviour`. pub fn new( @@ -225,7 +212,7 @@ where block_request_protocol_config: ProtocolConfig, state_request_protocol_config: ProtocolConfig, warp_sync_protocol_config: Option, - bitswap: Option>, + bitswap: Option>, light_client_request_protocol_config: ProtocolConfig, // All remaining request protocol configs. mut request_response_protocols: Vec, @@ -352,13 +339,7 @@ fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { impl NetworkBehaviourEventProcess for Behaviour where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { fn inject_event(&mut self, event: void::Void) { void::unreachable(event) @@ -368,13 +349,7 @@ where impl NetworkBehaviourEventProcess> for Behaviour where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { fn inject_event(&mut self, event: CustomMessageOutcome) { match event { @@ -483,13 +458,7 @@ where impl NetworkBehaviourEventProcess for Behaviour where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { fn inject_event(&mut self, event: request_responses::Event) { match event { @@ -515,13 +484,7 @@ where impl NetworkBehaviourEventProcess for Behaviour where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { fn inject_event(&mut self, event: peer_info::PeerInfoEvent) { let peer_info::PeerInfoEvent::Identified { @@ -548,13 +511,7 @@ where impl NetworkBehaviourEventProcess for Behaviour where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { fn inject_event(&mut self, out: DiscoveryOut) { match out { @@ -592,13 +549,7 @@ where impl Behaviour where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { fn poll( &mut self, diff --git a/client/network/src/bitswap.rs b/client/network/src/bitswap.rs index 2dab45adc5618..52fa0c36caedf 100644 --- a/client/network/src/bitswap.rs +++ b/client/network/src/bitswap.rs @@ -178,24 +178,99 @@ impl Prefix { } } +/// Bitswap trait +pub trait BitswapT { + /// Get single indexed transaction by content hash. 
+ /// + /// Note that this will only fetch transactions + /// that are indexed by the runtime with `storage_index_transaction`. + fn indexed_transaction( + &self, + hash: ::Hash, + ) -> sp_blockchain::Result>>; + + /// Queue of blocks ready to be sent out on `poll()` + fn ready_blocks(&mut self) -> &mut VecDeque<(PeerId, BitswapMessage)>; +} + /// Network behaviour that handles sending and receiving IPFS blocks. -pub struct Bitswap { +struct BitswapInternal { client: Arc, ready_blocks: VecDeque<(PeerId, BitswapMessage)>, _block: PhantomData, } -impl Bitswap { +impl BitswapInternal { /// Create a new instance of the bitswap protocol handler. pub fn new(client: Arc) -> Self { Self { client, ready_blocks: Default::default(), _block: PhantomData::default() } } } -impl NetworkBehaviour for Bitswap +impl BitswapT for BitswapInternal +where + Block: BlockT, + Client: BlockBackend, +{ + fn indexed_transaction( + &self, + hash: ::Hash, + ) -> sp_blockchain::Result>> { + self.client.indexed_transaction(&hash) + } + + fn ready_blocks(&mut self) -> &mut VecDeque<(PeerId, BitswapMessage)> { + &mut self.ready_blocks + } +} + +/// Wrapper for bitswap trait object implement NetworkBehaviour +pub struct Bitswap { + inner: Box + Sync + Send>, +} + +impl Bitswap { + /// Create new Bitswap wrapper + pub fn from_client + Send + Sync + 'static>( + client: Arc, + ) -> Self { + let inner = Box::new(BitswapInternal::new(client)) as Box<_>; + Self { inner } + } +} + +impl BitswapT for Bitswap { + fn indexed_transaction( + &self, + hash: ::Hash, + ) -> sp_blockchain::Result>> { + self.inner.indexed_transaction(hash) + } + + fn ready_blocks(&mut self) -> &mut VecDeque<(PeerId, BitswapMessage)> { + self.inner.ready_blocks() + } +} + +impl BitswapT for Box +where + T: BitswapT, +{ + fn indexed_transaction( + &self, + hash: ::Hash, + ) -> sp_blockchain::Result>> { + T::indexed_transaction(self, hash) + } + + fn ready_blocks(&mut self) -> &mut VecDeque<(PeerId, BitswapMessage)> { + T::ready_blocks(self) + } +} + +impl NetworkBehaviour for Bitswap where B: BlockT, - Client: BlockBackend + Send + Sync + 'static, { type ConnectionHandler = OneShotHandler; type OutEvent = void::Void; @@ -214,10 +289,11 @@ where HandlerEvent::Request(msg) => msg, }; trace!(target: LOG_TARGET, "Received request: {:?} from {}", request, peer); - if self.ready_blocks.len() > MAX_RESPONSE_QUEUE { + if self.ready_blocks().len() > MAX_RESPONSE_QUEUE { debug!(target: LOG_TARGET, "Ignored request: queue is full"); return } + let mut response = BitswapMessage { wantlist: None, blocks: Default::default(), @@ -253,7 +329,7 @@ where } let mut hash = B::Hash::default(); hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]); - let transaction = match self.client.indexed_transaction(&hash) { + let transaction = match self.indexed_transaction(hash) { Ok(ex) => ex, Err(e) => { error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e); @@ -292,7 +368,7 @@ where } } trace!(target: LOG_TARGET, "Response: {:?}", response); - self.ready_blocks.push_back((peer, response)); + self.ready_blocks().push_back((peer, response)); } fn poll( @@ -300,7 +376,7 @@ where _ctx: &mut Context, _: &mut impl PollParameters, ) -> Poll> { - if let Some((peer_id, message)) = self.ready_blocks.pop_front() { + if let Some((peer_id, message)) = self.ready_blocks().pop_front() { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler: NotifyHandler::Any, diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 
52fa28e76e207..521aa42827563 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -31,7 +31,7 @@ pub use sc_network_common::{ pub use libp2p::{build_multiaddr, core::PublicKey, identity}; -use crate::ExHashT; +use crate::{bitswap::Bitswap, ExHashT}; use core::{fmt, iter}; use futures::future; @@ -80,6 +80,9 @@ where /// Client that contains the blockchain. pub chain: Arc, + /// Bitswap block request protocol implementation. + pub bitswap: Option>, + /// Pool of transactions. /// /// The network worker will fetch transactions from this object in order to propagate them on diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 351e7d207ad1e..64794538999b0 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -40,7 +40,7 @@ use message::{ }; use notifications::{Notifications, NotificationsOut}; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; -use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; +use sc_client_api::HeaderBackend; use sc_consensus::import_queue::{BlockImportError, BlockImportStatus, IncomingBlock, Origin}; use sc_network_common::{ config::ProtocolId, @@ -56,7 +56,6 @@ use sc_network_common::{ }, }; use sp_arithmetic::traits::SaturatedConversion; -use sp_blockchain::HeaderMetadata; use sp_consensus::BlockOrigin; use sp_runtime::{ generic::BlockId, @@ -262,13 +261,7 @@ impl BlockAnnouncesHandshake { impl Protocol where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { /// Create a new instance. pub fn new( @@ -373,7 +366,7 @@ where let block_announces_protocol = { let genesis_hash = - chain.block_hash(0u32.into()).ok().flatten().expect("Genesis block exists; qed"); + chain.hash(0u32.into()).ok().flatten().expect("Genesis block exists; qed"); if let Some(fork_id) = fork_id { format!("/{}/{}/block-announces/1", hex::encode(genesis_hash), fork_id) } else { @@ -1318,13 +1311,7 @@ pub enum CustomMessageOutcome { impl NetworkBehaviour for Protocol where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { type ConnectionHandler = ::ConnectionHandler; type OutEvent = CustomMessageOutcome; diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 68ac4b8db8a7d..7a196da25260a 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -29,7 +29,6 @@ use crate::{ behaviour::{self, Behaviour, BehaviourOut}, - bitswap::Bitswap, config::{Params, TransportConfig}, discovery::DiscoveryConfig, error::Error, @@ -59,7 +58,6 @@ use libp2p::{ use log::{debug, error, info, trace, warn}; use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; -use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_network_common::{ config::MultiaddrWithPeerId, @@ -75,7 +73,7 @@ use sc_network_common::{ }; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_blockchain::{HeaderBackend, HeaderMetadata}; +use sp_blockchain::HeaderBackend; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{ borrow::Cow, @@ -137,13 +135,7 @@ impl NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend - + BlockBackend - + 
HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: sp_blockchain::HeaderBackend + 'static, { /// Creates the network service. /// @@ -220,7 +212,7 @@ where params.protocol_id.clone(), params .chain - .block_hash(0u32.into()) + .hash(0u32.into()) .ok() .flatten() .expect("Genesis block exists; qed"), @@ -374,7 +366,6 @@ where }; let behaviour = { - let bitswap = params.network_config.ipfs_server.then(|| Bitswap::new(params.chain)); let result = Behaviour::new( protocol, user_agent, @@ -383,7 +374,7 @@ where params.block_request_protocol_config, params.state_request_protocol_config, params.warp_sync_protocol_config, - bitswap, + params.bitswap, params.light_client_request_protocol_config, params.network_config.request_response_protocols, peerset_handle.clone(), @@ -1297,13 +1288,7 @@ pub struct NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. external_addresses: Arc>>, @@ -1336,13 +1321,7 @@ impl Future for NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { type Output = (); @@ -1375,7 +1354,6 @@ where Poll::Ready(None) => return Poll::Ready(()), Poll::Pending => break, }; - match msg { ServiceToWorkerMsg::AnnounceBlock(hash, data) => this .network_service @@ -1988,13 +1966,7 @@ impl Unpin for NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { } @@ -2002,13 +1974,7 @@ where struct NetworkLink<'a, B, Client> where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { protocol: &'a mut Swarm>, } @@ -2016,13 +1982,7 @@ where impl<'a, B, Client> Link for NetworkLink<'a, B, Client> where B: BlockT, - Client: HeaderBackend - + BlockBackend - + HeaderMetadata - + ProofProvider - + Send - + Sync - + 'static, + Client: HeaderBackend + 'static, { fn blocks_processed( &mut self, diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index e2fe58423abfe..e0d8798aef91e 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -146,6 +146,7 @@ fn build_test_full_node( import_queue, chain_sync: Box::new(chain_sync), metrics_registry: None, + bitswap: None, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index fbe56f463d0f3..323af13943de7 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -883,6 +883,7 @@ where import_queue, chain_sync: Box::new(chain_sync), metrics_registry: None, + bitswap: None, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index de04af259600b..f03ba6de2866d 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -37,7 +37,7 @@ use sc_client_db::{Backend, DatabaseSettings}; use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use 
sc_keystore::LocalKeystore; -use sc_network::{config::SyncMode, NetworkService}; +use sc_network::{bitswap::Bitswap, config::SyncMode, NetworkService}; use sc_network_common::{ service::{NetworkStateInfo, NetworkStatusProvider, NetworkTransaction}, sync::warp::WarpSyncProvider, @@ -711,7 +711,6 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { /// An optional warp sync provider. pub warp_sync: Option>>, } - /// Build the network service, the network status sinks and an RPC sender. pub fn build_network( params: BuildNetworkParams, @@ -857,6 +856,7 @@ where fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), + bitswap: config.network.ipfs_server.then(|| Bitswap::from_client(client.clone())), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_request_protocol_config, state_request_protocol_config, @@ -923,6 +923,11 @@ where pub struct NetworkStarter(oneshot::Sender<()>); impl NetworkStarter { + /// Create a new NetworkStarter + pub fn new(sender: oneshot::Sender<()>) -> Self { + NetworkStarter(sender) + } + /// Start the network. Call this after all sub-components have been initialized. /// /// > **Note**: If you don't call this function, the networking will not work. From 435218f5c4d0045d92d094641c915c31513cf2bf Mon Sep 17 00:00:00 2001 From: Liu-Cheng Xu Date: Thu, 1 Sep 2022 04:31:05 +0800 Subject: [PATCH 077/348] Only log when the transaction is actually to be propagated (#12158) The logging before is confusing as it says Propagating but it's not in fact when gossip_enabled is false. Now it's also consistent with `propagate_transactions` below. --- client/network/src/transactions.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index f7e4d774ca812..f557ec0cec7c7 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -425,11 +425,11 @@ impl TransactionsHandler { /// Propagate one transaction. 
pub fn propagate_transaction(&mut self, hash: &H) { - debug!(target: "sync", "Propagating transaction [{:?}]", hash); // Accept transactions only when enabled if !self.gossip_enabled.load(Ordering::Relaxed) { return } + debug!(target: "sync", "Propagating transaction [{:?}]", hash); if let Some(transaction) = self.transaction_pool.transaction(hash) { let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); self.transaction_pool.on_broadcasted(propagated_to); From 6fbcc571ddf42fc62306e5ac7f5bc8e22a7757e7 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Thu, 1 Sep 2022 10:45:20 +0200 Subject: [PATCH 078/348] Uniques: Reset approved account after transfer (#12145) * reset approved account * wrap at 100 * doc * fmt * Update frame/uniques/src/tests.rs Co-authored-by: Oliver Tale-Yazdi * new test * Update frame/uniques/src/lib.rs Co-authored-by: Keith Yeung * fmt Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Keith Yeung --- frame/uniques/src/functions.rs | 6 ++++++ frame/uniques/src/lib.rs | 4 ++++ frame/uniques/src/tests.rs | 38 ++++++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+) diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index 107214558307f..4e68f1139420d 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -48,6 +48,12 @@ impl, I: 'static> Pallet { Account::::insert((&dest, &collection, &item), ()); let origin = details.owner; details.owner = dest; + + // The approved account has to be reset to None, because otherwise pre-approve attack would + // be possible, where the owner can approve his second account before making the transaction + // and then claiming the item back. + details.approved = None; + Item::::insert(&collection, &item, &details); ItemPriceOf::::remove(&collection, &item); diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 70f10ca4f8b39..3b0cf6dc673c9 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -606,6 +606,8 @@ pub mod pallet { /// Move an item from the sender account to another. /// + /// This resets the approved account of the item. + /// /// Origin must be Signed and the signing account must be either: /// - the Admin of the `collection`; /// - the Owner of the `item`; @@ -914,6 +916,8 @@ pub mod pallet { /// - `item`: The item of the item to be approved for delegated transfer. /// - `delegate`: The account to delegate permission to transfer the item. /// + /// Important NOTE: The `approved` account gets reset after each transfer. + /// /// Emits `ApprovedTransfer` on success. /// /// Weight: `O(1)` diff --git a/frame/uniques/src/tests.rs b/frame/uniques/src/tests.rs index 8b1d00d7ba0c7..85d1bec574cf0 100644 --- a/frame/uniques/src/tests.rs +++ b/frame/uniques/src/tests.rs @@ -557,6 +557,44 @@ fn approval_lifecycle_works() { }); } +#[test] +fn approved_account_gets_reset_after_transfer() { + new_test_ext().execute_with(|| { + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + + assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 5)); + + // this shouldn't work because we have just transfered the item to another account. 
+ assert_noop!(Uniques::transfer(Origin::signed(3), 0, 42, 4), Error::::NoPermission); + // The new owner can transfer fine: + assert_ok!(Uniques::transfer(Origin::signed(5), 0, 42, 6)); + }); +} + +#[test] +fn approved_account_gets_reset_after_buy_item() { + new_test_ext().execute_with(|| { + let item = 1; + let price = 15; + + Balances::make_free_balance_be(&2, 100); + + assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(Origin::signed(1), 0, item, 1)); + assert_ok!(Uniques::approve_transfer(Origin::signed(1), 0, item, 5)); + + assert_ok!(Uniques::set_price(Origin::signed(1), 0, item, Some(price), None)); + + assert_ok!(Uniques::buy_item(Origin::signed(2), 0, item, price)); + + // this shouldn't work because the item has been bough and the approved account should be + // reset. + assert_noop!(Uniques::transfer(Origin::signed(5), 0, item, 4), Error::::NoPermission); + }); +} + #[test] fn cancel_approval_works() { new_test_ext().execute_with(|| { From 4f8207d80868eba5bafabef179f991ccc12fc555 Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Thu, 1 Sep 2022 12:31:32 +0200 Subject: [PATCH 079/348] Alliance pallet: add force_set_members instead of init_members function (#11997) * Alliance pallet: add force_set_members instead of init_members function * benchmark with witness data * remove invalid limit for clear * Apply suggestions from code review Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * Revert "remove invalid limit for clear" This reverts commit b654d6834c321541b5aa4c8589937a233ade411f. * compile constructor only for test * Update comments for force_set_members Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * ".git/.scripts/bench-bot.sh" pallet dev pallet_alliance * benchmark - founders count range * Revert "benchmark - founders count range" This reverts commit 744178f7680e5dc6d5b870bf9c4ce601990ab13b. * witness members count instead votable members count * update the doc * use decode_len for witness data checks * change witness data member count to voting member count; update clear limits * ".git/.scripts/bench-bot.sh" pallet dev pallet_alliance * merge master * fixes after merge master * revert to cb3e63 * disband alliance and return deposits * revert debug changes * weights * update docs * update test comments * Apply Joe suggestions from code review Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * rename event from AllianceDisband to AllianceDisbanded * ".git/.scripts/bench-bot.sh" pallet dev pallet_alliance Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: command-bot <> --- bin/node/runtime/src/impls.rs | 12 +- frame/alliance/README.md | 2 +- frame/alliance/src/benchmarking.rs | 174 ++++++++++++++++-- frame/alliance/src/lib.rs | 154 +++++++++++++--- frame/alliance/src/mock.rs | 56 +++++- frame/alliance/src/tests.rs | 183 ++++++++++++++++--- frame/alliance/src/types.rs | 31 ++++ frame/alliance/src/weights.rs | 280 ++++++++++++++++------------- 8 files changed, 711 insertions(+), 181 deletions(-) diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 510955a5b7b3e..4d58abc81d16d 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -18,7 +18,8 @@ //! Some configurable implementations as associated type for the substrate runtime. 
use crate::{ - AccountId, AllianceMotion, Assets, Authorship, Balances, Call, Hash, NegativeImbalance, Runtime, + AccountId, AllianceCollective, AllianceMotion, Assets, Authorship, Balances, Call, Hash, + NegativeImbalance, Runtime, }; use frame_support::{ pallet_prelude::*, @@ -112,6 +113,15 @@ impl ProposalProvider for AllianceProposalProvider { fn proposal_of(proposal_hash: Hash) -> Option { AllianceMotion::proposal_of(proposal_hash) } + + fn proposals() -> Vec { + AllianceMotion::proposals().into_inner() + } + + fn proposals_count() -> u32 { + pallet_collective::Proposals::::decode_len().unwrap_or(0) + as u32 + } } #[cfg(test)] diff --git a/frame/alliance/README.md b/frame/alliance/README.md index f91475a5984ea..3fcd40b9527ce 100644 --- a/frame/alliance/README.md +++ b/frame/alliance/README.md @@ -66,4 +66,4 @@ to update the Alliance's rule and make announcements. #### Root Calls -- `init_founders` - Initialize the founding members. +- `force_set_members` - Set the members via chain governance. diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index 33cf933aba421..60df1e1a84d37 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -38,6 +38,11 @@ fn assert_last_event, I: 'static>(generic_event: >:: frame_system::Pallet::::assert_last_event(generic_event.into()); } +fn assert_prev_event, I: 'static>(generic_event: >::Event) { + let events = frame_system::Pallet::::events(); + assert_eq!(events.get(events.len() - 2).expect("events expected").event, generic_event.into()); +} + fn cid(input: impl AsRef<[u8]>) -> Cid { use sha2::{Digest, Sha256}; let mut hasher = Sha256::new(); @@ -121,7 +126,13 @@ benchmarks_instance_pallet! { let proposer = founders[0].clone(); let fellows = (0 .. y).map(fellow::).collect::>(); - Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; + Alliance::::force_set_members( + SystemOrigin::Root.into(), + founders, + fellows, + vec![], + Default::default(), + )?; let threshold = m; // Add previous proposals. @@ -167,7 +178,13 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; + Alliance::::force_set_members( + SystemOrigin::Root.into(), + founders, + fellows, + vec![], + Default::default(), + )?; // Threshold is 1 less than the number of members so that one person can vote nay let threshold = m - 1; @@ -230,7 +247,13 @@ benchmarks_instance_pallet! { let founders = (0 .. m).map(founder::).collect::>(); let vetor = founders[0].clone(); - Alliance::::init_members(SystemOrigin::Root.into(), founders, vec![], vec![])?; + Alliance::::force_set_members( + SystemOrigin::Root.into(), + founders, + vec![], + vec![], + Default::default(), + )?; // Threshold is one less than total members so that two nays will disapprove the vote let threshold = m - 1; @@ -276,7 +299,13 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; + Alliance::::force_set_members( + SystemOrigin::Root.into(), + founders, + fellows, + vec![], + Default::default(), + )?; let proposer = members[0].clone(); let voter = members[1].clone(); @@ -356,7 +385,13 @@ benchmarks_instance_pallet! 
{ members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; + Alliance::::force_set_members( + SystemOrigin::Root.into(), + founders, + fellows, + vec![], + Default::default(), + )?; let proposer = members[0].clone(); let voter = members[1].clone(); @@ -442,7 +477,13 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; + Alliance::::force_set_members( + SystemOrigin::Root.into(), + founders, + fellows, + vec![], + Default::default(), + )?; let proposer = members[0].clone(); let voter = members[1].clone(); @@ -513,7 +554,13 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::init_members(SystemOrigin::Root.into(), founders, fellows, vec![])?; + Alliance::::force_set_members( + SystemOrigin::Root.into(), + founders, + fellows, + vec![], + Default::default(), + )?; let proposer = members[0].clone(); let voter = members[1].clone(); @@ -568,21 +615,124 @@ benchmarks_instance_pallet! { assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); } - init_members { - // at least 2 founders - let x in 2 .. T::MaxFounders::get(); + force_set_members { + // at least 1 founders + let x in 1 .. T::MaxFounders::get(); let y in 0 .. T::MaxFellows::get(); let z in 0 .. T::MaxAllies::get(); + let p in 0 .. T::MaxProposals::get(); + let c in 0 .. T::MaxFounders::get() + T::MaxFellows::get(); + let m in 0 .. T::MaxAllies::get(); - let mut founders = (2 .. x).map(founder::).collect::>(); + let mut founders = (0 .. x).map(founder::).collect::>(); + let mut proposer = founders[0].clone(); let mut fellows = (0 .. y).map(fellow::).collect::>(); let mut allies = (0 .. z).map(ally::).collect::>(); + let witness = ForceSetWitness{ + proposals: p, + voting_members: c, + ally_members: m, + }; + let mut old_fellows: Vec = Vec::new(); + let mut old_allies: Vec = Vec::new(); + + let mut cc = c; + if (m > 0 && c == 0) || (p > 0 && c == 0) { + // if total member count `m` greater than zero, + // one voting member required to set non voting members. + // OR + // if some proposals, + // one voting member required to create proposal. + cc = 1; + } - }: _(SystemOrigin::Root, founders.clone(), fellows.clone(), allies.clone()) + // setting the Alliance to disband on the benchmark call + if cc > 0 { + old_fellows = (0..cc).map(fellow::).collect::>(); + old_allies = (0..m).map(ally::).collect::>(); + + // used later for proposal creation. + proposer = old_fellows[0].clone(); + + // set alliance before benchmarked call to include alliance disband. + Alliance::::force_set_members( + SystemOrigin::Root.into(), + vec![old_fellows[0].clone()], + vec![], + vec![], + Default::default(), + )?; + + // using `join_alliance` instead `force_set_members` to join alliance + // to have deposit reserved and bench the worst case scenario. + for fellow in old_fellows.iter().skip(1) { + Alliance::::join_alliance( + SystemOrigin::Signed(fellow.clone()).into() + ).unwrap(); + } + + // elevating allies to have desired voting members count. 
+ for fellow in old_fellows.iter().skip(1) { + Alliance::::elevate_ally( + T::MembershipManager::successful_origin(), + T::Lookup::unlookup(fellow.clone()) + ).unwrap(); + } + + for ally in old_allies.iter() { + Alliance::::join_alliance( + SystemOrigin::Signed(ally.clone()).into() + ).unwrap(); + } + + assert_eq!(Alliance::::votable_members_count(), cc); + assert_eq!(Alliance::::ally_members_count(), m); + } + + // adding proposals to veto on the Alliance reset + for i in 0..p { + let threshold = cc; + let bytes_in_storage = i + size_of::() as u32 + 32; + // proposals should be different so that different proposal hashes are generated + let proposal: T::Proposal = + AllianceCall::::set_rule { rule: rule(vec![i as u8; i as usize]) }.into(); + Alliance::::propose( + SystemOrigin::Signed(proposer.clone()).into(), + threshold, + Box::new(proposal), + bytes_in_storage, + )?; + } + let mut proposals = T::ProposalProvider::proposals(); + if c != cc { + // removing a helper founder from the alliance which should not be + // included in the actual benchmark call. + Alliance::::give_retirement_notice( + SystemOrigin::Signed(proposer.clone()).into() + )?; + System::::set_block_number( + System::::block_number() + T::RetirementPeriod::get() + ); + Alliance::::retire( + SystemOrigin::Signed(proposer.clone()).into() + )?; + // remove a helper founder from fellows list. + old_fellows.remove(0); + } + }: _(SystemOrigin::Root, founders.clone(), fellows.clone(), allies.clone(), witness) verify { founders.sort(); fellows.sort(); allies.sort(); + if !witness.is_zero() { + old_fellows.append(&mut old_allies); + old_fellows.sort(); + proposals.sort(); + assert_prev_event::(Event::AllianceDisbanded { + members: old_fellows, + proposals: proposals, + }.into()); + } assert_last_event::(Event::MembersInitialized { founders: founders.clone(), fellows: fellows.clone(), diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index bc0a119cd54a6..99cb46c9ef58d 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -84,7 +84,7 @@ //! //! #### Root Calls //! -//! - `init_founders` - Initialize the founding members. +//! - `force_set_members` - Set the members via chain governance. #![cfg_attr(not(feature = "std"), no_std)] @@ -172,6 +172,8 @@ impl IdentityVerifier for () { /// The provider of a collective action interface, for example an instance of `pallet-collective`. pub trait ProposalProvider { + /// Add a new proposal. + /// Returns a proposal length and active proposals count if successful. fn propose_proposal( who: AccountId, threshold: u32, @@ -179,6 +181,8 @@ pub trait ProposalProvider { length_bound: u32, ) -> Result<(u32, u32), DispatchError>; + /// Add an aye or nay vote for the sender to the given proposal. + /// Returns true if the sender votes first time if successful. fn vote_proposal( who: AccountId, proposal: Hash, @@ -186,8 +190,11 @@ pub trait ProposalProvider { approve: bool, ) -> Result; + /// Veto a proposal, closing and removing it from the system, regardless of its current state. + /// Returns an active proposals count, which includes removed proposal. fn veto_proposal(proposal_hash: Hash) -> u32; + /// Close a proposal that is either approved, disapproved, or whose voting period has ended. fn close_proposal( proposal_hash: Hash, index: ProposalIndex, @@ -195,7 +202,16 @@ pub trait ProposalProvider { length_bound: u32, ) -> DispatchResultWithPostInfo; + /// Return a proposal of the given hash. 
fn proposal_of(proposal_hash: Hash) -> Option; + + /// Return hashes of all active proposals. + fn proposals() -> Vec; + + // Return count of all active proposals. + // + // Used to check witness data for an extrinsic. + fn proposals_count() -> u32; } /// The various roles that a member can hold. @@ -324,10 +340,10 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// The founders/fellows/allies have already been initialized. - AllianceAlreadyInitialized, /// The Alliance has not been initialized yet, therefore accounts cannot join it. AllianceNotYetInitialized, + /// The Alliance has been initialized, therefore cannot be initialized again. + AllianceAlreadyInitialized, /// Account is already a member. AlreadyMember, /// Account is not a member. @@ -367,12 +383,16 @@ pub mod pallet { TooManyMembers, /// Number of announcements exceeds `MaxAnnouncementsCount`. TooManyAnnouncements, + /// Invalid witness data given. + BadWitness, /// Account already gave retirement notice AlreadyRetiring, /// Account did not give a retirement notice required to retire. RetirementNoticeNotGiven, /// Retirement period has not passed. RetirementPeriodNotPassed, + /// Founders must be provided to initialize the Alliance. + FoundersMissing, } #[pallet::event] @@ -408,6 +428,8 @@ pub mod pallet { UnscrupulousItemAdded { items: Vec> }, /// Accounts or websites have been removed from the list of unscrupulous items. UnscrupulousItemRemoved { items: Vec> }, + /// Alliance disbanded. + AllianceDisbanded { members: Vec, proposals: Vec }, } #[pallet::genesis_config] @@ -451,15 +473,22 @@ pub mod pallet { !Pallet::::has_member(MemberRole::Fellow), "Fellows are already initialized!" ); + assert!( + !self.founders.is_empty(), + "Founders must be provided to initialize the Alliance" + ); let members: BoundedVec = self.fellows.clone().try_into().expect("Too many genesis fellows"); Members::::insert(MemberRole::Fellow, members); } if !self.allies.is_empty() { - // Only allow Allies if the Alliance is "initialized". assert!( - Pallet::::is_initialized(), - "Alliance must have Founders or Fellows to have Allies" + !Pallet::::has_member(MemberRole::Ally), + "Allies are already initialized!" + ); + assert!( + !self.founders.is_empty(), + "Founders must be provided to initialize the Alliance" ); let members: BoundedVec = self.allies.clone().try_into().expect("Too many genesis allies"); @@ -619,24 +648,80 @@ pub mod pallet { } /// Initialize the founders, fellows, and allies. + /// Founders must be provided to initialize the Alliance. + /// + /// Provide witness data to disband current Alliance before initializing new. + /// Alliance must be empty or disband first to initialize new. /// - /// This should only be called once, and must be called by the Root origin. - #[pallet::weight(T::WeightInfo::init_members( + /// Alliance is only disbanded if new member set is not provided. + /// + /// Must be called by the Root origin. + #[pallet::weight(T::WeightInfo::force_set_members( T::MaxFounders::get(), T::MaxFellows::get(), - T::MaxAllies::get() + T::MaxAllies::get(), + witness.proposals, + witness.voting_members, + witness.ally_members, ))] - pub fn init_members( + pub fn force_set_members( origin: OriginFor, founders: Vec, fellows: Vec, allies: Vec, + witness: ForceSetWitness, ) -> DispatchResult { ensure_root(origin)?; - // Cannot be called if the Alliance already has Founders or Fellows. - // TODO: Remove check and allow Root to set members at any time. 
- // https://github.com/paritytech/substrate/issues/11928 + if !witness.is_zero() { + // Disband Alliance by removing all members and returning deposits. + // Veto and remove all active proposals to avoid any unexpected behavior from + // actionable items managed outside of the pallet. Items managed within the pallet, + // like `UnscrupulousWebsites`, are left for the new Alliance to clean up or keep. + + ensure!( + T::ProposalProvider::proposals_count() <= witness.proposals, + Error::::BadWitness + ); + ensure!( + Self::votable_members_count() <= witness.voting_members, + Error::::BadWitness + ); + ensure!( + Self::ally_members_count() <= witness.ally_members, + Error::::BadWitness + ); + + let mut proposals = T::ProposalProvider::proposals(); + for hash in proposals.iter() { + T::ProposalProvider::veto_proposal(*hash); + } + + let mut members = Self::votable_members(); + T::MembershipChanged::change_members_sorted(&[], &members, &[]); + + members.append(&mut Self::members_of(MemberRole::Ally)); + for member in members.iter() { + if let Some(deposit) = DepositOf::::take(&member) { + let err_amount = T::Currency::unreserve(&member, deposit); + debug_assert!(err_amount.is_zero()); + } + } + + Members::::remove(&MemberRole::Founder); + Members::::remove(&MemberRole::Fellow); + Members::::remove(&MemberRole::Ally); + + members.sort(); + proposals.sort(); + Self::deposit_event(Event::AllianceDisbanded { members, proposals }); + } + + if founders.is_empty() { + ensure!(fellows.is_empty() && allies.is_empty(), Error::::FoundersMissing); + // new members set not provided. + return Ok(()) + } ensure!(!Self::is_initialized(), Error::::AllianceAlreadyInitialized); let mut founders: BoundedVec = @@ -665,9 +750,11 @@ pub mod pallet { T::InitializeMembers::initialize_members(&voteable_members); log::debug!( - target: "runtime::alliance", + target: LOG_TARGET, "Initialize alliance founders: {:?}, fellows: {:?}, allies: {:?}", - founders, fellows, allies + founders, + fellows, + allies ); Self::deposit_event(Event::MembersInitialized { @@ -913,7 +1000,9 @@ pub mod pallet { impl, I: 'static> Pallet { /// Check if the Alliance has been initialized. fn is_initialized() -> bool { - Self::has_member(MemberRole::Founder) || Self::has_member(MemberRole::Fellow) + Self::has_member(MemberRole::Founder) || + Self::has_member(MemberRole::Fellow) || + Self::has_member(MemberRole::Ally) } /// Check if a given role has any members. @@ -957,13 +1046,36 @@ impl, I: 'static> Pallet { Self::is_founder(who) || Self::is_fellow(who) } + /// Count of ally members. + fn ally_members_count() -> u32 { + Members::::decode_len(MemberRole::Ally).unwrap_or(0) as u32 + } + + /// Count of all members who have voting rights. + fn votable_members_count() -> u32 { + Members::::decode_len(MemberRole::Founder) + .unwrap_or(0) + .saturating_add(Members::::decode_len(MemberRole::Fellow).unwrap_or(0)) as u32 + } + + /// Get all members of a given role. + fn members_of(role: MemberRole) -> Vec { + Members::::get(role).into_inner() + } + /// Collect all members who have voting rights into one list. 
- fn votable_members_sorted() -> Vec { - let mut founders = Members::::get(MemberRole::Founder).into_inner(); - let mut fellows = Members::::get(MemberRole::Fellow).into_inner(); + fn votable_members() -> Vec { + let mut founders = Self::members_of(MemberRole::Founder); + let mut fellows = Self::members_of(MemberRole::Fellow); founders.append(&mut fellows); - founders.sort(); - founders.into() + founders + } + + /// Collect all members who have voting rights into one sorted list. + fn votable_members_sorted() -> Vec { + let mut members = Self::votable_members(); + members.sort(); + members } /// Add a user to the sorted alliance member set. diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index adc313e28ed7e..f2632ec3db08c 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -18,6 +18,7 @@ //! Test utilities pub use sp_core::H256; +use sp_runtime::traits::Hash; pub use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, @@ -196,6 +197,14 @@ impl ProposalProvider for AllianceProposalProvider { fn proposal_of(proposal_hash: H256) -> Option { AllianceMotion::proposal_of(proposal_hash) } + + fn proposals() -> Vec { + AllianceMotion::proposals().into_inner() + } + + fn proposals_count() -> u32 { + pallet_collective::Proposals::::decode_len().unwrap_or(0) as u32 + } } parameter_types! { @@ -254,7 +263,17 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { - balances: vec![(1, 50), (2, 50), (3, 50), (4, 50), (5, 30), (6, 50), (7, 50)], + balances: vec![ + (1, 50), + (2, 50), + (3, 50), + (4, 50), + (5, 30), + (6, 50), + (7, 50), + (8, 50), + (9, 50), + ], } .assimilate_storage(&mut t) .unwrap(); @@ -296,6 +315,10 @@ pub fn new_test_ext() -> sp_io::TestExternalities { assert_ok!(Identity::set_identity(Origin::signed(5), Box::new(info.clone()))); assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 5, Judgement::KnownGood)); assert_ok!(Identity::set_identity(Origin::signed(6), Box::new(info.clone()))); + assert_ok!(Identity::set_identity(Origin::signed(8), Box::new(info.clone()))); + assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 8, Judgement::KnownGood)); + assert_ok!(Identity::set_identity(Origin::signed(9), Box::new(info.clone()))); + assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 9, Judgement::KnownGood)); // Joining before init should fail. 
assert_noop!( @@ -303,7 +326,13 @@ pub fn new_test_ext() -> sp_io::TestExternalities { Error::::AllianceNotYetInitialized ); - assert_ok!(Alliance::init_members(Origin::root(), vec![1, 2], vec![3], vec![])); + assert_ok!(Alliance::force_set_members( + Origin::root(), + vec![1, 2], + vec![3], + vec![], + Default::default() + )); System::set_block_number(1); }); @@ -323,10 +352,25 @@ pub fn test_cid() -> Cid { Cid::new_v0(&*result) } -pub fn make_proposal(value: u64) -> Call { - Call::System(frame_system::Call::remark { remark: value.encode() }) +pub fn make_remark_proposal(value: u64) -> (Call, u32, H256) { + make_proposal(Call::System(frame_system::Call::remark { remark: value.encode() })) +} + +pub fn make_set_rule_proposal(rule: Cid) -> (Call, u32, H256) { + make_proposal(Call::Alliance(pallet_alliance::Call::set_rule { rule })) +} + +pub fn make_kick_member_proposal(who: u64) -> (Call, u32, H256) { + make_proposal(Call::Alliance(pallet_alliance::Call::kick_member { who })) +} + +pub fn make_proposal(proposal: Call) -> (Call, u32, H256) { + let len: u32 = proposal.using_encoded(|p| p.len() as u32); + let hash = BlakeTwo256::hash_of(&proposal); + (proposal, len, hash) } -pub fn make_set_rule_proposal(rule: Cid) -> Call { - Call::Alliance(pallet_alliance::Call::set_rule { rule }) +pub fn assert_prev_event(event: Event) { + let events = System::events(); + assert_eq!(events.get(events.len() - 2).expect("events expected").event, event); } diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs index 918cfa840c3f0..3bd82a8870efe 100644 --- a/frame/alliance/src/tests.rs +++ b/frame/alliance/src/tests.rs @@ -17,9 +17,7 @@ //! Tests for the alliance pallet. -use sp_runtime::traits::Hash; - -use frame_support::{assert_noop, assert_ok, error::BadOrigin, Hashable}; +use frame_support::{assert_noop, assert_ok, error::BadOrigin}; use frame_system::{EventRecord, Phase}; use super::*; @@ -27,12 +25,156 @@ use crate::mock::*; type AllianceMotionEvent = pallet_collective::Event; +#[test] +fn force_set_members_works() { + new_test_ext().execute_with(|| { + // ensure alliance is set + assert_eq!(Alliance::votable_members_sorted(), vec![1, 2, 3]); + + // creating and proposing proposals + let (proposal, proposal_len, hash) = make_remark_proposal(42); + assert_ok!(Alliance::propose(Origin::signed(1), 3, Box::new(proposal), proposal_len)); + + let (k_proposal, k_proposal_len, k_hash) = make_kick_member_proposal(2); + assert_ok!(Alliance::propose(Origin::signed(1), 3, Box::new(k_proposal), k_proposal_len)); + let mut proposals = vec![hash, k_hash]; + + // give a retirement notice to check later a retiring member not removed + assert_ok!(Alliance::give_retirement_notice(Origin::signed(2))); + assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); + + // join alliance and reserve funds + assert_eq!(Balances::free_balance(9), 40); + assert_ok!(Alliance::join_alliance(Origin::signed(9))); + assert_eq!(Alliance::deposit_of(9), Some(25)); + assert_eq!(Balances::free_balance(9), 15); + assert!(Alliance::is_member_of(&9, MemberRole::Ally)); + + // ensure proposal is listed as active proposal + assert_eq!(::ProposalProvider::proposals(), proposals); + assert_eq!(::ProposalProvider::proposals_count(), 2); + + // fails without root + assert_noop!( + Alliance::force_set_members( + Origin::signed(1), + vec![], + vec![], + vec![], + Default::default() + ), + BadOrigin + ); + + // nothing to do, witness data is default, new members not provided. 
+ assert_ok!(Alliance::force_set_members( + Origin::root(), + vec![], + vec![], + vec![], + Default::default() + )); + + // alliance must be reset first, no witness data. + assert_noop!( + Alliance::force_set_members( + Origin::root(), + vec![8], + vec![], + vec![], + Default::default() + ), + Error::::AllianceAlreadyInitialized, + ); + + // wrong witness data checks + assert_noop!( + Alliance::force_set_members( + Origin::root(), + vec![], + vec![], + vec![], + ForceSetWitness::new(1, 3, 1) + ), + Error::::BadWitness, + ); + assert_noop!( + Alliance::force_set_members( + Origin::root(), + vec![], + vec![], + vec![], + ForceSetWitness::new(2, 1, 1) + ), + Error::::BadWitness, + ); + assert_noop!( + Alliance::force_set_members( + Origin::root(), + vec![], + vec![], + vec![], + ForceSetWitness::new(1, 3, 0) + ), + Error::::BadWitness, + ); + + // founders missing, other members given + assert_noop!( + Alliance::force_set_members( + Origin::root(), + vec![], + vec![4], + vec![2], + ForceSetWitness::new(2, 3, 1) + ), + Error::::FoundersMissing, + ); + + // success call + assert_ok!(Alliance::force_set_members( + Origin::root(), + vec![8, 5], + vec![4], + vec![2], + ForceSetWitness::new(2, 3, 1) + )); + + // assert new set of voting members + assert_eq!(Alliance::votable_members_sorted(), vec![4, 5, 8]); + // assert new ally member + assert!(Alliance::is_ally(&2)); + // assert a retiring member from previous Alliance not removed + assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); + // assert old alliance disband. + assert!(!Alliance::is_member(&1)); + assert!(!Alliance::is_member(&3)); + assert!(!Alliance::is_member(&9)); + // deposit unreserved + assert_eq!(Balances::free_balance(9), 40); + // all proposals are removed + assert_eq!(::ProposalProvider::proposals(), vec![]); + assert_eq!(::ProposalProvider::proposals_count(), 0); + + // assert events + proposals.sort(); + assert_prev_event(mock::Event::Alliance(crate::Event::AllianceDisbanded { + members: vec![1, 3, 9], + proposals, + })); + + System::assert_last_event(mock::Event::Alliance(crate::Event::MembersInitialized { + founders: vec![5, 8], + fellows: vec![4], + allies: vec![2], + })); + }) +} + #[test] fn propose_works() { new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); + let (proposal, proposal_len, hash) = make_remark_proposal(42); // only votable member can propose proposal, 4 is ally not have vote rights assert_noop!( @@ -67,9 +209,7 @@ fn propose_works() { #[test] fn vote_works() { new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); + let (proposal, proposal_len, hash) = make_remark_proposal(42); assert_ok!(Alliance::propose( Origin::signed(1), 3, @@ -103,9 +243,7 @@ fn vote_works() { #[test] fn veto_works() { new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); - let hash: H256 = proposal.blake2_256().into(); + let (proposal, proposal_len, hash) = make_remark_proposal(42); assert_ok!(Alliance::propose( Origin::signed(1), 3, @@ -119,9 +257,7 @@ fn veto_works() { ); let cid = test_cid(); - let vetoable_proposal = make_set_rule_proposal(cid); - let vetoable_proposal_len: u32 = vetoable_proposal.using_encoded(|p| p.len() as u32); - let vetoable_hash: H256 = 
vetoable_proposal.blake2_256().into(); + let (vetoable_proposal, vetoable_proposal_len, vetoable_hash) = make_set_rule_proposal(cid); assert_ok!(Alliance::propose( Origin::signed(1), 3, @@ -163,10 +299,8 @@ fn veto_works() { #[test] fn close_works() { new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let (proposal, proposal_len, hash) = make_remark_proposal(42); let proposal_weight = proposal.get_dispatch_info().weight; - let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Alliance::propose( Origin::signed(1), 3, @@ -452,9 +586,18 @@ fn retire_works() { fn assert_powerless(user: Origin) { //vote / veto with a valid propsal let cid = test_cid(); - let proposal = make_proposal(42); + let (proposal, _, _) = make_kick_member_proposal(42); - assert_noop!(Alliance::init_members(user.clone(), vec![], vec![], vec![]), BadOrigin); + assert_noop!( + Alliance::force_set_members( + user.clone(), + vec![], + vec![], + vec![], + ForceSetWitness { voting_members: 3, ..Default::default() } + ), + BadOrigin + ); assert_noop!(Alliance::set_rule(user.clone(), cid.clone()), BadOrigin); diff --git a/frame/alliance/src/types.rs b/frame/alliance/src/types.rs index 8fb0ae96fd02d..a7fa9f83e4cf3 100644 --- a/frame/alliance/src/types.rs +++ b/frame/alliance/src/types.rs @@ -93,3 +93,34 @@ impl Cid { } } } + +/// Witness data for the `force_set_members` call. +/// Relevant only if executed on an initialized alliance to reset it. +#[derive( + Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo, Default, +)] +pub struct ForceSetWitness { + /// Number of active proposals which will be vetoed and removed. + #[codec(compact)] + pub(super) proposals: u32, + /// Total number of voting members in the current alliance. + #[codec(compact)] + pub(super) voting_members: u32, + /// Total number of ally members in the current alliance. + #[codec(compact)] + pub(super) ally_members: u32, +} + +#[cfg(test)] +impl ForceSetWitness { + // Creates new ForceSetWitness. + pub(super) fn new(proposals: u32, voting_members: u32, ally_members: u32) -> Self { + Self { proposals, voting_members, ally_members } + } +} + +impl ForceSetWitness { + pub(super) fn is_zero(self) -> bool { + self == Self::default() + } +} diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index 048bea3d1e5a4..8436b3ddbd530 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_alliance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-08-26, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-09-01, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -53,7 +53,7 @@ pub trait WeightInfo { fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight; fn close_disapproved(x: u32, y: u32, p: u32, ) -> Weight; fn close_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight; - fn init_members(x: u32, y: u32, z: u32, ) -> Weight; + fn force_set_members(x: u32, y: u32, z: u32, p: u32, c: u32, m: u32, ) -> Weight; fn set_rule() -> Weight; fn announce() -> Weight; fn remove_announcement() -> Weight; @@ -79,16 +79,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[2, 10]`. 
/// The range of component `y` is `[0, 90]`. /// The range of component `p` is `[1, 100]`. - fn propose_proposed(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(22_575_000 as RefTimeWeight) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(11_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) - // Standard Error: 23_000 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + fn propose_proposed(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(37_864_000 as RefTimeWeight) + // Standard Error: 28_000 + .saturating_add(Weight::from_ref_time(69_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(90_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(216_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } @@ -96,12 +94,10 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Voting (r:1 w:1) /// The range of component `x` is `[3, 10]`. /// The range of component `y` is `[2, 90]`. - fn vote(x: u32, y: u32, ) -> Weight { - Weight::from_ref_time(45_486_000 as RefTimeWeight) - // Standard Error: 29_000 - .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + fn vote(_x: u32, y: u32, ) -> Weight { + Weight::from_ref_time(46_813_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(128_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(125_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -111,9 +107,9 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Voting (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - Weight::from_ref_time(35_296_000 as RefTimeWeight) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(171_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + Weight::from_ref_time(35_316_000 as RefTimeWeight) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -126,13 +122,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(39_252_000 as RefTimeWeight) + Weight::from_ref_time(36_245_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(75_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(336_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(109_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(170_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(178_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -145,12 +141,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. - fn close_early_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(50_357_000 as RefTimeWeight) + fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(48_088_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + // Standard Error: 16_000 + .saturating_add(Weight::from_ref_time(194_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(103_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(204_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -164,11 +164,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(41_258_000 as RefTimeWeight) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(111_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + Weight::from_ref_time(43_374_000 as RefTimeWeight) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(101_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(182_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -182,45 +182,65 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
- fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(40_490_000 as RefTimeWeight) - // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + fn close_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(42_798_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(195_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } - // Storage: Alliance Members (r:2 w:3) - // Storage: AllianceMotion Members (r:1 w:1) - /// The range of component `x` is `[2, 10]`. + // Storage: AllianceMotion Proposals (r:1 w:1) + // Storage: Alliance Members (r:3 w:3) + // Storage: Alliance DepositOf (r:200 w:199) + // Storage: System Account (r:199 w:199) + // Storage: AllianceMotion Voting (r:0 w:100) + // Storage: AllianceMotion Members (r:0 w:1) + // Storage: AllianceMotion Prime (r:0 w:1) + // Storage: AllianceMotion ProposalOf (r:0 w:100) + /// The range of component `x` is `[1, 10]`. /// The range of component `y` is `[0, 90]`. /// The range of component `z` is `[0, 100]`. - fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { - Weight::from_ref_time(35_186_000 as RefTimeWeight) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(127_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + /// The range of component `p` is `[0, 100]`. + /// The range of component `c` is `[0, 100]`. + /// The range of component `m` is `[0, 100]`. 
+ fn force_set_members(x: u32, y: u32, z: u32, p: u32, c: u32, m: u32, ) -> Weight { + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 221_000 + .saturating_add(Weight::from_ref_time(1_294_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + // Standard Error: 23_000 + .saturating_add(Weight::from_ref_time(231_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 21_000 + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + // Standard Error: 21_000 + .saturating_add(Weight::from_ref_time(9_371_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + // Standard Error: 21_000 + .saturating_add(Weight::from_ref_time(11_673_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + // Standard Error: 21_000 + .saturating_add(Weight::from_ref_time(11_581_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - Weight::from_ref_time(18_189_000 as RefTimeWeight) + Weight::from_ref_time(18_721_000 as RefTimeWeight) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - Weight::from_ref_time(21_106_000 as RefTimeWeight) + Weight::from_ref_time(21_887_000 as RefTimeWeight) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - Weight::from_ref_time(22_208_000 as RefTimeWeight) + Weight::from_ref_time(23_052_000 as RefTimeWeight) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -229,14 +249,14 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - Weight::from_ref_time(53_771_000 as RefTimeWeight) + Weight::from_ref_time(54_504_000 as RefTimeWeight) .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - Weight::from_ref_time(41_912_000 as RefTimeWeight) + Weight::from_ref_time(42_601_000 as RefTimeWeight) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -245,7 +265,7 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - Weight::from_ref_time(36_811_000 as RefTimeWeight) + Weight::from_ref_time(37_704_000 as RefTimeWeight) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) 
.saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } @@ -255,7 +275,7 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Prime (r:0 w:1) // Storage: Alliance RetiringMembers (r:0 w:1) fn give_retirement_notice() -> Weight { - Weight::from_ref_time(41_079_000 as RefTimeWeight) + Weight::from_ref_time(40_859_000 as RefTimeWeight) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } @@ -264,7 +284,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn retire() -> Weight { - Weight::from_ref_time(42_703_000 as RefTimeWeight) + Weight::from_ref_time(43_447_000 as RefTimeWeight) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } @@ -275,7 +295,7 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - Weight::from_ref_time(61_370_000 as RefTimeWeight) + Weight::from_ref_time(61_718_000 as RefTimeWeight) .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } @@ -284,11 +304,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(359_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_385_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_376_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(112_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -298,10 +318,10 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[1, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) - // Standard Error: 153_000 - .saturating_add(Weight::from_ref_time(21_484_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) - // Standard Error: 59_000 - .saturating_add(Weight::from_ref_time(3_772_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + // Standard Error: 145_000 + .saturating_add(Weight::from_ref_time(20_932_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + // Standard Error: 56_000 + .saturating_add(Weight::from_ref_time(3_649_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -318,16 +338,14 @@ impl WeightInfo for () { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[0, 90]`. /// The range of component `p` is `[1, 100]`. 
- fn propose_proposed(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(22_575_000 as RefTimeWeight) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(11_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) - // Standard Error: 23_000 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + fn propose_proposed(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(37_864_000 as RefTimeWeight) + // Standard Error: 28_000 + .saturating_add(Weight::from_ref_time(69_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(90_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(216_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } @@ -335,12 +353,10 @@ impl WeightInfo for () { // Storage: AllianceMotion Voting (r:1 w:1) /// The range of component `x` is `[3, 10]`. /// The range of component `y` is `[2, 90]`. - fn vote(x: u32, y: u32, ) -> Weight { - Weight::from_ref_time(45_486_000 as RefTimeWeight) - // Standard Error: 29_000 - .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + fn vote(_x: u32, y: u32, ) -> Weight { + Weight::from_ref_time(46_813_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(128_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(125_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -350,9 +366,9 @@ impl WeightInfo for () { // Storage: AllianceMotion Voting (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - Weight::from_ref_time(35_296_000 as RefTimeWeight) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(171_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + Weight::from_ref_time(35_316_000 as RefTimeWeight) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -365,13 +381,13 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(39_252_000 as RefTimeWeight) + Weight::from_ref_time(36_245_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(75_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(336_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(109_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(170_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(178_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -384,12 +400,16 @@ impl WeightInfo for () { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. - fn close_early_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(50_357_000 as RefTimeWeight) + fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(48_088_000 as RefTimeWeight) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + // Standard Error: 16_000 + .saturating_add(Weight::from_ref_time(194_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(103_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(204_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -403,11 +423,11 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(41_258_000 as RefTimeWeight) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(111_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + Weight::from_ref_time(43_374_000 as RefTimeWeight) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(101_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(182_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -421,45 +441,65 @@ impl WeightInfo for () { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
- fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(40_490_000 as RefTimeWeight) - // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + fn close_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(42_798_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(195_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } - // Storage: Alliance Members (r:2 w:3) - // Storage: AllianceMotion Members (r:1 w:1) - /// The range of component `x` is `[2, 10]`. + // Storage: AllianceMotion Proposals (r:1 w:1) + // Storage: Alliance Members (r:3 w:3) + // Storage: Alliance DepositOf (r:200 w:199) + // Storage: System Account (r:199 w:199) + // Storage: AllianceMotion Voting (r:0 w:100) + // Storage: AllianceMotion Members (r:0 w:1) + // Storage: AllianceMotion Prime (r:0 w:1) + // Storage: AllianceMotion ProposalOf (r:0 w:100) + /// The range of component `x` is `[1, 10]`. /// The range of component `y` is `[0, 90]`. /// The range of component `z` is `[0, 100]`. - fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { - Weight::from_ref_time(35_186_000 as RefTimeWeight) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(127_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + /// The range of component `p` is `[0, 100]`. + /// The range of component `c` is `[0, 100]`. + /// The range of component `m` is `[0, 100]`. 
+ fn force_set_members(x: u32, y: u32, z: u32, p: u32, c: u32, m: u32, ) -> Weight { + Weight::from_ref_time(0 as RefTimeWeight) + // Standard Error: 221_000 + .saturating_add(Weight::from_ref_time(1_294_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + // Standard Error: 23_000 + .saturating_add(Weight::from_ref_time(231_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + // Standard Error: 21_000 + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + // Standard Error: 21_000 + .saturating_add(Weight::from_ref_time(9_371_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + // Standard Error: 21_000 + .saturating_add(Weight::from_ref_time(11_673_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + // Standard Error: 21_000 + .saturating_add(Weight::from_ref_time(11_581_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - Weight::from_ref_time(18_189_000 as RefTimeWeight) + Weight::from_ref_time(18_721_000 as RefTimeWeight) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - Weight::from_ref_time(21_106_000 as RefTimeWeight) + Weight::from_ref_time(21_887_000 as RefTimeWeight) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - Weight::from_ref_time(22_208_000 as RefTimeWeight) + Weight::from_ref_time(23_052_000 as RefTimeWeight) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -468,14 +508,14 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - Weight::from_ref_time(53_771_000 as RefTimeWeight) + Weight::from_ref_time(54_504_000 as RefTimeWeight) .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - Weight::from_ref_time(41_912_000 as RefTimeWeight) + Weight::from_ref_time(42_601_000 as RefTimeWeight) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -484,7 +524,7 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - Weight::from_ref_time(36_811_000 as RefTimeWeight) + Weight::from_ref_time(37_704_000 as RefTimeWeight) .saturating_add(RocksDbWeight::get().reads(4 as 
RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } @@ -494,7 +534,7 @@ impl WeightInfo for () { // Storage: AllianceMotion Prime (r:0 w:1) // Storage: Alliance RetiringMembers (r:0 w:1) fn give_retirement_notice() -> Weight { - Weight::from_ref_time(41_079_000 as RefTimeWeight) + Weight::from_ref_time(40_859_000 as RefTimeWeight) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } @@ -503,7 +543,7 @@ impl WeightInfo for () { // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn retire() -> Weight { - Weight::from_ref_time(42_703_000 as RefTimeWeight) + Weight::from_ref_time(43_447_000 as RefTimeWeight) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } @@ -514,7 +554,7 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - Weight::from_ref_time(61_370_000 as RefTimeWeight) + Weight::from_ref_time(61_718_000 as RefTimeWeight) .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } @@ -523,11 +563,11 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(359_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_385_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_376_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(119_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(112_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -537,10 +577,10 @@ impl WeightInfo for () { /// The range of component `l` is `[1, 255]`. 
fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) - // Standard Error: 153_000 - .saturating_add(Weight::from_ref_time(21_484_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) - // Standard Error: 59_000 - .saturating_add(Weight::from_ref_time(3_772_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + // Standard Error: 145_000 + .saturating_add(Weight::from_ref_time(20_932_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + // Standard Error: 56_000 + .saturating_add(Weight::from_ref_time(3_649_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } From 324a18e3c5cbf333672c54f9367f530ea976928d Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 1 Sep 2022 11:33:22 +0100 Subject: [PATCH 080/348] Runtime State Test + Integration with `try-runtime` (#10174) * add missing version to dependencies * Huh * add features more * more fixing * last touches * it all finally works * remove some feature gates * remove unused * fix old macro * make it work again * fmt * remove unused import * ".git/.scripts/fmt.sh" 1 * Cleanup more * fix and rename everything * a few clippy fixes * Add try-runtime feature Signed-off-by: Oliver Tale-Yazdi * small fixes * fmt * Update bin/node-template/runtime/src/lib.rs * fix build * Update utils/frame/try-runtime/cli/src/lib.rs Co-authored-by: David * Update utils/frame/try-runtime/cli/src/commands/execute_block.rs Co-authored-by: David * address all review comments * fix typos * revert spec change * last touches * update docs * fmt * remove some debug_assertions * fmt Signed-off-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: David --- Cargo.lock | 3 + bin/node-template/runtime/Cargo.toml | 4 +- bin/node-template/runtime/src/lib.rs | 10 +- bin/node/runtime/Cargo.toml | 20 +- bin/node/runtime/src/lib.rs | 17 +- frame/bags-list/fuzzer/src/main.rs | 2 +- frame/bags-list/remote-tests/src/lib.rs | 2 +- .../src/{sanity_check.rs => try_state.rs} | 2 +- frame/bags-list/src/lib.rs | 15 +- frame/bags-list/src/list/mod.rs | 43 ++--- frame/bags-list/src/list/tests.rs | 26 +-- frame/bags-list/src/mock.rs | 2 +- frame/beefy-mmr/Cargo.toml | 1 + frame/beefy/Cargo.toml | 1 + frame/election-provider-support/src/lib.rs | 4 +- frame/executive/Cargo.toml | 3 +- frame/executive/src/lib.rs | 77 ++++++-- frame/nomination-pools/Cargo.toml | 2 +- .../nomination-pools/benchmarking/src/lib.rs | 4 +- frame/nomination-pools/src/lib.rs | 16 +- frame/nomination-pools/src/mock.rs | 2 +- frame/remark/Cargo.toml | 1 + frame/staking/src/migrations.rs | 4 +- frame/staking/src/pallet/impls.rs | 107 ++++++++++- frame/staking/src/pallet/mod.rs | 6 +- .../procedural/src/pallet/expand/hooks.rs | 32 +++- frame/support/src/dispatch.rs | 36 ++++ frame/support/src/traits.rs | 7 +- frame/support/src/traits/hooks.rs | 55 ++---- frame/support/src/traits/try_runtime.rs | 174 ++++++++++++++++++ frame/system/src/lib.rs | 5 +- frame/transaction-storage/Cargo.toml | 1 + frame/try-runtime/Cargo.toml | 4 +- frame/try-runtime/src/lib.rs | 3 +- utils/frame/remote-externalities/src/lib.rs | 2 +- utils/frame/try-runtime/cli/Cargo.toml | 1 + .../cli/src/commands/execute_block.rs | 43 ++++- .../cli/src/commands/follow_chain.rs | 22 ++- utils/frame/try-runtime/cli/src/lib.rs | 41 +++-- 39 files 
changed, 621 insertions(+), 179 deletions(-) rename frame/bags-list/remote-tests/src/{sanity_check.rs => try_state.rs} (96%) create mode 100644 frame/support/src/traits/try_runtime.rs diff --git a/Cargo.lock b/Cargo.lock index b0d0206a35300..ae225184c5b28 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2228,6 +2228,7 @@ version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", + "frame-try-runtime", "hex-literal", "pallet-balances", "pallet-transaction-payment", @@ -2414,6 +2415,7 @@ name = "frame-try-runtime" version = "0.10.0-dev" dependencies = [ "frame-support", + "parity-scale-codec", "sp-api", "sp-runtime", "sp-std", @@ -11103,6 +11105,7 @@ name = "try-runtime-cli" version = "0.10.0-dev" dependencies = [ "clap 3.1.18", + "frame-try-runtime", "jsonrpsee", "log", "parity-scale-codec", diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 734ed089aa4bd..bbe3e8eef7d3c 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -63,6 +63,7 @@ std = [ "frame-support/std", "frame-system-rpc-runtime-api/std", "frame-system/std", + "frame-try-runtime/std", "pallet-aura/std", "pallet-balances/std", "pallet-grandpa/std", @@ -97,9 +98,10 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] try-runtime = [ - "frame-executive/try-runtime", "frame-try-runtime", + "frame-executive/try-runtime", "frame-system/try-runtime", + "frame-support/try-runtime", "pallet-aura/try-runtime", "pallet-balances/try-runtime", "pallet-grandpa/try-runtime", diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index b43fbde52dcdc..7280336cada3d 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -545,8 +545,14 @@ impl_runtime_apis! { (weight, BlockWeights::get().max_block) } - fn execute_block_no_check(block: Block) -> Weight { - Executive::execute_block_no_check(block) + fn execute_block( + block: Block, + state_root_check: bool, + select: frame_try_runtime::TryStateSelect + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. 
+ Executive::try_execute_block(block, state_root_check, select).expect("execute-block failed") } } } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 10b15b6ec554d..ebec9b8b755f6 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -245,14 +245,16 @@ runtime-benchmarks = [ "hex-literal", ] try-runtime = [ - "frame-executive/try-runtime", "frame-try-runtime", + "frame-executive/try-runtime", "frame-system/try-runtime", + "frame-support/try-runtime", "pallet-alliance/try-runtime", "pallet-assets/try-runtime", "pallet-authority-discovery/try-runtime", "pallet-authorship/try-runtime", "pallet-babe/try-runtime", + "pallet-bags-list/try-runtime", "pallet-balances/try-runtime", "pallet-bounties/try-runtime", "pallet-child-bounties/try-runtime", @@ -264,32 +266,36 @@ try-runtime = [ "pallet-elections-phragmen/try-runtime", "pallet-gilt/try-runtime", "pallet-grandpa/try-runtime", - "pallet-identity/try-runtime", "pallet-im-online/try-runtime", "pallet-indices/try-runtime", + "pallet-identity/try-runtime", "pallet-lottery/try-runtime", "pallet-membership/try-runtime", "pallet-mmr/try-runtime", "pallet-multisig/try-runtime", + "pallet-nomination-pools/try-runtime", "pallet-offences/try-runtime", "pallet-preimage/try-runtime", "pallet-proxy/try-runtime", - "pallet-ranked-collective/try-runtime", "pallet-randomness-collective-flip/try-runtime", + "pallet-ranked-collective/try-runtime", "pallet-recovery/try-runtime", "pallet-referenda/try-runtime", - "pallet-scheduler/try-runtime", + "pallet-remark/try-runtime", "pallet-session/try-runtime", - "pallet-society/try-runtime", "pallet-staking/try-runtime", "pallet-state-trie-migration/try-runtime", + "pallet-scheduler/try-runtime", + "pallet-society/try-runtime", "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", "pallet-tips/try-runtime", - "pallet-transaction-payment/try-runtime", "pallet-treasury/try-runtime", - "pallet-uniques/try-runtime", "pallet-utility/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-asset-tx-payment/try-runtime", + "pallet-transaction-storage/try-runtime", + "pallet-uniques/try-runtime", "pallet-vesting/try-runtime", "pallet-whitelist/try-runtime", ] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5d4a21cea09b2..8ef7e7852ad02 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2079,8 +2079,21 @@ impl_runtime_apis! { (weight, RuntimeBlockWeights::get().max_block) } - fn execute_block_no_check(block: Block) -> Weight { - Executive::execute_block_no_check(block) + fn execute_block( + block: Block, + state_root_check: bool, + select: frame_try_runtime::TryStateSelect + ) -> Weight { + log::info!( + target: "node-runtime", + "try-runtime: executing block {:?} / root checks: {:?} / try-state-select: {:?}", + block.header.hash(), + state_root_check, + select, + ); + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. 
+ Executive::try_execute_block(block, state_root_check, select).unwrap() } } diff --git a/frame/bags-list/fuzzer/src/main.rs b/frame/bags-list/fuzzer/src/main.rs index c17fbe0a2f77f..9f7ca464cc2b8 100644 --- a/frame/bags-list/fuzzer/src/main.rs +++ b/frame/bags-list/fuzzer/src/main.rs @@ -88,7 +88,7 @@ fn main() { }, } - assert!(BagsList::sanity_check().is_ok()); + assert!(BagsList::try_state().is_ok()); }) }); } diff --git a/frame/bags-list/remote-tests/src/lib.rs b/frame/bags-list/remote-tests/src/lib.rs index 458064cf79f57..927c3dc91cb58 100644 --- a/frame/bags-list/remote-tests/src/lib.rs +++ b/frame/bags-list/remote-tests/src/lib.rs @@ -24,8 +24,8 @@ use sp_std::prelude::*; pub const LOG_TARGET: &str = "runtime::bags-list::remote-tests"; pub mod migration; -pub mod sanity_check; pub mod snapshot; +pub mod try_state; /// A wrapper for a runtime that the functions of this crate expect. /// diff --git a/frame/bags-list/remote-tests/src/sanity_check.rs b/frame/bags-list/remote-tests/src/try_state.rs similarity index 96% rename from frame/bags-list/remote-tests/src/sanity_check.rs rename to frame/bags-list/remote-tests/src/try_state.rs index 1027efb8539ee..11278c20eb8ed 100644 --- a/frame/bags-list/remote-tests/src/sanity_check.rs +++ b/frame/bags-list/remote-tests/src/try_state.rs @@ -44,7 +44,7 @@ pub async fn execute ext.execute_with(|| { sp_core::crypto::set_default_ss58_version(Runtime::SS58Prefix::get().try_into().unwrap()); - pallet_bags_list::Pallet::::sanity_check().unwrap(); + pallet_bags_list::Pallet::::try_state().unwrap(); log::info!(target: crate::LOG_TARGET, "executed bags-list sanity check with no errors."); crate::display_and_check_bags::(currency_unit, currency_name); diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 5163a579c6f43..70f8b2a32f817 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -263,6 +263,11 @@ pub mod pallet { "thresholds must strictly increase, and have no duplicates", ); } + + #[cfg(feature = "try-runtime")] + fn try_state(_: BlockNumberFor) -> Result<(), &'static str> { + >::try_state() + } } } @@ -340,14 +345,8 @@ impl, I: 'static> SortedListProvider for Pallet List::::unsafe_regenerate(all, score_of) } - #[cfg(feature = "std")] - fn sanity_check() -> Result<(), &'static str> { - List::::sanity_check() - } - - #[cfg(not(feature = "std"))] - fn sanity_check() -> Result<(), &'static str> { - Ok(()) + fn try_state() -> Result<(), &'static str> { + List::::try_state() } fn unsafe_clear() { diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index b4f852685842d..cfa7daf198937 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -28,8 +28,8 @@ use crate::Config; use codec::{Decode, Encode, MaxEncodedLen}; use frame_election_provider_support::ScoreProvider; use frame_support::{ - ensure, - traits::{Defensive, Get}, + defensive, ensure, + traits::{Defensive, DefensiveOption, Get}, DefaultNoBound, PalletError, }; use scale_info::TypeInfo; @@ -220,7 +220,8 @@ impl, I: 'static> List { crate::ListBags::::remove(removed_bag); } - debug_assert_eq!(Self::sanity_check(), Ok(())); + #[cfg(feature = "std")] + debug_assert_eq!(Self::try_state(), Ok(())); num_affected } @@ -325,8 +326,7 @@ impl, I: 'static> List { crate::log!( debug, - "inserted {:?} with score {:? 
- } into bag {:?}, new count is {}", + "inserted {:?} with score {:?} into bag {:?}, new count is {}", id, score, bag_score, @@ -457,11 +457,8 @@ impl, I: 'static> List { // re-fetch `lighter_node` from storage since it may have been updated when `heavier_node` // was removed. - let lighter_node = Node::::get(lighter_id).ok_or_else(|| { - debug_assert!(false, "id that should exist cannot be found"); - crate::log!(warn, "id that should exist cannot be found"); - ListError::NodeNotFound - })?; + let lighter_node = + Node::::get(lighter_id).defensive_ok_or_else(|| ListError::NodeNotFound)?; // insert `heavier_node` directly in front of `lighter_node`. This will update both nodes // in storage and update the node counter. @@ -508,7 +505,7 @@ impl, I: 'static> List { node.put(); } - /// Sanity check the list. + /// Check the internal state of the list. /// /// This should be called from the call-site, whenever one of the mutating apis (e.g. `insert`) /// is being used, after all other staking data (such as counter) has been updated. It checks: @@ -517,8 +514,7 @@ impl, I: 'static> List { /// * length of this list is in sync with `ListNodes::count()`, /// * and sanity-checks all bags and nodes. This will cascade down all the checks and makes sure /// all bags and nodes are checked per *any* update to `List`. - #[cfg(feature = "std")] - pub(crate) fn sanity_check() -> Result<(), &'static str> { + pub(crate) fn try_state() -> Result<(), &'static str> { let mut seen_in_list = BTreeSet::new(); ensure!( Self::iter().map(|node| node.id).all(|id| seen_in_list.insert(id)), @@ -546,7 +542,7 @@ impl, I: 'static> List { thresholds.into_iter().filter_map(|t| Bag::::get(t)) }; - let _ = active_bags.clone().try_for_each(|b| b.sanity_check())?; + let _ = active_bags.clone().try_for_each(|b| b.try_state())?; let nodes_in_bags_count = active_bags.clone().fold(0u32, |acc, cur| acc + cur.iter().count() as u32); @@ -557,17 +553,12 @@ impl, I: 'static> List { // check that all nodes are sane. We check the `ListNodes` storage item directly in case we // have some "stale" nodes that are not in a bag. for (_id, node) in crate::ListNodes::::iter() { - node.sanity_check()? + node.try_state()? } Ok(()) } - #[cfg(not(feature = "std"))] - pub(crate) fn sanity_check() -> Result<(), &'static str> { - Ok(()) - } - /// Returns the nodes of all non-empty bags. For testing and benchmarks. #[cfg(any(feature = "std", feature = "runtime-benchmarks"))] #[allow(dead_code)] @@ -701,8 +692,7 @@ impl, I: 'static> Bag { if *tail == node.id { // this should never happen, but this check prevents one path to a worst case // infinite loop. - debug_assert!(false, "system logic error: inserting a node who has the id of tail"); - crate::log!(warn, "system logic error: inserting a node who has the id of tail"); + defensive!("system logic error: inserting a node who has the id of tail"); return }; } @@ -753,7 +743,7 @@ impl, I: 'static> Bag { } } - /// Sanity check this bag. + /// Check the internal state of the bag. /// /// Should be called by the call-site, after any mutating operation on a bag. The call site of /// this struct is always `List`. @@ -761,8 +751,7 @@ impl, I: 'static> Bag { /// * Ensures head has no prev. /// * Ensures tail has no next. /// * Ensures there are no loops, traversal from head to tail is correct. 
- #[cfg(feature = "std")] - fn sanity_check(&self) -> Result<(), &'static str> { + fn try_state(&self) -> Result<(), &'static str> { frame_support::ensure!( self.head() .map(|head| head.prev().is_none()) @@ -801,7 +790,6 @@ impl, I: 'static> Bag { } /// Check if the bag contains a node with `id`. - #[cfg(feature = "std")] fn contains(&self, id: &T::AccountId) -> bool { self.iter().any(|n| n.id() == id) } @@ -906,8 +894,7 @@ impl, I: 'static> Node { self.bag_upper } - #[cfg(feature = "std")] - fn sanity_check(&self) -> Result<(), &'static str> { + fn try_state(&self) -> Result<(), &'static str> { let expected_bag = Bag::::get(self.bag_upper).ok_or("bag not found for node")?; let id = self.id(); diff --git a/frame/bags-list/src/list/tests.rs b/frame/bags-list/src/list/tests.rs index 9bdd54289fd88..966ea1a74c71c 100644 --- a/frame/bags-list/src/list/tests.rs +++ b/frame/bags-list/src/list/tests.rs @@ -350,15 +350,15 @@ mod list { } #[test] - fn sanity_check_works() { + fn try_state_works() { ExtBuilder::default().build_and_execute_no_post_check(|| { - assert_ok!(List::::sanity_check()); + assert_ok!(List::::try_state()); }); // make sure there are no duplicates. ExtBuilder::default().build_and_execute_no_post_check(|| { Bag::::get(10).unwrap().insert_unchecked(2, 10); - assert_eq!(List::::sanity_check(), Err("duplicate identified")); + assert_eq!(List::::try_state(), Err("duplicate identified")); }); // ensure count is in sync with `ListNodes::count()`. @@ -372,7 +372,7 @@ mod list { CounterForListNodes::::mutate(|counter| *counter += 1); assert_eq!(crate::ListNodes::::count(), 5); - assert_eq!(List::::sanity_check(), Err("iter_count != stored_count")); + assert_eq!(List::::try_state(), Err("iter_count != stored_count")); }); } @@ -804,7 +804,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![2, 3, 13, 14]); - assert_ok!(bag_1000.sanity_check()); + assert_ok!(bag_1000.try_state()); // and the node isn't mutated when its removed assert_eq!(node_4, node_4_pre_remove); @@ -814,7 +814,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![3, 13, 14]); - assert_ok!(bag_1000.sanity_check()); + assert_ok!(bag_1000.try_state()); // when removing a tail that is not pointing at the head let node_14 = Node::::get(&14).unwrap(); @@ -822,7 +822,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![3, 13]); - assert_ok!(bag_1000.sanity_check()); + assert_ok!(bag_1000.try_state()); // when removing a tail that is pointing at the head let node_13 = Node::::get(&13).unwrap(); @@ -830,7 +830,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_1000), vec![3]); - assert_ok!(bag_1000.sanity_check()); + assert_ok!(bag_1000.try_state()); // when removing a node that is both the head & tail let node_3 = Node::::get(&3).unwrap(); @@ -846,7 +846,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_10), vec![1, 12]); - assert_ok!(bag_10.sanity_check()); + assert_ok!(bag_10.try_state()); // when removing a head that is pointing at the tail let node_1 = Node::::get(&1).unwrap(); @@ -854,7 +854,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_10), vec![12]); - assert_ok!(bag_10.sanity_check()); + assert_ok!(bag_10.try_state()); // and since we updated the bag's head/tail, we need to write this storage so we // can correctly `get` it again in later checks bag_10.put(); @@ -865,7 +865,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_2000), vec![15, 17, 18, 19]); - assert_ok!(bag_2000.sanity_check()); + assert_ok!(bag_2000.try_state()); // when removing a node that is pointing at tail, but not 
head let node_18 = Node::::get(&18).unwrap(); @@ -873,7 +873,7 @@ mod bags { // then assert_eq!(bag_as_ids(&bag_2000), vec![15, 17, 19]); - assert_ok!(bag_2000.sanity_check()); + assert_ok!(bag_2000.try_state()); // finally, when reading from storage, the state of all bags is as expected assert_eq!( @@ -905,7 +905,7 @@ mod bags { // .. and the bag it was removed from let bag_1000 = Bag::::get(1_000).unwrap(); // is sane - assert_ok!(bag_1000.sanity_check()); + assert_ok!(bag_1000.try_state()); // and has the correct head and tail. assert_eq!(bag_1000.head, Some(3)); assert_eq!(bag_1000.tail, Some(4)); diff --git a/frame/bags-list/src/mock.rs b/frame/bags-list/src/mock.rs index 961bf2b83552f..3bc1b34657262 100644 --- a/frame/bags-list/src/mock.rs +++ b/frame/bags-list/src/mock.rs @@ -147,7 +147,7 @@ impl ExtBuilder { pub fn build_and_execute(self, test: impl FnOnce() -> ()) { self.build().execute_with(|| { test(); - List::::sanity_check().expect("Sanity check post condition failed") + List::::try_state().expect("Try-state post condition failed") }) } diff --git a/frame/beefy-mmr/Cargo.toml b/frame/beefy-mmr/Cargo.toml index 8da182f1c29fc..e8366943c85b0 100644 --- a/frame/beefy-mmr/Cargo.toml +++ b/frame/beefy-mmr/Cargo.toml @@ -50,3 +50,4 @@ std = [ "sp-runtime/std", "sp-std/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/beefy/Cargo.toml b/frame/beefy/Cargo.toml index eecce963d19f0..84aa8c7757c45 100644 --- a/frame/beefy/Cargo.toml +++ b/frame/beefy/Cargo.toml @@ -37,3 +37,4 @@ std = [ "sp-runtime/std", "sp-std/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index eee865d0b737b..21a01ce1904ec 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -513,8 +513,8 @@ pub trait SortedListProvider { /// unbounded amount of storage accesses. fn unsafe_clear(); - /// Sanity check internal state of list. Only meant for debug compilation. - fn sanity_check() -> Result<(), &'static str>; + /// Check internal state of list. Only meant for debugging. + fn try_state() -> Result<(), &'static str>; /// If `who` changes by the returned amount they are guaranteed to have a worst case change /// in their list position. 
diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index b67f3313e612b..1ae22ddbb0260 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -19,6 +19,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../try-runtime", optional = true } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } @@ -48,4 +49,4 @@ std = [ "sp-std/std", "sp-tracing/std", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = ["frame-support/try-runtime", "frame-try-runtime" ] diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 45361084f2f42..d7cd1da7910a4 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -202,6 +202,7 @@ where } } +#[cfg(feature = "try-runtime")] impl< System: frame_system::Config + EnsureInherentsAreFirst, Block: traits::Block

, @@ -211,7 +212,8 @@ impl<
 + OnInitialize + OnIdle + OnFinalize
- + OffchainWorker,
+ + OffchainWorker
+ + frame_support::traits::TryState,
 COnRuntimeUpgrade: OnRuntimeUpgrade,
 > Executive
where @@ -222,16 +224,20 @@ where
 OriginOf: From>,
 UnsignedValidator: ValidateUnsigned>,
{
- /// Execute all `OnRuntimeUpgrade` of this runtime, and return the aggregate weight.
- pub fn execute_on_runtime_upgrade() -> frame_support::weights::Weight {
- <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::on_runtime_upgrade()
- }
-
- /// Execute given block, but don't do any of the `final_checks`.
+ /// Execute given block, but with checks that are less strict than in normal block execution.
 ///
- /// Should only be used for testing.
- #[cfg(feature = "try-runtime")]
- pub fn execute_block_no_check(block: Block) -> frame_support::weights::Weight {
+ /// Some consensus-related checks such as the state root check can be switched off via
+ /// `try_state_root`. Some additional non-consensus checks can be enabled via
+ /// `try_state`.
+ ///
+ /// Should only be used for testing.
+ pub fn try_execute_block(
+ block: Block,
+ try_state_root: bool,
+ select: frame_try_runtime::TryStateSelect,
+ ) -> Result {
+ use frame_support::traits::TryState;
+
 Self::initialize_block(block.header());
 Self::initial_checks(&block);
@@ -239,7 +245,17 @@ where
 Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number());
- // do some of the checks that would normally happen in `final_checks`, but definitely skip
+ // run the try-state checks of all pallets.
+ >::try_state(
+ *header.number(),
+ select,
+ )
+ .map_err(|e| {
+ frame_support::log::error!(target: "runtime::executive", "failure: {:?}", e);
+ e
+ })?;
+
+ // do some of the checks that would normally happen in `final_checks`, but optionally skip
 // the state root check.
 {
 let new_header = >::finalize();
@@ -249,27 +265,60 @@ where
 assert!(header_item == computed_item, "Digest item must match that calculated.");
 }
+ if try_state_root {
+ let storage_root = new_header.state_root();
+ header.state_root().check_equal(storage_root);
+ assert!(
+ header.state_root() == storage_root,
+ "Storage root must match that calculated."
+ );
+ }
+
 assert!(
 header.extrinsics_root() == new_header.extrinsics_root(),
 "Transaction trie root must be valid.",
 );
 }
- frame_system::Pallet::::block_weight().total()
+ Ok(frame_system::Pallet::::block_weight().total())
 }
 /// Execute all `OnRuntimeUpgrade` of this runtime, including the pre and post migration checks.
 ///
 /// This should only be used for testing.
- #[cfg(feature = "try-runtime")]
 pub fn try_runtime_upgrade() -> Result {
 <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::pre_upgrade().unwrap();
 let weight = Self::execute_on_runtime_upgrade();
- <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::post_upgrade().unwrap();
 Ok(weight)
 }
+}
+
+impl<
+ System: frame_system::Config + EnsureInherentsAreFirst,
+ Block: traits::Block
, + Context: Default, + UnsignedValidator, + AllPalletsWithSystem: OnRuntimeUpgrade + + OnInitialize + + OnIdle + + OnFinalize + + OffchainWorker, + COnRuntimeUpgrade: OnRuntimeUpgrade, + > Executive +where + Block::Extrinsic: Checkable + Codec, + CheckedOf: Applyable + GetDispatchInfo, + CallOf: + Dispatchable, + OriginOf: From>, + UnsignedValidator: ValidateUnsigned>, +{ + /// Execute all `OnRuntimeUpgrade` of this runtime, and return the aggregate weight. + pub fn execute_on_runtime_upgrade() -> frame_support::weights::Weight { + <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::on_runtime_upgrade() + } /// Start the execution of a particular block. pub fn initialize_block(header: &System::Header) { diff --git a/frame/nomination-pools/Cargo.toml b/frame/nomination-pools/Cargo.toml index be5c38d85552c..1d613511eacec 100644 --- a/frame/nomination-pools/Cargo.toml +++ b/frame/nomination-pools/Cargo.toml @@ -32,7 +32,7 @@ sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } [features] runtime-benchmarks = [] -try-runtime = [] +try-runtime = [ "frame-support/try-runtime" ] default = ["std"] std = [ "codec/std", diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index f4ecb4a9b0ff1..07d5c63493ef8 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs @@ -201,11 +201,11 @@ impl ListScenario { Pools::::join(Origin::Signed(joiner.clone()).into(), amount, 1).unwrap(); - // Sanity check that the vote weight is still the same as the original bonded + // check that the vote weight is still the same as the original bonded let weight_of = pallet_staking::Pallet::::weight_of_fn(); assert_eq!(vote_to_balance::(weight_of(&self.origin1)).unwrap(), original_bonded); - // Sanity check the member was added correctly + // check the member was added correctly let member = PoolMembers::::get(&joiner).unwrap(); assert_eq!(member.points, amount); assert_eq!(member.pool_id, 1); diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 40db4ac64851a..3809a70440d27 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -1110,7 +1110,7 @@ impl SubPools { } /// The sum of all unbonding balance, regardless of whether they are actually unlocked or not. - #[cfg(any(test, debug_assertions))] + #[cfg(any(feature = "try-runtime", test, debug_assertions))] fn sum_unbonding_balance(&self) -> BalanceOf { self.no_era.balance.saturating_add( self.with_era @@ -2138,6 +2138,11 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumberFor) -> Result<(), &'static str> { + Self::do_try_state(u8::MAX) + } + fn integrity_test() { assert!( T::MaxPointsToBalance::get() > 0, @@ -2389,9 +2394,9 @@ impl Pallet { /// /// To cater for tests that want to escape parts of these checks, this function is split into /// multiple `level`s, where the higher the level, the more checks we performs. So, - /// `sanity_check(255)` is the strongest sanity check, and `0` performs no checks. - #[cfg(any(test, debug_assertions))] - pub fn sanity_checks(level: u8) -> Result<(), &'static str> { + /// `try_state(255)` is the strongest sanity check, and `0` performs no checks. 
+ #[cfg(any(feature = "try-runtime", test, debug_assertions))] + pub fn do_try_state(level: u8) -> Result<(), &'static str> { if level.is_zero() { return Ok(()) } @@ -2401,7 +2406,8 @@ impl Pallet { let reward_pools = RewardPools::::iter_keys().collect::>(); assert_eq!(bonded_pools, reward_pools); - assert!(Metadata::::iter_keys().all(|k| bonded_pools.contains(&k))); + // TODO: can't check this right now: https://github.com/paritytech/substrate/issues/12077 + // assert!(Metadata::::iter_keys().all(|k| bonded_pools.contains(&k))); assert!(SubPoolsStorage::::iter_keys().all(|k| bonded_pools.contains(&k))); assert!(MaxPools::::get().map_or(true, |max| bonded_pools.len() <= (max as usize))); diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index 5138c55afccac..3bb260e56f180 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -304,7 +304,7 @@ impl ExtBuilder { pub fn build_and_execute(self, test: impl FnOnce() -> ()) { self.build().execute_with(|| { test(); - Pools::sanity_checks(CheckLevel::get()).unwrap(); + Pools::do_try_state(CheckLevel::get()).unwrap(); }) } } diff --git a/frame/remark/Cargo.toml b/frame/remark/Cargo.toml index 71a65ce554975..fe20365b7c904 100644 --- a/frame/remark/Cargo.toml +++ b/frame/remark/Cargo.toml @@ -41,3 +41,4 @@ std = [ "sp-runtime/std", "sp-std/std", ] +try-runtime = [ "frame-support/try-runtime" ] diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 7e3bf6ccb93e1..ff01e1fe3b09b 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -153,7 +153,7 @@ pub mod v8 { Nominators::::iter().map(|(id, _)| id), Pallet::::weight_of_fn(), ); - debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); + debug_assert_eq!(T::VoterList::try_state(), Ok(())); StorageVersion::::put(crate::Releases::V8_0_0); crate::log!( @@ -170,7 +170,7 @@ pub mod v8 { #[cfg(feature = "try-runtime")] pub fn post_migrate() -> Result<(), &'static str> { - T::VoterList::sanity_check().map_err(|_| "VoterList is not in a sane state.")?; + T::VoterList::try_state().map_err(|_| "VoterList is not in a sane state.")?; crate::log!(info, "👜 staking bags-list migration passes POST migrate checks ✅",); Ok(()) } diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 2a55d3baea2e6..386c413132f6c 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -789,7 +789,6 @@ impl Pallet { Nominators::::count() + Validators::::count(), T::VoterList::count() ); - debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); } /// This function will remove a nominator from the `Nominators` storage map, @@ -809,7 +808,6 @@ impl Pallet { false }; - debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); debug_assert_eq!( Nominators::::count() + Validators::::count(), T::VoterList::count() @@ -837,7 +835,6 @@ impl Pallet { Nominators::::count() + Validators::::count(), T::VoterList::count() ); - debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); } /// This function will remove a validator from the `Validators` storage map. @@ -856,7 +853,6 @@ impl Pallet { false }; - debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); debug_assert_eq!( Nominators::::count() + Validators::::count(), T::VoterList::count() @@ -1369,7 +1365,7 @@ impl SortedListProvider for UseNominatorsAndValidatorsM // nothing to do upon regenerate. 
0 } - fn sanity_check() -> Result<(), &'static str> { + fn try_state() -> Result<(), &'static str> { Ok(()) } @@ -1452,3 +1448,104 @@ impl StakingInterface for Pallet { Nominators::::get(who).map(|n| n.targets.into_inner()) } } + +#[cfg(feature = "try-runtime")] +impl Pallet { + pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), &'static str> { + T::VoterList::try_state()?; + Self::check_nominators()?; + Self::check_exposures()?; + Self::check_ledgers()?; + Self::check_count() + } + + fn check_count() -> Result<(), &'static str> { + ensure!( + ::VoterList::count() == + Nominators::::count() + Validators::::count(), + "wrong external count" + ); + Ok(()) + } + + fn check_ledgers() -> Result<(), &'static str> { + Bonded::::iter() + .map(|(_, ctrl)| Self::ensure_ledger_consistent(ctrl)) + .collect::>() + } + + fn check_exposures() -> Result<(), &'static str> { + // a check per validator to ensure the exposure struct is always sane. + let era = Self::active_era().unwrap().index; + ErasStakers::::iter_prefix_values(era) + .map(|expo| { + ensure!( + expo.total == + expo.own + + expo.others + .iter() + .map(|e| e.value) + .fold(Zero::zero(), |acc, x| acc + x), + "wrong total exposure.", + ); + Ok(()) + }) + .collect::>() + } + + fn check_nominators() -> Result<(), &'static str> { + // a check per nominator to ensure their entire stake is correctly distributed. Will only + // kick-in if the nomination was submitted before the current era. + let era = Self::active_era().unwrap().index; + >::iter() + .filter_map( + |(nominator, nomination)| { + if nomination.submitted_in > era { + Some(nominator) + } else { + None + } + }, + ) + .map(|nominator| { + // must be bonded. + Self::ensure_is_stash(&nominator)?; + let mut sum = BalanceOf::::zero(); + T::SessionInterface::validators() + .iter() + .map(|v| Self::eras_stakers(era, v)) + .map(|e| { + let individual = + e.others.iter().filter(|e| e.who == nominator).collect::>(); + let len = individual.len(); + match len { + 0 => { /* not supporting this validator at all. */ }, + 1 => sum += individual[0].value, + _ => return Err("nominator cannot back a validator more than once."), + }; + Ok(()) + }) + .collect::>() + }) + .collect::>() + } + + fn ensure_is_stash(who: &T::AccountId) -> Result<(), &'static str> { + ensure!(Self::bonded(who).is_some(), "Not a stash."); + Ok(()) + } + + fn ensure_ledger_consistent(ctrl: T::AccountId) -> Result<(), &'static str> { + // ensures ledger.total == ledger.active + sum(ledger.unlocking). 
+ let ledger = Self::ledger(ctrl.clone()).ok_or("Not a controller.")?; + let real_total: BalanceOf = + ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); + ensure!(real_total == ledger.total, "ledger.total corrupt"); + + if !(ledger.active >= T::Currency::minimum_balance() || ledger.active.is_zero()) { + log!(warn, "ledger.active less than ED: {:?}, {:?}", ctrl, ledger) + } + + Ok(()) + } +} diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 1f11f0ff00ac1..74374cc586a23 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -744,6 +744,11 @@ pub mod pallet { ); } } + + #[cfg(feature = "try-runtime")] + fn try_state(n: BlockNumberFor) -> Result<(), &'static str> { + Self::do_try_state(n) + } } #[pallet::call] @@ -856,7 +861,6 @@ pub mod pallet { if T::VoterList::contains(&stash) { let _ = T::VoterList::on_update(&stash, Self::weight_of(&ledger.stash)).defensive(); - debug_assert_eq!(T::VoterList::sanity_check(), Ok(())); } Self::deposit_event(Event::::Bonded(stash, extra)); diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs index 7a1a94cf46d31..03878f3f186df 100644 --- a/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -17,7 +17,6 @@ use crate::pallet::Def; -/// /// * implement the individual traits using the Hooks trait pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let (where_clause, span, has_runtime_upgrade) = match def.hooks.as_ref() { @@ -59,6 +58,19 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } }; + let log_try_state = quote::quote! { + let pallet_name = < + ::PalletInfo + as + #frame_support::traits::PalletInfo + >::name::().expect("Every active pallet has a name in the runtime; qed"); + #frame_support::log::debug!( + target: #frame_support::LOG_TARGET, + "🩺 try-state pallet {:?}", + pallet_name, + ); + }; + let hooks_impl = if def.hooks.is_none() { let frame_system = &def.frame_system; quote::quote! { @@ -191,5 +203,23 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { >::integrity_test() } } + + #[cfg(feature = "try-runtime")] + impl<#type_impl_gen> + #frame_support::traits::TryState<::BlockNumber> + for #pallet_ident<#type_use_gen> #where_clause + { + fn try_state( + n: ::BlockNumber, + _s: #frame_support::traits::TryStateSelect + ) -> Result<(), &'static str> { + #log_try_state + < + Self as #frame_support::traits::Hooks< + ::BlockNumber + > + >::try_state(n) + } + } ) } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 97312ac3476b5..94974888c1d5c 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -1549,6 +1549,35 @@ macro_rules! 
decl_module { {} }; + (@impl_try_state_default + { $system:ident } + $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; + { $( $other_where_bounds:tt )* } + ) => { + #[cfg(feature = "try-runtime")] + impl<$trait_instance: $system::Config + $trait_name$(, $instance: $instantiable)?> + $crate::traits::TryState<<$trait_instance as $system::Config>::BlockNumber> + for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* + { + fn try_state( + _: <$trait_instance as $system::Config>::BlockNumber, + _: $crate::traits::TryStateSelect, + ) -> Result<(), &'static str> { + let pallet_name = << + $trait_instance + as + $system::Config + >::PalletInfo as $crate::traits::PalletInfo>::name::().unwrap_or(""); + $crate::log::debug!( + target: $crate::LOG_TARGET, + "⚠️ pallet {} cannot have try-state because it is using decl_module!", + pallet_name, + ); + Ok(()) + } + } + }; + (@impl_on_runtime_upgrade { $system:ident } $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; @@ -2026,6 +2055,13 @@ macro_rules! decl_module { $( $on_initialize )* } + $crate::decl_module! { + @impl_try_state_default + { $system } + $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; + { $( $other_where_bounds )* } + } + $crate::decl_module! { @impl_on_runtime_upgrade { $system } diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 16504beb16907..d4f0e73557c77 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -84,8 +84,6 @@ pub use hooks::{ Hooks, IntegrityTest, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet, }; -#[cfg(feature = "try-runtime")] -pub use hooks::{OnRuntimeUpgradeHelpersExt, ON_RUNTIME_UPGRADE_PREFIX}; pub mod schedule; mod storage; @@ -106,3 +104,8 @@ pub use voting::{ ClassCountOf, CurrencyToVote, PollStatus, Polling, SaturatingCurrencyToVote, U128CurrencyToVote, VoteTally, }; + +#[cfg(feature = "try-runtime")] +mod try_runtime; +#[cfg(feature = "try-runtime")] +pub use try_runtime::{OnRuntimeUpgradeHelpersExt, Select as TryStateSelect, TryState}; diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index f8e91886edf0c..e8cd87823f3c3 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -20,6 +20,7 @@ use crate::weights::Weight; use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::traits::AtLeast32BitUnsigned; +use sp_std::prelude::*; /// The block initialization trait. /// @@ -93,9 +94,9 @@ impl OnIdle for Tuple { let start_index = start_index.try_into().ok().expect( "`start_index % len` always fits into `usize`, because `len` can be in maximum `usize::MAX`; qed" ); - for on_idle in on_idle_functions.iter().cycle().skip(start_index).take(len) { + for on_idle_fn in on_idle_functions.iter().cycle().skip(start_index).take(len) { let adjusted_remaining_weight = remaining_weight.saturating_sub(weight); - weight = weight.saturating_add(on_idle(n, adjusted_remaining_weight)); + weight = weight.saturating_add(on_idle_fn(n, adjusted_remaining_weight)); } weight } @@ -114,47 +115,6 @@ pub trait OnGenesis { fn on_genesis() {} } -/// Prefix to be used (optionally) for implementing [`OnRuntimeUpgradeHelpersExt::storage_key`]. -#[cfg(feature = "try-runtime")] -pub const ON_RUNTIME_UPGRADE_PREFIX: &[u8] = b"__ON_RUNTIME_UPGRADE__"; - -/// Some helper functions for [`OnRuntimeUpgrade`] during `try-runtime` testing. 
-#[cfg(feature = "try-runtime")] -pub trait OnRuntimeUpgradeHelpersExt { - /// Generate a storage key unique to this runtime upgrade. - /// - /// This can be used to communicate data from pre-upgrade to post-upgrade state and check - /// them. See [`Self::set_temp_storage`] and [`Self::get_temp_storage`]. - #[cfg(feature = "try-runtime")] - fn storage_key(ident: &str) -> [u8; 32] { - crate::storage::storage_prefix(ON_RUNTIME_UPGRADE_PREFIX, ident.as_bytes()) - } - - /// Get temporary storage data written by [`Self::set_temp_storage`]. - /// - /// Returns `None` if either the data is unavailable or un-decodable. - /// - /// A `at` storage identifier must be provided to indicate where the storage is being read from. - #[cfg(feature = "try-runtime")] - fn get_temp_storage(at: &str) -> Option { - sp_io::storage::get(&Self::storage_key(at)) - .and_then(|bytes| codec::Decode::decode(&mut &*bytes).ok()) - } - - /// Write some temporary data to a specific storage that can be read (potentially in - /// post-upgrade hook) via [`Self::get_temp_storage`]. - /// - /// A `at` storage identifier must be provided to indicate where the storage is being written - /// to. - #[cfg(feature = "try-runtime")] - fn set_temp_storage(data: T, at: &str) { - sp_io::storage::set(&Self::storage_key(at), &data.encode()); - } -} - -#[cfg(feature = "try-runtime")] -impl OnRuntimeUpgradeHelpersExt for U {} - /// The runtime upgrade trait. /// /// Implementing this lets you express what should happen when the runtime upgrades, @@ -272,6 +232,15 @@ pub trait Hooks { Weight::new() } + /// Execute the sanity checks of this pallet, per block. + /// + /// It should focus on certain checks to ensure that the state is sensible. This is never + /// executed in a consensus code-path, therefore it can consume as much weight as it needs. + #[cfg(feature = "try-runtime")] + fn try_state(_n: BlockNumber) -> Result<(), &'static str> { + Ok(()) + } + /// Execute some pre-checks prior to a runtime upgrade. /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. diff --git a/frame/support/src/traits/try_runtime.rs b/frame/support/src/traits/try_runtime.rs new file mode 100644 index 0000000000000..c5ddd1cae42be --- /dev/null +++ b/frame/support/src/traits/try_runtime.rs @@ -0,0 +1,174 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Try-runtime specific traits and types. + +use super::*; +use impl_trait_for_tuples::impl_for_tuples; +use sp_arithmetic::traits::AtLeast32BitUnsigned; +use sp_std::prelude::*; + +/// Prefix to be used (optionally) for implementing [`OnRuntimeUpgradeHelpersExt::storage_key`]. +const ON_RUNTIME_UPGRADE_PREFIX: &[u8] = b"__ON_RUNTIME_UPGRADE__"; + +/// Some helper functions for [`OnRuntimeUpgrade`] during `try-runtime` testing. +pub trait OnRuntimeUpgradeHelpersExt { + /// Generate a storage key unique to this runtime upgrade. 
+ ///
+ /// This can be used to communicate data from pre-upgrade to post-upgrade state and check
+ /// them. See [`Self::set_temp_storage`] and [`Self::get_temp_storage`].
+ fn storage_key(ident: &str) -> [u8; 32] {
+ crate::storage::storage_prefix(ON_RUNTIME_UPGRADE_PREFIX, ident.as_bytes())
+ }
+
+ /// Get temporary storage data written by [`Self::set_temp_storage`].
+ ///
+ /// Returns `None` if either the data is unavailable or un-decodable.
+ ///
+ /// An `at` storage identifier must be provided to indicate where the storage is being read from.
+ fn get_temp_storage(at: &str) -> Option {
+ sp_io::storage::get(&Self::storage_key(at))
+ .and_then(|bytes| codec::Decode::decode(&mut &*bytes).ok())
+ }
+
+ /// Write some temporary data to a specific storage that can be read (potentially in
+ /// post-upgrade hook) via [`Self::get_temp_storage`].
+ ///
+ /// An `at` storage identifier must be provided to indicate where the storage is being written
+ /// to.
+ fn set_temp_storage(data: T, at: &str) {
+ sp_io::storage::set(&Self::storage_key(at), &data.encode());
+ }
+}
+
+impl OnRuntimeUpgradeHelpersExt for U {}
+
+/// Which state tests to execute.
+#[derive(codec::Encode, codec::Decode, Clone)]
+pub enum Select {
+ /// None of them.
+ None,
+ /// All of them.
+ All,
+ /// Run a fixed number of them in a round robin manner.
+ RoundRobin(u32),
+ /// Run only pallets whose name matches the given list.
+ ///
+ /// Pallet names are obtained from [`PalletInfoAccess`].
+ Only(Vec>),
+}
+
+impl Default for Select {
+ fn default() -> Self {
+ Select::None
+ }
+}
+
+impl sp_std::fmt::Debug for Select {
+ fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result {
+ match self {
+ Select::RoundRobin(x) => write!(f, "RoundRobin({})", x),
+ Select::Only(x) => write!(
+ f,
+ "Only({:?})",
+ x.iter()
+ .map(|x| sp_std::str::from_utf8(x).unwrap_or(""))
+ .collect::>(),
+ ),
+ Select::All => write!(f, "All"),
+ Select::None => write!(f, "None"),
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+impl sp_std::str::FromStr for Select {
+ type Err = &'static str;
+ fn from_str(s: &str) -> Result {
+ match s {
+ "all" | "All" => Ok(Select::All),
+ "none" | "None" => Ok(Select::None),
+ _ =>
+ if s.starts_with("rr-") {
+ let count = s
+ .split_once('-')
+ .and_then(|(_, count)| count.parse::().ok())
+ .ok_or("failed to parse count")?;
+ Ok(Select::RoundRobin(count))
+ } else {
+ let pallets = s.split(',').map(|x| x.as_bytes().to_vec()).collect::>();
+ Ok(Select::Only(pallets))
+ },
+ }
+ }
+}
+
+/// Execute some checks to ensure the internal state of a pallet is consistent.
+///
+/// Usually, these checks should check all of the invariants that are expected to be held on all of
+/// the storage items of your pallet.
+pub trait TryState {
+ /// Execute the state checks.
+ fn try_state(_: BlockNumber, _: Select) -> Result<(), &'static str>; +} + +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(all(feature = "tuples-128"), impl_for_tuples(128))] +impl TryState + for Tuple +{ + for_tuples!( where #( Tuple: crate::traits::PalletInfoAccess )* ); + fn try_state(n: BlockNumber, targets: Select) -> Result<(), &'static str> { + match targets { + Select::None => Ok(()), + Select::All => { + let mut result = Ok(()); + for_tuples!( #( result = result.and(Tuple::try_state(n.clone(), targets.clone())); )* ); + result + }, + Select::RoundRobin(len) => { + let functions: &[fn(BlockNumber, Select) -> Result<(), &'static str>] = + &[for_tuples!(#( Tuple::try_state ),*)]; + let skip = n.clone() % (functions.len() as u32).into(); + let skip: u32 = + skip.try_into().unwrap_or_else(|_| sp_runtime::traits::Bounded::max_value()); + let mut result = Ok(()); + for try_state_fn in functions.iter().cycle().skip(skip as usize).take(len as usize) + { + result = result.and(try_state_fn(n.clone(), targets.clone())); + } + result + }, + Select::Only(ref pallet_names) => { + let try_state_fns: &[( + &'static str, + fn(BlockNumber, Select) -> Result<(), &'static str>, + )] = &[for_tuples!( + #( (::name(), Tuple::try_state) ),* + )]; + let mut result = Ok(()); + for (name, try_state_fn) in try_state_fns { + if pallet_names.iter().any(|n| n == name.as_bytes()) { + result = result.and(try_state_fn(n.clone(), targets.clone())); + } + } + result + }, + } + } +} diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index d9afa17eec959..ba076d963bda7 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1309,9 +1309,10 @@ impl Pallet { pub fn finalize() -> T::Header { log::debug!( target: "runtime::system", - "[{:?}] length: {} (normal {}%, op: {}%, mandatory {}%) / normal weight: {} ({}%) \ - / op weight {} ({}%) / mandatory weight {} ({}%)", + "[{:?}] {} extrinsics, length: {} (normal {}%, op: {}%, mandatory {}%) / normal weight:\ + {} ({}%) op weight {} ({}%) / mandatory weight {} ({}%)", Self::block_number(), + Self::extrinsic_index().unwrap_or_default(), Self::all_extrinsics_len(), sp_runtime::Percent::from_rational( Self::all_extrinsics_len(), diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml index f001ef6acd468..8ed34cb50a652 100644 --- a/frame/transaction-storage/Cargo.toml +++ b/frame/transaction-storage/Cargo.toml @@ -48,3 +48,4 @@ std = [ "sp-std/std", "sp-transaction-storage-proof/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/try-runtime/Cargo.toml b/frame/try-runtime/Cargo.toml index 075de318c2a05..dd77c0438d71f 100644 --- a/frame/try-runtime/Cargo.toml +++ b/frame/try-runtime/Cargo.toml @@ -13,7 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"]} +frame-support = { version = "4.0.0-dev", default-features = false, features = [ "try-runtime" ], path = "../support" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = 
"../../primitives/std" } @@ -21,6 +22,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [features] default = [ "std" ] std = [ + "codec/std", "frame-support/std", "sp-api/std", "sp-runtime/std", diff --git a/frame/try-runtime/src/lib.rs b/frame/try-runtime/src/lib.rs index e4f01d40c9d42..7fec92712cd19 100644 --- a/frame/try-runtime/src/lib.rs +++ b/frame/try-runtime/src/lib.rs @@ -19,6 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +pub use frame_support::traits::TryStateSelect; use frame_support::weights::Weight; sp_api::decl_runtime_apis! { @@ -37,6 +38,6 @@ sp_api::decl_runtime_apis! { /// /// This is only sensible where the incoming block is from a different network, yet it has /// the same block format as the runtime implementing this API. - fn execute_block_no_check(block: Block) -> Weight; + fn execute_block(block: Block, state_root_check: bool, try_state: TryStateSelect) -> Weight; } } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 202560f18cf84..83481e745f5ee 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -56,7 +56,7 @@ type ChildKeyValues = Vec<(ChildInfo, Vec)>; const LOG_TARGET: &str = "remote-ext"; const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io:443"; const BATCH_SIZE: usize = 1000; -const PAGE: u32 = 512; +const PAGE: u32 = 1000; #[rpc(client)] pub trait RpcApi { diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 4b4f9bdb2809a..dd98854e84d75 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -31,3 +31,4 @@ sp-keystore = { version = "0.12.0", path = "../../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../../primitives/state-machine" } sp-version = { version = "5.0.0", path = "../../../../primitives/version" } +frame-try-runtime = { path = "../../../../frame/try-runtime" } diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index aee3c34a1e0e8..6a3ef24ff3771 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -19,6 +19,7 @@ use crate::{ build_executor, ensure_matching_spec, extract_code, full_extensions, hash_of, local_spec, state_machine_call_with_proof, SharedParams, State, LOG_TARGET, }; +use parity_scale_codec::Encode; use remote_externalities::rpc_api; use sc_service::{Configuration, NativeExecutionDispatch}; use sp_core::storage::well_known_keys; @@ -26,17 +27,30 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::{fmt::Debug, str::FromStr}; /// Configurations of the [`Command::ExecuteBlock`]. +/// +/// This will always call into `TryRuntime_execute_block`, which can optionally skip the state-root +/// check (useful for trying a unreleased runtime), and can execute runtime sanity checks as well. #[derive(Debug, Clone, clap::Parser)] pub struct ExecuteBlockCmd { /// Overwrite the wasm code in state or not. #[clap(long)] overwrite_wasm_code: bool, - /// If set, then the state root check is disabled by the virtue of calling into - /// `TryRuntime_execute_block_no_check` instead of - /// `Core_execute_block`. + /// If set the state root check is disabled. 
#[clap(long)] - no_check: bool, + no_state_root_check: bool, + + /// Which try-state targets to execute when running this command. + /// + /// Expected values: + /// - `all` + /// - `none` + /// - A comma separated list of pallets, as per pallet names in `construct_runtime!()` (e.g. + /// `Staking, System`). + /// - `rr-[x]` where `[x]` is a number. Then, the given number of pallets are checked in a + /// round-robin fashion. + #[clap(long, default_value = "none")] + try_state: frame_try_runtime::TryStateSelect, /// The block hash at which to fetch the block. /// @@ -70,7 +84,7 @@ pub struct ExecuteBlockCmd { } impl ExecuteBlockCmd { - fn block_at(&self) -> sc_cli::Result + async fn block_at(&self, ws_uri: String) -> sc_cli::Result where Block::Hash: FromStr, ::Err: Debug, @@ -81,6 +95,15 @@ impl ExecuteBlockCmd { log::warn!(target: LOG_TARGET, "--block-at is provided while state type is live. the `Live::at` will be ignored"); hash_of::(block_at) }, + (None, State::Live { at: None, .. }) => { + log::warn!( + target: LOG_TARGET, + "No --block-at or --at provided, using the latest finalized block instead" + ); + remote_externalities::rpc_api::get_finalized_head::(ws_uri) + .await + .map_err(Into::into) + }, (None, State::Live { at: Some(at), .. }) => hash_of::(at), _ => { panic!("either `--block-at` must be provided, or state must be `live with a proper `--at``"); @@ -123,13 +146,14 @@ where let executor = build_executor::(&shared, &config); let execution = shared.execution; - let block_at = command.block_at::()?; let block_ws_uri = command.block_ws_uri::(); + let block_at = command.block_at::(block_ws_uri.clone()).await?; let block: Block = rpc_api::get_block::(block_ws_uri.clone(), block_at).await?; let parent_hash = block.header().parent_hash(); log::info!( target: LOG_TARGET, - "fetched block from {:?}, parent_hash to fetch the state {:?}", + "fetched block #{:?} from {:?}, parent_hash to fetch the state {:?}", + block.header().number(), block_ws_uri, parent_hash ); @@ -162,6 +186,7 @@ where let (mut header, extrinsics) = block.deconstruct(); header.digest_mut().pop(); let block = Block::new(header, extrinsics); + let payload = (block.clone(), !command.no_state_root_check, command.try_state).encode(); let (expected_spec_name, expected_spec_version, _) = local_spec::(&ext, &executor); @@ -177,8 +202,8 @@ where &ext, &executor, execution, - if command.no_check { "TryRuntime_execute_block_no_check" } else { "Core_execute_block" }, - block.encode().as_ref(), + "TryRuntime_execute_block", + &payload, full_extensions(), )?; diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index b6a11699a3d72..01fc1dae15a05 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -23,7 +23,7 @@ use jsonrpsee::{ core::client::{Subscription, SubscriptionClientT}, ws_client::WsClientBuilder, }; -use parity_scale_codec::Decode; +use parity_scale_codec::{Decode, Encode}; use remote_externalities::{rpc_api, Builder, Mode, OnlineConfig}; use sc_executor::NativeExecutionDispatch; use sc_service::Configuration; @@ -40,6 +40,22 @@ pub struct FollowChainCmd { /// The url to connect to. #[clap(short, long, parse(try_from_str = parse::url))] uri: String, + + /// If set, then the state root check is enabled. + #[clap(long)] + state_root_check: bool, + + /// Which try-state targets to execute when running this command. 
+ ///
+ /// Expected values:
+ /// - `all`
+ /// - `none`
+ /// - A comma separated list of pallets, as per pallet names in `construct_runtime!()` (e.g.
+ /// `Staking, System`).
+ /// - `rr-[x]` where `[x]` is a number. Then, the given number of pallets are checked in a
+ /// round-robin fashion.
+ #[clap(long, default_value = "none")]
+ try_state: frame_try_runtime::TryStateSelect,
 }
 pub(crate) async fn follow_chain(
@@ -141,8 +157,8 @@ where
 state_ext,
 &executor,
 execution,
- "TryRuntime_execute_block_no_check",
- block.encode().as_ref(),
+ "TryRuntime_execute_block",
+ (block, command.state_root_check, command.try_state.clone()).encode().as_ref(),
 full_extensions(),
 )?;
diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs
index 9013da95f1722..76679c43f7f14 100644
--- a/utils/frame/try-runtime/cli/src/lib.rs
+++ b/utils/frame/try-runtime/cli/src/lib.rs
@@ -335,17 +335,18 @@ pub enum Command {
 /// different state transition function.
 ///
 /// To make testing slightly more dynamic, you can disable the state root check by enabling
- /// `ExecuteBlockCmd::no_check`. If you get signature verification errors, you should
- /// manually tweak your local runtime's spec version to fix this.
+ /// `ExecuteBlockCmd::no_check`. If you get signature verification errors, you should manually
+ /// tweak your local runtime's spec version to fix this.
 ///
 /// A subtle detail of execute block is that if you want to execute block 100 of a live chain
 /// again, you need to scrape the state of block 99. This is already done automatically if you
 /// use [`State::Live`], and the parent hash of the target block is used to scrape the state.
 /// If [`State::Snap`] is being used, then this needs to be manually taken into consideration.
 ///
- /// This executes the same runtime api as normal block import, namely `Core_execute_block`. If
- /// `ExecuteBlockCmd::no_check` is set, it uses a custom, try-runtime-only runtime
- /// api called `TryRuntime_execute_block_no_check`.
+ /// This does not execute the same runtime api as normal block import does, namely
+ /// `Core_execute_block`. Instead, it uses `TryRuntime_execute_block`, which can optionally
+ /// skip the state-root check (useful for trying an unreleased runtime), and can execute runtime
+ /// sanity checks as well.
 ExecuteBlock(commands::execute_block::ExecuteBlockCmd),
 /// Executes *the offchain worker hooks* of a given block against some state.
@@ -656,21 +657,27 @@ pub(crate) async fn ensure_matching_spec {
- log::error!(
- target: LOG_TARGET,
+ let msg = format!(
 "failed to fetch runtime version from {}: {:?}.
Skipping the check", - uri, - why + uri, why ); + if relaxed { + log::error!(target: LOG_TARGET, "{}", msg); + } else { + panic!("{}", msg); + } }, } } @@ -801,15 +808,15 @@ pub(crate) fn state_machine_call_with_proof>()), proof_nodes.len() ); - log::info!(target: LOG_TARGET, "proof size: {}", humanize(proof_size)); - log::info!(target: LOG_TARGET, "compact proof size: {}", humanize(compact_proof_size),); - log::info!( + log::debug!(target: LOG_TARGET, "proof size: {}", humanize(proof_size)); + log::debug!(target: LOG_TARGET, "compact proof size: {}", humanize(compact_proof_size),); + log::debug!( target: LOG_TARGET, "zstd-compressed compact proof {}", humanize(compressed_proof.len()), From 5b0f999d6be8a29e4d6adfdfb22c314292f1d318 Mon Sep 17 00:00:00 2001 From: ZhiYong Date: Thu, 1 Sep 2022 18:39:25 +0800 Subject: [PATCH 081/348] Name the score of the election in election finalized event (#12069) * Name the score of the election in ElectionFinalized event * fmt * 1.Adjust the logic of the function 'do_elect()'. 2.Add one test that does ElectionCompute::Signed and one that does ElectionCompute::Unsigned to check 'Event::SolutionStored' and 'Event::ElectionFinalized' * Fix from kian * fmt and update comment. Co-authored-by: kianenigma --- .../election-provider-multi-phase/src/lib.rs | 146 +++++++++++++++--- 1 file changed, 121 insertions(+), 25 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 906de8a6c9d20..84949080aa58c 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -494,6 +494,8 @@ pub enum ElectionError { DataProvider(&'static str), /// An error nested in the fallback. Fallback(FallbackErrorOf), + /// No solution has been queued. + NothingQueued, } // NOTE: we have to do this manually because of the additional where clause needed on @@ -900,7 +902,7 @@ pub mod pallet { let ejected_a_solution = >::exists(); >::put(ready); Self::deposit_event(Event::SolutionStored { - election_compute: ElectionCompute::Unsigned, + compute: ElectionCompute::Unsigned, prev_ejected: ejected_a_solution, }); @@ -948,7 +950,7 @@ pub mod pallet { }; Self::deposit_event(Event::SolutionStored { - election_compute: ElectionCompute::Emergency, + compute: ElectionCompute::Emergency, prev_ejected: QueuedSolution::::exists(), }); @@ -1024,7 +1026,7 @@ pub mod pallet { signed_submissions.put(); Self::deposit_event(Event::SolutionStored { - election_compute: ElectionCompute::Signed, + compute: ElectionCompute::Signed, prev_ejected: ejected_a_solution, }); Ok(()) @@ -1062,7 +1064,7 @@ pub mod pallet { }; Self::deposit_event(Event::SolutionStored { - election_compute: ElectionCompute::Fallback, + compute: ElectionCompute::Fallback, prev_ejected: QueuedSolution::::exists(), }); @@ -1080,10 +1082,13 @@ pub mod pallet { /// solution is unsigned, this means that it has also been processed. /// /// The `bool` is `true` when a previous solution was ejected to make room for this one. - SolutionStored { election_compute: ElectionCompute, prev_ejected: bool }, - /// The election has been finalized, with `Some` of the given computation, or else if the - /// election failed, `None`. - ElectionFinalized { election_compute: Option }, + SolutionStored { compute: ElectionCompute, prev_ejected: bool }, + /// The election has been finalized, with the given computation and score. + ElectionFinalized { compute: ElectionCompute, score: ElectionScore }, + /// An election failed. 
+ /// + /// Not much can be said about which computes failed in the process. + ElectionFailed, /// An account has been rewarded for their signed submission being finalized. Rewarded { account: ::AccountId, value: BalanceOf }, /// An account has been slashed for submitting an invalid signed submission. @@ -1530,23 +1535,25 @@ impl Pallet { // inexpensive (1 read of an empty vector). let _ = Self::finalize_signed_phase(); >::take() - .map_or_else( - || { - T::Fallback::elect() - .map_err(|fe| ElectionError::Fallback(fe)) - .map(|supports| (supports, ElectionCompute::Fallback)) - }, - |ReadySolution { supports, compute, .. }| Ok((supports, compute)), - ) - .map(|(supports, compute)| { - Self::deposit_event(Event::ElectionFinalized { election_compute: Some(compute) }); + .ok_or(ElectionError::::NothingQueued) + .or_else(|_| { + T::Fallback::elect() + .map(|supports| ReadySolution { + supports, + score: Default::default(), + compute: ElectionCompute::Fallback, + }) + .map_err(|fe| ElectionError::Fallback(fe)) + }) + .map(|ReadySolution { compute, score, supports }| { + Self::deposit_event(Event::ElectionFinalized { compute, score }); if Self::round() != 1 { log!(info, "Finalized election round with compute {:?}.", compute); } supports }) .map_err(|err| { - Self::deposit_event(Event::ElectionFinalized { election_compute: None }); + Self::deposit_event(Event::ElectionFailed); if Self::round() != 1 { log!(warn, "Failed to finalize election round. reason {:?}", err); } @@ -1800,8 +1807,9 @@ mod tests { use super::*; use crate::{ mock::{ - multi_phase_events, roll_to, AccountId, ExtBuilder, MockWeightInfo, MockedWeightInfo, - MultiPhase, Origin, Runtime, SignedMaxSubmissions, System, TargetIndex, Targets, + multi_phase_events, raw_solution, roll_to, AccountId, ExtBuilder, MockWeightInfo, + MockedWeightInfo, MultiPhase, Origin, Runtime, SignedMaxSubmissions, System, + TargetIndex, Targets, }, Phase, }; @@ -1967,7 +1975,10 @@ mod tests { multi_phase_events(), vec![ Event::SignedPhaseStarted { round: 1 }, - Event::ElectionFinalized { election_compute: Some(ElectionCompute::Fallback) } + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: Default::default() + } ], ); // All storage items must be cleared. @@ -2016,6 +2027,88 @@ mod tests { }) } + #[test] + fn check_events_with_compute_signed() { + ExtBuilder::default().build_and_execute(|| { + roll_to(14); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + assert_ok!(MultiPhase::submit(crate::mock::Origin::signed(99), Box::new(solution))); + + roll_to(30); + assert_ok!(MultiPhase::elect()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Signed, + score: ElectionScore { + minimal_stake: 40, + sum_stake: 100, + sum_stake_squared: 5200 + } + } + ], + ); + }) + } + + #[test] + fn check_events_with_compute_unsigned() { + ExtBuilder::default().build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + // ensure we have snapshots in place. + assert!(MultiPhase::snapshot().is_some()); + assert_eq!(MultiPhase::desired_targets().unwrap(), 2); + + // mine seq_phragmen solution with 2 iters. 
+ let (solution, witness) = MultiPhase::mine_solution().unwrap(); + + // ensure this solution is valid. + assert!(MultiPhase::queued_solution().is_none()); + assert_ok!(MultiPhase::submit_unsigned( + crate::mock::Origin::none(), + Box::new(solution), + witness + )); + assert!(MultiPhase::queued_solution().is_some()); + + roll_to(30); + assert_ok!(MultiPhase::elect()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::SolutionStored { + compute: ElectionCompute::Unsigned, + prev_ejected: false + }, + Event::ElectionFinalized { + compute: ElectionCompute::Unsigned, + score: ElectionScore { + minimal_stake: 40, + sum_stake: 100, + sum_stake_squared: 5200 + } + } + ], + ); + }) + } + #[test] fn fallback_strategy_works() { ExtBuilder::default().onchain_fallback(true).build_and_execute(|| { @@ -2080,12 +2173,15 @@ mod tests { vec![ Event::SignedPhaseStarted { round: 1 }, Event::UnsignedPhaseStarted { round: 1 }, - Event::ElectionFinalized { election_compute: None }, + Event::ElectionFailed, Event::SolutionStored { - election_compute: ElectionCompute::Fallback, + compute: ElectionCompute::Fallback, prev_ejected: false }, - Event::ElectionFinalized { election_compute: Some(ElectionCompute::Fallback) } + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: Default::default() + } ] ); }) From 6bd6b3109d2fa759e384353791aadd2988454881 Mon Sep 17 00:00:00 2001 From: NingLin-P Date: Thu, 1 Sep 2022 23:45:34 +0800 Subject: [PATCH 082/348] statedb: allow longer state pruning history (#11980) * introduce DbBackedQueue for the state pruning window Signed-off-by: linning * avoid cloning for next_hash Signed-off-by: linning * add tests Signed-off-by: linning * make clippy happy Signed-off-by: linning * impl have_block by checking block number Signed-off-by: linning * refactor Signed-off-by: linning * fix tests & add test for init db-backed queue Signed-off-by: linning * update comment Signed-off-by: linning * add check for have_state_at Signed-off-by: linning * address comment Signed-off-by: linning * renanme unload_blocks to uncached_blocks Signed-off-by: linning * address comment Signed-off-by: linning * fix syncs_state test Signed-off-by: linning * address comment Signed-off-by: linning * revert change to make_test_db to add test cases Signed-off-by: linning * do not prune unavailable block & add tests Signed-off-by: linning * Update client/state-db/src/lib.rs Signed-off-by: linning Co-authored-by: cheme * Update client/state-db/src/pruning.rs Signed-off-by: linning Co-authored-by: cheme * address comment Signed-off-by: linning Signed-off-by: linning Co-authored-by: cheme --- client/db/src/lib.rs | 67 +- client/state-db/src/lib.rs | 281 +++++--- client/state-db/src/noncanonical.rs | 11 +- client/state-db/src/pruning.rs | 976 ++++++++++++++++++++++------ client/state-db/src/test.rs | 34 +- 5 files changed, 1051 insertions(+), 318 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 465db08fe3afc..79ef7e9b6625d 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -63,7 +63,7 @@ use sc_client_api::{ utils::is_descendent_of, IoInfo, MemoryInfo, MemorySize, UsageInfo, }; -use sc_state_db::StateDb; +use sc_state_db::{IsPruned, StateDb}; use sp_arithmetic::traits::Saturating; use sp_blockchain::{ well_known_cache_keys, Backend as _, CachedHeaderMetadata, Error as ClientError, HeaderBackend, @@ -442,9 +442,10 @@ struct PendingBlock { } // wrapper that implements trait 
required for state_db -struct StateMetaDb<'a>(&'a dyn Database); +#[derive(Clone)] +struct StateMetaDb(Arc>); -impl<'a> sc_state_db::MetaDb for StateMetaDb<'a> { +impl sc_state_db::MetaDb for StateMetaDb { type Error = sp_database::error::DatabaseError; fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { @@ -915,7 +916,7 @@ impl sc_client_api::backend::BlockImportOperation struct StorageDb { pub db: Arc>, - pub state_db: StateDb>, + pub state_db: StateDb, StateMetaDb>, prefix_keys: bool, } @@ -1104,11 +1105,11 @@ impl Backend { let mut db_init_transaction = Transaction::new(); let requested_state_pruning = config.state_pruning.clone(); - let state_meta_db = StateMetaDb(db.as_ref()); + let state_meta_db = StateMetaDb(db.clone()); let map_e = sp_blockchain::Error::from_state_db; let (state_db_init_commit_set, state_db) = StateDb::open( - &state_meta_db, + state_meta_db, requested_state_pruning, !db.supports_ref_counting(), should_init, @@ -1317,10 +1318,11 @@ impl Backend { } trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); - let commit = - self.storage.state_db.canonicalize_block(&hash).map_err( - sp_blockchain::Error::from_state_db::>, - )?; + let commit = self.storage.state_db.canonicalize_block(&hash).map_err( + sp_blockchain::Error::from_state_db::< + sc_state_db::Error, + >, + )?; apply_state_commit(transaction, commit); } Ok(()) @@ -1471,14 +1473,16 @@ impl Backend { .storage .state_db .insert_block(&hash, number_u64, pending_block.header.parent_hash(), changeset) - .map_err(|e: sc_state_db::Error| { + .map_err(|e: sc_state_db::Error| { sp_blockchain::Error::from_state_db(e) })?; apply_state_commit(&mut transaction, commit); if number <= last_finalized_num { // Canonicalize in the db when re-importing existing blocks with state. 
let commit = self.storage.state_db.canonicalize_block(&hash).map_err( - sp_blockchain::Error::from_state_db::>, + sp_blockchain::Error::from_state_db::< + sc_state_db::Error, + >, )?; apply_state_commit(&mut transaction, commit); meta_updates.push(MetaUpdate { @@ -1679,10 +1683,11 @@ impl Backend { .map(|c| f_num.saturated_into::() > c) .unwrap_or(true) { - let commit = - self.storage.state_db.canonicalize_block(&f_hash).map_err( - sp_blockchain::Error::from_state_db::>, - )?; + let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err( + sp_blockchain::Error::from_state_db::< + sc_state_db::Error, + >, + )?; apply_state_commit(transaction, commit); } @@ -2294,13 +2299,14 @@ impl sc_client_api::backend::Backend for Backend { match self.blockchain.header_metadata(hash) { Ok(ref hdr) => { - if !self.have_state_at(&hash, hdr.number) { - return Err(sp_blockchain::Error::UnknownBlock(format!( - "State already discarded for {:?}", - block - ))) - } - if let Ok(()) = self.storage.state_db.pin(&hash) { + let hint = || { + sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref()) + .unwrap_or(None) + .is_some() + }; + if let Ok(()) = + self.storage.state_db.pin(&hash, hdr.number.saturated_into::(), hint) + { let root = hdr.state_root; let db_state = DbStateBuilder::::new(self.storage.clone(), root) .with_optional_cache( @@ -2333,7 +2339,20 @@ impl sc_client_api::backend::Backend for Backend { _ => false, } } else { - !self.storage.state_db.is_pruned(hash, number.saturated_into::()) + match self.storage.state_db.is_pruned(hash, number.saturated_into::()) { + IsPruned::Pruned => false, + IsPruned::NotPruned => true, + IsPruned::MaybePruned => match self.blockchain.header_metadata(*hash) { + Ok(header) => sp_state_machine::Storage::get( + self.storage.as_ref(), + &header.state_root, + (&[], None), + ) + .unwrap_or(None) + .is_some(), + _ => false, + }, + } } } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 1c7140777e16e..f21b707a489f0 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -51,7 +51,7 @@ use log::trace; use noncanonical::NonCanonicalOverlay; use parity_util_mem::{malloc_size, MallocSizeOf}; use parking_lot::RwLock; -use pruning::RefWindow; +use pruning::{HaveBlock, RefWindow}; use sc_client_api::{MemorySize, StateDbMemoryInfo}; use std::{ collections::{hash_map::Entry, HashMap}, @@ -62,6 +62,7 @@ const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; const PRUNING_MODE_ARCHIVE_CANON: &[u8] = b"archive_canonical"; const PRUNING_MODE_CONSTRAINED: &[u8] = b"constrained"; +pub(crate) const DEFAULT_MAX_BLOCK_CONSTRAINT: u32 = 256; /// Database value type. pub type DBValue = Vec; @@ -115,12 +116,14 @@ pub trait NodeDb { } /// Error type. +#[derive(Eq, PartialEq)] pub enum Error { /// Database backend error. Db(E), StateDb(StateDbError), } +#[derive(Eq, PartialEq)] pub enum StateDbError { /// `Codec` decoding error. 
Decoding(codec::Error), @@ -138,6 +141,10 @@ pub enum StateDbError { BlockAlreadyExists, /// Invalid metadata Metadata(String), + /// Trying to get a block record from db while it is not commit to db yet + BlockUnavailable, + /// Block record is missing from the pruning window + BlockMissing, } impl From for Error { @@ -182,6 +189,9 @@ impl fmt::Debug for StateDbError { Self::TooManySiblingBlocks => write!(f, "Too many sibling blocks inserted"), Self::BlockAlreadyExists => write!(f, "Block already exists"), Self::Metadata(message) => write!(f, "Invalid metadata: {}", message), + Self::BlockUnavailable => + write!(f, "Trying to get a block record from db while it is not commit to db yet"), + Self::BlockMissing => write!(f, "Block record is missing from the pruning window"), } } } @@ -266,7 +276,7 @@ impl Default for PruningMode { impl Default for Constraints { fn default() -> Self { - Self { max_blocks: Some(256), max_mem: None } + Self { max_blocks: Some(DEFAULT_MAX_BLOCK_CONSTRAINT), max_mem: None } } } @@ -276,38 +286,41 @@ fn to_meta_key(suffix: &[u8], data: &S) -> Vec { buffer } -struct StateDbSync { +pub struct StateDbSync { mode: PruningMode, non_canonical: NonCanonicalOverlay, - pruning: Option>, + pruning: Option>, pinned: HashMap, } -impl StateDbSync { - fn new( +impl + StateDbSync +{ + fn new( mode: PruningMode, ref_counting: bool, - db: &D, - ) -> Result, Error> { + db: D, + ) -> Result, Error> { trace!(target: "state-db", "StateDb settings: {:?}. Ref-counting: {}", mode, ref_counting); - let non_canonical: NonCanonicalOverlay = NonCanonicalOverlay::new(db)?; - let pruning: Option> = match mode { + let non_canonical: NonCanonicalOverlay = NonCanonicalOverlay::new(&db)?; + let pruning: Option> = match mode { PruningMode::Constrained(Constraints { max_mem: Some(_), .. }) => unimplemented!(), - PruningMode::Constrained(_) => Some(RefWindow::new(db, ref_counting)?), + PruningMode::Constrained(Constraints { max_blocks, .. 
}) => + Some(RefWindow::new(db, max_blocks.unwrap_or(0), ref_counting)?), PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, }; Ok(StateDbSync { mode, non_canonical, pruning, pinned: Default::default() }) } - fn insert_block( + fn insert_block( &mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet, - ) -> Result, Error> { + ) -> Result, Error> { match self.mode { PruningMode::ArchiveAll => { changeset.deleted.clear(); @@ -321,25 +334,23 @@ impl StateDbSync( - &mut self, - hash: &BlockHash, - ) -> Result, Error> { + fn canonicalize_block(&mut self, hash: &BlockHash) -> Result, Error> { + // NOTE: it is important that the change to `LAST_CANONICAL` (emit from + // `non_canonical.canonicalize`) and the insert of the new pruning journal (emit from + // `pruning.note_canonical`) are collected into the same `CommitSet` and are committed to + // the database atomically to keep their consistency when restarting the node let mut commit = CommitSet::default(); if self.mode == PruningMode::ArchiveAll { return Ok(commit) } - match self.non_canonical.canonicalize(hash, &mut commit) { - Ok(()) => - if self.mode == PruningMode::ArchiveCanonical { - commit.data.deleted.clear(); - }, - Err(e) => return Err(e.into()), - }; + let number = self.non_canonical.canonicalize(hash, &mut commit)?; + if self.mode == PruningMode::ArchiveCanonical { + commit.data.deleted.clear(); + } if let Some(ref mut pruning) = self.pruning { - pruning.note_canonical(hash, &mut commit); + pruning.note_canonical(hash, number, &mut commit)?; } - self.prune(&mut commit); + self.prune(&mut commit)?; Ok(commit) } @@ -347,22 +358,31 @@ impl StateDbSync bool { + fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned { match self.mode { - PruningMode::ArchiveAll => false, + PruningMode::ArchiveAll => IsPruned::NotPruned, PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { if self.best_canonical().map(|c| number > c).unwrap_or(true) { - !self.non_canonical.have_block(hash) + if self.non_canonical.have_block(hash) { + IsPruned::NotPruned + } else { + IsPruned::Pruned + } } else { - self.pruning.as_ref().map_or(false, |pruning| { - number < pruning.pending() || !pruning.have_block(hash) - }) + match self.pruning.as_ref() { + None => IsPruned::NotPruned, + Some(pruning) => match pruning.have_block(hash, number) { + HaveBlock::NotHave => IsPruned::Pruned, + HaveBlock::Have => IsPruned::NotPruned, + HaveBlock::MayHave => IsPruned::MaybePruned, + }, + } } }, } } - fn prune(&mut self, commit: &mut CommitSet) { + fn prune(&mut self, commit: &mut CommitSet) -> Result<(), Error> { if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = (&mut self.pruning, &self.mode) { @@ -376,12 +396,23 @@ impl StateDbSync break, + res => + if res?.map_or(false, |h| pinned.contains_key(&h)) { + break + }, + } + match pruning.prune_one(commit) { + // this branch should not reach as previous `next_hash` don't return error + // keeping it for robustness + Err(Error::StateDb(StateDbError::BlockUnavailable)) => break, + res => res?, } - pruning.prune_one(commit); } } + Ok(()) } /// Revert all non-canonical blocks with the best block number. 
@@ -403,13 +434,22 @@ impl StateDbSync Result<(), PinError> { + fn pin(&mut self, hash: &BlockHash, number: u64, hint: F) -> Result<(), PinError> + where + F: Fn() -> bool, + { match self.mode { PruningMode::ArchiveAll => Ok(()), PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - if self.non_canonical.have_block(hash) || - self.pruning.as_ref().map_or(false, |pruning| pruning.have_block(hash)) - { + let have_block = self.non_canonical.have_block(hash) || + self.pruning.as_ref().map_or(false, |pruning| { + match pruning.have_block(hash, number) { + HaveBlock::NotHave => false, + HaveBlock::Have => true, + HaveBlock::MayHave => hint(), + } + }); + if have_block { let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { trace!(target: "state-db-pin", "Pinned block: {:?}", hash); @@ -440,13 +480,13 @@ impl StateDbSync( + pub fn get( &self, key: &Q, - db: &D, - ) -> Result, Error> + db: &DB, + ) -> Result, Error> where - Q: AsRef, + Q: AsRef, Key: std::borrow::Borrow, Q: std::hash::Hash + Eq, { @@ -461,10 +501,11 @@ impl StateDbSync StateDbSync StateDbMemoryInfo { StateDbMemoryInfo { non_canonical: MemorySize::from_bytes(malloc_size(&self.non_canonical)), - pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(p))), + pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(&p))), pinned: MemorySize::from_bytes(malloc_size(&self.pinned)), } } @@ -490,22 +531,21 @@ impl StateDbSync { - db: RwLock>, +pub struct StateDb { + db: RwLock>, } -impl StateDb { +impl + StateDb +{ /// Create an instance of [`StateDb`]. - pub fn open( - db: &D, + pub fn open( + db: D, requested_mode: Option, ref_counting: bool, should_init: bool, - ) -> Result<(CommitSet, StateDb), Error> - where - D: MetaDb, - { - let stored_mode = fetch_stored_pruning_mode(db)?; + ) -> Result<(CommitSet, StateDb), Error> { + let stored_mode = fetch_stored_pruning_mode(&db)?; let selected_mode = match (should_init, stored_mode, requested_mode) { (true, stored_mode, requested_mode) => { @@ -548,27 +588,28 @@ impl StateDb( + pub fn insert_block( &self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet, - ) -> Result, Error> { + ) -> Result, Error> { self.db.write().insert_block(hash, number, parent_hash, changeset) } /// Finalize a previously inserted block. - pub fn canonicalize_block( - &self, - hash: &BlockHash, - ) -> Result, Error> { + pub fn canonicalize_block(&self, hash: &BlockHash) -> Result, Error> { self.db.write().canonicalize_block(hash) } /// Prevents pruning of specified block and its descendants. - pub fn pin(&self, hash: &BlockHash) -> Result<(), PinError> { - self.db.write().pin(hash) + /// `hint` used for futher checking if the given block exists + pub fn pin(&self, hash: &BlockHash, number: u64, hint: F) -> Result<(), PinError> + where + F: Fn() -> bool, + { + self.db.write().pin(hash, number, hint) } /// Allows pruning of specified block. @@ -577,13 +618,13 @@ impl StateDb( + pub fn get( &self, key: &Q, - db: &D, - ) -> Result, Error> + db: &DB, + ) -> Result, Error> where - Q: AsRef, + Q: AsRef, Key: std::borrow::Borrow, Q: std::hash::Hash + Eq, { @@ -609,7 +650,7 @@ impl StateDb bool { + pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> IsPruned { return self.db.read().is_pruned(hash, number) } @@ -629,6 +670,17 @@ impl StateDb(db: &D) -> Result, Error> { let meta_key_mode = to_meta_key(PRUNING_MODE, &()); if let Some(stored_mode) = db.get_meta(&meta_key_mode).map_err(Error::Db)? 
{ @@ -664,20 +716,19 @@ fn choose_pruning_mode( mod tests { use crate::{ test::{make_changeset, make_db, TestDb}, - Constraints, Error, PruningMode, StateDb, StateDbError, + Constraints, Error, IsPruned, PruningMode, StateDb, StateDbError, }; use sp_core::H256; - use std::io; - fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { + fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); let (state_db_init, state_db) = - StateDb::open(&mut db, Some(settings), false, true).unwrap(); + StateDb::open(db.clone(), Some(settings), false, true).unwrap(); db.commit(&state_db_init); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(1), 1, &H256::from_low_u64_be(0), @@ -687,7 +738,7 @@ mod tests { ); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(21), 2, &H256::from_low_u64_be(1), @@ -697,7 +748,7 @@ mod tests { ); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(22), 2, &H256::from_low_u64_be(1), @@ -707,7 +758,7 @@ mod tests { ); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(3), 3, &H256::from_low_u64_be(21), @@ -716,11 +767,11 @@ mod tests { .unwrap(), ); state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(1)).unwrap()); + db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(1)).unwrap()); state_db.apply_pending(); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(4), 4, &H256::from_low_u64_be(3), @@ -729,9 +780,9 @@ mod tests { .unwrap(), ); state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(21)).unwrap()); + db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(21)).unwrap()); state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(3)).unwrap()); + db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(3)).unwrap()); state_db.apply_pending(); (db, state_db) @@ -741,7 +792,7 @@ mod tests { fn full_archive_keeps_everything() { let (db, sdb) = make_test_db(PruningMode::ArchiveAll); assert!(db.data_eq(&make_db(&[1, 21, 22, 3, 4, 91, 921, 922, 93, 94]))); - assert!(!sdb.is_pruned(&H256::from_low_u64_be(0), 0)); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::NotPruned); } #[test] @@ -750,6 +801,43 @@ mod tests { assert!(db.data_eq(&make_db(&[1, 21, 3, 91, 921, 922, 93, 94]))); } + #[test] + fn block_record_unavailable() { + let (mut db, state_db) = make_test_db(PruningMode::Constrained(Constraints { + max_blocks: Some(1), + max_mem: None, + })); + // import 2 blocks + for i in &[5, 6] { + db.commit( + &state_db + .insert_block( + &H256::from_low_u64_be(*i), + *i, + &H256::from_low_u64_be(*i - 1), + make_changeset(&[], &[]), + ) + .unwrap(), + ); + } + // canonicalize block 4 but not commit it to db + let c1 = state_db.canonicalize_block(&H256::from_low_u64_be(4)).unwrap(); + assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(3), 3), IsPruned::Pruned); + + // canonicalize block 5 but not commit it to db, block 4 is not pruned due to it is not + // commit to db yet (unavailable), return `MaybePruned` here because `apply_pending` is not + // called and block 3 is still in cache + let c2 = state_db.canonicalize_block(&H256::from_low_u64_be(5)).unwrap(); + assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(4), 4), IsPruned::MaybePruned); + + // commit block 4 and 5 to db, and import a new block will prune both block 4 and 5 + 
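// A caller outside the backend that needs a definite answer can resolve `MaybePruned`
// by probing the node database for the block's state root, mirroring how the client
// backend checks for available state. A minimal sketch (assuming a `node_db` that
// implements `NodeDb<Key = H256>` and the block's `state_root` are at hand):
//
//     let definitely_pruned = match state_db.is_pruned(&hash, number) {
//         IsPruned::Pruned => true,
//         IsPruned::NotPruned => false,
//         IsPruned::MaybePruned => node_db.get(&state_root).unwrap_or(None).is_none(),
//     };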
db.commit(&c1); + db.commit(&c2); + db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(6)).unwrap()); + assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(4), 4), IsPruned::Pruned); + assert_eq!(state_db.is_pruned(&H256::from_low_u64_be(5), 5), IsPruned::Pruned); + } + #[test] fn prune_window_0() { let (db, _) = make_test_db(PruningMode::Constrained(Constraints { @@ -765,10 +853,10 @@ mod tests { max_blocks: Some(1), max_mem: None, })); - assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(21), 2)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::Pruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(1), 1), IsPruned::Pruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(21), 2), IsPruned::Pruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(22), 2), IsPruned::Pruned); assert!(db.data_eq(&make_db(&[21, 3, 922, 93, 94]))); } @@ -778,10 +866,10 @@ mod tests { max_blocks: Some(2), max_mem: None, })); - assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); - assert!(!sdb.is_pruned(&H256::from_low_u64_be(21), 2)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(0), 0), IsPruned::Pruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(1), 1), IsPruned::Pruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(21), 2), IsPruned::NotPruned); + assert_eq!(sdb.is_pruned(&H256::from_low_u64_be(22), 2), IsPruned::Pruned); assert!(db.data_eq(&make_db(&[1, 21, 3, 921, 922, 93, 94]))); } @@ -789,11 +877,11 @@ mod tests { fn detects_incompatible_mode() { let mut db = make_db(&[]); let (state_db_init, state_db) = - StateDb::open(&mut db, Some(PruningMode::ArchiveAll), false, true).unwrap(); + StateDb::open(db.clone(), Some(PruningMode::ArchiveAll), false, true).unwrap(); db.commit(&state_db_init); db.commit( &state_db - .insert_block::( + .insert_block( &H256::from_low_u64_be(0), 0, &H256::from_low_u64_be(0), @@ -802,8 +890,8 @@ mod tests { .unwrap(), ); let new_mode = PruningMode::Constrained(Constraints { max_blocks: Some(2), max_mem: None }); - let state_db_open_result: Result<(_, StateDb), _> = - StateDb::open(&mut db, Some(new_mode), false, false); + let state_db_open_result: Result<(_, StateDb), _> = + StateDb::open(db.clone(), Some(new_mode), false, false); assert!(state_db_open_result.is_err()); } @@ -814,12 +902,13 @@ mod tests { ) { let mut db = make_db(&[]); let (state_db_init, state_db) = - StateDb::::open(&mut db, mode_when_created, false, true).unwrap(); + StateDb::::open(db.clone(), mode_when_created, false, true) + .unwrap(); db.commit(&state_db_init); std::mem::drop(state_db); let state_db_reopen_result = - StateDb::::open(&mut db, mode_when_reopened, false, false); + StateDb::::open(db.clone(), mode_when_reopened, false, false); if let Ok(expected_mode) = expected_effective_mode_when_reopenned { let (state_db_init, state_db_reopened) = state_db_reopen_result.unwrap(); db.commit(&state_db_init); diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 13cf5825b1b24..559fc7ca023fe 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -28,7 +28,7 @@ use log::trace; use std::collections::{hash_map::Entry, HashMap, VecDeque}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; -const 
LAST_CANONICAL: &[u8] = b"last_canonical"; +pub(crate) const LAST_CANONICAL: &[u8] = b"last_canonical"; const MAX_BLOCKS_PER_LEVEL: u64 = 32; /// See module documentation. @@ -376,12 +376,13 @@ impl NonCanonicalOverlay { } /// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root. - /// Returns a set of changes that need to be added to the DB. + /// Add a set of changes of the canonicalized block to `CommitSet` + /// Return the block number of the canonicalized block pub fn canonicalize( &mut self, hash: &BlockHash, commit: &mut CommitSet, - ) -> Result<(), StateDbError> { + ) -> Result { trace!(target: "state-db", "Canonicalizing {:?}", hash); let level = self .levels @@ -431,7 +432,7 @@ impl NonCanonicalOverlay { .push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); self.pending_canonicalizations.push(hash.clone()); - Ok(()) + Ok(canonicalized.1) } fn apply_canonicalizations(&mut self) { @@ -755,7 +756,7 @@ mod tests { .unwrap(), ); db.commit(&overlay.insert(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); - assert_eq!(db.meta.len(), 3); + assert_eq!(db.meta_len(), 3); let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); assert_eq!(overlay.levels, overlay2.levels); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 0fdcb8e822b6f..2c23110910495 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -24,74 +24,74 @@ //! the death list. //! The changes are journaled in the DB. -use crate::{to_meta_key, CommitSet, Error, Hash, MetaDb}; +use crate::{ + noncanonical::LAST_CANONICAL, to_meta_key, CommitSet, Error, Hash, MetaDb, StateDbError, + DEFAULT_MAX_BLOCK_CONSTRAINT, +}; use codec::{Decode, Encode}; -use log::{trace, warn}; -use std::collections::{HashMap, HashSet, VecDeque}; +use log::{error, trace, warn}; +use std::{ + cmp, + collections::{HashMap, HashSet, VecDeque}, +}; -const LAST_PRUNED: &[u8] = b"last_pruned"; +pub(crate) const LAST_PRUNED: &[u8] = b"last_pruned"; const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; /// See module documentation. #[derive(parity_util_mem_derive::MallocSizeOf)] -pub struct RefWindow { - /// A queue of keys that should be deleted for each block in the pruning window. - death_rows: VecDeque>, - /// An index that maps each key from `death_rows` to block number. - death_index: HashMap, +pub struct RefWindow { + /// A queue of blocks keep tracking keys that should be deleted for each block in the + /// pruning window. + queue: DeathRowQueue, /// Block number that corresponds to the front of `death_rows`. - pending_number: u64, + base: u64, /// Number of call of `note_canonical` after /// last call `apply_pending` or `revert_pending` pending_canonicalizations: usize, /// Number of calls of `prune_one` after /// last call `apply_pending` or `revert_pending` pending_prunings: usize, - /// Keep track of re-inserted keys and do not delete them when pruning. - /// Setting this to false requires backend that supports reference - /// counting. 
- count_insertions: bool, } -#[derive(Debug, PartialEq, Eq, parity_util_mem_derive::MallocSizeOf)] -struct DeathRow { - hash: BlockHash, - journal_key: Vec, - deleted: HashSet, -} - -#[derive(Encode, Decode)] -struct JournalRecord { - hash: BlockHash, - inserted: Vec, - deleted: Vec, -} - -fn to_journal_key(block: u64) -> Vec { - to_meta_key(PRUNING_JOURNAL, &block) +/// `DeathRowQueue` used to keep track of blocks in the pruning window, there are two flavors: +/// - `Mem`, used when the backend database do not supports reference counting, keep all +/// blocks in memory, and keep track of re-inserted keys to not delete them when pruning +/// - `DbBacked`, used when the backend database supports reference counting, only keep +/// a few number of blocks in memory and load more blocks on demand +#[derive(parity_util_mem_derive::MallocSizeOf)] +enum DeathRowQueue { + Mem { + /// A queue of keys that should be deleted for each block in the pruning window. + death_rows: VecDeque>, + /// An index that maps each key from `death_rows` to block number. + death_index: HashMap, + }, + DbBacked { + // The backend database + #[ignore_malloc_size_of = "Shared data"] + db: D, + /// A queue of keys that should be deleted for each block in the pruning window. + /// Only caching the first fews blocks of the pruning window, blocks inside are + /// successive and ordered by block number + cache: VecDeque>, + /// A soft limit of the cache's size + cache_capacity: usize, + /// The number of blocks in queue that are not loaded into `cache`. + uncached_blocks: usize, + }, } -impl RefWindow { - pub fn new( - db: &D, - count_insertions: bool, - ) -> Result, Error> { - let last_pruned = db.get_meta(&to_meta_key(LAST_PRUNED, &())).map_err(Error::Db)?; - let pending_number: u64 = match last_pruned { - Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1, - None => 0, - }; - let mut block = pending_number; - let mut pruning = RefWindow { - death_rows: Default::default(), - death_index: Default::default(), - pending_number, - pending_canonicalizations: 0, - pending_prunings: 0, - count_insertions, +impl DeathRowQueue { + /// Return a `DeathRowQueue` that all blocks are keep in memory + fn new_mem(db: &D, base: u64) -> Result, Error> { + let mut block = base; + let mut queue = DeathRowQueue::::Mem { + death_rows: VecDeque::new(), + death_index: HashMap::new(), }; // read the journal - trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number); + trace!(target: "state-db", "Reading pruning journal for the memory queue. Pending #{}", base); loop { let journal_key = to_journal_key(block); match db.get_meta(&journal_key).map_err(Error::Db)? 
{ @@ -99,102 +99,444 @@ impl RefWindow { let record: JournalRecord = Decode::decode(&mut record.as_slice())?; trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - pruning.import( - &record.hash, - journal_key, - record.inserted.into_iter(), - record.deleted, - ); + queue.import(base, record); }, None => break, } block += 1; } - Ok(pruning) + Ok(queue) + } + + /// Return a `DeathRowQueue` that backed by an database, and only keep a few number + /// of blocks in memory + fn new_db_backed( + db: D, + base: u64, + mut uncached_blocks: usize, + window_size: u32, + ) -> Result, Error> { + // limit the cache capacity from 1 to `DEFAULT_MAX_BLOCK_CONSTRAINT` + let cache_capacity = window_size.max(1).min(DEFAULT_MAX_BLOCK_CONSTRAINT) as usize; + let mut cache = VecDeque::with_capacity(cache_capacity); + trace!(target: "state-db", "Reading pruning journal for the database-backed queue. Pending #{}", base); + // Load block from db + DeathRowQueue::load_batch_from_db( + &db, + &mut uncached_blocks, + &mut cache, + base, + cache_capacity, + )?; + Ok(DeathRowQueue::DbBacked { db, cache, cache_capacity, uncached_blocks }) + } + + /// import a new block to the back of the queue + fn import(&mut self, base: u64, journal_record: JournalRecord) { + let JournalRecord { hash, inserted, deleted } = journal_record; + match self { + DeathRowQueue::DbBacked { uncached_blocks, cache, cache_capacity, .. } => { + // `uncached_blocks` is zero means currently all block are loaded into `cache` + // thus if `cache` is not full, load the next block into `cache` too + if *uncached_blocks == 0 && cache.len() < *cache_capacity { + cache.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() }); + } else { + *uncached_blocks += 1; + } + }, + DeathRowQueue::Mem { death_rows, death_index } => { + // remove all re-inserted keys from death rows + for k in inserted { + if let Some(block) = death_index.remove(&k) { + death_rows[(block - base) as usize].deleted.remove(&k); + } + } + // add new keys + let imported_block = base + death_rows.len() as u64; + for k in deleted.iter() { + death_index.insert(k.clone(), imported_block); + } + death_rows.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() }); + }, + } } - fn import>( + /// Pop out one block from the front of the queue, `base` is the block number + /// of the first block of the queue + fn pop_front( &mut self, - hash: &BlockHash, - journal_key: Vec, - inserted: I, - deleted: Vec, - ) { - if self.count_insertions { - // remove all re-inserted keys from death rows - for k in inserted { - if let Some(block) = self.death_index.remove(&k) { - self.death_rows[(block - self.pending_number) as usize].deleted.remove(&k); + base: u64, + ) -> Result>, Error> { + match self { + DeathRowQueue::DbBacked { db, uncached_blocks, cache, cache_capacity } => { + if cache.is_empty() && *uncached_blocks != 0 { + // load more blocks from db since there are still blocks in it + DeathRowQueue::load_batch_from_db( + db, + uncached_blocks, + cache, + base, + *cache_capacity, + )?; } - } + Ok(cache.pop_front()) + }, + DeathRowQueue::Mem { death_rows, death_index } => match death_rows.pop_front() { + Some(row) => { + for k in row.deleted.iter() { + death_index.remove(k); + } + Ok(Some(row)) + }, + None => Ok(None), + }, + } + } - // add new keys - let imported_block = self.pending_number + self.death_rows.len() as u64; - for k in deleted.iter() { - self.death_index.insert(k.clone(), imported_block); 
+ /// Revert recent additions to the queue, namely remove `amount` number of blocks from the back + /// of the queue, `base` is the block number of the first block of the queue + fn revert_recent_add(&mut self, base: u64, amout: usize) { + debug_assert!(amout <= self.len()); + match self { + DeathRowQueue::DbBacked { uncached_blocks, cache, .. } => { + // remove from `uncached_blocks` if it can cover + if *uncached_blocks >= amout { + *uncached_blocks -= amout; + return + } + // reset `uncached_blocks` and remove remain blocks from `cache` + let remain = amout - *uncached_blocks; + *uncached_blocks = 0; + cache.truncate(cache.len() - remain); + }, + DeathRowQueue::Mem { death_rows, death_index } => { + // Revert recent addition to the queue + // Note that pending insertions might cause some existing deletions to be removed + // from `death_index` We don't bother to track and revert that for now. This means + // that a few nodes might end up no being deleted in case transaction fails and + // `revert_pending` is called. + death_rows.truncate(death_rows.len() - amout); + let new_max_block = death_rows.len() as u64 + base; + death_index.retain(|_, block| *block < new_max_block); + }, + } + } + + /// Load a batch of blocks from the backend database into `cache`, start from (and include) the + /// next block followe the last block of `cache`, `base` is the block number of the first block + /// of the queue + fn load_batch_from_db( + db: &D, + uncached_blocks: &mut usize, + cache: &mut VecDeque>, + base: u64, + cache_capacity: usize, + ) -> Result<(), Error> { + // return if all blocks already loaded into `cache` and there are no other + // blocks in the backend database + if *uncached_blocks == 0 { + return Ok(()) + } + let start = base + cache.len() as u64; + let batch_size = cmp::min(*uncached_blocks, cache_capacity); + let mut loaded = 0; + for i in 0..batch_size as u64 { + match load_death_row_from_db::(db, start + i)? { + Some(row) => { + cache.push_back(row); + loaded += 1; + }, + // block may added to the queue but not commit into the db yet, if there are + // data missing in the db `load_death_row_from_db` should return a db error + None => break, } } - self.death_rows.push_back(DeathRow { - hash: hash.clone(), - deleted: deleted.into_iter().collect(), - journal_key, - }); + *uncached_blocks -= loaded; + Ok(()) + } + + /// Get the block in the given index of the queue, `base` is the block number of the + /// first block of the queue + fn get( + &mut self, + base: u64, + index: usize, + ) -> Result>, Error> { + match self { + DeathRowQueue::DbBacked { db, uncached_blocks, cache, cache_capacity } => { + // check if `index` target a block reside on disk + if index >= cache.len() && index < cache.len() + *uncached_blocks { + // if `index` target the next batch of `DeathRow`, load a batch from db + if index - cache.len() < cmp::min(*uncached_blocks, *cache_capacity) { + DeathRowQueue::load_batch_from_db( + db, + uncached_blocks, + cache, + base, + *cache_capacity, + )?; + } else { + // load a single `DeathRow` from db, but do not insert it to `cache` + // because `cache` is a queue of successive `DeathRow` + // NOTE: this branch should not be entered because blocks are visited + // in successive increasing order, just keeping it for robustness + return load_death_row_from_db(db, base + index as u64) + } + } + Ok(cache.get(index).cloned()) + }, + DeathRowQueue::Mem { death_rows, .. 
} => Ok(death_rows.get(index).cloned()), + } + } + + /// Check if the block at the given `index` of the queue exist + /// it is the caller's responsibility to ensure `index` won't be out of bound + fn have_block(&self, hash: &BlockHash, index: usize) -> HaveBlock { + match self { + DeathRowQueue::DbBacked { cache, .. } => { + if cache.len() > index { + (cache[index].hash == *hash).into() + } else { + // the block not exist in `cache`, but it may exist in the unload + // blocks + HaveBlock::MayHave + } + }, + DeathRowQueue::Mem { death_rows, .. } => (death_rows[index].hash == *hash).into(), + } + } + + /// Return the number of block in the pruning window + fn len(&self) -> usize { + match self { + DeathRowQueue::DbBacked { uncached_blocks, cache, .. } => + cache.len() + *uncached_blocks, + DeathRowQueue::Mem { death_rows, .. } => death_rows.len(), + } + } + + #[cfg(test)] + fn get_mem_queue_state( + &self, + ) -> Option<(&VecDeque>, &HashMap)> { + match self { + DeathRowQueue::DbBacked { .. } => None, + DeathRowQueue::Mem { death_rows, death_index } => Some((death_rows, death_index)), + } + } + + #[cfg(test)] + fn get_db_backed_queue_state(&self) -> Option<(&VecDeque>, usize)> { + match self { + DeathRowQueue::DbBacked { cache, uncached_blocks, .. } => + Some((cache, *uncached_blocks)), + DeathRowQueue::Mem { .. } => None, + } + } +} + +fn load_death_row_from_db( + db: &D, + block: u64, +) -> Result>, Error> { + let journal_key = to_journal_key(block); + match db.get_meta(&journal_key).map_err(Error::Db)? { + Some(record) => { + let JournalRecord { hash, deleted, .. } = Decode::decode(&mut record.as_slice())?; + Ok(Some(DeathRow { hash, deleted: deleted.into_iter().collect() })) + }, + None => Ok(None), + } +} + +#[derive(Clone, Debug, PartialEq, Eq, parity_util_mem_derive::MallocSizeOf)] +struct DeathRow { + hash: BlockHash, + deleted: HashSet, +} + +#[derive(Encode, Decode, Default)] +struct JournalRecord { + hash: BlockHash, + inserted: Vec, + deleted: Vec, +} + +fn to_journal_key(block: u64) -> Vec { + to_meta_key(PRUNING_JOURNAL, &block) +} + +/// The result return by `RefWindow::have_block` +#[derive(Debug, PartialEq, Eq)] +pub enum HaveBlock { + /// Definitely not having this block + NotHave, + /// May or may not have this block, need futher checking + MayHave, + /// Definitely having this block + Have, +} + +impl From for HaveBlock { + fn from(have: bool) -> Self { + if have { + HaveBlock::Have + } else { + HaveBlock::NotHave + } + } +} + +impl RefWindow { + pub fn new( + db: D, + window_size: u32, + count_insertions: bool, + ) -> Result, Error> { + // the block number of the first block in the queue or the next block number if the queue is + // empty + let base = match db.get_meta(&to_meta_key(LAST_PRUNED, &())).map_err(Error::Db)? { + Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1, + None => 0, + }; + // the block number of the last block in the queue + let last_canonicalized_number = + match db.get_meta(&to_meta_key(LAST_CANONICAL, &())).map_err(Error::Db)? { + Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice())?.1), + None => None, + }; + + let queue = if count_insertions { + DeathRowQueue::new_mem(&db, base)? 
+ } else { + let unload = match last_canonicalized_number { + Some(last_canonicalized_number) => { + debug_assert!(last_canonicalized_number + 1 >= base); + last_canonicalized_number + 1 - base + }, + // None means `LAST_CANONICAL` is never been wrote, since the pruning journals are + // in the same `CommitSet` as `LAST_CANONICAL`, it means no pruning journal have + // ever been committed to the db, thus set `unload` to zero + None => 0, + }; + DeathRowQueue::new_db_backed(db, base, unload as usize, window_size)? + }; + + Ok(RefWindow { queue, base, pending_canonicalizations: 0, pending_prunings: 0 }) } pub fn window_size(&self) -> u64 { - (self.death_rows.len() - self.pending_prunings) as u64 + (self.queue.len() - self.pending_prunings) as u64 } - pub fn next_hash(&self) -> Option { - self.death_rows.get(self.pending_prunings).map(|r| r.hash.clone()) + /// Get the hash of the next pruning block + pub fn next_hash(&mut self) -> Result, Error> { + let res = match &self.queue { + DeathRowQueue::DbBacked { cache, .. } => + if self.pending_prunings < cache.len() { + cache.get(self.pending_prunings).map(|r| r.hash.clone()) + } else { + self.get(self.pending_prunings)?.map(|r| r.hash) + }, + DeathRowQueue::Mem { death_rows, .. } => + death_rows.get(self.pending_prunings).map(|r| r.hash.clone()), + }; + Ok(res) } pub fn mem_used(&self) -> usize { 0 } + // Return the block number of the first block that not been pending pruned pub fn pending(&self) -> u64 { - self.pending_number + self.pending_prunings as u64 + self.base + self.pending_prunings as u64 } - pub fn have_block(&self, hash: &BlockHash) -> bool { - self.death_rows.iter().skip(self.pending_prunings).any(|r| r.hash == *hash) + fn is_empty(&self) -> bool { + self.queue.len() <= self.pending_prunings + } + + // Check if a block is in the pruning window and not be pruned yet + pub fn have_block(&self, hash: &BlockHash, number: u64) -> HaveBlock { + // if the queue is empty or the block number exceed the pruning window, we definitely + // do not have this block + if self.is_empty() || + number < self.pending() || + number >= self.base + self.queue.len() as u64 + { + return HaveBlock::NotHave + } + self.queue.have_block(hash, (number - self.base) as usize) + } + + fn get(&mut self, index: usize) -> Result>, Error> { + if index >= self.queue.len() { + return Ok(None) + } + match self.queue.get(self.base, index)? { + None => { + if matches!(self.queue, DeathRowQueue::DbBacked { .. }) && + // whether trying to get a pending canonicalize block which may not commit to the db yet + index >= self.queue.len() - self.pending_canonicalizations + { + trace!(target: "state-db", "Trying to get a pending canonicalize block that not commit to the db yet"); + Err(Error::StateDb(StateDbError::BlockUnavailable)) + } else { + // A block of the queue is missing, this may happen if `CommitSet` are commit to + // db concurrently and calling `apply_pending/revert_pending` out of order, this + // should not happen under current implementation but keeping it as a defensive + error!(target: "state-db", "Block record is missing from the pruning window, block number {}", self.base + index as u64); + Err(Error::StateDb(StateDbError::BlockMissing)) + } + }, + s => Ok(s), + } } /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. 
- pub fn prune_one(&mut self, commit: &mut CommitSet) { - if let Some(pruned) = self.death_rows.get(self.pending_prunings) { + pub fn prune_one(&mut self, commit: &mut CommitSet) -> Result<(), Error> { + if let Some(pruned) = self.get(self.pending_prunings)? { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - let index = self.pending_number + self.pending_prunings as u64; - commit.data.deleted.extend(pruned.deleted.iter().cloned()); + let index = self.base + self.pending_prunings as u64; + commit.data.deleted.extend(pruned.deleted.into_iter()); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); - commit.meta.deleted.push(pruned.journal_key.clone()); + commit + .meta + .deleted + .push(to_journal_key(self.base + self.pending_prunings as u64)); self.pending_prunings += 1; } else { warn!(target: "state-db", "Trying to prune when there's nothing to prune"); } + Ok(()) } /// Add a change set to the window. Creates a journal record and pushes it to `commit` - pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet) { + pub fn note_canonical( + &mut self, + hash: &BlockHash, + number: u64, + commit: &mut CommitSet, + ) -> Result<(), Error> { + if self.base == 0 && self.queue.len() == 0 && number > 0 { + // assume that parent was canonicalized + self.base = number; + } else if (self.base + self.queue.len() as u64) != number { + return Err(Error::StateDb(StateDbError::InvalidBlockNumber)) + } trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); - let inserted = if self.count_insertions { + let inserted = if matches!(self.queue, DeathRowQueue::Mem { .. }) { commit.data.inserted.iter().map(|(k, _)| k.clone()).collect() } else { Default::default() }; let deleted = ::std::mem::take(&mut commit.data.deleted); let journal_record = JournalRecord { hash: hash.clone(), inserted, deleted }; - let block = self.pending_number + self.death_rows.len() as u64; - let journal_key = to_journal_key(block); - commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); - self.import( - &journal_record.hash, - journal_key, - journal_record.inserted.into_iter(), - journal_record.deleted, - ); + commit.meta.inserted.push((to_journal_key(number), journal_record.encode())); + self.queue.import(self.base, journal_record); self.pending_canonicalizations += 1; + Ok(()) } /// Apply all pending changes @@ -202,32 +544,22 @@ impl RefWindow { self.pending_canonicalizations = 0; for _ in 0..self.pending_prunings { let pruned = self - .death_rows - .pop_front() - .expect("pending_prunings is always < death_rows.len()"); + .queue + .pop_front(self.base) + // NOTE: `pop_front` should not return `MetaDb::Error` because blocks are visited + // by `RefWindow::prune_one` first then `RefWindow::apply_pending` and + // `DeathRowQueue::get` should load the blocks into cache already + .expect("block must loaded in cache thus no MetaDb::Error") + .expect("pending_prunings is always < queue.len()"); trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - if self.count_insertions { - for k in pruned.deleted.iter() { - self.death_index.remove(k); - } - } - self.pending_number += 1; + self.base += 1; } self.pending_prunings = 0; } /// Revert all pending changes pub fn revert_pending(&mut self) { - // Revert pending deletions. 
- // Note that pending insertions might cause some existing deletions to be removed from - // `death_index` We don't bother to track and revert that for now. This means that a few - // nodes might end up no being deleted in case transaction fails and `revert_pending` is - // called. - self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations); - if self.count_insertions { - let new_max_block = self.death_rows.len() as u64 + self.pending_number; - self.death_index.retain(|_, block| *block < new_max_block); - } + self.queue.revert_recent_add(self.base, self.pending_canonicalizations); self.pending_canonicalizations = 0; self.pending_prunings = 0; } @@ -235,38 +567,45 @@ impl RefWindow { #[cfg(test)] mod tests { - use super::RefWindow; + use super::{to_journal_key, DeathRowQueue, HaveBlock, JournalRecord, RefWindow, LAST_PRUNED}; use crate::{ + noncanonical::LAST_CANONICAL, test::{make_commit, make_db, TestDb}, - CommitSet, + to_meta_key, CommitSet, Error, Hash, StateDbError, DEFAULT_MAX_BLOCK_CONSTRAINT, }; + use codec::Encode; use sp_core::H256; - fn check_journal(pruning: &RefWindow, db: &TestDb) { - let restored: RefWindow = RefWindow::new(db, pruning.count_insertions).unwrap(); - assert_eq!(pruning.pending_number, restored.pending_number); - assert_eq!(pruning.death_rows, restored.death_rows); - assert_eq!(pruning.death_index, restored.death_index); + fn check_journal(pruning: &RefWindow, db: &TestDb) { + let count_insertions = matches!(pruning.queue, DeathRowQueue::Mem { .. }); + let restored: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, count_insertions).unwrap(); + assert_eq!(pruning.base, restored.base); + assert_eq!(pruning.queue.get_mem_queue_state(), restored.queue.get_mem_queue_state()); } #[test] fn created_from_empty_db() { let db = make_db(&[]); - let pruning: RefWindow = RefWindow::new(&db, true).unwrap(); - assert_eq!(pruning.pending_number, 0); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); + let pruning: RefWindow = + RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); + assert_eq!(pruning.base, 0); + let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); + assert!(death_rows.is_empty()); + assert!(death_index.is_empty()); } #[test] fn prune_empty() { let db = make_db(&[]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - assert_eq!(pruning.pending_number, 0); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); + pruning.prune_one(&mut commit).unwrap(); + assert_eq!(pruning.base, 0); + let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); + assert!(death_rows.is_empty()); + assert!(death_index.is_empty()); assert!(pruning.pending_prunings == 0); assert!(pruning.pending_canonicalizations == 0); } @@ -274,41 +613,45 @@ mod tests { #[test] fn prune_one() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = make_commit(&[4, 5], &[1, 3]); - let h = H256::random(); - pruning.note_canonical(&h, &mut commit); + let hash = H256::random(); + pruning.note_canonical(&hash, 0, &mut commit).unwrap(); db.commit(&commit); - assert!(pruning.have_block(&h)); 
+ assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Have); pruning.apply_pending(); - assert!(pruning.have_block(&h)); + assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Have); assert!(commit.data.deleted.is_empty()); - assert_eq!(pruning.death_rows.len(), 1); - assert_eq!(pruning.death_index.len(), 2); + let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); + assert_eq!(death_rows.len(), 1); + assert_eq!(death_index.len(), 2); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - assert!(!pruning.have_block(&h)); + pruning.prune_one(&mut commit).unwrap(); + assert_eq!(pruning.have_block(&hash, 0), HaveBlock::NotHave); db.commit(&commit); pruning.apply_pending(); - assert!(!pruning.have_block(&h)); + assert_eq!(pruning.have_block(&hash, 0), HaveBlock::NotHave); assert!(db.data_eq(&make_db(&[2, 4, 5]))); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); - assert_eq!(pruning.pending_number, 1); + let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); + assert!(death_rows.is_empty()); + assert!(death_index.is_empty()); + assert_eq!(pruning.base, 1); } #[test] fn prune_two() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[5], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); pruning.apply_pending(); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); @@ -316,53 +659,55 @@ mod tests { check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); pruning.apply_pending(); assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); pruning.apply_pending(); assert!(db.data_eq(&make_db(&[3, 4, 5]))); - assert_eq!(pruning.pending_number, 2); + assert_eq!(pruning.base, 2); } #[test] fn prune_two_pending() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[5], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); pruning.apply_pending(); assert!(db.data_eq(&make_db(&[3, 4, 5]))); - 
assert_eq!(pruning.pending_number, 2); + assert_eq!(pruning.base, 2); } #[test] fn reinserted_survives() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); pruning.apply_pending(); @@ -370,62 +715,64 @@ mod tests { check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); pruning.apply_pending(); - assert_eq!(pruning.pending_number, 3); + assert_eq!(pruning.base, 3); } #[test] fn reinserted_survive_pending() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, true).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); pruning.apply_pending(); - assert_eq!(pruning.pending_number, 3); + assert_eq!(pruning.base, 3); } #[test] fn reinserted_ignores() { let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db, false).unwrap(); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 0, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), &mut 
commit); + pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); + pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); pruning.apply_pending(); @@ -433,9 +780,276 @@ mod tests { check_journal(&pruning, &db); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); + pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); - assert!(pruning.death_index.is_empty()); + } + + fn push_last_canonicalized(block: u64, commit: &mut CommitSet) { + commit + .meta + .inserted + .push((to_meta_key(LAST_CANONICAL, &()), (block, block).encode())); + } + + fn push_last_pruned(block: u64, commit: &mut CommitSet) { + commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), block.encode())); + } + + #[test] + fn init_db_backed_queue() { + let mut db = make_db(&[]); + let mut commit = CommitSet::default(); + + fn load_pruning_from_db(db: TestDb) -> (usize, u64) { + let pruning: RefWindow = + RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); + let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); + (cache.len(), pruning.base) + } + + fn push_record(block: u64, commit: &mut CommitSet) { + commit + .meta + .inserted + .push((to_journal_key(block), JournalRecord::::default().encode())); + } + + // empty database + let (loaded_blocks, base) = load_pruning_from_db(db.clone()); + assert_eq!(loaded_blocks, 0); + assert_eq!(base, 0); + + // canonicalized the genesis block but no pruning + push_last_canonicalized(0, &mut commit); + push_record(0, &mut commit); + db.commit(&commit); + let (loaded_blocks, base) = load_pruning_from_db(db.clone()); + assert_eq!(loaded_blocks, 1); + assert_eq!(base, 0); + + // pruned the genesis block + push_last_pruned(0, &mut commit); + db.commit(&commit); + let (loaded_blocks, base) = load_pruning_from_db(db.clone()); + assert_eq!(loaded_blocks, 0); + assert_eq!(base, 1); + + // canonicalize more blocks + push_last_canonicalized(10, &mut commit); + for i in 1..=10 { + push_record(i, &mut commit); + } + db.commit(&commit); + let (loaded_blocks, base) = load_pruning_from_db(db.clone()); + assert_eq!(loaded_blocks, 10); + assert_eq!(base, 1); + + // pruned all blocks + push_last_pruned(10, &mut commit); + db.commit(&commit); + let (loaded_blocks, base) = load_pruning_from_db(db.clone()); + assert_eq!(loaded_blocks, 0); + assert_eq!(base, 11); + } + + #[test] + fn db_backed_queue() { + let mut db = make_db(&[]); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); + let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; + + // start as an empty queue + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), 0); + assert_eq!(uncached_blocks, 0); + + // import blocks + // queue size and content should match + for i in 0..(cache_capacity + 10) { + let mut commit = make_commit(&[], &[]); + pruning.note_canonical(&(i as u64), i as u64, &mut commit).unwrap(); + push_last_canonicalized(i as u64, &mut commit); + db.commit(&commit); + // block will fill in cache first + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + if i < cache_capacity { + assert_eq!(cache.len(), i + 1); + assert_eq!(uncached_blocks, 0); + } else { + assert_eq!(cache.len(), cache_capacity); + 
assert_eq!(uncached_blocks, i - cache_capacity + 1); + } + } + pruning.apply_pending(); + assert_eq!(pruning.queue.len(), cache_capacity + 10); + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), cache_capacity); + assert_eq!(uncached_blocks, 10); + for i in 0..cache_capacity { + assert_eq!(cache[i].hash, i as u64); + } + + // import a new block to the end of the queue + // won't keep the new block in memory + let mut commit = CommitSet::default(); + pruning + .note_canonical(&(cache_capacity as u64 + 10), cache_capacity as u64 + 10, &mut commit) + .unwrap(); + assert_eq!(pruning.queue.len(), cache_capacity + 11); + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), cache_capacity); + assert_eq!(uncached_blocks, 11); + + // revert the last add that no apply yet + // NOTE: do not commit the previous `CommitSet` to db + pruning.revert_pending(); + assert_eq!(pruning.queue.len(), cache_capacity + 10); + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), cache_capacity); + assert_eq!(uncached_blocks, 10); + + // remove one block from the start of the queue + // block is removed from the head of cache + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit).unwrap(); + db.commit(&commit); + pruning.apply_pending(); + assert_eq!(pruning.queue.len(), cache_capacity + 9); + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), cache_capacity - 1); + assert_eq!(uncached_blocks, 10); + for i in 0..(cache_capacity - 1) { + assert_eq!(cache[i].hash, (i + 1) as u64); + } + + // load a new queue from db + // `cache` is full again but the content of the queue should be the same + let pruning: RefWindow = + RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); + assert_eq!(pruning.queue.len(), cache_capacity + 9); + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), cache_capacity); + assert_eq!(uncached_blocks, 9); + for i in 0..cache_capacity { + assert_eq!(cache[i].hash, (i + 1) as u64); + } + } + + #[test] + fn load_block_from_db() { + let mut db = make_db(&[]); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); + let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; + + // import blocks + for i in 0..(cache_capacity as u64 * 2 + 10) { + let mut commit = make_commit(&[], &[]); + pruning.note_canonical(&i, i, &mut commit).unwrap(); + push_last_canonicalized(i as u64, &mut commit); + db.commit(&commit); + } + + // the following operations won't triger loading block from db: + // - getting block in cache + // - getting block not in the queue + let index = cache_capacity; + assert_eq!( + pruning.queue.get(0, index - 1).unwrap().unwrap().hash, + cache_capacity as u64 - 1 + ); + assert_eq!(pruning.queue.get(0, cache_capacity * 2 + 10).unwrap(), None); + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), cache_capacity); + assert_eq!(uncached_blocks, cache_capacity + 10); + + // getting a block not in cache will triger loading block from db + assert_eq!(pruning.queue.get(0, index).unwrap().unwrap().hash, cache_capacity as u64); + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), cache_capacity * 2); + 
assert_eq!(uncached_blocks, 10); + + // clear all block loaded in cache + for _ in 0..cache_capacity * 2 { + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit).unwrap(); + db.commit(&commit); + } + pruning.apply_pending(); + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert!(cache.is_empty()); + assert_eq!(uncached_blocks, 10); + + // getting the hash of block that not in cache will also triger loading + // the remaining blocks from db + assert_eq!( + pruning.queue.get(pruning.base, 0).unwrap().unwrap().hash, + (cache_capacity * 2) as u64 + ); + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), 10); + assert_eq!(uncached_blocks, 0); + + // load a new queue from db + // `cache` should be the same + let pruning: RefWindow = + RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); + assert_eq!(pruning.queue.len(), 10); + let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(cache.len(), 10); + assert_eq!(uncached_blocks, 0); + for i in 0..10 { + assert_eq!(cache[i].hash, (cache_capacity * 2 + i) as u64); + } + } + + #[test] + fn get_block_from_queue() { + let mut db = make_db(&[]); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); + let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as u64; + + // import blocks and commit to db + let mut commit = make_commit(&[], &[]); + for i in 0..(cache_capacity + 10) { + pruning.note_canonical(&i, i, &mut commit).unwrap(); + } + db.commit(&commit); + + // import a block but not commit to db yet + let mut pending_commit = make_commit(&[], &[]); + let index = cache_capacity + 10; + pruning.note_canonical(&index, index, &mut pending_commit).unwrap(); + + let mut commit = make_commit(&[], &[]); + // prune blocks that had committed to db + for i in 0..(cache_capacity + 10) { + assert_eq!(pruning.next_hash().unwrap(), Some(i)); + pruning.prune_one(&mut commit).unwrap(); + } + // return `BlockUnavailable` for block that did not commit to db + assert_eq!( + pruning.next_hash().unwrap_err(), + Error::StateDb(StateDbError::BlockUnavailable) + ); + assert_eq!( + pruning.prune_one(&mut commit).unwrap_err(), + Error::StateDb(StateDbError::BlockUnavailable) + ); + // commit block to db and no error return + db.commit(&pending_commit); + assert_eq!(pruning.next_hash().unwrap(), Some(index)); + pruning.prune_one(&mut commit).unwrap(); + db.commit(&commit); + + // import a block and do not commit it to db before calling `apply_pending` + pruning + .note_canonical(&(index + 1), index + 1, &mut make_commit(&[], &[])) + .unwrap(); + pruning.apply_pending(); + assert_eq!(pruning.next_hash().unwrap_err(), Error::StateDb(StateDbError::BlockMissing)); } } diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index 9fb97036b2f24..314ec2902452a 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -20,10 +20,16 @@ use crate::{ChangeSet, CommitSet, DBValue, MetaDb, NodeDb}; use sp_core::H256; -use std::collections::HashMap; +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +#[derive(Default, Debug, Clone)] +pub struct TestDb(Arc>); #[derive(Default, Debug, Clone, PartialEq, Eq)] -pub struct TestDb { +struct TestDbInner { pub data: HashMap, pub meta: HashMap, DBValue>, } @@ -32,7 +38,7 @@ impl MetaDb for TestDb { type Error = (); fn get_meta(&self, key: &[u8]) -> Result, ()> { - 
Ok(self.meta.get(key).cloned()) + Ok(self.0.read().unwrap().meta.get(key).cloned()) } } @@ -41,25 +47,29 @@ impl NodeDb for TestDb { type Key = H256; fn get(&self, key: &H256) -> Result, ()> { - Ok(self.data.get(key).cloned()) + Ok(self.0.read().unwrap().data.get(key).cloned()) } } impl TestDb { pub fn commit(&mut self, commit: &CommitSet) { - self.data.extend(commit.data.inserted.iter().cloned()); - self.meta.extend(commit.meta.inserted.iter().cloned()); + self.0.write().unwrap().data.extend(commit.data.inserted.iter().cloned()); + self.0.write().unwrap().meta.extend(commit.meta.inserted.iter().cloned()); for k in commit.data.deleted.iter() { - self.data.remove(k); + self.0.write().unwrap().data.remove(k); } - self.meta.extend(commit.meta.inserted.iter().cloned()); + self.0.write().unwrap().meta.extend(commit.meta.inserted.iter().cloned()); for k in commit.meta.deleted.iter() { - self.meta.remove(k); + self.0.write().unwrap().meta.remove(k); } } pub fn data_eq(&self, other: &TestDb) -> bool { - self.data == other.data + self.0.read().unwrap().data == other.0.read().unwrap().data + } + + pub fn meta_len(&self) -> usize { + self.0.read().unwrap().meta.len() } } @@ -78,11 +88,11 @@ pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { } pub fn make_db(inserted: &[u64]) -> TestDb { - TestDb { + TestDb(Arc::new(RwLock::new(TestDbInner { data: inserted .iter() .map(|v| (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec())) .collect(), meta: Default::default(), - } + }))) } From ed12e6065c505a5225e2156985a75d1fb75fa0a4 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Thu, 1 Sep 2022 18:48:03 +0100 Subject: [PATCH 083/348] Weight v1.5 Follow Ups (#12155) * update api * update * remove unused * remove `one` api * fix unused * fmt * add saturating accrue * remove `Weight::new()` * use some macros * div makes no sense * Update weight_v2.rs * missed some * more patch * fixes * more fixes * more fix * more fix * Update frame/support/src/weights/weight_v2.rs * not needed * fix weight file --- .maintain/frame-weight-template.hbs | 4 +- bin/node-template/runtime/src/lib.rs | 2 +- bin/node/runtime/src/impls.rs | 14 +- bin/node/runtime/src/lib.rs | 2 +- docs/Upgrading-2.0-to-3.0.md | 4 +- frame/alliance/src/migration.rs | 2 +- frame/alliance/src/weights.rs | 104 ++-- frame/assets/src/weights.rs | 24 +- frame/babe/src/default_weights.rs | 10 +- frame/balances/src/tests.rs | 8 +- frame/benchmarking/src/weights.rs | 12 +- frame/bounties/src/weights.rs | 4 +- frame/child-bounties/src/weights.rs | 4 +- frame/collective/src/weights.rs | 88 ++-- frame/contracts/src/migration.rs | 6 +- frame/contracts/src/storage.rs | 3 +- frame/contracts/src/tests.rs | 2 +- frame/contracts/src/wasm/code_cache.rs | 2 +- frame/contracts/src/weights.rs | 448 +++++++++--------- frame/conviction-voting/src/weights.rs | 8 +- frame/democracy/src/lib.rs | 2 +- frame/democracy/src/weights.rs | 76 +-- .../election-provider-multi-phase/src/mock.rs | 2 +- .../src/weights.rs | 48 +- .../election-provider-support/src/weights.rs | 24 +- frame/elections-phragmen/src/migrations/v5.rs | 2 +- frame/elections-phragmen/src/weights.rs | 32 +- frame/examples/basic/src/weights.rs | 12 +- frame/executive/src/lib.rs | 4 +- frame/gilt/src/weights.rs | 16 +- frame/grandpa/src/default_weights.rs | 12 +- frame/identity/src/weights.rs | 96 ++-- frame/im-online/src/weights.rs | 8 +- frame/lottery/src/weights.rs | 4 +- frame/membership/src/weights.rs | 28 +- .../src/default_weights.rs | 2 +- frame/multisig/src/weights.rs | 
56 +-- frame/nomination-pools/src/weights.rs | 16 +- frame/offences/benchmarking/src/mock.rs | 2 +- frame/offences/src/mock.rs | 2 +- frame/preimage/src/weights.rs | 12 +- frame/proxy/src/weights.rs | 56 +-- frame/ranked-collective/src/weights.rs | 16 +- frame/recovery/src/weights.rs | 20 +- frame/referenda/src/branch.rs | 6 +- frame/remark/src/weights.rs | 4 +- frame/scheduler/src/tests.rs | 5 +- frame/scheduler/src/weights.rs | 56 +-- frame/society/src/lib.rs | 2 +- frame/staking/src/tests.rs | 4 +- frame/staking/src/weights.rs | 68 +-- frame/state-trie-migration/src/lib.rs | 6 +- frame/state-trie-migration/src/weights.rs | 4 +- frame/sudo/src/tests.rs | 36 +- frame/support/src/dispatch.rs | 2 +- frame/support/src/migrations.rs | 2 +- frame/support/src/traits/hooks.rs | 18 +- frame/support/src/weights.rs | 2 +- frame/support/src/weights/block_weights.rs | 6 +- .../support/src/weights/extrinsic_weights.rs | 4 +- frame/support/src/weights/weight_v2.rs | 209 ++++---- frame/system/src/extensions/check_weight.rs | 8 +- frame/system/src/limits.rs | 6 +- frame/system/src/weights.rs | 16 +- frame/tips/src/weights.rs | 24 +- .../asset-tx-payment/src/tests.rs | 2 +- frame/transaction-payment/src/lib.rs | 2 +- frame/transaction-storage/src/weights.rs | 4 +- frame/treasury/src/lib.rs | 2 +- frame/treasury/src/weights.rs | 8 +- frame/uniques/src/weights.rs | 16 +- frame/utility/src/lib.rs | 10 +- frame/utility/src/tests.rs | 4 +- frame/utility/src/weights.rs | 12 +- frame/vesting/src/weights.rs | 64 +-- frame/whitelist/src/tests.rs | 2 +- frame/whitelist/src/weights.rs | 4 +- .../benchmarking-cli/src/overhead/README.md | 4 +- .../benchmarking-cli/src/overhead/weights.hbs | 2 +- .../benchmarking-cli/src/pallet/template.hbs | 2 +- 80 files changed, 965 insertions(+), 960 deletions(-) diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index c6df85194ac99..5beeaf5f0521e 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -67,7 +67,7 @@ impl WeightInfo for SubstrateWeight { Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).scalar_saturating_mul({{cw.name}} as RefTimeWeight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) @@ -102,7 +102,7 @@ impl WeightInfo for () { Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).scalar_saturating_mul({{cw.name}} as RefTimeWeight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 7280336cada3d..e28a3bb2adb9d 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -138,7 +138,7 @@ parameter_types! 
{ pub const Version: RuntimeVersion = VERSION; /// We allow for 2 seconds of compute with a 6 second average block time. pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + ::with_sensible_defaults(2u64 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 42; diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 4d58abc81d16d..0d0f864097af5 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -239,7 +239,7 @@ mod multiplier_tests { #[test] fn multiplier_cannot_go_below_limit() { // will not go any further below even if block is empty. - run_with_system_weight(Weight::new(), || { + run_with_system_weight(Weight::zero(), || { let next = runtime_multiplier_update(min_multiplier()); assert_eq!(next, min_multiplier()); }) @@ -257,7 +257,7 @@ mod multiplier_tests { // 1 < 0.00001 * k * 0.1875 // 10^9 / 1875 < k // k > 533_333 ~ 18,5 days. - run_with_system_weight(Weight::new(), || { + run_with_system_weight(Weight::zero(), || { // start from 1, the default. let mut fm = Multiplier::one(); let mut iterations: u64 = 0; @@ -419,20 +419,20 @@ mod multiplier_tests { #[test] fn weight_to_fee_should_not_overflow_on_large_weights() { let kb = Weight::from_ref_time(1024); - let mb = kb * kb; + let mb = 1024u64 * kb; let max_fm = Multiplier::saturating_from_integer(i128::MAX); // check that for all values it can compute, correctly. vec![ Weight::zero(), - Weight::one(), + Weight::from_ref_time(1), Weight::from_ref_time(10), Weight::from_ref_time(1000), kb, - 10 * kb, - 100 * kb, + 10u64 * kb, + 100u64 * kb, mb, - 10 * mb, + 10u64 * mb, Weight::from_ref_time(2147483647), Weight::from_ref_time(4294967295), BlockWeights::get().max_block / 2, diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 8ef7e7852ad02..fa0f877c5938d 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -170,7 +170,7 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); /// We allow for 2 seconds of compute with a 6 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.scalar_saturating_mul(2); +const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.saturating_mul(2); parameter_types! { pub const BlockHashCount: BlockNumber = 2400; diff --git a/docs/Upgrading-2.0-to-3.0.md b/docs/Upgrading-2.0-to-3.0.md index f750c6dd5865b..bcba7d88047ce 100644 --- a/docs/Upgrading-2.0-to-3.0.md +++ b/docs/Upgrading-2.0-to-3.0.md @@ -100,12 +100,12 @@ And update the overall definition for weights on frame and a few related types a +/// by Operational extrinsics. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for 2 seconds of compute with a 6 second average block time. -+const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; ++const MAXIMUM_BLOCK_WEIGHT: Weight = 2u64 * WEIGHT_PER_SECOND; + parameter_types! { pub const BlockHashCount: BlockNumber = 2400; - /// We allow for 2 seconds of compute with a 6 second average block time. 
-- pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; +- pub const MaximumBlockWeight: Weight = 2u64 * WEIGHT_PER_SECOND; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - /// Assume 10% of weight for average on_initialize calls. - pub MaximumExtrinsicWeight: Weight = diff --git a/frame/alliance/src/migration.rs b/frame/alliance/src/migration.rs index 010603902f0fd..7e3df5219f25b 100644 --- a/frame/alliance/src/migration.rs +++ b/frame/alliance/src/migration.rs @@ -25,7 +25,7 @@ pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); /// Wrapper for all migrations of this pallet. pub fn migrate, I: 'static>() -> Weight { let onchain_version = Pallet::::on_chain_storage_version(); - let mut weight: Weight = Weight::new(); + let mut weight: Weight = Weight::zero(); if onchain_version < 1 { weight = weight.saturating_add(v0_to_v1::migrate::()); diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index 8436b3ddbd530..2da5782f1bb4f 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -82,11 +82,11 @@ impl WeightInfo for SubstrateWeight { fn propose_proposed(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { Weight::from_ref_time(37_864_000 as RefTimeWeight) // Standard Error: 28_000 - .saturating_add(Weight::from_ref_time(69_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(69_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } @@ -97,7 +97,7 @@ impl WeightInfo for SubstrateWeight { fn vote(_x: u32, y: u32, ) -> Weight { Weight::from_ref_time(46_813_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(125_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(125_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -109,7 +109,7 @@ impl WeightInfo for SubstrateWeight { fn veto(p: u32, ) -> Weight { Weight::from_ref_time(35_316_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -124,11 +124,11 @@ impl WeightInfo for SubstrateWeight { fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { Weight::from_ref_time(36_245_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(336_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(336_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - 
.saturating_add(Weight::from_ref_time(109_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(109_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(178_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(178_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -144,13 +144,13 @@ impl WeightInfo for SubstrateWeight { fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { Weight::from_ref_time(48_088_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(194_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(194_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -166,9 +166,9 @@ impl WeightInfo for SubstrateWeight { fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { Weight::from_ref_time(43_374_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(101_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(101_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(182_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(182_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -185,9 +185,9 @@ impl WeightInfo for SubstrateWeight { fn close_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { Weight::from_ref_time(42_798_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -208,17 +208,17 @@ impl WeightInfo for SubstrateWeight { fn force_set_members(x: u32, y: u32, z: u32, p: u32, c: u32, m: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard 
Error: 221_000 - .saturating_add(Weight::from_ref_time(1_294_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_294_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) // Standard Error: 23_000 - .saturating_add(Weight::from_ref_time(231_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(231_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(9_371_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_371_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_673_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(11_673_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_581_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(11_581_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) @@ -306,9 +306,9 @@ impl WeightInfo for SubstrateWeight { fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { Weight::from_ref_time(359_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_376_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_376_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(112_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(112_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -319,9 +319,9 @@ impl WeightInfo for SubstrateWeight { fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 145_000 - .saturating_add(Weight::from_ref_time(20_932_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(20_932_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(3_649_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_649_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -341,11 +341,11 @@ impl WeightInfo for () { fn propose_proposed(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { Weight::from_ref_time(37_864_000 as RefTimeWeight) // Standard Error: 28_000 - .saturating_add(Weight::from_ref_time(69_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + 
.saturating_add(Weight::from_ref_time(69_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } @@ -356,7 +356,7 @@ impl WeightInfo for () { fn vote(_x: u32, y: u32, ) -> Weight { Weight::from_ref_time(46_813_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(125_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(125_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -368,7 +368,7 @@ impl WeightInfo for () { fn veto(p: u32, ) -> Weight { Weight::from_ref_time(35_316_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -383,11 +383,11 @@ impl WeightInfo for () { fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { Weight::from_ref_time(36_245_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(336_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(336_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(109_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(109_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(178_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(178_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -403,13 +403,13 @@ impl WeightInfo for () { fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { Weight::from_ref_time(48_088_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(194_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(194_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).saturating_mul(y as 
RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -425,9 +425,9 @@ impl WeightInfo for () { fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { Weight::from_ref_time(43_374_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(101_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(101_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(182_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(182_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -444,9 +444,9 @@ impl WeightInfo for () { fn close_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { Weight::from_ref_time(42_798_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -467,17 +467,17 @@ impl WeightInfo for () { fn force_set_members(x: u32, y: u32, z: u32, p: u32, c: u32, m: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 221_000 - .saturating_add(Weight::from_ref_time(1_294_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_294_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) // Standard Error: 23_000 - .saturating_add(Weight::from_ref_time(231_000 as RefTimeWeight).scalar_saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(231_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(9_371_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_371_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_673_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(11_673_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_581_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(11_581_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) 
.saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) @@ -565,9 +565,9 @@ impl WeightInfo for () { fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { Weight::from_ref_time(359_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_376_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_376_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(112_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(112_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -578,9 +578,9 @@ impl WeightInfo for () { fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 145_000 - .saturating_add(Weight::from_ref_time(20_932_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(20_932_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(3_649_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_649_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs index 971728df46c24..d59567e00ef69 100644 --- a/frame/assets/src/weights.rs +++ b/frame/assets/src/weights.rs @@ -92,11 +92,11 @@ impl WeightInfo for SubstrateWeight { fn destroy(c: u32, s: u32, a: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(19_333_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(19_333_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 375_000 - .saturating_add(Weight::from_ref_time(17_046_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_046_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -188,9 +188,9 @@ impl WeightInfo for SubstrateWeight { fn set_metadata(n: u32, s: u32, ) -> Weight { Weight::from_ref_time(32_395_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 
as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -206,7 +206,7 @@ impl WeightInfo for SubstrateWeight { fn force_set_metadata(_n: u32, s: u32, ) -> Weight { Weight::from_ref_time(19_586_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -277,11 +277,11 @@ impl WeightInfo for () { fn destroy(c: u32, s: u32, a: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(19_333_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(19_333_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 375_000 - .saturating_add(Weight::from_ref_time(17_046_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_046_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -373,9 +373,9 @@ impl WeightInfo for () { fn set_metadata(n: u32, s: u32, ) -> Weight { Weight::from_ref_time(32_395_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -391,7 +391,7 @@ impl WeightInfo for () { fn force_set_metadata(_n: u32, s: u32, ) -> Weight { Weight::from_ref_time(19_586_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } diff --git a/frame/babe/src/default_weights.rs b/frame/babe/src/default_weights.rs index cc1d11108b2e1..d3e0c9d044883 100644 --- a/frame/babe/src/default_weights.rs +++ b/frame/babe/src/default_weights.rs @@ -38,14 +38,14 @@ impl crate::WeightInfo for () { const MAX_NOMINATORS: u64 = 200; // checking membership proof - let ref_time_weight = (35 * WEIGHT_PER_MICROS) - 
.saturating_add((175 * WEIGHT_PER_NANOS).scalar_saturating_mul(validator_count)) + let ref_time_weight = (35u64 * WEIGHT_PER_MICROS) + .saturating_add((175u64 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof - .saturating_add(110 * WEIGHT_PER_MICROS) + .saturating_add(110u64 * WEIGHT_PER_MICROS) // report offence - .saturating_add(110 * WEIGHT_PER_MICROS) - .saturating_add(25 * WEIGHT_PER_MICROS * MAX_NOMINATORS) + .saturating_add(110u64 * WEIGHT_PER_MICROS) + .saturating_add(25u64 * WEIGHT_PER_MICROS * MAX_NOMINATORS) .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)); diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 6605af530563d..96bee9be1991c 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -188,14 +188,14 @@ macro_rules! decl_tests { ChargeTransactionPayment::from(1), &1, CALL, - &info_from_weight(Weight::one()), + &info_from_weight(Weight::from_ref_time(1)), 1, ).is_err()); assert_ok!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, - &info_from_weight(Weight::one()), + &info_from_weight(Weight::from_ref_time(1)), 1, )); @@ -206,14 +206,14 @@ macro_rules! decl_tests { ChargeTransactionPayment::from(1), &1, CALL, - &info_from_weight(Weight::one()), + &info_from_weight(Weight::from_ref_time(1)), 1, ).is_err()); assert!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, - &info_from_weight(Weight::one()), + &info_from_weight(Weight::from_ref_time(1)), 1, ).is_err()); }); diff --git a/frame/benchmarking/src/weights.rs b/frame/benchmarking/src/weights.rs index dc2ca26e1af02..9a40e488b7243 100644 --- a/frame/benchmarking/src/weights.rs +++ b/frame/benchmarking/src/weights.rs @@ -75,20 +75,20 @@ impl WeightInfo for SubstrateWeight { fn sr25519_verification(i: u32, ) -> Weight { Weight::from_ref_time(319_000 as RefTimeWeight) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(47_171_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(47_171_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) fn storage_read(i: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_110_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_110_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) fn storage_write(i: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(372_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(372_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } } @@ -113,20 +113,20 @@ impl WeightInfo for () { fn sr25519_verification(i: u32, ) -> Weight { Weight::from_ref_time(319_000 as RefTimeWeight) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(47_171_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(47_171_000 as RefTimeWeight).saturating_mul(i as 
RefTimeWeight)) } // Storage: Skipped Metadata (r:0 w:0) fn storage_read(i: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_110_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_110_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) fn storage_write(i: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(372_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(372_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } } diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index 27a23cb4ffeae..0bb9a16d3cd8a 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -142,7 +142,7 @@ impl WeightInfo for SubstrateWeight { fn spend_funds(b: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(29_233_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(29_233_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -234,7 +234,7 @@ impl WeightInfo for () { fn spend_funds(b: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(29_233_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(29_233_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) diff --git a/frame/child-bounties/src/weights.rs b/frame/child-bounties/src/weights.rs index c5d00d6ed0bd4..d806a676356b6 100644 --- a/frame/child-bounties/src/weights.rs +++ b/frame/child-bounties/src/weights.rs @@ -66,7 +66,7 @@ impl WeightInfo for SubstrateWeight { fn add_child_bounty(d: u32, ) -> Weight { Weight::from_ref_time(51_064_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } @@ -145,7 +145,7 @@ impl WeightInfo for () { fn add_child_bounty(d: u32, ) -> Weight { Weight::from_ref_time(51_064_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } diff --git 
a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index a0cc64cc2408d..fcc0e7134f51c 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -66,11 +66,11 @@ impl WeightInfo for SubstrateWeight { fn set_members(m: u32, n: u32, p: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(10_280_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(10_280_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(126_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(126_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(13_310_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(13_310_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -80,9 +80,9 @@ impl WeightInfo for SubstrateWeight { fn execute(b: u32, m: u32, ) -> Weight { Weight::from_ref_time(16_819_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) @@ -90,9 +90,9 @@ impl WeightInfo for SubstrateWeight { fn propose_execute(b: u32, m: u32, ) -> Weight { Weight::from_ref_time(18_849_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) @@ -103,11 +103,11 @@ impl WeightInfo for SubstrateWeight { fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { Weight::from_ref_time(22_204_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(8_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(180_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(180_000 as 
RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } @@ -116,7 +116,7 @@ impl WeightInfo for SubstrateWeight { fn vote(m: u32, ) -> Weight { Weight::from_ref_time(30_941_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -127,9 +127,9 @@ impl WeightInfo for SubstrateWeight { fn close_early_disapproved(m: u32, p: u32, ) -> Weight { Weight::from_ref_time(32_485_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(39_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(39_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -140,11 +140,11 @@ impl WeightInfo for SubstrateWeight { fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { Weight::from_ref_time(33_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(157_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(157_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -156,9 +156,9 @@ impl WeightInfo for SubstrateWeight { fn close_disapproved(m: u32, p: u32, ) -> Weight { Weight::from_ref_time(33_494_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(58_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(58_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -170,11 +170,11 @@ impl WeightInfo for SubstrateWeight { fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { Weight::from_ref_time(36_566_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as 
RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(63_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(63_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -184,7 +184,7 @@ impl WeightInfo for SubstrateWeight { fn disapprove_proposal(p: u32, ) -> Weight { Weight::from_ref_time(20_159_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -199,11 +199,11 @@ impl WeightInfo for () { fn set_members(m: u32, n: u32, p: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(10_280_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(10_280_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(126_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(126_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(13_310_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(13_310_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) @@ -213,9 +213,9 @@ impl WeightInfo for () { fn execute(b: u32, m: u32, ) -> Weight { Weight::from_ref_time(16_819_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) @@ -223,9 +223,9 @@ impl WeightInfo for () { fn propose_execute(b: u32, m: u32, ) -> Weight { Weight::from_ref_time(18_849_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(m as 
RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) } // Storage: Council Members (r:1 w:0) @@ -236,11 +236,11 @@ impl WeightInfo for () { fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { Weight::from_ref_time(22_204_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(8_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(180_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(180_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } @@ -249,7 +249,7 @@ impl WeightInfo for () { fn vote(m: u32, ) -> Weight { Weight::from_ref_time(30_941_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -260,9 +260,9 @@ impl WeightInfo for () { fn close_early_disapproved(m: u32, p: u32, ) -> Weight { Weight::from_ref_time(32_485_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(39_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(39_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -273,11 +273,11 @@ impl WeightInfo for () { fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { Weight::from_ref_time(33_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(157_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(157_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -289,9 +289,9 @@ impl WeightInfo for () { fn close_disapproved(m: u32, p: u32, ) -> Weight { Weight::from_ref_time(33_494_000 as RefTimeWeight) // Standard Error: 1_000 - 
.saturating_add(Weight::from_ref_time(58_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(58_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -303,11 +303,11 @@ impl WeightInfo for () { fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { Weight::from_ref_time(36_566_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(63_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(63_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -317,7 +317,7 @@ impl WeightInfo for () { fn disapprove_proposal(p: u32, ) -> Weight { Weight::from_ref_time(20_159_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index 9d90cf38269b1..0db285e81a91f 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -26,7 +26,7 @@ use sp_std::{marker::PhantomData, prelude::*}; /// Wrapper for all migrations of this pallet, based on `StorageVersion`. 
 pub fn migrate() -> Weight {
 	let version = StorageVersion::get::>();
-	let mut weight = Weight::new();
+	let mut weight = Weight::zero();
 
 	if version < 4 {
 		weight = weight.saturating_add(v4::migrate::());
@@ -127,7 +127,7 @@ mod v5 {
 	type DeletionQueue = StorageValue, Vec>;
 
 	pub fn migrate() -> Weight {
-		let mut weight = Weight::new();
+		let mut weight = Weight::zero();
 
 		>::translate(|_key, old: OldContractInfo| {
 			weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1));
@@ -216,7 +216,7 @@ mod v6 {
 	type OwnerInfoOf = StorageMap, Identity, CodeHash, OwnerInfo>;
 
 	pub fn migrate() -> Weight {
-		let mut weight = Weight::new();
+		let mut weight = Weight::zero();
 
 		>::translate(|_key, old: OldContractInfo| {
 			weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1));
diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs
index 76e2d74f3d887..9c340b5c93deb 100644
--- a/frame/contracts/src/storage.rs
+++ b/frame/contracts/src/storage.rs
@@ -230,8 +230,7 @@ where
 		let weight_per_key = (T::WeightInfo::on_initialize_per_trie_key(1) -
 			T::WeightInfo::on_initialize_per_trie_key(0))
 		.ref_time();
-		let decoding_weight =
-			weight_per_queue_item.scalar_saturating_mul(queue_len as RefTimeWeight);
+		let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as RefTimeWeight);
 
 		// `weight_per_key` being zero makes no sense and would constitute a failure to
 		// benchmark properly. We opt for not removing any keys at all in this case.
diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs
index 3571434bb823b..f4fa6bb3d72db 100644
--- a/frame/contracts/src/tests.rs
+++ b/frame/contracts/src/tests.rs
@@ -264,7 +264,7 @@ impl RegisteredChainExtension for TempStorageExtension {
 
 parameter_types! {
 	pub BlockWeights: frame_system::limits::BlockWeights =
-		frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND);
+		frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND);
 	pub static ExistentialDeposit: u64 = 1;
 }
 impl frame_system::Config for Test {
diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs
index 659caafa7f0b0..d482d5c4bbf3f 100644
--- a/frame/contracts/src/wasm/code_cache.rs
+++ b/frame/contracts/src/wasm/code_cache.rs
@@ -223,7 +223,7 @@ impl Token for CodeToken {
 			Load(len) => {
 				let computation = T::WeightInfo::call_with_code_per_byte(len)
 					.saturating_sub(T::WeightInfo::call_with_code_per_byte(0));
-				let bandwidth = T::ContractAccessWeight::get().scalar_saturating_mul(len as u64);
+				let bandwidth = T::ContractAccessWeight::get().saturating_mul(len as u64);
 				computation.max(bandwidth)
 			},
 		};
diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs
index 17be3c3da0138..daee41bc63f32 100644
--- a/frame/contracts/src/weights.rs
+++ b/frame/contracts/src/weights.rs
@@ -174,7 +174,7 @@ impl WeightInfo for SubstrateWeight {
 	fn on_initialize_per_trie_key(k: u32, ) -> Weight {
 		Weight::from_ref_time(8_564_000 as RefTimeWeight)
 			// Standard Error: 0
-			.saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight))
 			.saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight))
 			.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight))
 			.saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight)))
@@ -184,7 +184,7 @@ impl WeightInfo for SubstrateWeight {
 	fn on_initialize_per_queue_item(q: 
u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_944_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_944_000 as RefTimeWeight).saturating_mul(q as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -194,7 +194,7 @@ impl WeightInfo for SubstrateWeight { fn reinstrument(c: u32, ) -> Weight { Weight::from_ref_time(19_016_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -206,7 +206,7 @@ impl WeightInfo for SubstrateWeight { fn call_with_code_per_byte(c: u32, ) -> Weight { Weight::from_ref_time(205_194_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(53_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(53_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -222,9 +222,9 @@ impl WeightInfo for SubstrateWeight { fn instantiate_with_code(c: u32, s: u32, ) -> Weight { Weight::from_ref_time(288_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } @@ -238,7 +238,7 @@ impl WeightInfo for SubstrateWeight { fn instantiate(s: u32, ) -> Weight { Weight::from_ref_time(186_136_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) } @@ -258,7 +258,7 @@ impl WeightInfo for SubstrateWeight { fn upload_code(c: u32, ) -> Weight { Weight::from_ref_time(51_721_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -285,7 +285,7 @@ impl WeightInfo for SubstrateWeight { fn seal_caller(r: u32, ) -> Weight { Weight::from_ref_time(206_405_000 as RefTimeWeight) // Standard Error: 112_000 - .saturating_add(Weight::from_ref_time(40_987_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_987_000 as 
RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -297,7 +297,7 @@ impl WeightInfo for SubstrateWeight { fn seal_is_contract(r: u32, ) -> Weight { Weight::from_ref_time(106_220_000 as RefTimeWeight) // Standard Error: 710_000 - .saturating_add(Weight::from_ref_time(307_648_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(307_648_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -310,7 +310,7 @@ impl WeightInfo for SubstrateWeight { fn seal_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(104_498_000 as RefTimeWeight) // Standard Error: 633_000 - .saturating_add(Weight::from_ref_time(368_901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(368_901_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -323,7 +323,7 @@ impl WeightInfo for SubstrateWeight { fn seal_own_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(208_696_000 as RefTimeWeight) // Standard Error: 101_000 - .saturating_add(Weight::from_ref_time(44_445_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(44_445_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -335,7 +335,7 @@ impl WeightInfo for SubstrateWeight { fn seal_caller_is_origin(r: u32, ) -> Weight { Weight::from_ref_time(205_612_000 as RefTimeWeight) // Standard Error: 68_000 - .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -347,7 +347,7 @@ impl WeightInfo for SubstrateWeight { fn seal_address(r: u32, ) -> Weight { Weight::from_ref_time(206_947_000 as RefTimeWeight) // Standard Error: 107_000 - .saturating_add(Weight::from_ref_time(40_789_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_789_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -359,7 +359,7 @@ impl WeightInfo for SubstrateWeight { fn seal_gas_left(r: u32, ) -> Weight { Weight::from_ref_time(208_692_000 as RefTimeWeight) // Standard Error: 109_000 - .saturating_add(Weight::from_ref_time(40_600_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_600_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -371,7 +371,7 @@ impl WeightInfo for 
SubstrateWeight { fn seal_balance(r: u32, ) -> Weight { Weight::from_ref_time(209_811_000 as RefTimeWeight) // Standard Error: 208_000 - .saturating_add(Weight::from_ref_time(116_831_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(116_831_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -383,7 +383,7 @@ impl WeightInfo for SubstrateWeight { fn seal_value_transferred(r: u32, ) -> Weight { Weight::from_ref_time(207_406_000 as RefTimeWeight) // Standard Error: 117_000 - .saturating_add(Weight::from_ref_time(40_702_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_702_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -395,7 +395,7 @@ impl WeightInfo for SubstrateWeight { fn seal_minimum_balance(r: u32, ) -> Weight { Weight::from_ref_time(209_260_000 as RefTimeWeight) // Standard Error: 130_000 - .saturating_add(Weight::from_ref_time(40_479_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_479_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -407,7 +407,7 @@ impl WeightInfo for SubstrateWeight { fn seal_block_number(r: u32, ) -> Weight { Weight::from_ref_time(206_448_000 as RefTimeWeight) // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(40_134_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_134_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -419,7 +419,7 @@ impl WeightInfo for SubstrateWeight { fn seal_now(r: u32, ) -> Weight { Weight::from_ref_time(206_969_000 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(40_251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_251_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -432,7 +432,7 @@ impl WeightInfo for SubstrateWeight { fn seal_weight_to_fee(r: u32, ) -> Weight { Weight::from_ref_time(211_611_000 as RefTimeWeight) // Standard Error: 175_000 - .saturating_add(Weight::from_ref_time(98_675_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(98_675_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -444,7 +444,7 @@ impl WeightInfo for SubstrateWeight { fn seal_gas(r: u32, ) -> Weight { Weight::from_ref_time(134_484_000 as RefTimeWeight) // Standard Error: 57_000 - .saturating_add(Weight::from_ref_time(19_329_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(19_329_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) 
.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -456,7 +456,7 @@ impl WeightInfo for SubstrateWeight { fn seal_input(r: u32, ) -> Weight { Weight::from_ref_time(208_556_000 as RefTimeWeight) // Standard Error: 125_000 - .saturating_add(Weight::from_ref_time(40_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_328_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -468,7 +468,7 @@ impl WeightInfo for SubstrateWeight { fn seal_input_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(268_886_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(9_627_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_627_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -490,7 +490,7 @@ impl WeightInfo for SubstrateWeight { fn seal_return_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(204_258_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(183_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(183_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -504,7 +504,7 @@ impl WeightInfo for SubstrateWeight { fn seal_terminate(r: u32, ) -> Weight { Weight::from_ref_time(206_625_000 as RefTimeWeight) // Standard Error: 672_000 - .saturating_add(Weight::from_ref_time(59_377_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(59_377_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -519,7 +519,7 @@ impl WeightInfo for SubstrateWeight { fn seal_random(r: u32, ) -> Weight { Weight::from_ref_time(208_866_000 as RefTimeWeight) // Standard Error: 164_000 - .saturating_add(Weight::from_ref_time(133_438_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(133_438_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -531,7 +531,7 @@ impl WeightInfo for SubstrateWeight { fn seal_deposit_event(r: u32, ) -> Weight { Weight::from_ref_time(220_860_000 as RefTimeWeight) // Standard Error: 209_000 - .saturating_add(Weight::from_ref_time(239_951_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(239_951_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -545,9 +545,9 @@ impl WeightInfo for SubstrateWeight { fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { Weight::from_ref_time(439_782_000 as RefTimeWeight) // Standard Error: 1_643_000 - .saturating_add(Weight::from_ref_time(264_687_000 as 
RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(264_687_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 323_000 - .saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -561,7 +561,7 @@ impl WeightInfo for SubstrateWeight { fn seal_debug_message(r: u32, ) -> Weight { Weight::from_ref_time(140_280_000 as RefTimeWeight) // Standard Error: 82_000 - .saturating_add(Weight::from_ref_time(32_717_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(32_717_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -570,7 +570,7 @@ impl WeightInfo for SubstrateWeight { fn seal_set_storage(r: u32, ) -> Weight { Weight::from_ref_time(161_247_000 as RefTimeWeight) // Standard Error: 883_000 - .saturating_add(Weight::from_ref_time(423_997_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(423_997_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -581,7 +581,7 @@ impl WeightInfo for SubstrateWeight { fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { Weight::from_ref_time(529_247_000 as RefTimeWeight) // Standard Error: 2_745_000 - .saturating_add(Weight::from_ref_time(85_282_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(85_282_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) @@ -592,7 +592,7 @@ impl WeightInfo for SubstrateWeight { fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { Weight::from_ref_time(529_812_000 as RefTimeWeight) // Standard Error: 2_513_000 - .saturating_add(Weight::from_ref_time(74_554_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(74_554_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) @@ -603,7 +603,7 @@ impl WeightInfo for SubstrateWeight { fn seal_clear_storage(r: u32, ) -> Weight { Weight::from_ref_time(184_803_000 as RefTimeWeight) // Standard Error: 733_000 - .saturating_add(Weight::from_ref_time(404_933_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(404_933_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) 
.saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -614,7 +614,7 @@ impl WeightInfo for SubstrateWeight { fn seal_clear_storage_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(500_958_000 as RefTimeWeight) // Standard Error: 2_980_000 - .saturating_add(Weight::from_ref_time(75_996_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(75_996_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(52 as RefTimeWeight)) @@ -625,7 +625,7 @@ impl WeightInfo for SubstrateWeight { fn seal_get_storage(r: u32, ) -> Weight { Weight::from_ref_time(177_682_000 as RefTimeWeight) // Standard Error: 743_000 - .saturating_add(Weight::from_ref_time(338_172_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(338_172_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -635,7 +635,7 @@ impl WeightInfo for SubstrateWeight { fn seal_get_storage_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(465_285_000 as RefTimeWeight) // Standard Error: 2_599_000 - .saturating_add(Weight::from_ref_time(155_106_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(155_106_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -645,7 +645,7 @@ impl WeightInfo for SubstrateWeight { fn seal_contains_storage(r: u32, ) -> Weight { Weight::from_ref_time(179_118_000 as RefTimeWeight) // Standard Error: 572_000 - .saturating_add(Weight::from_ref_time(311_083_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(311_083_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -655,7 +655,7 @@ impl WeightInfo for SubstrateWeight { fn seal_contains_storage_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(423_056_000 as RefTimeWeight) // Standard Error: 2_037_000 - .saturating_add(Weight::from_ref_time(69_665_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(69_665_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(54 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -665,7 +665,7 @@ impl WeightInfo for SubstrateWeight { fn seal_take_storage(r: u32, ) -> Weight { Weight::from_ref_time(188_884_000 as RefTimeWeight) // Standard Error: 761_000 - .saturating_add(Weight::from_ref_time(432_781_000 as 
RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(432_781_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -676,7 +676,7 @@ impl WeightInfo for SubstrateWeight { fn seal_take_storage_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(532_408_000 as RefTimeWeight) // Standard Error: 3_348_000 - .saturating_add(Weight::from_ref_time(164_943_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(164_943_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) @@ -690,7 +690,7 @@ impl WeightInfo for SubstrateWeight { fn seal_transfer(r: u32, ) -> Weight { Weight::from_ref_time(127_181_000 as RefTimeWeight) // Standard Error: 1_495_000 - .saturating_add(Weight::from_ref_time(1_500_589_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_500_589_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -704,7 +704,7 @@ impl WeightInfo for SubstrateWeight { fn seal_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_803_000 - .saturating_add(Weight::from_ref_time(14_860_909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(14_860_909_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -718,7 +718,7 @@ impl WeightInfo for SubstrateWeight { fn seal_delegate_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 6_045_000 - .saturating_add(Weight::from_ref_time(14_797_140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(14_797_140_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -731,9 +731,9 @@ impl WeightInfo for SubstrateWeight { fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { Weight::from_ref_time(9_196_444_000 as RefTimeWeight) // Standard Error: 20_486_000 - .saturating_add(Weight::from_ref_time(1_458_153_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_458_153_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(9_718_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_718_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(85 as 
RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(81 as RefTimeWeight)) @@ -749,7 +749,7 @@ impl WeightInfo for SubstrateWeight { fn seal_instantiate(r: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 36_253_000 - .saturating_add(Weight::from_ref_time(21_201_529_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(21_201_529_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) @@ -766,9 +766,9 @@ impl WeightInfo for SubstrateWeight { fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { Weight::from_ref_time(12_282_498_000 as RefTimeWeight) // Standard Error: 48_112_000 - .saturating_add(Weight::from_ref_time(720_795_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(720_795_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 22_000 - .saturating_add(Weight::from_ref_time(124_274_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_274_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(167 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(165 as RefTimeWeight)) @@ -782,7 +782,7 @@ impl WeightInfo for SubstrateWeight { fn seal_hash_sha2_256(r: u32, ) -> Weight { Weight::from_ref_time(203_959_000 as RefTimeWeight) // Standard Error: 142_000 - .saturating_add(Weight::from_ref_time(61_311_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(61_311_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -794,7 +794,7 @@ impl WeightInfo for SubstrateWeight { fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(349_915_000 as RefTimeWeight) // Standard Error: 40_000 - .saturating_add(Weight::from_ref_time(320_652_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(320_652_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -806,7 +806,7 @@ impl WeightInfo for SubstrateWeight { fn seal_hash_keccak_256(r: u32, ) -> Weight { Weight::from_ref_time(209_219_000 as RefTimeWeight) // Standard Error: 157_000 - .saturating_add(Weight::from_ref_time(73_728_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_728_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -818,7 +818,7 @@ impl WeightInfo for SubstrateWeight { fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(208_860_000 as RefTimeWeight) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(245_718_000 as 
RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(245_718_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -830,7 +830,7 @@ impl WeightInfo for SubstrateWeight { fn seal_hash_blake2_256(r: u32, ) -> Weight { Weight::from_ref_time(206_165_000 as RefTimeWeight) // Standard Error: 138_000 - .saturating_add(Weight::from_ref_time(51_644_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_644_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -842,7 +842,7 @@ impl WeightInfo for SubstrateWeight { fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(255_955_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_090_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_090_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -854,7 +854,7 @@ impl WeightInfo for SubstrateWeight { fn seal_hash_blake2_128(r: u32, ) -> Weight { Weight::from_ref_time(208_153_000 as RefTimeWeight) // Standard Error: 140_000 - .saturating_add(Weight::from_ref_time(51_264_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_264_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -866,7 +866,7 @@ impl WeightInfo for SubstrateWeight { fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(278_368_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_006_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_006_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -878,7 +878,7 @@ impl WeightInfo for SubstrateWeight { fn seal_ecdsa_recover(r: u32, ) -> Weight { Weight::from_ref_time(331_955_000 as RefTimeWeight) // Standard Error: 1_155_000 - .saturating_add(Weight::from_ref_time(3_069_955_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_069_955_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -890,7 +890,7 @@ impl WeightInfo for SubstrateWeight { fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { Weight::from_ref_time(207_838_000 as RefTimeWeight) // Standard Error: 783_000 - .saturating_add(Weight::from_ref_time(2_058_503_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_058_503_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -903,7 +903,7 @@ impl WeightInfo for SubstrateWeight { fn 
seal_set_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_567_000 - .saturating_add(Weight::from_ref_time(774_380_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(774_380_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } @@ -911,307 +911,307 @@ impl WeightInfo for SubstrateWeight { fn instr_i64const(r: u32, ) -> Weight { Weight::from_ref_time(73_955_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(612_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(612_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { Weight::from_ref_time(74_057_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { Weight::from_ref_time(74_137_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { Weight::from_ref_time(73_844_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_773_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_773_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { Weight::from_ref_time(73_979_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_952_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_952_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { Weight::from_ref_time(73_924_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(941_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(941_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { Weight::from_ref_time(73_574_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_439_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_439_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_br_table(r: u32, ) -> Weight { Weight::from_ref_time(73_343_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_603_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_603_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { Weight::from_ref_time(76_267_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { Weight::from_ref_time(74_877_000 as RefTimeWeight) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(7_144_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(7_144_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { Weight::from_ref_time(88_665_000 as RefTimeWeight) // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(9_142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { Weight::from_ref_time(98_600_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(469_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(469_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { Weight::from_ref_time(74_555_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(624_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(624_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { Weight::from_ref_time(74_329_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(688_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(688_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { Weight::from_ref_time(74_612_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(909_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { Weight::from_ref_time(76_906_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_192_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_192_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_global_set(r: u32, ) -> Weight { Weight::from_ref_time(76_979_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_361_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_361_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { Weight::from_ref_time(74_370_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(661_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(661_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { Weight::from_ref_time(73_584_000 as RefTimeWeight) // Standard Error: 353_000 - .saturating_add(Weight::from_ref_time(187_114_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(187_114_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { Weight::from_ref_time(74_206_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(884_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(884_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { Weight::from_ref_time(73_992_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(893_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(893_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { Weight::from_ref_time(73_985_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(891_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(891_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { Weight::from_ref_time(74_117_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(901_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { Weight::from_ref_time(73_981_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(866_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(866_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { Weight::from_ref_time(74_104_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i32wrapi64(r: u32, ) -> Weight { Weight::from_ref_time(74_293_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(878_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(878_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { Weight::from_ref_time(74_055_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { Weight::from_ref_time(73_710_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { Weight::from_ref_time(73_917_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_355_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_355_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { Weight::from_ref_time(74_048_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { Weight::from_ref_time(74_029_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_349_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_349_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { Weight::from_ref_time(74_267_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { Weight::from_ref_time(73_952_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { Weight::from_ref_time(73_851_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_368_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_368_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ges(r: u32, ) -> Weight { Weight::from_ref_time(74_034_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_348_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_348_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { Weight::from_ref_time(73_979_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { Weight::from_ref_time(74_000_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_328_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { Weight::from_ref_time(73_883_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_331_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_331_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { Weight::from_ref_time(74_216_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { Weight::from_ref_time(73_989_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_998_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_998_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { Weight::from_ref_time(73_857_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_073_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_073_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { Weight::from_ref_time(73_801_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_027_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_027_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { Weight::from_ref_time(74_130_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_064_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_064_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64and(r: u32, ) -> Weight { Weight::from_ref_time(74_071_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_327_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_327_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { Weight::from_ref_time(74_201_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_330_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_330_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { Weight::from_ref_time(74_241_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_321_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_321_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { Weight::from_ref_time(74_331_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(1_347_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_347_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { Weight::from_ref_time(73_674_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_359_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_359_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { Weight::from_ref_time(73_807_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { Weight::from_ref_time(73_725_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rotr(r: u32, ) -> Weight { Weight::from_ref_time(73_755_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } } @@ -1227,7 +1227,7 @@ impl WeightInfo for () { fn on_initialize_per_trie_key(k: u32, ) -> Weight { Weight::from_ref_time(8_564_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) @@ -1237,7 +1237,7 @@ impl WeightInfo for () { fn on_initialize_per_queue_item(q: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_944_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_944_000 as RefTimeWeight).saturating_mul(q as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1247,7 +1247,7 @@ impl WeightInfo for () { fn reinstrument(c: u32, ) -> Weight { Weight::from_ref_time(19_016_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1259,7 +1259,7 @@ impl WeightInfo for () { fn call_with_code_per_byte(c: u32, ) -> Weight { Weight::from_ref_time(205_194_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(53_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(53_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -1275,9 +1275,9 @@ impl WeightInfo for () { fn instantiate_with_code(c: u32, s: u32, ) -> Weight { Weight::from_ref_time(288_487_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } @@ -1291,7 +1291,7 @@ impl WeightInfo for () { fn instantiate(s: u32, ) -> Weight { Weight::from_ref_time(186_136_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as 
RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) } @@ -1311,7 +1311,7 @@ impl WeightInfo for () { fn upload_code(c: u32, ) -> Weight { Weight::from_ref_time(51_721_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -1338,7 +1338,7 @@ impl WeightInfo for () { fn seal_caller(r: u32, ) -> Weight { Weight::from_ref_time(206_405_000 as RefTimeWeight) // Standard Error: 112_000 - .saturating_add(Weight::from_ref_time(40_987_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_987_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1350,7 +1350,7 @@ impl WeightInfo for () { fn seal_is_contract(r: u32, ) -> Weight { Weight::from_ref_time(106_220_000 as RefTimeWeight) // Standard Error: 710_000 - .saturating_add(Weight::from_ref_time(307_648_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(307_648_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -1363,7 +1363,7 @@ impl WeightInfo for () { fn seal_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(104_498_000 as RefTimeWeight) // Standard Error: 633_000 - .saturating_add(Weight::from_ref_time(368_901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(368_901_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -1376,7 +1376,7 @@ impl WeightInfo for () { fn seal_own_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(208_696_000 as RefTimeWeight) // Standard Error: 101_000 - .saturating_add(Weight::from_ref_time(44_445_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(44_445_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1388,7 +1388,7 @@ impl WeightInfo for () { fn seal_caller_is_origin(r: u32, ) -> Weight { Weight::from_ref_time(205_612_000 as RefTimeWeight) // Standard Error: 68_000 - .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1400,7 +1400,7 @@ impl WeightInfo for () { fn seal_address(r: u32, ) -> Weight { 
Weight::from_ref_time(206_947_000 as RefTimeWeight) // Standard Error: 107_000 - .saturating_add(Weight::from_ref_time(40_789_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_789_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1412,7 +1412,7 @@ impl WeightInfo for () { fn seal_gas_left(r: u32, ) -> Weight { Weight::from_ref_time(208_692_000 as RefTimeWeight) // Standard Error: 109_000 - .saturating_add(Weight::from_ref_time(40_600_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_600_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1424,7 +1424,7 @@ impl WeightInfo for () { fn seal_balance(r: u32, ) -> Weight { Weight::from_ref_time(209_811_000 as RefTimeWeight) // Standard Error: 208_000 - .saturating_add(Weight::from_ref_time(116_831_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(116_831_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1436,7 +1436,7 @@ impl WeightInfo for () { fn seal_value_transferred(r: u32, ) -> Weight { Weight::from_ref_time(207_406_000 as RefTimeWeight) // Standard Error: 117_000 - .saturating_add(Weight::from_ref_time(40_702_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_702_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1448,7 +1448,7 @@ impl WeightInfo for () { fn seal_minimum_balance(r: u32, ) -> Weight { Weight::from_ref_time(209_260_000 as RefTimeWeight) // Standard Error: 130_000 - .saturating_add(Weight::from_ref_time(40_479_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_479_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1460,7 +1460,7 @@ impl WeightInfo for () { fn seal_block_number(r: u32, ) -> Weight { Weight::from_ref_time(206_448_000 as RefTimeWeight) // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(40_134_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_134_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1472,7 +1472,7 @@ impl WeightInfo for () { fn seal_now(r: u32, ) -> Weight { Weight::from_ref_time(206_969_000 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(40_251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_251_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1485,7 +1485,7 @@ impl 
WeightInfo for () { fn seal_weight_to_fee(r: u32, ) -> Weight { Weight::from_ref_time(211_611_000 as RefTimeWeight) // Standard Error: 175_000 - .saturating_add(Weight::from_ref_time(98_675_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(98_675_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1497,7 +1497,7 @@ impl WeightInfo for () { fn seal_gas(r: u32, ) -> Weight { Weight::from_ref_time(134_484_000 as RefTimeWeight) // Standard Error: 57_000 - .saturating_add(Weight::from_ref_time(19_329_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(19_329_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1509,7 +1509,7 @@ impl WeightInfo for () { fn seal_input(r: u32, ) -> Weight { Weight::from_ref_time(208_556_000 as RefTimeWeight) // Standard Error: 125_000 - .saturating_add(Weight::from_ref_time(40_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_328_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1521,7 +1521,7 @@ impl WeightInfo for () { fn seal_input_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(268_886_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(9_627_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_627_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1543,7 +1543,7 @@ impl WeightInfo for () { fn seal_return_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(204_258_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(183_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(183_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1557,7 +1557,7 @@ impl WeightInfo for () { fn seal_terminate(r: u32, ) -> Weight { Weight::from_ref_time(206_625_000 as RefTimeWeight) // Standard Error: 672_000 - .saturating_add(Weight::from_ref_time(59_377_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(59_377_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -1572,7 +1572,7 @@ impl WeightInfo for () { fn seal_random(r: u32, ) -> Weight { Weight::from_ref_time(208_866_000 as RefTimeWeight) // Standard Error: 164_000 - .saturating_add(Weight::from_ref_time(133_438_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(133_438_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) 
.saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1584,7 +1584,7 @@ impl WeightInfo for () { fn seal_deposit_event(r: u32, ) -> Weight { Weight::from_ref_time(220_860_000 as RefTimeWeight) // Standard Error: 209_000 - .saturating_add(Weight::from_ref_time(239_951_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(239_951_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1598,9 +1598,9 @@ impl WeightInfo for () { fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { Weight::from_ref_time(439_782_000 as RefTimeWeight) // Standard Error: 1_643_000 - .saturating_add(Weight::from_ref_time(264_687_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(264_687_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 323_000 - .saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -1614,7 +1614,7 @@ impl WeightInfo for () { fn seal_debug_message(r: u32, ) -> Weight { Weight::from_ref_time(140_280_000 as RefTimeWeight) // Standard Error: 82_000 - .saturating_add(Weight::from_ref_time(32_717_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(32_717_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1623,7 +1623,7 @@ impl WeightInfo for () { fn seal_set_storage(r: u32, ) -> Weight { Weight::from_ref_time(161_247_000 as RefTimeWeight) // Standard Error: 883_000 - .saturating_add(Weight::from_ref_time(423_997_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(423_997_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -1634,7 +1634,7 @@ impl WeightInfo for () { fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { Weight::from_ref_time(529_247_000 as RefTimeWeight) // Standard Error: 2_745_000 - .saturating_add(Weight::from_ref_time(85_282_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(85_282_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight)) @@ -1645,7 +1645,7 @@ impl WeightInfo for () { fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { Weight::from_ref_time(529_812_000 as RefTimeWeight) // Standard Error: 2_513_000 - .saturating_add(Weight::from_ref_time(74_554_000 as 
RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(74_554_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight)) @@ -1656,7 +1656,7 @@ impl WeightInfo for () { fn seal_clear_storage(r: u32, ) -> Weight { Weight::from_ref_time(184_803_000 as RefTimeWeight) // Standard Error: 733_000 - .saturating_add(Weight::from_ref_time(404_933_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(404_933_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) @@ -1667,7 +1667,7 @@ impl WeightInfo for () { fn seal_clear_storage_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(500_958_000 as RefTimeWeight) // Standard Error: 2_980_000 - .saturating_add(Weight::from_ref_time(75_996_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(75_996_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(52 as RefTimeWeight)) @@ -1678,7 +1678,7 @@ impl WeightInfo for () { fn seal_get_storage(r: u32, ) -> Weight { Weight::from_ref_time(177_682_000 as RefTimeWeight) // Standard Error: 743_000 - .saturating_add(Weight::from_ref_time(338_172_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(338_172_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -1688,7 +1688,7 @@ impl WeightInfo for () { fn seal_get_storage_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(465_285_000 as RefTimeWeight) // Standard Error: 2_599_000 - .saturating_add(Weight::from_ref_time(155_106_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(155_106_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -1698,7 +1698,7 @@ impl WeightInfo for () { fn seal_contains_storage(r: u32, ) -> Weight { Weight::from_ref_time(179_118_000 as RefTimeWeight) // Standard Error: 572_000 - .saturating_add(Weight::from_ref_time(311_083_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(311_083_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -1708,7 +1708,7 @@ impl WeightInfo for 
() { fn seal_contains_storage_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(423_056_000 as RefTimeWeight) // Standard Error: 2_037_000 - .saturating_add(Weight::from_ref_time(69_665_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(69_665_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(54 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -1718,7 +1718,7 @@ impl WeightInfo for () { fn seal_take_storage(r: u32, ) -> Weight { Weight::from_ref_time(188_884_000 as RefTimeWeight) // Standard Error: 761_000 - .saturating_add(Weight::from_ref_time(432_781_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(432_781_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) @@ -1729,7 +1729,7 @@ impl WeightInfo for () { fn seal_take_storage_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(532_408_000 as RefTimeWeight) // Standard Error: 3_348_000 - .saturating_add(Weight::from_ref_time(164_943_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(164_943_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight)) @@ -1743,7 +1743,7 @@ impl WeightInfo for () { fn seal_transfer(r: u32, ) -> Weight { Weight::from_ref_time(127_181_000 as RefTimeWeight) // Standard Error: 1_495_000 - .saturating_add(Weight::from_ref_time(1_500_589_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_500_589_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) @@ -1757,7 +1757,7 @@ impl WeightInfo for () { fn seal_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_803_000 - .saturating_add(Weight::from_ref_time(14_860_909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(14_860_909_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -1771,7 +1771,7 @@ impl WeightInfo for () { fn seal_delegate_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 6_045_000 - .saturating_add(Weight::from_ref_time(14_797_140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(14_797_140_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) 
.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1784,9 +1784,9 @@ impl WeightInfo for () { fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { Weight::from_ref_time(9_196_444_000 as RefTimeWeight) // Standard Error: 20_486_000 - .saturating_add(Weight::from_ref_time(1_458_153_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_458_153_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(9_718_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_718_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(85 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(81 as RefTimeWeight)) @@ -1802,7 +1802,7 @@ impl WeightInfo for () { fn seal_instantiate(r: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 36_253_000 - .saturating_add(Weight::from_ref_time(21_201_529_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(21_201_529_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) @@ -1819,9 +1819,9 @@ impl WeightInfo for () { fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { Weight::from_ref_time(12_282_498_000 as RefTimeWeight) // Standard Error: 48_112_000 - .saturating_add(Weight::from_ref_time(720_795_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(720_795_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 22_000 - .saturating_add(Weight::from_ref_time(124_274_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_274_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(167 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(165 as RefTimeWeight)) @@ -1835,7 +1835,7 @@ impl WeightInfo for () { fn seal_hash_sha2_256(r: u32, ) -> Weight { Weight::from_ref_time(203_959_000 as RefTimeWeight) // Standard Error: 142_000 - .saturating_add(Weight::from_ref_time(61_311_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(61_311_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1847,7 +1847,7 @@ impl WeightInfo for () { fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(349_915_000 as RefTimeWeight) // Standard Error: 40_000 - .saturating_add(Weight::from_ref_time(320_652_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(320_652_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1859,7 +1859,7 @@ impl 
WeightInfo for () { fn seal_hash_keccak_256(r: u32, ) -> Weight { Weight::from_ref_time(209_219_000 as RefTimeWeight) // Standard Error: 157_000 - .saturating_add(Weight::from_ref_time(73_728_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_728_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1871,7 +1871,7 @@ impl WeightInfo for () { fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(208_860_000 as RefTimeWeight) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(245_718_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(245_718_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1883,7 +1883,7 @@ impl WeightInfo for () { fn seal_hash_blake2_256(r: u32, ) -> Weight { Weight::from_ref_time(206_165_000 as RefTimeWeight) // Standard Error: 138_000 - .saturating_add(Weight::from_ref_time(51_644_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_644_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1895,7 +1895,7 @@ impl WeightInfo for () { fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(255_955_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_090_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_090_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1907,7 +1907,7 @@ impl WeightInfo for () { fn seal_hash_blake2_128(r: u32, ) -> Weight { Weight::from_ref_time(208_153_000 as RefTimeWeight) // Standard Error: 140_000 - .saturating_add(Weight::from_ref_time(51_264_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_264_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1919,7 +1919,7 @@ impl WeightInfo for () { fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(278_368_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_006_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_006_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1931,7 +1931,7 @@ impl WeightInfo for () { fn seal_ecdsa_recover(r: u32, ) -> Weight { Weight::from_ref_time(331_955_000 as RefTimeWeight) // Standard Error: 1_155_000 - .saturating_add(Weight::from_ref_time(3_069_955_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_069_955_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 
as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1943,7 +1943,7 @@ impl WeightInfo for () { fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { Weight::from_ref_time(207_838_000 as RefTimeWeight) // Standard Error: 783_000 - .saturating_add(Weight::from_ref_time(2_058_503_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_058_503_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -1956,7 +1956,7 @@ impl WeightInfo for () { fn seal_set_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_567_000 - .saturating_add(Weight::from_ref_time(774_380_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(774_380_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) } @@ -1964,306 +1964,306 @@ impl WeightInfo for () { fn instr_i64const(r: u32, ) -> Weight { Weight::from_ref_time(73_955_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(612_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(612_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { Weight::from_ref_time(74_057_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { Weight::from_ref_time(74_137_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { Weight::from_ref_time(73_844_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_773_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_773_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { Weight::from_ref_time(73_979_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_952_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_952_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { Weight::from_ref_time(73_924_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(941_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(941_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
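The `seal_*` entries further above additionally charge for storage access on top of compute time: fixed reads and writes plus per-call reads scaled by `r`. A rough sketch of that composition, with hypothetical database prices standing in for `RocksDbWeight`:

// Hypothetical per-operation storage prices; the real values come from RocksDbWeight.
#[derive(Clone, Copy)]
struct DbWeight {
    read: u64,
    write: u64,
}

impl DbWeight {
    fn reads(self, n: u64) -> u64 {
        self.read.saturating_mul(n)
    }
    fn writes(self, n: u64) -> u64 {
        self.write.saturating_mul(n)
    }
}

// Mirrors the shape of e.g. `seal_is_contract`: base + per_call * r compute time,
// then 4 fixed reads, 80 reads per call, and 1 fixed write.
fn seal_like_weight(db: DbWeight, base: u64, per_call: u64, r: u64) -> u64 {
    base.saturating_add(per_call.saturating_mul(r))
        .saturating_add(db.reads(4))
        .saturating_add(db.reads(80u64.saturating_mul(r)))
        .saturating_add(db.writes(1))
}

fn main() {
    // Hypothetical read/write prices, not the benchmarked RocksDB constants.
    let db = DbWeight { read: 25_000_000, write: 100_000_000 };
    println!("total ref-time: {}", seal_like_weight(db, 106_220_000, 307_648_000, 2));
}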
fn instr_br_if(r: u32, ) -> Weight { Weight::from_ref_time(73_574_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_439_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_439_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { Weight::from_ref_time(73_343_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_603_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_603_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { Weight::from_ref_time(76_267_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { Weight::from_ref_time(74_877_000 as RefTimeWeight) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(7_144_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(7_144_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { Weight::from_ref_time(88_665_000 as RefTimeWeight) // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(9_142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { Weight::from_ref_time(98_600_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(469_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(469_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { Weight::from_ref_time(74_555_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(624_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(624_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { Weight::from_ref_time(74_329_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(688_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(688_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { Weight::from_ref_time(74_612_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(909_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(909_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_global_get(r: u32, ) -> Weight { Weight::from_ref_time(76_906_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_192_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_192_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { Weight::from_ref_time(76_979_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_361_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_361_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { Weight::from_ref_time(74_370_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(661_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(661_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { Weight::from_ref_time(73_584_000 as RefTimeWeight) // Standard Error: 353_000 - .saturating_add(Weight::from_ref_time(187_114_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(187_114_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { Weight::from_ref_time(74_206_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(884_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(884_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { Weight::from_ref_time(73_992_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(893_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(893_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { Weight::from_ref_time(73_985_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(891_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(891_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { Weight::from_ref_time(74_117_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(901_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(901_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { Weight::from_ref_time(73_981_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(866_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(866_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64extendui32(r: u32, ) -> Weight { Weight::from_ref_time(74_104_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { Weight::from_ref_time(74_293_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(878_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(878_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { Weight::from_ref_time(74_055_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { Weight::from_ref_time(73_710_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { Weight::from_ref_time(73_917_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_355_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_355_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { Weight::from_ref_time(74_048_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { Weight::from_ref_time(74_029_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_349_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_349_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { Weight::from_ref_time(74_267_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { Weight::from_ref_time(73_952_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64leu(r: u32, ) -> Weight { Weight::from_ref_time(73_851_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_368_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_368_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { Weight::from_ref_time(74_034_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_348_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_348_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { Weight::from_ref_time(73_979_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { Weight::from_ref_time(74_000_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_328_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_328_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { Weight::from_ref_time(73_883_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_331_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_331_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { Weight::from_ref_time(74_216_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { Weight::from_ref_time(73_989_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_998_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_998_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { Weight::from_ref_time(73_857_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_073_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_073_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { Weight::from_ref_time(73_801_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_027_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_027_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64remu(r: u32, ) -> Weight { Weight::from_ref_time(74_130_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_064_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_064_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { Weight::from_ref_time(74_071_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_327_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_327_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { Weight::from_ref_time(74_201_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_330_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_330_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { Weight::from_ref_time(74_241_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_321_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_321_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { Weight::from_ref_time(74_331_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(1_347_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_347_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { Weight::from_ref_time(73_674_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_359_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_359_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { Weight::from_ref_time(73_807_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { Weight::from_ref_time(73_725_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } /// The range of component `r` is `[0, 50]`. 
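The mechanical change applied throughout these weight files swaps `scalar_saturating_mul` for `saturating_mul` wherever a `Weight` is scaled by a benchmark component. A minimal stand-in type showing that both spellings compute the same saturating product (an assumption for illustration; the real `Weight` type in frame_support carries more than a ref-time field):

#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight {
    ref_time: u64,
}

impl Weight {
    fn from_ref_time(t: u64) -> Self {
        Weight { ref_time: t }
    }
    fn saturating_add(self, other: Weight) -> Weight {
        Weight { ref_time: self.ref_time.saturating_add(other.ref_time) }
    }
    // New spelling used by the updated weight functions.
    fn saturating_mul(self, scalar: u64) -> Weight {
        Weight { ref_time: self.ref_time.saturating_mul(scalar) }
    }
    // Old spelling kept only for comparison here; it computes the same thing.
    fn scalar_saturating_mul(self, scalar: u64) -> Weight {
        self.saturating_mul(scalar)
    }
}

fn main() {
    let r = 50u64;
    let old = Weight::from_ref_time(73_955_000)
        .saturating_add(Weight::from_ref_time(612_000).scalar_saturating_mul(r));
    let new = Weight::from_ref_time(73_955_000)
        .saturating_add(Weight::from_ref_time(612_000).saturating_mul(r));
    assert_eq!(old, new); // only the method name changes, not the result
}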
fn instr_i64rotr(r: u32, ) -> Weight { Weight::from_ref_time(73_755_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) } } diff --git a/frame/conviction-voting/src/weights.rs b/frame/conviction-voting/src/weights.rs index 10c5c975a81f1..dbc7ce06ee825 100644 --- a/frame/conviction-voting/src/weights.rs +++ b/frame/conviction-voting/src/weights.rs @@ -99,7 +99,7 @@ impl WeightInfo for SubstrateWeight { fn delegate(r: u32, ) -> Weight { Weight::from_ref_time(51_518_000 as RefTimeWeight) // Standard Error: 83_000 - .saturating_add(Weight::from_ref_time(27_235_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(27_235_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) @@ -111,7 +111,7 @@ impl WeightInfo for SubstrateWeight { fn undelegate(r: u32, ) -> Weight { Weight::from_ref_time(37_885_000 as RefTimeWeight) // Standard Error: 75_000 - .saturating_add(Weight::from_ref_time(24_395_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(24_395_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -172,7 +172,7 @@ impl WeightInfo for () { fn delegate(r: u32, ) -> Weight { Weight::from_ref_time(51_518_000 as RefTimeWeight) // Standard Error: 83_000 - .saturating_add(Weight::from_ref_time(27_235_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(27_235_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) @@ -184,7 +184,7 @@ impl WeightInfo for () { fn undelegate(r: u32, ) -> Weight { Weight::from_ref_time(37_885_000 as RefTimeWeight) // Standard Error: 75_000 - .saturating_add(Weight::from_ref_time(24_395_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(24_395_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 5bbc97fa16e30..bbc5b767e97ff 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -1765,7 +1765,7 @@ impl Pallet { /// # fn begin_block(now: T::BlockNumber) -> Weight { let max_block_weight = T::BlockWeights::get().max_block; - let mut weight = Weight::new(); + let mut weight = Weight::zero(); let next = Self::lowest_unbaked(); let last = Self::referendum_count(); diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index 
351ed42cca8e9..ad11526736c9c 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -88,7 +88,7 @@ impl WeightInfo for SubstrateWeight { fn second(s: u32, ) -> Weight { Weight::from_ref_time(30_923_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -98,7 +98,7 @@ impl WeightInfo for SubstrateWeight { fn vote_new(r: u32, ) -> Weight { Weight::from_ref_time(40_345_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(140_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -108,7 +108,7 @@ impl WeightInfo for SubstrateWeight { fn vote_existing(r: u32, ) -> Weight { Weight::from_ref_time(39_853_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -128,7 +128,7 @@ impl WeightInfo for SubstrateWeight { fn blacklist(p: u32, ) -> Weight { Weight::from_ref_time(57_708_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) } @@ -137,7 +137,7 @@ impl WeightInfo for SubstrateWeight { fn external_propose(v: u32, ) -> Weight { Weight::from_ref_time(10_714_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -164,7 +164,7 @@ impl WeightInfo for SubstrateWeight { fn veto_external(v: u32, ) -> Weight { Weight::from_ref_time(21_319_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -174,7 +174,7 @@ impl WeightInfo for SubstrateWeight { fn cancel_proposal(p: u32, ) -> Weight { Weight::from_ref_time(43_960_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(184_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(184_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) 
.saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -188,7 +188,7 @@ impl WeightInfo for SubstrateWeight { fn cancel_queued(r: u32, ) -> Weight { Weight::from_ref_time(24_320_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(560_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(560_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -198,7 +198,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_base(r: u32, ) -> Weight { Weight::from_ref_time(3_428_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_171_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_171_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -212,7 +212,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { Weight::from_ref_time(7_867_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_177_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_177_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -223,7 +223,7 @@ impl WeightInfo for SubstrateWeight { fn delegate(r: u32, ) -> Weight { Weight::from_ref_time(37_902_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(4_335_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_335_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) @@ -234,7 +234,7 @@ impl WeightInfo for SubstrateWeight { fn undelegate(r: u32, ) -> Weight { Weight::from_ref_time(21_272_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(4_351_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_351_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -249,7 +249,7 @@ impl WeightInfo for SubstrateWeight { fn note_preimage(b: u32, ) -> Weight { Weight::from_ref_time(27_986_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) 
.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -257,7 +257,7 @@ impl WeightInfo for SubstrateWeight { fn note_imminent_preimage(b: u32, ) -> Weight { Weight::from_ref_time(20_058_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -266,7 +266,7 @@ impl WeightInfo for SubstrateWeight { fn reap_preimage(b: u32, ) -> Weight { Weight::from_ref_time(28_619_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -276,7 +276,7 @@ impl WeightInfo for SubstrateWeight { fn unlock_remove(r: u32, ) -> Weight { Weight::from_ref_time(26_619_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -286,7 +286,7 @@ impl WeightInfo for SubstrateWeight { fn unlock_set(r: u32, ) -> Weight { Weight::from_ref_time(25_373_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -295,7 +295,7 @@ impl WeightInfo for SubstrateWeight { fn remove_vote(r: u32, ) -> Weight { Weight::from_ref_time(15_961_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -304,7 +304,7 @@ impl WeightInfo for SubstrateWeight { fn remove_other_vote(r: u32, ) -> Weight { Weight::from_ref_time(15_992_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(113_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(113_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -325,7 +325,7 @@ impl WeightInfo for () { fn second(s: u32, ) -> Weight { Weight::from_ref_time(30_923_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) 
.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -335,7 +335,7 @@ impl WeightInfo for () { fn vote_new(r: u32, ) -> Weight { Weight::from_ref_time(40_345_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(140_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(140_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -345,7 +345,7 @@ impl WeightInfo for () { fn vote_existing(r: u32, ) -> Weight { Weight::from_ref_time(39_853_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -365,7 +365,7 @@ impl WeightInfo for () { fn blacklist(p: u32, ) -> Weight { Weight::from_ref_time(57_708_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) } @@ -374,7 +374,7 @@ impl WeightInfo for () { fn external_propose(v: u32, ) -> Weight { Weight::from_ref_time(10_714_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -401,7 +401,7 @@ impl WeightInfo for () { fn veto_external(v: u32, ) -> Weight { Weight::from_ref_time(21_319_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -411,7 +411,7 @@ impl WeightInfo for () { fn cancel_proposal(p: u32, ) -> Weight { Weight::from_ref_time(43_960_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(184_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(184_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -425,7 +425,7 @@ impl WeightInfo for () { fn cancel_queued(r: u32, ) -> Weight { Weight::from_ref_time(24_320_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(560_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(560_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as 
RefTimeWeight)) } @@ -435,7 +435,7 @@ impl WeightInfo for () { fn on_initialize_base(r: u32, ) -> Weight { Weight::from_ref_time(3_428_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_171_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_171_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -449,7 +449,7 @@ impl WeightInfo for () { fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { Weight::from_ref_time(7_867_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_177_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_177_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -460,7 +460,7 @@ impl WeightInfo for () { fn delegate(r: u32, ) -> Weight { Weight::from_ref_time(37_902_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(4_335_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_335_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) @@ -471,7 +471,7 @@ impl WeightInfo for () { fn undelegate(r: u32, ) -> Weight { Weight::from_ref_time(21_272_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(4_351_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_351_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) @@ -486,7 +486,7 @@ impl WeightInfo for () { fn note_preimage(b: u32, ) -> Weight { Weight::from_ref_time(27_986_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -494,7 +494,7 @@ impl WeightInfo for () { fn note_imminent_preimage(b: u32, ) -> Weight { Weight::from_ref_time(20_058_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -503,7 +503,7 @@ impl WeightInfo for () { fn reap_preimage(b: u32, ) -> Weight { 
Weight::from_ref_time(28_619_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -513,7 +513,7 @@ impl WeightInfo for () { fn unlock_remove(r: u32, ) -> Weight { Weight::from_ref_time(26_619_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -523,7 +523,7 @@ impl WeightInfo for () { fn unlock_set(r: u32, ) -> Weight { Weight::from_ref_time(25_373_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -532,7 +532,7 @@ impl WeightInfo for () { fn remove_vote(r: u32, ) -> Weight { Weight::from_ref_time(15_961_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -541,7 +541,7 @@ impl WeightInfo for () { fn remove_other_vote(r: u32, ) -> Weight { Weight::from_ref_time(15_992_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(113_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(113_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 72b3ec9764079..48b76bbb98a8f 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -226,7 +226,7 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults(2 * frame_support::weights::constants::WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + ::with_sensible_defaults(2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); } impl pallet_balances::Config for Runtime { diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 7ceb4a20e042a..0e7af39badd44 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -104,9 +104,9 @@ impl WeightInfo for SubstrateWeight { fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { Weight::from_ref_time(3_186_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(202_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(202_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(60_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(60_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -121,9 +121,9 @@ impl WeightInfo for SubstrateWeight { fn elect_queued(a: u32, d: u32, ) -> Weight { Weight::from_ref_time(137_653_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(640_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(640_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(8 as RefTimeWeight)) } @@ -148,13 +148,13 @@ impl WeightInfo for SubstrateWeight { fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(6_907_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(6_907_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -165,13 +165,13 @@ impl WeightInfo for SubstrateWeight { fn feasibility_check(v: u32, t: u32, a: u32, d: 
u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(844_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(844_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(5_421_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_421_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(1_167_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_167_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) } } @@ -223,9 +223,9 @@ impl WeightInfo for () { fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { Weight::from_ref_time(3_186_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(202_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(202_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(60_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(60_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -240,9 +240,9 @@ impl WeightInfo for () { fn elect_queued(a: u32, d: u32, ) -> Weight { Weight::from_ref_time(137_653_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(640_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(640_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) } @@ -267,13 +267,13 @@ impl WeightInfo for () { fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(6_907_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(6_907_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(1_427_000 as 
RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -284,13 +284,13 @@ impl WeightInfo for () { fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(844_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(844_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(5_421_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_421_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(1_167_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_167_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) } } diff --git a/frame/election-provider-support/src/weights.rs b/frame/election-provider-support/src/weights.rs index 4f9e47b09a4da..ed7360940ba58 100644 --- a/frame/election-provider-support/src/weights.rs +++ b/frame/election-provider-support/src/weights.rs @@ -55,20 +55,20 @@ impl WeightInfo for SubstrateWeight { fn phragmen(v: u32, t: u32, d: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 667_000 - .saturating_add(Weight::from_ref_time(32_973_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(32_973_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 1_334_000 - .saturating_add(Weight::from_ref_time(1_334_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_334_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 60_644_000 - .saturating_add(Weight::from_ref_time(2_636_364_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_636_364_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) } fn phragmms(v: u32, t: u32, d: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 73_000 - .saturating_add(Weight::from_ref_time(21_073_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(21_073_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 146_000 - .saturating_add(Weight::from_ref_time(65_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(65_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 6_649_000 - .saturating_add(Weight::from_ref_time(1_711_424_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_711_424_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) } } @@ -77,19 +77,19 @@ impl WeightInfo for () { fn phragmen(v: u32, t: u32, d: u32, ) -> Weight { 
Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 667_000 - .saturating_add(Weight::from_ref_time(32_973_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(32_973_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 1_334_000 - .saturating_add(Weight::from_ref_time(1_334_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_334_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 60_644_000 - .saturating_add(Weight::from_ref_time(2_636_364_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_636_364_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) } fn phragmms(v: u32, t: u32, d: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 73_000 - .saturating_add(Weight::from_ref_time(21_073_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(21_073_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 146_000 - .saturating_add(Weight::from_ref_time(65_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(65_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) // Standard Error: 6_649_000 - .saturating_add(Weight::from_ref_time(1_711_424_000 as RefTimeWeight).scalar_saturating_mul(d as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_711_424_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) } } diff --git a/frame/elections-phragmen/src/migrations/v5.rs b/frame/elections-phragmen/src/migrations/v5.rs index eb96d7ddf6a99..eb35c1fae0f29 100644 --- a/frame/elections-phragmen/src/migrations/v5.rs +++ b/frame/elections-phragmen/src/migrations/v5.rs @@ -8,7 +8,7 @@ use super::super::*; /// situation where they could increase their free balance but still not be able to use their funds /// because they were less than the lock. 
pub fn migrate(to_migrate: Vec) -> Weight { - let mut weight = Weight::new(); + let mut weight = Weight::zero(); for who in to_migrate.iter() { if let Ok(mut voter) = Voting::::try_get(who) { diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index 14c69bf16f7f1..051aaa793588c 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -72,7 +72,7 @@ impl WeightInfo for SubstrateWeight { fn vote_equal(v: u32, ) -> Weight { Weight::from_ref_time(27_011_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(214_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(214_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -85,7 +85,7 @@ impl WeightInfo for SubstrateWeight { fn vote_more(v: u32, ) -> Weight { Weight::from_ref_time(40_240_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(244_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(244_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -98,7 +98,7 @@ impl WeightInfo for SubstrateWeight { fn vote_less(v: u32, ) -> Weight { Weight::from_ref_time(40_394_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(217_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(217_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -116,7 +116,7 @@ impl WeightInfo for SubstrateWeight { fn submit_candidacy(c: u32, ) -> Weight { Weight::from_ref_time(42_217_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -125,7 +125,7 @@ impl WeightInfo for SubstrateWeight { fn renounce_candidacy_candidate(c: u32, ) -> Weight { Weight::from_ref_time(46_459_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(26_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(26_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -171,7 +171,7 @@ impl WeightInfo for SubstrateWeight { fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 76_000 - .saturating_add(Weight::from_ref_time(63_721_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(63_721_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) 
.saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) @@ -191,9 +191,9 @@ impl WeightInfo for SubstrateWeight { fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 773_000 - .saturating_add(Weight::from_ref_time(81_534_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(81_534_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 51_000 - .saturating_add(Weight::from_ref_time(4_453_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_453_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(280 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) @@ -212,7 +212,7 @@ impl WeightInfo for () { fn vote_equal(v: u32, ) -> Weight { Weight::from_ref_time(27_011_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(214_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(214_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -225,7 +225,7 @@ impl WeightInfo for () { fn vote_more(v: u32, ) -> Weight { Weight::from_ref_time(40_240_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(244_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(244_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -238,7 +238,7 @@ impl WeightInfo for () { fn vote_less(v: u32, ) -> Weight { Weight::from_ref_time(40_394_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(217_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(217_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -256,7 +256,7 @@ impl WeightInfo for () { fn submit_candidacy(c: u32, ) -> Weight { Weight::from_ref_time(42_217_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -265,7 +265,7 @@ impl WeightInfo for () { fn renounce_candidacy_candidate(c: u32, ) -> Weight { Weight::from_ref_time(46_459_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(26_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(26_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -311,7 +311,7 @@ impl WeightInfo for () 
{ fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 76_000 - .saturating_add(Weight::from_ref_time(63_721_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(63_721_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) @@ -331,9 +331,9 @@ impl WeightInfo for () { fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 773_000 - .saturating_add(Weight::from_ref_time(81_534_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(81_534_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 51_000 - .saturating_add(Weight::from_ref_time(4_453_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_453_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(280 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) diff --git a/frame/examples/basic/src/weights.rs b/frame/examples/basic/src/weights.rs index e8fc44bc4b050..d27915ef1b02a 100644 --- a/frame/examples/basic/src/weights.rs +++ b/frame/examples/basic/src/weights.rs @@ -64,19 +64,19 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn set_dummy_benchmark(b: u32, ) -> Weight { Weight::from_ref_time(5_834_000 as RefTimeWeight) - .saturating_add(Weight::from_ref_time(24_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(24_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } fn accumulate_dummy(b: u32, ) -> Weight { Weight::from_ref_time(51_353_000 as RefTimeWeight) - .saturating_add(Weight::from_ref_time(14_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(14_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } fn sort_vector(x: u32, ) -> Weight { Weight::from_ref_time(2_569_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) } } @@ -84,18 +84,18 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { fn set_dummy_benchmark(b: u32, ) -> Weight { Weight::from_ref_time(5_834_000 as RefTimeWeight) - .saturating_add(Weight::from_ref_time(24_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(24_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } fn accumulate_dummy(b: u32, ) -> Weight { Weight::from_ref_time(51_353_000 as RefTimeWeight) - .saturating_add(Weight::from_ref_time(14_000 as 
RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(14_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } fn sort_vector(x: u32, ) -> Weight { Weight::from_ref_time(2_569_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) } } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index d7cd1da7910a4..5d3954ded0998 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -348,7 +348,7 @@ where // This means the format of all the event related storages must always be compatible. >::reset_events(); - let mut weight = Weight::new(); + let mut weight = Weight::zero(); if Self::runtime_upgraded() { weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); } @@ -1121,7 +1121,7 @@ mod tests { .base_extrinsic; assert_eq!( >::block_weight().total(), - base_block_weight + 3 * extrinsic_weight, + base_block_weight + 3u64 * extrinsic_weight, ); assert_eq!(>::all_extrinsics_len(), 3 * len); diff --git a/frame/gilt/src/weights.rs b/frame/gilt/src/weights.rs index 3d2b629e8b16b..124ee7593a778 100644 --- a/frame/gilt/src/weights.rs +++ b/frame/gilt/src/weights.rs @@ -62,7 +62,7 @@ impl WeightInfo for SubstrateWeight { fn place_bid(l: u32, ) -> Weight { Weight::from_ref_time(41_605_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(62_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(62_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -78,7 +78,7 @@ impl WeightInfo for SubstrateWeight { fn retract_bid(l: u32, ) -> Weight { Weight::from_ref_time(42_061_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -107,7 +107,7 @@ impl WeightInfo for SubstrateWeight { fn pursue_target_per_item(b: u32, ) -> Weight { Weight::from_ref_time(40_797_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(4_122_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_122_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) @@ -119,7 +119,7 @@ impl WeightInfo for SubstrateWeight { fn pursue_target_per_queue(q: u32, ) -> Weight { Weight::from_ref_time(14_944_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(8_135_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_135_000 as RefTimeWeight).saturating_mul(q as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as 
RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -134,7 +134,7 @@ impl WeightInfo for () { fn place_bid(l: u32, ) -> Weight { Weight::from_ref_time(41_605_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(62_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(62_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -150,7 +150,7 @@ impl WeightInfo for () { fn retract_bid(l: u32, ) -> Weight { Weight::from_ref_time(42_061_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -179,7 +179,7 @@ impl WeightInfo for () { fn pursue_target_per_item(b: u32, ) -> Weight { Weight::from_ref_time(40_797_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(4_122_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_122_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) @@ -191,7 +191,7 @@ impl WeightInfo for () { fn pursue_target_per_queue(q: u32, ) -> Weight { Weight::from_ref_time(14_944_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(8_135_000 as RefTimeWeight).scalar_saturating_mul(q as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_135_000 as RefTimeWeight).saturating_mul(q as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) diff --git a/frame/grandpa/src/default_weights.rs b/frame/grandpa/src/default_weights.rs index f21c3ddc101f7..4ca94dd576fb7 100644 --- a/frame/grandpa/src/default_weights.rs +++ b/frame/grandpa/src/default_weights.rs @@ -34,14 +34,14 @@ impl crate::WeightInfo for () { const MAX_NOMINATORS: u64 = 200; // checking membership proof - (35 * WEIGHT_PER_MICROS) - .saturating_add((175 * WEIGHT_PER_NANOS).scalar_saturating_mul(validator_count)) + (35u64 * WEIGHT_PER_MICROS) + .saturating_add((175u64 * WEIGHT_PER_NANOS).saturating_mul(validator_count)) .saturating_add(DbWeight::get().reads(5)) // check equivocation proof - .saturating_add(95 * WEIGHT_PER_MICROS) + .saturating_add(95u64 * WEIGHT_PER_MICROS) // report offence - .saturating_add(110 * WEIGHT_PER_MICROS) - .saturating_add(25 * WEIGHT_PER_MICROS * MAX_NOMINATORS) + .saturating_add(110u64 * WEIGHT_PER_MICROS) + .saturating_add(25u64 * WEIGHT_PER_MICROS * MAX_NOMINATORS) .saturating_add(DbWeight::get().reads(14 + 3 * MAX_NOMINATORS)) .saturating_add(DbWeight::get().writes(10 + 3 * MAX_NOMINATORS)) // fetching set id -> session index mappings @@ -49,6 +49,6 @@ impl crate::WeightInfo for () { } 
fn note_stalled() -> Weight { - (3 * WEIGHT_PER_MICROS).saturating_add(DbWeight::get().writes(1)) + (3u64 * WEIGHT_PER_MICROS).saturating_add(DbWeight::get().writes(1)) } } diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs index 780abf1bb01df..1aa8d7963223e 100644 --- a/frame/identity/src/weights.rs +++ b/frame/identity/src/weights.rs @@ -71,7 +71,7 @@ impl WeightInfo for SubstrateWeight { fn add_registrar(r: u32, ) -> Weight { Weight::from_ref_time(16_649_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(241_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(241_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -81,9 +81,9 @@ impl WeightInfo for SubstrateWeight { fn set_identity(r: u32, x: u32, ) -> Weight { Weight::from_ref_time(31_322_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(252_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(252_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(312_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(312_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -94,7 +94,7 @@ impl WeightInfo for SubstrateWeight { fn set_subs_new(s: u32, ) -> Weight { Weight::from_ref_time(30_012_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_005_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_005_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -107,7 +107,7 @@ impl WeightInfo for SubstrateWeight { fn set_subs_old(p: u32, ) -> Weight { Weight::from_ref_time(29_623_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_100_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_100_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) @@ -121,11 +121,11 @@ impl WeightInfo for SubstrateWeight { fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { Weight::from_ref_time(34_370_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - 
.saturating_add(Weight::from_ref_time(189_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(189_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -137,9 +137,9 @@ impl WeightInfo for SubstrateWeight { fn request_judgement(r: u32, x: u32, ) -> Weight { Weight::from_ref_time(34_759_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(251_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(340_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(340_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -149,9 +149,9 @@ impl WeightInfo for SubstrateWeight { fn cancel_request(r: u32, x: u32, ) -> Weight { Weight::from_ref_time(32_254_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(159_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(159_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(347_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(347_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -160,7 +160,7 @@ impl WeightInfo for SubstrateWeight { fn set_fee(r: u32, ) -> Weight { Weight::from_ref_time(7_858_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(190_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(190_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -169,7 +169,7 @@ impl WeightInfo for SubstrateWeight { fn set_account_id(r: u32, ) -> Weight { Weight::from_ref_time(8_011_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(187_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(187_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -178,7 +178,7 @@ impl WeightInfo for SubstrateWeight { fn set_fields(r: u32, ) -> Weight { Weight::from_ref_time(7_970_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -189,9 +189,9 @@ impl WeightInfo for SubstrateWeight { fn provide_judgement(r: u32, x: 
u32, ) -> Weight { Weight::from_ref_time(24_730_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(196_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(196_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(341_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(341_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -205,11 +205,11 @@ impl WeightInfo for SubstrateWeight { fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { Weight::from_ref_time(44_988_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_126_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_126_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -221,7 +221,7 @@ impl WeightInfo for SubstrateWeight { fn add_sub(s: u32, ) -> Weight { Weight::from_ref_time(36_768_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -231,7 +231,7 @@ impl WeightInfo for SubstrateWeight { fn rename_sub(s: u32, ) -> Weight { Weight::from_ref_time(13_474_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -242,7 +242,7 @@ impl WeightInfo for SubstrateWeight { fn remove_sub(s: u32, ) -> Weight { Weight::from_ref_time(37_720_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -252,7 +252,7 @@ impl WeightInfo for SubstrateWeight { fn quit_sub(s: u32, ) -> Weight { Weight::from_ref_time(26_848_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as 
RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -265,7 +265,7 @@ impl WeightInfo for () { fn add_registrar(r: u32, ) -> Weight { Weight::from_ref_time(16_649_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(241_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(241_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -275,9 +275,9 @@ impl WeightInfo for () { fn set_identity(r: u32, x: u32, ) -> Weight { Weight::from_ref_time(31_322_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(252_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(252_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(312_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(312_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -288,7 +288,7 @@ impl WeightInfo for () { fn set_subs_new(s: u32, ) -> Weight { Weight::from_ref_time(30_012_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_005_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_005_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -301,7 +301,7 @@ impl WeightInfo for () { fn set_subs_old(p: u32, ) -> Weight { Weight::from_ref_time(29_623_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_100_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_100_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) @@ -315,11 +315,11 @@ impl WeightInfo for () { fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { Weight::from_ref_time(34_370_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(189_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(189_000 as RefTimeWeight).saturating_mul(x as 
RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -331,9 +331,9 @@ impl WeightInfo for () { fn request_judgement(r: u32, x: u32, ) -> Weight { Weight::from_ref_time(34_759_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(251_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(251_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(340_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(340_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -343,9 +343,9 @@ impl WeightInfo for () { fn cancel_request(r: u32, x: u32, ) -> Weight { Weight::from_ref_time(32_254_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(159_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(159_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(347_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(347_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -354,7 +354,7 @@ impl WeightInfo for () { fn set_fee(r: u32, ) -> Weight { Weight::from_ref_time(7_858_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(190_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(190_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -363,7 +363,7 @@ impl WeightInfo for () { fn set_account_id(r: u32, ) -> Weight { Weight::from_ref_time(8_011_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(187_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(187_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -372,7 +372,7 @@ impl WeightInfo for () { fn set_fields(r: u32, ) -> Weight { Weight::from_ref_time(7_970_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -383,9 +383,9 @@ impl WeightInfo for () { fn provide_judgement(r: u32, x: u32, ) -> Weight { Weight::from_ref_time(24_730_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(196_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + 
.saturating_add(Weight::from_ref_time(196_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(341_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(341_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -399,11 +399,11 @@ impl WeightInfo for () { fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { Weight::from_ref_time(44_988_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_126_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_126_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -415,7 +415,7 @@ impl WeightInfo for () { fn add_sub(s: u32, ) -> Weight { Weight::from_ref_time(36_768_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -425,7 +425,7 @@ impl WeightInfo for () { fn rename_sub(s: u32, ) -> Weight { Weight::from_ref_time(13_474_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -436,7 +436,7 @@ impl WeightInfo for () { fn remove_sub(s: u32, ) -> Weight { Weight::from_ref_time(37_720_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -446,7 +446,7 @@ impl WeightInfo for () { fn quit_sub(s: u32, ) -> Weight { Weight::from_ref_time(26_848_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } diff --git 
a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs index 09fbc55854288..acad1e1165d4c 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -58,9 +58,9 @@ impl WeightInfo for SubstrateWeight { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { Weight::from_ref_time(79_225_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(41_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(41_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(293_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(293_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -76,9 +76,9 @@ impl WeightInfo for () { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { Weight::from_ref_time(79_225_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(41_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(41_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(293_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(293_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs index f646ca02a0377..10eda18e0e24f 100644 --- a/frame/lottery/src/weights.rs +++ b/frame/lottery/src/weights.rs @@ -71,7 +71,7 @@ impl WeightInfo for SubstrateWeight { fn set_calls(n: u32, ) -> Weight { Weight::from_ref_time(12_556_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(295_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(295_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Lottery Lottery (r:1 w:1) @@ -129,7 +129,7 @@ impl WeightInfo for () { fn set_calls(n: u32, ) -> Weight { Weight::from_ref_time(12_556_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(295_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(295_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Lottery Lottery (r:1 w:1) diff --git a/frame/membership/src/weights.rs b/frame/membership/src/weights.rs index 54b67e5b68a8a..636f8eba64694 100644 --- a/frame/membership/src/weights.rs +++ b/frame/membership/src/weights.rs @@ -63,7 +63,7 @@ impl WeightInfo for SubstrateWeight { fn add_member(m: u32, ) -> Weight { Weight::from_ref_time(15_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(51_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -75,7 +75,7 
@@ impl WeightInfo for SubstrateWeight { fn remove_member(m: u32, ) -> Weight { Weight::from_ref_time(18_005_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(45_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -87,7 +87,7 @@ impl WeightInfo for SubstrateWeight { fn swap_member(m: u32, ) -> Weight { Weight::from_ref_time(18_029_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -99,7 +99,7 @@ impl WeightInfo for SubstrateWeight { fn reset_member(m: u32, ) -> Weight { Weight::from_ref_time(18_105_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -111,7 +111,7 @@ impl WeightInfo for SubstrateWeight { fn change_key(m: u32, ) -> Weight { Weight::from_ref_time(18_852_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } @@ -121,7 +121,7 @@ impl WeightInfo for SubstrateWeight { fn set_prime(m: u32, ) -> Weight { Weight::from_ref_time(4_869_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(28_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(28_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -130,7 +130,7 @@ impl WeightInfo for SubstrateWeight { fn clear_prime(m: u32, ) -> Weight { Weight::from_ref_time(1_593_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } } @@ -144,7 +144,7 @@ impl WeightInfo for () { fn add_member(m: u32, ) -> Weight { Weight::from_ref_time(15_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(51_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -156,7 +156,7 @@ impl WeightInfo for () { fn remove_member(m: u32, ) -> Weight { Weight::from_ref_time(18_005_000 as 
RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(45_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -168,7 +168,7 @@ impl WeightInfo for () { fn swap_member(m: u32, ) -> Weight { Weight::from_ref_time(18_029_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -180,7 +180,7 @@ impl WeightInfo for () { fn reset_member(m: u32, ) -> Weight { Weight::from_ref_time(18_105_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -192,7 +192,7 @@ impl WeightInfo for () { fn change_key(m: u32, ) -> Weight { Weight::from_ref_time(18_852_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } @@ -202,7 +202,7 @@ impl WeightInfo for () { fn set_prime(m: u32, ) -> Weight { Weight::from_ref_time(4_869_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(28_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(28_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -211,7 +211,7 @@ impl WeightInfo for () { fn clear_prime(m: u32, ) -> Weight { Weight::from_ref_time(1_593_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } } diff --git a/frame/merkle-mountain-range/src/default_weights.rs b/frame/merkle-mountain-range/src/default_weights.rs index e7a4b6ab31c4a..e513e2197f1c6 100644 --- a/frame/merkle-mountain-range/src/default_weights.rs +++ b/frame/merkle-mountain-range/src/default_weights.rs @@ -28,7 +28,7 @@ impl crate::WeightInfo for () { // Reading the parent hash. let leaf_weight = DbWeight::get().reads(1); // Blake2 hash cost. - let hash_weight = 2 * WEIGHT_PER_NANOS; + let hash_weight = 2u64 * WEIGHT_PER_NANOS; // No-op hook. 
let hook_weight = Weight::zero(); diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index 0b580ec82b640..455326b71de8c 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -67,9 +67,9 @@ impl WeightInfo for SubstrateWeight { fn as_multi_create(s: u32, z: u32, ) -> Weight { Weight::from_ref_time(36_535_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(99_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(99_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -79,9 +79,9 @@ impl WeightInfo for SubstrateWeight { fn as_multi_create_store(s: u32, z: u32, ) -> Weight { Weight::from_ref_time(39_918_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(95_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -89,9 +89,9 @@ impl WeightInfo for SubstrateWeight { fn as_multi_approve(s: u32, z: u32, ) -> Weight { Weight::from_ref_time(25_524_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(94_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(94_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -100,9 +100,9 @@ impl WeightInfo for SubstrateWeight { fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { Weight::from_ref_time(39_923_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(91_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(91_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -112,9 +112,9 @@ impl WeightInfo for SubstrateWeight { fn as_multi_complete(s: u32, z: u32, ) -> Weight { Weight::from_ref_time(45_877_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(146_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(146_000 
as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -123,7 +123,7 @@ impl WeightInfo for SubstrateWeight { fn approve_as_multi_create(s: u32, ) -> Weight { Weight::from_ref_time(34_309_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -132,7 +132,7 @@ impl WeightInfo for SubstrateWeight { fn approve_as_multi_approve(s: u32, ) -> Weight { Weight::from_ref_time(22_848_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -142,7 +142,7 @@ impl WeightInfo for SubstrateWeight { fn approve_as_multi_complete(s: u32, ) -> Weight { Weight::from_ref_time(63_239_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -151,7 +151,7 @@ impl WeightInfo for SubstrateWeight { fn cancel_as_multi(s: u32, ) -> Weight { Weight::from_ref_time(51_254_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -167,9 +167,9 @@ impl WeightInfo for () { fn as_multi_create(s: u32, z: u32, ) -> Weight { Weight::from_ref_time(36_535_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(99_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(99_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -179,9 +179,9 @@ impl WeightInfo for () { fn as_multi_create_store(s: u32, z: u32, ) -> Weight { Weight::from_ref_time(39_918_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(95_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_000 as 
RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -189,9 +189,9 @@ impl WeightInfo for () { fn as_multi_approve(s: u32, z: u32, ) -> Weight { Weight::from_ref_time(25_524_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(94_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(94_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -200,9 +200,9 @@ impl WeightInfo for () { fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { Weight::from_ref_time(39_923_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(91_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(91_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -212,9 +212,9 @@ impl WeightInfo for () { fn as_multi_complete(s: u32, z: u32, ) -> Weight { Weight::from_ref_time(45_877_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(146_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(146_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -223,7 +223,7 @@ impl WeightInfo for () { fn approve_as_multi_create(s: u32, ) -> Weight { Weight::from_ref_time(34_309_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -232,7 +232,7 @@ impl WeightInfo for () { fn approve_as_multi_approve(s: u32, ) -> Weight { Weight::from_ref_time(22_848_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) 
.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -242,7 +242,7 @@ impl WeightInfo for () { fn approve_as_multi_complete(s: u32, ) -> Weight { Weight::from_ref_time(63_239_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -251,7 +251,7 @@ impl WeightInfo for () { fn cancel_as_multi(s: u32, ) -> Weight { Weight::from_ref_time(51_254_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } diff --git a/frame/nomination-pools/src/weights.rs b/frame/nomination-pools/src/weights.rs index 1f0d2ce8cddc4..f24aeb6a0ee0d 100644 --- a/frame/nomination-pools/src/weights.rs +++ b/frame/nomination-pools/src/weights.rs @@ -148,7 +148,7 @@ impl WeightInfo for SubstrateWeight { fn pool_withdraw_unbonded(s: u32, ) -> Weight { Weight::from_ref_time(41_928_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -164,7 +164,7 @@ impl WeightInfo for SubstrateWeight { fn withdraw_unbonded_update(s: u32, ) -> Weight { Weight::from_ref_time(81_611_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(7 as RefTimeWeight)) } @@ -236,7 +236,7 @@ impl WeightInfo for SubstrateWeight { fn nominate(n: u32, ) -> Weight { Weight::from_ref_time(48_829_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(2_204_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_204_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) @@ -255,7 +255,7 @@ impl WeightInfo for SubstrateWeight { fn set_metadata(n: u32, ) -> Weight { Weight::from_ref_time(14_519_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -373,7 +373,7 @@ impl WeightInfo for 
() { fn pool_withdraw_unbonded(s: u32, ) -> Weight { Weight::from_ref_time(41_928_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -389,7 +389,7 @@ impl WeightInfo for () { fn withdraw_unbonded_update(s: u32, ) -> Weight { Weight::from_ref_time(81_611_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(7 as RefTimeWeight)) } @@ -461,7 +461,7 @@ impl WeightInfo for () { fn nominate(n: u32, ) -> Weight { Weight::from_ref_time(48_829_000 as RefTimeWeight) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(2_204_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_204_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) @@ -480,7 +480,7 @@ impl WeightInfo for () { fn set_metadata(n: u32, ) -> Weight { Weight::from_ref_time(14_519_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 312bc4e18f413..6d66f5251c78c 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -41,7 +41,7 @@ type Balance = u64; parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max( - 2 * WEIGHT_PER_SECOND + 2u64 * WEIGHT_PER_SECOND ); } diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index d9ecf44ad8734..a578a6082cf3e 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -86,7 +86,7 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND); } impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; diff --git a/frame/preimage/src/weights.rs b/frame/preimage/src/weights.rs index 183b704ec705d..0d93611a1f097 100644 --- a/frame/preimage/src/weights.rs +++ b/frame/preimage/src/weights.rs @@ -66,7 +66,7 @@ impl WeightInfo for SubstrateWeight { fn note_preimage(s: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -75,7 +75,7 @@ impl WeightInfo for SubstrateWeight { fn note_requested_preimage(s: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -84,7 +84,7 @@ impl WeightInfo for SubstrateWeight { fn note_no_deposit_preimage(s: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -155,7 +155,7 @@ impl WeightInfo for () { fn note_preimage(s: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -164,7 +164,7 @@ impl WeightInfo for () { fn note_requested_preimage(s: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -173,7 +173,7 @@ impl WeightInfo for () { fn note_no_deposit_preimage(s: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 
119df271e0d55..5b332d98db09b 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -63,7 +63,7 @@ impl WeightInfo for SubstrateWeight { fn proxy(p: u32, ) -> Weight { Weight::from_ref_time(17_768_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(76_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(76_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:0) @@ -72,9 +72,9 @@ impl WeightInfo for SubstrateWeight { fn proxy_announced(a: u32, p: u32, ) -> Weight { Weight::from_ref_time(35_682_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -83,9 +83,9 @@ impl WeightInfo for SubstrateWeight { fn remove_announcement(a: u32, p: u32, ) -> Weight { Weight::from_ref_time(25_586_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(18_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(18_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -94,9 +94,9 @@ impl WeightInfo for SubstrateWeight { fn reject_announcement(a: u32, p: u32, ) -> Weight { Weight::from_ref_time(25_794_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(13_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(13_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -106,9 +106,9 @@ impl WeightInfo for SubstrateWeight { fn announce(a: u32, p: u32, ) -> Weight { Weight::from_ref_time(33_002_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(163_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(163_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) 
.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -116,7 +116,7 @@ impl WeightInfo for SubstrateWeight { fn add_proxy(p: u32, ) -> Weight { Weight::from_ref_time(28_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(105_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(105_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -124,7 +124,7 @@ impl WeightInfo for SubstrateWeight { fn remove_proxy(p: u32, ) -> Weight { Weight::from_ref_time(28_128_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -132,7 +132,7 @@ impl WeightInfo for SubstrateWeight { fn remove_proxies(p: u32, ) -> Weight { Weight::from_ref_time(24_066_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(81_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(81_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -141,7 +141,7 @@ impl WeightInfo for SubstrateWeight { fn anonymous(p: u32, ) -> Weight { Weight::from_ref_time(31_077_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -149,7 +149,7 @@ impl WeightInfo for SubstrateWeight { fn kill_anonymous(p: u32, ) -> Weight { Weight::from_ref_time(24_657_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -161,7 +161,7 @@ impl WeightInfo for () { fn proxy(p: u32, ) -> Weight { Weight::from_ref_time(17_768_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(76_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(76_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } // Storage: Proxy Proxies (r:1 w:0) @@ -170,9 +170,9 @@ impl WeightInfo for () { fn proxy_announced(a: u32, p: u32, ) -> Weight { Weight::from_ref_time(35_682_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as 
RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -181,9 +181,9 @@ impl WeightInfo for () { fn remove_announcement(a: u32, p: u32, ) -> Weight { Weight::from_ref_time(25_586_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(18_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(18_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -192,9 +192,9 @@ impl WeightInfo for () { fn reject_announcement(a: u32, p: u32, ) -> Weight { Weight::from_ref_time(25_794_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(13_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(13_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -204,9 +204,9 @@ impl WeightInfo for () { fn announce(a: u32, p: u32, ) -> Weight { Weight::from_ref_time(33_002_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(163_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(163_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -214,7 +214,7 @@ impl WeightInfo for () { fn add_proxy(p: u32, ) -> Weight { Weight::from_ref_time(28_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(105_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(105_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -222,7 +222,7 @@ impl WeightInfo for () { fn remove_proxy(p: u32, ) -> Weight { Weight::from_ref_time(28_128_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -230,7 +230,7 @@ impl 
WeightInfo for () { fn remove_proxies(p: u32, ) -> Weight { Weight::from_ref_time(24_066_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(81_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(81_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -239,7 +239,7 @@ impl WeightInfo for () { fn anonymous(p: u32, ) -> Weight { Weight::from_ref_time(31_077_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -247,7 +247,7 @@ impl WeightInfo for () { fn kill_anonymous(p: u32, ) -> Weight { Weight::from_ref_time(24_657_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } diff --git a/frame/ranked-collective/src/weights.rs b/frame/ranked-collective/src/weights.rs index a0309daea2263..052a9bfdcc9e1 100644 --- a/frame/ranked-collective/src/weights.rs +++ b/frame/ranked-collective/src/weights.rs @@ -72,7 +72,7 @@ impl WeightInfo for SubstrateWeight { fn remove_member(r: u32, ) -> Weight { Weight::from_ref_time(16_855_000 as RefTimeWeight) // Standard Error: 27_000 - .saturating_add(Weight::from_ref_time(8_107_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_107_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) @@ -85,7 +85,7 @@ impl WeightInfo for SubstrateWeight { fn promote_member(r: u32, ) -> Weight { Weight::from_ref_time(11_936_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(9_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } @@ -96,7 +96,7 @@ impl WeightInfo for SubstrateWeight { fn demote_member(r: u32, ) -> Weight { Weight::from_ref_time(17_582_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } @@ -114,7 +114,7 @@ impl WeightInfo for SubstrateWeight { fn cleanup_poll(n: u32, ) -> Weight { Weight::from_ref_time(6_188_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(867_000 as 
RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } @@ -138,7 +138,7 @@ impl WeightInfo for () { fn remove_member(r: u32, ) -> Weight { Weight::from_ref_time(16_855_000 as RefTimeWeight) // Standard Error: 27_000 - .saturating_add(Weight::from_ref_time(8_107_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_107_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) @@ -151,7 +151,7 @@ impl WeightInfo for () { fn promote_member(r: u32, ) -> Weight { Weight::from_ref_time(11_936_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(9_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } @@ -162,7 +162,7 @@ impl WeightInfo for () { fn demote_member(r: u32, ) -> Weight { Weight::from_ref_time(17_582_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } @@ -180,7 +180,7 @@ impl WeightInfo for () { fn cleanup_poll(n: u32, ) -> Weight { Weight::from_ref_time(6_188_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) } diff --git a/frame/recovery/src/weights.rs b/frame/recovery/src/weights.rs index 8b82454d5849d..06a4759938964 100644 --- a/frame/recovery/src/weights.rs +++ b/frame/recovery/src/weights.rs @@ -72,7 +72,7 @@ impl WeightInfo for SubstrateWeight { fn create_recovery(n: u32, ) -> Weight { Weight::from_ref_time(28_217_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -88,7 +88,7 @@ impl WeightInfo for SubstrateWeight { fn vouch_recovery(n: u32, ) -> Weight { Weight::from_ref_time(22_038_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(307_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(307_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) 
.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -98,7 +98,7 @@ impl WeightInfo for SubstrateWeight { fn claim_recovery(n: u32, ) -> Weight { Weight::from_ref_time(28_621_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(353_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(353_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -107,7 +107,7 @@ impl WeightInfo for SubstrateWeight { fn close_recovery(n: u32, ) -> Weight { Weight::from_ref_time(33_287_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(264_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(264_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -116,7 +116,7 @@ impl WeightInfo for SubstrateWeight { fn remove_recovery(n: u32, ) -> Weight { Weight::from_ref_time(31_964_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(222_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(222_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -144,7 +144,7 @@ impl WeightInfo for () { fn create_recovery(n: u32, ) -> Weight { Weight::from_ref_time(28_217_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -160,7 +160,7 @@ impl WeightInfo for () { fn vouch_recovery(n: u32, ) -> Weight { Weight::from_ref_time(22_038_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(307_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(307_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -170,7 +170,7 @@ impl WeightInfo for () { fn claim_recovery(n: u32, ) -> Weight { Weight::from_ref_time(28_621_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(353_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(353_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -179,7 +179,7 @@ impl WeightInfo for () { fn close_recovery(n: u32, ) -> Weight { Weight::from_ref_time(33_287_000 as RefTimeWeight) // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(264_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(264_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) 
.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -188,7 +188,7 @@ impl WeightInfo for () { fn remove_recovery(n: u32, ) -> Weight { Weight::from_ref_time(31_964_000 as RefTimeWeight) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(222_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(222_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } diff --git a/frame/referenda/src/branch.rs b/frame/referenda/src/branch.rs index 172b5af999df5..d3744979fc547 100644 --- a/frame/referenda/src/branch.rs +++ b/frame/referenda/src/branch.rs @@ -83,7 +83,7 @@ impl ServiceBranch { /// Return the maximum possible weight of the `nudge` function. pub fn max_weight_of_nudge, I: 'static>() -> frame_support::weights::Weight { - Weight::new() + Weight::zero() .max(T::WeightInfo::nudge_referendum_no_deposit()) .max(T::WeightInfo::nudge_referendum_preparing()) .max(T::WeightInfo::nudge_referendum_queued()) @@ -131,7 +131,7 @@ impl ServiceBranch { /// Return the maximum possible weight of the `place_decision_deposit` function. pub fn max_weight_of_deposit, I: 'static>() -> frame_support::weights::Weight { - Weight::new() + Weight::zero() .max(T::WeightInfo::place_decision_deposit_preparing()) .max(T::WeightInfo::place_decision_deposit_queued()) .max(T::WeightInfo::place_decision_deposit_not_queued()) @@ -172,7 +172,7 @@ impl OneFewerDecidingBranch { /// Return the maximum possible weight of the `one_fewer_deciding` function. pub fn max_weight, I: 'static>() -> frame_support::weights::Weight { - Weight::new() + Weight::zero() .max(T::WeightInfo::one_fewer_deciding_queue_empty()) .max(T::WeightInfo::one_fewer_deciding_passing()) .max(T::WeightInfo::one_fewer_deciding_failing()) diff --git a/frame/remark/src/weights.rs b/frame/remark/src/weights.rs index a098670ccf100..f33ef47cd67e8 100644 --- a/frame/remark/src/weights.rs +++ b/frame/remark/src/weights.rs @@ -54,7 +54,7 @@ impl WeightInfo for SubstrateWeight { fn store(l: u32, ) -> Weight { Weight::from_ref_time(13_140_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) } } @@ -65,7 +65,7 @@ impl WeightInfo for () { fn store(l: u32, ) -> Weight { Weight::from_ref_time(13_140_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) } } diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index 56f9298dd4b0c..999a810d3b7c1 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -460,7 +460,7 @@ fn scheduler_respects_priority_ordering_with_soft_deadlines() { root(), Call::Logger(LoggerCall::log { i: 2600, - weight: max_weight / 2 - item_weight + Weight::one() + weight: max_weight / 2 - item_weight + Weight::from_ref_time(1) }) .into(), )); @@ -487,7 +487,8 @@ fn on_initialize_weight_is_correct() { None, 255, root(), - 
Call::Logger(LoggerCall::log { i: 3, weight: call_weight + Weight::one() }).into(), + Call::Logger(LoggerCall::log { i: 3, weight: call_weight + Weight::from_ref_time(1) }) + .into(), )); // Anon Periodic assert_ok!(Scheduler::do_schedule( diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index f201c89eaf278..9688c9f1a2f9f 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -70,7 +70,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { Weight::from_ref_time(9_994_000 as RefTimeWeight) // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(19_843_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(19_843_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -83,7 +83,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_named_resolved(s: u32, ) -> Weight { Weight::from_ref_time(10_318_000 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(15_451_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(15_451_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -95,7 +95,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_periodic_resolved(s: u32, ) -> Weight { Weight::from_ref_time(11_675_000 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(17_019_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_019_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -107,7 +107,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_resolved(s: u32, ) -> Weight { Weight::from_ref_time(11_934_000 as RefTimeWeight) // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(14_134_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(14_134_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -119,7 +119,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_named_aborted(s: u32, ) -> Weight { Weight::from_ref_time(7_279_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(5_388_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_388_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) 
.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -130,7 +130,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_aborted(s: u32, ) -> Weight { Weight::from_ref_time(8_619_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_969_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_969_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -140,7 +140,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_periodic_named(s: u32, ) -> Weight { Weight::from_ref_time(16_129_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(9_772_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_772_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -150,7 +150,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_periodic(s: u32, ) -> Weight { Weight::from_ref_time(15_785_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(7_208_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(7_208_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -161,7 +161,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_named(s: u32, ) -> Weight { Weight::from_ref_time(15_778_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(5_597_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_597_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -170,7 +170,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize(s: u32, ) -> Weight { Weight::from_ref_time(15_912_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(4_530_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_530_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -178,7 +178,7 @@ impl WeightInfo for SubstrateWeight { fn schedule(s: u32, ) -> Weight { Weight::from_ref_time(18_013_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as 
RefTimeWeight)) } @@ -187,7 +187,7 @@ impl WeightInfo for SubstrateWeight { fn cancel(s: u32, ) -> Weight { Weight::from_ref_time(18_131_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -196,7 +196,7 @@ impl WeightInfo for SubstrateWeight { fn schedule_named(s: u32, ) -> Weight { Weight::from_ref_time(21_230_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(98_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(98_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -205,7 +205,7 @@ impl WeightInfo for SubstrateWeight { fn cancel_named(s: u32, ) -> Weight { Weight::from_ref_time(20_139_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -220,7 +220,7 @@ impl WeightInfo for () { fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { Weight::from_ref_time(9_994_000 as RefTimeWeight) // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(19_843_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(19_843_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -233,7 +233,7 @@ impl WeightInfo for () { fn on_initialize_named_resolved(s: u32, ) -> Weight { Weight::from_ref_time(10_318_000 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(15_451_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(15_451_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -245,7 +245,7 @@ impl WeightInfo for () { fn on_initialize_periodic_resolved(s: u32, ) -> Weight { Weight::from_ref_time(11_675_000 as RefTimeWeight) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(17_019_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_019_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -257,7 +257,7 @@ impl WeightInfo for () { fn on_initialize_resolved(s: u32, ) -> Weight { Weight::from_ref_time(11_934_000 as 
RefTimeWeight) // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(14_134_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(14_134_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -269,7 +269,7 @@ impl WeightInfo for () { fn on_initialize_named_aborted(s: u32, ) -> Weight { Weight::from_ref_time(7_279_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(5_388_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_388_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) @@ -280,7 +280,7 @@ impl WeightInfo for () { fn on_initialize_aborted(s: u32, ) -> Weight { Weight::from_ref_time(8_619_000 as RefTimeWeight) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_969_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_969_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) @@ -290,7 +290,7 @@ impl WeightInfo for () { fn on_initialize_periodic_named(s: u32, ) -> Weight { Weight::from_ref_time(16_129_000 as RefTimeWeight) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(9_772_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_772_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -300,7 +300,7 @@ impl WeightInfo for () { fn on_initialize_periodic(s: u32, ) -> Weight { Weight::from_ref_time(15_785_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(7_208_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(7_208_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) @@ -311,7 +311,7 @@ impl WeightInfo for () { fn on_initialize_named(s: u32, ) -> Weight { Weight::from_ref_time(15_778_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(5_597_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_597_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as 
RefTimeWeight))) @@ -320,7 +320,7 @@ impl WeightInfo for () { fn on_initialize(s: u32, ) -> Weight { Weight::from_ref_time(15_912_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(4_530_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_530_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -328,7 +328,7 @@ impl WeightInfo for () { fn schedule(s: u32, ) -> Weight { Weight::from_ref_time(18_013_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -337,7 +337,7 @@ impl WeightInfo for () { fn cancel(s: u32, ) -> Weight { Weight::from_ref_time(18_131_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -346,7 +346,7 @@ impl WeightInfo for () { fn schedule_named(s: u32, ) -> Weight { Weight::from_ref_time(21_230_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(98_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(98_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -355,7 +355,7 @@ impl WeightInfo for () { fn cancel_named(s: u32, ) -> Weight { Weight::from_ref_time(20_139_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 626562f7a45f8..9717486644761 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -614,7 +614,7 @@ pub mod pallet { fn on_initialize(n: T::BlockNumber) -> Weight { let mut members = vec![]; - let mut weight = Weight::new(); + let mut weight = Weight::zero(); let weights = T::BlockWeights::get(); // Run a candidate/membership rotation diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index cda606008a9cc..107d494a9711b 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -4250,7 +4250,7 @@ mod election_data_provider { fn targets_2sec_block() { let mut validators = 1000; while ::WeightInfo::get_npos_targets(validators) < - 2 * frame_support::weights::constants::WEIGHT_PER_SECOND + 2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND { validators += 1; } @@ -4268,7 +4268,7 @@ mod election_data_provider { let mut nominators = 1000; while ::WeightInfo::get_npos_voters(validators, nominators, 
slashing_spans) < - 2 * frame_support::weights::constants::WEIGHT_PER_SECOND + 2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND { nominators += 1; } diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index c5f584054d1f4..82ab44f428372 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -121,7 +121,7 @@ impl WeightInfo for SubstrateWeight { fn withdraw_unbonded_update(s: u32, ) -> Weight { Weight::from_ref_time(35_763_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(57_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(57_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -164,7 +164,7 @@ impl WeightInfo for SubstrateWeight { fn kick(k: u32, ) -> Weight { Weight::from_ref_time(23_264_000 as RefTimeWeight) // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(8_006_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_006_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) @@ -183,7 +183,7 @@ impl WeightInfo for SubstrateWeight { fn nominate(n: u32, ) -> Weight { Weight::from_ref_time(56_596_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(3_644_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_644_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) @@ -238,7 +238,7 @@ impl WeightInfo for SubstrateWeight { fn set_invulnerables(v: u32, ) -> Weight { Weight::from_ref_time(4_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(10_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(10_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:1) @@ -257,7 +257,7 @@ impl WeightInfo for SubstrateWeight { fn force_unstake(s: u32, ) -> Weight { Weight::from_ref_time(65_265_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_029_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_029_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -266,7 +266,7 @@ impl WeightInfo for SubstrateWeight { fn cancel_deferred_slash(s: u32, ) -> Weight { Weight::from_ref_time(903_312_000 as RefTimeWeight) // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(4_968_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_968_000 
as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -283,7 +283,7 @@ impl WeightInfo for SubstrateWeight { fn payout_stakers_dead_controller(n: u32, ) -> Weight { Weight::from_ref_time(87_569_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(24_232_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(24_232_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(10 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -303,7 +303,7 @@ impl WeightInfo for SubstrateWeight { fn payout_stakers_alive_staked(n: u32, ) -> Weight { Weight::from_ref_time(98_839_000 as RefTimeWeight) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(34_480_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(34_480_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) @@ -318,7 +318,7 @@ impl WeightInfo for SubstrateWeight { fn rebond(l: u32, ) -> Weight { Weight::from_ref_time(74_865_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(64_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(64_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(9 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(8 as RefTimeWeight)) } @@ -334,7 +334,7 @@ impl WeightInfo for SubstrateWeight { fn set_history_depth(e: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 62_000 - .saturating_add(Weight::from_ref_time(22_829_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(22_829_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((7 as RefTimeWeight).saturating_mul(e as RefTimeWeight))) @@ -355,7 +355,7 @@ impl WeightInfo for SubstrateWeight { fn reap_stash(s: u32, ) -> Weight { Weight::from_ref_time(70_933_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_021_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_021_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -381,9 +381,9 @@ impl WeightInfo for SubstrateWeight { fn new_era(v: u32, n: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 897_000 - .saturating_add(Weight::from_ref_time(213_100_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(213_100_000 as RefTimeWeight).saturating_mul(v as 
RefTimeWeight)) // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(31_123_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(31_123_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(208 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) @@ -401,11 +401,11 @@ impl WeightInfo for SubstrateWeight { fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(23_745_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(23_745_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(22_497_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(22_497_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 3_968_000 - .saturating_add(Weight::from_ref_time(20_676_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(20_676_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(202 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) @@ -415,7 +415,7 @@ impl WeightInfo for SubstrateWeight { fn get_npos_targets(v: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 36_000 - .saturating_add(Weight::from_ref_time(8_097_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_097_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } @@ -507,7 +507,7 @@ impl WeightInfo for () { fn withdraw_unbonded_update(s: u32, ) -> Weight { Weight::from_ref_time(35_763_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(57_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(57_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -550,7 +550,7 @@ impl WeightInfo for () { fn kick(k: u32, ) -> Weight { Weight::from_ref_time(23_264_000 as RefTimeWeight) // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(8_006_000 as RefTimeWeight).scalar_saturating_mul(k as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_006_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) @@ -569,7 +569,7 @@ impl WeightInfo for () { fn nominate(n: u32, ) -> Weight { Weight::from_ref_time(56_596_000 as RefTimeWeight) // Standard Error: 14_000 - 
.saturating_add(Weight::from_ref_time(3_644_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_644_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) @@ -624,7 +624,7 @@ impl WeightInfo for () { fn set_invulnerables(v: u32, ) -> Weight { Weight::from_ref_time(4_318_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(10_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(10_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } // Storage: Staking Bonded (r:1 w:1) @@ -643,7 +643,7 @@ impl WeightInfo for () { fn force_unstake(s: u32, ) -> Weight { Weight::from_ref_time(65_265_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_029_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_029_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -652,7 +652,7 @@ impl WeightInfo for () { fn cancel_deferred_slash(s: u32, ) -> Weight { Weight::from_ref_time(903_312_000 as RefTimeWeight) // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(4_968_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_968_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -669,7 +669,7 @@ impl WeightInfo for () { fn payout_stakers_dead_controller(n: u32, ) -> Weight { Weight::from_ref_time(87_569_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(24_232_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(24_232_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(10 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) @@ -689,7 +689,7 @@ impl WeightInfo for () { fn payout_stakers_alive_staked(n: u32, ) -> Weight { Weight::from_ref_time(98_839_000 as RefTimeWeight) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(34_480_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(34_480_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) @@ -704,7 +704,7 @@ impl WeightInfo for () { fn rebond(l: u32, ) -> Weight { Weight::from_ref_time(74_865_000 as RefTimeWeight) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(64_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + 
.saturating_add(Weight::from_ref_time(64_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(9 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) } @@ -720,7 +720,7 @@ impl WeightInfo for () { fn set_history_depth(e: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 62_000 - .saturating_add(Weight::from_ref_time(22_829_000 as RefTimeWeight).scalar_saturating_mul(e as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(22_829_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((7 as RefTimeWeight).saturating_mul(e as RefTimeWeight))) @@ -741,7 +741,7 @@ impl WeightInfo for () { fn reap_stash(s: u32, ) -> Weight { Weight::from_ref_time(70_933_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_021_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_021_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) @@ -767,9 +767,9 @@ impl WeightInfo for () { fn new_era(v: u32, n: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 897_000 - .saturating_add(Weight::from_ref_time(213_100_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(213_100_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(31_123_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(31_123_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(208 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) @@ -787,11 +787,11 @@ impl WeightInfo for () { fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(23_745_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(23_745_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(22_497_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(22_497_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 3_968_000 - .saturating_add(Weight::from_ref_time(20_676_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(20_676_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(202 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) @@ -801,7 +801,7 @@ impl WeightInfo for () { fn get_npos_targets(v: u32, ) -> 
Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 36_000 - .saturating_add(Weight::from_ref_time(8_097_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_097_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) } diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 93b5bd3468f6b..e29115382819a 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -842,7 +842,7 @@ pub mod pallet { let items = items as u64; ::DbWeight::get() .reads_writes(1, 1) - .scalar_saturating_mul(items) + .saturating_mul(items) // we assume that the read/write per-byte weight is the same for child and top tree. .saturating_add(T::WeightInfo::process_top_key(size)) } @@ -1246,7 +1246,7 @@ mod mock { pub(crate) fn run_to_block(n: u32) -> (H256, Weight) { let mut root = Default::default(); - let mut weight_sum = Weight::new(); + let mut weight_sum = Weight::zero(); log::trace!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n); while System::block_number() < n { System::set_block_number(System::block_number() + 1); @@ -1622,7 +1622,7 @@ pub(crate) mod remote_tests { n: ::BlockNumber, ) -> (H256, Weight) { let mut root = Default::default(); - let mut weight_sum = Weight::new(); + let mut weight_sum = Weight::zero(); while System::::block_number() < n { System::::set_block_number(System::::block_number() + One::one()); System::::on_initialize(System::::block_number()); diff --git a/frame/state-trie-migration/src/weights.rs b/frame/state-trie-migration/src/weights.rs index 6fccc0b68f3d7..1967874157f75 100644 --- a/frame/state-trie-migration/src/weights.rs +++ b/frame/state-trie-migration/src/weights.rs @@ -90,7 +90,7 @@ impl WeightInfo for SubstrateWeight { fn process_top_key(v: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -132,7 +132,7 @@ impl WeightInfo for () { fn process_top_key(v: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index 502c6476935a2..0508772cc88ec 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -62,8 +62,10 @@ fn sudo_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. 
- let call = - Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::one() })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1), + })); assert_ok!(Sudo::sudo(Origin::signed(1), call)); System::assert_has_event(TestEvent::Sudo(Event::Sudid { sudo_result: Ok(()) })); }) @@ -97,8 +99,10 @@ fn sudo_unchecked_weight_basics() { assert_eq!(Logger::i32_log(), vec![42i32]); // Controls the dispatched weight. - let call = - Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::one() })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1), + })); let sudo_unchecked_weight_call = SudoCall::sudo_unchecked_weight { call, weight: Weight::from_ref_time(1_000) }; let info = sudo_unchecked_weight_call.get_dispatch_info(); @@ -113,8 +117,10 @@ fn sudo_unchecked_weight_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. - let call = - Box::new(Call::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::one() })); + let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + i: 42, + weight: Weight::from_ref_time(1), + })); assert_ok!(Sudo::sudo_unchecked_weight( Origin::signed(1), call, @@ -167,13 +173,17 @@ fn sudo_as_basics() { assert!(Logger::account_log().is_empty()); // A non-privileged function should not work when called with a non-root `key`. - let call = - Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::one() })); + let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { + i: 42, + weight: Weight::from_ref_time(1), + })); assert_noop!(Sudo::sudo_as(Origin::signed(3), 2, call), Error::::RequireSudo); // A non-privileged function will work when passed to `sudo_as` with the root `key`. - let call = - Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::one() })); + let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { + i: 42, + weight: Weight::from_ref_time(1), + })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); assert_eq!(Logger::i32_log(), vec![42i32]); // The correct user makes the call within `sudo_as`. @@ -188,8 +198,10 @@ fn sudo_as_emits_events_correctly() { System::set_block_number(1); // A non-privileged function will work when passed to `sudo_as` with the root `key`. - let call = - Box::new(Call::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::one() })); + let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { + i: 42, + weight: Weight::from_ref_time(1), + })); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); System::assert_has_event(TestEvent::Sudo(Event::SudoAsDone { sudo_result: Ok(()) })); }); diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 94974888c1d5c..fc976b03556a4 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -1643,7 +1643,7 @@ macro_rules! 
decl_module { pallet_name, ); - $crate::dispatch::Weight::new() + $crate::dispatch::Weight::zero() } #[cfg(feature = "try-runtime")] diff --git a/frame/support/src/migrations.rs b/frame/support/src/migrations.rs index b559d3aca615f..63679fd56d667 100644 --- a/frame/support/src/migrations.rs +++ b/frame/support/src/migrations.rs @@ -48,7 +48,7 @@ impl PalletVersionToStorageVersionHelpe #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl PalletVersionToStorageVersionHelper for T { fn migrate(db_weight: &RuntimeDbWeight) -> Weight { - let mut weight = Weight::new(); + let mut weight = Weight::zero(); for_tuples!( #( weight = weight.saturating_add(T::migrate(db_weight)); )* ); diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index e8cd87823f3c3..25ec333a7dbe0 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -35,7 +35,7 @@ pub trait OnInitialize { /// including inherent extrinsics. Hence for instance, if you runtime includes /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. fn on_initialize(_n: BlockNumber) -> Weight { - Weight::new() + Weight::zero() } } @@ -44,7 +44,7 @@ pub trait OnInitialize { #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnInitialize for Tuple { fn on_initialize(n: BlockNumber) -> Weight { - let mut weight = Weight::new(); + let mut weight = Weight::zero(); for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(n.clone())); )* ); weight } @@ -77,7 +77,7 @@ pub trait OnIdle { /// NOTE: This function is called AFTER ALL extrinsics - including inherent extrinsics - /// in a block are applied but before `on_finalize` is executed. fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { - Weight::new() + Weight::zero() } } @@ -88,7 +88,7 @@ impl OnIdle for Tuple { fn on_idle(n: BlockNumber, remaining_weight: Weight) -> Weight { let on_idle_functions: &[fn(BlockNumber, Weight) -> Weight] = &[for_tuples!( #( Tuple::on_idle ),* )]; - let mut weight = Weight::new(); + let mut weight = Weight::zero(); let len = on_idle_functions.len(); let start_index = n % (len as u32).into(); let start_index = start_index.try_into().ok().expect( @@ -130,7 +130,7 @@ pub trait OnRuntimeUpgrade { /// /// Return the non-negotiable weight consumed for runtime upgrade. fn on_runtime_upgrade() -> Weight { - Weight::new() + Weight::zero() } /// Execute some pre-checks prior to a runtime upgrade. @@ -155,7 +155,7 @@ pub trait OnRuntimeUpgrade { #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnRuntimeUpgrade for Tuple { fn on_runtime_upgrade() -> Weight { - let mut weight = Weight::new(); + let mut weight = Weight::zero(); for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); weight } @@ -199,14 +199,14 @@ pub trait Hooks { /// Return the weight used, the hook will subtract it from current weight used /// and pass the result to the next `on_idle` hook if it exists. fn on_idle(_n: BlockNumber, _remaining_weight: Weight) -> Weight { - Weight::new() + Weight::zero() } /// The block is being initialized. Implement to have something happen. /// /// Return the non-negotiable weight consumed in the block. fn on_initialize(_n: BlockNumber) -> Weight { - Weight::new() + Weight::zero() } /// Perform a module upgrade. @@ -229,7 +229,7 @@ pub trait Hooks { /// logic as a free-function from your pallet, and pass it to `type Executive` from the /// top-level runtime. 
fn on_runtime_upgrade() -> Weight { - Weight::new() + Weight::zero() } /// Execute the sanity checks of this pallet, per block. diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 568cb5535ccda..dab7f4a2746cf 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -650,7 +650,7 @@ impl PerDispatchClass { impl PerDispatchClass { /// Returns the total weight consumed by all extrinsics in the block. pub fn total(&self) -> Weight { - let mut sum = Weight::new(); + let mut sum = Weight::zero(); for class in DispatchClass::all() { sum = sum.saturating_add(*self.get(*class)); } diff --git a/frame/support/src/weights/block_weights.rs b/frame/support/src/weights/block_weights.rs index 93a80d12db96b..51f707b2b0df5 100644 --- a/frame/support/src/weights/block_weights.rs +++ b/frame/support/src/weights/block_weights.rs @@ -53,7 +53,7 @@ parameter_types! { /// 99th: 5_489_273 /// 95th: 5_433_314 /// 75th: 5_354_812 - pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul(5_346_284); + pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(5_346_284); } #[cfg(test)] @@ -68,8 +68,8 @@ mod test_weights { let w = super::BlockExecutionWeight::get(); // At least 100 µs. - assert!(w >= 100 * constants::WEIGHT_PER_MICROS, "Weight should be at least 100 µs."); + assert!(w >= 100u32 * constants::WEIGHT_PER_MICROS, "Weight should be at least 100 µs."); // At most 50 ms. - assert!(w <= 50 * constants::WEIGHT_PER_MILLIS, "Weight should be at most 50 ms."); + assert!(w <= 50u32 * constants::WEIGHT_PER_MILLIS, "Weight should be at most 50 ms."); } } diff --git a/frame/support/src/weights/extrinsic_weights.rs b/frame/support/src/weights/extrinsic_weights.rs index d223eb7c10efc..2bee1059b0dad 100644 --- a/frame/support/src/weights/extrinsic_weights.rs +++ b/frame/support/src/weights/extrinsic_weights.rs @@ -53,7 +53,7 @@ parameter_types! { /// 99th: 86_924 /// 95th: 86_828 /// 75th: 86_347 - pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul(86_298); + pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(86_298); } #[cfg(test)] @@ -68,7 +68,7 @@ mod test_weights { let w = super::ExtrinsicBaseWeight::get(); // At least 10 µs. - assert!(w >= 10 * constants::WEIGHT_PER_MICROS, "Weight should be at least 10 µs."); + assert!(w >= 10u32 * constants::WEIGHT_PER_MICROS, "Weight should be at least 10 µs."); // At most 1 ms. assert!(w <= constants::WEIGHT_PER_MILLIS, "Weight should be at most 1 ms."); } diff --git a/frame/support/src/weights/weight_v2.rs b/frame/support/src/weights/weight_v2.rs index d02aac3268838..bcda6358c965f 100644 --- a/frame/support/src/weights/weight_v2.rs +++ b/frame/support/src/weights/weight_v2.rs @@ -18,8 +18,8 @@ use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign}; use sp_runtime::{ - traits::{Bounded, CheckedAdd, CheckedSub, One, Zero}, - Perquintill, RuntimeDebug, + traits::{Bounded, CheckedAdd, CheckedSub, Zero}, + RuntimeDebug, }; use super::*; @@ -50,11 +50,6 @@ pub struct Weight { } impl Weight { - /// Create a new Weight with zero. - pub const fn new() -> Self { - Self { ref_time: 0 } - } - /// Set the reference time part of the weight. pub const fn set_ref_time(mut self, c: RefTimeWeight) -> Self { self.ref_time = c; @@ -93,27 +88,84 @@ impl Weight { } } - /// Construct with reference time weight. + /// Construct [`Weight`] with reference time weight. 
pub const fn from_ref_time(ref_time: RefTimeWeight) -> Self { Self { ref_time } } - pub fn checked_mul(self, rhs: u64) -> Option { - let ref_time = self.ref_time.checked_mul(rhs)?; - Some(Self { ref_time }) + /// Saturating [`Weight`] addition. Computes `self + rhs`, saturating at the numeric bounds of + /// all fields instead of overflowing. + pub const fn saturating_add(self, rhs: Self) -> Self { + Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) } + } + + /// Saturating [`Weight`] subtraction. Computes `self - rhs`, saturating at the numeric bounds + /// of all fields instead of overflowing. + pub const fn saturating_sub(self, rhs: Self) -> Self { + Self { ref_time: self.ref_time.saturating_sub(rhs.ref_time) } + } + + /// Saturating [`Weight`] scalar multiplication. Computes `self.field * scalar` for all fields, + /// saturating at the numeric bounds of all fields instead of overflowing. + pub const fn saturating_mul(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time.saturating_mul(scalar) } } - pub fn checked_div(self, rhs: u64) -> Option { - let ref_time = self.ref_time.checked_div(rhs)?; - Some(Self { ref_time }) + /// Saturating [`Weight`] scalar division. Computes `self.field / scalar` for all fields, + /// saturating at the numeric bounds of all fields instead of overflowing. + pub const fn saturating_div(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time.saturating_div(scalar) } } - pub const fn scalar_saturating_mul(self, rhs: u64) -> Self { - Self { ref_time: self.ref_time.saturating_mul(rhs) } + /// Saturating [`Weight`] scalar exponentiation. Computes `self.field.pow(exp)` for all fields, + /// saturating at the numeric bounds of all fields instead of overflowing. + pub const fn saturating_pow(self, exp: u32) -> Self { + Self { ref_time: self.ref_time.saturating_pow(exp) } } - pub const fn scalar_div(self, rhs: u64) -> Self { - Self { ref_time: self.ref_time / rhs } + /// Increment [`Weight`] by `amount` via saturating addition. + pub fn saturating_accrue(&mut self, amount: Self) { + *self = self.saturating_add(amount); + } + + /// Checked [`Weight`] addition. Computes `self + rhs`, returning `None` if overflow occurred. + pub const fn checked_add(&self, rhs: &Self) -> Option { + match self.ref_time.checked_add(rhs.ref_time) { + Some(ref_time) => Some(Self { ref_time }), + None => None, + } + } + + /// Checked [`Weight`] subtraction. Computes `self - rhs`, returning `None` if overflow + /// occurred. + pub const fn checked_sub(&self, rhs: &Self) -> Option { + match self.ref_time.checked_sub(rhs.ref_time) { + Some(ref_time) => Some(Self { ref_time }), + None => None, + } + } + + /// Checked [`Weight`] scalar multiplication. Computes `self.field * scalar` for each field, + /// returning `None` if overflow occurred. + pub const fn checked_mul(self, scalar: u64) -> Option { + match self.ref_time.checked_mul(scalar) { + Some(ref_time) => Some(Self { ref_time }), + None => None, + } + } + + /// Checked [`Weight`] scalar division. Computes `self.field / scalar` for each field, returning + /// `None` if overflow occurred. + pub const fn checked_div(self, scalar: u64) -> Option { + match self.ref_time.checked_div(scalar) { + Some(ref_time) => Some(Self { ref_time }), + None => None, + } + } + + /// Return a [`Weight`] where all fields are zero. 
+ pub const fn zero() -> Self { + Self { ref_time: 0 } } } @@ -127,12 +179,6 @@ impl Zero for Weight { } } -impl One for Weight { - fn one() -> Self { - Self::one() - } -} - impl Add for Weight { type Output = Self; fn add(self, rhs: Self) -> Self { @@ -147,13 +193,6 @@ impl Sub for Weight { } } -impl Mul for Weight { - type Output = Self; - fn mul(self, b: Self) -> Self { - Self { ref_time: b.ref_time * self.ref_time } - } -} - impl Mul for Weight where T: Mul + Copy, @@ -164,26 +203,39 @@ where } } -impl Mul for Perbill { - type Output = Weight; - fn mul(self, b: Weight) -> Weight { - Weight { ref_time: self * b.ref_time } +macro_rules! weight_mul_per_impl { + ($($t:ty),* $(,)?) => { + $( + impl Mul for $t { + type Output = Weight; + fn mul(self, b: Weight) -> Weight { + Weight { ref_time: self * b.ref_time } + } + } + )* } } +weight_mul_per_impl!( + sp_runtime::Percent, + sp_runtime::PerU16, + sp_runtime::Permill, + sp_runtime::Perbill, + sp_runtime::Perquintill, +); -impl Mul for Perquintill { - type Output = Weight; - fn mul(self, b: Weight) -> Weight { - Weight { ref_time: self * b.ref_time } - } -} - -impl Mul for u64 { - type Output = Weight; - fn mul(self, b: Weight) -> Weight { - Weight { ref_time: self * b.ref_time } +macro_rules! weight_mul_primitive_impl { + ($($t:ty),* $(,)?) => { + $( + impl Mul for $t { + type Output = Weight; + fn mul(self, b: Weight) -> Weight { + Weight { ref_time: u64::from(self) * b.ref_time } + } + } + )* } } +weight_mul_primitive_impl!(u8, u16, u32, u64); impl Div for Weight where @@ -196,24 +248,6 @@ where } } -impl Saturating for Weight { - fn saturating_add(self, rhs: Self) -> Self { - self.saturating_add(rhs) - } - - fn saturating_sub(self, rhs: Self) -> Self { - self.saturating_sub(rhs) - } - - fn saturating_mul(self, rhs: Self) -> Self { - self.saturating_mul(rhs) - } - - fn saturating_pow(self, exp: usize) -> Self { - self.saturating_pow(exp) - } -} - impl CheckedAdd for Weight { fn checked_add(&self, rhs: &Self) -> Option { self.checked_add(rhs) @@ -344,53 +378,12 @@ impl sp_runtime::traits::Printable for Weight { } } -// Re-export common functions so you do not need to import trait. 
-impl Weight { - pub const fn zero() -> Self { - Self { ref_time: 0 } - } - - pub const fn one() -> Self { - Self { ref_time: 1 } - } - - pub const fn saturating_add(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) } - } - - pub const fn saturating_sub(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time.saturating_sub(rhs.ref_time) } - } - - pub const fn saturating_mul(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time.saturating_mul(rhs.ref_time) } - } - - pub const fn saturating_pow(self, exp: usize) -> Self { - Self { ref_time: self.ref_time.saturating_pow(exp as u32) } - } - - pub const fn checked_add(&self, rhs: &Self) -> Option { - match self.ref_time.checked_add(rhs.ref_time) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } - } - - pub const fn checked_sub(&self, rhs: &Self) -> Option { - match self.ref_time.checked_sub(rhs.ref_time) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } - } -} - // TODO: Eventually remove these impl From> for PostDispatchInfo { fn from(maybe_actual_computation: Option) -> Self { let actual_weight = match maybe_actual_computation { - Some(actual_computation) => Some(Weight::new().set_ref_time(actual_computation)), + Some(actual_computation) => Some(Weight::zero().set_ref_time(actual_computation)), None => None, }; Self { actual_weight, pays_fee: Default::default() } @@ -401,7 +394,7 @@ impl From<(Option, Pays)> for PostDispatchInfo { fn from(post_weight_info: (Option, Pays)) -> Self { let (maybe_actual_time, pays_fee) = post_weight_info; let actual_weight = match maybe_actual_time { - Some(actual_time) => Some(Weight::new().set_ref_time(actual_time)), + Some(actual_time) => Some(Weight::zero().set_ref_time(actual_time)), None => None, }; Self { actual_weight, pays_fee } @@ -410,7 +403,7 @@ impl From<(Option, Pays)> for PostDispatchInfo { impl WeighData for RefTimeWeight { fn weigh_data(&self, _: T) -> Weight { - return Weight::new().set_ref_time(*self) + return Weight::zero().set_ref_time(*self) } } diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index b7232c430696d..d6434b6c63d8a 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -322,7 +322,7 @@ mod tests { new_test_ext().execute_with(|| { let max = DispatchInfo { weight: block_weights().get(DispatchClass::Normal).max_extrinsic.unwrap() + - Weight::one(), + Weight::from_ref_time(1), class: DispatchClass::Normal, ..Default::default() }; @@ -348,7 +348,7 @@ mod tests { let okay = DispatchInfo { weight, class: DispatchClass::Operational, ..Default::default() }; let max = DispatchInfo { - weight: weight + Weight::one(), + weight: weight + Weight::from_ref_time(1), class: DispatchClass::Operational, ..Default::default() }; @@ -532,7 +532,7 @@ mod tests { let medium = DispatchInfo { weight: normal_limit - base_extrinsic, ..Default::default() }; let big = DispatchInfo { - weight: normal_limit - base_extrinsic + Weight::one(), + weight: normal_limit - base_extrinsic + Weight::from_ref_time(1), ..Default::default() }; let len = 0_usize; @@ -551,7 +551,7 @@ mod tests { reset_check_weight(&small, false, Weight::zero()); reset_check_weight(&medium, false, Weight::zero()); - reset_check_weight(&big, true, Weight::one()); + reset_check_weight(&big, true, Weight::from_ref_time(1)); }) } diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index d9be460b3a4fb..7626c7e8665e6 100644 --- 
a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -204,7 +204,7 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { - Self::with_sensible_defaults(1 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) + Self::with_sensible_defaults(1u32 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) } } @@ -295,9 +295,9 @@ impl BlockWeights { /// is not suitable for production deployments. pub fn simple_max(block_weight: Weight) -> Self { Self::builder() - .base_block(Weight::new()) + .base_block(Weight::zero()) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = Weight::new(); + weights.base_extrinsic = Weight::zero(); }) .for_class(DispatchClass::non_mandatory(), |weights| { weights.max_total = block_weight.into(); diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 4f7f168eb55ab..8b6c291096d5a 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -65,7 +65,7 @@ impl WeightInfo for SubstrateWeight { fn remark_with_event(b: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) @@ -79,7 +79,7 @@ impl WeightInfo for SubstrateWeight { fn set_storage(i: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(603_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(603_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) @@ -87,7 +87,7 @@ impl WeightInfo for SubstrateWeight { fn kill_storage(i: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(513_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(513_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) @@ -95,7 +95,7 @@ impl WeightInfo for SubstrateWeight { fn kill_prefix(p: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_026_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_026_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } } @@ -110,7 +110,7 @@ impl WeightInfo for () { fn remark_with_event(b: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) @@ -124,7 +124,7 @@ impl WeightInfo for () { fn set_storage(i: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - 
.saturating_add(Weight::from_ref_time(603_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(603_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) @@ -132,7 +132,7 @@ impl WeightInfo for () { fn kill_storage(i: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(513_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(513_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) } // Storage: Skipped Metadata (r:0 w:0) @@ -140,7 +140,7 @@ impl WeightInfo for () { fn kill_prefix(p: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_026_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_026_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) } } diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs index 6fd4ccc7478f6..106e5e163f02d 100644 --- a/frame/tips/src/weights.rs +++ b/frame/tips/src/weights.rs @@ -60,7 +60,7 @@ impl WeightInfo for SubstrateWeight { fn report_awesome(r: u32, ) -> Weight { Weight::from_ref_time(30_669_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -77,9 +77,9 @@ impl WeightInfo for SubstrateWeight { fn tip_new(r: u32, t: u32, ) -> Weight { Weight::from_ref_time(20_385_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(166_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(166_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -88,7 +88,7 @@ impl WeightInfo for SubstrateWeight { fn tip(t: u32, ) -> Weight { Weight::from_ref_time(12_287_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(363_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(363_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -99,7 +99,7 @@ impl WeightInfo for SubstrateWeight { fn close_tip(t: u32, ) -> Weight { Weight::from_ref_time(45_656_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(276_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + 
.saturating_add(Weight::from_ref_time(276_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -108,7 +108,7 @@ impl WeightInfo for SubstrateWeight { fn slash_tip(t: u32, ) -> Weight { Weight::from_ref_time(18_525_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -121,7 +121,7 @@ impl WeightInfo for () { fn report_awesome(r: u32, ) -> Weight { Weight::from_ref_time(30_669_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -138,9 +138,9 @@ impl WeightInfo for () { fn tip_new(r: u32, t: u32, ) -> Weight { Weight::from_ref_time(20_385_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).scalar_saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(166_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(166_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -149,7 +149,7 @@ impl WeightInfo for () { fn tip(t: u32, ) -> Weight { Weight::from_ref_time(12_287_000 as RefTimeWeight) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(363_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(363_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -160,7 +160,7 @@ impl WeightInfo for () { fn close_tip(t: u32, ) -> Weight { Weight::from_ref_time(45_656_000 as RefTimeWeight) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(276_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(276_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -169,7 +169,7 @@ impl WeightInfo for () { fn slash_tip(t: u32, ) -> Weight { Weight::from_ref_time(18_525_000 as RefTimeWeight) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).scalar_saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs 
b/frame/transaction-payment/asset-tx-payment/src/tests.rs index a296a52b5e840..a17d564d7a334 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -59,7 +59,7 @@ const CALL: &::Call = &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); thread_local! { - static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(Weight::new()); + static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(Weight::zero()); } pub struct BlockWeights; diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 9777d7d240491..16986203de1e5 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -845,7 +845,7 @@ mod tests { &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); thread_local! { - static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(Weight::new()); + static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(Weight::zero()); } pub struct BlockWeights; diff --git a/frame/transaction-storage/src/weights.rs b/frame/transaction-storage/src/weights.rs index 54d5b0723aad6..04fa4e3048c22 100644 --- a/frame/transaction-storage/src/weights.rs +++ b/frame/transaction-storage/src/weights.rs @@ -61,7 +61,7 @@ impl WeightInfo for SubstrateWeight { fn store(l: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -99,7 +99,7 @@ impl WeightInfo for () { fn store(l: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index eecf225beea9b..cd4d9cf036abe 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -500,7 +500,7 @@ impl, I: 'static> Pallet { /// Spend some money! returns number of approvals before spend. 
pub fn spend_funds() -> Weight { - let mut total_weight = Weight::new(); + let mut total_weight = Weight::zero(); let mut budget_remaining = Self::pot(); Self::deposit_event(Event::Spending { budget_remaining }); diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index 74e6e9779000e..d2a5bc9f1f258 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -81,7 +81,7 @@ impl WeightInfo for SubstrateWeight { fn approve_proposal(p: u32, ) -> Weight { Weight::from_ref_time(10_786_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(110_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(110_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) } @@ -98,7 +98,7 @@ impl WeightInfo for SubstrateWeight { fn on_initialize_proposals(p: u32, ) -> Weight { Weight::from_ref_time(25_805_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(28_473_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(28_473_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) @@ -134,7 +134,7 @@ impl WeightInfo for () { fn approve_proposal(p: u32, ) -> Weight { Weight::from_ref_time(10_786_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(110_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(110_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) } @@ -151,7 +151,7 @@ impl WeightInfo for () { fn on_initialize_proposals(p: u32, ) -> Weight { Weight::from_ref_time(25_805_000 as RefTimeWeight) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(28_473_000 as RefTimeWeight).scalar_saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(28_473_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index 4ed01e463cc86..dae72accd2adc 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -105,11 +105,11 @@ impl WeightInfo for SubstrateWeight { fn destroy(n: u32, m: u32, a: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(13_639_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(13_639_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_393_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_393_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_217_000 as 
RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_217_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) @@ -150,7 +150,7 @@ impl WeightInfo for SubstrateWeight { fn redeposit(i: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 24_000 - .saturating_add(Weight::from_ref_time(15_540_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(15_540_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) @@ -322,11 +322,11 @@ impl WeightInfo for () { fn destroy(n: u32, m: u32, a: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(13_639_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(13_639_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_393_000 as RefTimeWeight).scalar_saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_393_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_217_000 as RefTimeWeight).scalar_saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_217_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) @@ -367,7 +367,7 @@ impl WeightInfo for () { fn redeposit(i: u32, ) -> Weight { Weight::from_ref_time(0 as RefTimeWeight) // Standard Error: 24_000 - .saturating_add(Weight::from_ref_time(15_540_000 as RefTimeWeight).scalar_saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(15_540_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 3df12d69e84eb..2209bc65d4958 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -185,7 +185,7 @@ pub mod pallet { let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) - .fold(Weight::new(), |total: Weight, weight: Weight| total.saturating_add(weight)) + .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() @@ -208,7 +208,7 @@ pub mod pallet { ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); // Track the actual weight of each of the batch calls. 
- let mut weight = Weight::new(); + let mut weight = Weight::zero(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. @@ -301,7 +301,7 @@ pub mod pallet { let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) - .fold(Weight::new(), |total: Weight, weight: Weight| total.saturating_add(weight)) + .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() @@ -324,7 +324,7 @@ pub mod pallet { ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); // Track the actual weight of each of the batch calls. - let mut weight = Weight::new(); + let mut weight = Weight::zero(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, bypass any dispatch filter; root can call anything. @@ -429,7 +429,7 @@ pub mod pallet { ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); // Track the actual weight of each of the batch calls. - let mut weight = Weight::new(); + let mut weight = Weight::zero(); // Track failed dispatch occur. let mut has_error: bool = false; for call in calls.into_iter() { diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index e3b16b5244fc0..09a2c6a4e4d38 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -420,7 +420,7 @@ fn batch_handles_weight_refund() { let good_call = call_foobar(false, start_weight, Some(end_weight)); let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call]; - let batch_len = Weight::from_ref_time(batch_calls.len() as u64); + let batch_len = batch_calls.len() as u64; let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); @@ -533,7 +533,7 @@ fn batch_all_handles_weight_refund() { let good_call = call_foobar(false, start_weight, Some(end_weight)); let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call]; - let batch_len = Weight::from_ref_time(batch_calls.len() as u64); + let batch_len = batch_calls.len() as u64; let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs index 0f0d171d8d4ee..1df9c2e67cc1f 100644 --- a/frame/utility/src/weights.rs +++ b/frame/utility/src/weights.rs @@ -60,7 +60,7 @@ impl WeightInfo for SubstrateWeight { fn batch(c: u32, ) -> Weight { Weight::from_ref_time(23_113_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_701_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_701_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) } fn as_derivative() -> Weight { Weight::from_ref_time(4_182_000 as RefTimeWeight) @@ -69,7 +69,7 @@ impl WeightInfo for SubstrateWeight { fn batch_all(c: u32, ) -> Weight { Weight::from_ref_time(18_682_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_794_000 as 
RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_794_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) } fn dispatch_as() -> Weight { Weight::from_ref_time(12_049_000 as RefTimeWeight) @@ -78,7 +78,7 @@ impl WeightInfo for SubstrateWeight { fn force_batch(c: u32, ) -> Weight { Weight::from_ref_time(19_136_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_697_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_697_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) } } @@ -88,7 +88,7 @@ impl WeightInfo for () { fn batch(c: u32, ) -> Weight { Weight::from_ref_time(23_113_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_701_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_701_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) } fn as_derivative() -> Weight { Weight::from_ref_time(4_182_000 as RefTimeWeight) @@ -97,7 +97,7 @@ impl WeightInfo for () { fn batch_all(c: u32, ) -> Weight { Weight::from_ref_time(18_682_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_794_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_794_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) } fn dispatch_as() -> Weight { Weight::from_ref_time(12_049_000 as RefTimeWeight) @@ -106,6 +106,6 @@ impl WeightInfo for () { fn force_batch(c: u32, ) -> Weight { Weight::from_ref_time(19_136_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_697_000 as RefTimeWeight).scalar_saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_697_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) } } diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index bd3af72cdd182..5988b0bd47d54 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -62,9 +62,9 @@ impl WeightInfo for SubstrateWeight { fn vest_locked(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(32_978_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(82_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(82_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(88_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(88_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -73,9 +73,9 @@ impl WeightInfo for SubstrateWeight { fn vest_unlocked(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(32_856_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) 
.saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) } @@ -85,9 +85,9 @@ impl WeightInfo for SubstrateWeight { fn vest_other_locked(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(33_522_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(74_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(74_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(72_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(72_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -97,9 +97,9 @@ impl WeightInfo for SubstrateWeight { fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(32_558_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(61_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(61_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -109,9 +109,9 @@ impl WeightInfo for SubstrateWeight { fn vested_transfer(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(49_260_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -121,9 +121,9 @@ impl WeightInfo for SubstrateWeight { fn force_vested_transfer(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(49_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(43_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(43_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) } @@ -133,9 +133,9 @@ impl WeightInfo for SubstrateWeight { fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(34_042_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(83_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(83_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - 
.saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -145,9 +145,9 @@ impl WeightInfo for SubstrateWeight { fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(33_937_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -160,9 +160,9 @@ impl WeightInfo for () { fn vest_locked(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(32_978_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(82_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(82_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(88_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(88_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -171,9 +171,9 @@ impl WeightInfo for () { fn vest_unlocked(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(32_856_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) } @@ -183,9 +183,9 @@ impl WeightInfo for () { fn vest_other_locked(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(33_522_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(74_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(74_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(72_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(72_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -195,9 +195,9 @@ impl WeightInfo for () { fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(32_558_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as 
RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(61_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(61_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -207,9 +207,9 @@ impl WeightInfo for () { fn vested_transfer(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(49_260_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -219,9 +219,9 @@ impl WeightInfo for () { fn force_vested_transfer(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(49_166_000 as RefTimeWeight) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(43_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(43_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) } @@ -231,9 +231,9 @@ impl WeightInfo for () { fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(34_042_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(83_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(83_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } @@ -243,9 +243,9 @@ impl WeightInfo for () { fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { Weight::from_ref_time(33_937_000 as RefTimeWeight) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).scalar_saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).scalar_saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } diff --git 
a/frame/whitelist/src/tests.rs b/frame/whitelist/src/tests.rs index 3e20cd29efb4f..24974bd726d43 100644 --- a/frame/whitelist/src/tests.rs +++ b/frame/whitelist/src/tests.rs @@ -99,7 +99,7 @@ fn test_whitelist_call_and_execute() { Whitelist::dispatch_whitelisted_call( Origin::root(), call_hash, - call_weight - Weight::one() + call_weight - Weight::from_ref_time(1) ), crate::Error::::InvalidCallWeightWitness, ); diff --git a/frame/whitelist/src/weights.rs b/frame/whitelist/src/weights.rs index bb2ed9700c833..88d1dedb2de47 100644 --- a/frame/whitelist/src/weights.rs +++ b/frame/whitelist/src/weights.rs @@ -82,7 +82,7 @@ impl WeightInfo for SubstrateWeight { fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { Weight::from_ref_time(25_325_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) } @@ -119,7 +119,7 @@ impl WeightInfo for () { fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { Weight::from_ref_time(25_325_000 as RefTimeWeight) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).scalar_saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) } diff --git a/utils/frame/benchmarking-cli/src/overhead/README.md b/utils/frame/benchmarking-cli/src/overhead/README.md index a1da5da0d0792..9d723c3424f7b 100644 --- a/utils/frame/benchmarking-cli/src/overhead/README.md +++ b/utils/frame/benchmarking-cli/src/overhead/README.md @@ -30,7 +30,7 @@ The file will contain the concrete weight value and various statistics about the /// 99th: 3_631_863 /// 95th: 3_595_674 /// 75th: 3_526_435 -pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul(3_532_484); +pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(3_532_484); ``` In this example it takes 3.5 ms to execute an empty block. That means that it always takes at least 3.5 ms to execute *any* block. @@ -59,7 +59,7 @@ The relevant section in the output file looks like this: /// 99th: 68_758 /// 95th: 67_843 /// 75th: 67_749 -pub const ExtrinsicBaseWeight: Weight = Weight::from_ref_time(67_745 * WEIGHT_PER_NANOS); +pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(67_745); ``` In this example it takes 67.7 µs to execute a NO-OP extrinsic. That means that it always takes at least 67.7 µs to execute *any* extrinsic. diff --git a/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/utils/frame/benchmarking-cli/src/overhead/weights.hbs index d07533e9dbaa8..bddaa642a5a29 100644 --- a/utils/frame/benchmarking-cli/src/overhead/weights.hbs +++ b/utils/frame/benchmarking-cli/src/overhead/weights.hbs @@ -52,7 +52,7 @@ parameter_types! 
{ /// 99th: {{underscore stats.p99}} /// 95th: {{underscore stats.p95}} /// 75th: {{underscore stats.p75}} - pub const {{long_name}}Weight: Weight = WEIGHT_PER_NANOS.scalar_saturating_mul({{underscore weight}}); + pub const {{long_name}}Weight: Weight = WEIGHT_PER_NANOS.saturating_mul({{underscore weight}}); } #[cfg(test)] diff --git a/utils/frame/benchmarking-cli/src/pallet/template.hbs b/utils/frame/benchmarking-cli/src/pallet/template.hbs index bf18e23367bc9..f6be16cf69ac9 100644 --- a/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -36,7 +36,7 @@ impl {{pallet}}::WeightInfo for WeightInfo { Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).scalar_saturating_mul({{cw.name}} as RefTimeWeight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight)) {{/each}} {{#if (ne benchmark.base_reads "0")}} .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) From 41b6b389466991167831558e6742295455f15dfb Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 1 Sep 2022 23:39:30 +0200 Subject: [PATCH 084/348] Add `StorageNoopGuard` (#12163) * Add StorageNoopGuard Signed-off-by: Oliver Tale-Yazdi * Fix import Signed-off-by: Oliver Tale-Yazdi * Fix feature gate Signed-off-by: Oliver Tale-Yazdi * Fix feature gate Signed-off-by: Oliver Tale-Yazdi * Use sp-std Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- frame/support/src/lib.rs | 2 + frame/support/src/storage/mod.rs | 1 + .../support/src/storage/storage_noop_guard.rs | 114 ++++++++++++++++++ 3 files changed, 117 insertions(+) create mode 100644 frame/support/src/storage/storage_noop_guard.rs diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index a81a266fb9a3d..c556322b6cf93 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -93,6 +93,8 @@ pub mod unsigned { }; } +#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] +pub use self::storage::storage_noop_guard::StorageNoopGuard; pub use self::{ dispatch::{Callable, Parameter}, hash::{ diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index d9e50a1e1345e..b70436785af12 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -46,6 +46,7 @@ pub mod child; pub mod generator; pub mod hashed; pub mod migration; +pub mod storage_noop_guard; pub mod transactional; pub mod types; pub mod unhashed; diff --git a/frame/support/src/storage/storage_noop_guard.rs b/frame/support/src/storage/storage_noop_guard.rs new file mode 100644 index 0000000000000..afcc699d91313 --- /dev/null +++ b/frame/support/src/storage/storage_noop_guard.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Feature gated since it can panic. +#![cfg(any(feature = "std", feature = "runtime-benchmarks", test))] + +//! Contains the [`crate::StorageNoopGuard`] for conveniently asserting +//! that no storage mutation has been made by a whole code block. + +/// Asserts that no storage changes took place between con- and destruction of [`Self`]. +/// +/// This is easier than wrapping the whole code-block inside a `assert_storage_noop!`. +/// +/// # Example +/// +/// ```should_panic +/// use frame_support::{StorageNoopGuard, storage::unhashed::put}; +/// +/// sp_io::TestExternalities::default().execute_with(|| { +/// let _guard = frame_support::StorageNoopGuard::default(); +/// put(b"key", b"value"); +/// // Panics since there are storage changes. +/// }); +/// ``` +#[must_use] +pub struct StorageNoopGuard(sp_std::vec::Vec); + +impl Default for StorageNoopGuard { + fn default() -> Self { + Self(frame_support::storage_root(frame_support::StateVersion::V1)) + } +} + +impl Drop for StorageNoopGuard { + fn drop(&mut self) { + // No need to double panic, eg. inside a test assertion failure. + if sp_std::thread::panicking() { + return + } + assert_eq!( + frame_support::storage_root(frame_support::StateVersion::V1), + self.0, + "StorageNoopGuard detected wrongful storage changes.", + ); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_io::TestExternalities; + + #[test] + #[should_panic(expected = "StorageNoopGuard detected wrongful storage changes.")] + fn storage_noop_guard_panics_on_changed() { + TestExternalities::default().execute_with(|| { + let _guard = StorageNoopGuard::default(); + frame_support::storage::unhashed::put(b"key", b"value"); + }); + } + + #[test] + fn storage_noop_guard_works_on_unchanged() { + TestExternalities::default().execute_with(|| { + let _guard = StorageNoopGuard::default(); + frame_support::storage::unhashed::put(b"key", b"value"); + frame_support::storage::unhashed::kill(b"key"); + }); + } + + #[test] + #[should_panic(expected = "StorageNoopGuard detected wrongful storage changes.")] + fn storage_noop_guard_panics_on_early_drop() { + TestExternalities::default().execute_with(|| { + let guard = StorageNoopGuard::default(); + frame_support::storage::unhashed::put(b"key", b"value"); + sp_std::mem::drop(guard); + frame_support::storage::unhashed::kill(b"key"); + }); + } + + #[test] + fn storage_noop_guard_works_on_changed_forget() { + TestExternalities::default().execute_with(|| { + let guard = StorageNoopGuard::default(); + frame_support::storage::unhashed::put(b"key", b"value"); + sp_std::mem::forget(guard); + }); + } + + #[test] + #[should_panic(expected = "Something else")] + fn storage_noop_guard_does_not_double_panic() { + TestExternalities::default().execute_with(|| { + let _guard = StorageNoopGuard::default(); + frame_support::storage::unhashed::put(b"key", b"value"); + panic!("Something else"); + }); + } +} From e986f05062a5c7a8608967febcd5c1d2bb02eb40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 2 Sep 2022 09:27:00 +0200 Subject: [PATCH 085/348] Do not drop the `task_manager` for benchmarking stuff 
(#12147) We can not drop the `task_manager` for benchmarking stuff, because otherwise stuff that may needs this feature (like background signature verification) will fail. Besides the base path setup is moved to `SharedParams` directly. Meaning any call to `base_path` will now directly return a tmp path when `--dev` is given. --- bin/node/cli/src/command.rs | 30 +++++++++++++++----------- client/cli/src/commands/insert_key.rs | 2 +- client/cli/src/commands/run_cmd.rs | 2 +- client/cli/src/config.rs | 2 +- client/cli/src/params/shared_params.rs | 9 ++++++-- 5 files changed, 27 insertions(+), 18 deletions(-) diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 85e5415dbe139..1d88e9b539421 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -115,35 +115,39 @@ pub fn run() -> Result<()> { cmd.run::(config) }, BenchmarkCmd::Block(cmd) => { - let PartialComponents { client, .. } = new_partial(&config)?; - cmd.run(client) + // ensure that we keep the task manager alive + let partial = new_partial(&config)?; + cmd.run(partial.client) }, BenchmarkCmd::Storage(cmd) => { - let PartialComponents { client, backend, .. } = new_partial(&config)?; - let db = backend.expose_db(); - let storage = backend.expose_storage(); + // ensure that we keep the task manager alive + let partial = new_partial(&config)?; + let db = partial.backend.expose_db(); + let storage = partial.backend.expose_storage(); - cmd.run(config, client, db, storage) + cmd.run(config, partial.client, db, storage) }, BenchmarkCmd::Overhead(cmd) => { - let PartialComponents { client, .. } = new_partial(&config)?; - let ext_builder = RemarkBuilder::new(client.clone()); + // ensure that we keep the task manager alive + let partial = new_partial(&config)?; + let ext_builder = RemarkBuilder::new(partial.client.clone()); - cmd.run(config, client, inherent_benchmark_data()?, &ext_builder) + cmd.run(config, partial.client, inherent_benchmark_data()?, &ext_builder) }, BenchmarkCmd::Extrinsic(cmd) => { - let PartialComponents { client, .. } = service::new_partial(&config)?; + // ensure that we keep the task manager alive + let partial = service::new_partial(&config)?; // Register the *Remark* and *TKA* builders. let ext_factory = ExtrinsicFactory(vec![ - Box::new(RemarkBuilder::new(client.clone())), + Box::new(RemarkBuilder::new(partial.client.clone())), Box::new(TransferKeepAliveBuilder::new( - client.clone(), + partial.client.clone(), Sr25519Keyring::Alice.to_account_id(), ExistentialDeposit::get(), )), ]); - cmd.run(client, inherent_benchmark_data()?, &ext_factory) + cmd.run(partial.client, inherent_benchmark_data()?, &ext_factory) }, BenchmarkCmd::Machine(cmd) => cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), diff --git a/client/cli/src/commands/insert_key.rs b/client/cli/src/commands/insert_key.rs index 68201d7b4bffc..5029ecd29248c 100644 --- a/client/cli/src/commands/insert_key.rs +++ b/client/cli/src/commands/insert_key.rs @@ -60,7 +60,7 @@ impl InsertKeyCmd { let suri = utils::read_uri(self.suri.as_ref())?; let base_path = self .shared_params - .base_path() + .base_path()? 
.unwrap_or_else(|| BasePath::from_project("", "", &C::executable_name())); let chain_id = self.shared_params.chain_id(self.shared_params.is_dev()); let chain_spec = cli.load_spec(&chain_id)?; diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index fd236aa0211dd..11cfb9d0ff651 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -485,7 +485,7 @@ impl CliConfiguration for RunCmd { Ok(if self.tmp { Some(BasePath::new_temp_dir()?) } else { - match self.shared_params().base_path() { + match self.shared_params().base_path()? { Some(r) => Some(r), // If `dev` is enabled, we use the temp base path. None if self.shared_params().is_dev() => Some(BasePath::new_temp_dir()?), diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 92091ea118ff1..bc5941914de89 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -125,7 +125,7 @@ pub trait CliConfiguration: Sized { /// /// By default this is retrieved from `SharedParams`. fn base_path(&self) -> Result> { - Ok(self.shared_params().base_path()) + self.shared_params().base_path() } /// Returns `true` if the node is for development or not diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index 67b18aa8b09e2..4a8fc4bcfffdd 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -82,8 +82,13 @@ pub struct SharedParams { impl SharedParams { /// Specify custom base path. - pub fn base_path(&self) -> Option { - self.base_path.clone().map(Into::into) + pub fn base_path(&self) -> Result, crate::Error> { + match &self.base_path { + Some(r) => Ok(Some(r.clone().into())), + // If `dev` is enabled, we use the temp base path. + None if self.is_dev() => Ok(Some(BasePath::new_temp_dir()?)), + None => Ok(None), + } } /// Specify the development chain. From 91b27bcabe205aeeee4486f07ad14365cf0d20f2 Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Fri, 2 Sep 2022 18:54:30 +0200 Subject: [PATCH 086/348] Alliance pallet: fix func name, fix migration weights (#12174) * Alliance pallet: fix func name, fix migration weights * update comment order --- frame/alliance/src/benchmarking.rs | 2 +- frame/alliance/src/lib.rs | 16 ++++++++-------- frame/alliance/src/migration.rs | 19 +++++++++++++------ frame/alliance/src/tests.rs | 8 ++++---- 4 files changed, 26 insertions(+), 19 deletions(-) diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index 60df1e1a84d37..d3917b9d2ed95 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -685,7 +685,7 @@ benchmarks_instance_pallet! 
{ ).unwrap(); } - assert_eq!(Alliance::::votable_members_count(), cc); + assert_eq!(Alliance::::voting_members_count(), cc); assert_eq!(Alliance::::ally_members_count(), m); } diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 99cb46c9ef58d..b60077c4ef99e 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -684,7 +684,7 @@ pub mod pallet { Error::::BadWitness ); ensure!( - Self::votable_members_count() <= witness.voting_members, + Self::voting_members_count() <= witness.voting_members, Error::::BadWitness ); ensure!( @@ -697,7 +697,7 @@ pub mod pallet { T::ProposalProvider::veto_proposal(*hash); } - let mut members = Self::votable_members(); + let mut members = Self::voting_members(); T::MembershipChanged::change_members_sorted(&[], &members, &[]); members.append(&mut Self::members_of(MemberRole::Ally)); @@ -1052,7 +1052,7 @@ impl, I: 'static> Pallet { } /// Count of all members who have voting rights. - fn votable_members_count() -> u32 { + fn voting_members_count() -> u32 { Members::::decode_len(MemberRole::Founder) .unwrap_or(0) .saturating_add(Members::::decode_len(MemberRole::Fellow).unwrap_or(0)) as u32 @@ -1064,7 +1064,7 @@ impl, I: 'static> Pallet { } /// Collect all members who have voting rights into one list. - fn votable_members() -> Vec { + fn voting_members() -> Vec { let mut founders = Self::members_of(MemberRole::Founder); let mut fellows = Self::members_of(MemberRole::Fellow); founders.append(&mut fellows); @@ -1072,8 +1072,8 @@ impl, I: 'static> Pallet { } /// Collect all members who have voting rights into one sorted list. - fn votable_members_sorted() -> Vec { - let mut members = Self::votable_members(); + fn voting_members_sorted() -> Vec { + let mut members = Self::voting_members(); members.sort(); members } @@ -1089,7 +1089,7 @@ impl, I: 'static> Pallet { })?; if role == MemberRole::Founder || role == MemberRole::Fellow { - let members = Self::votable_members_sorted(); + let members = Self::voting_members_sorted(); T::MembershipChanged::change_members_sorted(&[who.clone()], &[], &members[..]); } Ok(()) @@ -1104,7 +1104,7 @@ impl, I: 'static> Pallet { })?; if matches!(role, MemberRole::Founder | MemberRole::Fellow) { - let members = Self::votable_members_sorted(); + let members = Self::voting_members_sorted(); T::MembershipChanged::change_members_sorted(&[], &[who.clone()], &members[..]); } Ok(()) diff --git a/frame/alliance/src/migration.rs b/frame/alliance/src/migration.rs index 7e3df5219f25b..8f98484240061 100644 --- a/frame/alliance/src/migration.rs +++ b/frame/alliance/src/migration.rs @@ -51,22 +51,29 @@ mod v0_to_v1 { use super::*; pub fn migrate, I: 'static>() -> Weight { - if migration::clear_storage_prefix( + log::info!(target: LOG_TARGET, "Running migration v0_to_v1."); + + let res = migration::clear_storage_prefix( >::name().as_bytes(), b"UpForKicking", b"", None, None, - ) - .maybe_cursor - .is_some() - { + ); + + log::info!( + target: LOG_TARGET, + "Cleared '{}' entries from 'UpForKicking' storage prefix", + res.unique + ); + + if res.maybe_cursor.is_some() { log::error!( target: LOG_TARGET, "Storage prefix 'UpForKicking' is not completely cleared." 
); } - T::DbWeight::get().writes(1) + T::DbWeight::get().writes(res.unique.into()) } } diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs index 3bd82a8870efe..9c61561d7ddb5 100644 --- a/frame/alliance/src/tests.rs +++ b/frame/alliance/src/tests.rs @@ -29,7 +29,7 @@ type AllianceMotionEvent = pallet_collective::Event::NoVotingRights @@ -465,7 +465,7 @@ fn nominate_ally_works() { Error::::AlreadyMember ); - // only votable member(founder/fellow) have nominate right + // only voting member(founder/fellow) have nominate right assert_noop!( Alliance::nominate_ally(Origin::signed(5), 4), Error::::NoVotingRights From 82b538e3ae6e035b39a3ac8f069c9fb06757b6ac Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Fri, 2 Sep 2022 19:19:49 +0100 Subject: [PATCH 087/348] Remove RefTimeWeight (#12157) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * update api * update * remove unused * remove `one` api * fix unused * fmt * add saturating accrue * remove `Weight::new()` * use some macros * div makes no sense * Update weight_v2.rs * missed some * more patch * fixes * more fixes * more fix * more fix * remove RefTimeWeight * Update frame/contracts/src/storage.rs Co-authored-by: Alexander Theißen * not needed * Fixes Co-authored-by: Alexander Theißen Co-authored-by: Keith Yeung --- .maintain/frame-weight-template.hbs | 26 +- frame/alliance/src/weights.rs | 350 ++-- frame/assets/src/weights.rs | 326 ++-- frame/bags-list/src/weights.rs | 38 +- frame/balances/src/weights.rs | 86 +- frame/benchmarking/src/weights.rs | 54 +- frame/bounties/src/weights.rs | 146 +- frame/child-bounties/src/weights.rs | 102 +- frame/collective/src/weights.rs | 210 +-- frame/contracts/src/lib.rs | 4 +- frame/contracts/src/schedule.rs | 114 +- frame/contracts/src/storage.rs | 6 +- frame/contracts/src/weights.rs | 1534 ++++++++--------- frame/conviction-voting/src/weights.rs | 110 +- frame/democracy/src/weights.rs | 398 ++--- .../src/weights.rs | 158 +- .../election-provider-support/src/weights.rs | 34 +- frame/elections-phragmen/src/weights.rs | 182 +- frame/examples/basic/src/weights.rs | 38 +- frame/gilt/src/weights.rs | 122 +- frame/identity/src/weights.rs | 310 ++-- frame/im-online/src/weights.rs | 22 +- frame/indices/src/weights.rs | 62 +- frame/lottery/src/weights.rs | 74 +- frame/membership/src/weights.rs | 110 +- frame/multisig/src/weights.rs | 170 +- frame/node-authorization/src/weights.rs | 2 +- frame/nomination-pools/src/weights.rs | 198 +-- frame/preimage/src/weights.rs | 158 +- frame/proxy/src/weights.rs | 174 +- frame/ranked-collective/src/weights.rs | 98 +- frame/recovery/src/weights.rs | 122 +- frame/referenda/src/weights.rs | 326 ++-- frame/remark/src/weights.rs | 14 +- frame/scheduler/src/weights.rs | 290 ++-- frame/session/src/weights.rs | 26 +- frame/staking/src/weights.rs | 458 ++--- frame/state-trie-migration/src/weights.rs | 70 +- frame/support/src/lib.rs | 2 +- frame/support/src/weights.rs | 6 +- frame/support/src/weights/weight_v2.rs | 48 +- .../test/tests/pallet_compatibility.rs | 13 +- .../tests/pallet_compatibility_instance.rs | 13 +- frame/system/src/weights.rs | 62 +- frame/timestamp/src/weights.rs | 18 +- frame/tips/src/weights.rs | 98 +- frame/transaction-payment/src/lib.rs | 5 +- frame/transaction-storage/src/weights.rs | 42 +- frame/treasury/src/weights.rs | 90 +- frame/uniques/src/weights.rs | 354 ++-- frame/utility/src/weights.rs | 34 +- frame/vesting/src/weights.rs | 162 +- frame/whitelist/src/weights.rs | 54 +- 
.../benchmarking-cli/src/pallet/template.hbs | 14 +- 54 files changed, 3863 insertions(+), 3874 deletions(-) diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index 5beeaf5f0521e..b0244fbdab85b 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -31,7 +31,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for {{pallet}}. @@ -64,22 +64,22 @@ impl WeightInfo for SubstrateWeight { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) + Weight::from_ref_time({{underscore benchmark.base_weight}} as u64) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as u64).saturating_mul({{cw.name}} as u64)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as u64)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as RefTimeWeight).saturating_mul({{cr.name}} as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as u64).saturating_mul({{cr.name}} as u64))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as u64)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as u64).saturating_mul({{cw.name}} as u64))) {{/each}} } {{/each}} @@ -99,22 +99,22 @@ impl WeightInfo for () { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) + Weight::from_ref_time({{underscore benchmark.base_weight}} as u64) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as u64).saturating_mul({{cw.name}} as u64)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) + .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as u64)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as RefTimeWeight).saturating_mul({{cr.name}} as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as u64).saturating_mul({{cr.name}} as u64))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as 
RefTimeWeight)) + .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as u64)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight))) + .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as u64).saturating_mul({{cw.name}} as u64))) {{/each}} } {{/each}} diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index 2da5782f1bb4f..26946c71bf314 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_alliance. @@ -80,26 +80,26 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[0, 90]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(37_864_000 as RefTimeWeight) + Weight::from_ref_time(37_864_000 as u64) // Standard Error: 28_000 - .saturating_add(Weight::from_ref_time(69_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(69_000 as u64).saturating_mul(x as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(66_000 as u64).saturating_mul(y as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Alliance Members (r:2 w:0) // Storage: AllianceMotion Voting (r:1 w:1) /// The range of component `x` is `[3, 10]`. /// The range of component `y` is `[2, 90]`. fn vote(_x: u32, y: u32, ) -> Weight { - Weight::from_ref_time(46_813_000 as RefTimeWeight) + Weight::from_ref_time(46_813_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(125_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(125_000 as u64).saturating_mul(y as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion ProposalOf (r:1 w:1) @@ -107,11 +107,11 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Voting (r:0 w:1) /// The range of component `p` is `[1, 100]`. 
fn veto(p: u32, ) -> Weight { - Weight::from_ref_time(35_316_000 as RefTimeWeight) + Weight::from_ref_time(35_316_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(172_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -122,15 +122,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(36_245_000 as RefTimeWeight) + Weight::from_ref_time(36_245_000 as u64) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(336_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(336_000 as u64).saturating_mul(x as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(109_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(109_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(178_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(178_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -142,17 +142,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(48_088_000 as RefTimeWeight) + Weight::from_ref_time(48_088_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(194_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(194_000 as u64).saturating_mul(x as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(93_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(201_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -164,13 +164,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(43_374_000 as RefTimeWeight) + Weight::from_ref_time(43_374_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(101_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(101_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(182_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(182_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -183,13 +183,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(42_798_000 as RefTimeWeight) + Weight::from_ref_time(42_798_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: Alliance Members (r:3 w:3) @@ -206,68 +206,68 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 100]`. /// The range of component `m` is `[0, 100]`. 
fn force_set_members(x: u32, y: u32, z: u32, p: u32, c: u32, m: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 221_000 - .saturating_add(Weight::from_ref_time(1_294_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_294_000 as u64).saturating_mul(x as u64)) // Standard Error: 23_000 - .saturating_add(Weight::from_ref_time(231_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(231_000 as u64).saturating_mul(y as u64)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(z as u64)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(9_371_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_371_000 as u64).saturating_mul(p as u64)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_673_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(11_673_000 as u64).saturating_mul(c as u64)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_581_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) - .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(11_581_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(c as u64))) + .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(m as u64))) + .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(p as u64))) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(c as u64))) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(m as u64))) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - Weight::from_ref_time(18_721_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_721_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - Weight::from_ref_time(21_887_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(21_887_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - Weight::from_ref_time(23_052_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + 
Weight::from_ref_time(23_052_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - Weight::from_ref_time(54_504_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(54_504_000 as u64) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - Weight::from_ref_time(42_601_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(42_601_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Alliance Members (r:3 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - Weight::from_ref_time(37_704_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(37_704_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Alliance Members (r:4 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) @@ -275,18 +275,18 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Prime (r:0 w:1) // Storage: Alliance RetiringMembers (r:0 w:1) fn give_retirement_notice() -> Weight { - Weight::from_ref_time(40_859_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(40_859_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Alliance RetiringMembers (r:1 w:1) // Storage: Alliance Members (r:1 w:1) // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn retire() -> Weight { - Weight::from_ref_time(43_447_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(43_447_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) @@ -295,35 +295,35 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - Weight::from_ref_time(61_718_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(61_718_000 as u64) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) /// The range of component 
`n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(359_000 as RefTimeWeight) + Weight::from_ref_time(359_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_376_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_376_000 as u64).saturating_mul(n as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(112_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(112_000 as u64).saturating_mul(l as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 145_000 - .saturating_add(Weight::from_ref_time(20_932_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(20_932_000 as u64).saturating_mul(n as u64)) // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(3_649_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_649_000 as u64).saturating_mul(l as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } } @@ -339,26 +339,26 @@ impl WeightInfo for () { /// The range of component `y` is `[0, 90]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(37_864_000 as RefTimeWeight) + Weight::from_ref_time(37_864_000 as u64) // Standard Error: 28_000 - .saturating_add(Weight::from_ref_time(69_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(69_000 as u64).saturating_mul(x as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(66_000 as u64).saturating_mul(y as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Alliance Members (r:2 w:0) // Storage: AllianceMotion Voting (r:1 w:1) /// The range of component `x` is `[3, 10]`. /// The range of component `y` is `[2, 90]`. 
fn vote(_x: u32, y: u32, ) -> Weight { - Weight::from_ref_time(46_813_000 as RefTimeWeight) + Weight::from_ref_time(46_813_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(125_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(125_000 as u64).saturating_mul(y as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion ProposalOf (r:1 w:1) @@ -366,11 +366,11 @@ impl WeightInfo for () { // Storage: AllianceMotion Voting (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - Weight::from_ref_time(35_316_000 as RefTimeWeight) + Weight::from_ref_time(35_316_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(172_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -381,15 +381,15 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(36_245_000 as RefTimeWeight) + Weight::from_ref_time(36_245_000 as u64) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(336_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(336_000 as u64).saturating_mul(x as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(109_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(109_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(178_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(178_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -401,17 +401,17 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(48_088_000 as RefTimeWeight) + Weight::from_ref_time(48_088_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(194_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(194_000 as u64).saturating_mul(x as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(93_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(93_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(201_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -423,13 +423,13 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(43_374_000 as RefTimeWeight) + Weight::from_ref_time(43_374_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(101_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(101_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(182_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(182_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:1 w:0) // Storage: AllianceMotion Voting (r:1 w:1) @@ -442,13 +442,13 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(42_798_000 as RefTimeWeight) + Weight::from_ref_time(42_798_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: Alliance Members (r:3 w:3) @@ -465,68 +465,68 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 100]`. 
/// The range of component `m` is `[0, 100]`. fn force_set_members(x: u32, y: u32, z: u32, p: u32, c: u32, m: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 221_000 - .saturating_add(Weight::from_ref_time(1_294_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_294_000 as u64).saturating_mul(x as u64)) // Standard Error: 23_000 - .saturating_add(Weight::from_ref_time(231_000 as RefTimeWeight).saturating_mul(y as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(231_000 as u64).saturating_mul(y as u64)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(z as u64)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(9_371_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_371_000 as u64).saturating_mul(p as u64)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_673_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(11_673_000 as u64).saturating_mul(c as u64)) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_581_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(11_581_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(c as u64))) + .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(m as u64))) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(p as u64))) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(c as u64))) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(m as u64))) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - Weight::from_ref_time(18_721_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_721_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - Weight::from_ref_time(21_887_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(21_887_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - Weight::from_ref_time(23_052_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 
as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(23_052_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - Weight::from_ref_time(54_504_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(54_504_000 as u64) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - Weight::from_ref_time(42_601_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(42_601_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Alliance Members (r:3 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - Weight::from_ref_time(37_704_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(37_704_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Alliance Members (r:4 w:2) // Storage: AllianceMotion Proposals (r:1 w:0) @@ -534,18 +534,18 @@ impl WeightInfo for () { // Storage: AllianceMotion Prime (r:0 w:1) // Storage: Alliance RetiringMembers (r:0 w:1) fn give_retirement_notice() -> Weight { - Weight::from_ref_time(40_859_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(40_859_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Alliance RetiringMembers (r:1 w:1) // Storage: Alliance Members (r:1 w:1) // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn retire() -> Weight { - Weight::from_ref_time(43_447_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(43_447_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Alliance Members (r:3 w:1) // Storage: AllianceMotion Proposals (r:1 w:0) @@ -554,34 +554,34 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - Weight::from_ref_time(61_718_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(61_718_000 as u64) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: 
Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(359_000 as RefTimeWeight) + Weight::from_ref_time(359_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_376_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_376_000 as u64).saturating_mul(n as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(112_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(112_000 as u64).saturating_mul(l as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 145_000 - .saturating_add(Weight::from_ref_time(20_932_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(20_932_000 as u64).saturating_mul(n as u64)) // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(3_649_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_649_000 as u64).saturating_mul(l as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs index d59567e00ef69..cba78f12c218c 100644 --- a/frame/assets/src/weights.rs +++ b/frame/assets/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_assets. 
@@ -74,15 +74,15 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - Weight::from_ref_time(27_167_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(27_167_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - Weight::from_ref_time(15_473_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(15_473_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:5002 w:5001) @@ -90,168 +90,168 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Metadata (r:1 w:0) // Storage: Assets Approvals (r:501 w:500) fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_145_000 as u64).saturating_mul(c as u64)) // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(19_333_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(19_333_000 as u64).saturating_mul(s as u64)) // Standard Error: 375_000 - .saturating_add(Weight::from_ref_time(17_046_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) - .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(17_046_000 as u64).saturating_mul(a as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(c as u64))) + .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(a as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(c as u64))) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(a as u64))) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - Weight::from_ref_time(30_819_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(30_819_000 as u64) + 
.saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - Weight::from_ref_time(35_212_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(35_212_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(47_401_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(47_401_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - Weight::from_ref_time(42_300_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(42_300_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - Weight::from_ref_time(47_946_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(47_946_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - Weight::from_ref_time(21_670_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(21_670_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - Weight::from_ref_time(21_503_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(21_503_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - Weight::from_ref_time(18_158_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_158_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - Weight::from_ref_time(18_525_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_525_000 as u64) + 
.saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - Weight::from_ref_time(19_858_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(19_858_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(18_045_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_045_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn set_metadata(n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_395_000 as RefTimeWeight) + Weight::from_ref_time(32_395_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(n as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(32_893_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(32_893_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(19_586_000 as RefTimeWeight) + Weight::from_ref_time(19_586_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - Weight::from_ref_time(32_478_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(32_478_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - Weight::from_ref_time(17_143_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - 
.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(17_143_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(36_389_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(36_389_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Assets Approvals (r:1 w:1) // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - Weight::from_ref_time(61_854_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(61_854_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(36_759_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(36_759_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - Weight::from_ref_time(37_753_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(37_753_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } } @@ -259,15 +259,15 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - Weight::from_ref_time(27_167_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(27_167_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - Weight::from_ref_time(15_473_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(15_473_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:5002 w:5001) @@ -275,167 +275,167 @@ impl WeightInfo for () { // Storage: Assets Metadata (r:1 w:0) // Storage: Assets Approvals (r:501 w:500) fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_145_000 as u64).saturating_mul(c as u64)) // Standard Error: 37_000 - 
.saturating_add(Weight::from_ref_time(19_333_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(19_333_000 as u64).saturating_mul(s as u64)) // Standard Error: 375_000 - .saturating_add(Weight::from_ref_time(17_046_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(17_046_000 as u64).saturating_mul(a as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(c as u64))) + .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(a as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(c as u64))) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(a as u64))) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - Weight::from_ref_time(30_819_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(30_819_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - Weight::from_ref_time(35_212_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(35_212_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(47_401_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(47_401_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - Weight::from_ref_time(42_300_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(42_300_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as 
u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - Weight::from_ref_time(47_946_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(47_946_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - Weight::from_ref_time(21_670_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(21_670_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - Weight::from_ref_time(21_503_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(21_503_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - Weight::from_ref_time(18_158_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_158_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - Weight::from_ref_time(18_525_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_525_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - Weight::from_ref_time(19_858_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(19_858_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(18_045_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_045_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn set_metadata(n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_395_000 as RefTimeWeight) + Weight::from_ref_time(32_395_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(n as u64)) // Standard Error: 0 - 
.saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(32_893_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(32_893_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(19_586_000 as RefTimeWeight) + Weight::from_ref_time(19_586_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - Weight::from_ref_time(32_478_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(32_478_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - Weight::from_ref_time(17_143_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(17_143_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(36_389_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(36_389_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Assets Approvals (r:1 w:1) // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - Weight::from_ref_time(61_854_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(61_854_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(36_759_000 as RefTimeWeight) 
- .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(36_759_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - Weight::from_ref_time(37_753_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(37_753_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } diff --git a/frame/bags-list/src/weights.rs b/frame/bags-list/src/weights.rs index 049967e3024c0..46f001972c519 100644 --- a/frame/bags-list/src/weights.rs +++ b/frame/bags-list/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bags_list. @@ -57,18 +57,18 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:4 w:4) // Storage: BagsList ListBags (r:1 w:1) fn rebag_non_terminal() -> Weight { - Weight::from_ref_time(55_040_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(55_040_000 as u64) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn rebag_terminal() -> Weight { - Weight::from_ref_time(53_671_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(53_671_000 as u64) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: BagsList ListNodes (r:4 w:4) // Storage: Staking Bonded (r:2 w:0) @@ -76,9 +76,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: BagsList ListBags (r:1 w:1) fn put_in_front_of() -> Weight { - Weight::from_ref_time(56_410_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(10 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(56_410_000 as u64) + .saturating_add(T::DbWeight::get().reads(10 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } } @@ -89,18 +89,18 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:4 w:4) // Storage: BagsList ListBags (r:1 w:1) fn rebag_non_terminal() -> Weight { - Weight::from_ref_time(55_040_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(55_040_000 as u64) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags 
(r:2 w:2) fn rebag_terminal() -> Weight { - Weight::from_ref_time(53_671_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(53_671_000 as u64) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: BagsList ListNodes (r:4 w:4) // Storage: Staking Bonded (r:2 w:0) @@ -108,8 +108,8 @@ impl WeightInfo for () { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: BagsList ListBags (r:1 w:1) fn put_in_front_of() -> Weight { - Weight::from_ref_time(56_410_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(10 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(56_410_000 as u64) + .saturating_add(RocksDbWeight::get().reads(10 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } } diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index 17b2541e0a998..42d165e61a38e 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_balances. @@ -58,45 +58,45 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(41_860_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(41_860_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - Weight::from_ref_time(32_760_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(32_760_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - Weight::from_ref_time(22_279_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(22_279_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - Weight::from_ref_time(25_488_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_488_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - Weight::from_ref_time(42_190_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(42_190_000 as u64) + 
.saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - Weight::from_ref_time(37_789_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(37_789_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn force_unreserve() -> Weight { - Weight::from_ref_time(20_056_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(20_056_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } } @@ -104,44 +104,44 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(41_860_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(41_860_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - Weight::from_ref_time(32_760_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(32_760_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - Weight::from_ref_time(22_279_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(22_279_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - Weight::from_ref_time(25_488_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_488_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - Weight::from_ref_time(42_190_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(42_190_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - Weight::from_ref_time(37_789_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(37_789_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn force_unreserve() -> 
Weight { - Weight::from_ref_time(20_056_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(20_056_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/benchmarking/src/weights.rs b/frame/benchmarking/src/weights.rs index 9a40e488b7243..62117a6f65b07 100644 --- a/frame/benchmarking/src/weights.rs +++ b/frame/benchmarking/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for frame_benchmarking. @@ -58,75 +58,75 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn addition(_i: u32, ) -> Weight { - Weight::from_ref_time(103_000 as RefTimeWeight) + Weight::from_ref_time(103_000 as u64) } fn subtraction(_i: u32, ) -> Weight { - Weight::from_ref_time(105_000 as RefTimeWeight) + Weight::from_ref_time(105_000 as u64) } fn multiplication(_i: u32, ) -> Weight { - Weight::from_ref_time(113_000 as RefTimeWeight) + Weight::from_ref_time(113_000 as u64) } fn division(_i: u32, ) -> Weight { - Weight::from_ref_time(102_000 as RefTimeWeight) + Weight::from_ref_time(102_000 as u64) } fn hashing(_i: u32, ) -> Weight { - Weight::from_ref_time(20_865_902_000 as RefTimeWeight) + Weight::from_ref_time(20_865_902_000 as u64) } fn sr25519_verification(i: u32, ) -> Weight { - Weight::from_ref_time(319_000 as RefTimeWeight) + Weight::from_ref_time(319_000 as u64) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(47_171_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(47_171_000 as u64).saturating_mul(i as u64)) } // Storage: Skipped Metadata (r:0 w:0) fn storage_read(i: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_110_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(2_110_000 as u64).saturating_mul(i as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) } // Storage: Skipped Metadata (r:0 w:0) fn storage_write(i: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(372_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(372_000 as u64).saturating_mul(i as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } } // For backwards compatibility and tests impl WeightInfo for () { fn addition(_i: u32, ) -> Weight { - Weight::from_ref_time(103_000 as RefTimeWeight) + Weight::from_ref_time(103_000 as u64) } fn subtraction(_i: u32, ) -> Weight { - Weight::from_ref_time(105_000 as RefTimeWeight) + Weight::from_ref_time(105_000 as u64) } fn multiplication(_i: u32, ) -> Weight { - Weight::from_ref_time(113_000 as RefTimeWeight) + 
Weight::from_ref_time(113_000 as u64) } fn division(_i: u32, ) -> Weight { - Weight::from_ref_time(102_000 as RefTimeWeight) + Weight::from_ref_time(102_000 as u64) } fn hashing(_i: u32, ) -> Weight { - Weight::from_ref_time(20_865_902_000 as RefTimeWeight) + Weight::from_ref_time(20_865_902_000 as u64) } fn sr25519_verification(i: u32, ) -> Weight { - Weight::from_ref_time(319_000 as RefTimeWeight) + Weight::from_ref_time(319_000 as u64) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(47_171_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(47_171_000 as u64).saturating_mul(i as u64)) } // Storage: Skipped Metadata (r:0 w:0) fn storage_read(i: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_110_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(2_110_000 as u64).saturating_mul(i as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) } // Storage: Skipped Metadata (r:0 w:0) fn storage_write(i: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(372_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(372_000 as u64).saturating_mul(i as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } } diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index 0bb9a16d3cd8a..7255ece5c223a 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_bounties. 
@@ -65,88 +65,88 @@ impl WeightInfo for SubstrateWeight { // Storage: Bounties BountyDescriptions (r:0 w:1) // Storage: Bounties Bounties (r:0 w:1) fn propose_bounty(_d: u32, ) -> Weight { - Weight::from_ref_time(28_903_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(28_903_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) fn approve_bounty() -> Weight { - Weight::from_ref_time(10_997_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(10_997_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) fn propose_curator() -> Weight { - Weight::from_ref_time(8_967_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(8_967_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - Weight::from_ref_time(28_665_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(28_665_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - Weight::from_ref_time(25_141_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(25_141_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) fn award_bounty() -> Weight { - Weight::from_ref_time(21_295_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(21_295_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn claim_bounty() -> Weight { - Weight::from_ref_time(67_951_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(67_951_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_proposed() -> Weight { - Weight::from_ref_time(33_654_000 as 
RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(33_654_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:2 w:2) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_active() -> Weight { - Weight::from_ref_time(50_582_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(50_582_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Bounties Bounties (r:1 w:1) fn extend_bounty_expiry() -> Weight { - Weight::from_ref_time(18_322_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_322_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:2 w:2) fn spend_funds(b: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(29_233_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(29_233_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(b as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(b as u64))) } } @@ -157,87 +157,87 @@ impl WeightInfo for () { // Storage: Bounties BountyDescriptions (r:0 w:1) // Storage: Bounties Bounties (r:0 w:1) fn propose_bounty(_d: u32, ) -> Weight { - Weight::from_ref_time(28_903_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(28_903_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) fn approve_bounty() -> Weight { - Weight::from_ref_time(10_997_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(10_997_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) fn propose_curator() -> Weight { - Weight::from_ref_time(8_967_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 
as RefTimeWeight)) + Weight::from_ref_time(8_967_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - Weight::from_ref_time(28_665_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(28_665_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - Weight::from_ref_time(25_141_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(25_141_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) fn award_bounty() -> Weight { - Weight::from_ref_time(21_295_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(21_295_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn claim_bounty() -> Weight { - Weight::from_ref_time(67_951_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(67_951_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_proposed() -> Weight { - Weight::from_ref_time(33_654_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(33_654_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) // Storage: System Account (r:2 w:2) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_active() -> Weight { - Weight::from_ref_time(50_582_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(50_582_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Bounties Bounties (r:1 w:1) fn extend_bounty_expiry() -> Weight { - Weight::from_ref_time(18_322_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_322_000 as u64) + 
.saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:2 w:2) fn spend_funds(b: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(29_233_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(29_233_000 as u64).saturating_mul(b as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(b as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(b as u64))) } } diff --git a/frame/child-bounties/src/weights.rs b/frame/child-bounties/src/weights.rs index d806a676356b6..564fb0ae23d15 100644 --- a/frame/child-bounties/src/weights.rs +++ b/frame/child-bounties/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_child_bounties. @@ -64,51 +64,51 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) // Storage: ChildBounties ChildBounties (r:0 w:1) fn add_child_bounty(d: u32, ) -> Weight { - Weight::from_ref_time(51_064_000 as RefTimeWeight) + Weight::from_ref_time(51_064_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(d as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) fn propose_curator() -> Weight { - Weight::from_ref_time(15_286_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(15_286_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - Weight::from_ref_time(29_929_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(29_929_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // 
Storage: Bounties Bounties (r:1 w:0) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - Weight::from_ref_time(32_449_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(32_449_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) fn award_child_bounty() -> Weight { - Weight::from_ref_time(23_793_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(23_793_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn claim_child_bounty() -> Weight { - Weight::from_ref_time(67_529_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(67_529_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -117,9 +117,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:2 w:2) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_added() -> Weight { - Weight::from_ref_time(48_436_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(48_436_000 as u64) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -128,9 +128,9 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_active() -> Weight { - Weight::from_ref_time(58_044_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(7 as RefTimeWeight)) + Weight::from_ref_time(58_044_000 as u64) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().writes(7 as u64)) } } @@ -143,51 +143,51 @@ impl WeightInfo for () { // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) // Storage: ChildBounties ChildBounties (r:0 w:1) fn add_child_bounty(d: u32, ) -> Weight { - Weight::from_ref_time(51_064_000 as RefTimeWeight) + Weight::from_ref_time(51_064_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(d as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: 
ChildBounties ChildBounties (r:1 w:1) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) fn propose_curator() -> Weight { - Weight::from_ref_time(15_286_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(15_286_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - Weight::from_ref_time(29_929_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(29_929_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: Bounties Bounties (r:1 w:0) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - Weight::from_ref_time(32_449_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(32_449_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) fn award_child_bounty() -> Weight { - Weight::from_ref_time(23_793_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(23_793_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:3 w:3) // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn claim_child_bounty() -> Weight { - Weight::from_ref_time(67_529_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(67_529_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -196,9 +196,9 @@ impl WeightInfo for () { // Storage: System Account (r:2 w:2) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_added() -> Weight { - Weight::from_ref_time(48_436_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(48_436_000 as u64) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) @@ -207,8 +207,8 @@ impl WeightInfo for () { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_active() -> Weight { - Weight::from_ref_time(58_044_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(7 as 
RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(7 as RefTimeWeight)) + Weight::from_ref_time(58_044_000 as u64) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().writes(7 as u64)) } } diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index fcc0e7134f51c..50029046230af 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_collective. @@ -64,36 +64,36 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Voting (r:100 w:100) // Storage: Council Prime (r:0 w:1) fn set_members(m: u32, n: u32, p: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(10_280_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(10_280_000 as u64).saturating_mul(m as u64)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(126_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(126_000 as u64).saturating_mul(n as u64)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(13_310_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(13_310_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(p as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) } // Storage: Council Members (r:1 w:0) fn execute(b: u32, m: u32, ) -> Weight { - Weight::from_ref_time(16_819_000 as RefTimeWeight) + Weight::from_ref_time(16_819_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(33_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:0) fn propose_execute(b: u32, m: u32, ) -> Weight { - Weight::from_ref_time(18_849_000 as RefTimeWeight) + Weight::from_ref_time(18_849_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - 
.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) @@ -101,52 +101,52 @@ impl WeightInfo for SubstrateWeight { // Storage: Council ProposalCount (r:1 w:1) // Storage: Council Voting (r:0 w:1) fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(22_204_000 as RefTimeWeight) + Weight::from_ref_time(22_204_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(8_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(8_000 as u64).saturating_mul(b as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(49_000 as u64).saturating_mul(m as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(180_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(180_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Council Members (r:1 w:0) // Storage: Council Voting (r:1 w:1) fn vote(m: u32, ) -> Weight { - Weight::from_ref_time(30_941_000 as RefTimeWeight) + Weight::from_ref_time(30_941_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(77_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(32_485_000 as RefTimeWeight) + Weight::from_ref_time(32_485_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(39_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(39_000 as u64).saturating_mul(m as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_487_000 as RefTimeWeight) + Weight::from_ref_time(33_487_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(b as 
u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(66_000 as u64).saturating_mul(m as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(157_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(157_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) @@ -154,13 +154,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_494_000 as RefTimeWeight) + Weight::from_ref_time(33_494_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(58_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(58_000 as u64).saturating_mul(m as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) @@ -168,25 +168,25 @@ impl WeightInfo for SubstrateWeight { // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(36_566_000 as RefTimeWeight) + Weight::from_ref_time(36_566_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(b as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(63_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(63_000 as u64).saturating_mul(m as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Council Proposals (r:1 w:1) // Storage: Council Voting (r:0 w:1) // Storage: Council ProposalOf (r:0 w:1) fn disapprove_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(20_159_000 as RefTimeWeight) + Weight::from_ref_time(20_159_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(173_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(1 as 
u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } } @@ -197,36 +197,36 @@ impl WeightInfo for () { // Storage: Council Voting (r:100 w:100) // Storage: Council Prime (r:0 w:1) fn set_members(m: u32, n: u32, p: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(10_280_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(10_280_000 as u64).saturating_mul(m as u64)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(126_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(126_000 as u64).saturating_mul(n as u64)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(13_310_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(13_310_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(p as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(p as u64))) } // Storage: Council Members (r:1 w:0) fn execute(b: u32, m: u32, ) -> Weight { - Weight::from_ref_time(16_819_000 as RefTimeWeight) + Weight::from_ref_time(16_819_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(33_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:0) fn propose_execute(b: u32, m: u32, ) -> Weight { - Weight::from_ref_time(18_849_000 as RefTimeWeight) + Weight::from_ref_time(18_849_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) @@ -234,52 +234,52 @@ impl WeightInfo for () { // Storage: Council ProposalCount (r:1 w:1) // Storage: Council Voting (r:0 w:1) fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(22_204_000 as RefTimeWeight) + Weight::from_ref_time(22_204_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(8_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + 
.saturating_add(Weight::from_ref_time(8_000 as u64).saturating_mul(b as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(49_000 as u64).saturating_mul(m as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(180_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(180_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Council Members (r:1 w:0) // Storage: Council Voting (r:1 w:1) fn vote(m: u32, ) -> Weight { - Weight::from_ref_time(30_941_000 as RefTimeWeight) + Weight::from_ref_time(30_941_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(77_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(32_485_000 as RefTimeWeight) + Weight::from_ref_time(32_485_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(39_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(39_000 as u64).saturating_mul(m as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_487_000 as RefTimeWeight) + Weight::from_ref_time(33_487_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(b as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(66_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(66_000 as u64).saturating_mul(m as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(157_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(157_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // 
Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) @@ -287,13 +287,13 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) fn close_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_494_000 as RefTimeWeight) + Weight::from_ref_time(33_494_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(58_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(58_000 as u64).saturating_mul(m as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Council Voting (r:1 w:1) // Storage: Council Members (r:1 w:0) @@ -301,24 +301,24 @@ impl WeightInfo for () { // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(36_566_000 as RefTimeWeight) + Weight::from_ref_time(36_566_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(b as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(63_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(63_000 as u64).saturating_mul(m as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Council Proposals (r:1 w:1) // Storage: Council Voting (r:0 w:1) // Storage: Council ProposalOf (r:0 w:1) fn disapprove_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(20_159_000 as RefTimeWeight) + Weight::from_ref_time(20_159_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(173_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index ee0db17ade95b..7fda4bca36d2e 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -110,7 +110,7 @@ use frame_support::{ dispatch::Dispatchable, ensure, traits::{ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time}, - weights::{DispatchClass, GetDispatchInfo, Pays, PostDispatchInfo, RefTimeWeight, Weight}, + weights::{DispatchClass, GetDispatchInfo, Pays, PostDispatchInfo, Weight}, BoundedVec, }; use frame_system::{limits::BlockWeights, Pallet as System}; @@ -214,7 +214,7 @@ impl, 
const P: u32> Get for DefaultContractAccessWe .get(DispatchClass::Normal) .max_total .unwrap_or(block_weights.max_block) / - RefTimeWeight::from(P) + u64::from(P) } } diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index b5c80642a5356..63867a0cc7448 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -21,7 +21,7 @@ use crate::{weights::WeightInfo, Config}; use codec::{Decode, Encode}; -use frame_support::{weights::RefTimeWeight, DefaultNoBound}; +use frame_support::DefaultNoBound; use pallet_contracts_proc_macro::{ScheduleDebug, WeightDebug}; use scale_info::TypeInfo; #[cfg(feature = "std")] @@ -255,166 +255,166 @@ pub struct InstructionWeights { #[scale_info(skip_type_params(T))] pub struct HostFnWeights { /// Weight of calling `seal_caller`. - pub caller: RefTimeWeight, + pub caller: u64, /// Weight of calling `seal_is_contract`. - pub is_contract: RefTimeWeight, + pub is_contract: u64, /// Weight of calling `seal_code_hash`. - pub code_hash: RefTimeWeight, + pub code_hash: u64, /// Weight of calling `seal_own_code_hash`. - pub own_code_hash: RefTimeWeight, + pub own_code_hash: u64, /// Weight of calling `seal_caller_is_origin`. - pub caller_is_origin: RefTimeWeight, + pub caller_is_origin: u64, /// Weight of calling `seal_address`. - pub address: RefTimeWeight, + pub address: u64, /// Weight of calling `seal_gas_left`. - pub gas_left: RefTimeWeight, + pub gas_left: u64, /// Weight of calling `seal_balance`. - pub balance: RefTimeWeight, + pub balance: u64, /// Weight of calling `seal_value_transferred`. - pub value_transferred: RefTimeWeight, + pub value_transferred: u64, /// Weight of calling `seal_minimum_balance`. - pub minimum_balance: RefTimeWeight, + pub minimum_balance: u64, /// Weight of calling `seal_block_number`. - pub block_number: RefTimeWeight, + pub block_number: u64, /// Weight of calling `seal_now`. - pub now: RefTimeWeight, + pub now: u64, /// Weight of calling `seal_weight_to_fee`. - pub weight_to_fee: RefTimeWeight, + pub weight_to_fee: u64, /// Weight of calling `gas`. - pub gas: RefTimeWeight, + pub gas: u64, /// Weight of calling `seal_input`. - pub input: RefTimeWeight, + pub input: u64, /// Weight per input byte copied to contract memory by `seal_input`. - pub input_per_byte: RefTimeWeight, + pub input_per_byte: u64, /// Weight of calling `seal_return`. - pub r#return: RefTimeWeight, + pub r#return: u64, /// Weight per byte returned through `seal_return`. - pub return_per_byte: RefTimeWeight, + pub return_per_byte: u64, /// Weight of calling `seal_terminate`. - pub terminate: RefTimeWeight, + pub terminate: u64, /// Weight of calling `seal_random`. - pub random: RefTimeWeight, + pub random: u64, /// Weight of calling `seal_reposit_event`. - pub deposit_event: RefTimeWeight, + pub deposit_event: u64, /// Weight per topic supplied to `seal_deposit_event`. - pub deposit_event_per_topic: RefTimeWeight, + pub deposit_event_per_topic: u64, /// Weight per byte of an event deposited through `seal_deposit_event`. - pub deposit_event_per_byte: RefTimeWeight, + pub deposit_event_per_byte: u64, /// Weight of calling `seal_debug_message`. - pub debug_message: RefTimeWeight, + pub debug_message: u64, /// Weight of calling `seal_set_storage`. - pub set_storage: RefTimeWeight, + pub set_storage: u64, /// Weight per written byten of an item stored with `seal_set_storage`. 
- pub set_storage_per_new_byte: RefTimeWeight, + pub set_storage_per_new_byte: u64, /// Weight per overwritten byte of an item stored with `seal_set_storage`. - pub set_storage_per_old_byte: RefTimeWeight, + pub set_storage_per_old_byte: u64, /// Weight of calling `seal_set_code_hash`. - pub set_code_hash: RefTimeWeight, + pub set_code_hash: u64, /// Weight of calling `seal_clear_storage`. - pub clear_storage: RefTimeWeight, + pub clear_storage: u64, /// Weight of calling `seal_clear_storage` per byte of the stored item. - pub clear_storage_per_byte: RefTimeWeight, + pub clear_storage_per_byte: u64, /// Weight of calling `seal_contains_storage`. - pub contains_storage: RefTimeWeight, + pub contains_storage: u64, /// Weight of calling `seal_contains_storage` per byte of the stored item. - pub contains_storage_per_byte: RefTimeWeight, + pub contains_storage_per_byte: u64, /// Weight of calling `seal_get_storage`. - pub get_storage: RefTimeWeight, + pub get_storage: u64, /// Weight per byte of an item received via `seal_get_storage`. - pub get_storage_per_byte: RefTimeWeight, + pub get_storage_per_byte: u64, /// Weight of calling `seal_take_storage`. - pub take_storage: RefTimeWeight, + pub take_storage: u64, /// Weight per byte of an item received via `seal_take_storage`. - pub take_storage_per_byte: RefTimeWeight, + pub take_storage_per_byte: u64, /// Weight of calling `seal_transfer`. - pub transfer: RefTimeWeight, + pub transfer: u64, /// Weight of calling `seal_call`. - pub call: RefTimeWeight, + pub call: u64, /// Weight of calling `seal_delegate_call`. - pub delegate_call: RefTimeWeight, + pub delegate_call: u64, /// Weight surcharge that is claimed if `seal_call` does a balance transfer. - pub call_transfer_surcharge: RefTimeWeight, + pub call_transfer_surcharge: u64, /// Weight per byte that is cloned by supplying the `CLONE_INPUT` flag. - pub call_per_cloned_byte: RefTimeWeight, + pub call_per_cloned_byte: u64, /// Weight of calling `seal_instantiate`. - pub instantiate: RefTimeWeight, + pub instantiate: u64, /// Weight surcharge that is claimed if `seal_instantiate` does a balance transfer. - pub instantiate_transfer_surcharge: RefTimeWeight, + pub instantiate_transfer_surcharge: u64, /// Weight per salt byte supplied to `seal_instantiate`. - pub instantiate_per_salt_byte: RefTimeWeight, + pub instantiate_per_salt_byte: u64, /// Weight of calling `seal_hash_sha_256`. - pub hash_sha2_256: RefTimeWeight, + pub hash_sha2_256: u64, /// Weight per byte hashed by `seal_hash_sha_256`. - pub hash_sha2_256_per_byte: RefTimeWeight, + pub hash_sha2_256_per_byte: u64, /// Weight of calling `seal_hash_keccak_256`. - pub hash_keccak_256: RefTimeWeight, + pub hash_keccak_256: u64, /// Weight per byte hashed by `seal_hash_keccak_256`. - pub hash_keccak_256_per_byte: RefTimeWeight, + pub hash_keccak_256_per_byte: u64, /// Weight of calling `seal_hash_blake2_256`. - pub hash_blake2_256: RefTimeWeight, + pub hash_blake2_256: u64, /// Weight per byte hashed by `seal_hash_blake2_256`. - pub hash_blake2_256_per_byte: RefTimeWeight, + pub hash_blake2_256_per_byte: u64, /// Weight of calling `seal_hash_blake2_128`. - pub hash_blake2_128: RefTimeWeight, + pub hash_blake2_128: u64, /// Weight per byte hashed by `seal_hash_blake2_128`. - pub hash_blake2_128_per_byte: RefTimeWeight, + pub hash_blake2_128_per_byte: u64, /// Weight of calling `seal_ecdsa_recover`. - pub ecdsa_recover: RefTimeWeight, + pub ecdsa_recover: u64, /// Weight of calling `seal_ecdsa_to_eth_address`. 
- pub ecdsa_to_eth_address: RefTimeWeight, + pub ecdsa_to_eth_address: u64, /// The type parameter is used in the default implementation. #[codec(skip)] @@ -441,13 +441,13 @@ macro_rules! cost_args { macro_rules! cost_batched_args { ($name:ident, $( $arg: expr ),+) => { - cost_args!($name, $( $arg ),+) / RefTimeWeight::from(API_BENCHMARK_BATCH_SIZE) + cost_args!($name, $( $arg ),+) / u64::from(API_BENCHMARK_BATCH_SIZE) } } macro_rules! cost_instr_no_params_with_batch_size { ($name:ident, $batch_size:expr) => { - (cost_args!($name, 1) / RefTimeWeight::from($batch_size)) as u32 + (cost_args!($name, 1) / u64::from($batch_size)) as u32 }; } diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 9c340b5c93deb..ec49a6325c125 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -28,7 +28,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{DispatchError, DispatchResult}, storage::child::{self, ChildInfo}, - weights::{RefTimeWeight, Weight}, + weights::Weight, }; use scale_info::TypeInfo; use sp_core::crypto::UncheckedFrom; @@ -230,7 +230,7 @@ where let weight_per_key = (T::WeightInfo::on_initialize_per_trie_key(1) - T::WeightInfo::on_initialize_per_trie_key(0)) .ref_time(); - let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as RefTimeWeight); + let decoding_weight = weight_per_queue_item.saturating_mul(queue_len as u64); // `weight_per_key` being zero makes no sense and would constitute a failure to // benchmark properly. We opt for not removing any keys at all in this case. @@ -286,7 +286,7 @@ where >::put(queue); let ref_time_weight = weight_limit .ref_time() - .saturating_sub(weight_per_key.saturating_mul(remaining_key_budget as RefTimeWeight)); + .saturating_sub(weight_per_key.saturating_mul(u64::from(remaining_key_budget))); Weight::from_ref_time(ref_time_weight) } diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index daee41bc63f32..f1f80ba7a43df 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_contracts. @@ -166,37 +166,37 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(1_654_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + Weight::from_ref_time(1_654_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. 
fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(8_564_000 as RefTimeWeight) + Weight::from_ref_time(8_564_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(868_000 as u64).saturating_mul(k as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) } // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 1024]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_944_000 as RefTimeWeight).saturating_mul(q as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_944_000 as u64).saturating_mul(q as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(19_016_000 as RefTimeWeight) + Weight::from_ref_time(19_016_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(49_000 as u64).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) @@ -204,11 +204,11 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(205_194_000 as RefTimeWeight) + Weight::from_ref_time(205_194_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(53_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(53_000 as u64).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -220,13 +220,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. 
fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(288_487_000 as RefTimeWeight) + Weight::from_ref_time(288_487_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(c as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -236,46 +236,46 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(186_136_000 as RefTimeWeight) + Weight::from_ref_time(186_136_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) fn call() -> Weight { - Weight::from_ref_time(149_232_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(149_232_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. 
fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(51_721_000 as RefTimeWeight) + Weight::from_ref_time(51_721_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(48_000 as u64).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Contracts OwnerInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(30_016_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(30_016_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) fn set_code() -> Weight { - Weight::from_ref_time(27_192_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(27_192_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -283,11 +283,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(206_405_000 as RefTimeWeight) + Weight::from_ref_time(206_405_000 as u64) // Standard Error: 112_000 - .saturating_add(Weight::from_ref_time(40_987_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_987_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -295,12 +295,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(106_220_000 as RefTimeWeight) + Weight::from_ref_time(106_220_000 as u64) // Standard Error: 710_000 - .saturating_add(Weight::from_ref_time(307_648_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(307_648_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -308,12 +308,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(104_498_000 as RefTimeWeight) + Weight::from_ref_time(104_498_000 as u64) // Standard Error: 633_000 - .saturating_add(Weight::from_ref_time(368_901_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(368_901_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -321,11 +321,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(208_696_000 as RefTimeWeight) + Weight::from_ref_time(208_696_000 as u64) // Standard Error: 101_000 - .saturating_add(Weight::from_ref_time(44_445_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(44_445_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -333,11 +333,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(205_612_000 as RefTimeWeight) + Weight::from_ref_time(205_612_000 as u64) // Standard Error: 68_000 - .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_145_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -345,11 +345,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(206_947_000 as RefTimeWeight) + Weight::from_ref_time(206_947_000 as u64) // Standard Error: 107_000 - .saturating_add(Weight::from_ref_time(40_789_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_789_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -357,11 +357,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(208_692_000 as RefTimeWeight) + Weight::from_ref_time(208_692_000 as u64) // Standard Error: 109_000 - .saturating_add(Weight::from_ref_time(40_600_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_600_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -369,11 +369,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(209_811_000 as RefTimeWeight) + Weight::from_ref_time(209_811_000 as u64) // Standard Error: 208_000 - .saturating_add(Weight::from_ref_time(116_831_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(116_831_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -381,11 +381,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(207_406_000 as RefTimeWeight) + Weight::from_ref_time(207_406_000 as u64) // Standard Error: 117_000 - .saturating_add(Weight::from_ref_time(40_702_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_702_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -393,11 +393,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(209_260_000 as RefTimeWeight) + Weight::from_ref_time(209_260_000 as u64) // Standard Error: 130_000 - .saturating_add(Weight::from_ref_time(40_479_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_479_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -405,11 +405,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(206_448_000 as RefTimeWeight) + Weight::from_ref_time(206_448_000 as u64) // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(40_134_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_134_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -417,11 +417,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(206_969_000 as RefTimeWeight) + Weight::from_ref_time(206_969_000 as u64) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(40_251_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_251_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -430,11 +430,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(211_611_000 as RefTimeWeight) + Weight::from_ref_time(211_611_000 as u64) // Standard Error: 175_000 - .saturating_add(Weight::from_ref_time(98_675_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(98_675_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -442,11 +442,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(134_484_000 as RefTimeWeight) + Weight::from_ref_time(134_484_000 as u64) // Standard Error: 57_000 - .saturating_add(Weight::from_ref_time(19_329_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(19_329_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -454,11 +454,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(208_556_000 as RefTimeWeight) + Weight::from_ref_time(208_556_000 as u64) // Standard Error: 125_000 - .saturating_add(Weight::from_ref_time(40_328_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_328_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -466,11 +466,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(268_886_000 as RefTimeWeight) + Weight::from_ref_time(268_886_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(9_627_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_627_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -478,9 +478,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 1]`. 
fn seal_return(_r: u32, ) -> Weight { - Weight::from_ref_time(203_591_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(203_591_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -488,11 +488,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(204_258_000 as RefTimeWeight) + Weight::from_ref_time(204_258_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(183_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(183_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -502,13 +502,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(206_625_000 as RefTimeWeight) + Weight::from_ref_time(206_625_000 as u64) // Standard Error: 672_000 - .saturating_add(Weight::from_ref_time(59_377_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(59_377_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -517,11 +517,11 @@ impl WeightInfo for SubstrateWeight { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(208_866_000 as RefTimeWeight) + Weight::from_ref_time(208_866_000 as u64) // Standard Error: 164_000 - .saturating_add(Weight::from_ref_time(133_438_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(133_438_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -529,11 +529,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(220_860_000 as RefTimeWeight) + Weight::from_ref_time(220_860_000 as u64) // Standard Error: 209_000 - .saturating_add(Weight::from_ref_time(239_951_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(239_951_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -543,15 +543,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(439_782_000 as RefTimeWeight) + Weight::from_ref_time(439_782_000 as u64) // Standard Error: 1_643_000 - .saturating_add(Weight::from_ref_time(264_687_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(264_687_000 as u64).saturating_mul(t as u64)) // Standard Error: 323_000 - .saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(67_636_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(t as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -559,128 +559,128 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(140_280_000 as RefTimeWeight) + Weight::from_ref_time(140_280_000 as u64) // Standard Error: 82_000 - .saturating_add(Weight::from_ref_time(32_717_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(32_717_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(161_247_000 as RefTimeWeight) + Weight::from_ref_time(161_247_000 as u64) // Standard Error: 883_000 - .saturating_add(Weight::from_ref_time(423_997_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(423_997_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(529_247_000 as RefTimeWeight) + Weight::from_ref_time(529_247_000 as u64) // Standard Error: 2_745_000 - .saturating_add(Weight::from_ref_time(85_282_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(85_282_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(55 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(53 as u64)) + .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(529_812_000 as RefTimeWeight) + Weight::from_ref_time(529_812_000 as u64) // Standard Error: 2_513_000 - .saturating_add(Weight::from_ref_time(74_554_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(74_554_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(55 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(53 as u64)) + .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(184_803_000 as RefTimeWeight) + Weight::from_ref_time(184_803_000 as u64) // Standard Error: 733_000 - .saturating_add(Weight::from_ref_time(404_933_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(404_933_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(500_958_000 as RefTimeWeight) + Weight::from_ref_time(500_958_000 as u64) // Standard Error: 2_980_000 - .saturating_add(Weight::from_ref_time(75_996_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(52 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(75_996_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(55 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(52 as u64)) + .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(177_682_000 as RefTimeWeight) + Weight::from_ref_time(177_682_000 as u64) // Standard Error: 743_000 - .saturating_add(Weight::from_ref_time(338_172_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(338_172_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(465_285_000 as RefTimeWeight) + Weight::from_ref_time(465_285_000 as u64) // Standard Error: 2_599_000 - .saturating_add(Weight::from_ref_time(155_106_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(155_106_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(55 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(179_118_000 as RefTimeWeight) + Weight::from_ref_time(179_118_000 as u64) // Standard Error: 572_000 - .saturating_add(Weight::from_ref_time(311_083_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(311_083_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(423_056_000 as RefTimeWeight) + Weight::from_ref_time(423_056_000 as u64) // Standard Error: 2_037_000 - .saturating_add(Weight::from_ref_time(69_665_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(54 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(69_665_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(54 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(188_884_000 as RefTimeWeight) + Weight::from_ref_time(188_884_000 as u64) // Standard Error: 761_000 - .saturating_add(Weight::from_ref_time(432_781_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(432_781_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(532_408_000 as RefTimeWeight) + Weight::from_ref_time(532_408_000 as u64) // Standard Error: 3_348_000 - .saturating_add(Weight::from_ref_time(164_943_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(55 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(53 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(164_943_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(55 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(53 as u64)) + .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(n as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -688,13 +688,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(127_181_000 as RefTimeWeight) + Weight::from_ref_time(127_181_000 as u64) // Standard Error: 1_495_000 - .saturating_add(Weight::from_ref_time(1_500_589_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(1_500_589_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -702,13 +702,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_call(r: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 3_803_000 - .saturating_add(Weight::from_ref_time(14_860_909_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(14_860_909_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -716,11 +716,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 6_045_000 - .saturating_add(Weight::from_ref_time(14_797_140_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(14_797_140_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads((79 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -729,15 +729,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(9_196_444_000 as RefTimeWeight) + Weight::from_ref_time(9_196_444_000 as u64) // Standard Error: 20_486_000 - .saturating_add(Weight::from_ref_time(1_458_153_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_458_153_000 as u64).saturating_mul(t as u64)) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(9_718_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(85 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(81 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(9_718_000 as u64).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(85 as u64)) + .saturating_add(T::DbWeight::get().reads((81 as u64).saturating_mul(t as u64))) + .saturating_add(T::DbWeight::get().writes(81 as u64)) + .saturating_add(T::DbWeight::get().writes((81 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -747,13 +747,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. 
fn seal_instantiate(r: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 36_253_000 - .saturating_add(Weight::from_ref_time(21_201_529_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(21_201_529_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().reads((320 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes((320 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:81 w:81) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -764,15 +764,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(12_282_498_000 as RefTimeWeight) + Weight::from_ref_time(12_282_498_000 as u64) // Standard Error: 48_112_000 - .saturating_add(Weight::from_ref_time(720_795_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(720_795_000 as u64).saturating_mul(t as u64)) // Standard Error: 22_000 - .saturating_add(Weight::from_ref_time(124_274_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(167 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(165 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(124_274_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(167 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(t as u64))) + .saturating_add(T::DbWeight::get().writes(165 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -780,11 +780,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(203_959_000 as RefTimeWeight) + Weight::from_ref_time(203_959_000 as u64) // Standard Error: 142_000 - .saturating_add(Weight::from_ref_time(61_311_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(61_311_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -792,11 +792,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(349_915_000 as RefTimeWeight) + Weight::from_ref_time(349_915_000 as u64) // Standard Error: 40_000 - .saturating_add(Weight::from_ref_time(320_652_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(320_652_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -804,11 +804,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(209_219_000 as RefTimeWeight) + Weight::from_ref_time(209_219_000 as u64) // Standard Error: 157_000 - .saturating_add(Weight::from_ref_time(73_728_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_728_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -816,11 +816,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(208_860_000 as RefTimeWeight) + Weight::from_ref_time(208_860_000 as u64) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(245_718_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(245_718_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -828,11 +828,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(206_165_000 as RefTimeWeight) + Weight::from_ref_time(206_165_000 as u64) // Standard Error: 138_000 - .saturating_add(Weight::from_ref_time(51_644_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_644_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -840,11 +840,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(255_955_000 as RefTimeWeight) + Weight::from_ref_time(255_955_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_090_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_090_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -852,11 +852,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(208_153_000 as RefTimeWeight) + Weight::from_ref_time(208_153_000 as u64) // Standard Error: 140_000 - .saturating_add(Weight::from_ref_time(51_264_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_264_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -864,11 +864,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(278_368_000 as RefTimeWeight) + Weight::from_ref_time(278_368_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_006_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_006_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -876,11 +876,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(331_955_000 as RefTimeWeight) + Weight::from_ref_time(331_955_000 as u64) // Standard Error: 1_155_000 - .saturating_add(Weight::from_ref_time(3_069_955_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_069_955_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -888,11 +888,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(207_838_000 as RefTimeWeight) + Weight::from_ref_time(207_838_000 as u64) // Standard Error: 783_000 - .saturating_add(Weight::from_ref_time(2_058_503_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_058_503_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -901,317 +901,317 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 1_567_000 - .saturating_add(Weight::from_ref_time(774_380_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(774_380_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads((79 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes((79 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(73_955_000 as RefTimeWeight) + Weight::from_ref_time(73_955_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(612_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(612_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(74_057_000 as RefTimeWeight) + Weight::from_ref_time(74_057_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(74_137_000 as RefTimeWeight) + Weight::from_ref_time(74_137_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_427_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(73_844_000 as RefTimeWeight) + Weight::from_ref_time(73_844_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_773_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_773_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(73_979_000 as RefTimeWeight) + Weight::from_ref_time(73_979_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_952_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_952_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(73_924_000 as RefTimeWeight) + Weight::from_ref_time(73_924_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(941_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(941_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(73_574_000 as RefTimeWeight) + Weight::from_ref_time(73_574_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_439_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_439_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(73_343_000 as RefTimeWeight) + Weight::from_ref_time(73_343_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_603_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_603_000 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(76_267_000 as RefTimeWeight) + Weight::from_ref_time(76_267_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(74_877_000 as RefTimeWeight) + Weight::from_ref_time(74_877_000 as u64) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(7_144_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(7_144_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(88_665_000 as RefTimeWeight) + Weight::from_ref_time(88_665_000 as u64) // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(9_142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_142_000 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(98_600_000 as RefTimeWeight) + Weight::from_ref_time(98_600_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(469_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(469_000 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(74_555_000 as RefTimeWeight) + Weight::from_ref_time(74_555_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(624_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(624_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(74_329_000 as RefTimeWeight) + Weight::from_ref_time(74_329_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(688_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(688_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(74_612_000 as RefTimeWeight) + Weight::from_ref_time(74_612_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(909_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(909_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(76_906_000 as RefTimeWeight) + Weight::from_ref_time(76_906_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_192_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_192_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(76_979_000 as RefTimeWeight) + Weight::from_ref_time(76_979_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_361_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_361_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(74_370_000 as RefTimeWeight) + Weight::from_ref_time(74_370_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(661_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(661_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(73_584_000 as RefTimeWeight) + Weight::from_ref_time(73_584_000 as u64) // Standard Error: 353_000 - .saturating_add(Weight::from_ref_time(187_114_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(187_114_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(74_206_000 as RefTimeWeight) + Weight::from_ref_time(74_206_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(884_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(884_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(73_992_000 as RefTimeWeight) + Weight::from_ref_time(73_992_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(893_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(893_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(73_985_000 as RefTimeWeight) + Weight::from_ref_time(73_985_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(891_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(891_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(74_117_000 as RefTimeWeight) + Weight::from_ref_time(74_117_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(901_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(901_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(73_981_000 as RefTimeWeight) + Weight::from_ref_time(73_981_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(866_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(866_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(74_104_000 as RefTimeWeight) + Weight::from_ref_time(74_104_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(868_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(74_293_000 as RefTimeWeight) + Weight::from_ref_time(74_293_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(878_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(878_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(74_055_000 as RefTimeWeight) + Weight::from_ref_time(74_055_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_350_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(73_710_000 as RefTimeWeight) + Weight::from_ref_time(73_710_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(73_917_000 as RefTimeWeight) + Weight::from_ref_time(73_917_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_355_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_355_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(74_048_000 as RefTimeWeight) + Weight::from_ref_time(74_048_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(74_029_000 as RefTimeWeight) + Weight::from_ref_time(74_029_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_349_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_349_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(74_267_000 as RefTimeWeight) + Weight::from_ref_time(74_267_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_353_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(73_952_000 as RefTimeWeight) + Weight::from_ref_time(73_952_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_350_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(73_851_000 as RefTimeWeight) + Weight::from_ref_time(73_851_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_368_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_368_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(74_034_000 as RefTimeWeight) + Weight::from_ref_time(74_034_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_348_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_348_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(73_979_000 as RefTimeWeight) + Weight::from_ref_time(73_979_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_353_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(74_000_000 as RefTimeWeight) + Weight::from_ref_time(74_000_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_328_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_328_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(73_883_000 as RefTimeWeight) + Weight::from_ref_time(73_883_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_331_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_331_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(74_216_000 as RefTimeWeight) + Weight::from_ref_time(74_216_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(73_989_000 as RefTimeWeight) + Weight::from_ref_time(73_989_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_998_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_998_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(73_857_000 as RefTimeWeight) + Weight::from_ref_time(73_857_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_073_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_073_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(73_801_000 as RefTimeWeight) + Weight::from_ref_time(73_801_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_027_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_027_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(74_130_000 as RefTimeWeight) + Weight::from_ref_time(74_130_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_064_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_064_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(74_071_000 as RefTimeWeight) + Weight::from_ref_time(74_071_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_327_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_327_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(74_201_000 as RefTimeWeight) + Weight::from_ref_time(74_201_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_330_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_330_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(74_241_000 as RefTimeWeight) + Weight::from_ref_time(74_241_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_321_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_321_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(74_331_000 as RefTimeWeight) + Weight::from_ref_time(74_331_000 as u64) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(1_347_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_347_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(73_674_000 as RefTimeWeight) + Weight::from_ref_time(73_674_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_359_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_359_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(73_807_000 as RefTimeWeight) + Weight::from_ref_time(73_807_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(73_725_000 as RefTimeWeight) + Weight::from_ref_time(73_725_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(73_755_000 as RefTimeWeight) + Weight::from_ref_time(73_755_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) } } @@ -1219,37 +1219,37 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(1_654_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + Weight::from_ref_time(1_654_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(8_564_000 as RefTimeWeight) + Weight::from_ref_time(8_564_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(868_000 as u64).saturating_mul(k as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) } // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 1024]`. 
fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_944_000 as RefTimeWeight).saturating_mul(q as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_944_000 as u64).saturating_mul(q as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(19_016_000 as RefTimeWeight) + Weight::from_ref_time(19_016_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(49_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(49_000 as u64).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) @@ -1257,11 +1257,11 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(205_194_000 as RefTimeWeight) + Weight::from_ref_time(205_194_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(53_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(53_000 as u64).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -1273,13 +1273,13 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(288_487_000 as RefTimeWeight) + Weight::from_ref_time(288_487_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(124_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(c as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -1289,46 +1289,46 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `s` is `[0, 1048576]`. 
fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(186_136_000 as RefTimeWeight) + Weight::from_ref_time(186_136_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) fn call() -> Weight { - Weight::from_ref_time(149_232_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(149_232_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(51_721_000 as RefTimeWeight) + Weight::from_ref_time(51_721_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(48_000 as u64).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Contracts OwnerInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(30_016_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(30_016_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) fn set_code() -> Weight { - Weight::from_ref_time(27_192_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(27_192_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1336,11 +1336,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(206_405_000 as RefTimeWeight) + Weight::from_ref_time(206_405_000 as u64) // Standard Error: 112_000 - .saturating_add(Weight::from_ref_time(40_987_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_987_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1348,12 +1348,12 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(106_220_000 as RefTimeWeight) + Weight::from_ref_time(106_220_000 as u64) // Standard Error: 710_000 - .saturating_add(Weight::from_ref_time(307_648_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(307_648_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1361,12 +1361,12 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(104_498_000 as RefTimeWeight) + Weight::from_ref_time(104_498_000 as u64) // Standard Error: 633_000 - .saturating_add(Weight::from_ref_time(368_901_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(368_901_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1374,11 +1374,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(208_696_000 as RefTimeWeight) + Weight::from_ref_time(208_696_000 as u64) // Standard Error: 101_000 - .saturating_add(Weight::from_ref_time(44_445_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(44_445_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1386,11 +1386,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(205_612_000 as RefTimeWeight) + Weight::from_ref_time(205_612_000 as u64) // Standard Error: 68_000 - .saturating_add(Weight::from_ref_time(17_145_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(17_145_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1398,11 +1398,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(206_947_000 as RefTimeWeight) + Weight::from_ref_time(206_947_000 as u64) // Standard Error: 107_000 - .saturating_add(Weight::from_ref_time(40_789_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_789_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1410,11 +1410,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(208_692_000 as RefTimeWeight) + Weight::from_ref_time(208_692_000 as u64) // Standard Error: 109_000 - .saturating_add(Weight::from_ref_time(40_600_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_600_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1422,11 +1422,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(209_811_000 as RefTimeWeight) + Weight::from_ref_time(209_811_000 as u64) // Standard Error: 208_000 - .saturating_add(Weight::from_ref_time(116_831_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(116_831_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1434,11 +1434,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(207_406_000 as RefTimeWeight) + Weight::from_ref_time(207_406_000 as u64) // Standard Error: 117_000 - .saturating_add(Weight::from_ref_time(40_702_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_702_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1446,11 +1446,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(209_260_000 as RefTimeWeight) + Weight::from_ref_time(209_260_000 as u64) // Standard Error: 130_000 - .saturating_add(Weight::from_ref_time(40_479_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_479_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1458,11 +1458,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(206_448_000 as RefTimeWeight) + Weight::from_ref_time(206_448_000 as u64) // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(40_134_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_134_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1470,11 +1470,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(206_969_000 as RefTimeWeight) + Weight::from_ref_time(206_969_000 as u64) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(40_251_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_251_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1483,11 +1483,11 @@ impl WeightInfo for () { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(211_611_000 as RefTimeWeight) + Weight::from_ref_time(211_611_000 as u64) // Standard Error: 175_000 - .saturating_add(Weight::from_ref_time(98_675_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(98_675_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1495,11 +1495,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(134_484_000 as RefTimeWeight) + Weight::from_ref_time(134_484_000 as u64) // Standard Error: 57_000 - .saturating_add(Weight::from_ref_time(19_329_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(19_329_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1507,11 +1507,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(208_556_000 as RefTimeWeight) + Weight::from_ref_time(208_556_000 as u64) // Standard Error: 125_000 - .saturating_add(Weight::from_ref_time(40_328_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(40_328_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1519,11 +1519,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. 
fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(268_886_000 as RefTimeWeight) + Weight::from_ref_time(268_886_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(9_627_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_627_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1531,9 +1531,9 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 1]`. fn seal_return(_r: u32, ) -> Weight { - Weight::from_ref_time(203_591_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(203_591_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1541,11 +1541,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(204_258_000 as RefTimeWeight) + Weight::from_ref_time(204_258_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(183_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(183_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1555,13 +1555,13 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(206_625_000 as RefTimeWeight) + Weight::from_ref_time(206_625_000 as u64) // Standard Error: 672_000 - .saturating_add(Weight::from_ref_time(59_377_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(59_377_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1570,11 +1570,11 @@ impl WeightInfo for () { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(208_866_000 as RefTimeWeight) + Weight::from_ref_time(208_866_000 as u64) // Standard Error: 164_000 - .saturating_add(Weight::from_ref_time(133_438_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(133_438_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1582,11 +1582,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(220_860_000 as RefTimeWeight) + Weight::from_ref_time(220_860_000 as u64) // Standard Error: 209_000 - .saturating_add(Weight::from_ref_time(239_951_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(239_951_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1596,15 +1596,15 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(439_782_000 as RefTimeWeight) + Weight::from_ref_time(439_782_000 as u64) // Standard Error: 1_643_000 - .saturating_add(Weight::from_ref_time(264_687_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(264_687_000 as u64).saturating_mul(t as u64)) // Standard Error: 323_000 - .saturating_add(Weight::from_ref_time(67_636_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(67_636_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(t as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1612,128 +1612,128 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(140_280_000 as RefTimeWeight) + Weight::from_ref_time(140_280_000 as u64) // Standard Error: 82_000 - .saturating_add(Weight::from_ref_time(32_717_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(32_717_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(161_247_000 as RefTimeWeight) + Weight::from_ref_time(161_247_000 as u64) // Standard Error: 883_000 - .saturating_add(Weight::from_ref_time(423_997_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(423_997_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(529_247_000 as RefTimeWeight) + Weight::from_ref_time(529_247_000 as u64) // Standard Error: 2_745_000 - .saturating_add(Weight::from_ref_time(85_282_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(85_282_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(55 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(53 as u64)) + .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(529_812_000 as RefTimeWeight) + Weight::from_ref_time(529_812_000 as u64) // Standard Error: 2_513_000 - .saturating_add(Weight::from_ref_time(74_554_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(74_554_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(55 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(53 as u64)) + .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(184_803_000 as RefTimeWeight) + Weight::from_ref_time(184_803_000 as u64) // Standard Error: 733_000 - .saturating_add(Weight::from_ref_time(404_933_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(404_933_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(500_958_000 as RefTimeWeight) + Weight::from_ref_time(500_958_000 as u64) // Standard Error: 2_980_000 - .saturating_add(Weight::from_ref_time(75_996_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(52 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(75_996_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(55 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(52 as u64)) + .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(177_682_000 as RefTimeWeight) + Weight::from_ref_time(177_682_000 as u64) // Standard Error: 743_000 - .saturating_add(Weight::from_ref_time(338_172_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(338_172_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(465_285_000 as RefTimeWeight) + Weight::from_ref_time(465_285_000 as u64) // Standard Error: 2_599_000 - .saturating_add(Weight::from_ref_time(155_106_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(155_106_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(55 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(179_118_000 as RefTimeWeight) + Weight::from_ref_time(179_118_000 as u64) // Standard Error: 572_000 - .saturating_add(Weight::from_ref_time(311_083_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(311_083_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(423_056_000 as RefTimeWeight) + Weight::from_ref_time(423_056_000 as u64) // Standard Error: 2_037_000 - .saturating_add(Weight::from_ref_time(69_665_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(54 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(69_665_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(54 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(188_884_000 as RefTimeWeight) + Weight::from_ref_time(188_884_000 as u64) // Standard Error: 761_000 - .saturating_add(Weight::from_ref_time(432_781_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(432_781_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(532_408_000 as RefTimeWeight) + Weight::from_ref_time(532_408_000 as u64) // Standard Error: 3_348_000 - .saturating_add(Weight::from_ref_time(164_943_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(55 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(53 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(164_943_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(55 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(53 as u64)) + .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(n as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1741,13 +1741,13 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(127_181_000 as RefTimeWeight) + Weight::from_ref_time(127_181_000 as u64) // Standard Error: 1_495_000 - .saturating_add(Weight::from_ref_time(1_500_589_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(1_500_589_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1755,13 +1755,13 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_call(r: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 3_803_000 - .saturating_add(Weight::from_ref_time(14_860_909_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((80 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(14_860_909_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1769,11 +1769,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 6_045_000 - .saturating_add(Weight::from_ref_time(14_797_140_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(14_797_140_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads((79 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -1782,15 +1782,15 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(9_196_444_000 as RefTimeWeight) + Weight::from_ref_time(9_196_444_000 as u64) // Standard Error: 20_486_000 - .saturating_add(Weight::from_ref_time(1_458_153_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_458_153_000 as u64).saturating_mul(t as u64)) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(9_718_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(85 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(81 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((81 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(9_718_000 as u64).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(85 as u64)) + .saturating_add(RocksDbWeight::get().reads((81 as u64).saturating_mul(t as u64))) + .saturating_add(RocksDbWeight::get().writes(81 as u64)) + .saturating_add(RocksDbWeight::get().writes((81 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1800,13 +1800,13 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. 
fn seal_instantiate(r: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 36_253_000 - .saturating_add(Weight::from_ref_time(21_201_529_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((320 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(21_201_529_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().reads((320 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes((320 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:81 w:81) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -1817,15 +1817,15 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(12_282_498_000 as RefTimeWeight) + Weight::from_ref_time(12_282_498_000 as u64) // Standard Error: 48_112_000 - .saturating_add(Weight::from_ref_time(720_795_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(720_795_000 as u64).saturating_mul(t as u64)) // Standard Error: 22_000 - .saturating_add(Weight::from_ref_time(124_274_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(167 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(165 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(t as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(124_274_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(167 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(t as u64))) + .saturating_add(RocksDbWeight::get().writes(165 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1833,11 +1833,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(203_959_000 as RefTimeWeight) + Weight::from_ref_time(203_959_000 as u64) // Standard Error: 142_000 - .saturating_add(Weight::from_ref_time(61_311_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(61_311_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1845,11 +1845,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(349_915_000 as RefTimeWeight) + Weight::from_ref_time(349_915_000 as u64) // Standard Error: 40_000 - .saturating_add(Weight::from_ref_time(320_652_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(320_652_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1857,11 +1857,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(209_219_000 as RefTimeWeight) + Weight::from_ref_time(209_219_000 as u64) // Standard Error: 157_000 - .saturating_add(Weight::from_ref_time(73_728_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_728_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1869,11 +1869,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(208_860_000 as RefTimeWeight) + Weight::from_ref_time(208_860_000 as u64) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(245_718_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(245_718_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1881,11 +1881,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(206_165_000 as RefTimeWeight) + Weight::from_ref_time(206_165_000 as u64) // Standard Error: 138_000 - .saturating_add(Weight::from_ref_time(51_644_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_644_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1893,11 +1893,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(255_955_000 as RefTimeWeight) + Weight::from_ref_time(255_955_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_090_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_090_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1905,11 +1905,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(208_153_000 as RefTimeWeight) + Weight::from_ref_time(208_153_000 as u64) // Standard Error: 140_000 - .saturating_add(Weight::from_ref_time(51_264_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_264_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1917,11 +1917,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(278_368_000 as RefTimeWeight) + Weight::from_ref_time(278_368_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_006_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_006_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1929,11 +1929,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(331_955_000 as RefTimeWeight) + Weight::from_ref_time(331_955_000 as u64) // Standard Error: 1_155_000 - .saturating_add(Weight::from_ref_time(3_069_955_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_069_955_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1941,11 +1941,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(207_838_000 as RefTimeWeight) + Weight::from_ref_time(207_838_000 as u64) // Standard Error: 783_000 - .saturating_add(Weight::from_ref_time(2_058_503_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_058_503_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1954,316 +1954,316 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 1_567_000 - .saturating_add(Weight::from_ref_time(774_380_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes((79 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(774_380_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads((79 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes((79 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(73_955_000 as RefTimeWeight) + Weight::from_ref_time(73_955_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(612_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(612_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(74_057_000 as RefTimeWeight) + Weight::from_ref_time(74_057_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(74_137_000 as RefTimeWeight) + Weight::from_ref_time(74_137_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_427_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(73_844_000 as RefTimeWeight) + Weight::from_ref_time(73_844_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_773_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_773_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(73_979_000 as RefTimeWeight) + Weight::from_ref_time(73_979_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_952_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_952_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(73_924_000 as RefTimeWeight) + Weight::from_ref_time(73_924_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(941_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(941_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(73_574_000 as RefTimeWeight) + Weight::from_ref_time(73_574_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_439_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_439_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(73_343_000 as RefTimeWeight) + Weight::from_ref_time(73_343_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_603_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_603_000 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(76_267_000 as RefTimeWeight) + Weight::from_ref_time(76_267_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(74_877_000 as RefTimeWeight) + Weight::from_ref_time(74_877_000 as u64) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(7_144_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(7_144_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(88_665_000 as RefTimeWeight) + Weight::from_ref_time(88_665_000 as u64) // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(9_142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_142_000 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(98_600_000 as RefTimeWeight) + Weight::from_ref_time(98_600_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(469_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(469_000 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(74_555_000 as RefTimeWeight) + Weight::from_ref_time(74_555_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(624_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(624_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(74_329_000 as RefTimeWeight) + Weight::from_ref_time(74_329_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(688_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(688_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(74_612_000 as RefTimeWeight) + Weight::from_ref_time(74_612_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(909_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(909_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(76_906_000 as RefTimeWeight) + Weight::from_ref_time(76_906_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_192_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_192_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(76_979_000 as RefTimeWeight) + Weight::from_ref_time(76_979_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_361_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_361_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(74_370_000 as RefTimeWeight) + Weight::from_ref_time(74_370_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(661_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(661_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(73_584_000 as RefTimeWeight) + Weight::from_ref_time(73_584_000 as u64) // Standard Error: 353_000 - .saturating_add(Weight::from_ref_time(187_114_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(187_114_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(74_206_000 as RefTimeWeight) + Weight::from_ref_time(74_206_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(884_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(884_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(73_992_000 as RefTimeWeight) + Weight::from_ref_time(73_992_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(893_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(893_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(73_985_000 as RefTimeWeight) + Weight::from_ref_time(73_985_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(891_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(891_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(74_117_000 as RefTimeWeight) + Weight::from_ref_time(74_117_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(901_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(901_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(73_981_000 as RefTimeWeight) + Weight::from_ref_time(73_981_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(866_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(866_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(74_104_000 as RefTimeWeight) + Weight::from_ref_time(74_104_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(868_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(868_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(74_293_000 as RefTimeWeight) + Weight::from_ref_time(74_293_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(878_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(878_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(74_055_000 as RefTimeWeight) + Weight::from_ref_time(74_055_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_350_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(73_710_000 as RefTimeWeight) + Weight::from_ref_time(73_710_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(73_917_000 as RefTimeWeight) + Weight::from_ref_time(73_917_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_355_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_355_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(74_048_000 as RefTimeWeight) + Weight::from_ref_time(74_048_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(74_029_000 as RefTimeWeight) + Weight::from_ref_time(74_029_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_349_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_349_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(74_267_000 as RefTimeWeight) + Weight::from_ref_time(74_267_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_353_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(73_952_000 as RefTimeWeight) + Weight::from_ref_time(73_952_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_350_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_350_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(73_851_000 as RefTimeWeight) + Weight::from_ref_time(73_851_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_368_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_368_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(74_034_000 as RefTimeWeight) + Weight::from_ref_time(74_034_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_348_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_348_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(73_979_000 as RefTimeWeight) + Weight::from_ref_time(73_979_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_353_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_353_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(74_000_000 as RefTimeWeight) + Weight::from_ref_time(74_000_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_328_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_328_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(73_883_000 as RefTimeWeight) + Weight::from_ref_time(73_883_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_331_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_331_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(74_216_000 as RefTimeWeight) + Weight::from_ref_time(74_216_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_324_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(73_989_000 as RefTimeWeight) + Weight::from_ref_time(73_989_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_998_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_998_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(73_857_000 as RefTimeWeight) + Weight::from_ref_time(73_857_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_073_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_073_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(73_801_000 as RefTimeWeight) + Weight::from_ref_time(73_801_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_027_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_027_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(74_130_000 as RefTimeWeight) + Weight::from_ref_time(74_130_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_064_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_064_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(74_071_000 as RefTimeWeight) + Weight::from_ref_time(74_071_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_327_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_327_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(74_201_000 as RefTimeWeight) + Weight::from_ref_time(74_201_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_330_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_330_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(74_241_000 as RefTimeWeight) + Weight::from_ref_time(74_241_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_321_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_321_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(74_331_000 as RefTimeWeight) + Weight::from_ref_time(74_331_000 as u64) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(1_347_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_347_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(73_674_000 as RefTimeWeight) + Weight::from_ref_time(73_674_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_359_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_359_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(73_807_000 as RefTimeWeight) + Weight::from_ref_time(73_807_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(73_725_000 as RefTimeWeight) + Weight::from_ref_time(73_725_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_358_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(73_755_000 as RefTimeWeight) + Weight::from_ref_time(73_755_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_360_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) } } diff --git a/frame/conviction-voting/src/weights.rs b/frame/conviction-voting/src/weights.rs index dbc7ce06ee825..6428d82c94c39 100644 --- a/frame/conviction-voting/src/weights.rs +++ b/frame/conviction-voting/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_conviction_voting. 
@@ -62,9 +62,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_new() -> Weight { - Weight::from_ref_time(148_804_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(148_804_000 as u64) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: ConvictionVoting VotingFor (r:1 w:1) @@ -72,24 +72,24 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_existing() -> Weight { - Weight::from_ref_time(313_333_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(313_333_000 as u64) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn remove_vote() -> Weight { - Weight::from_ref_time(300_591_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(300_591_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:0) fn remove_other_vote() -> Weight { - Weight::from_ref_time(53_887_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(53_887_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) @@ -97,33 +97,33 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(51_518_000 as RefTimeWeight) + Weight::from_ref_time(51_518_000 as u64) // Standard Error: 83_000 - .saturating_add(Weight::from_ref_time(27_235_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(27_235_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(r as u64))) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(37_885_000 as RefTimeWeight) + Weight::from_ref_time(37_885_000 as u64) // 
Standard Error: 75_000 - .saturating_add(Weight::from_ref_time(24_395_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(24_395_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(r as u64))) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn unlock() -> Weight { - Weight::from_ref_time(67_703_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(67_703_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } } @@ -135,9 +135,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_new() -> Weight { - Weight::from_ref_time(148_804_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(148_804_000 as u64) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: ConvictionVoting VotingFor (r:1 w:1) @@ -145,24 +145,24 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_existing() -> Weight { - Weight::from_ref_time(313_333_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(313_333_000 as u64) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn remove_vote() -> Weight { - Weight::from_ref_time(300_591_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(300_591_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:0) fn remove_other_vote() -> Weight { - Weight::from_ref_time(53_887_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(53_887_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) @@ -170,32 +170,32 @@ impl WeightInfo for () { // 
Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(51_518_000 as RefTimeWeight) + Weight::from_ref_time(51_518_000 as u64) // Standard Error: 83_000 - .saturating_add(Weight::from_ref_time(27_235_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(27_235_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(r as u64))) } // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(37_885_000 as RefTimeWeight) + Weight::from_ref_time(37_885_000 as u64) // Standard Error: 75_000 - .saturating_add(Weight::from_ref_time(24_395_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(24_395_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(r as u64))) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn unlock() -> Weight { - Weight::from_ref_time(67_703_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(67_703_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } } diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index ad11526736c9c..272921ed3a15d 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_democracy. 
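The hunks below apply the same mechanical rewrite to pallet_democracy's generated weight functions: every cast written `as RefTimeWeight` becomes `as u64`, and nothing else changes. As a rough, self-contained sketch of the shape these generated functions share, the snippet below models the pattern with simplified stand-in `Weight` and `DbWeight` types (not the real frame_support machinery); the 30_923_000 base and 142_000 per-`s` coefficients are taken from the `second` hunk that follows, while the database costs are made-up illustrative values.

// Illustrative sketch with stand-in types; not the real frame_support API.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Weight(u64);

impl Weight {
    fn from_ref_time(t: u64) -> Self { Weight(t) }
    fn saturating_add(self, other: Weight) -> Self { Weight(self.0.saturating_add(other.0)) }
    fn saturating_mul(self, by: u64) -> Self { Weight(self.0.saturating_mul(by)) }
}

// Stand-in for what T::DbWeight::get() / RocksDbWeight::get() provides.
struct DbWeight { read: u64, write: u64 }

impl DbWeight {
    fn reads(&self, n: u64) -> Weight { Weight(self.read.saturating_mul(n)) }
    fn writes(&self, n: u64) -> Weight { Weight(self.write.saturating_mul(n)) }
}

// Every generated function is a straight-line sum: a base ref-time cost, a linear term per
// benchmarked component, plus storage read/write costs. After this patch the casts say
// `as u64` instead of `as RefTimeWeight`, but the arithmetic is untouched.
fn second_weight(s: u32, db: &DbWeight) -> Weight {
    Weight::from_ref_time(30_923_000 as u64)
        .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(s as u64))
        .saturating_add(db.reads(1 as u64))
        .saturating_add(db.writes(1 as u64))
}

fn main() {
    // Made-up per-operation database costs, only for the sake of the example.
    let db = DbWeight { read: 25_000_000, write: 100_000_000 };
    println!("{:?}", second_weight(10, &db));
}

Assuming `RefTimeWeight` was a plain `u64` alias, as deleting it from the imports implies, the substitution cannot change any value these functions compute.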
@@ -80,44 +80,44 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - Weight::from_ref_time(48_328_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(48_328_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy DepositOf (r:1 w:1) fn second(s: u32, ) -> Weight { - Weight::from_ref_time(30_923_000 as RefTimeWeight) + Weight::from_ref_time(30_923_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_new(r: u32, ) -> Weight { - Weight::from_ref_time(40_345_000 as RefTimeWeight) + Weight::from_ref_time(40_345_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(140_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(140_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_existing(r: u32, ) -> Weight { - Weight::from_ref_time(39_853_000 as RefTimeWeight) + Weight::from_ref_time(39_853_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(150_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - Weight::from_ref_time(19_364_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(19_364_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) @@ -126,82 +126,82 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn blacklist(p: u32, ) -> Weight { - Weight::from_ref_time(57_708_000 as RefTimeWeight) + Weight::from_ref_time(57_708_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) 
- .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) fn external_propose(v: u32, ) -> Weight { - Weight::from_ref_time(10_714_000 as RefTimeWeight) + Weight::from_ref_time(10_714_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(33_000 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - Weight::from_ref_time(3_697_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_697_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - Weight::from_ref_time(3_831_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_831_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - Weight::from_ref_time(20_271_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(20_271_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) fn veto_external(v: u32, ) -> Weight { - Weight::from_ref_time(21_319_000 as RefTimeWeight) + Weight::from_ref_time(21_319_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn cancel_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(43_960_000 as RefTimeWeight) + Weight::from_ref_time(43_960_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(184_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(184_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - Weight::from_ref_time(13_475_000 as RefTimeWeight) - 
.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(13_475_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_queued(r: u32, ) -> Weight { - Weight::from_ref_time(24_320_000 as RefTimeWeight) + Weight::from_ref_time(24_320_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(560_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(560_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base(r: u32, ) -> Weight { - Weight::from_ref_time(3_428_000 as RefTimeWeight) + Weight::from_ref_time(3_428_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_171_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_171_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) @@ -210,103 +210,103 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy PublicProps (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - Weight::from_ref_time(7_867_000 as RefTimeWeight) + Weight::from_ref_time(7_867_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_177_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_177_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:3 w:3) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(37_902_000 as RefTimeWeight) + Weight::from_ref_time(37_902_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(4_335_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(4_335_000 as 
u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(r as u64))) } // Storage: Democracy VotingOf (r:2 w:2) // Storage: Democracy ReferendumInfoOf (r:1 w:1) fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(21_272_000 as RefTimeWeight) + Weight::from_ref_time(21_272_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(4_351_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(4_351_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(r as u64))) } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - Weight::from_ref_time(4_913_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(4_913_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy Preimages (r:1 w:1) fn note_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(27_986_000 as RefTimeWeight) + Weight::from_ref_time(27_986_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy Preimages (r:1 w:1) fn note_imminent_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(20_058_000 as RefTimeWeight) + Weight::from_ref_time(20_058_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy Preimages (r:1 w:1) // Storage: System Account (r:1 w:0) fn reap_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(28_619_000 as RefTimeWeight) + Weight::from_ref_time(28_619_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // 
Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_remove(r: u32, ) -> Weight { - Weight::from_ref_time(26_619_000 as RefTimeWeight) + Weight::from_ref_time(26_619_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_set(r: u32, ) -> Weight { - Weight::from_ref_time(25_373_000 as RefTimeWeight) + Weight::from_ref_time(25_373_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_961_000 as RefTimeWeight) + Weight::from_ref_time(15_961_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_other_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_992_000 as RefTimeWeight) + Weight::from_ref_time(15_992_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(113_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(113_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } } @@ -317,44 +317,44 @@ impl WeightInfo for () { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - Weight::from_ref_time(48_328_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(48_328_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy DepositOf (r:1 w:1) fn second(s: u32, ) -> Weight { - Weight::from_ref_time(30_923_000 as RefTimeWeight) + Weight::from_ref_time(30_923_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - 
.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_new(r: u32, ) -> Weight { - Weight::from_ref_time(40_345_000 as RefTimeWeight) + Weight::from_ref_time(40_345_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(140_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(140_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_existing(r: u32, ) -> Weight { - Weight::from_ref_time(39_853_000 as RefTimeWeight) + Weight::from_ref_time(39_853_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(150_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - Weight::from_ref_time(19_364_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(19_364_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) @@ -363,82 +363,82 @@ impl WeightInfo for () { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn blacklist(p: u32, ) -> Weight { - Weight::from_ref_time(57_708_000 as RefTimeWeight) + Weight::from_ref_time(57_708_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(192_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) fn external_propose(v: u32, ) -> Weight { - Weight::from_ref_time(10_714_000 as RefTimeWeight) + Weight::from_ref_time(10_714_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + 
.saturating_add(Weight::from_ref_time(33_000 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - Weight::from_ref_time(3_697_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_697_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - Weight::from_ref_time(3_831_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_831_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - Weight::from_ref_time(20_271_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(20_271_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) fn veto_external(v: u32, ) -> Weight { - Weight::from_ref_time(21_319_000 as RefTimeWeight) + Weight::from_ref_time(21_319_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn cancel_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(43_960_000 as RefTimeWeight) + Weight::from_ref_time(43_960_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(184_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(184_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - Weight::from_ref_time(13_475_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(13_475_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_queued(r: u32, ) -> Weight { - Weight::from_ref_time(24_320_000 as RefTimeWeight) + Weight::from_ref_time(24_320_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(560_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(560_000 as 
u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base(r: u32, ) -> Weight { - Weight::from_ref_time(3_428_000 as RefTimeWeight) + Weight::from_ref_time(3_428_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_171_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_171_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) @@ -447,102 +447,102 @@ impl WeightInfo for () { // Storage: Democracy PublicProps (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - Weight::from_ref_time(7_867_000 as RefTimeWeight) + Weight::from_ref_time(7_867_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_177_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_177_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:3 w:3) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(37_902_000 as RefTimeWeight) + Weight::from_ref_time(37_902_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(4_335_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(4_335_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(r as u64))) } // Storage: Democracy VotingOf (r:2 w:2) // Storage: Democracy ReferendumInfoOf (r:1 w:1) fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(21_272_000 as RefTimeWeight) + Weight::from_ref_time(21_272_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(4_351_000 as RefTimeWeight).saturating_mul(r as 
RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(4_351_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(r as u64))) } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - Weight::from_ref_time(4_913_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(4_913_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy Preimages (r:1 w:1) fn note_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(27_986_000 as RefTimeWeight) + Weight::from_ref_time(27_986_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy Preimages (r:1 w:1) fn note_imminent_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(20_058_000 as RefTimeWeight) + Weight::from_ref_time(20_058_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy Preimages (r:1 w:1) // Storage: System Account (r:1 w:0) fn reap_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(28_619_000 as RefTimeWeight) + Weight::from_ref_time(28_619_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_remove(r: u32, ) -> Weight { - Weight::from_ref_time(26_619_000 as RefTimeWeight) + Weight::from_ref_time(26_619_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(r as u64)) + 
.saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_set(r: u32, ) -> Weight { - Weight::from_ref_time(25_373_000 as RefTimeWeight) + Weight::from_ref_time(25_373_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_961_000 as RefTimeWeight) + Weight::from_ref_time(15_961_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_other_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_992_000 as RefTimeWeight) + Weight::from_ref_time(15_992_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(113_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(113_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 0e7af39badd44..5e5191242bd77 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_election_provider_multi_phase. 
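The same substitution continues below for pallet_election_provider_multi_phase. Assuming `RefTimeWeight` was simply a type alias for `u64` (which is what removing it from the `frame_support::weights` import implies), the two spellings of each cast are interchangeable. A tiny sketch that re-declares such an alias locally just to demonstrate the equivalence; the 13_495_000 constant is the `on_initialize_nothing` base cost from the hunk below.

// Illustrative sketch only; the alias is re-declared locally because the real one is being removed.
type RefTimeWeight = u64;

fn main() {
    let before = 13_495_000 as RefTimeWeight;
    let after = 13_495_000 as u64;
    // Only the cast target changes in these hunks; every literal, multiplier and
    // reads/writes count stays exactly the same.
    assert_eq!(before, after);
    println!("casts agree: {before}");
}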
@@ -68,46 +68,46 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - Weight::from_ref_time(13_495_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) + Weight::from_ref_time(13_495_000 as u64) + .saturating_add(T::DbWeight::get().reads(8 as u64)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - Weight::from_ref_time(14_114_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(14_114_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - Weight::from_ref_time(13_756_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(13_756_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - Weight::from_ref_time(28_467_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(28_467_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - Weight::from_ref_time(21_991_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(21_991_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { - Weight::from_ref_time(3_186_000 as RefTimeWeight) + Weight::from_ref_time(3_186_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(202_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(202_000 as u64).saturating_mul(v as u64)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(60_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(60_000 as u64).saturating_mul(t as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) @@ -119,13 +119,13 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn elect_queued(a: 
u32, d: u32, ) -> Weight { - Weight::from_ref_time(137_653_000 as RefTimeWeight) + Weight::from_ref_time(137_653_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(640_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(640_000 as u64).saturating_mul(a as u64)) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(8 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(48_000 as u64).saturating_mul(d as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) @@ -134,9 +134,9 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit() -> Weight { - Weight::from_ref_time(49_313_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(49_313_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -146,33 +146,33 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(867_000 as u64).saturating_mul(v as u64)) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(107_000 as u64).saturating_mul(t as u64)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(6_907_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(6_907_000 as u64).saturating_mul(a as u64)) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_427_000 as u64).saturating_mul(d as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(844_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + 
.saturating_add(Weight::from_ref_time(844_000 as u64).saturating_mul(v as u64)) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(150_000 as u64).saturating_mul(t as u64)) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(5_421_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_421_000 as u64).saturating_mul(a as u64)) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(1_167_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_167_000 as u64).saturating_mul(d as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) } } @@ -187,46 +187,46 @@ impl WeightInfo for () { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - Weight::from_ref_time(13_495_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) + Weight::from_ref_time(13_495_000 as u64) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - Weight::from_ref_time(14_114_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(14_114_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - Weight::from_ref_time(13_756_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(13_756_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - Weight::from_ref_time(28_467_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(28_467_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - Weight::from_ref_time(21_991_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(21_991_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { - Weight::from_ref_time(3_186_000 as RefTimeWeight) + Weight::from_ref_time(3_186_000 as u64) // Standard Error: 
1_000 - .saturating_add(Weight::from_ref_time(202_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(202_000 as u64).saturating_mul(v as u64)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(60_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(60_000 as u64).saturating_mul(t as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) @@ -238,13 +238,13 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn elect_queued(a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(137_653_000 as RefTimeWeight) + Weight::from_ref_time(137_653_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(640_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(640_000 as u64).saturating_mul(a as u64)) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(48_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(48_000 as u64).saturating_mul(d as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(8 as u64)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) @@ -253,9 +253,9 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit() -> Weight { - Weight::from_ref_time(49_313_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(49_313_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) // Storage: ElectionProviderMultiPhase Round (r:1 w:0) @@ -265,32 +265,32 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(867_000 as u64).saturating_mul(v as u64)) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(107_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(107_000 as u64).saturating_mul(t as u64)) // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(6_907_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(6_907_000 as u64).saturating_mul(a as u64)) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(1_427_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) - 
.saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_427_000 as u64).saturating_mul(d as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(844_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(844_000 as u64).saturating_mul(v as u64)) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(150_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(150_000 as u64).saturating_mul(t as u64)) // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(5_421_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_421_000 as u64).saturating_mul(a as u64)) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(1_167_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_167_000 as u64).saturating_mul(d as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) } } diff --git a/frame/election-provider-support/src/weights.rs b/frame/election-provider-support/src/weights.rs index ed7360940ba58..44075ba871228 100644 --- a/frame/election-provider-support/src/weights.rs +++ b/frame/election-provider-support/src/weights.rs @@ -40,7 +40,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_election_provider_support_benchmarking. 
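The `phragmen` and `phragmms` functions in the next hunk carry no storage term: each is a pure linear model, a base cost plus one coefficient per benchmarked component, combined with saturating arithmetic. A small sketch that evaluates the `phragmen` formula with the coefficients from that hunk; the inputs v, t and d are arbitrary example values, not benchmark parameters from the source.

// Illustrative sketch only: ref-time of phragmen(v, t, d) = 0 + 32_973_000*v + 1_334_000*t + 2_636_364_000*d.
fn phragmen_ref_time(v: u64, t: u64, d: u64) -> u64 {
    0u64.saturating_add(32_973_000u64.saturating_mul(v))
        .saturating_add(1_334_000u64.saturating_mul(t))
        .saturating_add(2_636_364_000u64.saturating_mul(d))
}

fn main() {
    // e.g. v = 1_000, t = 100, d = 8; far below u64::MAX, so saturation never triggers here.
    let w = phragmen_ref_time(1_000, 100, 8);
    assert_eq!(w, 32_973_000u64 * 1_000 + 1_334_000 * 100 + 2_636_364_000 * 8);
    println!("phragmen ref-time estimate: {w}");
}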
@@ -53,43 +53,43 @@ pub trait WeightInfo {
 pub struct SubstrateWeight(PhantomData);
 impl WeightInfo for SubstrateWeight {
 	fn phragmen(v: u32, t: u32, d: u32, ) -> Weight {
-		Weight::from_ref_time(0 as RefTimeWeight)
+		Weight::from_ref_time(0 as u64)
 		// Standard Error: 667_000
-			.saturating_add(Weight::from_ref_time(32_973_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(32_973_000 as u64).saturating_mul(v as u64))
 		// Standard Error: 1_334_000
-			.saturating_add(Weight::from_ref_time(1_334_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(1_334_000 as u64).saturating_mul(t as u64))
 		// Standard Error: 60_644_000
-			.saturating_add(Weight::from_ref_time(2_636_364_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(2_636_364_000 as u64).saturating_mul(d as u64))
 	}
 	fn phragmms(v: u32, t: u32, d: u32, ) -> Weight {
-		Weight::from_ref_time(0 as RefTimeWeight)
+		Weight::from_ref_time(0 as u64)
 		// Standard Error: 73_000
-			.saturating_add(Weight::from_ref_time(21_073_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(21_073_000 as u64).saturating_mul(v as u64))
 		// Standard Error: 146_000
-			.saturating_add(Weight::from_ref_time(65_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(65_000 as u64).saturating_mul(t as u64))
 		// Standard Error: 6_649_000
-			.saturating_add(Weight::from_ref_time(1_711_424_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(1_711_424_000 as u64).saturating_mul(d as u64))
 	}
 }

 // For backwards compatibility and tests
 impl WeightInfo for () {
 	fn phragmen(v: u32, t: u32, d: u32, ) -> Weight {
-		Weight::from_ref_time(0 as RefTimeWeight)
+		Weight::from_ref_time(0 as u64)
 		// Standard Error: 667_000
-			.saturating_add(Weight::from_ref_time(32_973_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(32_973_000 as u64).saturating_mul(v as u64))
 		// Standard Error: 1_334_000
-			.saturating_add(Weight::from_ref_time(1_334_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(1_334_000 as u64).saturating_mul(t as u64))
 		// Standard Error: 60_644_000
-			.saturating_add(Weight::from_ref_time(2_636_364_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(2_636_364_000 as u64).saturating_mul(d as u64))
 	}
 	fn phragmms(v: u32, t: u32, d: u32, ) -> Weight {
-		Weight::from_ref_time(0 as RefTimeWeight)
+		Weight::from_ref_time(0 as u64)
 		// Standard Error: 73_000
-			.saturating_add(Weight::from_ref_time(21_073_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(21_073_000 as u64).saturating_mul(v as u64))
 		// Standard Error: 146_000
-			.saturating_add(Weight::from_ref_time(65_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(65_000 as u64).saturating_mul(t as u64))
 		// Standard Error: 6_649_000
-			.saturating_add(Weight::from_ref_time(1_711_424_000 as RefTimeWeight).saturating_mul(d as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(1_711_424_000 as u64).saturating_mul(d as u64))
 	}
 }
diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs
index 051aaa793588c..52a5dedc723d4 100644
--- a/frame/elections-phragmen/src/weights.rs
+++ 
b/frame/elections-phragmen/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_elections_phragmen. @@ -70,11 +70,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - Weight::from_ref_time(27_011_000 as RefTimeWeight) + Weight::from_ref_time(27_011_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(214_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(214_000 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -83,11 +83,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - Weight::from_ref_time(40_240_000 as RefTimeWeight) + Weight::from_ref_time(40_240_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(244_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(244_000 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -96,38 +96,38 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - Weight::from_ref_time(40_394_000 as RefTimeWeight) + Weight::from_ref_time(40_394_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(217_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(217_000 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - Weight::from_ref_time(37_651_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(37_651_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. 
fn submit_candidacy(c: u32, ) -> Weight { - Weight::from_ref_time(42_217_000 as RefTimeWeight) + Weight::from_ref_time(42_217_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(50_000 as u64).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { - Weight::from_ref_time(46_459_000 as RefTimeWeight) + Weight::from_ref_time(46_459_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(26_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(26_000 as u64).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) @@ -135,19 +135,19 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - Weight::from_ref_time(45_189_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(45_189_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - Weight::from_ref_time(34_516_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(34_516_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Benchmark Override (r:0 w:0) fn remove_member_without_replacement() -> Weight { - Weight::from_ref_time(2_000_000_000_000 as RefTimeWeight) + Weight::from_ref_time(2_000_000_000_000 as u64) } // Storage: Elections Members (r:1 w:1) // Storage: System Account (r:1 w:1) @@ -156,9 +156,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - Weight::from_ref_time(51_838_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(51_838_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Elections Voting (r:5001 w:5000) // Storage: Elections Members (r:1 w:0) @@ -169,12 +169,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[5000, 10000]`. /// The range of component `d` is `[1, 5000]`. 
fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 76_000 - .saturating_add(Weight::from_ref_time(63_721_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(63_721_000 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(v as u64))) + .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(v as u64))) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:1) @@ -189,15 +189,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[1, 10000]`. /// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 773_000 - .saturating_add(Weight::from_ref_time(81_534_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(81_534_000 as u64).saturating_mul(v as u64)) // Standard Error: 51_000 - .saturating_add(Weight::from_ref_time(4_453_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(280 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(4_453_000 as u64).saturating_mul(e as u64)) + .saturating_add(T::DbWeight::get().reads(280 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(c as u64))) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(c as u64))) } } @@ -210,11 +210,11 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - Weight::from_ref_time(27_011_000 as RefTimeWeight) + Weight::from_ref_time(27_011_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(214_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(214_000 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -223,11 +223,11 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. 
fn vote_more(v: u32, ) -> Weight { - Weight::from_ref_time(40_240_000 as RefTimeWeight) + Weight::from_ref_time(40_240_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(244_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(244_000 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Elections Candidates (r:1 w:0) // Storage: Elections Members (r:1 w:0) @@ -236,38 +236,38 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - Weight::from_ref_time(40_394_000 as RefTimeWeight) + Weight::from_ref_time(40_394_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(217_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(217_000 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - Weight::from_ref_time(37_651_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(37_651_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - Weight::from_ref_time(42_217_000 as RefTimeWeight) + Weight::from_ref_time(42_217_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(50_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(50_000 as u64).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. 
fn renounce_candidacy_candidate(c: u32, ) -> Weight { - Weight::from_ref_time(46_459_000 as RefTimeWeight) + Weight::from_ref_time(46_459_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(26_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(26_000 as u64).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Elections Members (r:1 w:1) // Storage: Elections RunnersUp (r:1 w:1) @@ -275,19 +275,19 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - Weight::from_ref_time(45_189_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(45_189_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - Weight::from_ref_time(34_516_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(34_516_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Benchmark Override (r:0 w:0) fn remove_member_without_replacement() -> Weight { - Weight::from_ref_time(2_000_000_000_000 as RefTimeWeight) + Weight::from_ref_time(2_000_000_000_000 as u64) } // Storage: Elections Members (r:1 w:1) // Storage: System Account (r:1 w:1) @@ -296,9 +296,9 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - Weight::from_ref_time(51_838_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(51_838_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Elections Voting (r:5001 w:5000) // Storage: Elections Members (r:1 w:0) @@ -309,12 +309,12 @@ impl WeightInfo for () { /// The range of component `v` is `[5000, 10000]`. /// The range of component `d` is `[1, 5000]`. 
fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 76_000 - .saturating_add(Weight::from_ref_time(63_721_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(63_721_000 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(v as u64))) + .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(v as u64))) } // Storage: Elections Candidates (r:1 w:1) // Storage: Elections Members (r:1 w:1) @@ -329,14 +329,14 @@ impl WeightInfo for () { /// The range of component `v` is `[1, 10000]`. /// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 773_000 - .saturating_add(Weight::from_ref_time(81_534_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(81_534_000 as u64).saturating_mul(v as u64)) // Standard Error: 51_000 - .saturating_add(Weight::from_ref_time(4_453_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(280 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(c as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(4_453_000 as u64).saturating_mul(e as u64)) + .saturating_add(RocksDbWeight::get().reads(280 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(c as u64))) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(c as u64))) } } diff --git a/frame/examples/basic/src/weights.rs b/frame/examples/basic/src/weights.rs index d27915ef1b02a..986648b4302bc 100644 --- a/frame/examples/basic/src/weights.rs +++ b/frame/examples/basic/src/weights.rs @@ -49,7 +49,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_example_basic. 
@@ -63,39 +63,39 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn set_dummy_benchmark(b: u32, ) -> Weight { - Weight::from_ref_time(5_834_000 as RefTimeWeight) - .saturating_add(Weight::from_ref_time(24_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(5_834_000 as u64) + .saturating_add(Weight::from_ref_time(24_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } fn accumulate_dummy(b: u32, ) -> Weight { - Weight::from_ref_time(51_353_000 as RefTimeWeight) - .saturating_add(Weight::from_ref_time(14_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(51_353_000 as u64) + .saturating_add(Weight::from_ref_time(14_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } fn sort_vector(x: u32, ) -> Weight { - Weight::from_ref_time(2_569_000 as RefTimeWeight) + Weight::from_ref_time(2_569_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(x as u64)) } } // For backwards compatibility and tests impl WeightInfo for () { fn set_dummy_benchmark(b: u32, ) -> Weight { - Weight::from_ref_time(5_834_000 as RefTimeWeight) - .saturating_add(Weight::from_ref_time(24_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(5_834_000 as u64) + .saturating_add(Weight::from_ref_time(24_000 as u64).saturating_mul(b as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } fn accumulate_dummy(b: u32, ) -> Weight { - Weight::from_ref_time(51_353_000 as RefTimeWeight) - .saturating_add(Weight::from_ref_time(14_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(51_353_000 as u64) + .saturating_add(Weight::from_ref_time(14_000 as u64).saturating_mul(b as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } fn sort_vector(x: u32, ) -> Weight { - Weight::from_ref_time(2_569_000 as RefTimeWeight) + Weight::from_ref_time(2_569_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(x as u64)) } } diff --git a/frame/gilt/src/weights.rs b/frame/gilt/src/weights.rs index 124ee7593a778..15eeb7c5104cd 100644 --- a/frame/gilt/src/weights.rs +++ b/frame/gilt/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_gilt. 
@@ -60,70 +60,70 @@ impl WeightInfo for SubstrateWeight { // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid(l: u32, ) -> Weight { - Weight::from_ref_time(41_605_000 as RefTimeWeight) + Weight::from_ref_time(41_605_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(62_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(62_000 as u64).saturating_mul(l as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid_max() -> Weight { - Weight::from_ref_time(97_715_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(97_715_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn retract_bid(l: u32, ) -> Weight { - Weight::from_ref_time(42_061_000 as RefTimeWeight) + Weight::from_ref_time(42_061_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(l as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:1) fn set_target() -> Weight { - Weight::from_ref_time(5_026_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(5_026_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Gilt Active (r:1 w:1) // Storage: Gilt ActiveTotal (r:1 w:1) fn thaw() -> Weight { - Weight::from_ref_time(47_753_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(47_753_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:0) fn pursue_target_noop() -> Weight { - Weight::from_ref_time(1_663_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + Weight::from_ref_time(1_663_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_item(b: u32, ) -> Weight { - Weight::from_ref_time(40_797_000 as RefTimeWeight) + Weight::from_ref_time(40_797_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(4_122_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(b as 
RefTimeWeight))) + .saturating_add(Weight::from_ref_time(4_122_000 as u64).saturating_mul(b as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(b as u64))) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_queue(q: u32, ) -> Weight { - Weight::from_ref_time(14_944_000 as RefTimeWeight) + Weight::from_ref_time(14_944_000 as u64) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(8_135_000 as RefTimeWeight).saturating_mul(q as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(8_135_000 as u64).saturating_mul(q as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(q as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(q as u64))) } } @@ -132,69 +132,69 @@ impl WeightInfo for () { // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid(l: u32, ) -> Weight { - Weight::from_ref_time(41_605_000 as RefTimeWeight) + Weight::from_ref_time(41_605_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(62_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(62_000 as u64).saturating_mul(l as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid_max() -> Weight { - Weight::from_ref_time(97_715_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(97_715_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn retract_bid(l: u32, ) -> Weight { - Weight::from_ref_time(42_061_000 as RefTimeWeight) + Weight::from_ref_time(42_061_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(l as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:1) fn set_target() -> Weight { - Weight::from_ref_time(5_026_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(5_026_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 
as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Gilt Active (r:1 w:1) // Storage: Gilt ActiveTotal (r:1 w:1) fn thaw() -> Weight { - Weight::from_ref_time(47_753_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(47_753_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:0) fn pursue_target_noop() -> Weight { - Weight::from_ref_time(1_663_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + Weight::from_ref_time(1_663_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_item(b: u32, ) -> Weight { - Weight::from_ref_time(40_797_000 as RefTimeWeight) + Weight::from_ref_time(40_797_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(4_122_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(b as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(4_122_000 as u64).saturating_mul(b as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(b as u64))) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt Active (r:0 w:1) fn pursue_target_per_queue(q: u32, ) -> Weight { - Weight::from_ref_time(14_944_000 as RefTimeWeight) + Weight::from_ref_time(14_944_000 as u64) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(8_135_000 as RefTimeWeight).saturating_mul(q as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(q as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(8_135_000 as u64).saturating_mul(q as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(q as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(q as u64))) } } diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs index 1aa8d7963223e..d65499034d74a 100644 --- a/frame/identity/src/weights.rs +++ b/frame/identity/src/weights.rs @@ -40,7 +40,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_identity. @@ -69,48 +69,48 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. 
fn add_registrar(r: u32, ) -> Weight { - Weight::from_ref_time(16_649_000 as RefTimeWeight) + Weight::from_ref_time(16_649_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(241_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(241_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn set_identity(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(31_322_000 as RefTimeWeight) + Weight::from_ref_time(31_322_000 as u64) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(252_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(252_000 as u64).saturating_mul(r as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(312_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(312_000 as u64).saturating_mul(x as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn set_subs_new(s: u32, ) -> Weight { - Weight::from_ref_time(30_012_000 as RefTimeWeight) + Weight::from_ref_time(30_012_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_005_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(3_005_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:1) /// The range of component `p` is `[1, 100]`. 
fn set_subs_old(p: u32, ) -> Weight { - Weight::from_ref_time(29_623_000 as RefTimeWeight) + Weight::from_ref_time(29_623_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_100_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(1_100_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -119,81 +119,81 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - Weight::from_ref_time(34_370_000 as RefTimeWeight) + Weight::from_ref_time(34_370_000 as u64) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(186_000 as u64).saturating_mul(r as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_114_000 as u64).saturating_mul(s as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(189_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(189_000 as u64).saturating_mul(x as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(34_759_000 as RefTimeWeight) + Weight::from_ref_time(34_759_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(251_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(251_000 as u64).saturating_mul(r as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(340_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(340_000 as u64).saturating_mul(x as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. 
fn cancel_request(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(32_254_000 as RefTimeWeight) + Weight::from_ref_time(32_254_000 as u64) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(159_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(159_000 as u64).saturating_mul(r as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(347_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(347_000 as u64).saturating_mul(x as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { - Weight::from_ref_time(7_858_000 as RefTimeWeight) + Weight::from_ref_time(7_858_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(190_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(190_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { - Weight::from_ref_time(8_011_000 as RefTimeWeight) + Weight::from_ref_time(8_011_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(187_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(187_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { - Weight::from_ref_time(7_970_000 as RefTimeWeight) + Weight::from_ref_time(7_970_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(175_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 19]`. /// The range of component `x` is `[1, 100]`. 
fn provide_judgement(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(24_730_000 as RefTimeWeight) + Weight::from_ref_time(24_730_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(196_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(196_000 as u64).saturating_mul(r as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(341_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(341_000 as u64).saturating_mul(x as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -203,58 +203,58 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - Weight::from_ref_time(44_988_000 as RefTimeWeight) + Weight::from_ref_time(44_988_000 as u64) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(201_000 as u64).saturating_mul(r as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_126_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_126_000 as u64).saturating_mul(s as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(x as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. fn add_sub(s: u32, ) -> Weight { - Weight::from_ref_time(36_768_000 as RefTimeWeight) + Weight::from_ref_time(36_768_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. 
fn rename_sub(s: u32, ) -> Weight { - Weight::from_ref_time(13_474_000 as RefTimeWeight) + Weight::from_ref_time(13_474_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { - Weight::from_ref_time(37_720_000 as RefTimeWeight) + Weight::from_ref_time(37_720_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. fn quit_sub(s: u32, ) -> Weight { - Weight::from_ref_time(26_848_000 as RefTimeWeight) + Weight::from_ref_time(26_848_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } } @@ -263,48 +263,48 @@ impl WeightInfo for () { // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { - Weight::from_ref_time(16_649_000 as RefTimeWeight) + Weight::from_ref_time(16_649_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(241_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(241_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. 
fn set_identity(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(31_322_000 as RefTimeWeight) + Weight::from_ref_time(31_322_000 as u64) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(252_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(252_000 as u64).saturating_mul(r as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(312_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(312_000 as u64).saturating_mul(x as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn set_subs_new(s: u32, ) -> Weight { - Weight::from_ref_time(30_012_000 as RefTimeWeight) + Weight::from_ref_time(30_012_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_005_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(3_005_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn set_subs_old(p: u32, ) -> Weight { - Weight::from_ref_time(29_623_000 as RefTimeWeight) + Weight::from_ref_time(29_623_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_100_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(1_100_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(p as u64))) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -313,81 +313,81 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. 
fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - Weight::from_ref_time(34_370_000 as RefTimeWeight) + Weight::from_ref_time(34_370_000 as u64) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(186_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(186_000 as u64).saturating_mul(r as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_114_000 as u64).saturating_mul(s as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(189_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(189_000 as u64).saturating_mul(x as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(34_759_000 as RefTimeWeight) + Weight::from_ref_time(34_759_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(251_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(251_000 as u64).saturating_mul(r as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(340_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(340_000 as u64).saturating_mul(x as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. /// The range of component `x` is `[1, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(32_254_000 as RefTimeWeight) + Weight::from_ref_time(32_254_000 as u64) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(159_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(159_000 as u64).saturating_mul(r as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(347_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(347_000 as u64).saturating_mul(x as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. 
fn set_fee(r: u32, ) -> Weight { - Weight::from_ref_time(7_858_000 as RefTimeWeight) + Weight::from_ref_time(7_858_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(190_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(190_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { - Weight::from_ref_time(8_011_000 as RefTimeWeight) + Weight::from_ref_time(8_011_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(187_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(187_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { - Weight::from_ref_time(7_970_000 as RefTimeWeight) + Weight::from_ref_time(7_970_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(175_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 19]`. /// The range of component `x` is `[1, 100]`. fn provide_judgement(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(24_730_000 as RefTimeWeight) + Weight::from_ref_time(24_730_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(196_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(196_000 as u64).saturating_mul(r as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(341_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(341_000 as u64).saturating_mul(x as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity SubsOf (r:1 w:1) // Storage: Identity IdentityOf (r:1 w:1) @@ -397,57 +397,57 @@ impl WeightInfo for () { /// The range of component `s` is `[1, 100]`. /// The range of component `x` is `[1, 100]`. 
fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - Weight::from_ref_time(44_988_000 as RefTimeWeight) + Weight::from_ref_time(44_988_000 as u64) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(201_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(201_000 as u64).saturating_mul(r as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_126_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_126_000 as u64).saturating_mul(s as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(x as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(x as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. fn add_sub(s: u32, ) -> Weight { - Weight::from_ref_time(36_768_000 as RefTimeWeight) + Weight::from_ref_time(36_768_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { - Weight::from_ref_time(13_474_000 as RefTimeWeight) + Weight::from_ref_time(13_474_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { - Weight::from_ref_time(37_720_000 as RefTimeWeight) + Weight::from_ref_time(37_720_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 99]`. 
fn quit_sub(s: u32, ) -> Weight { - Weight::from_ref_time(26_848_000 as RefTimeWeight) + Weight::from_ref_time(26_848_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs index acad1e1165d4c..3d20c9a0e614c 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_im_online. @@ -56,13 +56,13 @@ impl WeightInfo for SubstrateWeight { // Storage: ImOnline AuthoredBlocks (r:1 w:0) // Storage: ImOnline Keys (r:1 w:0) fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - Weight::from_ref_time(79_225_000 as RefTimeWeight) + Weight::from_ref_time(79_225_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(41_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(41_000 as u64).saturating_mul(k as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(293_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(293_000 as u64).saturating_mul(e as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } } @@ -74,12 +74,12 @@ impl WeightInfo for () { // Storage: ImOnline AuthoredBlocks (r:1 w:0) // Storage: ImOnline Keys (r:1 w:0) fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - Weight::from_ref_time(79_225_000 as RefTimeWeight) + Weight::from_ref_time(79_225_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(41_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(41_000 as u64).saturating_mul(k as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(293_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(293_000 as u64).saturating_mul(e as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs index 2475c86fd499b..64bfa744d9b46 100644 --- a/frame/indices/src/weights.rs +++ b/frame/indices/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_indices. 
@@ -56,35 +56,35 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Indices Accounts (r:1 w:1) fn claim() -> Weight { - Weight::from_ref_time(25_929_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_929_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(32_627_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(32_627_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Indices Accounts (r:1 w:1) fn free() -> Weight { - Weight::from_ref_time(26_804_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(26_804_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - Weight::from_ref_time(27_390_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(27_390_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Indices Accounts (r:1 w:1) fn freeze() -> Weight { - Weight::from_ref_time(30_973_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(30_973_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } } @@ -92,34 +92,34 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Indices Accounts (r:1 w:1) fn claim() -> Weight { - Weight::from_ref_time(25_929_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_929_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(32_627_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(32_627_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Indices Accounts (r:1 w:1) fn free() -> Weight { - Weight::from_ref_time(26_804_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(26_804_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Indices Accounts (r:1 
w:1) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - Weight::from_ref_time(27_390_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(27_390_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Indices Accounts (r:1 w:1) fn freeze() -> Weight { - Weight::from_ref_time(30_973_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(30_973_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs index 10eda18e0e24f..bae2e8a89c4e5 100644 --- a/frame/lottery/src/weights.rs +++ b/frame/lottery/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_lottery. @@ -63,30 +63,30 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - Weight::from_ref_time(44_706_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(44_706_000 as u64) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Lottery CallIndices (r:0 w:1) fn set_calls(n: u32, ) -> Weight { - Weight::from_ref_time(12_556_000 as RefTimeWeight) + Weight::from_ref_time(12_556_000 as u64) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(295_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(295_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - Weight::from_ref_time(38_051_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(38_051_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - Weight::from_ref_time(6_910_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(6_910_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -94,9 +94,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - Weight::from_ref_time(53_732_000 as RefTimeWeight) - 
.saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(53_732_000 as u64) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -105,9 +105,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - Weight::from_ref_time(55_868_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(7 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(55_868_000 as u64) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } } @@ -121,30 +121,30 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - Weight::from_ref_time(44_706_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(44_706_000 as u64) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Lottery CallIndices (r:0 w:1) fn set_calls(n: u32, ) -> Weight { - Weight::from_ref_time(12_556_000 as RefTimeWeight) + Weight::from_ref_time(12_556_000 as u64) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(295_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(295_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - Weight::from_ref_time(38_051_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(38_051_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - Weight::from_ref_time(6_910_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(6_910_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -152,9 +152,9 @@ impl WeightInfo for () { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - Weight::from_ref_time(53_732_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(53_732_000 as u64) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) // Storage: Lottery Lottery (r:1 w:1) @@ -163,8 +163,8 @@ impl WeightInfo for () { // 
Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - Weight::from_ref_time(55_868_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(7 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(55_868_000 as u64) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } } diff --git a/frame/membership/src/weights.rs b/frame/membership/src/weights.rs index 636f8eba64694..4876b3d13e2e3 100644 --- a/frame/membership/src/weights.rs +++ b/frame/membership/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_membership. @@ -61,11 +61,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn add_member(m: u32, ) -> Weight { - Weight::from_ref_time(15_318_000 as RefTimeWeight) + Weight::from_ref_time(15_318_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(51_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -73,11 +73,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn remove_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_005_000 as RefTimeWeight) + Weight::from_ref_time(18_005_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -85,11 +85,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn swap_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_029_000 as RefTimeWeight) + Weight::from_ref_time(18_029_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -97,11 +97,11 @@ impl WeightInfo for SubstrateWeight { // Storage: 
TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn reset_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_105_000 as RefTimeWeight) + Weight::from_ref_time(18_105_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -109,29 +109,29 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn change_key(m: u32, ) -> Weight { - Weight::from_ref_time(18_852_000 as RefTimeWeight) + Weight::from_ref_time(18_852_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: TechnicalMembership Members (r:1 w:0) // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn set_prime(m: u32, ) -> Weight { - Weight::from_ref_time(4_869_000 as RefTimeWeight) + Weight::from_ref_time(4_869_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(28_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(28_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn clear_prime(m: u32, ) -> Weight { - Weight::from_ref_time(1_593_000 as RefTimeWeight) + Weight::from_ref_time(1_593_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(m as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } } @@ -142,11 +142,11 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn add_member(m: u32, ) -> Weight { - Weight::from_ref_time(15_318_000 as RefTimeWeight) + Weight::from_ref_time(15_318_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(51_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(51_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: 
TechnicalCommittee Proposals (r:1 w:0) @@ -154,11 +154,11 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn remove_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_005_000 as RefTimeWeight) + Weight::from_ref_time(18_005_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -166,11 +166,11 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn swap_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_029_000 as RefTimeWeight) + Weight::from_ref_time(18_029_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -178,11 +178,11 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn reset_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_105_000 as RefTimeWeight) + Weight::from_ref_time(18_105_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: TechnicalMembership Members (r:1 w:1) // Storage: TechnicalCommittee Proposals (r:1 w:0) @@ -190,28 +190,28 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn change_key(m: u32, ) -> Weight { - Weight::from_ref_time(18_852_000 as RefTimeWeight) + Weight::from_ref_time(18_852_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: TechnicalMembership Members (r:1 w:0) // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn set_prime(m: u32, ) -> Weight { - Weight::from_ref_time(4_869_000 as RefTimeWeight) + Weight::from_ref_time(4_869_000 as u64) // Standard Error: 0 - 
.saturating_add(Weight::from_ref_time(28_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(28_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) fn clear_prime(m: u32, ) -> Weight { - Weight::from_ref_time(1_593_000 as RefTimeWeight) + Weight::from_ref_time(1_593_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(m as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index 455326b71de8c..d8d576775408a 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_multisig. @@ -60,199 +60,199 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn as_multi_threshold_1(_z: u32, ) -> Weight { - Weight::from_ref_time(17_537_000 as RefTimeWeight) + Weight::from_ref_time(17_537_000 as u64) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn as_multi_create(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(36_535_000 as RefTimeWeight) + Weight::from_ref_time(36_535_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(99_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(99_000 as u64).saturating_mul(s as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(z as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(39_918_000 as RefTimeWeight) + Weight::from_ref_time(39_918_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(95_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_000 as u64).saturating_mul(s as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + 
.saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) fn as_multi_approve(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(25_524_000 as RefTimeWeight) + Weight::from_ref_time(25_524_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(94_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(94_000 as u64).saturating_mul(s as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(z as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(39_923_000 as RefTimeWeight) + Weight::from_ref_time(39_923_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(91_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(91_000 as u64).saturating_mul(s as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) fn as_multi_complete(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(45_877_000 as RefTimeWeight) + Weight::from_ref_time(45_877_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(146_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(146_000 as u64).saturating_mul(s as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn approve_as_multi_create(s: u32, ) -> Weight { - Weight::from_ref_time(34_309_000 as RefTimeWeight) + Weight::from_ref_time(34_309_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:0) fn approve_as_multi_approve(s: u32, ) -> Weight { - Weight::from_ref_time(22_848_000 as 
RefTimeWeight) + Weight::from_ref_time(22_848_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) fn approve_as_multi_complete(s: u32, ) -> Weight { - Weight::from_ref_time(63_239_000 as RefTimeWeight) + Weight::from_ref_time(63_239_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(161_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) fn cancel_as_multi(s: u32, ) -> Weight { - Weight::from_ref_time(51_254_000 as RefTimeWeight) + Weight::from_ref_time(51_254_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(118_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } } // For backwards compatibility and tests impl WeightInfo for () { fn as_multi_threshold_1(_z: u32, ) -> Weight { - Weight::from_ref_time(17_537_000 as RefTimeWeight) + Weight::from_ref_time(17_537_000 as u64) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn as_multi_create(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(36_535_000 as RefTimeWeight) + Weight::from_ref_time(36_535_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(99_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(99_000 as u64).saturating_mul(s as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(z as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(39_918_000 as RefTimeWeight) + Weight::from_ref_time(39_918_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(95_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(95_000 as u64).saturating_mul(s as u64)) // Standard Error: 0 - 
.saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) fn as_multi_approve(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(25_524_000 as RefTimeWeight) + Weight::from_ref_time(25_524_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(94_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(94_000 as u64).saturating_mul(s as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(z as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(39_923_000 as RefTimeWeight) + Weight::from_ref_time(39_923_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(91_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(91_000 as u64).saturating_mul(s as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) fn as_multi_complete(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(45_877_000 as RefTimeWeight) + Weight::from_ref_time(45_877_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(146_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(146_000 as u64).saturating_mul(s as u64)) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(z as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn approve_as_multi_create(s: u32, ) -> Weight { - Weight::from_ref_time(34_309_000 as RefTimeWeight) + Weight::from_ref_time(34_309_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - 
.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:0) fn approve_as_multi_approve(s: u32, ) -> Weight { - Weight::from_ref_time(22_848_000 as RefTimeWeight) + Weight::from_ref_time(22_848_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(114_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) fn approve_as_multi_complete(s: u32, ) -> Weight { - Weight::from_ref_time(63_239_000 as RefTimeWeight) + Weight::from_ref_time(63_239_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(161_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(161_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: Multisig Calls (r:1 w:1) fn cancel_as_multi(s: u32, ) -> Weight { - Weight::from_ref_time(51_254_000 as RefTimeWeight) + Weight::from_ref_time(51_254_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(118_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } diff --git a/frame/node-authorization/src/weights.rs b/frame/node-authorization/src/weights.rs index f5a816ddaecee..63728c21b65c4 100644 --- a/frame/node-authorization/src/weights.rs +++ b/frame/node-authorization/src/weights.rs @@ -21,7 +21,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; pub trait WeightInfo { diff --git a/frame/nomination-pools/src/weights.rs b/frame/nomination-pools/src/weights.rs index f24aeb6a0ee0d..e20394ca668b9 100644 --- a/frame/nomination-pools/src/weights.rs +++ b/frame/nomination-pools/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_nomination_pools. 
@@ -80,9 +80,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn join() -> Weight { - Weight::from_ref_time(123_947_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(17 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) + Weight::from_ref_time(123_947_000 as u64) + .saturating_add(T::DbWeight::get().reads(17 as u64)) + .saturating_add(T::DbWeight::get().writes(12 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -94,9 +94,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - Weight::from_ref_time(118_236_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(14 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) + Weight::from_ref_time(118_236_000 as u64) + .saturating_add(T::DbWeight::get().reads(14 as u64)) + .saturating_add(T::DbWeight::get().writes(12 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -108,18 +108,18 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - Weight::from_ref_time(132_475_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(14 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(13 as RefTimeWeight)) + Weight::from_ref_time(132_475_000 as u64) + .saturating_add(T::DbWeight::get().reads(14 as u64)) + .saturating_add(T::DbWeight::get().writes(13 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - Weight::from_ref_time(50_299_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(50_299_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -136,9 +136,9 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - Weight::from_ref_time(121_254_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(18 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(13 as RefTimeWeight)) + Weight::from_ref_time(121_254_000 as u64) + .saturating_add(T::DbWeight::get().reads(18 as u64)) + .saturating_add(T::DbWeight::get().writes(13 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -146,11 +146,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. 
fn pool_withdraw_unbonded(s: u32, ) -> Weight { - Weight::from_ref_time(41_928_000 as RefTimeWeight) + Weight::from_ref_time(41_928_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -162,11 +162,11 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - Weight::from_ref_time(81_611_000 as RefTimeWeight) + Weight::from_ref_time(81_611_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(7 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().writes(7 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -189,9 +189,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - Weight::from_ref_time(139_849_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(19 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(16 as RefTimeWeight)) + Weight::from_ref_time(139_849_000 as u64) + .saturating_add(T::DbWeight::get().reads(19 as u64)) + .saturating_add(T::DbWeight::get().writes(16 as u64)) } // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: NominationPools MinCreateBond (r:1 w:0) @@ -216,9 +216,9 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(126_246_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(22 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(15 as RefTimeWeight)) + Weight::from_ref_time(126_246_000 as u64) + .saturating_add(T::DbWeight::get().reads(22 as u64)) + .saturating_add(T::DbWeight::get().writes(15 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -234,30 +234,30 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. 
fn nominate(n: u32, ) -> Weight { - Weight::from_ref_time(48_829_000 as RefTimeWeight) + Weight::from_ref_time(48_829_000 as u64) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(2_204_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_204_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(12 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - Weight::from_ref_time(26_761_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(26_761_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { - Weight::from_ref_time(14_519_000 as RefTimeWeight) + Weight::from_ref_time(14_519_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: NominationPools MinJoinBond (r:0 w:1) // Storage: NominationPools MaxPoolMembers (r:0 w:1) @@ -265,14 +265,14 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - Weight::from_ref_time(6_173_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(6_173_000 as u64) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - Weight::from_ref_time(22_261_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(22_261_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -283,9 +283,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - Weight::from_ref_time(47_959_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(47_959_000 as u64) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } } @@ -305,9 +305,9 @@ impl WeightInfo for 
() { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn join() -> Weight { - Weight::from_ref_time(123_947_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(17 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) + Weight::from_ref_time(123_947_000 as u64) + .saturating_add(RocksDbWeight::get().reads(17 as u64)) + .saturating_add(RocksDbWeight::get().writes(12 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -319,9 +319,9 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - Weight::from_ref_time(118_236_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(14 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) + Weight::from_ref_time(118_236_000 as u64) + .saturating_add(RocksDbWeight::get().reads(14 as u64)) + .saturating_add(RocksDbWeight::get().writes(12 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -333,18 +333,18 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - Weight::from_ref_time(132_475_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(14 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(13 as RefTimeWeight)) + Weight::from_ref_time(132_475_000 as u64) + .saturating_add(RocksDbWeight::get().reads(14 as u64)) + .saturating_add(RocksDbWeight::get().writes(13 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - Weight::from_ref_time(50_299_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(50_299_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) @@ -361,9 +361,9 @@ impl WeightInfo for () { // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - Weight::from_ref_time(121_254_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(18 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(13 as RefTimeWeight)) + Weight::from_ref_time(121_254_000 as u64) + .saturating_add(RocksDbWeight::get().reads(18 as u64)) + .saturating_add(RocksDbWeight::get().writes(13 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -371,11 +371,11 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. 
fn pool_withdraw_unbonded(s: u32, ) -> Weight { - Weight::from_ref_time(41_928_000 as RefTimeWeight) + Weight::from_ref_time(41_928_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -387,11 +387,11 @@ impl WeightInfo for () { // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - Weight::from_ref_time(81_611_000 as RefTimeWeight) + Weight::from_ref_time(81_611_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(7 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().writes(7 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -414,9 +414,9 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - Weight::from_ref_time(139_849_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(19 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(16 as RefTimeWeight)) + Weight::from_ref_time(139_849_000 as u64) + .saturating_add(RocksDbWeight::get().reads(19 as u64)) + .saturating_add(RocksDbWeight::get().writes(16 as u64)) } // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: NominationPools MinCreateBond (r:1 w:0) @@ -441,9 +441,9 @@ impl WeightInfo for () { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(126_246_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(22 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(15 as RefTimeWeight)) + Weight::from_ref_time(126_246_000 as u64) + .saturating_add(RocksDbWeight::get().reads(22 as u64)) + .saturating_add(RocksDbWeight::get().writes(15 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -459,30 +459,30 @@ impl WeightInfo for () { // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. 
fn nominate(n: u32, ) -> Weight { - Weight::from_ref_time(48_829_000 as RefTimeWeight) + Weight::from_ref_time(48_829_000 as u64) // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(2_204_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_204_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(12 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - Weight::from_ref_time(26_761_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(26_761_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { - Weight::from_ref_time(14_519_000 as RefTimeWeight) + Weight::from_ref_time(14_519_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: NominationPools MinJoinBond (r:0 w:1) // Storage: NominationPools MaxPoolMembers (r:0 w:1) @@ -490,14 +490,14 @@ impl WeightInfo for () { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - Weight::from_ref_time(6_173_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(6_173_000 as u64) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - Weight::from_ref_time(22_261_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(22_261_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) @@ -508,8 +508,8 @@ impl WeightInfo for () { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - Weight::from_ref_time(47_959_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(47_959_000 as u64) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } } diff --git 
a/frame/preimage/src/weights.rs b/frame/preimage/src/weights.rs index 0d93611a1f097..ad9e3e569e733 100644 --- a/frame/preimage/src/weights.rs +++ b/frame/preimage/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_preimage. @@ -64,87 +64,87 @@ impl WeightInfo for SubstrateWeight { // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn note_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_requested_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_no_deposit_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - Weight::from_ref_time(44_380_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(44_380_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - Weight::from_ref_time(30_280_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(30_280_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - Weight::from_ref_time(42_809_000 as RefTimeWeight) 
- .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(42_809_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - Weight::from_ref_time(28_964_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(28_964_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - Weight::from_ref_time(17_555_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(17_555_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - Weight::from_ref_time(7_745_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(7_745_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - Weight::from_ref_time(29_758_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(29_758_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - Weight::from_ref_time(18_360_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(18_360_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - Weight::from_ref_time(7_439_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(7_439_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } } @@ -153,86 +153,86 @@ impl WeightInfo for () { // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn note_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + 
.saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_requested_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:0) fn note_no_deposit_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - Weight::from_ref_time(44_380_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(44_380_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - Weight::from_ref_time(30_280_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(30_280_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - Weight::from_ref_time(42_809_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(42_809_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - Weight::from_ref_time(28_964_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(28_964_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - Weight::from_ref_time(17_555_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(17_555_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + 
.saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - Weight::from_ref_time(7_745_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(7_745_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - Weight::from_ref_time(29_758_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(29_758_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - Weight::from_ref_time(18_360_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(18_360_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - Weight::from_ref_time(7_439_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(7_439_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 5b332d98db09b..2d409d977f875 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_proxy. 
@@ -61,97 +61,97 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Proxy Proxies (r:1 w:0) fn proxy(p: u32, ) -> Weight { - Weight::from_ref_time(17_768_000 as RefTimeWeight) + Weight::from_ref_time(17_768_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(76_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(76_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn proxy_announced(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(35_682_000 as RefTimeWeight) + Weight::from_ref_time(35_682_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(a as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn remove_announcement(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(25_586_000 as RefTimeWeight) + Weight::from_ref_time(25_586_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(175_000 as u64).saturating_mul(a as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(18_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(18_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_announcement(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(25_794_000 as RefTimeWeight) + Weight::from_ref_time(25_794_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(173_000 as u64).saturating_mul(a as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(13_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(13_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn announce(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_002_000 as RefTimeWeight) + Weight::from_ref_time(33_002_000 as u64) // Standard Error: 2_000 - 
.saturating_add(Weight::from_ref_time(163_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(163_000 as u64).saturating_mul(a as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(79_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Proxy Proxies (r:1 w:1) fn add_proxy(p: u32, ) -> Weight { - Weight::from_ref_time(28_166_000 as RefTimeWeight) + Weight::from_ref_time(28_166_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(105_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(105_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxy(p: u32, ) -> Weight { - Weight::from_ref_time(28_128_000 as RefTimeWeight) + Weight::from_ref_time(28_128_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(118_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxies(p: u32, ) -> Weight { - Weight::from_ref_time(24_066_000 as RefTimeWeight) + Weight::from_ref_time(24_066_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(81_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(81_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) fn anonymous(p: u32, ) -> Weight { - Weight::from_ref_time(31_077_000 as RefTimeWeight) + Weight::from_ref_time(31_077_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(37_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) fn kill_anonymous(p: u32, ) -> Weight { - Weight::from_ref_time(24_657_000 as RefTimeWeight) + Weight::from_ref_time(24_657_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - 
.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } } @@ -159,96 +159,96 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Proxy Proxies (r:1 w:0) fn proxy(p: u32, ) -> Weight { - Weight::from_ref_time(17_768_000 as RefTimeWeight) + Weight::from_ref_time(17_768_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(76_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(76_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn proxy_announced(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(35_682_000 as RefTimeWeight) + Weight::from_ref_time(35_682_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(158_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(a as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn remove_announcement(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(25_586_000 as RefTimeWeight) + Weight::from_ref_time(25_586_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(175_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(175_000 as u64).saturating_mul(a as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(18_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(18_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_announcement(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(25_794_000 as RefTimeWeight) + Weight::from_ref_time(25_794_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(173_000 as u64).saturating_mul(a as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(13_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(13_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // 
Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) fn announce(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_002_000 as RefTimeWeight) + Weight::from_ref_time(33_002_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(163_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(163_000 as u64).saturating_mul(a as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(79_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Proxy Proxies (r:1 w:1) fn add_proxy(p: u32, ) -> Weight { - Weight::from_ref_time(28_166_000 as RefTimeWeight) + Weight::from_ref_time(28_166_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(105_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(105_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxy(p: u32, ) -> Weight { - Weight::from_ref_time(28_128_000 as RefTimeWeight) + Weight::from_ref_time(28_128_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(118_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(118_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) fn remove_proxies(p: u32, ) -> Weight { - Weight::from_ref_time(24_066_000 as RefTimeWeight) + Weight::from_ref_time(24_066_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(81_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(81_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) fn anonymous(p: u32, ) -> Weight { - Weight::from_ref_time(31_077_000 as RefTimeWeight) + Weight::from_ref_time(31_077_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(37_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) fn kill_anonymous(p: u32, ) -> Weight 
{ - Weight::from_ref_time(24_657_000 as RefTimeWeight) + Weight::from_ref_time(24_657_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/ranked-collective/src/weights.rs b/frame/ranked-collective/src/weights.rs index 052a9bfdcc9e1..6b6cca52316c9 100644 --- a/frame/ranked-collective/src/weights.rs +++ b/frame/ranked-collective/src/weights.rs @@ -40,7 +40,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_ranked_collective. @@ -61,62 +61,62 @@ impl WeightInfo for SubstrateWeight { // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn add_member() -> Weight { - Weight::from_ref_time(11_000_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(11_000_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn remove_member(r: u32, ) -> Weight { - Weight::from_ref_time(16_855_000 as RefTimeWeight) + Weight::from_ref_time(16_855_000 as u64) // Standard Error: 27_000 - .saturating_add(Weight::from_ref_time(8_107_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(8_107_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(r as u64))) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn promote_member(r: u32, ) -> Weight { - Weight::from_ref_time(11_936_000 as RefTimeWeight) + Weight::from_ref_time(11_936_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(9_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as 
u64)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn demote_member(r: u32, ) -> Weight { - Weight::from_ref_time(17_582_000 as RefTimeWeight) + Weight::from_ref_time(17_582_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: RankedCollective Members (r:1 w:0) // Storage: RankedPolls ReferendumInfoFor (r:1 w:1) // Storage: RankedCollective Voting (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote() -> Weight { - Weight::from_ref_time(22_000_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(22_000_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: RankedPolls ReferendumInfoFor (r:1 w:0) // Storage: RankedCollective Voting (r:0 w:1) fn cleanup_poll(n: u32, ) -> Weight { - Weight::from_ref_time(6_188_000 as RefTimeWeight) + Weight::from_ref_time(6_188_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(867_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } } @@ -127,61 +127,61 @@ impl WeightInfo for () { // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn add_member() -> Weight { - Weight::from_ref_time(11_000_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(11_000_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn remove_member(r: u32, ) -> Weight { - Weight::from_ref_time(16_855_000 as RefTimeWeight) + Weight::from_ref_time(16_855_000 as u64) // Standard Error: 27_000 - .saturating_add(Weight::from_ref_time(8_107_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(r as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(8_107_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + 
.saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(r as u64))) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn promote_member(r: u32, ) -> Weight { - Weight::from_ref_time(11_936_000 as RefTimeWeight) + Weight::from_ref_time(11_936_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(9_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(9_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: RankedCollective Members (r:1 w:1) // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) fn demote_member(r: u32, ) -> Weight { - Weight::from_ref_time(17_582_000 as RefTimeWeight) + Weight::from_ref_time(17_582_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(142_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: RankedCollective Members (r:1 w:0) // Storage: RankedPolls ReferendumInfoFor (r:1 w:1) // Storage: RankedCollective Voting (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote() -> Weight { - Weight::from_ref_time(22_000_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(22_000_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: RankedPolls ReferendumInfoFor (r:1 w:0) // Storage: RankedCollective Voting (r:0 w:1) fn cleanup_poll(n: u32, ) -> Weight { - Weight::from_ref_time(6_188_000 as RefTimeWeight) + Weight::from_ref_time(6_188_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(867_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(867_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } } diff --git a/frame/recovery/src/weights.rs b/frame/recovery/src/weights.rs index 06a4759938964..b496f16a46b28 100644 --- a/frame/recovery/src/weights.rs +++ b/frame/recovery/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use 
sp_std::marker::PhantomData; /// Weight functions needed for pallet_recovery. @@ -60,71 +60,71 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Recovery Proxy (r:1 w:0) fn as_recovered() -> Weight { - Weight::from_ref_time(6_579_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + Weight::from_ref_time(6_579_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Recovery Proxy (r:0 w:1) fn set_recovered() -> Weight { - Weight::from_ref_time(13_402_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(13_402_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:1) fn create_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(28_217_000 as RefTimeWeight) + Weight::from_ref_time(28_217_000 as u64) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(172_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn initiate_recovery() -> Weight { - Weight::from_ref_time(34_082_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(34_082_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn vouch_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(22_038_000 as RefTimeWeight) + Weight::from_ref_time(22_038_000 as u64) // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(307_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(307_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Proxy (r:1 w:1) fn claim_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(28_621_000 as RefTimeWeight) + Weight::from_ref_time(28_621_000 as u64) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(353_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(353_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery ActiveRecoveries (r:1 w:1) // Storage: System Account (r:1 w:1) fn close_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(33_287_000 as RefTimeWeight) + Weight::from_ref_time(33_287_000 as u64) // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(264_000 as 
RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(264_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Recoverable (r:1 w:1) fn remove_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(31_964_000 as RefTimeWeight) + Weight::from_ref_time(31_964_000 as u64) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(222_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(222_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery Proxy (r:1 w:1) fn cancel_recovered() -> Weight { - Weight::from_ref_time(12_702_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(12_702_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } } @@ -132,70 +132,70 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Recovery Proxy (r:1 w:0) fn as_recovered() -> Weight { - Weight::from_ref_time(6_579_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + Weight::from_ref_time(6_579_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Recovery Proxy (r:0 w:1) fn set_recovered() -> Weight { - Weight::from_ref_time(13_402_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(13_402_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:1) fn create_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(28_217_000 as RefTimeWeight) + Weight::from_ref_time(28_217_000 as u64) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(172_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(172_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn initiate_recovery() -> Weight { - Weight::from_ref_time(34_082_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(34_082_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn vouch_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(22_038_000 as RefTimeWeight) + Weight::from_ref_time(22_038_000 as u64) // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(307_000 as 
RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(307_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Proxy (r:1 w:1) fn claim_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(28_621_000 as RefTimeWeight) + Weight::from_ref_time(28_621_000 as u64) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(353_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(353_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery ActiveRecoveries (r:1 w:1) // Storage: System Account (r:1 w:1) fn close_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(33_287_000 as RefTimeWeight) + Weight::from_ref_time(33_287_000 as u64) // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(264_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(264_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Recoverable (r:1 w:1) fn remove_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(31_964_000 as RefTimeWeight) + Weight::from_ref_time(31_964_000 as u64) // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(222_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(222_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery Proxy (r:1 w:1) fn cancel_recovered() -> Weight { - Weight::from_ref_time(12_702_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(12_702_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/referenda/src/weights.rs b/frame/referenda/src/weights.rs index 84a726d9e4fbe..50f8aa41b30aa 100644 --- a/frame/referenda/src/weights.rs +++ b/frame/referenda/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_referenda. 
@@ -80,205 +80,205 @@ impl WeightInfo for SubstrateWeight { // Storage: Scheduler Agenda (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:0 w:1) fn submit() -> Weight { - Weight::from_ref_time(34_640_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(34_640_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_preparing() -> Weight { - Weight::from_ref_time(44_290_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(44_290_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_queued() -> Weight { - Weight::from_ref_time(49_428_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(49_428_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_not_queued() -> Weight { - Weight::from_ref_time(50_076_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(50_076_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_passing() -> Weight { - Weight::from_ref_time(55_935_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(55_935_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_failing() -> Weight { - Weight::from_ref_time(52_921_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(52_921_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn refund_decision_deposit() -> Weight { - Weight::from_ref_time(29_160_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(29_160_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // 
Storage: Scheduler Agenda (r:2 w:2) fn cancel() -> Weight { - Weight::from_ref_time(34_972_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(34_972_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn kill() -> Weight { - Weight::from_ref_time(60_620_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(60_620_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda TrackQueue (r:1 w:0) // Storage: Referenda DecidingCount (r:1 w:1) fn one_fewer_deciding_queue_empty() -> Weight { - Weight::from_ref_time(9_615_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(9_615_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_failing() -> Weight { - Weight::from_ref_time(113_077_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(113_077_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_passing() -> Weight { - Weight::from_ref_time(114_376_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(114_376_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_insertion() -> Weight { - Weight::from_ref_time(43_901_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(43_901_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_slide() -> Weight { - Weight::from_ref_time(43_279_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(43_279_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 
w:1) fn nudge_referendum_queued() -> Weight { - Weight::from_ref_time(45_564_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(45_564_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_not_queued() -> Weight { - Weight::from_ref_time(45_061_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(45_061_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_no_deposit() -> Weight { - Weight::from_ref_time(23_757_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(23_757_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_preparing() -> Weight { - Weight::from_ref_time(24_781_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(24_781_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn nudge_referendum_timed_out() -> Weight { - Weight::from_ref_time(18_344_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_344_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_failing() -> Weight { - Weight::from_ref_time(34_752_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(34_752_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_passing() -> Weight { - Weight::from_ref_time(37_055_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(37_055_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_confirming() -> Weight { - Weight::from_ref_time(31_442_000 as 
RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(31_442_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_end_confirming() -> Weight { - Weight::from_ref_time(33_201_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(33_201_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_not_confirming() -> Weight { - Weight::from_ref_time(30_047_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(30_047_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_confirming() -> Weight { - Weight::from_ref_time(29_195_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(29_195_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn nudge_referendum_approved() -> Weight { - Weight::from_ref_time(50_119_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(50_119_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_rejected() -> Weight { - Weight::from_ref_time(32_203_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(32_203_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } } @@ -288,204 +288,204 @@ impl WeightInfo for () { // Storage: Scheduler Agenda (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:0 w:1) fn submit() -> Weight { - Weight::from_ref_time(34_640_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(34_640_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_preparing() -> Weight { - Weight::from_ref_time(44_290_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - 
.saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(44_290_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_queued() -> Weight { - Weight::from_ref_time(49_428_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(49_428_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_not_queued() -> Weight { - Weight::from_ref_time(50_076_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(50_076_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_passing() -> Weight { - Weight::from_ref_time(55_935_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(55_935_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_failing() -> Weight { - Weight::from_ref_time(52_921_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(52_921_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn refund_decision_deposit() -> Weight { - Weight::from_ref_time(29_160_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(29_160_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn cancel() -> Weight { - Weight::from_ref_time(34_972_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(34_972_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn kill() -> Weight { - Weight::from_ref_time(60_620_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + 
Weight::from_ref_time(60_620_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda TrackQueue (r:1 w:0) // Storage: Referenda DecidingCount (r:1 w:1) fn one_fewer_deciding_queue_empty() -> Weight { - Weight::from_ref_time(9_615_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(9_615_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_failing() -> Weight { - Weight::from_ref_time(113_077_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(113_077_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_passing() -> Weight { - Weight::from_ref_time(114_376_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(114_376_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_insertion() -> Weight { - Weight::from_ref_time(43_901_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(43_901_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_slide() -> Weight { - Weight::from_ref_time(43_279_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(43_279_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_queued() -> Weight { - Weight::from_ref_time(45_564_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(45_564_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_not_queued() -> Weight { - Weight::from_ref_time(45_061_000 
as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(45_061_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_no_deposit() -> Weight { - Weight::from_ref_time(23_757_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(23_757_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_preparing() -> Weight { - Weight::from_ref_time(24_781_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(24_781_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn nudge_referendum_timed_out() -> Weight { - Weight::from_ref_time(18_344_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(18_344_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_failing() -> Weight { - Weight::from_ref_time(34_752_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(34_752_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_passing() -> Weight { - Weight::from_ref_time(37_055_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(37_055_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_confirming() -> Weight { - Weight::from_ref_time(31_442_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(31_442_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_end_confirming() -> Weight { - Weight::from_ref_time(33_201_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - 
.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(33_201_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_not_confirming() -> Weight { - Weight::from_ref_time(30_047_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(30_047_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_confirming() -> Weight { - Weight::from_ref_time(29_195_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(29_195_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn nudge_referendum_approved() -> Weight { - Weight::from_ref_time(50_119_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(50_119_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_rejected() -> Weight { - Weight::from_ref_time(32_203_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(32_203_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } diff --git a/frame/remark/src/weights.rs b/frame/remark/src/weights.rs index f33ef47cd67e8..40e9e933aab2b 100644 --- a/frame/remark/src/weights.rs +++ b/frame/remark/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_remark. 
@@ -52,10 +52,10 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn store(l: u32, ) -> Weight { - Weight::from_ref_time(13_140_000 as RefTimeWeight) + Weight::from_ref_time(13_140_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(l as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) } } @@ -63,9 +63,9 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) fn store(l: u32, ) -> Weight { - Weight::from_ref_time(13_140_000 as RefTimeWeight) + Weight::from_ref_time(13_140_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(l as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) } } diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index 9688c9f1a2f9f..afbcf9373b2de 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_scheduler. @@ -68,146 +68,146 @@ impl WeightInfo for SubstrateWeight { // Storage: Preimage StatusFor (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(9_994_000 as RefTimeWeight) + Weight::from_ref_time(9_994_000 as u64) // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(19_843_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((4 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(19_843_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((4 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(10_318_000 as RefTimeWeight) + Weight::from_ref_time(10_318_000 as u64) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(15_451_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - 
.saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(15_451_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn on_initialize_periodic_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_675_000 as RefTimeWeight) + Weight::from_ref_time(11_675_000 as u64) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(17_019_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(17_019_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn on_initialize_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_934_000 as RefTimeWeight) + Weight::from_ref_time(11_934_000 as u64) // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(14_134_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(14_134_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:0) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(7_279_000 as RefTimeWeight) + Weight::from_ref_time(7_279_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(5_388_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(5_388_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + 
.saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:0) fn on_initialize_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(8_619_000 as RefTimeWeight) + Weight::from_ref_time(8_619_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_969_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_969_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_periodic_named(s: u32, ) -> Weight { - Weight::from_ref_time(16_129_000 as RefTimeWeight) + Weight::from_ref_time(16_129_000 as u64) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(9_772_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(9_772_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:2 w:2) fn on_initialize_periodic(s: u32, ) -> Weight { - Weight::from_ref_time(15_785_000 as RefTimeWeight) + Weight::from_ref_time(15_785_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(7_208_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(7_208_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named(s: u32, ) -> Weight { - Weight::from_ref_time(15_778_000 as RefTimeWeight) + Weight::from_ref_time(15_778_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(5_597_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - 
.saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(5_597_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) fn on_initialize(s: u32, ) -> Weight { - Weight::from_ref_time(15_912_000 as RefTimeWeight) + Weight::from_ref_time(15_912_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(4_530_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_530_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Scheduler Agenda (r:1 w:1) fn schedule(s: u32, ) -> Weight { - Weight::from_ref_time(18_013_000 as RefTimeWeight) + Weight::from_ref_time(18_013_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn cancel(s: u32, ) -> Weight { - Weight::from_ref_time(18_131_000 as RefTimeWeight) + Weight::from_ref_time(18_131_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn schedule_named(s: u32, ) -> Weight { - Weight::from_ref_time(21_230_000 as RefTimeWeight) + Weight::from_ref_time(21_230_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(98_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(98_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_named(s: u32, ) -> Weight { - Weight::from_ref_time(20_139_000 as RefTimeWeight) + Weight::from_ref_time(20_139_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + 
.saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } } @@ -218,145 +218,145 @@ impl WeightInfo for () { // Storage: Preimage StatusFor (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(9_994_000 as RefTimeWeight) + Weight::from_ref_time(9_994_000 as u64) // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(19_843_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((4 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(19_843_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((4 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(10_318_000 as RefTimeWeight) + Weight::from_ref_time(10_318_000 as u64) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(15_451_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(15_451_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn on_initialize_periodic_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_675_000 as RefTimeWeight) + Weight::from_ref_time(11_675_000 as u64) // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(17_019_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(17_019_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: 
Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn on_initialize_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_934_000 as RefTimeWeight) + Weight::from_ref_time(11_934_000 as u64) // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(14_134_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(14_134_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:0) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(7_279_000 as RefTimeWeight) + Weight::from_ref_time(7_279_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(5_388_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(5_388_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Preimage PreimageFor (r:1 w:0) fn on_initialize_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(8_619_000 as RefTimeWeight) + Weight::from_ref_time(8_619_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_969_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_969_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_periodic_named(s: u32, ) -> Weight { - Weight::from_ref_time(16_129_000 as RefTimeWeight) + Weight::from_ref_time(16_129_000 as u64) // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(9_772_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - 
.saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(9_772_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:2 w:2) fn on_initialize_periodic(s: u32, ) -> Weight { - Weight::from_ref_time(15_785_000 as RefTimeWeight) + Weight::from_ref_time(15_785_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(7_208_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(7_208_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn on_initialize_named(s: u32, ) -> Weight { - Weight::from_ref_time(15_778_000 as RefTimeWeight) + Weight::from_ref_time(15_778_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(5_597_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(5_597_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) fn on_initialize(s: u32, ) -> Weight { - Weight::from_ref_time(15_912_000 as RefTimeWeight) + Weight::from_ref_time(15_912_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(4_530_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_530_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Scheduler Agenda (r:1 w:1) fn schedule(s: u32, ) -> Weight { - Weight::from_ref_time(18_013_000 as RefTimeWeight) + Weight::from_ref_time(18_013_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(87_000 as 
u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) fn cancel(s: u32, ) -> Weight { - Weight::from_ref_time(18_131_000 as RefTimeWeight) + Weight::from_ref_time(18_131_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn schedule_named(s: u32, ) -> Weight { - Weight::from_ref_time(21_230_000 as RefTimeWeight) + Weight::from_ref_time(21_230_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(98_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(98_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_named(s: u32, ) -> Weight { - Weight::from_ref_time(20_139_000 as RefTimeWeight) + Weight::from_ref_time(20_139_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs index 5208a679c6bb7..6e775d3d6f47a 100644 --- a/frame/session/src/weights.rs +++ b/frame/session/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_session. 
@@ -55,17 +55,17 @@ impl WeightInfo for SubstrateWeight {
 	// Storage: Session NextKeys (r:1 w:1)
 	// Storage: Session KeyOwner (r:4 w:4)
 	fn set_keys() -> Weight {
-		Weight::from_ref_time(48_484_000 as RefTimeWeight)
-			.saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight))
-			.saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight))
+		Weight::from_ref_time(48_484_000 as u64)
+			.saturating_add(T::DbWeight::get().reads(6 as u64))
+			.saturating_add(T::DbWeight::get().writes(5 as u64))
 	}
 	// Storage: Staking Ledger (r:1 w:0)
 	// Storage: Session NextKeys (r:1 w:1)
 	// Storage: Session KeyOwner (r:0 w:4)
 	fn purge_keys() -> Weight {
-		Weight::from_ref_time(38_003_000 as RefTimeWeight)
-			.saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight))
-			.saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight))
+		Weight::from_ref_time(38_003_000 as u64)
+			.saturating_add(T::DbWeight::get().reads(2 as u64))
+			.saturating_add(T::DbWeight::get().writes(5 as u64))
 	}
 }
@@ -75,16 +75,16 @@ impl WeightInfo for () {
 	// Storage: Session NextKeys (r:1 w:1)
 	// Storage: Session KeyOwner (r:4 w:4)
 	fn set_keys() -> Weight {
-		Weight::from_ref_time(48_484_000 as RefTimeWeight)
-			.saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight))
-			.saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight))
+		Weight::from_ref_time(48_484_000 as u64)
+			.saturating_add(RocksDbWeight::get().reads(6 as u64))
+			.saturating_add(RocksDbWeight::get().writes(5 as u64))
 	}
 	// Storage: Staking Ledger (r:1 w:0)
 	// Storage: Session NextKeys (r:1 w:1)
 	// Storage: Session KeyOwner (r:0 w:4)
 	fn purge_keys() -> Weight {
-		Weight::from_ref_time(38_003_000 as RefTimeWeight)
-			.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight))
-			.saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight))
+		Weight::from_ref_time(38_003_000 as u64)
+			.saturating_add(RocksDbWeight::get().reads(2 as u64))
+			.saturating_add(RocksDbWeight::get().writes(5 as u64))
 	}
 }
diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs
index 82ab44f428372..09c2d8b5de113 100644
--- a/frame/staking/src/weights.rs
+++ b/frame/staking/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]

-use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;

 /// Weight functions needed for pallet_staking.
@@ -86,9 +86,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - Weight::from_ref_time(43_992_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(43_992_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -96,9 +96,9 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - Weight::from_ref_time(75_827_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(7 as RefTimeWeight)) + Weight::from_ref_time(75_827_000 as u64) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().writes(7 as u64)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) @@ -110,20 +110,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn unbond() -> Weight { - Weight::from_ref_time(81_075_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(8 as RefTimeWeight)) + Weight::from_ref_time(81_075_000 as u64) + .saturating_add(T::DbWeight::get().reads(12 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn withdraw_unbonded_update(s: u32, ) -> Weight { - Weight::from_ref_time(35_763_000 as RefTimeWeight) + Weight::from_ref_time(35_763_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(57_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(57_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -139,9 +139,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - Weight::from_ref_time(66_938_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(13 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(11 as RefTimeWeight)) + Weight::from_ref_time(66_938_000 as u64) + .saturating_add(T::DbWeight::get().reads(13 as u64)) + .saturating_add(T::DbWeight::get().writes(11 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) @@ -155,19 +155,19 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - Weight::from_ref_time(52_943_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(52_943_000 as u64) + .saturating_add(T::DbWeight::get().reads(11 as u64)) + 
.saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) fn kick(k: u32, ) -> Weight { - Weight::from_ref_time(23_264_000 as RefTimeWeight) + Weight::from_ref_time(23_264_000 as u64) // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(8_006_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(8_006_000 as u64).saturating_mul(k as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(k as u64))) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) @@ -181,12 +181,12 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) fn nominate(n: u32, ) -> Weight { - Weight::from_ref_time(56_596_000 as RefTimeWeight) + Weight::from_ref_time(56_596_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(3_644_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_644_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(12 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) @@ -196,50 +196,50 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - Weight::from_ref_time(51_117_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(8 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(51_117_000 as u64) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - Weight::from_ref_time(11_223_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(11_223_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - Weight::from_ref_time(19_826_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(19_826_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - Weight::from_ref_time(3_789_000 
as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_789_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - Weight::from_ref_time(3_793_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_793_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - Weight::from_ref_time(3_802_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_802_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - Weight::from_ref_time(3_762_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_762_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking Invulnerables (r:0 w:1) fn set_invulnerables(v: u32, ) -> Weight { - Weight::from_ref_time(4_318_000 as RefTimeWeight) + Weight::from_ref_time(4_318_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(10_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(10_000 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) @@ -255,20 +255,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) fn force_unstake(s: u32, ) -> Weight { - Weight::from_ref_time(65_265_000 as RefTimeWeight) + Weight::from_ref_time(65_265_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_029_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(1_029_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(11 as u64)) + .saturating_add(T::DbWeight::get().writes(12 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Staking UnappliedSlashes (r:1 w:1) fn cancel_deferred_slash(s: u32, ) -> Weight { - Weight::from_ref_time(903_312_000 as RefTimeWeight) + Weight::from_ref_time(903_312_000 as u64) // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(4_968_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_968_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -281,13 +281,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - Weight::from_ref_time(87_569_000 as RefTimeWeight) + Weight::from_ref_time(87_569_000 
as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(24_232_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(10 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(24_232_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(10 as u64)) + .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -301,13 +301,13 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:2 w:2) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - Weight::from_ref_time(98_839_000 as RefTimeWeight) + Weight::from_ref_time(98_839_000 as u64) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(34_480_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(34_480_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(11 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(n as u64))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) @@ -316,11 +316,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn rebond(l: u32, ) -> Weight { - Weight::from_ref_time(74_865_000 as RefTimeWeight) + Weight::from_ref_time(74_865_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(64_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(9 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(8 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(64_000 as u64).saturating_mul(l as u64)) + .saturating_add(T::DbWeight::get().reads(9 as u64)) + .saturating_add(T::DbWeight::get().writes(8 as u64)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:1) @@ -332,12 +332,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn set_history_depth(e: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 62_000 - .saturating_add(Weight::from_ref_time(22_829_000 as RefTimeWeight).saturating_mul(e as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((7 as RefTimeWeight).saturating_mul(e as RefTimeWeight))) + 
.saturating_add(Weight::from_ref_time(22_829_000 as u64).saturating_mul(e as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(e as u64))) } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) @@ -353,12 +353,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) fn reap_stash(s: u32, ) -> Weight { - Weight::from_ref_time(70_933_000 as RefTimeWeight) + Weight::from_ref_time(70_933_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_021_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(12 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(12 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(1_021_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(12 as u64)) + .saturating_add(T::DbWeight::get().writes(12 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) @@ -379,16 +379,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn new_era(v: u32, n: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 897_000 - .saturating_add(Weight::from_ref_time(213_100_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(213_100_000 as u64).saturating_mul(v as u64)) // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(31_123_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(208 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) - .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(31_123_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(208 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(v as u64))) + .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(v as u64))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) @@ -399,25 +399,25 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(23_745_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(23_745_000 as u64).saturating_mul(v as u64)) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(22_497_000 as 
RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(22_497_000 as u64).saturating_mul(n as u64)) // Standard Error: 3_968_000 - .saturating_add(Weight::from_ref_time(20_676_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(202 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) - .saturating_add(T::DbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(20_676_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(202 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(v as u64))) + .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) } // Storage: Staking Validators (r:501 w:0) fn get_npos_targets(v: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 36_000 - .saturating_add(Weight::from_ref_time(8_097_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(8_097_000 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(v as u64))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -426,8 +426,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - Weight::from_ref_time(7_041_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(7_041_000 as u64) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -436,8 +436,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - Weight::from_ref_time(6_495_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(6_495_000 as u64) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) @@ -450,16 +450,16 @@ impl WeightInfo for SubstrateWeight { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - Weight::from_ref_time(62_014_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(11 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(62_014_000 as u64) + .saturating_add(T::DbWeight::get().reads(11 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - Weight::from_ref_time(12_814_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as 
RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(12_814_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } } @@ -472,9 +472,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - Weight::from_ref_time(43_992_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(43_992_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -482,9 +482,9 @@ impl WeightInfo for () { // Storage: BagsList ListNodes (r:3 w:3) // Storage: BagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - Weight::from_ref_time(75_827_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(7 as RefTimeWeight)) + Weight::from_ref_time(75_827_000 as u64) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().writes(7 as u64)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) @@ -496,20 +496,20 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn unbond() -> Weight { - Weight::from_ref_time(81_075_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) + Weight::from_ref_time(81_075_000 as u64) + .saturating_add(RocksDbWeight::get().reads(12 as u64)) + .saturating_add(RocksDbWeight::get().writes(8 as u64)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn withdraw_unbonded_update(s: u32, ) -> Weight { - Weight::from_ref_time(35_763_000 as RefTimeWeight) + Weight::from_ref_time(35_763_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(57_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(57_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) @@ -525,9 +525,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - Weight::from_ref_time(66_938_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(13 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(11 as RefTimeWeight)) + Weight::from_ref_time(66_938_000 as u64) + .saturating_add(RocksDbWeight::get().reads(13 as u64)) + .saturating_add(RocksDbWeight::get().writes(11 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinValidatorBond (r:1 w:0) @@ -541,19 +541,19 @@ impl WeightInfo for () { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - Weight::from_ref_time(52_943_000 as RefTimeWeight) - 
.saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(5 as RefTimeWeight)) + Weight::from_ref_time(52_943_000 as u64) + .saturating_add(RocksDbWeight::get().reads(11 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) fn kick(k: u32, ) -> Weight { - Weight::from_ref_time(23_264_000 as RefTimeWeight) + Weight::from_ref_time(23_264_000 as u64) // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(8_006_000 as RefTimeWeight).saturating_mul(k as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(k as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(8_006_000 as u64).saturating_mul(k as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(k as u64))) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) @@ -567,12 +567,12 @@ impl WeightInfo for () { // Storage: BagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) fn nominate(n: u32, ) -> Weight { - Weight::from_ref_time(56_596_000 as RefTimeWeight) + Weight::from_ref_time(56_596_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(3_644_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(3_644_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(12 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) @@ -582,50 +582,50 @@ impl WeightInfo for () { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - Weight::from_ref_time(51_117_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(8 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(51_117_000 as u64) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - Weight::from_ref_time(11_223_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(11_223_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - Weight::from_ref_time(19_826_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + 
Weight::from_ref_time(19_826_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - Weight::from_ref_time(3_789_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_789_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - Weight::from_ref_time(3_793_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_793_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - Weight::from_ref_time(3_802_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_802_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - Weight::from_ref_time(3_762_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(3_762_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking Invulnerables (r:0 w:1) fn set_invulnerables(v: u32, ) -> Weight { - Weight::from_ref_time(4_318_000 as RefTimeWeight) + Weight::from_ref_time(4_318_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(10_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(10_000 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) @@ -641,20 +641,20 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) fn force_unstake(s: u32, ) -> Weight { - Weight::from_ref_time(65_265_000 as RefTimeWeight) + Weight::from_ref_time(65_265_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_029_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(1_029_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(11 as u64)) + .saturating_add(RocksDbWeight::get().writes(12 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Staking UnappliedSlashes (r:1 w:1) fn cancel_deferred_slash(s: u32, ) -> Weight { - Weight::from_ref_time(903_312_000 as RefTimeWeight) + Weight::from_ref_time(903_312_000 as u64) // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(4_968_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_968_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking 
HistoryDepth (r:1 w:0) @@ -667,13 +667,13 @@ impl WeightInfo for () { // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - Weight::from_ref_time(87_569_000 as RefTimeWeight) + Weight::from_ref_time(87_569_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(24_232_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(10 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(24_232_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(10 as u64)) + .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:0) @@ -687,13 +687,13 @@ impl WeightInfo for () { // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:2 w:2) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - Weight::from_ref_time(98_839_000 as RefTimeWeight) + Weight::from_ref_time(98_839_000 as u64) // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(34_480_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(34_480_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(11 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(n as u64))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) @@ -702,11 +702,11 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:0) // Storage: BagsList ListBags (r:2 w:2) fn rebond(l: u32, ) -> Weight { - Weight::from_ref_time(74_865_000 as RefTimeWeight) + Weight::from_ref_time(74_865_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(64_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(9 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(8 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(64_000 as u64).saturating_mul(l as u64)) + .saturating_add(RocksDbWeight::get().reads(9 as u64)) + .saturating_add(RocksDbWeight::get().writes(8 as u64)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking HistoryDepth (r:1 w:1) @@ -718,12 +718,12 @@ impl WeightInfo for () { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn set_history_depth(e: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 62_000 - .saturating_add(Weight::from_ref_time(22_829_000 as 
RefTimeWeight).saturating_mul(e as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((7 as RefTimeWeight).saturating_mul(e as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(22_829_000 as u64).saturating_mul(e as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(e as u64))) } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) @@ -739,12 +739,12 @@ impl WeightInfo for () { // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) fn reap_stash(s: u32, ) -> Weight { - Weight::from_ref_time(70_933_000 as RefTimeWeight) + Weight::from_ref_time(70_933_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_021_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(12 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(12 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(1_021_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(12 as u64)) + .saturating_add(RocksDbWeight::get().writes(12 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) @@ -765,16 +765,16 @@ impl WeightInfo for () { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn new_era(v: u32, n: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 897_000 - .saturating_add(Weight::from_ref_time(213_100_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(213_100_000 as u64).saturating_mul(v as u64)) // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(31_123_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(208 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(31_123_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(208 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(v as u64))) + .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(v as u64))) } // Storage: BagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) @@ -785,25 +785,25 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 116_000 - 
.saturating_add(Weight::from_ref_time(23_745_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(23_745_000 as u64).saturating_mul(v as u64)) // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(22_497_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(22_497_000 as u64).saturating_mul(n as u64)) // Standard Error: 3_968_000 - .saturating_add(Weight::from_ref_time(20_676_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(202 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((5 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().reads((4 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(s as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(20_676_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(202 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(v as u64))) + .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) } // Storage: Staking Validators (r:501 w:0) fn get_npos_targets(v: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 36_000 - .saturating_add(Weight::from_ref_time(8_097_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(v as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(8_097_000 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(v as u64))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -812,8 +812,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - Weight::from_ref_time(7_041_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(7_041_000 as u64) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -822,8 +822,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - Weight::from_ref_time(6_495_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(6_495_000 as u64) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) @@ -836,15 +836,15 @@ impl WeightInfo for () { // Storage: BagsList ListBags (r:1 w:1) // Storage: BagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - Weight::from_ref_time(62_014_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(11 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(6 as RefTimeWeight)) + Weight::from_ref_time(62_014_000 as u64) + .saturating_add(RocksDbWeight::get().reads(11 as u64)) + 
.saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - Weight::from_ref_time(12_814_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(12_814_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/state-trie-migration/src/weights.rs b/frame/state-trie-migration/src/weights.rs index 1967874157f75..851f4cba077c0 100644 --- a/frame/state-trie-migration/src/weights.rs +++ b/frame/state-trie-migration/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_state_trie_migration. @@ -59,40 +59,40 @@ impl WeightInfo for SubstrateWeight { // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) // Storage: StateTrieMigration MigrationProcess (r:1 w:1) fn continue_migrate() -> Weight { - Weight::from_ref_time(19_019_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(19_019_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) fn continue_migrate_wrong_witness() -> Weight { - Weight::from_ref_time(1_874_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) + Weight::from_ref_time(1_874_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) } fn migrate_custom_top_success() -> Weight { - Weight::from_ref_time(16_381_000 as RefTimeWeight) + Weight::from_ref_time(16_381_000 as u64) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_top_fail() -> Weight { - Weight::from_ref_time(25_966_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_966_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } fn migrate_custom_child_success() -> Weight { - Weight::from_ref_time(16_712_000 as RefTimeWeight) + Weight::from_ref_time(16_712_000 as u64) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_child_fail() -> Weight { - Weight::from_ref_time(29_885_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(29_885_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: unknown [0x6b6579] (r:1 w:1) fn process_top_key(v: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + 
.saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } } @@ -101,39 +101,39 @@ impl WeightInfo for () { // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) // Storage: StateTrieMigration MigrationProcess (r:1 w:1) fn continue_migrate() -> Weight { - Weight::from_ref_time(19_019_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(19_019_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) fn continue_migrate_wrong_witness() -> Weight { - Weight::from_ref_time(1_874_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) + Weight::from_ref_time(1_874_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) } fn migrate_custom_top_success() -> Weight { - Weight::from_ref_time(16_381_000 as RefTimeWeight) + Weight::from_ref_time(16_381_000 as u64) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_top_fail() -> Weight { - Weight::from_ref_time(25_966_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_966_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } fn migrate_custom_child_success() -> Weight { - Weight::from_ref_time(16_712_000 as RefTimeWeight) + Weight::from_ref_time(16_712_000 as u64) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_child_fail() -> Weight { - Weight::from_ref_time(29_885_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(29_885_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: unknown [0x6b6579] (r:1 w:1) fn process_top_key(v: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(v as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index c556322b6cf93..3b2a8b3b62fc2 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1376,7 +1376,7 @@ pub mod pallet_prelude { ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, TypedGet, }, - weights::{DispatchClass, Pays, RefTimeWeight, Weight}, + weights::{DispatchClass, Pays, Weight}, Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, }; diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 
dab7f4a2746cf..b4fd896e865dc 100644
--- a/frame/support/src/weights.rs
+++ b/frame/support/src/weights.rs
@@ -456,8 +456,8 @@ impl GetDispatchInfo for sp_runtime::testing::TestX
 /// be updated all together once proof size is accounted for.
 #[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)]
 pub struct RuntimeDbWeight {
-	pub read: RefTimeWeight,
-	pub write: RefTimeWeight,
+	pub read: u64,
+	pub write: u64,
 }

 impl RuntimeDbWeight {
@@ -727,7 +727,7 @@ mod tests {
 		fn f03(_origin) { unimplemented!(); }

 		// weight = a x 10 + b
-		#[weight = ((_a * 10 + _eb * 1) as RefTimeWeight, DispatchClass::Normal, Pays::Yes)]
+		#[weight = ((_a * 10 + _eb * 1) as u64, DispatchClass::Normal, Pays::Yes)]
 		fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); }

 		#[weight = (0, DispatchClass::Operational, Pays::Yes)]
diff --git a/frame/support/src/weights/weight_v2.rs b/frame/support/src/weights/weight_v2.rs
index bcda6358c965f..4bfab36663394 100644
--- a/frame/support/src/weights/weight_v2.rs
+++ b/frame/support/src/weights/weight_v2.rs
@@ -24,10 +24,6 @@ use sp_runtime::{

 use super::*;

-/// The unit of measurement for computational time spent when executing runtime logic on reference
-/// hardware.
-pub type RefTimeWeight = u64;
-
 #[derive(
 	Encode,
 	Decode,
@@ -46,27 +42,27 @@ pub type RefTimeWeight = u64;
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 pub struct Weight {
 	/// The weight of computational time used based on some reference hardware.
-	ref_time: RefTimeWeight,
+	ref_time: u64,
 }

 impl Weight {
 	/// Set the reference time part of the weight.
-	pub const fn set_ref_time(mut self, c: RefTimeWeight) -> Self {
+	pub const fn set_ref_time(mut self, c: u64) -> Self {
 		self.ref_time = c;
 		self
 	}

 	/// Return the reference time part of the weight.
-	pub const fn ref_time(&self) -> RefTimeWeight {
+	pub const fn ref_time(&self) -> u64 {
 		self.ref_time
 	}

 	/// Return a mutable reference time part of the weight.
-	pub fn ref_time_mut(&mut self) -> &mut RefTimeWeight {
+	pub fn ref_time_mut(&mut self) -> &mut u64 {
 		&mut self.ref_time
 	}

-	pub const MAX: Self = Self { ref_time: RefTimeWeight::MAX };
+	pub const MAX: Self = Self { ref_time: u64::MAX };

 	/// Get the conservative min of `self` and `other` weight.
 	pub fn min(&self, other: Self) -> Self {
@@ -89,7 +85,7 @@ impl Weight {
 	}

 	/// Construct [`Weight`] with reference time weight.
- pub const fn from_ref_time(ref_time: RefTimeWeight) -> Self { + pub const fn from_ref_time(ref_time: u64) -> Self { Self { ref_time } } @@ -380,8 +376,8 @@ impl sp_runtime::traits::Printable for Weight { // TODO: Eventually remove these -impl From> for PostDispatchInfo { - fn from(maybe_actual_computation: Option) -> Self { +impl From> for PostDispatchInfo { + fn from(maybe_actual_computation: Option) -> Self { let actual_weight = match maybe_actual_computation { Some(actual_computation) => Some(Weight::zero().set_ref_time(actual_computation)), None => None, @@ -390,8 +386,8 @@ impl From> for PostDispatchInfo { } } -impl From<(Option, Pays)> for PostDispatchInfo { - fn from(post_weight_info: (Option, Pays)) -> Self { +impl From<(Option, Pays)> for PostDispatchInfo { + fn from(post_weight_info: (Option, Pays)) -> Self { let (maybe_actual_time, pays_fee) = post_weight_info; let actual_weight = match maybe_actual_time { Some(actual_time) => Some(Weight::zero().set_ref_time(actual_time)), @@ -401,73 +397,73 @@ impl From<(Option, Pays)> for PostDispatchInfo { } } -impl WeighData for RefTimeWeight { +impl WeighData for u64 { fn weigh_data(&self, _: T) -> Weight { return Weight::zero().set_ref_time(*self) } } -impl ClassifyDispatch for RefTimeWeight { +impl ClassifyDispatch for u64 { fn classify_dispatch(&self, _: T) -> DispatchClass { DispatchClass::Normal } } -impl PaysFee for RefTimeWeight { +impl PaysFee for u64 { fn pays_fee(&self, _: T) -> Pays { Pays::Yes } } -impl WeighData for (RefTimeWeight, DispatchClass, Pays) { +impl WeighData for (u64, DispatchClass, Pays) { fn weigh_data(&self, args: T) -> Weight { return self.0.weigh_data(args) } } -impl ClassifyDispatch for (RefTimeWeight, DispatchClass, Pays) { +impl ClassifyDispatch for (u64, DispatchClass, Pays) { fn classify_dispatch(&self, _: T) -> DispatchClass { self.1 } } -impl PaysFee for (RefTimeWeight, DispatchClass, Pays) { +impl PaysFee for (u64, DispatchClass, Pays) { fn pays_fee(&self, _: T) -> Pays { self.2 } } -impl WeighData for (RefTimeWeight, DispatchClass) { +impl WeighData for (u64, DispatchClass) { fn weigh_data(&self, args: T) -> Weight { return self.0.weigh_data(args) } } -impl ClassifyDispatch for (RefTimeWeight, DispatchClass) { +impl ClassifyDispatch for (u64, DispatchClass) { fn classify_dispatch(&self, _: T) -> DispatchClass { self.1 } } -impl PaysFee for (RefTimeWeight, DispatchClass) { +impl PaysFee for (u64, DispatchClass) { fn pays_fee(&self, _: T) -> Pays { Pays::Yes } } -impl WeighData for (RefTimeWeight, Pays) { +impl WeighData for (u64, Pays) { fn weigh_data(&self, args: T) -> Weight { return self.0.weigh_data(args) } } -impl ClassifyDispatch for (RefTimeWeight, Pays) { +impl ClassifyDispatch for (u64, Pays) { fn classify_dispatch(&self, _: T) -> DispatchClass { DispatchClass::Normal } } -impl PaysFee for (RefTimeWeight, Pays) { +impl PaysFee for (u64, Pays) { fn pays_fee(&self, _: T) -> Pays { self.1 } diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 4d597e24356c7..dc107b345c2b8 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -31,10 +31,7 @@ impl SomeAssociation for u64 { mod pallet_old { use super::SomeAssociation; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, - traits::Get, - weights::{RefTimeWeight, Weight}, - Parameter, + decl_error, decl_event, decl_module, decl_storage, traits::Get, weights::Weight, Parameter, }; use 
frame_system::ensure_root; @@ -43,7 +40,7 @@ mod pallet_old { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + SomeAssociation; type Event: From> + Into<::Event>; @@ -78,7 +75,7 @@ mod pallet_old { fn deposit_event() = default; const SomeConst: T::Balance = T::SomeConst::get(); - #[weight = >::into(new_value.clone())] + #[weight = >::into(new_value.clone())] fn set_dummy(origin, #[compact] new_value: T::Balance) { ensure_root(origin)?; @@ -116,7 +113,7 @@ pub mod pallet { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + MaybeSerializeDeserialize + SomeAssociation @@ -144,7 +141,7 @@ pub mod pallet { #[pallet::call] impl Pallet { - #[pallet::weight(>::into(new_value.clone()))] + #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index 2fd6833eaa428..428de08864f29 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -23,16 +23,13 @@ use frame_support::traits::{ConstU32, ConstU64}; mod pallet_old { use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, - traits::Get, - weights::{RefTimeWeight, Weight}, - Parameter, + decl_error, decl_event, decl_module, decl_storage, traits::Get, weights::Weight, Parameter, }; use frame_system::ensure_root; pub trait Config: frame_system::Config { type SomeConst: Get; - type Balance: Parameter + codec::HasCompact + From + Into + Default; + type Balance: Parameter + codec::HasCompact + From + Into + Default; type Event: From> + Into<::Event>; } @@ -65,7 +62,7 @@ mod pallet_old { fn deposit_event() = default; const SomeConst: T::Balance = T::SomeConst::get(); - #[weight = >::into(new_value.clone())] + #[weight = >::into(new_value.clone())] fn set_dummy(origin, #[compact] new_value: T::Balance) { ensure_root(origin)?; @@ -102,7 +99,7 @@ pub mod pallet { type Balance: Parameter + codec::HasCompact + From - + Into + + Into + Default + MaybeSerializeDeserialize + scale_info::StaticTypeInfo; @@ -129,7 +126,7 @@ pub mod pallet { #[pallet::call] impl, I: 'static> Pallet { - #[pallet::weight(>::into(new_value.clone()))] + #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 8b6c291096d5a..9080c9aad43f2 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for frame_system. @@ -59,44 +59,44 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 3932160]`. fn remark(_b: u32, ) -> Weight { - Weight::from_ref_time(1_000_000 as RefTimeWeight) + Weight::from_ref_time(1_000_000 as u64) } /// The range of component `b` is `[0, 3932160]`. 
fn remark_with_event(b: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) fn set_heap_pages() -> Weight { - Weight::from_ref_time(5_367_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(5_367_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `i` is `[1, 1000]`. fn set_storage(i: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(603_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(603_000 as u64).saturating_mul(i as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `i` is `[1, 1000]`. fn kill_storage(i: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(513_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(513_000 as u64).saturating_mul(i as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `p` is `[1, 1000]`. fn kill_prefix(p: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_026_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(1_026_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) } } @@ -104,43 +104,43 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// The range of component `b` is `[0, 3932160]`. fn remark(_b: u32, ) -> Weight { - Weight::from_ref_time(1_000_000 as RefTimeWeight) + Weight::from_ref_time(1_000_000 as u64) } /// The range of component `b` is `[0, 3932160]`. 
 	fn remark_with_event(b: u32, ) -> Weight {
-		Weight::from_ref_time(0 as RefTimeWeight)
+		Weight::from_ref_time(0 as u64)
 			// Standard Error: 0
-			.saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(b as RefTimeWeight))
+			.saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64))
 	}
 	// Storage: System Digest (r:1 w:1)
 	// Storage: unknown [0x3a686561707061676573] (r:0 w:1)
 	fn set_heap_pages() -> Weight {
-		Weight::from_ref_time(5_367_000 as RefTimeWeight)
-			.saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight))
-			.saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight))
+		Weight::from_ref_time(5_367_000 as u64)
+			.saturating_add(RocksDbWeight::get().reads(1 as u64))
+			.saturating_add(RocksDbWeight::get().writes(2 as u64))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `i` is `[1, 1000]`.
 	fn set_storage(i: u32, ) -> Weight {
-		Weight::from_ref_time(0 as RefTimeWeight)
+		Weight::from_ref_time(0 as u64)
 			// Standard Error: 1_000
-			.saturating_add(Weight::from_ref_time(603_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight))
-			.saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight)))
+			.saturating_add(Weight::from_ref_time(603_000 as u64).saturating_mul(i as u64))
+			.saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64)))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `i` is `[1, 1000]`.
 	fn kill_storage(i: u32, ) -> Weight {
-		Weight::from_ref_time(0 as RefTimeWeight)
+		Weight::from_ref_time(0 as u64)
 			// Standard Error: 1_000
-			.saturating_add(Weight::from_ref_time(513_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight))
-			.saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight)))
+			.saturating_add(Weight::from_ref_time(513_000 as u64).saturating_mul(i as u64))
+			.saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64)))
 	}
 	// Storage: Skipped Metadata (r:0 w:0)
 	/// The range of component `p` is `[1, 1000]`.
 	fn kill_prefix(p: u32, ) -> Weight {
-		Weight::from_ref_time(0 as RefTimeWeight)
+		Weight::from_ref_time(0 as u64)
 			// Standard Error: 1_000
-			.saturating_add(Weight::from_ref_time(1_026_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight))
-			.saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(p as RefTimeWeight)))
+			.saturating_add(Weight::from_ref_time(1_026_000 as u64).saturating_mul(p as u64))
+			.saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(p as u64)))
 	}
 }
diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs
index f71a0f753a43e..d51f7c9cfe79d 100644
--- a/frame/timestamp/src/weights.rs
+++ b/frame/timestamp/src/weights.rs
@@ -39,7 +39,7 @@
 #![allow(unused_parens)]
 #![allow(unused_imports)]

-use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}};
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;

 /// Weight functions needed for pallet_timestamp.
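A note for readers of these weight-file hunks: `RefTimeWeight` was only a type alias for `u64` (removed from weight_v2.rs earlier in this patch), so every hunk here is a one-token substitution and the computed weights are unchanged. Below is a minimal sketch of the shape a generated weight function takes after the rename; it is not taken from this series, and the `ExampleWeight` struct, the `example_extrinsic` name and the constants are placeholders.

use frame_support::{traits::Get, weights::Weight};
use sp_std::marker::PhantomData;

/// Placeholder weight provider, mirroring the generated `SubstrateWeight<T>` modules (sketch only).
pub struct ExampleWeight<T>(PhantomData<T>);

impl<T: frame_system::Config> ExampleWeight<T> {
	/// Hypothetical extrinsic: a fixed base weight, a per-item component `n`,
	/// plus one storage read and one storage write, with the casts spelled as plain `u64`.
	pub fn example_extrinsic(n: u32) -> Weight {
		Weight::from_ref_time(10_000_000 as u64)
			.saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(n as u64))
			.saturating_add(T::DbWeight::get().reads(1 as u64))
			.saturating_add(T::DbWeight::get().writes(1 as u64))
	}
}

The same `Weight::from_ref_time`, `saturating_add`/`saturating_mul` and `DbWeight` read/write calls appear throughout the generated files in this patch; only the spelling of the scalar casts changes.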
@@ -54,12 +54,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:1) // Storage: Babe CurrentSlot (r:1 w:0) fn set() -> Weight { - Weight::from_ref_time(8_080_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(8_080_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } fn on_finalize() -> Weight { - Weight::from_ref_time(2_681_000 as RefTimeWeight) + Weight::from_ref_time(2_681_000 as u64) } } @@ -68,11 +68,11 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:1) // Storage: Babe CurrentSlot (r:1 w:0) fn set() -> Weight { - Weight::from_ref_time(8_080_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(8_080_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } fn on_finalize() -> Weight { - Weight::from_ref_time(2_681_000 as RefTimeWeight) + Weight::from_ref_time(2_681_000 as u64) } } diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs index 106e5e163f02d..086b1c72b21c0 100644 --- a/frame/tips/src/weights.rs +++ b/frame/tips/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_tips. @@ -58,59 +58,59 @@ impl WeightInfo for SubstrateWeight { // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:1 w:1) fn report_awesome(r: u32, ) -> Weight { - Weight::from_ref_time(30_669_000 as RefTimeWeight) + Weight::from_ref_time(30_669_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn retract_tip() -> Weight { - Weight::from_ref_time(28_768_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(28_768_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:0 w:1) fn tip_new(r: u32, t: u32, ) -> Weight { - Weight::from_ref_time(20_385_000 as RefTimeWeight) + Weight::from_ref_time(20_385_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(r as u64)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(166_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) 
+ .saturating_add(Weight::from_ref_time(166_000 as u64).saturating_mul(t as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Tips (r:1 w:1) fn tip(t: u32, ) -> Weight { - Weight::from_ref_time(12_287_000 as RefTimeWeight) + Weight::from_ref_time(12_287_000 as u64) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(363_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(363_000 as u64).saturating_mul(t as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Tips Tips (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn close_tip(t: u32, ) -> Weight { - Weight::from_ref_time(45_656_000 as RefTimeWeight) + Weight::from_ref_time(45_656_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(276_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(276_000 as u64).saturating_mul(t as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn slash_tip(t: u32, ) -> Weight { - Weight::from_ref_time(18_525_000 as RefTimeWeight) + Weight::from_ref_time(18_525_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(37_000 as u64).saturating_mul(t as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } } @@ -119,58 +119,58 @@ impl WeightInfo for () { // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:1 w:1) fn report_awesome(r: u32, ) -> Weight { - Weight::from_ref_time(30_669_000 as RefTimeWeight) + Weight::from_ref_time(30_669_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn retract_tip() -> Weight { - Weight::from_ref_time(28_768_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(28_768_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:0 w:1) fn tip_new(r: u32, t: u32, ) -> Weight { - Weight::from_ref_time(20_385_000 as RefTimeWeight) + 
Weight::from_ref_time(20_385_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as RefTimeWeight).saturating_mul(r as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(r as u64)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(166_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(166_000 as u64).saturating_mul(t as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Tips (r:1 w:1) fn tip(t: u32, ) -> Weight { - Weight::from_ref_time(12_287_000 as RefTimeWeight) + Weight::from_ref_time(12_287_000 as u64) // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(363_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(363_000 as u64).saturating_mul(t as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Tips Tips (r:1 w:1) // Storage: Elections Members (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn close_tip(t: u32, ) -> Weight { - Weight::from_ref_time(45_656_000 as RefTimeWeight) + Weight::from_ref_time(45_656_000 as u64) // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(276_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(276_000 as u64).saturating_mul(t as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn slash_tip(t: u32, ) -> Weight { - Weight::from_ref_time(18_525_000 as RefTimeWeight) + Weight::from_ref_time(18_525_000 as u64) // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(37_000 as RefTimeWeight).saturating_mul(t as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(37_000 as u64).saturating_mul(t as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 16986203de1e5..8fe58dbf03d2b 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -66,8 +66,7 @@ use frame_support::{ dispatch::DispatchResult, traits::{EstimateCallFee, Get}, weights::{ - DispatchClass, DispatchInfo, GetDispatchInfo, Pays, PostDispatchInfo, RefTimeWeight, - Weight, WeightToFee, + DispatchClass, DispatchInfo, GetDispatchInfo, Pays, PostDispatchInfo, Weight, WeightToFee, }, }; @@ -559,7 +558,7 @@ where } fn length_to_fee(length: u32) -> BalanceOf { - T::LengthToFee::weight_to_fee(&Weight::from_ref_time(length as RefTimeWeight)) + T::LengthToFee::weight_to_fee(&Weight::from_ref_time(length as u64)) } fn weight_to_fee(weight: Weight) -> BalanceOf { diff 
--git a/frame/transaction-storage/src/weights.rs b/frame/transaction-storage/src/weights.rs index 04fa4e3048c22..6fd9b1b818df6 100644 --- a/frame/transaction-storage/src/weights.rs +++ b/frame/transaction-storage/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_transaction_storage. @@ -59,11 +59,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn store(l: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(l as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: TransactionStorage Transactions (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) @@ -72,9 +72,9 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn renew() -> Weight { - Weight::from_ref_time(50_978_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(50_978_000 as u64) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: TransactionStorage ProofChecked (r:1 w:1) // Storage: TransactionStorage StoragePeriod (r:1 w:0) @@ -82,9 +82,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System ParentHash (r:1 w:0) // Storage: TransactionStorage Transactions (r:1 w:0) fn check_proof_max() -> Weight { - Weight::from_ref_time(106_990_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(106_990_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } } @@ -97,11 +97,11 @@ impl WeightInfo for () { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn store(l: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(l as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: TransactionStorage Transactions (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) @@ -110,9 +110,9 @@ impl WeightInfo for () { // Storage: TransactionStorage BlockTransactions (r:1 w:1) // Storage: TransactionStorage MaxBlockTransactions (r:1 
w:0) fn renew() -> Weight { - Weight::from_ref_time(50_978_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(6 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(50_978_000 as u64) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: TransactionStorage ProofChecked (r:1 w:1) // Storage: TransactionStorage StoragePeriod (r:1 w:0) @@ -120,8 +120,8 @@ impl WeightInfo for () { // Storage: System ParentHash (r:1 w:0) // Storage: TransactionStorage Transactions (r:1 w:0) fn check_proof_max() -> Weight { - Weight::from_ref_time(106_990_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(5 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(106_990_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index d2a5bc9f1f258..37c62e0c18c52 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_treasury. @@ -58,51 +58,51 @@ impl WeightInfo for SubstrateWeight { // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn spend() -> Weight { - Weight::from_ref_time(22_063_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(22_063_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn propose_spend() -> Weight { - Weight::from_ref_time(26_473_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(26_473_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Treasury Proposals (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_proposal() -> Weight { - Weight::from_ref_time(29_955_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(29_955_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Treasury Proposals (r:1 w:0) // Storage: Treasury Approvals (r:1 w:1) fn approve_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(10_786_000 as RefTimeWeight) + Weight::from_ref_time(10_786_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(110_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + 
.saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Treasury Approvals (r:1 w:1) fn remove_approval() -> Weight { - Weight::from_ref_time(6_647_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(6_647_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Treasury Approvals (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Treasury Proposals (r:2 w:2) // Storage: System Account (r:4 w:4) fn on_initialize_proposals(p: u32, ) -> Weight { - Weight::from_ref_time(25_805_000 as RefTimeWeight) + Weight::from_ref_time(25_805_000 as u64) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(28_473_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(28_473_000 as u64).saturating_mul(p as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(p as u64))) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(p as u64))) } } @@ -111,50 +111,50 @@ impl WeightInfo for () { // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn spend() -> Weight { - Weight::from_ref_time(22_063_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(22_063_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn propose_spend() -> Weight { - Weight::from_ref_time(26_473_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(26_473_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Treasury Proposals (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_proposal() -> Weight { - Weight::from_ref_time(29_955_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(29_955_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Treasury Proposals (r:1 w:0) // Storage: Treasury Approvals (r:1 w:1) fn approve_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(10_786_000 as RefTimeWeight) + Weight::from_ref_time(10_786_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(110_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(110_000 as 
u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Treasury Approvals (r:1 w:1) fn remove_approval() -> Weight { - Weight::from_ref_time(6_647_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(6_647_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Treasury Approvals (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Treasury Proposals (r:2 w:2) // Storage: System Account (r:4 w:4) fn on_initialize_proposals(p: u32, ) -> Weight { - Weight::from_ref_time(25_805_000 as RefTimeWeight) + Weight::from_ref_time(25_805_000 as u64) // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(28_473_000 as RefTimeWeight).saturating_mul(p as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((3 as RefTimeWeight).saturating_mul(p as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(28_473_000 as u64).saturating_mul(p as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(p as u64))) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(p as u64))) } } diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index dae72accd2adc..c7a8a2f6baa0f 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_uniques. @@ -80,16 +80,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(33_075_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(33_075_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(19_528_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(19_528_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -103,192 +103,192 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. 
fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(13_639_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(13_639_000 as u64).saturating_mul(n as u64)) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_393_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_393_000 as u64).saturating_mul(m as u64)) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_217_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((2 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(2_217_000 as u64).saturating_mul(a as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(m as u64))) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(a as u64))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(42_146_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(42_146_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(42_960_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(42_960_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(33_025_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(33_025_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 24_000 - .saturating_add(Weight::from_ref_time(15_540_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(15_540_000 as u64).saturating_mul(i as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - Weight::from_ref_time(25_194_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_194_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - Weight::from_ref_time(25_397_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_397_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - Weight::from_ref_time(19_278_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(19_278_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - Weight::from_ref_time(19_304_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(19_304_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(28_615_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(28_615_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(19_943_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(19_943_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) // 
Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - Weight::from_ref_time(22_583_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(22_583_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(47_520_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(47_520_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(45_316_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(45_316_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(38_391_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(38_391_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(38_023_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(38_023_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(37_398_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(37_398_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(35_621_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(35_621_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(25_856_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_856_000 as u64) + 
.saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(26_098_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(26_098_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(24_076_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(24_076_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(22_035_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(22_035_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(22_534_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(22_534_000 as u64) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques ItemPriceOf (r:1 w:1) // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - Weight::from_ref_time(45_272_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(45_272_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } } @@ -297,16 +297,16 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(33_075_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(33_075_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(19_528_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(19_528_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:0) @@ -320,191 +320,191 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. 
/// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(13_639_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(13_639_000 as u64).saturating_mul(n as u64)) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_393_000 as RefTimeWeight).saturating_mul(m as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_393_000 as u64).saturating_mul(m as u64)) // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_217_000 as RefTimeWeight).saturating_mul(a as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((2 as RefTimeWeight).saturating_mul(n as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(m as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(a as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(2_217_000 as u64).saturating_mul(a as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(m as u64))) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(a as u64))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(42_146_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(42_146_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(42_960_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(42_960_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(33_025_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(33_025_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques Asset (r:100 w:100) /// The range of component `i` is `[0, 5000]`. 
fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(0 as RefTimeWeight) + Weight::from_ref_time(0 as u64) // Standard Error: 24_000 - .saturating_add(Weight::from_ref_time(15_540_000 as RefTimeWeight).saturating_mul(i as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes((1 as RefTimeWeight).saturating_mul(i as RefTimeWeight))) + .saturating_add(Weight::from_ref_time(15_540_000 as u64).saturating_mul(i as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - Weight::from_ref_time(25_194_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_194_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - Weight::from_ref_time(25_397_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_397_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - Weight::from_ref_time(19_278_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(19_278_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - Weight::from_ref_time(19_304_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(19_304_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(28_615_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(28_615_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(19_943_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(19_943_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + 
.saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - Weight::from_ref_time(22_583_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(22_583_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(47_520_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(47_520_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(45_316_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(45_316_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(38_391_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(38_391_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(38_023_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(38_023_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(37_398_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(37_398_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(35_621_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(35_621_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(25_856_000 as RefTimeWeight) - 
.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(25_856_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(26_098_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(26_098_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(24_076_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(24_076_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(22_035_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(22_035_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(22_534_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(1 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(1 as RefTimeWeight)) + Weight::from_ref_time(22_534_000 as u64) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques ItemPriceOf (r:1 w:1) // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - Weight::from_ref_time(45_272_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + Weight::from_ref_time(45_272_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } } diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs index 1df9c2e67cc1f..b88319f2597c9 100644 --- a/frame/utility/src/weights.rs +++ b/frame/utility/src/weights.rs @@ -41,7 +41,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_utility. @@ -58,27 +58,27 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1000]`. 
fn batch(c: u32, ) -> Weight { - Weight::from_ref_time(23_113_000 as RefTimeWeight) + Weight::from_ref_time(23_113_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_701_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_701_000 as u64).saturating_mul(c as u64)) } fn as_derivative() -> Weight { - Weight::from_ref_time(4_182_000 as RefTimeWeight) + Weight::from_ref_time(4_182_000 as u64) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { - Weight::from_ref_time(18_682_000 as RefTimeWeight) + Weight::from_ref_time(18_682_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_794_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_794_000 as u64).saturating_mul(c as u64)) } fn dispatch_as() -> Weight { - Weight::from_ref_time(12_049_000 as RefTimeWeight) + Weight::from_ref_time(12_049_000 as u64) } /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { - Weight::from_ref_time(19_136_000 as RefTimeWeight) + Weight::from_ref_time(19_136_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_697_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_697_000 as u64).saturating_mul(c as u64)) } } @@ -86,26 +86,26 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { - Weight::from_ref_time(23_113_000 as RefTimeWeight) + Weight::from_ref_time(23_113_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_701_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_701_000 as u64).saturating_mul(c as u64)) } fn as_derivative() -> Weight { - Weight::from_ref_time(4_182_000 as RefTimeWeight) + Weight::from_ref_time(4_182_000 as u64) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { - Weight::from_ref_time(18_682_000 as RefTimeWeight) + Weight::from_ref_time(18_682_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_794_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_794_000 as u64).saturating_mul(c as u64)) } fn dispatch_as() -> Weight { - Weight::from_ref_time(12_049_000 as RefTimeWeight) + Weight::from_ref_time(12_049_000 as u64) } /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { - Weight::from_ref_time(19_136_000 as RefTimeWeight) + Weight::from_ref_time(19_136_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_697_000 as RefTimeWeight).saturating_mul(c as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(2_697_000 as u64).saturating_mul(c as u64)) } } diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index 5988b0bd47d54..567d2fef74130 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_vesting. 
@@ -60,96 +60,96 @@ impl WeightInfo for SubstrateWeight { // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vest_locked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_978_000 as RefTimeWeight) + Weight::from_ref_time(32_978_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(82_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(82_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(88_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(88_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vest_unlocked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_856_000 as RefTimeWeight) + Weight::from_ref_time(32_856_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(79_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn vest_other_locked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(33_522_000 as RefTimeWeight) + Weight::from_ref_time(33_522_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(74_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(74_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(72_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(72_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_558_000 as RefTimeWeight) + Weight::from_ref_time(32_558_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(78_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(61_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(61_000 as u64).saturating_mul(s as u64)) + 
.saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vested_transfer(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(49_260_000 as RefTimeWeight) + Weight::from_ref_time(49_260_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(l as u64)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) fn force_vested_transfer(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(49_166_000 as RefTimeWeight) + Weight::from_ref_time(49_166_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(77_000 as u64).saturating_mul(l as u64)) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(43_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(43_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(34_042_000 as RefTimeWeight) + Weight::from_ref_time(34_042_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(83_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(83_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(33_937_000 as RefTimeWeight) + Weight::from_ref_time(33_937_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(78_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - 
.saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } } @@ -158,95 +158,95 @@ impl WeightInfo for () { // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vest_locked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_978_000 as RefTimeWeight) + Weight::from_ref_time(32_978_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(82_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(82_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(88_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(88_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vest_unlocked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_856_000 as RefTimeWeight) + Weight::from_ref_time(32_856_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(79_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(79_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(56_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn vest_other_locked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(33_522_000 as RefTimeWeight) + Weight::from_ref_time(33_522_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(74_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(74_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(72_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(72_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_558_000 as RefTimeWeight) + Weight::from_ref_time(32_558_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(78_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(61_000 as 
RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(61_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vested_transfer(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(49_260_000 as RefTimeWeight) + Weight::from_ref_time(49_260_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(l as u64)) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(55_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) fn force_vested_transfer(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(49_166_000 as RefTimeWeight) + Weight::from_ref_time(49_166_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(77_000 as u64).saturating_mul(l as u64)) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(43_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(4 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(4 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(43_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(34_042_000 as RefTimeWeight) + Weight::from_ref_time(34_042_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(83_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(83_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(80_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(33_937_000 as RefTimeWeight) + Weight::from_ref_time(33_937_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as RefTimeWeight).saturating_mul(l as RefTimeWeight)) + 
.saturating_add(Weight::from_ref_time(78_000 as u64).saturating_mul(l as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as RefTimeWeight).saturating_mul(s as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(73_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } } diff --git a/frame/whitelist/src/weights.rs b/frame/whitelist/src/weights.rs index 88d1dedb2de47..ca474d5e55634 100644 --- a/frame/whitelist/src/weights.rs +++ b/frame/whitelist/src/weights.rs @@ -39,7 +39,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight, constants::RocksDbWeight}}; +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; /// Weight functions needed for pallet_whitelist. @@ -56,35 +56,35 @@ impl WeightInfo for SubstrateWeight { // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn whitelist_call() -> Weight { - Weight::from_ref_time(20_938_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(20_938_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn remove_whitelisted_call() -> Weight { - Weight::from_ref_time(22_332_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(22_332_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn dispatch_whitelisted_call() -> Weight { - Weight::from_ref_time(5_989_917_000 as RefTimeWeight) - .saturating_add(T::DbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(5_989_917_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { - Weight::from_ref_time(25_325_000 as RefTimeWeight) + Weight::from_ref_time(25_325_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(T::DbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(T::DbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } } @@ -93,34 +93,34 @@ impl WeightInfo for () { // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn whitelist_call() -> Weight { - Weight::from_ref_time(20_938_000 as RefTimeWeight) - 
.saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(2 as RefTimeWeight)) + Weight::from_ref_time(20_938_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn remove_whitelisted_call() -> Weight { - Weight::from_ref_time(22_332_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(22_332_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn dispatch_whitelisted_call() -> Weight { - Weight::from_ref_time(5_989_917_000 as RefTimeWeight) - .saturating_add(RocksDbWeight::get().reads(3 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + Weight::from_ref_time(5_989_917_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { - Weight::from_ref_time(25_325_000 as RefTimeWeight) + Weight::from_ref_time(25_325_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as RefTimeWeight).saturating_mul(n as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().reads(2 as RefTimeWeight)) - .saturating_add(RocksDbWeight::get().writes(3 as RefTimeWeight)) + .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } } diff --git a/utils/frame/benchmarking-cli/src/pallet/template.hbs b/utils/frame/benchmarking-cli/src/pallet/template.hbs index f6be16cf69ac9..6e016c47ef010 100644 --- a/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -15,7 +15,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{RefTimeWeight, Weight}}; +use frame_support::{traits::Get, weights::{Weight}}; use sp_std::marker::PhantomData; /// Weight functions for `{{pallet}}`. 
@@ -33,22 +33,22 @@ impl {{pallet}}::WeightInfo for WeightInfo { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { - Weight::from_ref_time({{underscore benchmark.base_weight}} as RefTimeWeight) + Weight::from_ref_time({{underscore benchmark.base_weight}} as u64) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} - .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight)) + .saturating_add(Weight::from_ref_time({{underscore cw.slope}} as u64).saturating_mul({{cw.name}} as u64)) {{/each}} {{#if (ne benchmark.base_reads "0")}} - .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as RefTimeWeight)) + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as u64)) {{/if}} {{#each benchmark.component_reads as |cr|}} - .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as RefTimeWeight).saturating_mul({{cr.name}} as RefTimeWeight))) + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as u64).saturating_mul({{cr.name}} as u64))) {{/each}} {{#if (ne benchmark.base_writes "0")}} - .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as RefTimeWeight)) + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as u64)) {{/if}} {{#each benchmark.component_writes as |cw|}} - .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as RefTimeWeight).saturating_mul({{cw.name}} as RefTimeWeight))) + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as u64).saturating_mul({{cw.name}} as u64))) {{/each}} } {{/each}} From 1c0d0083c2d3105c32b4fb7331f474ec2f0510dc Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Fri, 2 Sep 2022 21:33:26 +0000 Subject: [PATCH 088/348] Add benchmarking support for digest items (#12159) * Add benchmarking support for digest items * fmt --- bin/node-template/node/src/command.rs | 10 ++++++++-- bin/node/cli/src/command.rs | 15 +++++++++++++-- .../frame/benchmarking-cli/src/extrinsic/bench.rs | 8 +++++--- utils/frame/benchmarking-cli/src/extrinsic/cmd.rs | 5 +++-- utils/frame/benchmarking-cli/src/overhead/cmd.rs | 5 +++-- 5 files changed, 32 insertions(+), 11 deletions(-) diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 142f0b40c325e..f8f02ed8fc2dd 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -139,7 +139,13 @@ pub fn run() -> sc_cli::Result<()> { let PartialComponents { client, .. } = service::new_partial(&config)?; let ext_builder = RemarkBuilder::new(client.clone()); - cmd.run(config, client, inherent_benchmark_data()?, &ext_builder) + cmd.run( + config, + client, + inherent_benchmark_data()?, + Vec::new(), + &ext_builder, + ) }, BenchmarkCmd::Extrinsic(cmd) => { let PartialComponents { client, .. 
} = service::new_partial(&config)?; @@ -153,7 +159,7 @@ pub fn run() -> sc_cli::Result<()> { )), ]); - cmd.run(client, inherent_benchmark_data()?, &ext_factory) + cmd.run(client, inherent_benchmark_data()?, Vec::new(), &ext_factory) }, BenchmarkCmd::Machine(cmd) => cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 1d88e9b539421..79abe8975f3b6 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -132,7 +132,13 @@ pub fn run() -> Result<()> { let partial = new_partial(&config)?; let ext_builder = RemarkBuilder::new(partial.client.clone()); - cmd.run(config, partial.client, inherent_benchmark_data()?, &ext_builder) + cmd.run( + config, + partial.client, + inherent_benchmark_data()?, + Vec::new(), + &ext_builder, + ) }, BenchmarkCmd::Extrinsic(cmd) => { // ensure that we keep the task manager alive @@ -147,7 +153,12 @@ pub fn run() -> Result<()> { )), ]); - cmd.run(partial.client, inherent_benchmark_data()?, &ext_factory) + cmd.run( + partial.client, + inherent_benchmark_data()?, + Vec::new(), + &ext_factory, + ) }, BenchmarkCmd::Machine(cmd) => cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), diff --git a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs index 27fc40fb34774..5dcf05956d42c 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -28,7 +28,7 @@ use sp_blockchain::{ use sp_runtime::{ traits::{Block as BlockT, Zero}, transaction_validity::{InvalidTransaction, TransactionValidityError}, - OpaqueExtrinsic, + Digest, DigestItem, OpaqueExtrinsic, }; use clap::Args; @@ -65,6 +65,7 @@ pub(crate) struct Benchmark { client: Arc, params: BenchmarkParams, inherent_data: sp_inherents::InherentData, + digest_items: Vec, _p: PhantomData<(Block, BA)>, } @@ -80,8 +81,9 @@ where client: Arc, params: BenchmarkParams, inherent_data: sp_inherents::InherentData, + digest_items: Vec, ) -> Self { - Self { client, params, inherent_data, _p: PhantomData } + Self { client, params, inherent_data, digest_items, _p: PhantomData } } /// Benchmark a block with only inherents. @@ -125,7 +127,7 @@ where &self, ext_builder: Option<&dyn ExtrinsicBuilder>, ) -> Result<(Block, Option)> { - let mut builder = self.client.new_block(Default::default())?; + let mut builder = self.client.new_block(Digest { logs: self.digest_items.clone() })?; // Create and insert the inherents. 
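		// The `Digest { logs: self.digest_items.clone() }` handed to `new_block` above is
		// what the new `digest_items` parameter feeds: it lets the benchmark build blocks
		// whose header already carries consensus digest items. As a sketch (hypothetical
		// values, not taken from this patch), a node integration could pass something like
		//   vec![DigestItem::PreRuntime(*b"aura", slot.encode())]
		// to `cmd.run(...)` instead of the empty `Vec::new()` used in the node templates.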
let inherents = builder.create_inherents(self.inherent_data.clone())?; for inherent in inherents { diff --git a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs index 6b4fd0fad6638..2c94a6be50255 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs @@ -19,7 +19,7 @@ use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; use sc_client_api::Backend as ClientBackend; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic}; +use sp_runtime::{traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; use clap::{Args, Parser}; use log::info; @@ -82,6 +82,7 @@ impl ExtrinsicCmd { &self, client: Arc, inherent_data: sp_inherents::InherentData, + digest_items: Vec, ext_factory: &ExtrinsicFactory, ) -> Result<()> where @@ -109,7 +110,7 @@ impl ExtrinsicCmd { return Err("Unknown pallet or extrinsic. Use --list for a complete list.".into()), }; - let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data); + let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data, digest_items); let stats = bench.bench_extrinsic(ext_builder)?; info!( "Executing a {}::{} extrinsic takes[ns]:\n{:?}", diff --git a/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/utils/frame/benchmarking-cli/src/overhead/cmd.rs index 28ceea63f1572..4d0e2e52df17d 100644 --- a/utils/frame/benchmarking-cli/src/overhead/cmd.rs +++ b/utils/frame/benchmarking-cli/src/overhead/cmd.rs @@ -23,7 +23,7 @@ use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; use sc_client_api::Backend as ClientBackend; use sc_service::Configuration; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic}; +use sp_runtime::{traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; use clap::{Args, Parser}; use log::info; @@ -90,6 +90,7 @@ impl OverheadCmd { cfg: Configuration, client: Arc, inherent_data: sp_inherents::InherentData, + digest_items: Vec, ext_builder: &dyn ExtrinsicBuilder, ) -> Result<()> where @@ -101,7 +102,7 @@ impl OverheadCmd { if ext_builder.pallet() != "system" || ext_builder.extrinsic() != "remark" { return Err(format!("The extrinsic builder is required to build `System::Remark` extrinsics but builds `{}` extrinsics instead", ext_builder.name()).into()); } - let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data); + let bench = Benchmark::new(client, self.params.bench.clone(), inherent_data, digest_items); // per-block execution overhead { From 4b7911607b48a5ca7ebf1d655f12d91cdd241866 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Sat, 3 Sep 2022 10:03:36 +0200 Subject: [PATCH 089/348] Emit events for inter contract calls (#12136) * Add topics to contract events * Add `Call` events * Fix compilation for no_std * Added docs --- frame/contracts/src/exec.rs | 143 +++++++++++++++--------- frame/contracts/src/lib.rs | 50 ++++++++- frame/contracts/src/tests.rs | 147 ++++++++++++++++++++----- frame/contracts/src/wasm/code_cache.rs | 10 +- 4 files changed, 266 insertions(+), 84 deletions(-) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 81e1aef92cbfc..4ae336a907001 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -34,7 +34,7 @@ use pallet_contracts_primitives::ExecReturnValue; use smallvec::{Array, SmallVec}; use 
sp_core::{crypto::UncheckedFrom, ecdsa::Public as ECDSAPublic}; use sp_io::{crypto::secp256k1_ecdsa_recover_compressed, hashing::blake2_256}; -use sp_runtime::traits::Convert; +use sp_runtime::traits::{Convert, Hash}; use sp_std::{marker::PhantomData, mem, prelude::*}; pub type AccountIdOf = ::AccountId; @@ -784,16 +784,19 @@ where /// /// This can be either a call or an instantiate. fn run(&mut self, executable: E, input_data: Vec) -> Result { - let entry_point = self.top_frame().entry_point; + let frame = self.top_frame(); + let entry_point = frame.entry_point; + let delegated_code_hash = + if frame.delegate_caller.is_some() { Some(*executable.code_hash()) } else { None }; let do_transaction = || { // We need to charge the storage deposit before the initial transfer so that // it can create the account in case the initial transfer is < ed. if entry_point == ExportedFunction::Constructor { - let top_frame = top_frame_mut!(self); - top_frame.nested_storage.charge_instantiate( + let frame = top_frame_mut!(self); + frame.nested_storage.charge_instantiate( &self.origin, - &top_frame.account_id, - top_frame.contract_info.get(&top_frame.account_id), + &frame.account_id, + frame.contract_info.get(&frame.account_id), )?; } @@ -805,23 +808,42 @@ where .execute(self, &entry_point, input_data) .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; - // Additional work needs to be performed in case of an instantiation. - if !output.did_revert() && entry_point == ExportedFunction::Constructor { - let frame = self.top_frame(); - - // It is not allowed to terminate a contract inside its constructor. - if matches!(frame.contract_info, CachedContract::Terminated) { - return Err(Error::::TerminatedInConstructor.into()) - } + // Avoid useless work that would be reverted anyways. + if output.did_revert() { + return Ok(output) + } - // Deposit an instantiation event. - deposit_event::( - vec![], - Event::Instantiated { - deployer: self.caller().clone(), - contract: frame.account_id.clone(), - }, - ); + let frame = self.top_frame(); + let account_id = &frame.account_id; + match (entry_point, delegated_code_hash) { + (ExportedFunction::Constructor, _) => { + // It is not allowed to terminate a contract inside its constructor. + if matches!(frame.contract_info, CachedContract::Terminated) { + return Err(Error::::TerminatedInConstructor.into()) + } + + // Deposit an instantiation event. + Contracts::::deposit_event( + vec![T::Hashing::hash_of(self.caller()), T::Hashing::hash_of(account_id)], + Event::Instantiated { + deployer: self.caller().clone(), + contract: account_id.clone(), + }, + ); + }, + (ExportedFunction::Call, Some(code_hash)) => { + Contracts::::deposit_event( + vec![T::Hashing::hash_of(account_id), T::Hashing::hash_of(&code_hash)], + Event::DelegateCalled { contract: account_id.clone(), code_hash }, + ); + }, + (ExportedFunction::Call, None) => { + let caller = self.caller(); + Contracts::::deposit_event( + vec![T::Hashing::hash_of(caller), T::Hashing::hash_of(account_id)], + Event::Called { caller: caller.clone(), contract: account_id.clone() }, + ); + }, } Ok(output) @@ -850,6 +872,7 @@ where // has changed. 
Err(error) => (false, Err(error.into())), }; + self.pop_frame(success); output } @@ -1134,10 +1157,13 @@ where )?; ContractInfoOf::::remove(&frame.account_id); E::remove_user(info.code_hash); - Contracts::::deposit_event(Event::Terminated { - contract: frame.account_id.clone(), - beneficiary: beneficiary.clone(), - }); + Contracts::::deposit_event( + vec![T::Hashing::hash_of(&frame.account_id), T::Hashing::hash_of(&beneficiary)], + Event::Terminated { + contract: frame.account_id.clone(), + beneficiary: beneficiary.clone(), + }, + ); Ok(()) } @@ -1242,7 +1268,7 @@ where } fn deposit_event(&mut self, topics: Vec, data: Vec) { - deposit_event::( + Contracts::::deposit_event( topics, Event::ContractEmitted { contract: self.top_frame().account_id.clone(), data }, ); @@ -1304,22 +1330,18 @@ where let prev_hash = top_frame.contract_info().code_hash; E::remove_user(prev_hash); top_frame.contract_info().code_hash = hash; - Contracts::::deposit_event(Event::ContractCodeUpdated { - contract: top_frame.account_id.clone(), - new_code_hash: hash, - old_code_hash: prev_hash, - }); + Contracts::::deposit_event( + vec![T::Hashing::hash_of(&top_frame.account_id), hash, prev_hash], + Event::ContractCodeUpdated { + contract: top_frame.account_id.clone(), + new_code_hash: hash, + old_code_hash: prev_hash, + }, + ); Ok(()) } } -fn deposit_event(topics: Vec, event: Event) { - >::deposit_event_indexed( - &topics, - ::Event::from(event).into(), - ) -} - mod sealing { use super::*; @@ -1347,7 +1369,7 @@ mod tests { gas::GasMeter, storage::Storage, tests::{ - test_utils::{get_balance, place_contract, set_balance}, + test_utils::{get_balance, hash, place_contract, set_balance}, Call, Event as MetaEvent, ExtBuilder, Test, TestFilter, ALICE, BOB, CHARLIE, GAS_LIMIT, }, Error, @@ -2237,7 +2259,10 @@ mod tests { ); assert_eq!( &events(), - &[Event::Instantiated { deployer: BOB, contract: instantiated_contract_address }] + &[ + Event::Instantiated { deployer: BOB, contract: instantiated_contract_address }, + Event::Called { caller: ALICE, contract: BOB }, + ] ); }); } @@ -2289,7 +2314,7 @@ mod tests { // The contract wasn't instantiated so we don't expect to see an instantiation // event here. 
- assert_eq!(&events(), &[]); + assert_eq!(&events(), &[Event::Called { caller: ALICE, contract: BOB },]); }); } @@ -2591,14 +2616,24 @@ mod tests { let remark_hash = ::Hashing::hash(b"Hello World"); assert_eq!( System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: MetaEvent::System(frame_system::Event::Remarked { - sender: BOB, - hash: remark_hash - }), - topics: vec![], - },] + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::System(frame_system::Event::Remarked { + sender: BOB, + hash: remark_hash + }), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::Contracts(crate::Event::Called { + caller: ALICE, + contract: BOB, + }), + topics: vec![hash(&ALICE), hash(&BOB)], + }, + ] ); }); } @@ -2684,6 +2719,14 @@ mod tests { },), topics: vec![], }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::Contracts(crate::Event::Called { + caller: ALICE, + contract: BOB, + }), + topics: vec![hash(&ALICE), hash(&BOB)], + }, ] ); }); diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 7fda4bca36d2e..3c63ad86016f3 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -631,11 +631,14 @@ pub mod pallet { }; >::add_user(code_hash)?; >::remove_user(contract.code_hash); - Self::deposit_event(Event::ContractCodeUpdated { - contract: dest.clone(), - new_code_hash: code_hash, - old_code_hash: contract.code_hash, - }); + Self::deposit_event( + vec![T::Hashing::hash_of(&dest), code_hash, contract.code_hash], + Event::ContractCodeUpdated { + contract: dest.clone(), + new_code_hash: code_hash, + old_code_hash: contract.code_hash, + }, + ); contract.code_hash = code_hash; Ok(()) }) @@ -643,7 +646,6 @@ pub mod pallet { } #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Contract deployed by address at the specified address. Instantiated { deployer: T::AccountId, contract: T::AccountId }, @@ -685,6 +687,35 @@ pub mod pallet { /// Previous code hash of the contract. old_code_hash: T::Hash, }, + + /// A contract was called either by a plain account or another contract. + /// + /// # Note + /// + /// Please keep in mind that like all events this is only emitted for successful + /// calls. This is because on failure all storage changes including events are + /// rolled back. + Called { + /// The account that called the `contract`. + caller: T::AccountId, + /// The contract that was called. + contract: T::AccountId, + }, + + /// A contract delegate called a code hash. + /// + /// # Note + /// + /// Please keep in mind that like all events this is only emitted for successful + /// calls. This is because on failure all storage changes including events are + /// rolled back. + DelegateCalled { + /// The contract that performed the delegate call and hence in whose context + /// the `code_hash` is executed. + contract: T::AccountId, + /// The code hash that was delegate called. 
+ code_hash: CodeHash, + }, } #[pallet::error] @@ -1084,4 +1115,11 @@ where }; InternalInstantiateOutput { result: try_exec(), gas_meter, storage_deposit } } + + fn deposit_event(topics: Vec, event: Event) { + >::deposit_event_indexed( + &topics, + ::Event::from(event).into(), + ) + } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index f4fa6bb3d72db..f65f845f7b9fe 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use self::test_utils::hash; use crate::{ chain_extension::{ ChainExtension, Environment, Ext, InitState, RegisteredChainExtension, @@ -74,8 +75,9 @@ frame_support::construct_runtime!( #[macro_use] pub mod test_utils { - use super::{Balances, Test}; + use super::{Balances, Hash, SysConfig, Test}; use crate::{exec::AccountIdOf, storage::Storage, CodeHash, Config, ContractInfoOf, Nonce}; + use codec::Encode; use frame_support::traits::Currency; pub fn place_contract(address: &AccountIdOf, code_hash: CodeHash) { @@ -95,6 +97,9 @@ pub mod test_utils { pub fn get_balance(who: &AccountIdOf) -> u64 { Balances::free_balance(who) } + pub fn hash(s: &S) -> <::Hashing as Hash>::Output { + <::Hashing as Hash>::hash_of(s) + } macro_rules! assert_return_code { ( $x:expr , $y:expr $(,)? ) => {{ assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32); @@ -571,7 +576,7 @@ fn instantiate_and_call_and_deposit_event() { deployer: ALICE, contract: addr.clone() }), - topics: vec![], + topics: vec![hash(&ALICE), hash(&addr)], }, ] ); @@ -857,7 +862,7 @@ fn deploy_and_call_other_contract() { deployer: caller_addr.clone(), contract: callee_addr.clone(), }), - topics: vec![], + topics: vec![hash(&caller_addr), hash(&callee_addr)], }, EventRecord { phase: Phase::Initialization, @@ -868,6 +873,22 @@ fn deploy_and_call_other_contract() { }), topics: vec![], }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: caller_addr.clone(), + contract: callee_addr.clone(), + }), + topics: vec![hash(&caller_addr), hash(&callee_addr)], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: ALICE, + contract: caller_addr.clone(), + }), + topics: vec![hash(&ALICE), hash(&caller_addr)], + }, ] ); }); @@ -1067,6 +1088,14 @@ fn cannot_self_destruct_by_refund_after_slash() { }), topics: vec![], }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: ALICE, + contract: addr.clone(), + }), + topics: vec![hash(&ALICE), hash(&addr)], + }, EventRecord { phase: Phase::Initialization, event: Event::Balances(pallet_balances::Event::ReserveRepatriated { @@ -1174,7 +1203,15 @@ fn self_destruct_works() { contract: addr.clone(), beneficiary: DJANGO }), - topics: vec![], + topics: vec![hash(&addr), hash(&DJANGO)], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: ALICE, + contract: addr.clone(), + }), + topics: vec![hash(&ALICE), hash(&addr)], }, EventRecord { phase: Phase::Initialization, @@ -2621,7 +2658,7 @@ fn upload_code_works() { EventRecord { phase: Phase::Initialization, event: Event::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![], + topics: vec![code_hash], }, ] ); @@ -2700,7 +2737,7 @@ fn remove_code_works() { EventRecord { phase: Phase::Initialization, event: 
Event::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![], + topics: vec![code_hash], }, EventRecord { phase: Phase::Initialization, @@ -2713,7 +2750,7 @@ fn remove_code_works() { EventRecord { phase: Phase::Initialization, event: Event::Contracts(crate::Event::CodeRemoved { code_hash }), - topics: vec![], + topics: vec![code_hash], }, ] ); @@ -2755,7 +2792,7 @@ fn remove_code_wrong_origin() { EventRecord { phase: Phase::Initialization, event: Event::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![], + topics: vec![code_hash], }, ] ); @@ -2886,7 +2923,7 @@ fn instantiate_with_zero_balance_works() { EventRecord { phase: Phase::Initialization, event: Event::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![], + topics: vec![code_hash], }, EventRecord { phase: Phase::Initialization, @@ -2894,7 +2931,7 @@ fn instantiate_with_zero_balance_works() { deployer: ALICE, contract: addr.clone(), }), - topics: vec![], + topics: vec![hash(&ALICE), hash(&addr)], }, ] ); @@ -2986,7 +3023,7 @@ fn instantiate_with_below_existential_deposit_works() { EventRecord { phase: Phase::Initialization, event: Event::Contracts(crate::Event::CodeStored { code_hash }), - topics: vec![], + topics: vec![code_hash], }, EventRecord { phase: Phase::Initialization, @@ -2994,7 +3031,7 @@ fn instantiate_with_below_existential_deposit_works() { deployer: ALICE, contract: addr.clone(), }), - topics: vec![], + topics: vec![hash(&ALICE), hash(&addr)], }, ] ); @@ -3074,6 +3111,14 @@ fn storage_deposit_works() { }), topics: vec![], }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: ALICE, + contract: addr.clone(), + }), + topics: vec![hash(&ALICE), hash(&addr)], + }, EventRecord { phase: Phase::Initialization, event: Event::Balances(pallet_balances::Event::Transfer { @@ -3091,6 +3136,14 @@ fn storage_deposit_works() { }), topics: vec![], }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: ALICE, + contract: addr.clone(), + }), + topics: vec![hash(&ALICE), hash(&addr)], + }, EventRecord { phase: Phase::Initialization, event: Event::Balances(pallet_balances::Event::Transfer { @@ -3108,6 +3161,14 @@ fn storage_deposit_works() { }), topics: vec![], }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: ALICE, + contract: addr.clone(), + }), + topics: vec![hash(&ALICE), hash(&addr)], + }, EventRecord { phase: Phase::Initialization, event: Event::Balances(pallet_balances::Event::ReserveRepatriated { @@ -3193,11 +3254,11 @@ fn set_code_extrinsic() { vec![EventRecord { phase: Phase::Initialization, event: Event::Contracts(pallet_contracts::Event::ContractCodeUpdated { - contract: addr, + contract: addr.clone(), new_code_hash, old_code_hash: code_hash, }), - topics: vec![], + topics: vec![hash(&addr), new_code_hash, code_hash], },] ); }); @@ -3226,7 +3287,7 @@ fn call_after_killed_account_needs_funding() { // Destroy the account of the contract by slashing. // Slashing can actually happen if the contract takes part in staking. - // It is a corner case and we except the destruction of the account. + // It is a corner case and we accept the destruction of the account. 
let _ = ::Currency::slash( &addr, ::Currency::total_balance(&addr), @@ -3284,6 +3345,14 @@ fn call_after_killed_account_needs_funding() { }), topics: vec![], }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: ALICE, + contract: addr.clone(), + }), + topics: vec![hash(&ALICE), hash(&addr)], + }, EventRecord { phase: Phase::Initialization, event: Event::System(frame_system::Event::NewAccount { account: addr.clone() }), @@ -3306,6 +3375,14 @@ fn call_after_killed_account_needs_funding() { }), topics: vec![], }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: ALICE, + contract: addr.clone(), + }), + topics: vec![hash(&ALICE), hash(&addr)], + }, ] ); }); @@ -3454,6 +3531,8 @@ fn set_code_hash() { // upload new code assert_ok!(Contracts::upload_code(Origin::signed(ALICE), new_wasm.clone(), None)); + System::reset_events(); + // First call sets new code_hash and returns 1 let result = Contracts::bare_call( ALICE, @@ -3477,16 +3556,34 @@ fn set_code_hash() { // Checking for the last event only assert_eq!( - System::events().pop().unwrap(), - EventRecord { - phase: Phase::Initialization, - event: Event::Contracts(crate::Event::ContractCodeUpdated { - contract: contract_addr.clone(), - new_code_hash, - old_code_hash: code_hash, - }), - topics: vec![], - }, + &System::events(), + &[ + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::ContractCodeUpdated { + contract: contract_addr.clone(), + new_code_hash, + old_code_hash: code_hash, + }), + topics: vec![hash(&contract_addr), new_code_hash, code_hash], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: ALICE, + contract: contract_addr.clone(), + }), + topics: vec![hash(&ALICE), hash(&contract_addr)], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Contracts(crate::Event::Called { + caller: ALICE, + contract: contract_addr.clone(), + }), + topics: vec![hash(&ALICE), hash(&contract_addr)], + }, + ], ); }); } diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index d482d5c4bbf3f..eb5551e342bca 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -42,6 +42,7 @@ use frame_support::{ }; use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::BadOrigin; +use sp_std::vec; /// Put the instrumented module in storage. /// @@ -96,7 +97,7 @@ where >::insert(&code_hash, orig_code); >::insert(&code_hash, owner_info); *existing = Some(module); - >::deposit_event(Event::CodeStored { code_hash }); + >::deposit_event(vec![code_hash], Event::CodeStored { code_hash }); Ok(()) }, }) @@ -133,7 +134,10 @@ pub fn increment_refcount(code_hash: CodeHash) -> Result<(), Dispa } /// Try to remove code together with all associated information. 
-pub fn try_remove(origin: &T::AccountId, code_hash: CodeHash) -> DispatchResult { +pub fn try_remove(origin: &T::AccountId, code_hash: CodeHash) -> DispatchResult +where + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ >::try_mutate_exists(&code_hash, |existing| { if let Some(owner_info) = existing { ensure!(owner_info.refcount == 0, >::CodeInUse); @@ -142,7 +146,7 @@ pub fn try_remove(origin: &T::AccountId, code_hash: CodeHash) -> D *existing = None; >::remove(&code_hash); >::remove(&code_hash); - >::deposit_event(Event::CodeRemoved { code_hash }); + >::deposit_event(vec![code_hash], Event::CodeRemoved { code_hash }); Ok(()) } else { Err(>::CodeNotFound.into()) From 78bff0efb83b8b3cff1d09561c335c9b76504066 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Sat, 3 Sep 2022 23:34:47 +0300 Subject: [PATCH 090/348] Use custom type for ProtocolName (#12172) * Add ProtocolName custom type * Use new ProtocolName in sc_network_common * Use new ProtocolName in sc_network * Use new ProtocolName for BEEFY and GRANDPA * Use new ProtocolName for notifications * Use new ProtocolName in sc_network (part 2) * Use new ProtocolName in sc_network_gossip * Use new ProtocolName in sc_offchain * Remove unused imports * Some more fixes * Add tests * Fix minor import issues * Re-export ProtocolName in sc_network * Revert "Re-export ProtocolName in sc_network" This reverts commit 8d8ff71927e7750757f29c9bbd88dc0ba181d214. * Re-export ProtocolName in sc_network * Remove dependency on sc-network-common from beefy-gadget --- client/beefy/src/lib.rs | 8 +- .../finality-grandpa/src/communication/mod.rs | 3 +- .../src/communication/tests.rs | 22 +-- client/finality-grandpa/src/lib.rs | 5 +- client/network-gossip/src/bridge.rs | 38 ++---- client/network-gossip/src/lib.rs | 9 +- client/network-gossip/src/state_machine.rs | 37 ++--- client/network/common/src/config.rs | 4 +- client/network/common/src/protocol.rs | 127 ++++++++++++++++++ client/network/common/src/protocol/event.rs | 10 +- .../network/common/src/request_responses.rs | 7 +- client/network/common/src/service.rs | 44 +++--- client/network/src/behaviour.rs | 20 +-- client/network/src/config.rs | 11 +- client/network/src/error.rs | 5 +- client/network/src/lib.rs | 5 +- client/network/src/protocol.rs | 26 ++-- .../src/protocol/notifications/behaviour.rs | 9 +- .../src/protocol/notifications/handler.rs | 9 +- .../notifications/upgrade/notifications.rs | 65 +++------ client/network/src/request_responses.rs | 29 ++-- client/network/src/service.rs | 50 +++---- client/network/src/service/tests.rs | 63 +++++---- client/network/src/transactions.rs | 12 +- client/network/test/src/lib.rs | 4 +- client/offchain/src/api.rs | 19 ++- client/offchain/src/lib.rs | 20 ++- 27 files changed, 381 insertions(+), 280 deletions(-) diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index bdf44e056786c..cdb7fca10320d 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -20,6 +20,7 @@ use beefy_primitives::{BeefyApi, MmrRootHash}; use prometheus::Registry; use sc_client_api::{Backend, BlockchainEvents, Finalizer}; use sc_consensus::BlockImport; +use sc_network::ProtocolName; use sc_network_gossip::Network as GossipNetwork; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; @@ -55,6 +56,7 @@ pub use beefy_protocol_name::standard_name as protocol_standard_name; pub(crate) mod beefy_protocol_name { use sc_chain_spec::ChainSpec; + use sc_network::ProtocolName; const NAME: &str = "/beefy/1"; /// Old names for the notifications protocol, used for 
backward compatibility. @@ -66,7 +68,7 @@ pub(crate) mod beefy_protocol_name { pub fn standard_name>( genesis_hash: &Hash, chain_spec: &Box, - ) -> std::borrow::Cow<'static, str> { + ) -> ProtocolName { let chain_prefix = match chain_spec.fork_id() { Some(fork_id) => format!("/{}/{}", hex::encode(genesis_hash), fork_id), None => format!("/{}", hex::encode(genesis_hash)), @@ -79,7 +81,7 @@ pub(crate) mod beefy_protocol_name { /// [`sc_network::config::NetworkConfiguration::extra_sets`]. /// For standard protocol name see [`beefy_protocol_name::standard_name`]. pub fn beefy_peers_set_config( - protocol_name: std::borrow::Cow<'static, str>, + protocol_name: ProtocolName, ) -> sc_network::config::NonDefaultSetConfig { let mut cfg = sc_network::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); @@ -202,7 +204,7 @@ where /// Prometheus metric registry pub prometheus_registry: Option, /// Chain specific GRANDPA protocol name. See [`beefy_protocol_name::standard_name`]. - pub protocol_name: std::borrow::Cow<'static, str>, + pub protocol_name: ProtocolName, /// Links between the block importer, the background voter and the RPC layer. pub links: BeefyVoterLinks, } diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 7a47ebe214950..04d7ceaa05c8c 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -70,6 +70,7 @@ pub(crate) mod tests; pub mod grandpa_protocol_name { use sc_chain_spec::ChainSpec; + use sc_network_common::protocol::ProtocolName; pub(crate) const NAME: &str = "/grandpa/1"; /// Old names for the notifications protocol, used for backward compatibility. @@ -81,7 +82,7 @@ pub mod grandpa_protocol_name { pub fn standard_name>( genesis_hash: &Hash, chain_spec: &Box, - ) -> std::borrow::Cow<'static, str> { + ) -> ProtocolName { let chain_prefix = match chain_spec.fork_id() { Some(fork_id) => format!("/{}/{}", hex::encode(genesis_hash), fork_id), None => format!("/{}", hex::encode(genesis_hash)), diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 4d709f56f3d61..1f607e8d68c02 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -28,7 +28,10 @@ use parity_scale_codec::Encode; use sc_network::{config::Role, Multiaddr, PeerId, ReputationChange}; use sc_network_common::{ config::MultiaddrWithPeerId, - protocol::event::{Event as NetworkEvent, ObservedRole}, + protocol::{ + event::{Event as NetworkEvent, ObservedRole}, + ProtocolName, + }, service::{ NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSyncForkRequest, NotificationSender, NotificationSenderError, @@ -41,7 +44,6 @@ use sp_finality_grandpa::AuthorityList; use sp_keyring::Ed25519Keyring; use sp_runtime::traits::NumberFor; use std::{ - borrow::Cow, collections::HashSet, pin::Pin, sync::Arc, @@ -78,7 +80,7 @@ impl NetworkPeers for TestNetwork { let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); } - fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) {} + fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) {} fn accept_unreserved_peers(&self) { unimplemented!(); @@ -98,7 +100,7 @@ impl NetworkPeers for TestNetwork { fn set_reserved_peers( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); @@ -106,23 +108,23 @@ 
impl NetworkPeers for TestNetwork { fn add_peers_to_reserved_set( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); } - fn remove_peers_from_reserved_set(&self, _protocol: Cow<'static, str>, _peers: Vec) {} + fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) {} fn add_to_peers_set( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); } - fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { unimplemented!(); } @@ -143,14 +145,14 @@ impl NetworkEventStream for TestNetwork { } impl NetworkNotification for TestNetwork { - fn write_notification(&self, target: PeerId, _protocol: Cow<'static, str>, message: Vec) { + fn write_notification(&self, target: PeerId, _protocol: ProtocolName, message: Vec) { let _ = self.sender.unbounded_send(Event::WriteNotification(target, message)); } fn notification_sender( &self, _target: PeerId, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, ) -> Result, NotificationSenderError> { unimplemented!(); } diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index cb32957c0b0bf..149cf1f89a222 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -68,6 +68,7 @@ use sc_client_api::{ StorageProvider, TransactionFor, }; use sc_consensus::BlockImport; +use sc_network_common::protocol::ProtocolName; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_api::ProvideRuntimeApi; @@ -266,7 +267,7 @@ pub struct Config { /// TelemetryHandle instance. pub telemetry: Option, /// Chain specific GRANDPA protocol name. See [`crate::protocol_standard_name`]. - pub protocol_name: std::borrow::Cow<'static, str>, + pub protocol_name: ProtocolName, } impl Config { @@ -723,7 +724,7 @@ pub struct GrandpaParams { /// [`sc_network::config::NetworkConfiguration::extra_sets`]. /// For standard protocol name see [`crate::protocol_standard_name`]. pub fn grandpa_peers_set_config( - protocol_name: std::borrow::Cow<'static, str>, + protocol_name: ProtocolName, ) -> sc_network::config::NonDefaultSetConfig { use communication::grandpa_protocol_name; sc_network::config::NonDefaultSetConfig { diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 45eff09332f67..121fa6dc9a50d 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -21,7 +21,7 @@ use crate::{ Network, Validator, }; -use sc_network_common::protocol::event::Event; +use sc_network_common::protocol::{event::Event, ProtocolName}; use sc_peerset::ReputationChange; use futures::{ @@ -33,7 +33,6 @@ use log::trace; use prometheus_endpoint::Registry; use sp_runtime::traits::Block as BlockT; use std::{ - borrow::Cow, collections::{HashMap, VecDeque}, pin::Pin, sync::Arc, @@ -46,7 +45,7 @@ pub struct GossipEngine { state_machine: ConsensusGossip, network: Box + Send>, periodic_maintenance_interval: futures_timer::Delay, - protocol: Cow<'static, str>, + protocol: ProtocolName, /// Incoming events from the network. network_event_stream: Pin + Send>>, @@ -78,7 +77,7 @@ impl GossipEngine { /// Create a new instance. 
pub fn new + Send + Clone + 'static>( network: N, - protocol: impl Into>, + protocol: impl Into, validator: Arc>, metrics_registry: Option<&Registry>, ) -> Self @@ -329,7 +328,6 @@ mod tests { traits::{Block as BlockT, NumberFor}, }; use std::{ - borrow::Cow, collections::HashSet, sync::{Arc, Mutex}, }; @@ -360,7 +358,7 @@ mod tests { fn report_peer(&self, _who: PeerId, _cost_benefit: ReputationChange) {} - fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { unimplemented!(); } @@ -382,7 +380,7 @@ mod tests { fn set_reserved_peers( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); @@ -390,28 +388,23 @@ mod tests { fn add_peers_to_reserved_set( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); } - fn remove_peers_from_reserved_set( - &self, - _protocol: Cow<'static, str>, - _peers: Vec, - ) { - } + fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) {} fn add_to_peers_set( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); } - fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { unimplemented!(); } @@ -430,19 +423,14 @@ mod tests { } impl NetworkNotification for TestNetwork { - fn write_notification( - &self, - _target: PeerId, - _protocol: Cow<'static, str>, - _message: Vec, - ) { + fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec) { unimplemented!(); } fn notification_sender( &self, _target: PeerId, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, ) -> Result, NotificationSenderError> { unimplemented!(); } @@ -505,7 +493,7 @@ mod tests { #[test] fn keeps_multiple_subscribers_per_topic_updated_with_both_old_and_new_messages() { let topic = H256::default(); - let protocol = Cow::Borrowed("/my_protocol"); + let protocol = ProtocolName::from("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); @@ -622,7 +610,7 @@ mod tests { } fn prop(channels: Vec, notifications: Vec>) { - let protocol = Cow::Borrowed("/my_protocol"); + let protocol = ProtocolName::from("/my_protocol"); let remote_peer = PeerId::random(); let network = TestNetwork::default(); diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 0fdde1feac6c6..1c8fb8ba05ce7 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -68,11 +68,12 @@ pub use self::{ }; use libp2p::{multiaddr, PeerId}; -use sc_network_common::service::{ - NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, +use sc_network_common::{ + protocol::ProtocolName, + service::{NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers}, }; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{borrow::Cow, iter}; +use std::iter; mod bridge; mod state_machine; @@ -82,7 +83,7 @@ mod validator; pub trait Network: NetworkPeers + NetworkEventStream + NetworkNotification + NetworkBlock> { - fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { + fn add_set_reserved(&self, who: PeerId, protocol: ProtocolName) { let addr = iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); let result = self.add_peers_to_reserved_set(protocol, 
iter::once(addr).collect()); diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index ff75b4520bf03..30a2e9d1494be 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -22,9 +22,9 @@ use ahash::AHashSet; use libp2p::PeerId; use lru::LruCache; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network_common::protocol::event::ObservedRole; +use sc_network_common::protocol::{event::ObservedRole, ProtocolName}; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; -use std::{borrow::Cow, collections::HashMap, iter, sync::Arc, time, time::Instant}; +use std::{collections::HashMap, iter, sync::Arc, time, time::Instant}; // FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 // NOTE: The current value is adjusted based on largest production network deployment (Kusama) and @@ -99,7 +99,7 @@ impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { fn propagate<'a, B: BlockT, I>( network: &mut dyn Network, - protocol: Cow<'static, str>, + protocol: ProtocolName, messages: I, intent: MessageIntent, peers: &mut HashMap>, @@ -155,7 +155,7 @@ pub struct ConsensusGossip { peers: HashMap>, messages: Vec>, known_messages: LruCache, - protocol: Cow<'static, str>, + protocol: ProtocolName, validator: Arc>, next_broadcast: Instant, metrics: Option, @@ -165,7 +165,7 @@ impl ConsensusGossip { /// Create a new instance using the given validator. pub fn new( validator: Arc>, - protocol: Cow<'static, str>, + protocol: ProtocolName, metrics_registry: Option<&Registry>, ) -> Self { let metrics = match metrics_registry.map(Metrics::register) { @@ -527,7 +527,6 @@ mod tests { traits::NumberFor, }; use std::{ - borrow::Cow, collections::HashSet, pin::Pin, sync::{Arc, Mutex}, @@ -599,7 +598,7 @@ mod tests { self.inner.lock().unwrap().peer_reports.push((who, cost_benefit)); } - fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { unimplemented!(); } @@ -621,7 +620,7 @@ mod tests { fn set_reserved_peers( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); @@ -629,28 +628,23 @@ mod tests { fn add_peers_to_reserved_set( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); } - fn remove_peers_from_reserved_set( - &self, - _protocol: Cow<'static, str>, - _peers: Vec, - ) { - } + fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) {} fn add_to_peers_set( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); } - fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { unimplemented!(); } @@ -666,19 +660,14 @@ mod tests { } impl NetworkNotification for NoOpNetwork { - fn write_notification( - &self, - _target: PeerId, - _protocol: Cow<'static, str>, - _message: Vec, - ) { + fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec) { unimplemented!(); } fn notification_sender( &self, _target: PeerId, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, ) -> Result, NotificationSenderError> { unimplemented!(); } diff --git a/client/network/common/src/config.rs 
b/client/network/common/src/config.rs index c2e61424c88e0..8b7e045780d7d 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -21,7 +21,9 @@ use libp2p::{multiaddr, Multiaddr, PeerId}; use std::{fmt, str, str::FromStr}; -/// Name of a protocol, transmitted on the wire. Should be unique for each chain. Always UTF-8. +/// Protocol name prefix, transmitted on the wire for legacy protocol names. +/// I.e., `dot` in `/dot/sync/2`. Should be unique for each chain. Always UTF-8. +/// Deprecated in favour of genesis hash & fork ID based protocol names. #[derive(Clone, PartialEq, Eq, Hash)] pub struct ProtocolId(smallvec::SmallVec<[u8; 6]>); diff --git a/client/network/common/src/protocol.rs b/client/network/common/src/protocol.rs index 0fd36cb511112..11edc373a2620 100644 --- a/client/network/common/src/protocol.rs +++ b/client/network/common/src/protocol.rs @@ -16,4 +16,131 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::{ + borrow::Borrow, + fmt, + hash::{Hash, Hasher}, + ops::Deref, + sync::Arc, +}; + +use libp2p::core::upgrade; + pub mod event; + +/// The protocol name transmitted on the wire. +#[derive(Debug, Clone)] +pub enum ProtocolName { + /// The protocol name as a static string. + Static(&'static str), + /// The protocol name as a dynamically allocated string. + OnHeap(Arc), +} + +impl From<&'static str> for ProtocolName { + fn from(name: &'static str) -> Self { + Self::Static(name) + } +} + +impl From> for ProtocolName { + fn from(name: Arc) -> Self { + Self::OnHeap(name) + } +} + +impl From for ProtocolName { + fn from(name: String) -> Self { + Self::OnHeap(Arc::from(name)) + } +} + +impl Deref for ProtocolName { + type Target = str; + + fn deref(&self) -> &str { + match self { + Self::Static(name) => name, + Self::OnHeap(name) => &name, + } + } +} + +impl Borrow for ProtocolName { + fn borrow(&self) -> &str { + self + } +} + +impl PartialEq for ProtocolName { + fn eq(&self, other: &Self) -> bool { + (self as &str) == (other as &str) + } +} + +impl Eq for ProtocolName {} + +impl Hash for ProtocolName { + fn hash(&self, state: &mut H) { + (self as &str).hash(state) + } +} + +impl fmt::Display for ProtocolName { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(self) + } +} + +impl upgrade::ProtocolName for ProtocolName { + fn protocol_name(&self) -> &[u8] { + (self as &str).as_bytes() + } +} + +#[cfg(test)] +mod tests { + use super::ProtocolName; + use std::{ + borrow::Borrow, + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + }; + + #[test] + fn protocol_name_keys_are_equivalent_to_str_keys() { + const PROTOCOL: &'static str = "/some/protocol/1"; + let static_protocol_name = ProtocolName::from(PROTOCOL); + let on_heap_protocol_name = ProtocolName::from(String::from(PROTOCOL)); + + assert_eq!(>::borrow(&static_protocol_name), PROTOCOL); + assert_eq!(>::borrow(&on_heap_protocol_name), PROTOCOL); + assert_eq!(static_protocol_name, on_heap_protocol_name); + + assert_eq!(hash(static_protocol_name), hash(PROTOCOL)); + assert_eq!(hash(on_heap_protocol_name), hash(PROTOCOL)); + } + + #[test] + fn different_protocol_names_do_not_compare_equal() { + const PROTOCOL1: &'static str = "/some/protocol/1"; + let static_protocol_name1 = ProtocolName::from(PROTOCOL1); + let on_heap_protocol_name1 = ProtocolName::from(String::from(PROTOCOL1)); + + const PROTOCOL2: &'static str = "/some/protocol/2"; + let static_protocol_name2 = 
ProtocolName::from(PROTOCOL2); + let on_heap_protocol_name2 = ProtocolName::from(String::from(PROTOCOL2)); + + assert_ne!(>::borrow(&static_protocol_name1), PROTOCOL2); + assert_ne!(>::borrow(&on_heap_protocol_name1), PROTOCOL2); + assert_ne!(static_protocol_name1, static_protocol_name2); + assert_ne!(static_protocol_name1, on_heap_protocol_name2); + assert_ne!(on_heap_protocol_name1, on_heap_protocol_name2); + } + + fn hash(x: T) -> u64 { + let mut hasher = DefaultHasher::new(); + x.hash(&mut hasher); + hasher.finish() + } +} diff --git a/client/network/common/src/protocol/event.rs b/client/network/common/src/protocol/event.rs index c6fb4a2faf9f9..3d8c183da495c 100644 --- a/client/network/common/src/protocol/event.rs +++ b/client/network/common/src/protocol/event.rs @@ -19,9 +19,9 @@ //! Network event types. These are are not the part of the protocol, but rather //! events that happen on the network like DHT get/put results received. +use super::ProtocolName; use bytes::Bytes; use libp2p::{core::PeerId, kad::record::Key}; -use std::borrow::Cow; /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] @@ -69,13 +69,13 @@ pub enum Event { /// This is always equal to the value of /// `sc_network::config::NonDefaultSetConfig::notifications_protocol` of one of the /// configured sets. - protocol: Cow<'static, str>, + protocol: ProtocolName, /// If the negotiation didn't use the main name of the protocol (the one in /// `notifications_protocol`), then this field contains which name has actually been /// used. /// Always contains a value equal to the value in /// `sc_network::config::NonDefaultSetConfig::fallback_names`. - negotiated_fallback: Option>, + negotiated_fallback: Option, /// Role of the remote. role: ObservedRole, }, @@ -86,7 +86,7 @@ pub enum Event { /// Node we closed the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - protocol: Cow<'static, str>, + protocol: ProtocolName, }, /// Received one or more messages from the given node using the given protocol. @@ -94,7 +94,7 @@ pub enum Event { /// Node we received the message from. remote: PeerId, /// Concerned protocol and associated message. - messages: Vec<(Cow<'static, str>, Bytes)>, + messages: Vec<(ProtocolName, Bytes)>, }, } diff --git a/client/network/common/src/request_responses.rs b/client/network/common/src/request_responses.rs index a216c9c2d254d..1a8d48e11be53 100644 --- a/client/network/common/src/request_responses.rs +++ b/client/network/common/src/request_responses.rs @@ -18,19 +18,20 @@ //! Collection of generic data structures for request-response protocols. +use crate::protocol::ProtocolName; use futures::channel::{mpsc, oneshot}; use libp2p::{request_response::OutboundFailure, PeerId}; use sc_peerset::ReputationChange; -use std::{borrow::Cow, time::Duration}; +use std::time::Duration; /// Configuration for a single request-response protocol. #[derive(Debug, Clone)] pub struct ProtocolConfig { /// Name of the protocol on the wire. Should be something like `/foo/bar`. - pub name: Cow<'static, str>, + pub name: ProtocolName, /// Fallback on the wire protocol names to support. - pub fallback_names: Vec>, + pub fallback_names: Vec, /// Maximum allowed size, in bytes, of a request. 
/// diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs index 9544cddf013cf..88583832e4c38 100644 --- a/client/network/common/src/service.rs +++ b/client/network/common/src/service.rs @@ -20,7 +20,7 @@ use crate::{ config::MultiaddrWithPeerId, - protocol::event::Event, + protocol::{event::Event, ProtocolName}, request_responses::{IfDisconnected, RequestFailure}, sync::{warp::WarpSyncProgress, StateDownloadProgress, SyncState}, }; @@ -30,7 +30,7 @@ use libp2p::{Multiaddr, PeerId}; use sc_peerset::ReputationChange; pub use signature::Signature; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use std::{borrow::Cow, collections::HashSet, future::Future, pin::Pin, sync::Arc}; +use std::{collections::HashSet, future::Future, pin::Pin, sync::Arc}; mod signature; @@ -171,7 +171,7 @@ pub trait NetworkPeers { /// See also [`NetworkPeers::remove_from_peers_set`], which has the same effect but also /// prevents the local node from re-establishing an outgoing substream to this peer until it /// is added again. - fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>); + fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName); /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. fn accept_unreserved_peers(&self); @@ -207,7 +207,7 @@ pub trait NetworkPeers { /// invalid peer ID (which includes the local peer ID). fn set_reserved_peers( &self, - protocol: Cow<'static, str>, + protocol: ProtocolName, peers: HashSet, ) -> Result<(), String>; @@ -220,12 +220,12 @@ pub trait NetworkPeers { /// invalid peer ID (which includes the local peer ID). fn add_peers_to_reserved_set( &self, - protocol: Cow<'static, str>, + protocol: ProtocolName, peers: HashSet, ) -> Result<(), String>; /// Remove peers from a peer set. - fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec); + fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec); /// Add a peer to a set of peers. /// @@ -238,14 +238,14 @@ pub trait NetworkPeers { /// invalid peer ID (which includes the local peer ID). fn add_to_peers_set( &self, - protocol: Cow<'static, str>, + protocol: ProtocolName, peers: HashSet, ) -> Result<(), String>; /// Remove peers from a peer set. /// /// If we currently have an open substream with this peer, it will soon be closed. - fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec); + fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec); /// Returns the number of peers in the sync peer set we're connected to. 
fn sync_num_connected(&self) -> usize; @@ -273,7 +273,7 @@ where T::report_peer(self, who, cost_benefit) } - fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { T::disconnect_peer(self, who, protocol) } @@ -295,7 +295,7 @@ where fn set_reserved_peers( &self, - protocol: Cow<'static, str>, + protocol: ProtocolName, peers: HashSet, ) -> Result<(), String> { T::set_reserved_peers(self, protocol, peers) @@ -303,25 +303,25 @@ where fn add_peers_to_reserved_set( &self, - protocol: Cow<'static, str>, + protocol: ProtocolName, peers: HashSet, ) -> Result<(), String> { T::add_peers_to_reserved_set(self, protocol, peers) } - fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { + fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec) { T::remove_peers_from_reserved_set(self, protocol, peers) } fn add_to_peers_set( &self, - protocol: Cow<'static, str>, + protocol: ProtocolName, peers: HashSet, ) -> Result<(), String> { T::add_to_peers_set(self, protocol, peers) } - fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { + fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec) { T::remove_from_peers_set(self, protocol, peers) } @@ -431,7 +431,7 @@ pub trait NetworkNotification { /// /// The protocol must have been registered with /// `crate::config::NetworkConfiguration::notifications_protocols`. - fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec); + fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec); /// Obtains a [`NotificationSender`] for a connected peer, if it exists. /// @@ -502,7 +502,7 @@ pub trait NetworkNotification { fn notification_sender( &self, target: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, ) -> Result, NotificationSenderError>; } @@ -511,14 +511,14 @@ where T: ?Sized, T: NetworkNotification, { - fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec) { + fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec) { T::write_notification(self, target, protocol, message) } fn notification_sender( &self, target: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, ) -> Result, NotificationSenderError> { T::notification_sender(self, target, protocol) } @@ -547,7 +547,7 @@ pub trait NetworkRequest { async fn request( &self, target: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, request: Vec, connect: IfDisconnected, ) -> Result, RequestFailure>; @@ -565,7 +565,7 @@ pub trait NetworkRequest { fn start_request( &self, target: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, request: Vec, tx: oneshot::Sender, RequestFailure>>, connect: IfDisconnected, @@ -581,7 +581,7 @@ where fn request<'life0, 'async_trait>( &'life0 self, target: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, request: Vec, connect: IfDisconnected, ) -> Pin, RequestFailure>> + Send + 'async_trait>> @@ -595,7 +595,7 @@ where fn start_request( &self, target: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, request: Vec, tx: oneshot::Sender, RequestFailure>>, connect: IfDisconnected, diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 4177c2452c7f7..24df47db6803f 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -42,7 +42,10 @@ use log::debug; use 
sc_consensus::import_queue::{IncomingBlock, Origin}; use sc_network_common::{ config::ProtocolId, - protocol::event::{DhtEvent, ObservedRole}, + protocol::{ + event::{DhtEvent, ObservedRole}, + ProtocolName, + }, request_responses::{IfDisconnected, ProtocolConfig, RequestFailure}, }; use sc_peerset::PeersetHandle; @@ -53,7 +56,6 @@ use sp_runtime::{ Justifications, }; use std::{ - borrow::Cow, collections::{HashSet, VecDeque}, iter, task::{Context, Poll}, @@ -117,7 +119,7 @@ pub enum BehaviourOut { /// Peer which sent us a request. peer: PeerId, /// Protocol name of the request. - protocol: Cow<'static, str>, + protocol: ProtocolName, /// If `Ok`, contains the time elapsed between when we received the request and when we /// sent back the response. If `Err`, the error that happened. result: Result, @@ -130,7 +132,7 @@ pub enum BehaviourOut { /// Peer that we send a request to. peer: PeerId, /// Name of the protocol in question. - protocol: Cow<'static, str>, + protocol: ProtocolName, /// Duration the request took. duration: Duration, /// Result of the request. @@ -144,12 +146,12 @@ pub enum BehaviourOut { /// Node we opened the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - protocol: Cow<'static, str>, + protocol: ProtocolName, /// If the negotiation didn't use the main name of the protocol (the one in /// `notifications_protocol`), then this field contains which name has actually been /// used. /// See also [`crate::Event::NotificationStreamOpened`]. - negotiated_fallback: Option>, + negotiated_fallback: Option, /// Object that permits sending notifications to the peer. notifications_sink: NotificationsSink, /// Role of the remote. @@ -165,7 +167,7 @@ pub enum BehaviourOut { /// Id of the peer we are connected to. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - protocol: Cow<'static, str>, + protocol: ProtocolName, /// Replacement for the previous [`NotificationsSink`]. notifications_sink: NotificationsSink, }, @@ -176,7 +178,7 @@ pub enum BehaviourOut { /// Node we closed the substream with. remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. - protocol: Cow<'static, str>, + protocol: ProtocolName, }, /// Received one or more messages from the given node using the given protocol. @@ -184,7 +186,7 @@ pub enum BehaviourOut { /// Node we received the message from. remote: PeerId, /// Concerned protocol and associated message. - messages: Vec<(Cow<'static, str>, Bytes)>, + messages: Vec<(ProtocolName, Bytes)>, }, /// Now connected to a new peer for syncing purposes. diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 521aa42827563..3fe95deb0c1ed 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -41,10 +41,9 @@ use libp2p::{ }; use prometheus_endpoint::Registry; use sc_consensus::ImportQueue; -use sc_network_common::{config::MultiaddrWithPeerId, sync::ChainSync}; +use sc_network_common::{config::MultiaddrWithPeerId, protocol::ProtocolName, sync::ChainSync}; use sp_runtime::traits::Block as BlockT; use std::{ - borrow::Cow, collections::HashMap, error::Error, fs, @@ -431,14 +430,14 @@ pub struct NonDefaultSetConfig { /// /// > **Note**: This field isn't present for the default set, as this is handled internally /// > by the networking code. 
- pub notifications_protocol: Cow<'static, str>, + pub notifications_protocol: ProtocolName, /// If the remote reports that it doesn't support the protocol indicated in the /// `notifications_protocol` field, then each of these fallback names will be tried one by /// one. /// /// If a fallback is used, it will be reported in /// [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. - pub fallback_names: Vec>, + pub fallback_names: Vec, /// Maximum allowed size of single notifications. pub max_notification_size: u64, /// Base configuration. @@ -447,7 +446,7 @@ pub struct NonDefaultSetConfig { impl NonDefaultSetConfig { /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. - pub fn new(notifications_protocol: Cow<'static, str>, max_notification_size: u64) -> Self { + pub fn new(notifications_protocol: ProtocolName, max_notification_size: u64) -> Self { Self { notifications_protocol, max_notification_size, @@ -476,7 +475,7 @@ impl NonDefaultSetConfig { /// Add a list of protocol names used for backward compatibility. /// /// See the explanations in [`NonDefaultSetConfig::fallback_names`]. - pub fn add_fallback_names(&mut self, fallback_names: Vec>) { + pub fn add_fallback_names(&mut self, fallback_names: Vec) { self.fallback_names.extend(fallback_names); } } diff --git a/client/network/src/error.rs b/client/network/src/error.rs index 716235193a80f..b4287ffbd55db 100644 --- a/client/network/src/error.rs +++ b/client/network/src/error.rs @@ -20,8 +20,9 @@ use crate::config::TransportConfig; use libp2p::{Multiaddr, PeerId}; +use sc_network_common::protocol::ProtocolName; -use std::{borrow::Cow, fmt}; +use std::fmt; /// Result type alias for the network. pub type Result = std::result::Result; @@ -65,7 +66,7 @@ pub enum Error { #[error("Request-response protocol registered multiple times: {protocol}")] DuplicateRequestResponseProtocol { /// Name of the protocol registered multiple times. - protocol: Cow<'static, str>, + protocol: ProtocolName, }, } diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index b1d70c28289bd..35a91e77da524 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -264,7 +264,10 @@ pub mod transactions; pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use protocol::PeerInfo; pub use sc_network_common::{ - protocol::event::{DhtEvent, Event, ObservedRole}, + protocol::{ + event::{DhtEvent, Event, ObservedRole}, + ProtocolName, + }, request_responses::{IfDisconnected, RequestFailure}, service::{ KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkRequest, NetworkSigner, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 64794538999b0..9bcf363cd0692 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -44,6 +44,7 @@ use sc_client_api::HeaderBackend; use sc_consensus::import_queue::{BlockImportError, BlockImportStatus, IncomingBlock, Origin}; use sc_network_common::{ config::ProtocolId, + protocol::ProtocolName, request_responses::RequestFailure, sync::{ message::{ @@ -63,7 +64,6 @@ use sp_runtime::{ Justifications, }; use std::{ - borrow::Cow, collections::{HashMap, HashSet, VecDeque}, io, iter, num::NonZeroUsize, @@ -190,7 +190,7 @@ pub struct Protocol { /// Handles opening the unique substream and sending and receiving raw messages. behaviour: Notifications, /// List of notifications protocols that have been registered. 
- notification_protocols: Vec>, + notification_protocols: Vec, /// If we receive a new "substream open" event that contains an invalid handshake, we ask the /// inner layer to force-close the substream. Force-closing the substream will generate a /// "substream closed" event. This is a problem: since we can't propagate the "substream open" @@ -460,7 +460,7 @@ where } /// Disconnects the given peer if we are connected to it. - pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: &str) { + pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: ProtocolName) { if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) { self.behaviour.disconnect_peer( @@ -1085,7 +1085,7 @@ where } /// Sets the list of reserved peers for the given protocol/peerset. - pub fn set_reserved_peerset_peers(&self, protocol: Cow<'static, str>, peers: HashSet) { + pub fn set_reserved_peerset_peers(&self, protocol: ProtocolName, peers: HashSet) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { self.peerset_handle .set_reserved_peers(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peers); @@ -1099,7 +1099,7 @@ where } /// Removes a `PeerId` from the list of reserved peers. - pub fn remove_set_reserved_peer(&self, protocol: Cow<'static, str>, peer: PeerId) { + pub fn remove_set_reserved_peer(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { self.peerset_handle.remove_reserved_peer( sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), @@ -1115,7 +1115,7 @@ where } /// Adds a `PeerId` to the list of reserved peers. - pub fn add_set_reserved_peer(&self, protocol: Cow<'static, str>, peer: PeerId) { + pub fn add_set_reserved_peer(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { self.peerset_handle .add_reserved_peer(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); @@ -1138,7 +1138,7 @@ where } /// Add a peer to a peers set. - pub fn add_to_peers_set(&self, protocol: Cow<'static, str>, peer: PeerId) { + pub fn add_to_peers_set(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { self.peerset_handle .add_to_peers_set(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); @@ -1152,7 +1152,7 @@ where } /// Remove a peer from a peers set. - pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peer: PeerId) { + pub fn remove_from_peers_set(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { self.peerset_handle.remove_from_peers_set( sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), @@ -1259,27 +1259,27 @@ pub enum CustomMessageOutcome { /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, /// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. - negotiated_fallback: Option>, + negotiated_fallback: Option, roles: Roles, notifications_sink: NotificationsSink, }, /// The [`NotificationsSink`] of some notification protocols need an update. NotificationStreamReplaced { remote: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, notifications_sink: NotificationsSink, }, /// Notification protocols have been closed with a remote. 
NotificationStreamClosed { remote: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, }, /// Messages have been received on one or more notifications protocols. NotificationsReceived { remote: PeerId, - messages: Vec<(Cow<'static, str>, Bytes)>, + messages: Vec<(ProtocolName, Bytes)>, }, /// A new block request must be emitted. BlockRequest { diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 1f872ec857e79..04f6fe445ac63 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -33,15 +33,14 @@ use libp2p::{ use log::{error, trace, warn}; use parking_lot::RwLock; use rand::distributions::{Distribution as _, Uniform}; +use sc_network_common::protocol::ProtocolName; use sc_peerset::DropReason; use smallvec::SmallVec; use std::{ - borrow::Cow, cmp, collections::{hash_map::Entry, VecDeque}, mem, pin::Pin, - str, sync::Arc, task::{Context, Poll}, time::{Duration, Instant}, @@ -140,9 +139,9 @@ pub struct Notifications { #[derive(Debug, Clone)] pub struct ProtocolConfig { /// Name of the protocol. - pub name: Cow<'static, str>, + pub name: ProtocolName, /// Names of the protocol to use if the main one isn't available. - pub fallback_names: Vec>, + pub fallback_names: Vec, /// Handshake of the protocol. pub handshake: Vec, /// Maximum allowed size for a notification. @@ -309,7 +308,7 @@ pub enum NotificationsOut { set_id: sc_peerset::SetId, /// If `Some`, a fallback protocol name has been used rather the main protocol name. /// Always matches one of the fallback names passed at initialization. - negotiated_fallback: Option>, + negotiated_fallback: Option, /// Handshake that was sent to us. /// This is normally a "Status" message, but this is out of the concern of this code. received_handshake: Vec, diff --git a/client/network/src/protocol/notifications/handler.rs b/client/network/src/protocol/notifications/handler.rs index c1602319d0ac4..ea09cb76edce1 100644 --- a/client/network/src/protocol/notifications/handler.rs +++ b/client/network/src/protocol/notifications/handler.rs @@ -80,12 +80,11 @@ use libp2p::{ }; use log::error; use parking_lot::{Mutex, RwLock}; +use sc_network_common::protocol::ProtocolName; use std::{ - borrow::Cow, collections::VecDeque, mem, pin::Pin, - str, sync::Arc, task::{Context, Poll}, time::{Duration, Instant}, @@ -146,9 +145,9 @@ pub struct NotifsHandler { #[derive(Debug, Clone)] pub struct ProtocolConfig { /// Name of the protocol. - pub name: Cow<'static, str>, + pub name: ProtocolName, /// Names of the protocol to use if the main one isn't available. - pub fallback_names: Vec>, + pub fallback_names: Vec, /// Handshake of the protocol. The `RwLock` is locked every time a new substream is opened. pub handshake: Arc>>, /// Maximum allowed size for a notification. @@ -297,7 +296,7 @@ pub enum NotifsHandlerOut { /// Index of the protocol in the list of protocols passed at initialization. protocol_index: usize, /// Name of the protocol that was actually negotiated, if the default one wasn't available. - negotiated_fallback: Option>, + negotiated_fallback: Option, /// The endpoint of the connection that is open for custom protocols. endpoint: ConnectedPoint, /// Handshake that was sent to us. 
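The core type this commit introduces is worth pausing on: `ProtocolName` keeps statically known names as a `&'static str` (no allocation) and runtime-derived names as a shared `Arc<str>`, while the `Borrow<str>`, `PartialEq` and `Hash` impls shown in `client/network/common/src/protocol.rs` make both forms behave exactly like the underlying string. A minimal sketch (not part of the patch) of how callers can rely on this, assuming only those impls:

```rust
use sc_network_common::protocol::ProtocolName;
use std::collections::HashMap;

fn main() {
    // Statically known name: stored as `&'static str`, no allocation.
    let static_name = ProtocolName::from("/dot/transactions/1");
    // Runtime-derived name (e.g. built from a genesis hash): stored as `Arc<str>`.
    let runtime_name = ProtocolName::from(format!("/{}/transactions/1", "dot"));

    // Both variants compare and hash like the underlying string.
    assert_eq!(static_name, runtime_name);

    // `Borrow<str>` plus the str-compatible `Hash` impl allow lookups by `&str`.
    let mut handshakes: HashMap<ProtocolName, Vec<u8>> = HashMap::new();
    handshakes.insert(runtime_name, b"role: full".to_vec());
    assert!(handshakes.contains_key("/dot/transactions/1"));
}
```

Cloning either variant is cheap (a pointer copy or an `Arc` refcount bump), which matters given how often names are copied into events such as `NotificationStreamOpened` and `NotificationsReceived`.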
diff --git a/client/network/src/protocol/notifications/upgrade/notifications.rs b/client/network/src/protocol/notifications/upgrade/notifications.rs index 3fbb59d399a0e..56cfefd75d53d 100644 --- a/client/network/src/protocol/notifications/upgrade/notifications.rs +++ b/client/network/src/protocol/notifications/upgrade/notifications.rs @@ -39,8 +39,8 @@ use bytes::BytesMut; use futures::prelude::*; use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use log::{error, warn}; +use sc_network_common::protocol::ProtocolName; use std::{ - borrow::Cow, convert::Infallible, io, mem, pin::Pin, @@ -58,7 +58,7 @@ const MAX_HANDSHAKE_SIZE: usize = 1024; pub struct NotificationsIn { /// Protocol name to use when negotiating the substream. /// The first one is the main name, while the other ones are fall backs. - protocol_names: Vec>, + protocol_names: Vec, /// Maximum allowed size for a single notification. max_notification_size: u64, } @@ -69,7 +69,7 @@ pub struct NotificationsIn { pub struct NotificationsOut { /// Protocol name to use when negotiating the substream. /// The first one is the main name, while the other ones are fall backs. - protocol_names: Vec>, + protocol_names: Vec, /// Message to send when we start the handshake. initial_message: Vec, /// Maximum allowed size for a single notification. @@ -114,8 +114,8 @@ pub struct NotificationsOutSubstream { impl NotificationsIn { /// Builds a new potential upgrade. pub fn new( - main_protocol_name: impl Into>, - fallback_names: Vec>, + main_protocol_name: impl Into, + fallback_names: Vec, max_notification_size: u64, ) -> Self { let mut protocol_names = fallback_names; @@ -126,16 +126,11 @@ impl NotificationsIn { } impl UpgradeInfo for NotificationsIn { - type Info = StringProtocolName; + type Info = ProtocolName; type InfoIter = vec::IntoIter; fn protocol_info(&self) -> Self::InfoIter { - self.protocol_names - .iter() - .cloned() - .map(StringProtocolName) - .collect::>() - .into_iter() + self.protocol_names.clone().into_iter() } } @@ -172,10 +167,10 @@ where Ok(NotificationsInOpen { handshake, - negotiated_fallback: if negotiated_name.0 == self.protocol_names[0] { + negotiated_fallback: if negotiated_name == self.protocol_names[0] { None } else { - Some(negotiated_name.0) + Some(negotiated_name) }, substream, }) @@ -189,7 +184,7 @@ pub struct NotificationsInOpen { pub handshake: Vec, /// If the negotiated name is not the "main" protocol name but a fallback, contains the /// name of the negotiated fallback. - pub negotiated_fallback: Option>, + pub negotiated_fallback: Option, /// Implementation of `Stream` that allows receives messages from the substream. pub substream: NotificationsInSubstream, } @@ -334,8 +329,8 @@ where impl NotificationsOut { /// Builds a new potential upgrade. pub fn new( - main_protocol_name: impl Into>, - fallback_names: Vec>, + main_protocol_name: impl Into, + fallback_names: Vec, initial_message: impl Into>, max_notification_size: u64, ) -> Self { @@ -351,27 +346,12 @@ impl NotificationsOut { } } -/// Implementation of the `ProtocolName` trait, where the protocol name is a string. 
-#[derive(Debug, Clone, PartialEq, Eq)] -pub struct StringProtocolName(Cow<'static, str>); - -impl upgrade::ProtocolName for StringProtocolName { - fn protocol_name(&self) -> &[u8] { - self.0.as_bytes() - } -} - impl UpgradeInfo for NotificationsOut { - type Info = StringProtocolName; + type Info = ProtocolName; type InfoIter = vec::IntoIter; fn protocol_info(&self) -> Self::InfoIter { - self.protocol_names - .iter() - .cloned() - .map(StringProtocolName) - .collect::>() - .into_iter() + self.protocol_names.clone().into_iter() } } @@ -406,10 +386,10 @@ where Ok(NotificationsOutOpen { handshake, - negotiated_fallback: if negotiated_name.0 == self.protocol_names[0] { + negotiated_fallback: if negotiated_name == self.protocol_names[0] { None } else { - Some(negotiated_name.0) + Some(negotiated_name) }, substream: NotificationsOutSubstream { socket: Framed::new(socket, codec) }, }) @@ -423,7 +403,7 @@ pub struct NotificationsOutOpen { pub handshake: Vec, /// If the negotiated name is not the "main" protocol name but a fallback, contains the /// name of the negotiated fallback. - pub negotiated_fallback: Option>, + pub negotiated_fallback: Option, /// Implementation of `Sink` that allows sending messages on the substream. pub substream: NotificationsOutSubstream, } @@ -505,11 +485,10 @@ mod tests { use async_std::net::{TcpListener, TcpStream}; use futures::{channel::oneshot, prelude::*}; use libp2p::core::upgrade; - use std::borrow::Cow; #[test] fn basic_works() { - const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); + const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -552,7 +531,7 @@ mod tests { fn empty_handshake() { // Check that everything still works when the handshake messages are empty. 
- const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); + const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -593,7 +572,7 @@ mod tests { #[test] fn refused() { - const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); + const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -634,7 +613,7 @@ mod tests { #[test] fn large_initial_message_refused() { - const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); + const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { @@ -672,7 +651,7 @@ mod tests { #[test] fn large_handshake_refused() { - const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); + const PROTO_NAME: &str = "/test/proto/1"; let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); let client = async_std::task::spawn(async move { diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index 9eab85a4c1ce1..d49cbd8051341 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -50,11 +50,13 @@ use libp2p::{ NetworkBehaviourAction, PollParameters, }, }; -use sc_network_common::request_responses::{ - IfDisconnected, IncomingRequest, OutgoingResponse, ProtocolConfig, RequestFailure, +use sc_network_common::{ + protocol::ProtocolName, + request_responses::{ + IfDisconnected, IncomingRequest, OutgoingResponse, ProtocolConfig, RequestFailure, + }, }; use std::{ - borrow::Cow, collections::{hash_map::Entry, HashMap}, io, iter, pin::Pin, @@ -75,7 +77,7 @@ pub enum Event { /// Peer which has emitted the request. peer: PeerId, /// Name of the protocol in question. - protocol: Cow<'static, str>, + protocol: ProtocolName, /// Whether handling the request was successful or unsuccessful. /// /// When successful contains the time elapsed between when we received the request and when @@ -91,7 +93,7 @@ pub enum Event { /// Peer that we send a request to. peer: PeerId, /// Name of the protocol in question. - protocol: Cow<'static, str>, + protocol: ProtocolName, /// Duration the request took. duration: Duration, /// Result of the request. @@ -110,12 +112,12 @@ pub enum Event { /// [`ProtocolRequestId`]s. #[derive(Debug, Clone, PartialEq, Eq, Hash)] struct ProtocolRequestId { - protocol: Cow<'static, str>, + protocol: ProtocolName, request_id: RequestId, } -impl From<(Cow<'static, str>, RequestId)> for ProtocolRequestId { - fn from((protocol, request_id): (Cow<'static, str>, RequestId)) -> Self { +impl From<(ProtocolName, RequestId)> for ProtocolRequestId { + fn from((protocol, request_id): (ProtocolName, RequestId)) -> Self { Self { protocol, request_id } } } @@ -126,7 +128,7 @@ pub struct RequestResponsesBehaviour { /// Contains the underlying libp2p `RequestResponse` behaviour, plus an optional /// "response builder" used to build responses for incoming requests. protocols: HashMap< - Cow<'static, str>, + ProtocolName, (RequestResponse, Option>), >, @@ -162,7 +164,7 @@ struct MessageRequest { request_id: RequestId, request: Vec, channel: ResponseChannel, ()>>, - protocol: String, + protocol: ProtocolName, resp_builder: Option>, // Once we get incoming request we save all params, create an async call to Peerset // to get the reputation of the peer. 
@@ -173,7 +175,7 @@ struct MessageRequest { struct RequestProcessingOutcome { peer: PeerId, request_id: RequestId, - protocol: Cow<'static, str>, + protocol: ProtocolName, inner_channel: ResponseChannel, ()>>, response: OutgoingResponse, } @@ -495,7 +497,6 @@ impl NetworkBehaviour for RequestResponsesBehaviour { debug_assert!(false, "Received message on outbound-only protocol."); } - let protocol = Cow::from(protocol); self.pending_responses.push(Box::pin(async move { // The `tx` created above can be dropped if we are not capable of // processing this request, which is reflected as a @@ -618,7 +619,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { request_id, request, channel, - protocol: protocol.to_string(), + protocol: protocol.clone(), resp_builder: resp_builder.clone(), get_peer_reputation, }); @@ -766,7 +767,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { pub enum RegisterError { /// A protocol has been specified multiple times. #[error("{0}")] - DuplicateProtocol(Cow<'static, str>), + DuplicateProtocol(ProtocolName), } /// Error when processing a request sent by a remote. diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 7a196da25260a..4610b025dde0d 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -61,7 +61,10 @@ use parking_lot::Mutex; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_network_common::{ config::MultiaddrWithPeerId, - protocol::event::{DhtEvent, Event}, + protocol::{ + event::{DhtEvent, Event}, + ProtocolName, + }, request_responses::{IfDisconnected, RequestFailure}, service::{ NetworkDHTProvider, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSigner, @@ -76,7 +79,6 @@ use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnbound use sp_blockchain::HeaderBackend; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{ - borrow::Cow, cmp, collections::{HashMap, HashSet}, fs, iter, @@ -122,7 +124,7 @@ pub struct NetworkService { to_worker: TracingUnboundedSender>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. - peers_notifications_sinks: Arc), NotificationsSink>>>, + peers_notifications_sinks: Arc>>, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. 
notifications_sizes_metric: Option, @@ -890,7 +892,7 @@ where self.peerset.report_peer(who, cost_benefit); } - fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { + fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol)); } @@ -921,7 +923,7 @@ where fn set_reserved_peers( &self, - protocol: Cow<'static, str>, + protocol: ProtocolName, peers: HashSet, ) -> Result<(), String> { let peers_addrs = self.split_multiaddr_and_peer_id(peers)?; @@ -952,7 +954,7 @@ where fn add_peers_to_reserved_set( &self, - protocol: Cow<'static, str>, + protocol: ProtocolName, peers: HashSet, ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; @@ -976,7 +978,7 @@ where Ok(()) } - fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { + fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec) { for peer_id in peers.into_iter() { let _ = self .to_worker @@ -986,7 +988,7 @@ where fn add_to_peers_set( &self, - protocol: Cow<'static, str>, + protocol: ProtocolName, peers: HashSet, ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; @@ -1010,7 +1012,7 @@ where Ok(()) } - fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { + fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec) { for peer_id in peers.into_iter() { let _ = self .to_worker @@ -1040,7 +1042,7 @@ where B: BlockT + 'static, H: ExHashT, { - fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec) { + fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec) { // We clone the `NotificationsSink` in order to be able to unlock the network-wide // `peers_notifications_sinks` mutex as soon as possible. let sink = { @@ -1077,7 +1079,7 @@ where fn notification_sender( &self, target: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, ) -> Result, NotificationSenderError> { // We clone the `NotificationsSink` in order to be able to unlock the network-wide // `peers_notifications_sinks` mutex as soon as possible. @@ -1108,7 +1110,7 @@ where async fn request( &self, target: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, request: Vec, connect: IfDisconnected, ) -> Result, RequestFailure> { @@ -1128,7 +1130,7 @@ where fn start_request( &self, target: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, request: Vec, tx: oneshot::Sender, RequestFailure>>, connect: IfDisconnected, @@ -1179,7 +1181,7 @@ pub struct NotificationSender { sink: NotificationsSink, /// Name of the protocol on the wire. - protocol_name: Cow<'static, str>, + protocol_name: ProtocolName, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. @@ -1212,7 +1214,7 @@ pub struct NotificationSenderReady<'a> { peer_id: &'a PeerId, /// Name of the protocol on the wire. - protocol_name: &'a Cow<'static, str>, + protocol_name: &'a ProtocolName, /// Field extracted from the [`Metrics`] struct and necessary to report the /// notifications-related metrics. 
@@ -1256,16 +1258,16 @@ enum ServiceToWorkerMsg { AddReserved(PeerId), RemoveReserved(PeerId), SetReserved(HashSet), - SetPeersetReserved(Cow<'static, str>, HashSet), - AddSetReserved(Cow<'static, str>, PeerId), - RemoveSetReserved(Cow<'static, str>, PeerId), - AddToPeersSet(Cow<'static, str>, PeerId), - RemoveFromPeersSet(Cow<'static, str>, PeerId), + SetPeersetReserved(ProtocolName, HashSet), + AddSetReserved(ProtocolName, PeerId), + RemoveSetReserved(ProtocolName, PeerId), + AddToPeersSet(ProtocolName, PeerId), + RemoveFromPeersSet(ProtocolName, PeerId), SyncFork(Vec, B::Hash, NumberFor), EventStream(out_events::Sender), Request { target: PeerId, - protocol: Cow<'static, str>, + protocol: ProtocolName, request: Vec, pending_response: oneshot::Sender, RequestFailure>>, connect: IfDisconnected, @@ -1276,7 +1278,7 @@ enum ServiceToWorkerMsg { NetworkState { pending_response: oneshot::Sender>, }, - DisconnectPeer(PeerId, Cow<'static, str>), + DisconnectPeer(PeerId, ProtocolName), NewBestBlockImported(B::Hash, NumberFor), } @@ -1312,7 +1314,7 @@ where boot_node_ids: Arc>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. - peers_notifications_sinks: Arc), NotificationsSink>>>, + peers_notifications_sinks: Arc>>, /// Controller for the handler of incoming and outgoing transactions. tx_handler_controller: transactions::TransactionsHandlerController, } @@ -1456,7 +1458,7 @@ where .network_service .behaviour_mut() .user_protocol_mut() - .disconnect_peer(&who, &protocol_name), + .disconnect_peer(&who, protocol_name), ServiceToWorkerMsg::NewBestBlockImported(hash, number) => this .network_service .behaviour_mut() diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index e0d8798aef91e..8770e14bf5338 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -32,7 +32,7 @@ use sc_network_sync::{ }; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use sp_runtime::traits::{Block as BlockT, Header as _}; -use std::{borrow::Cow, sync::Arc, time::Duration}; +use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; type TestNetworkService = NetworkService< @@ -165,7 +165,7 @@ fn build_test_full_node( (service, event_stream) } -const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo"); +const PROTOCOL_NAME: &str = "/foo"; /// Builds two nodes and their associated events stream. /// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. @@ -179,7 +179,7 @@ fn build_nodes_one_proto() -> ( let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { extra_sets: vec![config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, + notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: Default::default(), @@ -191,7 +191,7 @@ fn build_nodes_one_proto() -> ( let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { extra_sets: vec![config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, + notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: config::SetConfig { @@ -219,10 +219,18 @@ fn notifications_state_consistent() { // Write some initial notifications that shouldn't get through. 
for _ in 0..(rand::random::() % 5) { - node1.write_notification(node2.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec()); + node1.write_notification( + node2.local_peer_id(), + PROTOCOL_NAME.into(), + b"hello world".to_vec(), + ); } for _ in 0..(rand::random::() % 5) { - node2.write_notification(node1.local_peer_id(), PROTOCOL_NAME, b"hello world".to_vec()); + node2.write_notification( + node1.local_peer_id(), + PROTOCOL_NAME.into(), + b"hello world".to_vec(), + ); } async_std::task::block_on(async move { @@ -247,24 +255,24 @@ fn notifications_state_consistent() { if rand::random::() % 5 >= 3 { node1.write_notification( node2.local_peer_id(), - PROTOCOL_NAME, + PROTOCOL_NAME.into(), b"hello world".to_vec(), ); } if rand::random::() % 5 >= 3 { node2.write_notification( node1.local_peer_id(), - PROTOCOL_NAME, + PROTOCOL_NAME.into(), b"hello world".to_vec(), ); } // Also randomly disconnect the two nodes from time to time. if rand::random::() % 20 == 0 { - node1.disconnect_peer(node2.local_peer_id(), PROTOCOL_NAME); + node1.disconnect_peer(node2.local_peer_id(), PROTOCOL_NAME.into()); } if rand::random::() % 20 == 0 { - node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME); + node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into()); } // Grab next event from either `events_stream1` or `events_stream2`. @@ -288,7 +296,7 @@ fn notifications_state_consistent() { future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME { + if protocol == PROTOCOL_NAME.into() { something_happened = true; assert!(!node1_to_node2_open); node1_to_node2_open = true; @@ -297,7 +305,7 @@ fn notifications_state_consistent() { future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME { + if protocol == PROTOCOL_NAME.into() { something_happened = true; assert!(!node2_to_node1_open); node2_to_node1_open = true; @@ -306,7 +314,7 @@ fn notifications_state_consistent() { future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME { + if protocol == PROTOCOL_NAME.into() { assert!(node1_to_node2_open); node1_to_node2_open = false; assert_eq!(remote, node2.local_peer_id()); @@ -314,7 +322,7 @@ fn notifications_state_consistent() { future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. 
}) => - if protocol == PROTOCOL_NAME { + if protocol == PROTOCOL_NAME.into() { assert!(node2_to_node1_open); node2_to_node1_open = false; assert_eq!(remote, node1.local_peer_id()); @@ -325,7 +333,7 @@ fn notifications_state_consistent() { if rand::random::() % 5 >= 4 { node1.write_notification( node2.local_peer_id(), - PROTOCOL_NAME, + PROTOCOL_NAME.into(), b"hello world".to_vec(), ); } @@ -336,7 +344,7 @@ fn notifications_state_consistent() { if rand::random::() % 5 >= 4 { node2.write_notification( node1.local_peer_id(), - PROTOCOL_NAME, + PROTOCOL_NAME.into(), b"hello world".to_vec(), ); } @@ -361,7 +369,7 @@ fn lots_of_incoming_peers_works() { let (main_node, _) = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], extra_sets: vec![config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, + notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: config::SetConfig { in_peers: u32::MAX, ..Default::default() }, @@ -380,7 +388,7 @@ fn lots_of_incoming_peers_works() { let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![], extra_sets: vec![config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, + notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: config::SetConfig { @@ -449,7 +457,7 @@ fn notifications_back_pressure() { Event::NotificationStreamClosed { .. } => panic!(), Event::NotificationsReceived { messages, .. } => for message in messages { - assert_eq!(message.0, PROTOCOL_NAME); + assert_eq!(message.0, PROTOCOL_NAME.into()); assert_eq!(message.1, format!("hello #{}", received_notifications)); received_notifications += 1; }, @@ -473,7 +481,7 @@ fn notifications_back_pressure() { // Sending! for num in 0..TOTAL_NOTIFS { - let notif = node1.notification_sender(node2_id, PROTOCOL_NAME).unwrap(); + let notif = node1.notification_sender(node2_id, PROTOCOL_NAME.into()).unwrap(); notif .ready() .await @@ -491,15 +499,14 @@ fn fallback_name_working() { // Node 1 supports the protocols "new" and "old". Node 2 only supports "old". Checks whether // they can connect. - const NEW_PROTOCOL_NAME: Cow<'static, str> = - Cow::Borrowed("/new-shiny-protocol-that-isnt-PROTOCOL_NAME"); + const NEW_PROTOCOL_NAME: &str = "/new-shiny-protocol-that-isnt-PROTOCOL_NAME"; let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { extra_sets: vec![config::NonDefaultSetConfig { - notifications_protocol: NEW_PROTOCOL_NAME.clone(), - fallback_names: vec![PROTOCOL_NAME], + notifications_protocol: NEW_PROTOCOL_NAME.into(), + fallback_names: vec![PROTOCOL_NAME.into()], max_notification_size: 1024 * 1024, set_config: Default::default(), }], @@ -510,7 +517,7 @@ fn fallback_name_working() { let (_, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { extra_sets: vec![config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, + notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: config::SetConfig { @@ -531,7 +538,7 @@ fn fallback_name_working() { loop { match events_stream2.next().await.unwrap() { Event::NotificationStreamOpened { protocol, negotiated_fallback, .. 
} => { - assert_eq!(protocol, PROTOCOL_NAME); + assert_eq!(protocol, PROTOCOL_NAME.into()); assert_eq!(negotiated_fallback, None); break }, @@ -545,9 +552,9 @@ fn fallback_name_working() { loop { match events_stream1.next().await.unwrap() { Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } - if protocol == NEW_PROTOCOL_NAME => + if protocol == NEW_PROTOCOL_NAME.into() => { - assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME)); + assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME.into())); break }, _ => {}, diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index f557ec0cec7c7..1cf532f33ddc6 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -42,12 +42,14 @@ use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network_common::{ config::ProtocolId, - protocol::event::{Event, ObservedRole}, + protocol::{ + event::{Event, ObservedRole}, + ProtocolName, + }, service::{NetworkEventStream, NetworkNotification, NetworkPeers}, }; use sp_runtime::traits::Block as BlockT; use std::{ - borrow::Cow, collections::{hash_map::Entry, HashMap}, iter, num::NonZeroUsize, @@ -130,8 +132,8 @@ impl Future for PendingTransaction { /// Prototype for a [`TransactionsHandler`]. pub struct TransactionsHandlerPrototype { - protocol_name: Cow<'static, str>, - fallback_protocol_names: Vec>, + protocol_name: ProtocolName, + fallback_protocol_names: Vec, } impl TransactionsHandlerPrototype { @@ -244,7 +246,7 @@ enum ToHandler { /// Handler for transactions. Call [`TransactionsHandler::run`] to start the processing. pub struct TransactionsHandler { - protocol_name: Cow<'static, str>, + protocol_name: ProtocolName, /// Interval at which we call `propagate_transactions`. propagate_timeout: Pin + Send>>, /// Pending transactions verification tasks. diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 323af13943de7..837cdeed0f3d1 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -23,7 +23,6 @@ mod block_import; mod sync; use std::{ - borrow::Cow, collections::HashMap, marker::PhantomData, pin::Pin, @@ -58,6 +57,7 @@ use sc_network::{ }; use sc_network_common::{ config::{MultiaddrWithPeerId, ProtocolId}, + protocol::ProtocolName, service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, }; @@ -682,7 +682,7 @@ pub struct FullPeerConfig { /// Block announce validator. pub block_announce_validator: Option + Send + Sync>>, /// List of notification protocols that the network must support. - pub notifications_protocols: Vec>, + pub notifications_protocols: Vec, /// The indices of the peers the peer should be connected to. /// /// If `None`, it will be connected to all other peers. 
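The `fallback_name_working` test above exercises the intended migration path: a node announces a new, chain-specific protocol name while keeping the legacy name as a fallback, so peers that only know the old name can still open the substream (the new node then sees it in `negotiated_fallback`). A rough sketch of that configuration outside the test harness; the helper function and the concrete names are illustrative, not part of the patch:

```rust
use sc_network::config::NonDefaultSetConfig;

// Hypothetical helper: build a notifications set config that prefers a
// genesis-hash-based protocol name and keeps the legacy name as a fallback
// for older peers.
fn transactions_set_config(genesis_hash_hex: &str) -> NonDefaultSetConfig {
    let mut config = NonDefaultSetConfig::new(
        // Runtime-derived name: becomes `ProtocolName::OnHeap` via `From<String>`.
        format!("/{}/transactions/1", genesis_hash_hex).into(),
        1024 * 1024,
    );
    // Legacy, statically known name: becomes `ProtocolName::Static`.
    config.add_fallback_names(vec!["/dot/transactions/1".into()]);
    config
}
```

Because `ProtocolName` implements `upgrade::ProtocolName` directly, both entries feed straight into the notifications upgrade's `protocol_info()`, without the `StringProtocolName` wrapper this commit removes.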
diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index f40bceb615148..6d6c52c989c34 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -328,11 +328,12 @@ mod tests { use sc_client_db::offchain::LocalStorage; use sc_network_common::{ config::MultiaddrWithPeerId, + protocol::ProtocolName, service::{NetworkPeers, NetworkStateInfo}, }; use sc_peerset::ReputationChange; use sp_core::offchain::{DbExternalities, Externalities}; - use std::{borrow::Cow, time::SystemTime}; + use std::time::SystemTime; pub(super) struct TestNetwork(); @@ -353,7 +354,7 @@ mod tests { unimplemented!(); } - fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { unimplemented!(); } @@ -375,7 +376,7 @@ mod tests { fn set_reserved_peers( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); @@ -383,29 +384,25 @@ mod tests { fn add_peers_to_reserved_set( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); } - fn remove_peers_from_reserved_set( - &self, - _protocol: Cow<'static, str>, - _peers: Vec, - ) { + fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) { unimplemented!(); } fn add_to_peers_set( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); } - fn remove_from_peers_set(&self, _protocol: Cow<'static, str>, _peers: Vec) { + fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { unimplemented!(); } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index e215a016872cc..87e79833b9706 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -249,12 +249,12 @@ mod tests { use libp2p::{Multiaddr, PeerId}; use sc_block_builder::BlockBuilderProvider as _; use sc_client_api::Backend as _; - use sc_network_common::config::MultiaddrWithPeerId; + use sc_network_common::{config::MultiaddrWithPeerId, protocol::ProtocolName}; use sc_peerset::ReputationChange; use sc_transaction_pool::{BasicPool, FullChainApi}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; - use std::{borrow::Cow, collections::HashSet, sync::Arc}; + use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime_client::{ runtime::Block, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClient, TestClientBuilderExt, @@ -289,7 +289,7 @@ mod tests { unimplemented!(); } - fn disconnect_peer(&self, _who: PeerId, _protocol: Cow<'static, str>) { + fn disconnect_peer(&self, _who: PeerId, _protocol: ProtocolName) { unimplemented!(); } @@ -311,7 +311,7 @@ mod tests { fn set_reserved_peers( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); @@ -319,29 +319,25 @@ mod tests { fn add_peers_to_reserved_set( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); } - fn remove_peers_from_reserved_set( - &self, - _protocol: Cow<'static, str>, - _peers: Vec, - ) { + fn remove_peers_from_reserved_set(&self, _protocol: ProtocolName, _peers: Vec) { unimplemented!(); } fn add_to_peers_set( &self, - _protocol: Cow<'static, str>, + _protocol: ProtocolName, _peers: HashSet, ) -> Result<(), String> { unimplemented!(); } - fn remove_from_peers_set(&self, 
_protocol: Cow<'static, str>, _peers: Vec) { + fn remove_from_peers_set(&self, _protocol: ProtocolName, _peers: Vec) { unimplemented!(); } From 1a15071a83aa104927e313afb65262359850b0d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Mon, 5 Sep 2022 09:26:45 +0200 Subject: [PATCH 091/348] `try-runtime`::`follow-chain` - execute all blocks (#12048) * extract subscription * FinalizedHeaders * Fool of a Took * testability * tests * review comments * clippy --- Cargo.lock | 1 + utils/frame/try-runtime/cli/Cargo.toml | 3 + .../cli/src/commands/follow_chain.rs | 264 ++++++++++++++++-- 3 files changed, 239 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ae225184c5b28..f6c2df81d6ee1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11122,6 +11122,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-version", + "tokio", "zstd", ] diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index dd98854e84d75..56ead30644d86 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -32,3 +32,6 @@ sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../../primitives/state-machine" } sp-version = { version = "5.0.0", path = "../../../../primitives/version" } frame-try-runtime = { path = "../../../../frame/try-runtime" } + +[dev-dependencies] +tokio = "1.17.0" diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 01fc1dae15a05..9f598694d0ff1 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -20,16 +20,20 @@ use crate::{ state_machine_call_with_proof, SharedParams, LOG_TARGET, }; use jsonrpsee::{ - core::client::{Subscription, SubscriptionClientT}, + core::{ + async_trait, + client::{Client, Subscription, SubscriptionClientT}, + }, ws_client::WsClientBuilder, }; use parity_scale_codec::{Decode, Encode}; use remote_externalities::{rpc_api, Builder, Mode, OnlineConfig}; use sc_executor::NativeExecutionDispatch; use sc_service::Configuration; +use serde::de::DeserializeOwned; use sp_core::H256; -use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; -use std::{fmt::Debug, str::FromStr}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use std::{collections::VecDeque, fmt::Debug, marker::PhantomData, str::FromStr}; const SUB: &str = "chain_subscribeFinalizedHeads"; const UN_SUB: &str = "chain_unsubscribeFinalizedHeads"; @@ -58,51 +62,182 @@ pub struct FollowChainCmd { try_state: frame_try_runtime::TryStateSelect, } +/// Start listening for with `SUB` at `url`. +/// +/// Returns a pair `(client, subscription)` - `subscription` alone will be useless, because it +/// relies on the related alive `client`. +async fn start_subscribing( + url: &str, +) -> sc_cli::Result<(Client, Subscription
)> { + let client = WsClientBuilder::default() + .connection_timeout(std::time::Duration::new(20, 0)) + .max_notifs_per_subscription(1024) + .max_request_body_size(u32::MAX) + .build(url) + .await + .map_err(|e| sc_cli::Error::Application(e.into()))?; + + log::info!(target: LOG_TARGET, "subscribing to {:?} / {:?}", SUB, UN_SUB); + + let sub = client + .subscribe(SUB, None, UN_SUB) + .await + .map_err(|e| sc_cli::Error::Application(e.into()))?; + Ok((client, sub)) +} + +/// Abstraction over RPC calling for headers. +#[async_trait] +trait HeaderProvider +where + Block::Header: HeaderT, +{ + /// Awaits for the header of the block with hash `hash`. + async fn get_header(&mut self, hash: Block::Hash) -> Block::Header; +} + +struct RpcHeaderProvider { + uri: String, + _phantom: PhantomData, +} + +#[async_trait] +impl HeaderProvider for RpcHeaderProvider +where + Block::Header: DeserializeOwned, +{ + async fn get_header(&mut self, hash: Block::Hash) -> Block::Header { + rpc_api::get_header::(&self.uri, hash).await.unwrap() + } +} + +/// Abstraction over RPC subscription for finalized headers. +#[async_trait] +trait HeaderSubscription +where + Block::Header: HeaderT, +{ + /// Await for the next finalized header from the subscription. + /// + /// Returns `None` if either the subscription has been closed or there was an error when reading + /// an object from the client. + async fn next_header(&mut self) -> Option; +} + +#[async_trait] +impl HeaderSubscription for Subscription +where + Block::Header: DeserializeOwned, +{ + async fn next_header(&mut self) -> Option { + match self.next().await { + Some(Ok(header)) => Some(header), + None => { + log::warn!("subscription closed"); + None + }, + Some(Err(why)) => { + log::warn!("subscription returned error: {:?}. Probably decoding has failed.", why); + None + }, + } + } +} + +/// Stream of all finalized headers. +/// +/// Returned headers are guaranteed to be ordered. There are no missing headers (even if some of +/// them lack justification). +struct FinalizedHeaders, HS: HeaderSubscription> { + header_provider: HP, + subscription: HS, + fetched_headers: VecDeque, + last_returned: Option<::Hash>, +} + +impl, HS: HeaderSubscription> + FinalizedHeaders +where + ::Header: DeserializeOwned, +{ + pub fn new(header_provider: HP, subscription: HS) -> Self { + Self { + header_provider, + subscription, + fetched_headers: VecDeque::new(), + last_returned: None, + } + } + + /// Reads next finalized header from the subscription. If some headers (without justification) + /// have been skipped, fetches them as well. Returns number of headers that have been fetched. + /// + /// All fetched headers are stored in `self.fetched_headers`. + async fn fetch(&mut self) -> usize { + let last_finalized = match self.subscription.next_header().await { + Some(header) => header, + None => return 0, + }; + + self.fetched_headers.push_front(last_finalized.clone()); + + let mut last_finalized_parent = *last_finalized.parent_hash(); + let last_returned = self.last_returned.unwrap_or(last_finalized_parent); + + while last_finalized_parent != last_returned { + let parent_header = self.header_provider.get_header(last_finalized_parent).await; + self.fetched_headers.push_front(parent_header.clone()); + last_finalized_parent = *parent_header.parent_hash(); + } + + self.fetched_headers.len() + } + + /// Get the next finalized header. 
+ pub async fn next(&mut self) -> Option { + if self.fetched_headers.is_empty() { + self.fetch().await; + } + + if let Some(header) = self.fetched_headers.pop_front() { + self.last_returned = Some(header.hash()); + Some(header) + } else { + None + } + } +} + pub(crate) async fn follow_chain( shared: SharedParams, command: FollowChainCmd, config: Configuration, ) -> sc_cli::Result<()> where - Block: BlockT + serde::de::DeserializeOwned, + Block: BlockT + DeserializeOwned, Block::Hash: FromStr, - Block::Header: serde::de::DeserializeOwned, + Block::Header: DeserializeOwned, ::Err: Debug, NumberFor: FromStr, as FromStr>::Err: Debug, ExecDispatch: NativeExecutionDispatch + 'static, { let mut maybe_state_ext = None; - - let client = WsClientBuilder::default() - .connection_timeout(std::time::Duration::new(20, 0)) - .max_notifs_per_subscription(1024) - .max_request_body_size(u32::MAX) - .build(&command.uri) - .await - .unwrap(); - - log::info!(target: LOG_TARGET, "subscribing to {:?} / {:?}", SUB, UN_SUB); - let mut subscription: Subscription = - client.subscribe(SUB, None, UN_SUB).await.unwrap(); + let (_client, subscription) = start_subscribing::(&command.uri).await?; let (code_key, code) = extract_code(&config.chain_spec)?; let executor = build_executor::(&shared, &config); let execution = shared.execution; - loop { - let header = match subscription.next().await { - Some(Ok(header)) => header, - None => { - log::warn!("subscription closed"); - break - }, - Some(Err(why)) => { - log::warn!("subscription returned error: {:?}. Probably decoding has failed.", why); - continue - }, - }; + let header_provider: RpcHeaderProvider = + RpcHeaderProvider { uri: command.uri.clone(), _phantom: PhantomData {} }; + let mut finalized_headers: FinalizedHeaders< + Block, + RpcHeaderProvider, + Subscription, + > = FinalizedHeaders::new(header_provider, subscription); + while let Some(header) = finalized_headers.next().await { let hash = header.hash(); let number = header.number(); @@ -193,3 +328,74 @@ where log::error!(target: LOG_TARGET, "ws subscription must have terminated."); Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + use sp_runtime::testing::{Block as TBlock, ExtrinsicWrapper, Header}; + + type Block = TBlock>; + type BlockNumber = u64; + type Hash = H256; + + struct MockHeaderProvider(pub VecDeque); + + fn headers() -> Vec
{ + let mut headers = vec![Header::new_from_number(0)]; + for n in 1..11 { + headers.push(Header { + parent_hash: headers.last().unwrap().hash(), + ..Header::new_from_number(n) + }) + } + headers + } + + #[async_trait] + impl HeaderProvider for MockHeaderProvider { + async fn get_header(&mut self, _hash: Hash) -> Header { + let height = self.0.pop_front().unwrap(); + headers()[height as usize].clone() + } + } + + struct MockHeaderSubscription(pub VecDeque); + + #[async_trait] + impl HeaderSubscription for MockHeaderSubscription { + async fn next_header(&mut self) -> Option
{ + self.0.pop_front().map(|h| headers()[h as usize].clone()) + } + } + + #[tokio::test] + async fn finalized_headers_works_when_every_block_comes_from_subscription() { + let heights = vec![4, 5, 6, 7]; + + let provider = MockHeaderProvider(vec![].into()); + let subscription = MockHeaderSubscription(heights.clone().into()); + let mut headers = FinalizedHeaders::new(provider, subscription); + + for h in heights { + assert_eq!(h, headers.next().await.unwrap().number); + } + assert_eq!(None, headers.next().await); + } + + #[tokio::test] + async fn finalized_headers_come_from_subscription_and_provider_if_in_need() { + let all_heights = 3..11; + let heights_in_subscription = vec![3, 4, 6, 10]; + // Consecutive headers will be requested in the reversed order. + let heights_not_in_subscription = vec![5, 9, 8, 7]; + + let provider = MockHeaderProvider(heights_not_in_subscription.into()); + let subscription = MockHeaderSubscription(heights_in_subscription.into()); + let mut headers = FinalizedHeaders::new(provider, subscription); + + for h in all_heights { + assert_eq!(h, headers.next().await.unwrap().number); + } + assert_eq!(None, headers.next().await); + } +} From 58d3bc67b9595d0a26f5abba88fe4941a6e75b4a Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 5 Sep 2022 13:47:15 +0300 Subject: [PATCH 092/348] beefy: initialize voter from genesis and fix initial sync (#11959) * client/beefy: use backend instead of client where possible * client/beefy: initialize voter from genesis Now that we have justifications import, we can drop the "lean beefy" behaviour and start building justifications chain from Genesis with containing all past sessions' mandatory blocks justifications. * client/beefy: walk finality tree_route to catch session changes * client/beefy: fix block import During initial block import blocks are not finalized, so trying to validate and append justifications within block import fails (for initial network sync imported blocks). Changes: - Move justification validation to _after_ `inner.block_import()`, so block is imported in backend and runtime api can be called to get the BEEFY authorities for said block. - Move append-to-backend for imported BEEFY justification to voter, because it already has the required logic to BEEFY-finalize blocks only after GRANDPA finalized them. - Mark voting rounds as concluded when finalizing through imported justifications as well as when finalizing through voting. * client/beefy: valid justifications are one per block number The only way we'd get _different_ _validated_ justifications for same block number is if authorities are double voting, which will be handled later. * client/beefy: process incoming justifs during major sync * client/beefy: correct voter initialization BEEFY voter should resume voting from either: - last BEEFY finalized block, - session start, whichever is closest to head. * client/beefy: test voter initialization * client/beefy: impl review suggestions Signed-off-by: acatangiu --- client/beefy/src/import.rs | 75 ++------ client/beefy/src/round.rs | 28 ++- client/beefy/src/tests.rs | 12 +- client/beefy/src/worker.rs | 374 +++++++++++++++++++++++++++++-------- 4 files changed, 342 insertions(+), 147 deletions(-) diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs index 129484199de89..db4d8bfba7450 100644 --- a/client/beefy/src/import.rs +++ b/client/beefy/src/import.rs @@ -17,12 +17,11 @@ // along with this program. If not, see . 
use beefy_primitives::{BeefyApi, BEEFY_ENGINE_ID}; -use codec::Encode; -use log::error; +use log::debug; use std::{collections::HashMap, sync::Arc}; use sp_api::{ProvideRuntimeApi, TransactionFor}; -use sp_blockchain::{well_known_cache_keys, HeaderBackend}; +use sp_blockchain::well_known_cache_keys; use sp_consensus::Error as ConsensusError; use sp_runtime::{ generic::BlockId, @@ -97,29 +96,6 @@ where decode_and_verify_finality_proof::(&encoded[..], number, &validator_set) } - - /// Import BEEFY justification: Send it to worker for processing and also append it to backend. - /// - /// This function assumes: - /// - `justification` is verified and valid, - /// - the block referred by `justification` has been imported _and_ finalized. - fn import_beefy_justification_unchecked( - &self, - number: NumberFor, - justification: BeefyVersionedFinalityProof, - ) { - // Append the justification to the block in the backend. - if let Err(e) = self.backend.append_justification( - BlockId::Number(number), - (BEEFY_ENGINE_ID, justification.encode()), - ) { - error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, justification); - } - // Send the justification to the BEEFY voter for processing. - self.justification_sender - .notify(|| Ok::<_, ()>(justification)) - .expect("forwards closure result; the closure always returns Ok; qed."); - } } #[async_trait::async_trait] @@ -147,42 +123,31 @@ where let hash = block.post_hash(); let number = *block.header.number(); - let beefy_proof = block - .justifications - .as_mut() - .and_then(|just| { - let decoded = just - .get(BEEFY_ENGINE_ID) - .map(|encoded| self.decode_and_verify(encoded, number, hash)); - // Remove BEEFY justification from the list before giving to `inner`; - // we will append it to backend ourselves at the end if all goes well. - just.remove(BEEFY_ENGINE_ID); - decoded - }) - .transpose() - .unwrap_or(None); + let beefy_encoded = block.justifications.as_mut().and_then(|just| { + let encoded = just.get(BEEFY_ENGINE_ID).cloned(); + // Remove BEEFY justification from the list before giving to `inner`; we send it to the + // voter (beefy-gadget) and it will append it to the backend after block is finalized. + just.remove(BEEFY_ENGINE_ID); + encoded + }); // Run inner block import. let inner_import_result = self.inner.import_block(block, new_cache).await?; - match (beefy_proof, &inner_import_result) { - (Some(proof), ImportResult::Imported(_)) => { - let status = self.backend.blockchain().info(); - if number <= status.finalized_number && - Some(hash) == - self.backend - .blockchain() - .hash(number) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - { + match (beefy_encoded, &inner_import_result) { + (Some(encoded), ImportResult::Imported(_)) => { + if let Ok(proof) = self.decode_and_verify(&encoded, number, hash) { // The proof is valid and the block is imported and final, we can import. - self.import_beefy_justification_unchecked(number, proof); + debug!(target: "beefy", "🥩 import justif {:?} for block number {:?}.", proof, number); + // Send the justification to the BEEFY voter for processing. 
+ self.justification_sender + .notify(|| Ok::<_, ()>(proof)) + .expect("forwards closure result; the closure always returns Ok; qed."); } else { - error!( + debug!( target: "beefy", - "🥩 Cannot import justification: {:?} for, not yet final, block number {:?}", - proof, - number, + "🥩 error decoding justification: {:?} for imported block {:?}", + encoded, number, ); } }, diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index 762a8f7e5d544..c96613eb38a95 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -147,13 +147,8 @@ where trace!(target: "beefy", "🥩 Round #{} done: {}", round.1, done); if done { - // remove this and older (now stale) rounds let signatures = self.rounds.remove(round)?.votes; - self.rounds.retain(|&(_, number), _| number > round.1); - self.mandatory_done = self.mandatory_done || round.1 == self.session_start; - self.best_done = self.best_done.max(Some(round.1)); - debug!(target: "beefy", "🥩 Concluded round #{}", round.1); - + self.conclude(round.1); Some( self.validators() .iter() @@ -165,9 +160,12 @@ where } } - #[cfg(test)] - pub(crate) fn test_set_mandatory_done(&mut self, done: bool) { - self.mandatory_done = done; + pub(crate) fn conclude(&mut self, round_num: NumberFor) { + // Remove this and older (now stale) rounds. + self.rounds.retain(|&(_, number), _| number > round_num); + self.mandatory_done = self.mandatory_done || round_num == self.session_start; + self.best_done = self.best_done.max(Some(round_num)); + debug!(target: "beefy", "🥩 Concluded round #{}", round_num); } } @@ -178,9 +176,19 @@ mod tests { use beefy_primitives::{crypto::Public, ValidatorSet}; - use super::{threshold, RoundTracker, Rounds}; + use super::{threshold, Block as BlockT, Hash, RoundTracker, Rounds}; use crate::keystore::tests::Keyring; + impl Rounds + where + P: Ord + Hash + Clone, + B: BlockT, + { + pub(crate) fn test_set_mandatory_done(&mut self, done: bool) { + self.mandatory_done = done; + } + } + #[test] fn round_tracker() { let mut rt = RoundTracker::default(); diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index f0257d179cb33..e8d32fe3e8127 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -145,7 +145,7 @@ impl BeefyTestNet { }) } - pub(crate) fn generate_blocks( + pub(crate) fn generate_blocks_and_sync( &mut self, count: usize, session_length: u64, @@ -168,6 +168,7 @@ impl BeefyTestNet { block }); + self.block_until_sync(); } } @@ -528,8 +529,7 @@ fn beefy_finalizing_blocks() { runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); // push 42 blocks including `AuthorityChange` digests every 10 blocks. - net.generate_blocks(42, session_len, &validator_set, true); - net.block_until_sync(); + net.generate_blocks_and_sync(42, session_len, &validator_set, true); let net = Arc::new(Mutex::new(net)); @@ -567,8 +567,7 @@ fn lagging_validators() { runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); // push 62 blocks including `AuthorityChange` digests every 30 blocks. 
- net.generate_blocks(62, session_len, &validator_set, true); - net.block_until_sync(); + net.generate_blocks_and_sync(62, session_len, &validator_set, true); let net = Arc::new(Mutex::new(net)); @@ -644,8 +643,7 @@ fn correct_beefy_payload() { runtime.spawn(initialize_beefy(&mut net, bad_peers, min_block_delta)); // push 10 blocks - net.generate_blocks(12, session_len, &validator_set, false); - net.block_until_sync(); + net.generate_blocks_and_sync(12, session_len, &validator_set, false); let net = Arc::new(Mutex::new(net)); // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 9f1938fa91c33..9f54a300472de 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -27,11 +27,12 @@ use codec::{Codec, Decode, Encode}; use futures::StreamExt; use log::{debug, error, info, log_enabled, trace, warn}; -use sc_client_api::{Backend, FinalityNotification}; +use sc_client_api::{Backend, FinalityNotification, HeaderBackend}; use sc_network_gossip::GossipEngine; use sp_api::{BlockId, ProvideRuntimeApi}; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; +use sp_blockchain::Backend as BlockchainBackend; use sp_consensus::SyncOracle; use sp_mmr_primitives::MmrApi; use sp_runtime::{ @@ -212,7 +213,7 @@ pub(crate) struct BeefyWorker { /// Buffer holding votes for future processing. pending_votes: BTreeMap, Vec, AuthorityId, Signature>>>, /// Buffer holding justifications for future processing. - pending_justifications: BTreeMap, Vec>>, + pending_justifications: BTreeMap, BeefyVersionedFinalityProof>, /// Chooses which incoming votes to accept and which votes to generate. voting_oracle: VoterOracle, } @@ -246,8 +247,9 @@ where min_block_delta, } = worker_params; - let last_finalized_header = client - .expect_header(BlockId::number(client.info().finalized_number)) + let last_finalized_header = backend + .blockchain() + .expect_header(BlockId::number(backend.blockchain().info().finalized_number)) .expect("latest block always has header available; qed."); BeefyWorker { @@ -313,9 +315,8 @@ where new_session_start: NumberFor, ) { debug!(target: "beefy", "🥩 New active validator set: {:?}", validator_set); - metric_set!(self, beefy_validator_set_id, validator_set.id()); - // BEEFY should produce the mandatory block of each session. + // BEEFY should finalize a mandatory block during each session. if let Some(active_session) = self.voting_oracle.rounds_mut() { if !active_session.mandatory_done() { debug!( @@ -334,7 +335,12 @@ where let id = validator_set.id(); self.voting_oracle.add_session(Rounds::new(new_session_start, validator_set)); - info!(target: "beefy", "🥩 New Rounds for validator set id: {:?} with session_start {:?}", id, new_session_start); + metric_set!(self, beefy_validator_set_id, id); + info!( + target: "beefy", + "🥩 New Rounds for validator set id: {:?} with session_start {:?}", + id, new_session_start + ); } fn handle_finality_notification(&mut self, notification: &FinalityNotification) { @@ -345,11 +351,24 @@ where // update best GRANDPA finalized block we have seen self.best_grandpa_block_header = header.clone(); - // Check for and enqueue potential new session. - if let Some(new_validator_set) = find_authorities_change::(header) { - self.init_session_at(new_validator_set, *header.number()); - // TODO: when adding SYNC protocol, fire up a request for justification for this - // mandatory block here. + // Check all (newly) finalized blocks for new session(s). 
+ let backend = self.backend.clone(); + for header in notification + .tree_route + .iter() + .map(|hash| { + backend + .blockchain() + .expect_header(BlockId::hash(*hash)) + .expect("just finalized block should be available; qed.") + }) + .chain(std::iter::once(header.clone())) + { + if let Some(new_validator_set) = find_authorities_change::(&header) { + self.init_session_at(new_validator_set, *header.number()); + // TODO (grandpa-bridge-gadget/issues/20): when adding SYNC protocol, + // fire up a request for justification for this mandatory block here. + } } } } @@ -389,10 +408,10 @@ where let block_num = signed_commitment.commitment.block_number; let best_grandpa = *self.best_grandpa_block_header.number(); match self.voting_oracle.triage_round(block_num, best_grandpa)? { - RoundAction::Process => self.finalize(justification), + RoundAction::Process => self.finalize(justification)?, RoundAction::Enqueue => { debug!(target: "beefy", "🥩 Buffer justification for round: {:?}.", block_num); - self.pending_justifications.entry(block_num).or_default().push(justification) + self.pending_justifications.entry(block_num).or_insert(justification); }, RoundAction::Drop => (), }; @@ -427,15 +446,8 @@ where info!(target: "beefy", "🥩 Round #{} concluded, finality_proof: {:?}.", round.1, finality_proof); - if let Err(e) = self.backend.append_justification( - BlockId::Number(block_num), - (BEEFY_ENGINE_ID, finality_proof.clone().encode()), - ) { - debug!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof); - } - // We created the `finality_proof` and know to be valid. - self.finalize(finality_proof); + self.finalize(finality_proof)?; } } Ok(()) @@ -447,19 +459,29 @@ where /// 3. Send best block hash and `finality_proof` to RPC worker. /// /// Expects `finality proof` to be valid. - fn finalize(&mut self, finality_proof: BeefyVersionedFinalityProof) { + fn finalize(&mut self, finality_proof: BeefyVersionedFinalityProof) -> Result<(), Error> { + let block_num = match finality_proof { + VersionedFinalityProof::V1(ref sc) => sc.commitment.block_number, + }; + + // Conclude voting round for this block. + self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?.conclude(block_num); // Prune any now "finalized" sessions from queue. self.voting_oracle.try_prune(); - let signed_commitment = match finality_proof { - VersionedFinalityProof::V1(ref sc) => sc, - }; - let block_num = signed_commitment.commitment.block_number; + if Some(block_num) > self.best_beefy_block { // Set new best BEEFY block number. self.best_beefy_block = Some(block_num); metric_set!(self, beefy_best_block, block_num); - self.client.hash(block_num).ok().flatten().map(|hash| { + if let Err(e) = self.backend.append_justification( + BlockId::Number(block_num), + (BEEFY_ENGINE_ID, finality_proof.clone().encode()), + ) { + error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof); + } + + self.backend.blockchain().hash(block_num).ok().flatten().map(|hash| { self.links .to_rpc_best_block_sender .notify(|| Ok::<_, ()>(hash)) @@ -473,6 +495,7 @@ where } else { debug!(target: "beefy", "🥩 Can't set best beefy to older: {}", block_num); } + Ok(()) } /// Handle previously buffered justifications and votes that now land in the voting interval. 
@@ -481,10 +504,10 @@ where let _ph = PhantomData::::default(); fn to_process_for( - pending: &mut BTreeMap, Vec>, + pending: &mut BTreeMap, T>, (start, end): (NumberFor, NumberFor), _: PhantomData, - ) -> BTreeMap, Vec> { + ) -> BTreeMap, T> { // These are still pending. let still_pending = pending.split_off(&end.saturating_add(1u32.into())); // These can be processed. @@ -494,21 +517,23 @@ where // Return ones to process. to_handle } + // Interval of blocks for which we can process justifications and votes right now. + let mut interval = self.voting_oracle.accepted_interval(best_grandpa)?; // Process pending justifications. - let interval = self.voting_oracle.accepted_interval(best_grandpa)?; if !self.pending_justifications.is_empty() { let justifs_to_handle = to_process_for(&mut self.pending_justifications, interval, _ph); - for (num, justifications) in justifs_to_handle.into_iter() { - debug!(target: "beefy", "🥩 Handle buffered justifications for: {:?}.", num); - for justif in justifications.into_iter() { - self.finalize(justif); + for (num, justification) in justifs_to_handle.into_iter() { + debug!(target: "beefy", "🥩 Handle buffered justification for: {:?}.", num); + if let Err(err) = self.finalize(justification) { + error!(target: "beefy", "🥩 Error finalizing block: {}", err); } } + // Possibly new interval after processing justifications. + interval = self.voting_oracle.accepted_interval(best_grandpa)?; } // Process pending votes. - let interval = self.voting_oracle.accepted_interval(best_grandpa)?; if !self.pending_votes.is_empty() { let votes_to_handle = to_process_for(&mut self.pending_votes, interval, _ph); for (num, votes) in votes_to_handle.into_iter() { @@ -547,17 +572,20 @@ where debug!(target: "beefy", "🥩 Try voting on {}", target_number); // Most of the time we get here, `target` is actually `best_grandpa`, - // avoid asking `client` for header in that case. + // avoid getting header from backend in that case. let target_header = if target_number == *self.best_grandpa_block_header.number() { self.best_grandpa_block_header.clone() } else { - self.client.expect_header(BlockId::Number(target_number)).map_err(|err| { - let err_msg = format!( - "Couldn't get header for block #{:?} (error: {:?}), skipping vote..", - target_number, err - ); - Error::Backend(err_msg) - })? + self.backend + .blockchain() + .expect_header(BlockId::Number(target_number)) + .map_err(|err| { + let err_msg = format!( + "Couldn't get header for block #{:?} (error: {:?}), skipping vote..", + target_number, err + ); + Error::Backend(err_msg) + })? }; let target_hash = target_header.hash(); @@ -623,7 +651,78 @@ where Ok(()) } + /// Initialize BEEFY voter state. + /// + /// Should be called only once during worker initialization with latest GRANDPA finalized + /// `header` and the validator set `active` at that point. + fn initialize_voter(&mut self, header: &B::Header, active: ValidatorSet) { + // just a sanity check. + if let Some(rounds) = self.voting_oracle.rounds_mut() { + error!( + target: "beefy", + "🥩 Voting session already initialized at: {:?}, validator set id {}.", + rounds.session_start(), + rounds.validator_set_id(), + ); + return + } + + self.best_grandpa_block_header = header.clone(); + if active.id() == GENESIS_AUTHORITY_SET_ID { + // When starting from genesis, there is no session boundary digest. + // Just initialize `rounds` to Block #1 as BEEFY mandatory block. 
+ info!(target: "beefy", "🥩 Initialize voting session at genesis, block 1."); + self.init_session_at(active, 1u32.into()); + } else { + // TODO (issue #11837): persist local progress to avoid following look-up during init. + let blockchain = self.backend.blockchain(); + let mut header = header.clone(); + + // Walk back the imported blocks and initialize voter either, at the last block with + // a BEEFY justification, or at this session's boundary; voter will resume from there. + loop { + if let Some(true) = blockchain + .justifications(BlockId::hash(header.hash())) + .ok() + .flatten() + .map(|justifs| justifs.get(BEEFY_ENGINE_ID).is_some()) + { + info!( + target: "beefy", + "🥩 Initialize voting session at last BEEFY finalized block: {:?}.", + *header.number() + ); + self.init_session_at(active, *header.number()); + // Mark the round as already finalized. + if let Some(round) = self.voting_oracle.rounds_mut() { + round.conclude(*header.number()); + } + self.best_beefy_block = Some(*header.number()); + break + } + + if let Some(validator_set) = find_authorities_change::(&header) { + info!( + target: "beefy", + "🥩 Initialize voting session at current session boundary: {:?}.", + *header.number() + ); + self.init_session_at(validator_set, *header.number()); + break + } + + // Move up the chain. + header = self + .client + .expect_header(BlockId::Hash(*header.parent_hash())) + // in case of db failure here we want to kill the worker + .expect("db failure, voter going down."); + } + } + } + /// Wait for BEEFY runtime pallet to be available. + /// Should be called only once during worker initialization. async fn wait_for_runtime_pallet(&mut self) { let mut gossip_engine = &mut self.gossip_engine; let mut finality_stream = self.client.finality_notification_stream().fuse(); @@ -635,25 +734,19 @@ where None => break }; let at = BlockId::hash(notif.header.hash()); - if let Some(active) = self.runtime.runtime_api().validator_set(&at).ok().flatten() { - if active.id() == GENESIS_AUTHORITY_SET_ID { - // When starting from genesis, there is no session boundary digest. - // Just initialize `rounds` to Block #1 as BEEFY mandatory block. - self.init_session_at(active, 1u32.into()); - } - // In all other cases, we just go without `rounds` initialized, meaning the - // worker won't vote until it witnesses a session change. - // Once we'll implement 'initial sync' (catch-up), the worker will be able to - // start voting right away. - self.handle_finality_notification(¬if); - if let Err(err) = self.try_to_vote() { - debug!(target: "beefy", "🥩 {}", err); + if let Some(active) = self.runtime.runtime_api().validator_set(&at).ok().flatten() { + self.initialize_voter(¬if.header, active); + if !self.sync_oracle.is_major_syncing() { + if let Err(err) = self.try_to_vote() { + debug!(target: "beefy", "🥩 {}", err); + } + } + // Beefy pallet available and voter initialized. + break + } else { + trace!(target: "beefy", "🥩 Finality notification: {:?}", notif); + debug!(target: "beefy", "🥩 Waiting for BEEFY pallet to become available..."); } - break - } else { - trace!(target: "beefy", "🥩 Finality notification: {:?}", notif); - debug!(target: "beefy", "🥩 Waiting for BEEFY pallet to become available..."); - } }, _ = gossip_engine => { break @@ -668,7 +761,10 @@ where /// which is driven by finality notifications and gossiped votes. 
pub(crate) async fn run(mut self) { info!(target: "beefy", "🥩 run BEEFY worker, best grandpa: #{:?}.", self.best_grandpa_block_header.number()); + let mut block_import_justif = self.links.from_block_import_justif_stream.subscribe().fuse(); + self.wait_for_runtime_pallet().await; + trace!(target: "beefy", "🥩 BEEFY pallet available, starting voter."); let mut finality_notifications = self.client.finality_notification_stream().fuse(); let mut votes = Box::pin( @@ -684,7 +780,6 @@ where }) .fuse(), ); - let mut block_import_justif = self.links.from_block_import_justif_stream.subscribe().fuse(); loop { let mut gossip_engine = &mut self.gossip_engine; @@ -728,17 +823,19 @@ where } } - // Don't bother acting on 'state' changes during major sync. - if !self.sync_oracle.is_major_syncing() { - // Handle pending justifications and/or votes for now GRANDPA finalized blocks. - if let Err(err) = self.try_pending_justif_and_votes() { - debug!(target: "beefy", "🥩 {}", err); - } + // Handle pending justifications and/or votes for now GRANDPA finalized blocks. + if let Err(err) = self.try_pending_justif_and_votes() { + debug!(target: "beefy", "🥩 {}", err); + } + // Don't bother voting during major sync. + if !self.sync_oracle.is_major_syncing() { // There were external events, 'state' is changed, author a vote if needed/possible. if let Err(err) = self.try_to_vote() { debug!(target: "beefy", "🥩 {}", err); } + } else { + debug!(target: "beefy", "🥩 Skipping voting while major syncing."); } } } @@ -845,13 +942,14 @@ pub(crate) mod tests { use futures::{executor::block_on, future::poll_fn, task::Poll}; - use sc_client_api::HeaderBackend; + use sc_client_api::{Backend as BackendT, HeaderBackend}; use sc_network::NetworkService; use sc_network_test::{PeersFullClient, TestNetFactory}; use sp_api::HeaderT; + use sp_blockchain::Backend as BlockchainBackendT; use substrate_test_runtime_client::{ runtime::{Block, Digest, DigestItem, Header, H256}, - Backend, + Backend, ClientExt, }; fn create_beefy_worker( @@ -1166,10 +1264,11 @@ pub(crate) mod tests { } #[test] - fn test_finalize() { + fn should_finalize_correctly() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); let mut net = BeefyTestNet::new(1, 0); + let backend = net.peer(0).client().as_backend(); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); @@ -1198,10 +1297,16 @@ pub(crate) mod tests { let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); let mut finality_proof = finality_proofs.drain(..).next().unwrap(); let justif = create_finality_proof(1); - worker.finalize(justif.clone()); + // create new session at block #1 + worker.voting_oracle.add_session(Rounds::new(1, validator_set.clone())); + // try to finalize block #1 + worker.finalize(justif.clone()).unwrap(); + // verify block finalized assert_eq!(worker.best_beefy_block, Some(1)); block_on(poll_fn(move |cx| { + // unknown hash -> nothing streamed assert_eq!(best_block_stream.poll_next_unpin(cx), Poll::Pending); + // commitment streamed match finality_proof.poll_next_unpin(cx) { // expect justification Poll::Ready(Some(received)) => assert_eq!(received, justif), @@ -1213,10 +1318,20 @@ pub(crate) mod tests { // generate 2 blocks, try again expect success let (mut best_block_streams, _) = get_beefy_streams(&mut net, keys); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); - net.generate_blocks(2, 10, 
&validator_set, false); + net.peer(0).push_blocks(2, false); + // finalize 1 and 2 without justifications + backend.finalize_block(BlockId::number(1), None).unwrap(); + backend.finalize_block(BlockId::number(2), None).unwrap(); let justif = create_finality_proof(2); - worker.finalize(justif); + // create new session at block #2 + worker.voting_oracle.add_session(Rounds::new(2, validator_set)); + worker.finalize(justif).unwrap(); + // verify old session pruned + assert_eq!(worker.voting_oracle.sessions.len(), 1); + // new session starting at #2 is in front + assert_eq!(worker.voting_oracle.rounds_mut().unwrap().session_start(), 2); + // verify block finalized assert_eq!(worker.best_beefy_block, Some(2)); block_on(poll_fn(move |cx| { match best_block_stream.poll_next_unpin(cx) { @@ -1229,6 +1344,10 @@ pub(crate) mod tests { } Poll::Ready(()) })); + + // check BEEFY justifications are also appended to backend + let justifs = backend.blockchain().justifications(BlockId::number(2)).unwrap().unwrap(); + assert!(justifs.get(BEEFY_ENGINE_ID).is_some()) } #[test] @@ -1325,4 +1444,109 @@ pub(crate) mod tests { assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 21); assert_eq!(votes.next().unwrap().first().unwrap().commitment.block_number, 22); } + + #[test] + fn should_initialize_correct_voter() { + let keys = &[Keyring::Alice]; + let validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); + let mut net = BeefyTestNet::new(1, 0); + let backend = net.peer(0).client().as_backend(); + + // push 15 blocks with `AuthorityChange` digests every 10 blocks + net.generate_blocks_and_sync(15, 10, &validator_set, false); + // finalize 13 without justifications + net.peer(0) + .client() + .as_client() + .finalize_block(BlockId::number(13), None) + .unwrap(); + + // Test initialization at session boundary. + { + let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); + + // initialize voter at block 13, expect rounds initialized at session_start = 10 + let header = backend.blockchain().header(BlockId::number(13)).unwrap().unwrap(); + worker.initialize_voter(&header, validator_set.clone()); + + // verify voter initialized with single session starting at block 10 + assert_eq!(worker.voting_oracle.sessions.len(), 1); + let rounds = worker.voting_oracle.rounds_mut().unwrap(); + assert_eq!(rounds.session_start(), 10); + assert_eq!(rounds.validator_set_id(), validator_set.id()); + + // verify next vote target is mandatory block 10 + assert_eq!(worker.best_beefy_block, None); + assert_eq!(*worker.best_grandpa_block_header.number(), 13); + assert_eq!(worker.voting_oracle.voting_target(worker.best_beefy_block, 13), Some(10)); + } + + // Test corner-case where session boundary == last beefy finalized. 
+ { + let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); + + // import/append BEEFY justification for session boundary block 10 + let commitment = Commitment { + payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + block_number: 10, + validator_set_id: validator_set.id(), + }; + let justif = VersionedFinalityProof::<_, Signature>::V1(SignedCommitment { + commitment, + signatures: vec![None], + }); + backend + .append_justification(BlockId::Number(10), (BEEFY_ENGINE_ID, justif.encode())) + .unwrap(); + + // initialize voter at block 13, expect rounds initialized at last beefy finalized 10 + let header = backend.blockchain().header(BlockId::number(13)).unwrap().unwrap(); + worker.initialize_voter(&header, validator_set.clone()); + + // verify voter initialized with single session starting at block 10 + assert_eq!(worker.voting_oracle.sessions.len(), 1); + let rounds = worker.voting_oracle.rounds_mut().unwrap(); + assert_eq!(rounds.session_start(), 10); + assert_eq!(rounds.validator_set_id(), validator_set.id()); + + // verify next vote target is mandatory block 10 + assert_eq!(worker.best_beefy_block, Some(10)); + assert_eq!(*worker.best_grandpa_block_header.number(), 13); + assert_eq!(worker.voting_oracle.voting_target(worker.best_beefy_block, 13), Some(12)); + } + + // Test initialization at last BEEFY finalized. + { + let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); + + // import/append BEEFY justification for block 12 + let commitment = Commitment { + payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + block_number: 12, + validator_set_id: validator_set.id(), + }; + let justif = VersionedFinalityProof::<_, Signature>::V1(SignedCommitment { + commitment, + signatures: vec![None], + }); + backend + .append_justification(BlockId::Number(12), (BEEFY_ENGINE_ID, justif.encode())) + .unwrap(); + + // initialize voter at block 13, expect rounds initialized at last beefy finalized 12 + let header = backend.blockchain().header(BlockId::number(13)).unwrap().unwrap(); + worker.initialize_voter(&header, validator_set.clone()); + + // verify voter initialized with single session starting at block 12 + assert_eq!(worker.voting_oracle.sessions.len(), 1); + let rounds = worker.voting_oracle.rounds_mut().unwrap(); + assert_eq!(rounds.session_start(), 12); + assert_eq!(rounds.validator_set_id(), validator_set.id()); + + // verify next vote target is 13 + assert_eq!(worker.best_beefy_block, Some(12)); + assert_eq!(*worker.best_grandpa_block_header.number(), 13); + assert_eq!(worker.voting_oracle.voting_target(worker.best_beefy_block, 13), Some(13)); + } + } } From 6d84315348d1fca9ca59454b9f37411c80e05ab4 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Mon, 5 Sep 2022 15:04:14 +0100 Subject: [PATCH 093/348] drive by spelling fixes (#12175) --- frame/assets/src/types.rs | 2 +- frame/support/src/traits/metadata.rs | 2 +- frame/support/src/traits/tokens/fungibles/approvals.rs | 2 +- frame/support/src/traits/tokens/fungibles/balanced.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/frame/assets/src/types.rs b/frame/assets/src/types.rs index 2e8a1f911fb0f..677fc5847c614 100644 --- a/frame/assets/src/types.rs +++ b/frame/assets/src/types.rs @@ -165,7 +165,7 @@ pub trait FrozenBalance { /// /// Under normal behaviour, the account balance should not go below the sum of this (if `Some`) /// and the asset's minimum balance. However, the account balance may reasonably begin below - /// this sum (e.g. 
if less than the sum had ever been transfered into the account). + /// this sum (e.g. if less than the sum had ever been transferred into the account). /// /// In special cases (privileged intervention) the account balance may also go below the sum. /// diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index b0dd5bd5160b4..42f2d759a597d 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -71,7 +71,7 @@ pub trait PalletsInfoAccess { /// /// You probably don't want this function but `infos()` instead. fn count() -> usize { - // for backwards compatibility with XCM-3, Mark is deprecated. + // for backwards compatibility with XCM-3, Mark as deprecated. Self::infos().len() } diff --git a/frame/support/src/traits/tokens/fungibles/approvals.rs b/frame/support/src/traits/tokens/fungibles/approvals.rs index 7a08f11cf042a..48929955d9497 100644 --- a/frame/support/src/traits/tokens/fungibles/approvals.rs +++ b/frame/support/src/traits/tokens/fungibles/approvals.rs @@ -24,7 +24,7 @@ pub trait Inspect: super::Inspect { } pub trait Mutate: Inspect { - // Aprove a delegate account to spend an amount of tokens owned by an owner + // Approve a delegate account to spend an amount of tokens owned by an owner fn approve( asset: Self::AssetId, owner: &AccountId, diff --git a/frame/support/src/traits/tokens/fungibles/balanced.rs b/frame/support/src/traits/tokens/fungibles/balanced.rs index a75832e4c440f..a870168e4db91 100644 --- a/frame/support/src/traits/tokens/fungibles/balanced.rs +++ b/frame/support/src/traits/tokens/fungibles/balanced.rs @@ -163,7 +163,7 @@ pub trait Balanced: Inspect { /// **WARNING** /// Do not use this directly unless you want trouble, since it allows you to alter account balances /// without keeping the issuance up to date. It has no safeguards against accidentally creating -/// token imbalances in your system leading to accidental imflation or deflation. It's really just +/// token imbalances in your system leading to accidental inflation or deflation. It's really just /// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to /// use. pub trait Unbalanced: Inspect { From 19e628072cfb9b9651767b50bfaff4841b4141a3 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Mon, 5 Sep 2022 17:06:20 +0300 Subject: [PATCH 094/348] Enforce blocks response limit (#12146) --- client/network/sync/src/block_request_handler.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index f4f10ac73c8d9..cc61be2b57256 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -405,11 +405,20 @@ where indexed_body, }; - total_size += block_data.body.iter().map(|ex| ex.len()).sum::(); - total_size += block_data.indexed_body.iter().map(|ex| ex.len()).sum::(); + let new_total_size = total_size + + block_data.body.iter().map(|ex| ex.len()).sum::() + + block_data.indexed_body.iter().map(|ex| ex.len()).sum::(); + + // Send at least one block, but make sure to not exceed the limit. 
+ if !blocks.is_empty() && new_total_size > MAX_BODY_BYTES { + break + } + + total_size = new_total_size; + blocks.push(block_data); - if blocks.len() >= max_blocks as usize || total_size > MAX_BODY_BYTES { + if blocks.len() >= max_blocks as usize { break } From fd399c15c33ed8a7215d711fcf93c524a7473023 Mon Sep 17 00:00:00 2001 From: Stephen Shelton Date: Mon, 5 Sep 2022 08:37:25 -0600 Subject: [PATCH 095/348] Impl transaction-payment GenesisConfig with NextFeeMultplier support (#12177) * Impl GenesisConfig with NextFeeMultplier support * Update lib.rs * Use documented const * Unit test multiplier genesis * fmt Co-authored-by: Shawn Tabrizi --- frame/transaction-payment/src/lib.rs | 54 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 5 deletions(-) diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 8fe58dbf03d2b..21d516cda1aa6 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -241,6 +241,10 @@ impl Default for Releases { } } +/// Default value for NextFeeMultiplier. This is used in genesis and is also used in +/// NextFeeMultiplierOnEmpty() to provide a value when none exists in storage. +const MULTIPLIER_DEFAULT_VALUE: Multiplier = Multiplier::from_u32(1); + #[frame_support::pallet] pub mod pallet { use super::*; @@ -300,7 +304,7 @@ pub mod pallet { #[pallet::type_value] pub fn NextFeeMultiplierOnEmpty() -> Multiplier { - Multiplier::saturating_from_integer(1) + MULTIPLIER_DEFAULT_VALUE } #[pallet::storage] @@ -312,12 +316,14 @@ pub mod pallet { pub(super) type StorageVersion = StorageValue<_, Releases, ValueQuery>; #[pallet::genesis_config] - pub struct GenesisConfig; + pub struct GenesisConfig { + pub multiplier: Multiplier, + } #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { - Self + Self { multiplier: MULTIPLIER_DEFAULT_VALUE } } } @@ -325,6 +331,7 @@ pub mod pallet { impl GenesisBuild for GenesisConfig { fn build(&self) { StorageVersion::::put(Releases::V2); + NextFeeMultiplier::::put(self.multiplier); } } @@ -816,7 +823,7 @@ mod tests { use frame_support::{ assert_noop, assert_ok, parameter_types, - traits::{ConstU32, ConstU64, Currency, Imbalance, OnUnbalanced}, + traits::{ConstU32, ConstU64, Currency, GenesisBuild, Imbalance, OnUnbalanced}, weights::{ DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo, Weight, WeightToFee as WeightToFeeT, @@ -958,11 +965,18 @@ mod tests { base_weight: Weight, byte_fee: u64, weight_to_fee: u64, + initial_multiplier: Option, } impl Default for ExtBuilder { fn default() -> Self { - Self { balance_factor: 1, base_weight: Weight::zero(), byte_fee: 1, weight_to_fee: 1 } + Self { + balance_factor: 1, + base_weight: Weight::zero(), + byte_fee: 1, + weight_to_fee: 1, + initial_multiplier: None, + } } } @@ -983,6 +997,10 @@ mod tests { self.balance_factor = factor; self } + pub fn with_initial_multiplier(mut self, multiplier: Multiplier) -> Self { + self.initial_multiplier = Some(multiplier); + self + } fn set_constants(&self) { EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow_mut() = self.base_weight); TRANSACTION_BYTE_FEE.with(|v| *v.borrow_mut() = self.byte_fee); @@ -1007,6 +1025,12 @@ mod tests { } .assimilate_storage(&mut t) .unwrap(); + + if let Some(multiplier) = self.initial_multiplier { + let genesis = pallet::GenesisConfig { multiplier }; + GenesisBuild::::assimilate_storage(&genesis, &mut t).unwrap(); + } + t.into() } } @@ -1720,4 +1744,24 @@ mod tests { assert_eq!(refund_based_fee, actual_fee); }); } + + 
#[test] + fn genesis_config_works() { + ExtBuilder::default() + .with_initial_multiplier(Multiplier::from_u32(100)) + .build() + .execute_with(|| { + assert_eq!( + >::get(), + Multiplier::saturating_from_integer(100) + ); + }); + } + + #[test] + fn genesis_default_works() { + ExtBuilder::default().build().execute_with(|| { + assert_eq!(>::get(), Multiplier::saturating_from_integer(1)); + }); + } } From 5f18a8b729894d0dcc34d6a7c269d62aa593d910 Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Mon, 5 Sep 2022 17:08:50 +0200 Subject: [PATCH 096/348] Alliance pallet: split force_set_members call (#12179) * Alliance pallet: split force_set_members call * use counts for event * ".git/.scripts/bench-bot.sh" pallet dev pallet_alliance Co-authored-by: command-bot <> --- bin/node/runtime/src/impls.rs | 12 +- frame/alliance/README.md | 3 +- frame/alliance/src/benchmarking.rs | 175 +++++----------- frame/alliance/src/lib.rs | 142 ++++++------- frame/alliance/src/mock.rs | 21 +- frame/alliance/src/tests.rs | 186 +++++++---------- frame/alliance/src/types.rs | 22 +- frame/alliance/src/weights.rs | 309 +++++++++++++++-------------- 8 files changed, 358 insertions(+), 512 deletions(-) diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 0d0f864097af5..cedc945b0af7e 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -18,8 +18,7 @@ //! Some configurable implementations as associated type for the substrate runtime. use crate::{ - AccountId, AllianceCollective, AllianceMotion, Assets, Authorship, Balances, Call, Hash, - NegativeImbalance, Runtime, + AccountId, AllianceMotion, Assets, Authorship, Balances, Call, Hash, NegativeImbalance, Runtime, }; use frame_support::{ pallet_prelude::*, @@ -113,15 +112,6 @@ impl ProposalProvider for AllianceProposalProvider { fn proposal_of(proposal_hash: Hash) -> Option { AllianceMotion::proposal_of(proposal_hash) } - - fn proposals() -> Vec { - AllianceMotion::proposals().into_inner() - } - - fn proposals_count() -> u32 { - pallet_collective::Proposals::::decode_len().unwrap_or(0) - as u32 - } } #[cfg(test)] diff --git a/frame/alliance/README.md b/frame/alliance/README.md index 3fcd40b9527ce..dff9e0a47aa2c 100644 --- a/frame/alliance/README.md +++ b/frame/alliance/README.md @@ -66,4 +66,5 @@ to update the Alliance's rule and make announcements. #### Root Calls -- `force_set_members` - Set the members via chain governance. +- `init_members` - Initialize the Alliance, onboard founders, fellows, and allies. +- `disband` - Disband the Alliance, remove all active members and unreserve deposits. diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index d3917b9d2ed95..de2fc392db8f1 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -19,6 +19,7 @@ use sp_runtime::traits::{Bounded, Hash, StaticLookup}; use sp_std::{ + cmp, convert::{TryFrom, TryInto}, mem::size_of, prelude::*, @@ -38,11 +39,6 @@ fn assert_last_event, I: 'static>(generic_event: >:: frame_system::Pallet::::assert_last_event(generic_event.into()); } -fn assert_prev_event, I: 'static>(generic_event: >::Event) { - let events = frame_system::Pallet::::events(); - assert_eq!(events.get(events.len() - 2).expect("events expected").event, generic_event.into()); -} - fn cid(input: impl AsRef<[u8]>) -> Cid { use sha2::{Digest, Sha256}; let mut hasher = Sha256::new(); @@ -126,12 +122,11 @@ benchmarks_instance_pallet! { let proposer = founders[0].clone(); let fellows = (0 .. 
y).map(fellow::).collect::>(); - Alliance::::force_set_members( + Alliance::::init_members( SystemOrigin::Root.into(), founders, fellows, vec![], - Default::default(), )?; let threshold = m; @@ -178,12 +173,11 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::force_set_members( + Alliance::::init_members( SystemOrigin::Root.into(), founders, fellows, vec![], - Default::default(), )?; // Threshold is 1 less than the number of members so that one person can vote nay @@ -247,12 +241,11 @@ benchmarks_instance_pallet! { let founders = (0 .. m).map(founder::).collect::>(); let vetor = founders[0].clone(); - Alliance::::force_set_members( + Alliance::::init_members( SystemOrigin::Root.into(), founders, vec![], vec![], - Default::default(), )?; // Threshold is one less than total members so that two nays will disapprove the vote @@ -299,12 +292,11 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::force_set_members( + Alliance::::init_members( SystemOrigin::Root.into(), founders, fellows, vec![], - Default::default(), )?; let proposer = members[0].clone(); @@ -385,12 +377,11 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::force_set_members( + Alliance::::init_members( SystemOrigin::Root.into(), founders, fellows, vec![], - Default::default(), )?; let proposer = members[0].clone(); @@ -477,12 +468,11 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::force_set_members( + Alliance::::init_members( SystemOrigin::Root.into(), founders, fellows, vec![], - Default::default(), )?; let proposer = members[0].clone(); @@ -554,12 +544,11 @@ benchmarks_instance_pallet! { members.extend(founders.clone()); members.extend(fellows.clone()); - Alliance::::force_set_members( + Alliance::::init_members( SystemOrigin::Root.into(), founders, fellows, vec![], - Default::default(), )?; let proposer = members[0].clone(); @@ -615,124 +604,21 @@ benchmarks_instance_pallet! { assert_eq!(T::ProposalProvider::proposal_of(last_hash), None); } - force_set_members { + init_members { // at least 1 founders let x in 1 .. T::MaxFounders::get(); let y in 0 .. T::MaxFellows::get(); let z in 0 .. T::MaxAllies::get(); - let p in 0 .. T::MaxProposals::get(); - let c in 0 .. T::MaxFounders::get() + T::MaxFellows::get(); - let m in 0 .. T::MaxAllies::get(); let mut founders = (0 .. x).map(founder::).collect::>(); - let mut proposer = founders[0].clone(); let mut fellows = (0 .. y).map(fellow::).collect::>(); let mut allies = (0 .. z).map(ally::).collect::>(); - let witness = ForceSetWitness{ - proposals: p, - voting_members: c, - ally_members: m, - }; - let mut old_fellows: Vec = Vec::new(); - let mut old_allies: Vec = Vec::new(); - - let mut cc = c; - if (m > 0 && c == 0) || (p > 0 && c == 0) { - // if total member count `m` greater than zero, - // one voting member required to set non voting members. - // OR - // if some proposals, - // one voting member required to create proposal. - cc = 1; - } - - // setting the Alliance to disband on the benchmark call - if cc > 0 { - old_fellows = (0..cc).map(fellow::).collect::>(); - old_allies = (0..m).map(ally::).collect::>(); - - // used later for proposal creation. - proposer = old_fellows[0].clone(); - - // set alliance before benchmarked call to include alliance disband. 
- Alliance::::force_set_members( - SystemOrigin::Root.into(), - vec![old_fellows[0].clone()], - vec![], - vec![], - Default::default(), - )?; - - // using `join_alliance` instead `force_set_members` to join alliance - // to have deposit reserved and bench the worst case scenario. - for fellow in old_fellows.iter().skip(1) { - Alliance::::join_alliance( - SystemOrigin::Signed(fellow.clone()).into() - ).unwrap(); - } - - // elevating allies to have desired voting members count. - for fellow in old_fellows.iter().skip(1) { - Alliance::::elevate_ally( - T::MembershipManager::successful_origin(), - T::Lookup::unlookup(fellow.clone()) - ).unwrap(); - } - - for ally in old_allies.iter() { - Alliance::::join_alliance( - SystemOrigin::Signed(ally.clone()).into() - ).unwrap(); - } - - assert_eq!(Alliance::::voting_members_count(), cc); - assert_eq!(Alliance::::ally_members_count(), m); - } - // adding proposals to veto on the Alliance reset - for i in 0..p { - let threshold = cc; - let bytes_in_storage = i + size_of::() as u32 + 32; - // proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = - AllianceCall::::set_rule { rule: rule(vec![i as u8; i as usize]) }.into(); - Alliance::::propose( - SystemOrigin::Signed(proposer.clone()).into(), - threshold, - Box::new(proposal), - bytes_in_storage, - )?; - } - let mut proposals = T::ProposalProvider::proposals(); - if c != cc { - // removing a helper founder from the alliance which should not be - // included in the actual benchmark call. - Alliance::::give_retirement_notice( - SystemOrigin::Signed(proposer.clone()).into() - )?; - System::::set_block_number( - System::::block_number() + T::RetirementPeriod::get() - ); - Alliance::::retire( - SystemOrigin::Signed(proposer.clone()).into() - )?; - // remove a helper founder from fellows list. - old_fellows.remove(0); - } - }: _(SystemOrigin::Root, founders.clone(), fellows.clone(), allies.clone(), witness) + }: _(SystemOrigin::Root, founders.clone(), fellows.clone(), allies.clone()) verify { founders.sort(); fellows.sort(); allies.sort(); - if !witness.is_zero() { - old_fellows.append(&mut old_allies); - old_fellows.sort(); - proposals.sort(); - assert_prev_event::(Event::AllianceDisbanded { - members: old_fellows, - proposals: proposals, - }.into()); - } assert_last_event::(Event::MembersInitialized { founders: founders.clone(), fellows: fellows.clone(), @@ -743,6 +629,47 @@ benchmarks_instance_pallet! { assert_eq!(Alliance::::members(MemberRole::Ally), allies); } + disband { + // at least 1 founders + let x in 1 .. T::MaxFounders::get() + T::MaxFellows::get(); + let y in 0 .. T::MaxAllies::get(); + let z in 0 .. T::MaxMembersCount::get() / 2; + + let voting_members = (0 .. x).map(founder::).collect::>(); + let allies = (0 .. 
y).map(ally::).collect::>(); + let witness = DisbandWitness{ + voting_members: x, + ally_members: y, + }; + + // setting the Alliance to disband on the benchmark call + Alliance::::init_members( + SystemOrigin::Root.into(), + voting_members.clone(), + vec![], + allies.clone(), + )?; + + // reserve deposits + let deposit = T::AllyDeposit::get(); + for member in voting_members.iter().chain(allies.iter()).take(z as usize) { + T::Currency::reserve(&member, deposit)?; + >::insert(&member, deposit); + } + + assert_eq!(Alliance::::voting_members_count(), x); + assert_eq!(Alliance::::ally_members_count(), y); + }: _(SystemOrigin::Root, witness) + verify { + assert_last_event::(Event::AllianceDisbanded { + voting_members: x, + ally_members: y, + unreserved: cmp::min(z, x + y), + }.into()); + + assert!(!Alliance::::is_initialized()); + } + set_rule { set_members::(); diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index b60077c4ef99e..8f39b6b51cabd 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -84,7 +84,8 @@ //! //! #### Root Calls //! -//! - `force_set_members` - Set the members via chain governance. +//! - `init_members` - Initialize the Alliance, onboard founders, fellows, and allies. +//! - `disband` - Disband the Alliance, remove all active members and unreserve deposits. #![cfg_attr(not(feature = "std"), no_std)] @@ -204,14 +205,6 @@ pub trait ProposalProvider { /// Return a proposal of the given hash. fn proposal_of(proposal_hash: Hash) -> Option; - - /// Return hashes of all active proposals. - fn proposals() -> Vec; - - // Return count of all active proposals. - // - // Used to check witness data for an extrinsic. - fn proposals_count() -> u32; } /// The various roles that a member can hold. @@ -428,8 +421,8 @@ pub mod pallet { UnscrupulousItemAdded { items: Vec> }, /// Accounts or websites have been removed from the list of unscrupulous items. UnscrupulousItemRemoved { items: Vec> }, - /// Alliance disbanded. - AllianceDisbanded { members: Vec, proposals: Vec }, + /// Alliance disbanded. Includes number deleted members and unreserved deposits. + AllianceDisbanded { voting_members: u32, ally_members: u32, unreserved: u32 }, } #[pallet::genesis_config] @@ -647,81 +640,25 @@ pub mod pallet { Ok(info.into()) } - /// Initialize the founders, fellows, and allies. - /// Founders must be provided to initialize the Alliance. - /// - /// Provide witness data to disband current Alliance before initializing new. - /// Alliance must be empty or disband first to initialize new. - /// - /// Alliance is only disbanded if new member set is not provided. + /// Initialize the Alliance, onboard founders, fellows, and allies. /// + /// Founders must be not empty. + /// The Alliance must be empty. /// Must be called by the Root origin. - #[pallet::weight(T::WeightInfo::force_set_members( - T::MaxFounders::get(), - T::MaxFellows::get(), - T::MaxAllies::get(), - witness.proposals, - witness.voting_members, - witness.ally_members, + #[pallet::weight(T::WeightInfo::init_members( + founders.len() as u32, + fellows.len() as u32, + allies.len() as u32, ))] - pub fn force_set_members( + pub fn init_members( origin: OriginFor, founders: Vec, fellows: Vec, allies: Vec, - witness: ForceSetWitness, ) -> DispatchResult { ensure_root(origin)?; - if !witness.is_zero() { - // Disband Alliance by removing all members and returning deposits. - // Veto and remove all active proposals to avoid any unexpected behavior from - // actionable items managed outside of the pallet. 
Items managed within the pallet, - // like `UnscrupulousWebsites`, are left for the new Alliance to clean up or keep. - - ensure!( - T::ProposalProvider::proposals_count() <= witness.proposals, - Error::::BadWitness - ); - ensure!( - Self::voting_members_count() <= witness.voting_members, - Error::::BadWitness - ); - ensure!( - Self::ally_members_count() <= witness.ally_members, - Error::::BadWitness - ); - - let mut proposals = T::ProposalProvider::proposals(); - for hash in proposals.iter() { - T::ProposalProvider::veto_proposal(*hash); - } - - let mut members = Self::voting_members(); - T::MembershipChanged::change_members_sorted(&[], &members, &[]); - - members.append(&mut Self::members_of(MemberRole::Ally)); - for member in members.iter() { - if let Some(deposit) = DepositOf::::take(&member) { - let err_amount = T::Currency::unreserve(&member, deposit); - debug_assert!(err_amount.is_zero()); - } - } - - Members::::remove(&MemberRole::Founder); - Members::::remove(&MemberRole::Fellow); - Members::::remove(&MemberRole::Ally); - - members.sort(); - proposals.sort(); - Self::deposit_event(Event::AllianceDisbanded { members, proposals }); - } - - if founders.is_empty() { - ensure!(fellows.is_empty() && allies.is_empty(), Error::::FoundersMissing); - // new members set not provided. - return Ok(()) - } + ensure!(!founders.is_empty(), Error::::FoundersMissing); ensure!(!Self::is_initialized(), Error::::AllianceAlreadyInitialized); let mut founders: BoundedVec = @@ -765,6 +702,59 @@ pub mod pallet { Ok(()) } + /// Disband the Alliance, remove all active members and unreserve deposits. + /// + /// Witness data must be set. + #[pallet::weight(T::WeightInfo::disband( + witness.voting_members, + witness.ally_members, + witness.voting_members + witness.ally_members, + ))] + pub fn disband( + origin: OriginFor, + witness: DisbandWitness, + ) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + + ensure!(!witness.is_zero(), Error::::BadWitness); + ensure!( + Self::voting_members_count() <= witness.voting_members, + Error::::BadWitness + ); + ensure!(Self::ally_members_count() <= witness.ally_members, Error::::BadWitness); + ensure!(Self::is_initialized(), Error::::AllianceNotYetInitialized); + + let voting_members = Self::voting_members(); + T::MembershipChanged::change_members_sorted(&[], &voting_members, &[]); + + let ally_members = Self::members_of(MemberRole::Ally); + let mut unreserve_count: u32 = 0; + for member in voting_members.iter().chain(ally_members.iter()) { + if let Some(deposit) = DepositOf::::take(&member) { + let err_amount = T::Currency::unreserve(&member, deposit); + debug_assert!(err_amount.is_zero()); + unreserve_count += 1; + } + } + + Members::::remove(&MemberRole::Founder); + Members::::remove(&MemberRole::Fellow); + Members::::remove(&MemberRole::Ally); + + Self::deposit_event(Event::AllianceDisbanded { + voting_members: voting_members.len() as u32, + ally_members: ally_members.len() as u32, + unreserved: unreserve_count, + }); + + Ok(Some(T::WeightInfo::disband( + voting_members.len() as u32, + ally_members.len() as u32, + unreserve_count, + )) + .into()) + } + /// Set a new IPFS CID to the alliance rule. 
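
Taken together, the two new Root calls replace the old single reset extrinsic: `init_members` seeds the membership, while `disband` tears it down against witness data that must upper-bound the live counters (otherwise the call fails with `BadWitness`), and the actual member and unreserve counts are returned post-dispatch so any overestimated weight is refunded. A minimal sketch in the style of the pallet's mock tests; the `new_test_ext` scaffolding, account ids, and the test-only `DisbandWitness::new` helper are assumptions for illustration, not part of this patch:

    // `new_test_ext()` in the mock already seeds founders [1, 2] and fellow [3].
    assert_eq!(Alliance::voting_members_count(), 3);
    // Witness values must be upper bounds of the current voting/ally counts.
    assert_ok!(Alliance::disband(Origin::root(), DisbandWitness::new(3, 0)));
    assert!(!Alliance::is_initialized());
    // A fresh membership can then be onboarded again with the new call.
    assert_ok!(Alliance::init_members(Origin::root(), vec![1, 2], vec![3], vec![]));
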
#[pallet::weight(T::WeightInfo::set_rule())] pub fn set_rule(origin: OriginFor, rule: Cid) -> DispatchResult { diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index f2632ec3db08c..c140c040fbd0f 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -197,14 +197,6 @@ impl ProposalProvider for AllianceProposalProvider { fn proposal_of(proposal_hash: H256) -> Option { AllianceMotion::proposal_of(proposal_hash) } - - fn proposals() -> Vec { - AllianceMotion::proposals().into_inner() - } - - fn proposals_count() -> u32 { - pallet_collective::Proposals::::decode_len().unwrap_or(0) as u32 - } } parameter_types! { @@ -326,13 +318,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { Error::::AllianceNotYetInitialized ); - assert_ok!(Alliance::force_set_members( - Origin::root(), - vec![1, 2], - vec![3], - vec![], - Default::default() - )); + assert_ok!(Alliance::init_members(Origin::root(), vec![1, 2], vec![3], vec![])); System::set_block_number(1); }); @@ -369,8 +355,3 @@ pub fn make_proposal(proposal: Call) -> (Call, u32, H256) { let hash = BlakeTwo256::hash_of(&proposal); (proposal, len, hash) } - -pub fn assert_prev_event(event: Event) { - let events = System::events(); - assert_eq!(events.get(events.len() - 2).expect("events expected").event, event); -} diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs index 9c61561d7ddb5..943c39a2d69c0 100644 --- a/frame/alliance/src/tests.rs +++ b/frame/alliance/src/tests.rs @@ -26,18 +26,56 @@ use crate::mock::*; type AllianceMotionEvent = pallet_collective::Event; #[test] -fn force_set_members_works() { +fn init_members_works() { new_test_ext().execute_with(|| { - // ensure alliance is set - assert_eq!(Alliance::voting_members_sorted(), vec![1, 2, 3]); + // alliance must be reset first, no witness data + assert_noop!( + Alliance::init_members(Origin::root(), vec![8], vec![], vec![],), + Error::::AllianceAlreadyInitialized, + ); - // creating and proposing proposals - let (proposal, proposal_len, hash) = make_remark_proposal(42); - assert_ok!(Alliance::propose(Origin::signed(1), 3, Box::new(proposal), proposal_len)); + // give a retirement notice to check later a retiring member not removed + assert_ok!(Alliance::give_retirement_notice(Origin::signed(2))); + assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); + + // disband the Alliance to init new + assert_ok!(Alliance::disband(Origin::root(), DisbandWitness::new(2, 0))); + + // fails without root + assert_noop!(Alliance::init_members(Origin::signed(1), vec![], vec![], vec![]), BadOrigin); + + // founders missing, other members given + assert_noop!( + Alliance::init_members(Origin::root(), vec![], vec![4], vec![2],), + Error::::FoundersMissing, + ); + + // success call + assert_ok!(Alliance::init_members(Origin::root(), vec![8, 5], vec![4], vec![2],)); - let (k_proposal, k_proposal_len, k_hash) = make_kick_member_proposal(2); - assert_ok!(Alliance::propose(Origin::signed(1), 3, Box::new(k_proposal), k_proposal_len)); - let mut proposals = vec![hash, k_hash]; + // assert new set of voting members + assert_eq!(Alliance::voting_members_sorted(), vec![4, 5, 8]); + // assert new members member + assert!(Alliance::is_founder(&8)); + assert!(Alliance::is_founder(&5)); + assert!(Alliance::is_fellow(&4)); + assert!(Alliance::is_ally(&2)); + // assert a retiring member from previous Alliance not removed + assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); + + 
System::assert_last_event(mock::Event::Alliance(crate::Event::MembersInitialized { + founders: vec![5, 8], + fellows: vec![4], + allies: vec![2], + })); + }) +} + +#[test] +fn disband_works() { + new_test_ext().execute_with(|| { + // ensure alliance is set + assert_eq!(Alliance::voting_members_sorted(), vec![1, 2, 3]); // give a retirement notice to check later a retiring member not removed assert_ok!(Alliance::give_retirement_notice(Origin::signed(2))); @@ -50,124 +88,46 @@ fn force_set_members_works() { assert_eq!(Balances::free_balance(9), 15); assert!(Alliance::is_member_of(&9, MemberRole::Ally)); - // ensure proposal is listed as active proposal - assert_eq!(::ProposalProvider::proposals(), proposals); - assert_eq!(::ProposalProvider::proposals_count(), 2); - // fails without root - assert_noop!( - Alliance::force_set_members( - Origin::signed(1), - vec![], - vec![], - vec![], - Default::default() - ), - BadOrigin - ); + assert_noop!(Alliance::disband(Origin::signed(1), Default::default()), BadOrigin); - // nothing to do, witness data is default, new members not provided. - assert_ok!(Alliance::force_set_members( - Origin::root(), - vec![], - vec![], - vec![], - Default::default() - )); - - // alliance must be reset first, no witness data. + // bad witness data checks assert_noop!( - Alliance::force_set_members( - Origin::root(), - vec![8], - vec![], - vec![], - Default::default() - ), - Error::::AllianceAlreadyInitialized, + Alliance::disband(Origin::root(), Default::default(),), + Error::::BadWitness ); - // wrong witness data checks - assert_noop!( - Alliance::force_set_members( - Origin::root(), - vec![], - vec![], - vec![], - ForceSetWitness::new(1, 3, 1) - ), - Error::::BadWitness, - ); assert_noop!( - Alliance::force_set_members( - Origin::root(), - vec![], - vec![], - vec![], - ForceSetWitness::new(2, 1, 1) - ), + Alliance::disband(Origin::root(), DisbandWitness::new(1, 1)), Error::::BadWitness, ); assert_noop!( - Alliance::force_set_members( - Origin::root(), - vec![], - vec![], - vec![], - ForceSetWitness::new(1, 3, 0) - ), + Alliance::disband(Origin::root(), DisbandWitness::new(2, 0)), Error::::BadWitness, ); - // founders missing, other members given - assert_noop!( - Alliance::force_set_members( - Origin::root(), - vec![], - vec![4], - vec![2], - ForceSetWitness::new(2, 3, 1) - ), - Error::::FoundersMissing, - ); - // success call - assert_ok!(Alliance::force_set_members( - Origin::root(), - vec![8, 5], - vec![4], - vec![2], - ForceSetWitness::new(2, 3, 1) - )); + assert_ok!(Alliance::disband(Origin::root(), DisbandWitness::new(2, 1))); - // assert new set of voting members - assert_eq!(Alliance::voting_members_sorted(), vec![4, 5, 8]); - // assert new ally member - assert!(Alliance::is_ally(&2)); - // assert a retiring member from previous Alliance not removed - assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); - // assert old alliance disband. 
+ // assert members disband assert!(!Alliance::is_member(&1)); - assert!(!Alliance::is_member(&3)); - assert!(!Alliance::is_member(&9)); + assert!(!Alliance::is_initialized()); + // assert a retiring member from the previous Alliance not removed + assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); // deposit unreserved assert_eq!(Balances::free_balance(9), 40); - // all proposals are removed - assert_eq!(::ProposalProvider::proposals(), vec![]); - assert_eq!(::ProposalProvider::proposals_count(), 0); - - // assert events - proposals.sort(); - assert_prev_event(mock::Event::Alliance(crate::Event::AllianceDisbanded { - members: vec![1, 3, 9], - proposals, - })); - System::assert_last_event(mock::Event::Alliance(crate::Event::MembersInitialized { - founders: vec![5, 8], - fellows: vec![4], - allies: vec![2], + System::assert_last_event(mock::Event::Alliance(crate::Event::AllianceDisbanded { + voting_members: 2, + ally_members: 1, + unreserved: 1, })); + + // the Alliance must be set first + assert_noop!( + Alliance::disband(Origin::root(), DisbandWitness::new(100, 100)), + Error::::AllianceNotYetInitialized, + ); }) } @@ -588,14 +548,10 @@ fn assert_powerless(user: Origin) { let cid = test_cid(); let (proposal, _, _) = make_kick_member_proposal(42); + assert_noop!(Alliance::init_members(user.clone(), vec![], vec![], vec![],), BadOrigin); + assert_noop!( - Alliance::force_set_members( - user.clone(), - vec![], - vec![], - vec![], - ForceSetWitness { voting_members: 3, ..Default::default() } - ), + Alliance::disband(user.clone(), DisbandWitness { voting_members: 3, ..Default::default() }), BadOrigin ); diff --git a/frame/alliance/src/types.rs b/frame/alliance/src/types.rs index a7fa9f83e4cf3..90f7ce41b9613 100644 --- a/frame/alliance/src/types.rs +++ b/frame/alliance/src/types.rs @@ -94,32 +94,28 @@ impl Cid { } } -/// Witness data for the `force_set_members` call. -/// Relevant only if executed on an initialized alliance to reset it. +/// Witness data for the `disband` call. #[derive( Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen, TypeInfo, Default, )] -pub struct ForceSetWitness { - /// Number of active proposals which will be vetoed and removed. - #[codec(compact)] - pub(super) proposals: u32, - /// Total number of voting members in the current alliance. +pub struct DisbandWitness { + /// Total number of voting members in the current Alliance. #[codec(compact)] pub(super) voting_members: u32, - /// Total number of ally members in the current alliance. + /// Total number of ally members in the current Alliance. #[codec(compact)] pub(super) ally_members: u32, } #[cfg(test)] -impl ForceSetWitness { - // Creates new ForceSetWitness. - pub(super) fn new(proposals: u32, voting_members: u32, ally_members: u32) -> Self { - Self { proposals, voting_members, ally_members } +impl DisbandWitness { + // Creates new DisbandWitness. + pub(super) fn new(voting_members: u32, ally_members: u32) -> Self { + Self { voting_members, ally_members } } } -impl ForceSetWitness { +impl DisbandWitness { pub(super) fn is_zero(self) -> bool { self == Self::default() } diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index 26946c71bf314..9e2ee5681aa99 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_alliance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2022-09-01, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-09-05, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -53,7 +53,8 @@ pub trait WeightInfo { fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight; fn close_disapproved(x: u32, y: u32, p: u32, ) -> Weight; fn close_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight; - fn force_set_members(x: u32, y: u32, z: u32, p: u32, c: u32, m: u32, ) -> Weight; + fn init_members(x: u32, y: u32, z: u32, ) -> Weight; + fn disband(x: u32, y: u32, z: u32, ) -> Weight; fn set_rule() -> Weight; fn announce() -> Weight; fn remove_announcement() -> Weight; @@ -80,13 +81,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[0, 90]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(37_864_000 as u64) - // Standard Error: 28_000 - .saturating_add(Weight::from_ref_time(69_000 as u64).saturating_mul(x as u64)) + Weight::from_ref_time(34_420_000 as u64) + // Standard Error: 25_000 + .saturating_add(Weight::from_ref_time(145_000 as u64).saturating_mul(x as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(66_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(y as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(197_000 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -95,9 +96,9 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[3, 10]`. /// The range of component `y` is `[2, 90]`. fn vote(_x: u32, y: u32, ) -> Weight { - Weight::from_ref_time(46_813_000 as u64) + Weight::from_ref_time(48_443_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(125_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(y as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -107,9 +108,9 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Voting (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - Weight::from_ref_time(35_316_000 as u64) + Weight::from_ref_time(35_056_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(172_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(171_000 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -122,13 +123,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(36_245_000 as u64) - // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(336_000 as u64).saturating_mul(x as u64)) + Weight::from_ref_time(36_929_000 as u64) + // Standard Error: 19_000 + .saturating_add(Weight::from_ref_time(287_000 as u64).saturating_mul(x as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(109_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(105_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(178_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(180_000 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -141,16 +142,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(48_088_000 as u64) + fn close_early_approved(b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(48_085_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) - // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(194_000 as u64).saturating_mul(x as u64)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(93_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(100_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(201_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(210_000 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -164,11 +163,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(43_374_000 as u64) + Weight::from_ref_time(43_377_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(101_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(99_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(182_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(190_000 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -182,65 +181,69 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
- fn close_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(42_798_000 as u64) + fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(41_417_000 as u64) + // Standard Error: 14_000 + .saturating_add(Weight::from_ref_time(67_000 as u64).saturating_mul(x as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(88_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(196_000 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } - // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: Alliance Members (r:3 w:3) - // Storage: Alliance DepositOf (r:200 w:199) - // Storage: System Account (r:199 w:199) - // Storage: AllianceMotion Voting (r:0 w:100) - // Storage: AllianceMotion Members (r:0 w:1) - // Storage: AllianceMotion Prime (r:0 w:1) - // Storage: AllianceMotion ProposalOf (r:0 w:100) + // Storage: AllianceMotion Members (r:1 w:1) /// The range of component `x` is `[1, 10]`. /// The range of component `y` is `[0, 90]`. /// The range of component `z` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - /// The range of component `c` is `[0, 100]`. - /// The range of component `m` is `[0, 100]`. - fn force_set_members(x: u32, y: u32, z: u32, p: u32, c: u32, m: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 221_000 - .saturating_add(Weight::from_ref_time(1_294_000 as u64).saturating_mul(x as u64)) - // Standard Error: 23_000 - .saturating_add(Weight::from_ref_time(231_000 as u64).saturating_mul(y as u64)) - // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(z as u64)) - // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(9_371_000 as u64).saturating_mul(p as u64)) - // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_673_000 as u64).saturating_mul(c as u64)) - // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_581_000 as u64).saturating_mul(m as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(c as u64))) - .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(m as u64))) + fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { + Weight::from_ref_time(37_202_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(149_000 as u64).saturating_mul(y as u64)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(120_000 as u64).saturating_mul(z as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(p as u64))) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(c as u64))) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(m as u64))) + } + // Storage: Alliance Members (r:3 w:3) + // Storage: AllianceMotion Proposals (r:1 w:0) + // Storage: Alliance DepositOf (r:101 w:50) + // Storage: System Account (r:50 w:50) + // Storage: AllianceMotion Members (r:0 w:1) + // Storage: AllianceMotion Prime (r:0 w:1) + /// The range of component `x` is `[1, 100]`. 
+ /// The range of component `y` is `[0, 100]`. + /// The range of component `z` is `[0, 50]`. + fn disband(x: u32, y: u32, z: u32, ) -> Weight { + Weight::from_ref_time(0 as u64) + // Standard Error: 9_000 + .saturating_add(Weight::from_ref_time(1_447_000 as u64).saturating_mul(x as u64)) + // Standard Error: 9_000 + .saturating_add(Weight::from_ref_time(1_512_000 as u64).saturating_mul(y as u64)) + // Standard Error: 19_000 + .saturating_add(Weight::from_ref_time(10_760_000 as u64).saturating_mul(z as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(x as u64))) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(y as u64))) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(z as u64))) + .saturating_add(T::DbWeight::get().writes(5 as u64)) + .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(z as u64))) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - Weight::from_ref_time(18_721_000 as u64) + Weight::from_ref_time(18_101_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - Weight::from_ref_time(21_887_000 as u64) + Weight::from_ref_time(21_090_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - Weight::from_ref_time(23_052_000 as u64) + Weight::from_ref_time(22_118_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -249,14 +252,14 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - Weight::from_ref_time(54_504_000 as u64) + Weight::from_ref_time(53_446_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - Weight::from_ref_time(42_601_000 as u64) + Weight::from_ref_time(42_690_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -265,7 +268,7 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - Weight::from_ref_time(37_704_000 as u64) + Weight::from_ref_time(37_396_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -275,7 +278,7 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Prime (r:0 w:1) // Storage: Alliance RetiringMembers (r:0 w:1) fn give_retirement_notice() -> Weight { - Weight::from_ref_time(40_859_000 as u64) + Weight::from_ref_time(40_644_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } @@ -284,7 +287,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn retire() -> Weight { - Weight::from_ref_time(43_447_000 as u64) + Weight::from_ref_time(43_440_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -295,7 +298,7 @@ impl WeightInfo for SubstrateWeight { // Storage: 
AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - Weight::from_ref_time(61_718_000 as u64) + Weight::from_ref_time(61_060_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } @@ -304,11 +307,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(359_000 as u64) + Weight::from_ref_time(0 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_376_000 as u64).saturating_mul(n as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(112_000 as u64).saturating_mul(l as u64)) + .saturating_add(Weight::from_ref_time(1_362_000 as u64).saturating_mul(n as u64)) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(113_000 as u64).saturating_mul(l as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -318,10 +321,10 @@ impl WeightInfo for SubstrateWeight { /// The range of component `l` is `[1, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 145_000 - .saturating_add(Weight::from_ref_time(20_932_000 as u64).saturating_mul(n as u64)) - // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(3_649_000 as u64).saturating_mul(l as u64)) + // Standard Error: 147_000 + .saturating_add(Weight::from_ref_time(21_060_000 as u64).saturating_mul(n as u64)) + // Standard Error: 57_000 + .saturating_add(Weight::from_ref_time(3_683_000 as u64).saturating_mul(l as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -339,13 +342,13 @@ impl WeightInfo for () { /// The range of component `y` is `[0, 90]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(37_864_000 as u64) - // Standard Error: 28_000 - .saturating_add(Weight::from_ref_time(69_000 as u64).saturating_mul(x as u64)) + Weight::from_ref_time(34_420_000 as u64) + // Standard Error: 25_000 + .saturating_add(Weight::from_ref_time(145_000 as u64).saturating_mul(x as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(66_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(y as u64)) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(197_000 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -354,9 +357,9 @@ impl WeightInfo for () { /// The range of component `x` is `[3, 10]`. /// The range of component `y` is `[2, 90]`. fn vote(_x: u32, y: u32, ) -> Weight { - Weight::from_ref_time(46_813_000 as u64) + Weight::from_ref_time(48_443_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(125_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(y as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -366,9 +369,9 @@ impl WeightInfo for () { // Storage: AllianceMotion Voting (r:0 w:1) /// The range of component `p` is `[1, 100]`. 
fn veto(p: u32, ) -> Weight { - Weight::from_ref_time(35_316_000 as u64) + Weight::from_ref_time(35_056_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(172_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(171_000 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -381,13 +384,13 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(36_245_000 as u64) - // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(336_000 as u64).saturating_mul(x as u64)) + Weight::from_ref_time(36_929_000 as u64) + // Standard Error: 19_000 + .saturating_add(Weight::from_ref_time(287_000 as u64).saturating_mul(x as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(109_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(105_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(178_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(180_000 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -400,16 +403,14 @@ impl WeightInfo for () { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(48_088_000 as u64) + fn close_early_approved(b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(48_085_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) - // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(194_000 as u64).saturating_mul(x as u64)) + .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(93_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(100_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(201_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(210_000 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -423,11 +424,11 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(43_374_000 as u64) + Weight::from_ref_time(43_377_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(101_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(99_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(182_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(190_000 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -441,65 +442,69 @@ impl WeightInfo for () { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. 
/// The range of component `p` is `[1, 100]`. - fn close_approved(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(42_798_000 as u64) + fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { + Weight::from_ref_time(41_417_000 as u64) + // Standard Error: 14_000 + .saturating_add(Weight::from_ref_time(67_000 as u64).saturating_mul(x as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(y as u64)) + .saturating_add(Weight::from_ref_time(88_000 as u64).saturating_mul(y as u64)) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + .saturating_add(Weight::from_ref_time(196_000 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } - // Storage: AllianceMotion Proposals (r:1 w:1) // Storage: Alliance Members (r:3 w:3) - // Storage: Alliance DepositOf (r:200 w:199) - // Storage: System Account (r:199 w:199) - // Storage: AllianceMotion Voting (r:0 w:100) - // Storage: AllianceMotion Members (r:0 w:1) - // Storage: AllianceMotion Prime (r:0 w:1) - // Storage: AllianceMotion ProposalOf (r:0 w:100) + // Storage: AllianceMotion Members (r:1 w:1) /// The range of component `x` is `[1, 10]`. /// The range of component `y` is `[0, 90]`. /// The range of component `z` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - /// The range of component `c` is `[0, 100]`. - /// The range of component `m` is `[0, 100]`. - fn force_set_members(x: u32, y: u32, z: u32, p: u32, c: u32, m: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 221_000 - .saturating_add(Weight::from_ref_time(1_294_000 as u64).saturating_mul(x as u64)) - // Standard Error: 23_000 - .saturating_add(Weight::from_ref_time(231_000 as u64).saturating_mul(y as u64)) - // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(z as u64)) - // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(9_371_000 as u64).saturating_mul(p as u64)) - // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_673_000 as u64).saturating_mul(c as u64)) - // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(11_581_000 as u64).saturating_mul(m as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(c as u64))) - .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(m as u64))) + fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { + Weight::from_ref_time(37_202_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(149_000 as u64).saturating_mul(y as u64)) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(120_000 as u64).saturating_mul(z as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(p as u64))) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(c as u64))) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(m as u64))) + } + // Storage: Alliance Members (r:3 w:3) + // Storage: AllianceMotion Proposals (r:1 w:0) + // Storage: Alliance DepositOf (r:101 w:50) + // Storage: System Account (r:50 w:50) + // Storage: AllianceMotion Members (r:0 w:1) + // Storage: AllianceMotion Prime (r:0 w:1) + /// The range of component 
`x` is `[1, 100]`. + /// The range of component `y` is `[0, 100]`. + /// The range of component `z` is `[0, 50]`. + fn disband(x: u32, y: u32, z: u32, ) -> Weight { + Weight::from_ref_time(0 as u64) + // Standard Error: 9_000 + .saturating_add(Weight::from_ref_time(1_447_000 as u64).saturating_mul(x as u64)) + // Standard Error: 9_000 + .saturating_add(Weight::from_ref_time(1_512_000 as u64).saturating_mul(y as u64)) + // Standard Error: 19_000 + .saturating_add(Weight::from_ref_time(10_760_000 as u64).saturating_mul(z as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(x as u64))) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(y as u64))) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(z as u64))) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) + .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(z as u64))) } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - Weight::from_ref_time(18_721_000 as u64) + Weight::from_ref_time(18_101_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - Weight::from_ref_time(21_887_000 as u64) + Weight::from_ref_time(21_090_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - Weight::from_ref_time(23_052_000 as u64) + Weight::from_ref_time(22_118_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -508,14 +513,14 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - Weight::from_ref_time(54_504_000 as u64) + Weight::from_ref_time(53_446_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - Weight::from_ref_time(42_601_000 as u64) + Weight::from_ref_time(42_690_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -524,7 +529,7 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - Weight::from_ref_time(37_704_000 as u64) + Weight::from_ref_time(37_396_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -534,7 +539,7 @@ impl WeightInfo for () { // Storage: AllianceMotion Prime (r:0 w:1) // Storage: Alliance RetiringMembers (r:0 w:1) fn give_retirement_notice() -> Weight { - Weight::from_ref_time(40_859_000 as u64) + Weight::from_ref_time(40_644_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } @@ -543,7 +548,7 @@ impl WeightInfo for () { // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn retire() -> Weight { - Weight::from_ref_time(43_447_000 as u64) + Weight::from_ref_time(43_440_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -554,7 +559,7 @@ impl WeightInfo for () { // Storage: AllianceMotion 
Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - Weight::from_ref_time(61_718_000 as u64) + Weight::from_ref_time(61_060_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } @@ -563,11 +568,11 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 100]`. /// The range of component `l` is `[1, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(359_000 as u64) + Weight::from_ref_time(0 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_376_000 as u64).saturating_mul(n as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(112_000 as u64).saturating_mul(l as u64)) + .saturating_add(Weight::from_ref_time(1_362_000 as u64).saturating_mul(n as u64)) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(113_000 as u64).saturating_mul(l as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -577,10 +582,10 @@ impl WeightInfo for () { /// The range of component `l` is `[1, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 145_000 - .saturating_add(Weight::from_ref_time(20_932_000 as u64).saturating_mul(n as u64)) - // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(3_649_000 as u64).saturating_mul(l as u64)) + // Standard Error: 147_000 + .saturating_add(Weight::from_ref_time(21_060_000 as u64).saturating_mul(n as u64)) + // Standard Error: 57_000 + .saturating_add(Weight::from_ref_time(3_683_000 as u64).saturating_mul(l as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } From e24e9ad5855daef00f64a652ef70ac3848850d56 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 5 Sep 2022 19:41:32 +0200 Subject: [PATCH 097/348] Fetch Babe configuration from runtime state (#11760) * Fetch babe config data from runtime state * Some renaming * More renaming * Final nits * Fix tests and benches * Rename to in BabeConfiguration * Remove duplicate babe parameter description Already specified over the 'PRIMARY_PROBABILITY' constant value * trigger pipeline * trigger pipeline --- bin/node/cli/src/service.rs | 7 +- bin/node/rpc/src/lib.rs | 4 +- bin/node/runtime/src/lib.rs | 16 +-- client/consensus/babe/rpc/src/lib.rs | 18 +-- client/consensus/babe/src/aux_schema.rs | 10 +- client/consensus/babe/src/lib.rs | 120 +++++++----------- client/consensus/babe/src/migration.rs | 4 +- client/consensus/babe/src/tests.rs | 33 +++-- client/consensus/manual-seal/src/consensus.rs | 2 +- .../manual-seal/src/consensus/babe.rs | 18 +-- .../manual-seal/src/consensus/timestamp.rs | 2 +- frame/babe/src/lib.rs | 1 + primitives/consensus/babe/src/lib.rs | 33 +++-- test-utils/runtime/src/lib.rs | 12 +- 14 files changed, 125 insertions(+), 155 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index b20a3ac59a96a..13003c1a7a41f 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -199,7 +199,7 @@ pub fn new_partial( let justification_import = grandpa_block_import.clone(); let (block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::Config::get(&*client)?, + sc_consensus_babe::configuration(&*client)?, grandpa_block_import, client.clone(), )?; @@ -682,10 +682,7 @@ mod tests { .epoch_changes() .shared_data() 
.epoch_data(&epoch_descriptor, |slot| { - sc_consensus_babe::Epoch::genesis( - babe_link.config().genesis_config(), - slot, - ) + sc_consensus_babe::Epoch::genesis(babe_link.config(), slot) }) .unwrap(); diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index e5b666195e1bc..1c8b9cce1a744 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -36,7 +36,7 @@ use std::sync::Arc; use jsonrpsee::RpcModule; use node_primitives::{AccountId, Balance, Block, BlockNumber, Hash, Index}; use sc_client_api::AuxStore; -use sc_consensus_babe::{Config, Epoch}; +use sc_consensus_babe::{BabeConfiguration, Epoch}; use sc_consensus_epochs::SharedEpochChanges; use sc_finality_grandpa::{ FinalityProofProvider, GrandpaJustificationStream, SharedAuthoritySet, SharedVoterState, @@ -54,7 +54,7 @@ use sp_keystore::SyncCryptoStorePtr; /// Extra dependencies for BABE. pub struct BabeDeps { /// BABE protocol config. - pub babe_config: Config, + pub babe_config: BabeConfiguration, /// BABE pending epoch changes. pub shared_epoch_changes: SharedEpochChanges, /// The keystore that manages the keys of the node. diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index fa0f877c5938d..5aa488f328a5e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1859,19 +1859,15 @@ impl_runtime_apis! { } impl sp_consensus_babe::BabeApi for Runtime { - fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration { - // The choice of `c` parameter (where `1 - c` represents the - // probability of a slot being empty), is done in accordance to the - // slot duration and expected target block time, for safely - // resisting network delays of maximum two seconds. - // - sp_consensus_babe::BabeGenesisConfiguration { + fn configuration() -> sp_consensus_babe::BabeConfiguration { + let epoch_config = Babe::epoch_config().unwrap_or(BABE_GENESIS_EPOCH_CONFIG); + sp_consensus_babe::BabeConfiguration { slot_duration: Babe::slot_duration(), epoch_length: EpochDuration::get(), - c: BABE_GENESIS_EPOCH_CONFIG.c, - genesis_authorities: Babe::authorities().to_vec(), + c: epoch_config.c, + authorities: Babe::authorities().to_vec(), randomness: Babe::randomness(), - allowed_slots: BABE_GENESIS_EPOCH_CONFIG.allowed_slots, + allowed_slots: epoch_config.allowed_slots, } } diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index b000d38a44f02..288f852a5c989 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -25,7 +25,7 @@ use jsonrpsee::{ types::{error::CallError, ErrorObject}, }; -use sc_consensus_babe::{authorship, Config, Epoch}; +use sc_consensus_babe::{authorship, Epoch}; use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; use sc_rpc_api::DenyUnsafe; use serde::{Deserialize, Serialize}; @@ -33,7 +33,9 @@ use sp_api::{BlockId, ProvideRuntimeApi}; use sp_application_crypto::AppKey; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::{Error as ConsensusError, SelectChain}; -use sp_consensus_babe::{digests::PreDigest, AuthorityId, BabeApi as BabeRuntimeApi}; +use sp_consensus_babe::{ + digests::PreDigest, AuthorityId, BabeApi as BabeRuntimeApi, BabeConfiguration, +}; use sp_core::crypto::ByteArray; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::traits::{Block as BlockT, Header as _}; @@ -57,7 +59,7 @@ pub struct Babe { /// shared reference to the Keystore keystore: SyncCryptoStorePtr, /// config (actually 
holds the slot duration) - babe_config: Config, + babe_config: BabeConfiguration, /// The SelectChain strategy select_chain: SC, /// Whether to deny unsafe calls @@ -70,7 +72,7 @@ impl Babe { client: Arc, shared_epoch_changes: SharedEpochChanges, keystore: SyncCryptoStorePtr, - babe_config: Config, + babe_config: BabeConfiguration, select_chain: SC, deny_unsafe: DenyUnsafe, ) -> Self { @@ -185,7 +187,7 @@ impl From for JsonRpseeError { async fn epoch_data( epoch_changes: &SharedEpochChanges, client: &Arc, - babe_config: &Config, + babe_config: &BabeConfiguration, slot: u64, select_chain: &SC, ) -> Result @@ -202,7 +204,7 @@ where &parent.hash(), *parent.number(), slot.into(), - |slot| Epoch::genesis(babe_config.genesis_config(), slot), + |slot| Epoch::genesis(babe_config, slot), ) .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(e.to_string())))? .ok_or(Error::Consensus(ConsensusError::InvalidAuthoritiesSet)) @@ -221,7 +223,7 @@ mod tests { TestClientBuilderExt, }; - use sc_consensus_babe::{block_import, AuthorityPair, Config}; + use sc_consensus_babe::{block_import, AuthorityPair}; use std::sync::Arc; /// creates keystore backed by a temp file @@ -243,7 +245,7 @@ mod tests { let builder = TestClientBuilder::new(); let (client, longest_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let config = Config::get(&*client).expect("config available"); + let config = sc_consensus_babe::configuration(&*client).expect("config available"); let (_, link) = block_import(config.clone(), client.clone(), client.clone()) .expect("can initialize block-import"); diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 2ab84b9b132cc..fef84bda86974 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -28,7 +28,7 @@ use sc_consensus_epochs::{ EpochChangesFor, SharedEpochChanges, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_consensus_babe::{BabeBlockWeight, BabeGenesisConfiguration}; +use sp_consensus_babe::{BabeBlockWeight, BabeConfiguration}; use sp_runtime::traits::Block as BlockT; const BABE_EPOCH_CHANGES_VERSION: &[u8] = b"babe_epoch_changes_version"; @@ -57,7 +57,7 @@ where /// Load or initialize persistent epoch change data from backend. 
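
On the RPC side the handler now consumes the runtime-provided `BabeConfiguration` directly instead of the removed `Config` wrapper. A condensed sketch of the updated construction; the surrounding `client`, `shared_epoch_changes`, `keystore`, `select_chain`, and `deny_unsafe` values are assumed to be set up as before:

    // Sketch only: fetch the configuration from runtime state, then build the RPC handler.
    let babe_config = sc_consensus_babe::configuration(&*client)?;
    let babe_rpc = Babe::new(
        client.clone(),
        shared_epoch_changes.clone(),
        keystore.clone(),
        babe_config,
        select_chain.clone(),
        deny_unsafe,
    );
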
pub fn load_epoch_changes( backend: &B, - config: &BabeGenesisConfiguration, + config: &BabeConfiguration, ) -> ClientResult> { let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?; @@ -143,7 +143,7 @@ mod test { use sc_consensus_epochs::{EpochHeader, PersistedEpoch, PersistedEpochHeader}; use sc_network_test::Block as TestBlock; use sp_consensus::Error as ConsensusError; - use sp_consensus_babe::{AllowedSlots, BabeGenesisConfiguration}; + use sp_consensus_babe::AllowedSlots; use sp_core::H256; use sp_runtime::traits::NumberFor; use substrate_test_runtime_client; @@ -182,11 +182,11 @@ mod test { let epoch_changes = load_epoch_changes::( &client, - &BabeGenesisConfiguration { + &BabeConfiguration { slot_duration: 10, epoch_length: 4, c: (3, 10), - genesis_authorities: Vec::new(), + authorities: Vec::new(), randomness: Default::default(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, }, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index f61ba23d920f3..1303915efee49 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -119,7 +119,7 @@ use sp_consensus::{ SelectChain, }; use sp_consensus_babe::inherents::BabeInherentData; -use sp_consensus_slots::{Slot, SlotDuration}; +use sp_consensus_slots::Slot; use sp_core::{crypto::ByteArray, ExecutionContext}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; @@ -137,8 +137,7 @@ pub use sp_consensus_babe::{ PrimaryPreDigest, SecondaryPlainPreDigest, }, AuthorityId, AuthorityPair, AuthoritySignature, BabeApi, BabeAuthorityWeight, BabeBlockWeight, - BabeEpochConfiguration, BabeGenesisConfiguration, ConsensusLog, BABE_ENGINE_ID, - VRF_OUTPUT_LENGTH, + BabeConfiguration, BabeEpochConfiguration, ConsensusLog, BABE_ENGINE_ID, VRF_OUTPUT_LENGTH, }; pub use aux_schema::load_block_weight as block_weight; @@ -211,12 +210,12 @@ impl From for Epoch { impl Epoch { /// Create the genesis epoch (epoch #0). This is defined to start at the slot of /// the first block, so that has to be provided. - pub fn genesis(genesis_config: &BabeGenesisConfiguration, slot: Slot) -> Epoch { + pub fn genesis(genesis_config: &BabeConfiguration, slot: Slot) -> Epoch { Epoch { epoch_index: 0, start_slot: slot, duration: genesis_config.epoch_length, - authorities: genesis_config.genesis_authorities.clone(), + authorities: genesis_config.authorities.clone(), randomness: genesis_config.randomness, config: BabeEpochConfiguration { c: genesis_config.c, @@ -338,56 +337,36 @@ pub struct BabeIntermediate { /// Intermediate key for Babe engine. pub static INTERMEDIATE_KEY: &[u8] = b"babe1"; -/// Configuration for BABE used for defining block verification parameters as -/// well as authoring (e.g. the slot duration). -#[derive(Clone)] -pub struct Config { - genesis_config: BabeGenesisConfiguration, -} - -impl Config { - /// Create a new config by reading the genesis configuration from the runtime. - pub fn get(client: &C) -> ClientResult - where - C: AuxStore + ProvideRuntimeApi + UsageProvider, - C::Api: BabeApi, - { - trace!(target: "babe", "Getting slot duration"); - - let mut best_block_id = BlockId::Hash(client.usage_info().chain.best_hash); - if client.usage_info().chain.finalized_state.is_none() { - debug!(target: "babe", "No finalized state is available. 
Reading config from genesis"); - best_block_id = BlockId::Hash(client.usage_info().chain.genesis_hash); - } - let runtime_api = client.runtime_api(); +/// Read configuration from the runtime state at current best block. +pub fn configuration(client: &C) -> ClientResult +where + C: AuxStore + ProvideRuntimeApi + UsageProvider, + C::Api: BabeApi, +{ + let block_id = if client.usage_info().chain.finalized_state.is_some() { + BlockId::Hash(client.usage_info().chain.best_hash) + } else { + debug!(target: "babe", "No finalized state is available. Reading config from genesis"); + BlockId::Hash(client.usage_info().chain.genesis_hash) + }; - let version = runtime_api.api_version::>(&best_block_id)?; + let runtime_api = client.runtime_api(); + let version = runtime_api.api_version::>(&block_id)?; - let genesis_config = if version == Some(1) { + let config = match version { + Some(1) => { #[allow(deprecated)] { - runtime_api.configuration_before_version_2(&best_block_id)?.into() + runtime_api.configuration_before_version_2(&block_id)?.into() } - } else if version == Some(2) { - runtime_api.configuration(&best_block_id)? - } else { + }, + Some(2) => runtime_api.configuration(&block_id)?, + _ => return Err(sp_blockchain::Error::VersionInvalid( "Unsupported or invalid BabeApi version".to_string(), - )) - }; - - Ok(Config { genesis_config }) - } - - /// Get the genesis configuration. - pub fn genesis_config(&self) -> &BabeGenesisConfiguration { - &self.genesis_config - } - - /// Get the slot duration defined in the genesis configuration. - pub fn slot_duration(&self) -> SlotDuration { - SlotDuration::from_millis(self.genesis_config.slot_duration) - } + )), + }; + Ok(config) } /// Parameters for BABE. @@ -611,7 +590,7 @@ fn aux_storage_cleanup + HeaderBackend, Block: B async fn answer_requests( mut request_rx: Receiver>, - config: Config, + config: BabeConfiguration, client: Arc, epoch_changes: SharedEpochChanges, ) where @@ -640,9 +619,7 @@ async fn answer_requests( .ok_or(Error::::FetchEpoch(parent_hash))?; let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(&config.genesis_config, slot) - }) + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&config, slot)) .ok_or(Error::::FetchEpoch(parent_hash))?; Ok(sp_consensus_babe::Epoch { @@ -739,7 +716,7 @@ struct BabeSlotWorker { keystore: SyncCryptoStorePtr, epoch_changes: SharedEpochChanges, slot_notification_sinks: SlotNotificationSinks, - config: Config, + config: BabeConfiguration, block_proposal_slot_portion: SlotProportion, max_block_proposal_slot_portion: Option, telemetry: Option, @@ -797,9 +774,7 @@ where fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { self.epoch_changes .shared_data() - .viable_epoch(epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) + .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .map(|epoch| epoch.as_ref().authorities.len()) } @@ -814,9 +789,7 @@ where slot, self.epoch_changes .shared_data() - .viable_epoch(epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - })? + .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.config, slot))? .as_ref(), &self.keystore, ); @@ -1020,7 +993,7 @@ fn find_next_config_digest( #[derive(Clone)] pub struct BabeLink { epoch_changes: SharedEpochChanges, - config: Config, + config: BabeConfiguration, } impl BabeLink { @@ -1030,7 +1003,7 @@ impl BabeLink { } /// Get the config of this link. 
- pub fn config(&self) -> &Config { + pub fn config(&self) -> &BabeConfiguration { &self.config } } @@ -1040,7 +1013,7 @@ pub struct BabeVerifier { client: Arc, select_chain: SelectChain, create_inherent_data_providers: CIDP, - config: Config, + config: BabeConfiguration, epoch_changes: SharedEpochChanges, can_author_with: CAW, telemetry: Option, @@ -1245,9 +1218,7 @@ where .map_err(|e| Error::::ForkTree(Box::new(e)))? .ok_or(Error::::FetchEpoch(parent_hash))?; let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .ok_or(Error::::FetchEpoch(parent_hash))?; // We add one to the current slot to allow for some small drift. @@ -1353,7 +1324,7 @@ pub struct BabeBlockImport { inner: I, client: Arc, epoch_changes: SharedEpochChanges, - config: Config, + config: BabeConfiguration, } impl Clone for BabeBlockImport { @@ -1372,7 +1343,7 @@ impl BabeBlockImport { client: Arc, epoch_changes: SharedEpochChanges, block_import: I, - config: Config, + config: BabeConfiguration, ) -> Self { BabeBlockImport { client, inner: block_import, epoch_changes, config } } @@ -1580,9 +1551,7 @@ where old_epoch_changes = Some((*epoch_changes).clone()); let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(&self.config.genesis_config, slot) - }) + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .ok_or_else(|| { ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) })?; @@ -1761,7 +1730,7 @@ where /// Also returns a link object used to correctly instantiate the import queue /// and background worker. pub fn block_import( - config: Config, + config: BabeConfiguration, wrapped_block_import: I, client: Arc, ) -> ClientResult<(BabeBlockImport, BabeLink)> @@ -1772,8 +1741,7 @@ where + PreCommitActions + 'static, { - let epoch_changes = - aux_schema::load_epoch_changes::(&*client, &config.genesis_config)?; + let epoch_changes = aux_schema::load_epoch_changes::(&*client, &config)?; let link = BabeLink { epoch_changes: epoch_changes.clone(), config: config.clone() }; // NOTE: this isn't entirely necessary, but since we didn't use to prune the @@ -1884,9 +1852,9 @@ where // Revert epoch changes tree. - let config = Config::get(&*client)?; - let epoch_changes = - aux_schema::load_epoch_changes::(&*client, config.genesis_config())?; + // This config is only used on-genesis. + let config = configuration(&*client)?; + let epoch_changes = aux_schema::load_epoch_changes::(&*client, &config)?; let mut epoch_changes = epoch_changes.shared_data(); if revert_up_to_number == Zero::zero() { diff --git a/client/consensus/babe/src/migration.rs b/client/consensus/babe/src/migration.rs index a8c3772bbefb8..23413aa6a7b1b 100644 --- a/client/consensus/babe/src/migration.rs +++ b/client/consensus/babe/src/migration.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::{ - AuthorityId, BabeAuthorityWeight, BabeEpochConfiguration, BabeGenesisConfiguration, Epoch, + AuthorityId, BabeAuthorityWeight, BabeConfiguration, BabeEpochConfiguration, Epoch, NextEpochDescriptor, VRF_OUTPUT_LENGTH, }; use codec::{Decode, Encode}; @@ -64,7 +64,7 @@ impl EpochT for EpochV0 { impl EpochV0 { /// Migrate the sturct to current epoch version. 
- pub fn migrate(self, config: &BabeGenesisConfiguration) -> Epoch { + pub fn migrate(self, config: &BabeConfiguration) -> Epoch { Epoch { epoch_index: self.epoch_index, start_slot: self.start_slot, diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 5ecdb42f7f177..7207b7a36c3d4 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -36,6 +36,7 @@ use sp_consensus_babe::{ inherents::InherentDataProvider, make_transcript, make_transcript_data, AllowedSlots, AuthorityPair, Slot, }; +use sp_consensus_slots::SlotDuration; use sp_core::crypto::Pair; use sp_keystore::{vrf::make_transcript as transcript_from_data, SyncCryptoStore}; use sp_runtime::{ @@ -71,7 +72,7 @@ type BabeBlockImport = struct DummyFactory { client: Arc, epoch_changes: SharedEpochChanges, - config: Config, + config: BabeConfiguration, mutator: Mutator, } @@ -139,7 +140,7 @@ impl DummyProposer { &self.parent_hash, self.parent_number, this_slot, - |slot| Epoch::genesis(self.factory.config.genesis_config(), slot), + |slot| Epoch::genesis(&self.factory.config, slot), ) .expect("client has data to find epoch") .expect("can compute epoch for baked block"); @@ -288,7 +289,7 @@ impl TestNetFactory for BabeTestNet { ) { let client = client.as_client(); - let config = Config::get(&*client).expect("config available"); + let config = crate::configuration(&*client).expect("config available"); let (block_import, link) = crate::block_import(config, client.clone(), client.clone()) .expect("can initialize block-import"); @@ -559,11 +560,11 @@ fn can_author_block() { }, }; - let mut config = crate::BabeGenesisConfiguration { + let mut config = crate::BabeConfiguration { slot_duration: 1000, epoch_length: 100, c: (3, 10), - genesis_authorities: Vec::new(), + authorities: Vec::new(), randomness: [0; 32], allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, }; @@ -708,12 +709,12 @@ fn importing_block_one_sets_genesis_epoch() { &mut block_import, ); - let genesis_epoch = Epoch::genesis(data.link.config.genesis_config(), 999.into()); + let genesis_epoch = Epoch::genesis(&data.link.config, 999.into()); let epoch_changes = data.link.epoch_changes.shared_data(); let epoch_for_second_block = epoch_changes .epoch_data_for_child_of(descendent_query(&*client), &block_hash, 1, 1000.into(), |slot| { - Epoch::genesis(data.link.config.genesis_config(), slot) + Epoch::genesis(&data.link.config, slot) }) .unwrap() .unwrap(); @@ -771,16 +772,14 @@ fn revert_prunes_epoch_changes_and_removes_weights() { // Load and check epoch changes. - let actual_nodes = aux_schema::load_epoch_changes::( - &*client, - data.link.config.genesis_config(), - ) - .expect("load epoch changes") - .shared_data() - .tree() - .iter() - .map(|(h, _, _)| *h) - .collect::>(); + let actual_nodes = + aux_schema::load_epoch_changes::(&*client, &data.link.config) + .expect("load epoch changes") + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| *h) + .collect::>(); let expected_nodes = vec![ canon[0], // A diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs index b5dfc3d809c13..a812bb028c7f2 100644 --- a/client/consensus/manual-seal/src/consensus.rs +++ b/client/consensus/manual-seal/src/consensus.rs @@ -39,7 +39,7 @@ pub trait ConsensusDataProvider: Send + Sync { /// Attempt to create a consensus digest. fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result; - /// set up the neccessary import params. 
+ /// Set up the necessary import params. fn append_block_import( &self, parent: &B::Header, diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index cc73a3fa961ce..300a96695c90a 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -24,8 +24,7 @@ use crate::Error; use codec::Encode; use sc_client_api::{AuxStore, UsageProvider}; use sc_consensus_babe::{ - authorship, find_pre_digest, BabeIntermediate, CompatibleDigestItem, Config, Epoch, - INTERMEDIATE_KEY, + authorship, find_pre_digest, BabeIntermediate, CompatibleDigestItem, Epoch, INTERMEDIATE_KEY, }; use sc_consensus_epochs::{ descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, @@ -40,7 +39,7 @@ use sp_consensus::CacheKeyId; use sp_consensus_babe::{ digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest}, inherents::BabeInherentData, - AuthorityId, BabeApi, BabeAuthorityWeight, ConsensusLog, BABE_ENGINE_ID, + AuthorityId, BabeApi, BabeAuthorityWeight, BabeConfiguration, ConsensusLog, BABE_ENGINE_ID, }; use sp_consensus_slots::Slot; use sp_inherents::InherentData; @@ -64,7 +63,10 @@ pub struct BabeConsensusDataProvider { epoch_changes: SharedEpochChanges, /// BABE config, gotten from the runtime. - config: Config, + /// NOTE: This is used to fetch `slot_duration` and `epoch_length` in the + /// `ConsensusDataProvider` implementation. Correct as far as these values + /// are not changed during an epoch change. + config: BabeConfiguration, /// Authorities to be used for this babe chain. authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, @@ -152,7 +154,7 @@ where return Err(Error::StringError("Cannot supply empty authority set!".into())) } - let config = Config::get(&*client)?; + let config = sc_consensus_babe::configuration(&*client)?; Ok(Self { config, @@ -177,9 +179,7 @@ where .ok_or(sp_consensus::Error::InvalidAuthoritiesSet)?; let epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| { - Epoch::genesis(self.config.genesis_config(), slot) - }) + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .ok_or_else(|| { log::info!(target: "babe", "create_digest: no viable_epoch :("); sp_consensus::Error::InvalidAuthoritiesSet @@ -306,7 +306,7 @@ where identifier, EpochHeader { start_slot: slot, - end_slot: (*slot * self.config.genesis_config().epoch_length).into(), + end_slot: (*slot * self.config.epoch_length).into(), }, ), _ => unreachable!( diff --git a/client/consensus/manual-seal/src/consensus/timestamp.rs b/client/consensus/manual-seal/src/consensus/timestamp.rs index 70b5e5de4ec6c..f899b80d6c9af 100644 --- a/client/consensus/manual-seal/src/consensus/timestamp.rs +++ b/client/consensus/manual-seal/src/consensus/timestamp.rs @@ -63,7 +63,7 @@ impl SlotTimestampProvider { C: AuxStore + HeaderBackend + ProvideRuntimeApi + UsageProvider, C::Api: BabeApi, { - let slot_duration = sc_consensus_babe::Config::get(&*client)?.slot_duration(); + let slot_duration = sc_consensus_babe::configuration(&*client)?.slot_duration(); let time = Self::with_header(&client, slot_duration, |header| { let slot_number = *sc_consensus_babe::find_pre_digest::(&header) diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 48ca62a5b1a09..9a99d8ed995ab 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -306,6 +306,7 @@ pub mod pallet { /// The configuration for the current epoch. Should never be `None` as it is initialized in /// genesis. 
#[pallet::storage] + #[pallet::getter(fn epoch_config)] pub(super) type EpochConfig = StorageValue<_, BabeEpochConfiguration>; /// The configuration for the next epoch, `None` if the config will not change diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 492d1a9a7238f..621ab859b914f 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -137,7 +137,7 @@ pub enum ConsensusLog { /// Configuration data used by the BABE consensus engine. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] -pub struct BabeGenesisConfigurationV1 { +pub struct BabeConfigurationV1 { /// The slot duration in milliseconds for BABE. Currently, only /// the value provided by this type at genesis will be used. /// @@ -156,7 +156,7 @@ pub struct BabeGenesisConfigurationV1 { pub c: (u64, u64), /// The authorities for the genesis epoch. - pub genesis_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// The randomness for the genesis epoch. pub randomness: Randomness, @@ -166,13 +166,13 @@ pub struct BabeGenesisConfigurationV1 { pub secondary_slots: bool, } -impl From for BabeGenesisConfiguration { - fn from(v1: BabeGenesisConfigurationV1) -> Self { +impl From for BabeConfiguration { + fn from(v1: BabeConfigurationV1) -> Self { Self { slot_duration: v1.slot_duration, epoch_length: v1.epoch_length, c: v1.c, - genesis_authorities: v1.genesis_authorities, + authorities: v1.authorities, randomness: v1.randomness, allowed_slots: if v1.secondary_slots { AllowedSlots::PrimaryAndSecondaryPlainSlots @@ -185,7 +185,7 @@ impl From for BabeGenesisConfiguration { /// Configuration data used by the BABE consensus engine. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] -pub struct BabeGenesisConfiguration { +pub struct BabeConfiguration { /// The slot duration in milliseconds for BABE. Currently, only /// the value provided by this type at genesis will be used. /// @@ -203,16 +203,23 @@ pub struct BabeGenesisConfiguration { /// of a slot being empty. pub c: (u64, u64), - /// The authorities for the genesis epoch. - pub genesis_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// The authorities + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - /// The randomness for the genesis epoch. + /// The randomness pub randomness: Randomness, /// Type of allowed slots. pub allowed_slots: AllowedSlots, } +impl BabeConfiguration { + /// Convenience method to get the slot duration as a `SlotDuration` value. + pub fn slot_duration(&self) -> SlotDuration { + SlotDuration::from_millis(self.slot_duration) + } +} + /// Types of allowed slots. #[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] @@ -237,7 +244,7 @@ impl AllowedSlots { } } -/// Configuration data used by the BABE consensus engine. +/// Configuration data used by the BABE consensus engine that may change with epochs. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BabeEpochConfiguration { @@ -357,12 +364,12 @@ sp_api::decl_runtime_apis! { /// API necessary for block authorship with BABE. #[api_version(2)] pub trait BabeApi { - /// Return the genesis configuration for BABE. The configuration is only read on genesis. 
- fn configuration() -> BabeGenesisConfiguration; + /// Return the configuration for BABE. + fn configuration() -> BabeConfiguration; /// Return the configuration for BABE. Version 1. #[changed_in(2)] - fn configuration() -> BabeGenesisConfigurationV1; + fn configuration() -> BabeConfigurationV1; /// Returns the slot that started the current epoch. fn current_epoch_start() -> Slot; diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 181e3fec62b24..bb526010dddd5 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -849,12 +849,12 @@ cfg_if! { } impl sp_consensus_babe::BabeApi for Runtime { - fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration { - sp_consensus_babe::BabeGenesisConfiguration { + fn configuration() -> sp_consensus_babe::BabeConfiguration { + sp_consensus_babe::BabeConfiguration { slot_duration: 1000, epoch_length: EpochDuration::get(), c: (3, 10), - genesis_authorities: system::authorities() + authorities: system::authorities() .into_iter().map(|x|(x, 1)).collect(), randomness: >::randomness(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, @@ -1123,12 +1123,12 @@ cfg_if! { } impl sp_consensus_babe::BabeApi for Runtime { - fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration { - sp_consensus_babe::BabeGenesisConfiguration { + fn configuration() -> sp_consensus_babe::BabeConfiguration { + sp_consensus_babe::BabeConfiguration { slot_duration: 1000, epoch_length: EpochDuration::get(), c: (3, 10), - genesis_authorities: system::authorities() + authorities: system::authorities() .into_iter().map(|x|(x, 1)).collect(), randomness: >::randomness(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, From 6bdf4c0f8c5b977b3b21ce6302798f6b7df2491d Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Tue, 6 Sep 2022 08:34:29 +0200 Subject: [PATCH 098/348] [Fix] Make sure pool metadata is removed on pool dissolve (#12154) * [Fix] Make sure pool metadata is removed on pool dissolve * add migration * remove_metadata helper removed * fix typo and add a comment * fix pre_upgrade * fix migration * Update frame/nomination-pools/src/migration.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/nomination-pools/src/migration.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * address comments * fix comments * Update frame/nomination-pools/src/migration.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * address comments * permissions fix Co-authored-by: parity-processbot <> Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- frame/nomination-pools/src/lib.rs | 12 +++-- frame/nomination-pools/src/migration.rs | 65 +++++++++++++++++++++++++ frame/nomination-pools/src/mock.rs | 2 +- frame/nomination-pools/src/tests.rs | 11 ++++- 4 files changed, 83 insertions(+), 7 deletions(-) diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 3809a70440d27..a8c4e50daa0b5 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -1141,7 +1141,7 @@ pub mod pallet { use sp_runtime::traits::CheckedAdd; /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); #[pallet::pallet] #[pallet::generate_store(pub(crate) trait Store)] @@ -2191,8 +2191,8 @@ impl Pallet { } /// Remove everything related to the given bonded pool. 
/// - /// All sub-pools are also deleted. All accounts are dusted and the leftover of the reward - /// account is returned to the depositor. + /// Metadata and all of the sub-pools are also deleted. All accounts are dusted and the leftover + /// of the reward account is returned to the depositor. pub fn dissolve_pool(bonded_pool: BondedPool) { let reward_account = bonded_pool.reward_account(); let bonded_account = bonded_pool.bonded_account(); @@ -2229,6 +2229,9 @@ impl Pallet { T::Currency::make_free_balance_be(&bonded_pool.bonded_account(), Zero::zero()); Self::deposit_event(Event::::Destroyed { pool_id: bonded_pool.id }); + // Remove bonded pool metadata. + Metadata::::remove(bonded_pool.id); + bonded_pool.remove(); } @@ -2406,8 +2409,7 @@ impl Pallet { let reward_pools = RewardPools::::iter_keys().collect::>(); assert_eq!(bonded_pools, reward_pools); - // TODO: can't check this right now: https://github.com/paritytech/substrate/issues/12077 - // assert!(Metadata::::iter_keys().all(|k| bonded_pools.contains(&k))); + assert!(Metadata::::iter_keys().all(|k| bonded_pools.contains(&k))); assert!(SubPoolsStorage::::iter_keys().all(|k| bonded_pools.contains(&k))); assert!(MaxPools::::get().map_or(true, |max| bonded_pools.len() <= (max as usize))); diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs index 412c954a2bbf3..bcf23ee863a39 100644 --- a/frame/nomination-pools/src/migration.rs +++ b/frame/nomination-pools/src/migration.rs @@ -100,6 +100,7 @@ pub mod v1 { fn post_upgrade() -> Result<(), &'static str> { // new version must be set. assert_eq!(Pallet::::on_chain_storage_version(), 1); + Pallet::::try_state(frame_system::Pallet::::block_number())?; Ok(()) } } @@ -384,3 +385,67 @@ pub mod v2 { } } } + +pub mod v3 { + use super::*; + + /// This migration removes stale bonded-pool metadata, if any. 
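+ ///
+ /// A hedged sketch of how a runtime might schedule this migration; `Runtime`, `Block`
+ /// and `AllPalletsWithSystem` are illustrative names from a typical runtime and are not
+ /// part of this change:
+ ///
+ /// ```ignore
+ /// pub type Executive = frame_executive::Executive<
+ ///     Runtime,
+ ///     Block,
+ ///     frame_system::ChainContext<Runtime>,
+ ///     Runtime,
+ ///     AllPalletsWithSystem,
+ ///     pallet_nomination_pools::migration::v3::MigrateToV3<Runtime>,
+ /// >;
+ /// ```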
+ pub struct MigrateToV3(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV3 { + fn on_runtime_upgrade() -> Weight { + let current = Pallet::::current_storage_version(); + let onchain = Pallet::::on_chain_storage_version(); + + log!( + info, + "Running migration with current storage version {:?} / onchain {:?}", + current, + onchain + ); + + if current > onchain { + let mut metadata_iterated = 0u64; + let mut metadata_removed = 0u64; + Metadata::::iter_keys() + .filter(|id| { + metadata_iterated += 1; + !BondedPools::::contains_key(&id) + }) + .collect::>() + .into_iter() + .for_each(|id| { + metadata_removed += 1; + Metadata::::remove(&id); + }); + current.put::>(); + // metadata iterated + bonded pools read + a storage version read + let total_reads = metadata_iterated * 2 + 1; + // metadata removed + a storage version write + let total_writes = metadata_removed + 1; + T::DbWeight::get().reads_writes(total_reads, total_writes) + } else { + log!(info, "MigrateToV3 should be removed"); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + ensure!( + Pallet::::current_storage_version() > Pallet::::on_chain_storage_version(), + "the on_chain version is equal or more than the current one" + ); + Ok(()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + ensure!( + Metadata::::iter_keys().all(|id| BondedPools::::contains_key(&id)), + "not all of the stale metadata has been removed" + ); + ensure!(Pallet::::on_chain_storage_version() == 3, "wrong storage version"); + Ok(()) + } + } +} diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index 3bb260e56f180..4428c28558f18 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -290,7 +290,7 @@ impl ExtBuilder { let amount_to_bond = Pools::depositor_min_bond(); Balances::make_free_balance_be(&10, amount_to_bond * 5); assert_ok!(Pools::create(RawOrigin::Signed(10).into(), amount_to_bond, 900, 901, 902)); - + assert_ok!(Pools::set_metadata(Origin::signed(900), 1, vec![1, 1])); let last_pool = LastPoolId::::get(); for (account_id, bonded) in self.members { Balances::make_free_balance_be(&account_id, bonded * 2); diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 705f8ce3a6449..20ba2b76fe31a 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -47,6 +47,7 @@ fn test_setup_works() { assert_eq!(SubPoolsStorage::::count(), 0); assert_eq!(PoolMembers::::count(), 1); assert_eq!(StakingMock::bonding_duration(), 3); + assert!(Metadata::::contains_key(1)); let last_pool = LastPoolId::::get(); assert_eq!( @@ -1928,6 +1929,7 @@ mod claim_payout { ] ); + assert!(!Metadata::::contains_key(1)); // original ed + ed put into reward account + reward + bond + dust. 
assert_eq!(Balances::free_balance(&10), 35 + 5 + 13 + 10 + 1); }) @@ -3159,6 +3161,7 @@ mod withdraw_unbonded { Event::Destroyed { pool_id: 1 } ] ); + assert!(!Metadata::::contains_key(1)); assert_eq!( balances_events_since_last_call(), vec![ @@ -3269,6 +3272,10 @@ mod withdraw_unbonded { CurrentEra::set(CurrentEra::get() + 3); + // set metadata to check that it's being removed on dissolve + assert_ok!(Pools::set_metadata(Origin::signed(900), 1, vec![1, 1])); + assert!(Metadata::::contains_key(1)); + // when assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); @@ -3287,6 +3294,7 @@ mod withdraw_unbonded { Event::Destroyed { pool_id: 1 } ] ); + assert!(!Metadata::::contains_key(1)); assert_eq!( balances_events_since_last_call(), vec![ @@ -3797,6 +3805,7 @@ mod withdraw_unbonded { Event::Destroyed { pool_id: 1 }, ] ); + assert!(!Metadata::::contains_key(1)); }) } } @@ -4039,7 +4048,7 @@ mod set_state { // Then assert_eq!(BondedPool::::get(1).unwrap().state, PoolState::Destroying); - // If the pool is not ok to be open, it cannot be permissionleslly set to a state that + // If the pool is not ok to be open, it cannot be permissionlessly set to a state that // isn't destroying unsafe_set_state(1, PoolState::Open); assert_noop!( From 20037d573c0c2b319998541c6140df4c3d07eb56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Miko=C5=82ajczyk?= Date: Tue, 6 Sep 2022 10:01:35 +0200 Subject: [PATCH 099/348] `try-runtime`::`follow-chain` - keep connection (#12167) * Refactor RPC module * Add flag to `follow-chain` * Multithreading remark * fmt * O_O * unused import * cmon * accidental removal reverted * remove RpcHeaderProvider * mut refs * fmt * no mutability * now? * now? * arc mutex * async mutex * async mutex * uhm * connect in constructor * remove dep * old import * another take * trigger polkadot pipeline * trigger pipeline --- bin/node/cli/tests/common.rs | 5 +- .../frame/remote-externalities/src/rpc_api.rs | 175 +++++++++++------- .../cli/src/commands/execute_block.rs | 9 +- .../cli/src/commands/follow_chain.rs | 61 +++--- .../cli/src/commands/offchain_worker.rs | 3 +- utils/frame/try-runtime/cli/src/lib.rs | 13 +- 6 files changed, 160 insertions(+), 106 deletions(-) diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 9c739c2cf2d28..3b83f4339611d 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -24,7 +24,7 @@ use nix::{ unistd::Pid, }; use node_primitives::Block; -use remote_externalities::rpc_api; +use remote_externalities::rpc_api::RpcService; use std::{ io::{BufRead, BufReader, Read}, ops::{Deref, DerefMut}, @@ -71,9 +71,10 @@ pub async fn wait_n_finalized_blocks( pub async fn wait_n_finalized_blocks_from(n: usize, url: &str) { let mut built_blocks = std::collections::HashSet::new(); let mut interval = tokio::time::interval(Duration::from_secs(2)); + let rpc_service = RpcService::new(url, false).await.unwrap(); loop { - if let Ok(block) = rpc_api::get_finalized_head::(url.to_string()).await { + if let Ok(block) = rpc_service.get_finalized_head::().await { built_blocks.insert(block); if built_blocks.len() > n { break diff --git a/utils/frame/remote-externalities/src/rpc_api.rs b/utils/frame/remote-externalities/src/rpc_api.rs index 37555de480d4c..3ea30a30221a2 100644 --- a/utils/frame/remote-externalities/src/rpc_api.rs +++ b/utils/frame/remote-externalities/src/rpc_api.rs @@ -19,82 +19,131 @@ // TODO: Consolidate one off RPC calls https://github.com/paritytech/substrate/issues/8988 use jsonrpsee::{ - 
core::client::ClientT, + core::client::{Client, ClientT}, rpc_params, + types::ParamsSer, ws_client::{WsClient, WsClientBuilder}, }; -use sp_runtime::{ - generic::SignedBlock, - traits::{Block as BlockT, Header as HeaderT}, -}; - -/// Get the header of the block identified by `at` -pub async fn get_header(from: S, at: Block::Hash) -> Result -where - Block: BlockT, - Block::Header: serde::de::DeserializeOwned, - S: AsRef, -{ - let client = build_client(from).await?; +use serde::de::DeserializeOwned; +use sp_runtime::{generic::SignedBlock, traits::Block as BlockT}; +use std::sync::Arc; - client - .request::("chain_getHeader", rpc_params!(at)) - .await - .map_err(|e| format!("chain_getHeader request failed: {:?}", e)) +enum RpcCall { + GetHeader, + GetFinalizedHead, + GetBlock, + GetRuntimeVersion, } -/// Get the finalized head -pub async fn get_finalized_head(from: S) -> Result -where - Block: BlockT, - S: AsRef, -{ - let client = build_client(from).await?; +impl RpcCall { + fn as_str(&self) -> &'static str { + match self { + RpcCall::GetHeader => "chain_getHeader", + RpcCall::GetFinalizedHead => "chain_getFinalizedHead", + RpcCall::GetBlock => "chain_getBlock", + RpcCall::GetRuntimeVersion => "state_getRuntimeVersion", + } + } +} +/// General purpose method for making RPC calls. +async fn make_request<'a, T: DeserializeOwned>( + client: &Arc, + call: RpcCall, + params: Option>, +) -> Result { client - .request::("chain_getFinalizedHead", None) + .request::(call.as_str(), params) .await - .map_err(|e| format!("chain_getFinalizedHead request failed: {:?}", e)) + .map_err(|e| format!("{} request failed: {:?}", call.as_str(), e)) } -/// Get the signed block identified by `at`. -pub async fn get_block(from: S, at: Block::Hash) -> Result -where - S: AsRef, - Block: BlockT + serde::de::DeserializeOwned, - Block::Header: HeaderT, -{ - let client = build_client(from).await?; - let signed_block = client - .request::>("chain_getBlock", rpc_params!(at)) - .await - .map_err(|e| format!("chain_getBlock request failed: {:?}", e))?; - - Ok(signed_block.block) +enum ConnectionPolicy { + Reuse(Arc), + Reconnect, } -/// Build a websocket client that connects to `from`. -async fn build_client>(from: S) -> Result { - WsClientBuilder::default() - .max_request_body_size(u32::MAX) - .build(from.as_ref()) - .await - .map_err(|e| format!("`WsClientBuilder` failed to build: {:?}", e)) +/// Simple RPC service that is capable of keeping the connection. +/// +/// Service will connect to `uri` for the first time already during initialization. +/// +/// Be careful with reusing the connection in a multithreaded environment. +pub struct RpcService { + uri: String, + policy: ConnectionPolicy, } -/// Get the runtime version of a given chain. -pub async fn get_runtime_version( - from: S, - at: Option, -) -> Result -where - S: AsRef, - Block: BlockT + serde::de::DeserializeOwned, - Block::Header: HeaderT, -{ - let client = build_client(from).await?; - client - .request::("state_getRuntimeVersion", rpc_params!(at)) - .await - .map_err(|e| format!("state_getRuntimeVersion request failed: {:?}", e)) +impl RpcService { + /// Creates a new RPC service. If `keep_connection`, then connects to `uri` right away. 
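+ ///
+ /// A minimal usage sketch, assuming a reachable node; the URL is illustrative:
+ ///
+ /// ```ignore
+ /// let rpc_service = RpcService::new("ws://localhost:9944", true).await?;
+ /// let head = rpc_service.get_finalized_head::<Block>().await?;
+ /// let header = rpc_service.get_header::<Block>(head).await?;
+ /// ```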
+ pub async fn new>(uri: S, keep_connection: bool) -> Result { + let policy = if keep_connection { + ConnectionPolicy::Reuse(Arc::new(Self::build_client(uri.as_ref()).await?)) + } else { + ConnectionPolicy::Reconnect + }; + Ok(Self { uri: uri.as_ref().to_string(), policy }) + } + + /// Returns the address at which requests are sent. + pub fn uri(&self) -> String { + self.uri.clone() + } + + /// Build a websocket client that connects to `self.uri`. + async fn build_client>(uri: S) -> Result { + WsClientBuilder::default() + .max_request_body_size(u32::MAX) + .build(uri) + .await + .map_err(|e| format!("`WsClientBuilder` failed to build: {:?}", e)) + } + + /// Generic method for making RPC requests. + async fn make_request<'a, T: DeserializeOwned>( + &self, + call: RpcCall, + params: Option>, + ) -> Result { + match self.policy { + // `self.keep_connection` must have been `true`. + ConnectionPolicy::Reuse(ref client) => make_request(client, call, params).await, + ConnectionPolicy::Reconnect => { + let client = Arc::new(Self::build_client(&self.uri).await?); + make_request(&client, call, params).await + }, + } + } + + /// Get the header of the block identified by `at`. + pub async fn get_header(&self, at: Block::Hash) -> Result + where + Block: BlockT, + Block::Header: DeserializeOwned, + { + self.make_request(RpcCall::GetHeader, rpc_params!(at)).await + } + + /// Get the finalized head. + pub async fn get_finalized_head(&self) -> Result { + self.make_request(RpcCall::GetFinalizedHead, None).await + } + + /// Get the signed block identified by `at`. + pub async fn get_block( + &self, + at: Block::Hash, + ) -> Result { + Ok(self + .make_request::>(RpcCall::GetBlock, rpc_params!(at)) + .await? + .block) + } + + /// Get the runtime version of a given chain. + pub async fn get_runtime_version( + &self, + at: Option, + ) -> Result { + self.make_request(RpcCall::GetRuntimeVersion, rpc_params!(at)).await + } } diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 6a3ef24ff3771..70ba3615f874a 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -89,6 +89,8 @@ impl ExecuteBlockCmd { Block::Hash: FromStr, ::Err: Debug, { + let rpc_service = rpc_api::RpcService::new(ws_uri, false).await?; + match (&self.block_at, &self.state) { (Some(block_at), State::Snap { .. }) => hash_of::(block_at), (Some(block_at), State::Live { .. }) => { @@ -100,9 +102,7 @@ impl ExecuteBlockCmd { target: LOG_TARGET, "No --block-at or --at provided, using the latest finalized block instead" ); - remote_externalities::rpc_api::get_finalized_head::(ws_uri) - .await - .map_err(Into::into) + rpc_service.get_finalized_head::().await.map_err(Into::into) }, (None, State::Live { at: Some(at), .. 
}) => hash_of::(at), _ => { @@ -148,7 +148,8 @@ where let block_ws_uri = command.block_ws_uri::(); let block_at = command.block_at::(block_ws_uri.clone()).await?; - let block: Block = rpc_api::get_block::(block_ws_uri.clone(), block_at).await?; + let rpc_service = rpc_api::RpcService::new(block_ws_uri.clone(), false).await?; + let block: Block = rpc_service.get_block::(block_at).await?; let parent_hash = block.header().parent_hash(); log::info!( target: LOG_TARGET, diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 9f598694d0ff1..f493d5c10cd29 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -27,13 +27,13 @@ use jsonrpsee::{ ws_client::WsClientBuilder, }; use parity_scale_codec::{Decode, Encode}; -use remote_externalities::{rpc_api, Builder, Mode, OnlineConfig}; +use remote_externalities::{rpc_api::RpcService, Builder, Mode, OnlineConfig}; use sc_executor::NativeExecutionDispatch; use sc_service::Configuration; use serde::de::DeserializeOwned; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use std::{collections::VecDeque, fmt::Debug, marker::PhantomData, str::FromStr}; +use std::{collections::VecDeque, fmt::Debug, str::FromStr}; const SUB: &str = "chain_subscribeFinalizedHeads"; const UN_SUB: &str = "chain_unsubscribeFinalizedHeads"; @@ -60,6 +60,10 @@ pub struct FollowChainCmd { /// round-robin fashion. #[clap(long, default_value = "none")] try_state: frame_try_runtime::TryStateSelect, + + /// If present, a single connection to a node will be kept and reused for fetching blocks. + #[clap(long)] + keep_connection: bool, } /// Start listening for with `SUB` at `url`. @@ -93,21 +97,16 @@ where Block::Header: HeaderT, { /// Awaits for the header of the block with hash `hash`. - async fn get_header(&mut self, hash: Block::Hash) -> Block::Header; -} - -struct RpcHeaderProvider { - uri: String, - _phantom: PhantomData, + async fn get_header(&self, hash: Block::Hash) -> Block::Header; } #[async_trait] -impl HeaderProvider for RpcHeaderProvider +impl HeaderProvider for RpcService where Block::Header: DeserializeOwned, { - async fn get_header(&mut self, hash: Block::Hash) -> Block::Header { - rpc_api::get_header::(&self.uri, hash).await.unwrap() + async fn get_header(&self, hash: Block::Hash) -> Block::Header { + self.get_header::(hash).await.unwrap() } } @@ -148,19 +147,20 @@ where /// /// Returned headers are guaranteed to be ordered. There are no missing headers (even if some of /// them lack justification). 
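///
/// A hedged sketch of how this type is consumed (mirroring `run` below):
///
/// ```ignore
/// let mut finalized_headers = FinalizedHeaders::new(&rpc_service, subscription);
/// while let Some(header) = finalized_headers.next().await {
///     // fetch the full block for `header.hash()` and process it
/// }
/// ```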
-struct FinalizedHeaders, HS: HeaderSubscription> { - header_provider: HP, +struct FinalizedHeaders<'a, Block: BlockT, HP: HeaderProvider, HS: HeaderSubscription> +{ + header_provider: &'a HP, subscription: HS, fetched_headers: VecDeque, last_returned: Option<::Hash>, } -impl, HS: HeaderSubscription> - FinalizedHeaders +impl<'a, Block: BlockT, HP: HeaderProvider, HS: HeaderSubscription> + FinalizedHeaders<'a, Block, HP, HS> where ::Header: DeserializeOwned, { - pub fn new(header_provider: HP, subscription: HS) -> Self { + pub fn new(header_provider: &'a HP, subscription: HS) -> Self { Self { header_provider, subscription, @@ -229,19 +229,16 @@ where let executor = build_executor::(&shared, &config); let execution = shared.execution; - let header_provider: RpcHeaderProvider = - RpcHeaderProvider { uri: command.uri.clone(), _phantom: PhantomData {} }; - let mut finalized_headers: FinalizedHeaders< - Block, - RpcHeaderProvider, - Subscription, - > = FinalizedHeaders::new(header_provider, subscription); + let rpc_service = RpcService::new(&command.uri, command.keep_connection).await?; + + let mut finalized_headers: FinalizedHeaders> = + FinalizedHeaders::new(&rpc_service, subscription); while let Some(header) = finalized_headers.next().await { let hash = header.hash(); let number = header.number(); - let block = rpc_api::get_block::(&command.uri, hash).await.unwrap(); + let block = rpc_service.get_block::(hash).await.unwrap(); log::debug!( target: LOG_TARGET, @@ -333,12 +330,14 @@ where mod tests { use super::*; use sp_runtime::testing::{Block as TBlock, ExtrinsicWrapper, Header}; + use std::sync::Arc; + use tokio::sync::Mutex; type Block = TBlock>; type BlockNumber = u64; type Hash = H256; - struct MockHeaderProvider(pub VecDeque); + struct MockHeaderProvider(pub Arc>>); fn headers() -> Vec
{ let mut headers = vec![Header::new_from_number(0)]; @@ -353,8 +352,8 @@ mod tests { #[async_trait] impl HeaderProvider for MockHeaderProvider { - async fn get_header(&mut self, _hash: Hash) -> Header { - let height = self.0.pop_front().unwrap(); + async fn get_header(&self, _hash: Hash) -> Header { + let height = self.0.lock().await.pop_front().unwrap(); headers()[height as usize].clone() } } @@ -372,9 +371,9 @@ mod tests { async fn finalized_headers_works_when_every_block_comes_from_subscription() { let heights = vec![4, 5, 6, 7]; - let provider = MockHeaderProvider(vec![].into()); + let provider = MockHeaderProvider(Default::default()); let subscription = MockHeaderSubscription(heights.clone().into()); - let mut headers = FinalizedHeaders::new(provider, subscription); + let mut headers = FinalizedHeaders::new(&provider, subscription); for h in heights { assert_eq!(h, headers.next().await.unwrap().number); @@ -389,9 +388,9 @@ mod tests { // Consecutive headers will be requested in the reversed order. let heights_not_in_subscription = vec![5, 9, 8, 7]; - let provider = MockHeaderProvider(heights_not_in_subscription.into()); + let provider = MockHeaderProvider(Arc::new(Mutex::new(heights_not_in_subscription.into()))); let subscription = MockHeaderSubscription(heights_in_subscription.into()); - let mut headers = FinalizedHeaders::new(provider, subscription); + let mut headers = FinalizedHeaders::new(&provider, subscription); for h in all_heights { assert_eq!(h, headers.next().await.unwrap().number); diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index 11ceb0a81cf37..a579692abd9e2 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -119,7 +119,8 @@ where let header_at = command.header_at::()?; let header_ws_uri = command.header_ws_uri::(); - let header = rpc_api::get_header::(header_ws_uri.clone(), header_at).await?; + let rpc_service = rpc_api::RpcService::new(header_ws_uri.clone(), false).await?; + let header = rpc_service.get_header::(header_at).await?; log::info!( target: LOG_TARGET, "fetched header from {:?}, block number: {:?}", diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index 76679c43f7f14..c71496e0b850c 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -267,7 +267,8 @@ use parity_scale_codec::Decode; use remote_externalities::{ - Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, TestExternalities, + rpc_api::RpcService, Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, + TestExternalities, }; use sc_chain_spec::ChainSpec; use sc_cli::{ @@ -541,8 +542,8 @@ impl State { impl TryRuntimeCmd { pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> where - Block: BlockT + serde::de::DeserializeOwned, - Block::Header: serde::de::DeserializeOwned, + Block: BlockT + DeserializeOwned, + Block::Header: DeserializeOwned, Block::Hash: FromStr, ::Err: Debug, NumberFor: FromStr, @@ -626,13 +627,15 @@ where /// /// If the spec names don't match, if `relaxed`, then it emits a warning, else it panics. /// If the spec versions don't match, it only ever emits a warning. 
-pub(crate) async fn ensure_matching_spec( +pub(crate) async fn ensure_matching_spec( uri: String, expected_spec_name: String, expected_spec_version: u32, relaxed: bool, ) { - match remote_externalities::rpc_api::get_runtime_version::(uri.clone(), None) + let rpc_service = RpcService::new(uri.clone(), false).await.unwrap(); + match rpc_service + .get_runtime_version::(None) .await .map(|version| (String::from(version.spec_name.clone()), version.spec_version)) .map(|(spec_name, spec_version)| (spec_name.to_lowercase(), spec_version)) From 12b0d8e9957c6159d433cd4601d5f297b9bf4313 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Tue, 6 Sep 2022 14:16:29 +0200 Subject: [PATCH 100/348] Uniques: Small fix in docs (#12143) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * small fix in docs * Update frame/uniques/src/lib.rs Co-authored-by: Bastian Köcher * Update lib.rs * Update lib.rs * Update lib.rs Co-authored-by: Bastian Köcher --- frame/uniques/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 3b0cf6dc673c9..bbecb08e51b3b 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -910,7 +910,8 @@ pub mod pallet { /// Approve an item to be transferred by a delegated third-party account. /// - /// Origin must be Signed and must be the owner of the `item`. + /// The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be + /// either the owner of the `item` or the admin of the collection. /// /// - `collection`: The collection of the item to be approved for delegated transfer. /// - `item`: The item of the item to be approved for delegated transfer. From dcfaa41d11e70cfc976e292b2975c7285cf3668c Mon Sep 17 00:00:00 2001 From: NingLin-P Date: Tue, 6 Sep 2022 20:24:31 +0800 Subject: [PATCH 101/348] staking: avoid proportional slashing leak dust into chunks that should not be slashed (#12058) * replace slash ratio with remaining ratio Signed-off-by: linning * little refactor Signed-off-by: linning * fix test Signed-off-by: linning * fix typo Signed-off-by: linning * revert refactor Signed-off-by: linning * rounding up instead of remaining ratio Signed-off-by: linning * address comment Signed-off-by: linning * Update frame/nomination-pools/test-staking/src/lib.rs Signed-off-by: linning Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/nomination-pools/test-staking/src/lib.rs Signed-off-by: linning Co-authored-by: Roman Useinov Signed-off-by: linning Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Roman Useinov Co-authored-by: parity-processbot <> --- .../nomination-pools/test-staking/src/lib.rs | 10 ++++---- frame/staking/src/lib.rs | 12 +++++++--- frame/staking/src/tests.rs | 24 +++++++++++++++---- primitives/runtime/src/lib.rs | 2 +- 4 files changed, 33 insertions(+), 15 deletions(-) diff --git a/frame/nomination-pools/test-staking/src/lib.rs b/frame/nomination-pools/test-staking/src/lib.rs index 5a7cd494362ca..8c6ecae937063 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -490,12 +490,10 @@ fn pool_slash_proportional() { assert_eq!( pool_events_since_last_call(), vec![ - // This last pool got slashed only the leftover dust. Otherwise in principle, this - // chunk/pool should have not been affected. 
- PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 127, balance: 19 }, - // This pool got slashed 12.5, which rounded down to 12. - PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 128, balance: 8 }, - // This pool got slashed 12.5, which rounded down to 12. + // This era got slashed 12.5, which rounded up to 13. + PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 128, balance: 7 }, + // This era got slashed 12 instead of 12.5 because an earlier chunk got 0.5 more + // slashed, and 12 is all the remaining slash PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 129, balance: 8 }, // Bonded pool got slashed for 25, remaining 15 in it. PoolsEvent::PoolSlashed { pool_id: 1, balance: 15 } diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 38466c8cb1d62..4a1f0cb886ed0 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -310,7 +310,7 @@ use scale_info::TypeInfo; use sp_runtime::{ curve::PiecewiseLinear, traits::{AtLeast32BitUnsigned, Convert, Saturating, StaticLookup, Zero}, - Perbill, Perquintill, RuntimeDebug, + Perbill, Perquintill, Rounding, RuntimeDebug, }; use sp_staking::{ offence::{Offence, OffenceError, ReportOffence}, @@ -564,6 +564,7 @@ impl StakingLedger { return Zero::zero() } + use sp_runtime::PerThing as _; use sp_staking::OnStakerSlash as _; let mut remaining_slash = slash_amount; let pre_slash_total = self.total; @@ -594,7 +595,12 @@ impl StakingLedger { } }); let affected_balance = self.active.saturating_add(unbonding_affected_balance); - let ratio = Perquintill::from_rational(slash_amount, affected_balance); + let ratio = Perquintill::from_rational_with_rounding( + slash_amount, + affected_balance, + Rounding::Up, + ) + .unwrap_or_else(|_| Perquintill::one()); ( Some(ratio), affected_indices.chain((0..first_slashable_index).rev()).collect::>(), @@ -618,7 +624,7 @@ impl StakingLedger { let mut slash_out_of = |target: &mut BalanceOf, slash_remaining: &mut BalanceOf| { let mut slash_from_target = if let Some(ratio) = maybe_proportional { - ratio * (*target) + ratio.mul_ceil(*target) } else { *slash_remaining } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 107d494a9711b..0d00f5a5e61df 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -31,7 +31,7 @@ use pallet_balances::Error as BalancesError; use sp_runtime::{ assert_eq_error_rate, traits::{BadOrigin, Dispatchable}, - Perbill, Percent, + Perbill, Percent, Rounding, }; use sp_staking::{ offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, @@ -5125,6 +5125,18 @@ fn proportional_ledger_slash_works() { assert_eq!(LedgerSlashPerEra::get().0, 0); assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(6, 30), (7, 30)])); + // Given + ledger.unlocking = bounded_vec![c(4, 100), c(5, 100), c(6, 100), c(7, 100)]; + ledger.total = 4 * 100; + ledger.active = 0; + // When the first 2 chunks don't overlap with the affected range of unlock eras. 
+ assert_eq!(ledger.slash(15, 0, 3), 15); + // Then + assert_eq!(ledger.unlocking, vec![c(4, 100), c(5, 100), c(6, 100 - 8), c(7, 100 - 7)]); + assert_eq!(ledger.total, 4 * 100 - 15); + assert_eq!(LedgerSlashPerEra::get().0, 0); + assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(6, 92), (7, 93)])); + // Given ledger.unlocking = bounded_vec![c(4, 40), c(5, 100), c(6, 10), c(7, 250)]; ledger.active = 500; @@ -5247,6 +5259,7 @@ fn proportional_ledger_slash_works() { assert_eq!(LedgerSlashPerEra::get().1, BTreeMap::from([(4, 0), (5, 0), (6, 0), (7, 0)])); // Given + use sp_runtime::PerThing as _; let slash = u64::MAX as Balance * 2; let value = u64::MAX as Balance * 2; let unit = 100; @@ -5259,18 +5272,19 @@ fn proportional_ledger_slash_works() { ledger.active = unit; ledger.total = unit * 4 + value; // When - assert_eq!(ledger.slash(slash, 0, 0), slash - 43); + assert_eq!(ledger.slash(slash, 0, 0), slash - 5); // Then // The amount slashed out of `unit` let affected_balance = value + unit * 4; - let ratio = Perquintill::from_rational(slash, affected_balance); + let ratio = + Perquintill::from_rational_with_rounding(slash, affected_balance, Rounding::Up).unwrap(); // `unit` after the slash is applied let unit_slashed = { - let unit_slash = ratio * unit; + let unit_slash = ratio.mul_ceil(unit); unit - unit_slash }; let value_slashed = { - let value_slash = ratio * value; + let value_slash = ratio.mul_ceil(value); value - value_slash }; assert_eq!(ledger.active, unit_slashed); diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index cd43038522914..811ca33dbfa25 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -97,7 +97,7 @@ pub use sp_arithmetic::helpers_128bit; pub use sp_arithmetic::{ traits::SaturatedConversion, FixedI128, FixedI64, FixedPointNumber, FixedPointOperand, FixedU128, InnerOf, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, Rational128, - UpperOf, + Rounding, UpperOf, }; pub use either::Either; From 3f70bcedf020d072e2f611a2448f2af46a4ae9b4 Mon Sep 17 00:00:00 2001 From: Sasha Gryaznov Date: Wed, 7 Sep 2022 11:54:17 +0300 Subject: [PATCH 102/348] [contracts] API host functions: remove `seal_` name prefix + enable aliasing (#12126) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * works but ugly * refactored + renamed host fns * fixed tests * fix benchmarks * updated marco docs * Update frame/contracts/proc-macro/src/lib.rs Co-authored-by: Alexander Theißen * fix for the duplicated prefixed alias bug + test * refactored a bit * fix warnings + try to make macro rustdoc compile * fmt after clearing * examples update + nocompile * add seal_ prefixes to unstable host functions * updated after a review Co-authored-by: Alexander Theißen --- frame/contracts/fixtures/call_runtime.wat | 4 +- frame/contracts/proc-macro/src/lib.rs | 72 ++++-- frame/contracts/src/benchmarking/mod.rs | 22 +- frame/contracts/src/wasm/mod.rs | 66 +++++- frame/contracts/src/wasm/runtime.rs | 260 +++++++++++----------- 5 files changed, 262 insertions(+), 162 deletions(-) diff --git a/frame/contracts/fixtures/call_runtime.wat b/frame/contracts/fixtures/call_runtime.wat index d5467f6e95e3e..62fa08680a097 100644 --- a/frame/contracts/fixtures/call_runtime.wat +++ b/frame/contracts/fixtures/call_runtime.wat @@ -1,6 +1,6 @@ ;; This passes its input to `seal_call_runtime` and returns the return value to its caller. 
(module - (import "__unstable__" "seal_call_runtime" (func $seal_call_runtime (param i32 i32) (result i32))) + (import "__unstable__" "call_runtime" (func $call_runtime (param i32 i32) (result i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -17,7 +17,7 @@ ) ;; Just use the call passed as input and store result to memory (i32.store (i32.const 0) - (call $seal_call_runtime + (call $call_runtime (i32.const 4) ;; Pointer where the call is stored (i32.load (i32.const 0)) ;; Size of the call ) diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index 65c13bb1fc607..648bf0fd1f812 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -174,22 +174,16 @@ impl ToTokens for HostFn { } impl HostFn { - pub fn try_from(item: syn::Item) -> syn::Result { + pub fn try_from(item: syn::ItemFn) -> syn::Result { let err = |span, msg| { let msg = format!("Invalid host function definition. {}", msg); syn::Error::new(span, msg) }; let msg = "only #[version()] or #[unstable] attribute is allowed."; let span = item.span(); - let item = match item { - syn::Item::Fn(i_fn) => Ok(i_fn), - _ => Err(err(span, msg)), - }?; - + let mut attrs = item.attrs.clone(); + attrs.retain(|a| !(a.path.is_ident("doc") || a.path.is_ident("prefixed_alias"))); let name = item.sig.ident.to_string(); - let attrs: Vec<&syn::Attribute> = - item.attrs.iter().filter(|m| !m.path.is_ident("doc")).collect(); - let module = match attrs.len() { 0 => Ok("seal0".to_string()), 1 => { @@ -306,6 +300,7 @@ impl HostFn { } } } + impl EnvDef { pub fn try_from(item: syn::ItemMod) -> syn::Result { let span = item.span(); @@ -316,9 +311,32 @@ impl EnvDef { .ok_or(err("Invalid environment definition, expected `mod` to be inlined."))? 
.1; + let extract_fn = |i: &syn::Item| match i { + syn::Item::Fn(i_fn) => Some(i_fn.clone()), + _ => None, + }; + + let selector = |a: &syn::Attribute| a.path.is_ident("prefixed_alias"); + + let aliases = items + .iter() + .filter_map(extract_fn) + .filter(|i| i.attrs.iter().any(selector)) + .map(|mut i| { + i.attrs.retain(|i| !selector(i)); + i.sig.ident = syn::Ident::new( + &format!("seal_{}", &i.sig.ident.to_string()), + i.sig.ident.span(), + ); + i + }) + .map(|i| HostFn::try_from(i)); + let host_funcs = items .iter() - .map(|i| HostFn::try_from(i.clone())) + .filter_map(extract_fn) + .map(|i| HostFn::try_from(i)) + .chain(aliases) .collect::, _>>()?; Ok(Self { host_funcs }) @@ -484,7 +502,7 @@ fn expand_impls(def: &mut EnvDef) -> proc_macro2::TokenStream { /// ```nocompile /// #[define_env] /// pub mod some_env { -/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result<(), TrapReason> { +/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result<(), TrapReason> { /// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) /// } /// } @@ -499,17 +517,45 @@ fn expand_impls(def: &mut EnvDef) -> proc_macro2::TokenStream { /// #[define_env] /// pub mod some_env { /// #[version(1)] -/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { +/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { +/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) +/// } +/// +/// #[unstable] +/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { +/// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) +/// } +/// } +/// ``` +/// +/// In legacy versions of pallet_contracts, it was a naming convention that all host functions had +/// to be named with the `seal_` prefix. For the sake of backwards compatibility, each host function +/// now can get a such prefix-named alias function generated by marking it by the +/// `#[prefixed_alias]` attribute: +/// +/// ## Example +/// +/// ```nocompile +/// #[define_env] +/// pub mod some_env { +/// #[version(1)] +/// #[prefixed_alias] +/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { /// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) /// } /// /// #[unstable] -/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { +/// fn some_host_fn(ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { /// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) /// } /// } /// ``` /// +/// In this example, the following host functions will be generated by the macro: +/// - `some_host_fn()` in module `seal1`, +/// - `seal_some_host_fn()` in module `seal1`, +/// - `some_host_fn()` in module `__unstable__`. +/// /// Only following return types are allowed for the host functions defined with the macro: /// - `Result<(), TrapReason>`, /// - `Result`, diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index e29cb51728e1e..2f8d101504ffa 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -920,7 +920,7 @@ benchmarks! 
{ memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_set_storage", + name: "set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -968,7 +968,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_set_storage", + name: "set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1016,7 +1016,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_set_storage", + name: "set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1068,7 +1068,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_clear_storage", + name: "clear_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1115,7 +1115,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_clear_storage", + name: "clear_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1163,7 +1163,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_get_storage", + name: "get_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1217,7 +1217,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_get_storage", + name: "get_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1272,7 +1272,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_contains_storage", + name: "contains_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1319,7 +1319,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_contains_storage", + name: "contains_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1367,7 +1367,7 @@ benchmarks! { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_take_storage", + name: "take_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], @@ -1421,7 +1421,7 @@ benchmarks! 
{ memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { module: "__unstable__", - name: "seal_take_storage", + name: "take_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), }], diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index f989c21b00ffc..6790830b97e02 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -929,7 +929,7 @@ mod tests { (module (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "__unstable__" "seal_contains_storage" (func $seal_contains_storage (param i32 i32) (result i32))) + (import "__unstable__" "contains_storage" (func $contains_storage (param i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) @@ -947,7 +947,7 @@ mod tests { ) ;; Call seal_clear_storage and save what it returns at 0 (i32.store (i32.const 0) - (call $seal_contains_storage + (call $contains_storage (i32.const 8) ;; key_ptr (i32.load (i32.const 4)) ;; key_len ) @@ -1678,11 +1678,53 @@ mod tests { ) (func (export "deploy")) ) +"#; + + const CODE_TIMESTAMP_NOW_UNPREFIXED: &str = r#" +(module + (import "seal0" "now" (func $now (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; size of our buffer is 32 bytes + (data (i32.const 32) "\20") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call") + ;; This stores the block timestamp in the buffer + (call $now (i32.const 0) (i32.const 32)) + + ;; assert len == 8 + (call $assert + (i32.eq + (i32.load (i32.const 32)) + (i32.const 8) + ) + ) + + ;; assert that contents of the buffer is equal to the i64 value of 1111. 
+ (call $assert + (i64.eq + (i64.load (i32.const 0)) + (i64.const 1111) + ) + ) + ) + (func (export "deploy")) +) "#; #[test] fn now() { assert_ok!(execute(CODE_TIMESTAMP_NOW, vec![], MockExt::default())); + assert_ok!(execute(CODE_TIMESTAMP_NOW_UNPREFIXED, vec![], MockExt::default())); } const CODE_MINIMUM_BALANCE: &str = r#" @@ -2221,7 +2263,7 @@ mod tests { #[cfg(feature = "unstable-interface")] const CODE_CALL_RUNTIME: &str = r#" (module - (import "__unstable__" "seal_call_runtime" (func $seal_call_runtime (param i32 i32) (result i32))) + (import "__unstable__" "call_runtime" (func $call_runtime (param i32 i32) (result i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -2238,7 +2280,7 @@ mod tests { ) ;; Just use the call passed as input and store result to memory (i32.store (i32.const 0) - (call $seal_call_runtime + (call $call_runtime (i32.const 4) ;; Pointer where the call is stored (i32.load (i32.const 0)) ;; Size of the call ) @@ -2350,7 +2392,7 @@ mod tests { (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "__unstable__" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32 i32) (result i32))) + (import "__unstable__" "set_storage" (func $set_storage (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; [0, 4) size of input buffer @@ -2367,7 +2409,7 @@ mod tests { ) ;; Store the passed value to the passed key and store result to memory (i32.store (i32.const 168) - (call $seal_set_storage + (call $set_storage (i32.const 8) ;; key_ptr (i32.load (i32.const 4)) ;; key_len (i32.add ;; value_ptr = 8 + key_len @@ -2421,7 +2463,7 @@ mod tests { (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "__unstable__" "seal_get_storage" (func $seal_get_storage (param i32 i32 i32 i32) (result i32))) + (import "__unstable__" "get_storage" (func $get_storage (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; [0, 4) size of input buffer (160 bytes as we copy the key+len here) @@ -2442,7 +2484,7 @@ mod tests { ) ;; Load a storage value and result of this call into the output buffer (i32.store (i32.const 168) - (call $seal_get_storage + (call $get_storage (i32.const 12) ;; key_ptr (i32.load (i32.const 8)) ;; key_len (i32.const 172) ;; Pointer to the output buffer @@ -2515,7 +2557,7 @@ mod tests { (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "__unstable__" "seal_clear_storage" (func $seal_clear_storage (param i32 i32) (result i32))) + (import "__unstable__" "clear_storage" (func $clear_storage (param i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; size of input buffer @@ -2532,7 +2574,7 @@ mod tests { ) ;; Call seal_clear_storage and save what it returns at 0 (i32.store (i32.const 0) - (call $seal_clear_storage + (call $clear_storage (i32.const 8) ;; key_ptr (i32.load (i32.const 4)) ;; key_len ) @@ -2601,7 +2643,7 @@ mod tests { (module (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "__unstable__" "seal_take_storage" (func $seal_take_storage (param i32 i32 i32 i32) (result i32))) + (import 
"__unstable__" "take_storage" (func $take_storage (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; [0, 4) size of input buffer (160 bytes as we copy the key+len here) @@ -2623,7 +2665,7 @@ mod tests { ;; Load a storage value and result of this call into the output buffer (i32.store (i32.const 168) - (call $seal_take_storage + (call $take_storage (i32.const 12) ;; key_ptr (i32.load (i32.const 8)) ;; key_len (i32.const 172) ;; Pointer to the output buffer diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 296f322f494d0..334223e3b8432 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -957,7 +957,7 @@ pub mod env { /// This call is supposed to be called only by instrumentation injected code. /// /// - amount: How much gas is used. - fn gas(ctx: Runtime, amount: u32) -> Result<(), TrapReason> { + fn gas(ctx: Runtime, amount: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::MeteringBlock(amount))?; Ok(()) } @@ -966,8 +966,9 @@ pub mod env { /// /// Equivalent to the newer version of `seal_set_storage` with the exception of the return /// type. Still a valid thing to call when not interested in the return value. - fn seal_set_storage( - ctx: Runtime, + #[prefixed_alias] + fn set_storage( + ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32, @@ -994,8 +995,9 @@ pub mod env { /// Returns the size of the pre-existing value at the specified key if any. Otherwise /// `SENTINEL` is returned as a sentinel value. #[version(1)] - fn seal_set_storage( - ctx: Runtime, + #[prefixed_alias] + fn set_storage( + ctx: Runtime, key_ptr: u32, value_ptr: u32, value_len: u32, @@ -1020,8 +1022,9 @@ pub mod env { /// Returns the size of the pre-existing value at the specified key if any. Otherwise /// `SENTINEL` is returned as a sentinel value. #[unstable] - fn seal_set_storage( - ctx: Runtime, + #[prefixed_alias] + fn set_storage( + ctx: Runtime, key_ptr: u32, key_len: u32, value_ptr: u32, @@ -1034,7 +1037,8 @@ pub mod env { /// /// Equivalent to the newer version of `seal_clear_storage` with the exception of the return /// type. Still a valid thing to call when not interested in the return value. - fn seal_clear_storage(ctx: Runtime, key_ptr: u32) -> Result<(), TrapReason> { + #[prefixed_alias] + fn clear_storage(ctx: Runtime, key_ptr: u32) -> Result<(), TrapReason> { ctx.clear_storage(KeyType::Fix, key_ptr).map(|_| ()) } @@ -1050,11 +1054,8 @@ pub mod env { /// Returns the size of the pre-existing value at the specified key if any. Otherwise /// `SENTINEL` is returned as a sentinel value. #[unstable] - fn seal_clear_storage( - ctx: Runtime, - key_ptr: u32, - key_len: u32, - ) -> Result { + #[prefixed_alias] + fn clear_storage(ctx: Runtime, key_ptr: u32, key_len: u32) -> Result { ctx.clear_storage(KeyType::Variable(key_len), key_ptr) } @@ -1073,8 +1074,9 @@ pub mod env { /// # Errors /// /// `ReturnCode::KeyNotFound` - fn seal_get_storage( - ctx: Runtime, + #[prefixed_alias] + fn get_storage( + ctx: Runtime, key_ptr: u32, out_ptr: u32, out_len_ptr: u32, @@ -1101,8 +1103,9 @@ pub mod env { /// /// `ReturnCode::KeyNotFound` #[unstable] - fn seal_get_storage( - ctx: Runtime, + #[prefixed_alias] + fn get_storage( + ctx: Runtime, key_ptr: u32, key_len: u32, out_ptr: u32, @@ -1124,7 +1127,8 @@ pub mod env { /// /// Returns the size of the pre-existing value at the specified key if any. Otherwise /// `SENTINEL` is returned as a sentinel value. 
- fn seal_contains_storage(ctx: Runtime, key_ptr: u32) -> Result { + #[prefixed_alias] + fn contains_storage(ctx: Runtime, key_ptr: u32) -> Result { ctx.contains_storage(KeyType::Fix, key_ptr) } @@ -1142,11 +1146,8 @@ pub mod env { /// Returns the size of the pre-existing value at the specified key if any. Otherwise /// `SENTINEL` is returned as a sentinel value. #[unstable] - fn seal_contains_storage( - ctx: Runtime, - key_ptr: u32, - key_len: u32, - ) -> Result { + #[prefixed_alias] + fn contains_storage(ctx: Runtime, key_ptr: u32, key_len: u32) -> Result { ctx.contains_storage(KeyType::Variable(key_len), key_ptr) } @@ -1164,8 +1165,9 @@ pub mod env { /// /// `ReturnCode::KeyNotFound` #[unstable] - fn seal_take_storage( - ctx: Runtime, + #[prefixed_alias] + fn take_storage( + ctx: Runtime, key_ptr: u32, key_len: u32, out_ptr: u32, @@ -1200,8 +1202,9 @@ pub mod env { /// # Errors /// /// `ReturnCode::TransferFailed` - fn seal_transfer( - ctx: Runtime, + #[prefixed_alias] + fn transfer( + ctx: Runtime, account_ptr: u32, _account_len: u32, value_ptr: u32, @@ -1233,8 +1236,9 @@ pub mod env { /// The values `_callee_len` and `_value_len` are ignored because the encoded sizes /// of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards /// compatibility. Consider switching to the newest version of this function. - fn seal_call( - ctx: Runtime, + #[prefixed_alias] + fn call( + ctx: Runtime, callee_ptr: u32, _callee_len: u32, gas: u64, @@ -1285,8 +1289,9 @@ pub mod env { /// `ReturnCode::TransferFailed` /// `ReturnCode::NotCallable` #[version(1)] - fn seal_call( - ctx: Runtime, + #[prefixed_alias] + fn call( + ctx: Runtime, flags: u32, callee_ptr: u32, gas: u64, @@ -1330,8 +1335,9 @@ pub mod env { /// `ReturnCode::CalleeReverted`: Output buffer is returned. /// `ReturnCode::CalleeTrapped` /// `ReturnCode::CodeNotFound` - fn seal_delegate_call( - ctx: Runtime, + #[prefixed_alias] + fn delegate_call( + ctx: Runtime, flags: u32, code_hash_ptr: u32, input_data_ptr: u32, @@ -1360,8 +1366,9 @@ pub mod env { /// The values `_code_hash_len` and `_value_len` are ignored because the encoded sizes /// of those types are fixed through `[`MaxEncodedLen`]. The fields exist for backwards /// compatibility. Consider switching to the newest version of this function. - fn seal_instantiate( - ctx: Runtime, + #[prefixed_alias] + fn instantiate( + ctx: Runtime, code_hash_ptr: u32, _code_hash_len: u32, gas: u64, @@ -1432,8 +1439,9 @@ pub mod env { /// `ReturnCode::TransferFailed` /// `ReturnCode::CodeNotFound` #[version(1)] - fn seal_instantiate( - ctx: Runtime, + #[prefixed_alias] + fn instantiate( + ctx: Runtime, code_hash_ptr: u32, gas: u64, value_ptr: u32, @@ -1473,8 +1481,9 @@ pub mod env { /// The value `_beneficiary_len` is ignored because the encoded sizes /// this type is fixed through `[`MaxEncodedLen`]. The field exist for backwards /// compatibility. Consider switching to the newest version of this function. - fn seal_terminate( - ctx: Runtime, + #[prefixed_alias] + fn terminate( + ctx: Runtime, beneficiary_ptr: u32, _beneficiary_len: u32, ) -> Result<(), TrapReason> { @@ -1497,7 +1506,8 @@ pub mod env { /// - Failed to send the balance to the beneficiary. /// - The deletion queue is full. 
#[version(1)] - fn seal_terminate(ctx: Runtime, beneficiary_ptr: u32) -> Result<(), TrapReason> { + #[prefixed_alias] + fn terminate(ctx: Runtime, beneficiary_ptr: u32) -> Result<(), TrapReason> { ctx.terminate(beneficiary_ptr) } @@ -1511,7 +1521,8 @@ pub mod env { /// # Note /// /// This function traps if the input was previously forwarded by a `seal_call`. - fn seal_input(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + #[prefixed_alias] + fn input(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::InputBase)?; if let Some(input) = ctx.input_data.take() { ctx.write_sandbox_output(out_ptr, out_len_ptr, &input, false, |len| { @@ -1542,7 +1553,7 @@ pub mod env { /// /// Using a reserved bit triggers a trap. fn seal_return( - ctx: Runtime, + ctx: Runtime, flags: u32, data_ptr: u32, data_len: u32, @@ -1564,7 +1575,8 @@ pub mod env { /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the /// extrinsic will be returned. Otherwise, if this call is initiated by another contract then /// the address of the contract will be returned. The value is encoded as T::AccountId. - fn seal_caller(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + #[prefixed_alias] + fn caller(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Caller)?; Ok(ctx.write_sandbox_output( out_ptr, @@ -1583,7 +1595,8 @@ pub mod env { /// `T::AccountId`. Traps otherwise. /// /// Returned value is a u32-encoded boolean: (0 = false, 1 = true). - fn seal_is_contract(ctx: Runtime, account_ptr: u32) -> Result { + #[prefixed_alias] + fn is_contract(ctx: Runtime, account_ptr: u32) -> Result { ctx.charge_gas(RuntimeCosts::IsContract)?; let address: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(account_ptr)?; @@ -1604,8 +1617,9 @@ pub mod env { /// # Errors /// /// `ReturnCode::KeyNotFound` - fn seal_code_hash( - ctx: Runtime, + #[prefixed_alias] + fn code_hash( + ctx: Runtime, account_ptr: u32, out_ptr: u32, out_len_ptr: u32, @@ -1634,11 +1648,8 @@ pub mod env { /// - `out_ptr`: pointer to the linear memory where the returning value is written to. /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and /// the value length is written to. - fn seal_own_code_hash( - ctx: Runtime, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + #[prefixed_alias] + fn own_code_hash(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::OwnCodeHash)?; let code_hash_encoded = &ctx.ext.own_code_hash().encode(); Ok(ctx.write_sandbox_output( @@ -1660,7 +1671,8 @@ pub mod env { /// and `false` indicates that the caller is another contract. /// /// Returned value is a u32-encoded boolean: (0 = false, 1 = true). - fn seal_caller_is_origin(ctx: Runtime) -> Result { + #[prefixed_alias] + fn caller_is_origin(ctx: Runtime) -> Result { ctx.charge_gas(RuntimeCosts::CallerIsOrigin)?; Ok(ctx.ext.caller_is_origin() as u32) } @@ -1671,11 +1683,8 @@ pub mod env { /// `out_len_ptr` must point to a u32 value that describes the available space at /// `out_ptr`. This call overwrites it with the size of the value. If the available /// space at `out_ptr` is less than the size of the value a trap is triggered. 
- fn seal_address( - ctx: Runtime, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + #[prefixed_alias] + fn address(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Address)?; Ok(ctx.write_sandbox_output( out_ptr, @@ -1699,8 +1708,9 @@ pub mod env { /// /// It is recommended to avoid specifying very small values for `gas` as the prices for a single /// gas can be smaller than one. - fn seal_weight_to_fee( - ctx: Runtime, + #[prefixed_alias] + fn weight_to_fee( + ctx: Runtime, gas: u64, out_ptr: u32, out_len_ptr: u32, @@ -1724,11 +1734,8 @@ pub mod env { /// space at `out_ptr` is less than the size of the value a trap is triggered. /// /// The data is encoded as Gas. - fn seal_gas_left( - ctx: Runtime, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + #[prefixed_alias] + fn gas_left(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::GasLeft)?; let gas_left = &ctx.ext.gas_meter().gas_left().encode(); Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, gas_left, false, already_charged)?) @@ -1742,11 +1749,8 @@ pub mod env { /// space at `out_ptr` is less than the size of the value a trap is triggered. /// /// The data is encoded as T::Balance. - fn seal_balance( - ctx: Runtime, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + #[prefixed_alias] + fn balance(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Balance)?; Ok(ctx.write_sandbox_output( out_ptr, @@ -1765,8 +1769,9 @@ pub mod env { /// space at `out_ptr` is less than the size of the value a trap is triggered. /// /// The data is encoded as T::Balance. - fn seal_value_transferred( - ctx: Runtime, + #[prefixed_alias] + fn value_transferred( + ctx: Runtime, out_ptr: u32, out_len_ptr: u32, ) -> Result<(), TrapReason> { @@ -1792,8 +1797,9 @@ pub mod env { /// # Deprecation /// /// This function is deprecated. Users should migrate to the version in the "seal1" module. - fn seal_random( - ctx: Runtime, + #[prefixed_alias] + fn random( + ctx: Runtime, subject_ptr: u32, subject_len: u32, out_ptr: u32, @@ -1835,8 +1841,9 @@ pub mod env { /// call this on later blocks until the block number returned is later than the latest /// commitment. #[version(1)] - fn seal_random( - ctx: Runtime, + #[prefixed_alias] + fn random( + ctx: Runtime, subject_ptr: u32, subject_len: u32, out_ptr: u32, @@ -1862,7 +1869,8 @@ pub mod env { /// `out_len_ptr` must point to a u32 value that describes the available space at /// `out_ptr`. This call overwrites it with the size of the value. If the available /// space at `out_ptr` is less than the size of the value a trap is triggered. - fn seal_now(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { + #[prefixed_alias] + fn now(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Now)?; Ok(ctx.write_sandbox_output( out_ptr, @@ -1876,11 +1884,8 @@ pub mod env { /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. /// /// The data is encoded as T::Balance. 
- fn seal_minimum_balance( - ctx: Runtime, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + #[prefixed_alias] + fn minimum_balance(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::MinimumBalance)?; Ok(ctx.write_sandbox_output( out_ptr, @@ -1901,8 +1906,9 @@ pub mod env { /// # Deprecation /// /// There is no longer a tombstone deposit. This function always returns 0. - fn seal_tombstone_deposit( - ctx: Runtime, + #[prefixed_alias] + fn tombstone_deposit( + ctx: Runtime, out_ptr: u32, out_len_ptr: u32, ) -> Result<(), TrapReason> { @@ -1917,8 +1923,9 @@ pub mod env { /// /// The state rent functionality was removed. This is stub only exists for /// backwards compatiblity - fn seal_restore_to( - ctx: Runtime, + #[prefixed_alias] + fn restore_to( + ctx: Runtime, _dest_ptr: u32, _dest_len: u32, _code_hash_ptr: u32, @@ -1939,8 +1946,9 @@ pub mod env { /// The state rent functionality was removed. This is stub only exists for /// backwards compatiblity #[version(1)] - fn seal_restore_to( - ctx: Runtime, + #[prefixed_alias] + fn restore_to( + ctx: Runtime, _dest_ptr: u32, _code_hash_ptr: u32, _rent_allowance_ptr: u32, @@ -1959,8 +1967,9 @@ pub mod env { /// - topics_len - the length of the topics buffer. Pass 0 if you want to pass an empty vector. /// - data_ptr - a pointer to a raw data buffer which will saved along the event. /// - data_len - the length of the data buffer. - fn seal_deposit_event( - ctx: Runtime, + #[prefixed_alias] + fn deposit_event( + ctx: Runtime, topics_ptr: u32, topics_len: u32, data_ptr: u32, @@ -2017,8 +2026,9 @@ pub mod env { /// /// The state rent functionality was removed. This is stub only exists for /// backwards compatiblity. - fn seal_set_rent_allowance( - ctx: Runtime, + #[prefixed_alias] + fn set_rent_allowance( + ctx: Runtime, _value_ptr: u32, _value_len: u32, ) -> Result<(), TrapReason> { @@ -2033,7 +2043,8 @@ pub mod env { /// The state rent functionality was removed. This is stub only exists for /// backwards compatiblity. #[version(1)] - fn seal_set_rent_allowance(ctx: Runtime, _value_ptr: u32) -> Result<(), TrapReason> { + #[prefixed_alias] + fn set_rent_allowance(ctx: Runtime, _value_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::DebugMessage)?; Ok(()) } @@ -2044,11 +2055,8 @@ pub mod env { /// /// The state rent functionality was removed. This is stub only exists for /// backwards compatiblity. - fn seal_rent_allowance( - ctx: Runtime, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + #[prefixed_alias] + fn rent_allowance(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Balance)?; let rent_allowance = >::max_value().encode(); Ok(ctx.write_sandbox_output( @@ -2066,11 +2074,8 @@ pub mod env { /// `out_len_ptr` must point to a u32 value that describes the available space at /// `out_ptr`. This call overwrites it with the size of the value. If the available /// space at `out_ptr` is less than the size of the value a trap is triggered. - fn seal_block_number( - ctx: Runtime, - out_ptr: u32, - out_len_ptr: u32, - ) -> Result<(), TrapReason> { + #[prefixed_alias] + fn block_number(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::BlockNumber)?; Ok(ctx.write_sandbox_output( out_ptr, @@ -2098,8 +2103,9 @@ pub mod env { /// - `input_len`: the length of the input data in bytes. 
/// - `output_ptr`: the pointer into the linear memory where the output data is placed. The /// function will write the result directly into this buffer. - fn seal_hash_sha2_256( - ctx: Runtime, + #[prefixed_alias] + fn hash_sha2_256( + ctx: Runtime, input_ptr: u32, input_len: u32, output_ptr: u32, @@ -2125,8 +2131,9 @@ pub mod env { /// - `input_len`: the length of the input data in bytes. /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The /// function will write the result directly into this buffer. - fn seal_hash_keccak_256( - ctx: Runtime, + #[prefixed_alias] + fn hash_keccak_256( + ctx: Runtime, input_ptr: u32, input_len: u32, output_ptr: u32, @@ -2152,8 +2159,9 @@ pub mod env { /// - `input_len`: the length of the input data in bytes. /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The /// function will write the result directly into this buffer. - fn seal_hash_blake2_256( - ctx: Runtime, + #[prefixed_alias] + fn hash_blake2_256( + ctx: Runtime, input_ptr: u32, input_len: u32, output_ptr: u32, @@ -2179,8 +2187,9 @@ pub mod env { /// - `input_len`: the length of the input data in bytes. /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The /// function will write the result directly into this buffer. - fn seal_hash_blake2_128( - ctx: Runtime, + #[prefixed_alias] + fn hash_blake2_128( + ctx: Runtime, input_ptr: u32, input_len: u32, output_ptr: u32, @@ -2200,8 +2209,9 @@ pub mod env { /// /// If no chain extension exists the contract will trap with the `NoChainExtension` /// module error. - fn seal_call_chain_extension( - ctx: Runtime, + #[prefixed_alias] + fn call_chain_extension( + ctx: Runtime, id: u32, input_ptr: u32, input_len: u32, @@ -2243,8 +2253,9 @@ pub mod env { /// not being executed as an RPC. For example, they could allow users to disable logging /// through compile time flags (cargo features) for on-chain deployment. Additionally, the /// return value of this function can be cached in order to prevent further calls at runtime. - fn seal_debug_message( - ctx: Runtime, + #[prefixed_alias] + fn debug_message( + ctx: Runtime, str_ptr: u32, str_len: u32, ) -> Result { @@ -2297,8 +2308,9 @@ pub mod env { /// This function is unstable and subject to change (or removal) in the future. Do not /// deploy a contract using it to a production chain. 
#[unstable] - fn seal_call_runtime( - ctx: Runtime, + #[prefixed_alias] + fn call_runtime( + ctx: Runtime, call_ptr: u32, call_len: u32, ) -> Result { @@ -2334,8 +2346,9 @@ pub mod env { /// # Errors /// /// `ReturnCode::EcdsaRecoverFailed` - fn seal_ecdsa_recover( - ctx: Runtime, + #[prefixed_alias] + fn ecdsa_recover( + ctx: Runtime, signature_ptr: u32, message_hash_ptr: u32, output_ptr: u32, @@ -2387,10 +2400,8 @@ pub mod env { /// # Errors /// /// `ReturnCode::CodeNotFound` - fn seal_set_code_hash( - ctx: Runtime, - code_hash_ptr: u32, - ) -> Result { + #[prefixed_alias] + fn set_code_hash(ctx: Runtime, code_hash_ptr: u32) -> Result { ctx.charge_gas(RuntimeCosts::SetCodeHash)?; let code_hash: CodeHash<::T> = ctx.read_sandbox_memory_as(code_hash_ptr)?; match ctx.ext.set_code_hash(code_hash) { @@ -2418,8 +2429,9 @@ pub mod env { /// # Errors /// /// `ReturnCode::EcdsaRecoverFailed` - fn seal_ecdsa_to_eth_address( - ctx: Runtime, + #[prefixed_alias] + fn ecdsa_to_eth_address( + ctx: Runtime, key_ptr: u32, out_ptr: u32, ) -> Result { From 021f71264133d1cf0db5bd3e0112dbf8dcecdd9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 7 Sep 2022 14:49:44 +0100 Subject: [PATCH 103/348] Update contract weights (#12192) * Dummy commit * ".git/.scripts/bench-bot.sh" pallet dev pallet_contracts Co-authored-by: command-bot <> --- frame/contracts/src/weights.rs | 1876 +++++++++++++++++--------------- 1 file changed, 984 insertions(+), 892 deletions(-) diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index f1f80ba7a43df..4ec30437c57f1 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -18,22 +18,22 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-06-22, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-09-06, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_contracts // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 +// --pallet=pallet_contracts +// --chain=dev // --output=./frame/contracts/src/weights.rs // --template=./.maintain/frame-weight-template.hbs @@ -166,25 +166,25 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(1_654_000 as u64) + Weight::from_ref_time(2_948_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. 
fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(8_564_000 as u64) + Weight::from_ref_time(14_858_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(868_000 as u64).saturating_mul(k as u64)) + .saturating_add(Weight::from_ref_time(890_000 as u64).saturating_mul(k as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) } // Storage: Contracts DeletionQueue (r:1 w:0) - /// The range of component `q` is `[0, 1024]`. + /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_944_000 as u64).saturating_mul(q as u64)) + Weight::from_ref_time(14_778_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_228_000 as u64).saturating_mul(q as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -192,9 +192,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(19_016_000 as u64) + Weight::from_ref_time(21_622_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(49_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -202,31 +202,33 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) + // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(205_194_000 as u64) + Weight::from_ref_time(207_601_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(53_000 as u64).saturating_mul(c as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) + // Storage: System EventTopics (r:3 w:3) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. 
fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(288_487_000 as u64) + Weight::from_ref_time(287_355_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(c as u64)) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().writes(9 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -234,526 +236,560 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) + // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(186_136_000 as u64) + Weight::from_ref_time(178_912_000 as u64) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().writes(7 as u64)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) + // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - Weight::from_ref_time(149_232_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + Weight::from_ref_time(159_188_000 as u64) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) + // Storage: System EventTopics (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. 
fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(51_721_000 as u64) + Weight::from_ref_time(52_177_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(48_000 as u64).saturating_mul(c as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(Weight::from_ref_time(47_000 as u64).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Contracts OwnerInfoOf (r:1 w:1) + // Storage: System EventTopics (r:1 w:1) // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(30_016_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + Weight::from_ref_time(37_011_000 as u64) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) + // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - Weight::from_ref_time(27_192_000 as u64) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + Weight::from_ref_time(40_206_000 as u64) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(206_405_000 as u64) - // Standard Error: 112_000 - .saturating_add(Weight::from_ref_time(40_987_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(224_309_000 as u64) + // Standard Error: 53_000 + .saturating_add(Weight::from_ref_time(36_885_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(106_220_000 as u64) - // Standard Error: 710_000 - .saturating_add(Weight::from_ref_time(307_648_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) + Weight::from_ref_time(166_137_000 as u64) + // Standard Error: 428_000 + .saturating_add(Weight::from_ref_time(214_063_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(104_498_000 as u64) - // Standard Error: 633_000 - .saturating_add(Weight::from_ref_time(368_901_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) + Weight::from_ref_time(174_069_000 as u64) + // Standard Error: 443_000 + .saturating_add(Weight::from_ref_time(272_524_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(208_696_000 as u64) - // Standard Error: 101_000 - .saturating_add(Weight::from_ref_time(44_445_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(225_577_000 as u64) + // Standard Error: 39_000 + .saturating_add(Weight::from_ref_time(40_425_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(205_612_000 as u64) - // Standard Error: 68_000 - .saturating_add(Weight::from_ref_time(17_145_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(221_162_000 as u64) + // Standard Error: 20_000 + .saturating_add(Weight::from_ref_time(15_192_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(206_947_000 as u64) - // Standard Error: 107_000 - .saturating_add(Weight::from_ref_time(40_789_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(224_517_000 as u64) + // Standard Error: 38_000 + .saturating_add(Weight::from_ref_time(36_722_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(208_692_000 as u64) - // Standard Error: 109_000 - .saturating_add(Weight::from_ref_time(40_600_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(223_864_000 as u64) + // Standard Error: 35_000 + .saturating_add(Weight::from_ref_time(36_355_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(209_811_000 as u64) - // Standard Error: 208_000 - .saturating_add(Weight::from_ref_time(116_831_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(226_396_000 as u64) + // Standard Error: 86_000 + .saturating_add(Weight::from_ref_time(112_018_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(207_406_000 as u64) - // Standard Error: 117_000 - .saturating_add(Weight::from_ref_time(40_702_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(222_775_000 as u64) + // Standard Error: 45_000 + .saturating_add(Weight::from_ref_time(36_691_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(209_260_000 as u64) - // Standard Error: 130_000 - .saturating_add(Weight::from_ref_time(40_479_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(223_202_000 as u64) + // Standard Error: 42_000 + .saturating_add(Weight::from_ref_time(36_393_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(206_448_000 as u64) - // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(40_134_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(223_507_000 as u64) + // Standard Error: 44_000 + .saturating_add(Weight::from_ref_time(36_211_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(206_969_000 as u64) - // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(40_251_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(225_500_000 as u64) + // Standard Error: 58_000 + .saturating_add(Weight::from_ref_time(36_290_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(211_611_000 as u64) - // Standard Error: 175_000 - .saturating_add(Weight::from_ref_time(98_675_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(225_310_000 as u64) + // Standard Error: 86_000 + .saturating_add(Weight::from_ref_time(98_714_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(134_484_000 as u64) - // Standard Error: 57_000 - .saturating_add(Weight::from_ref_time(19_329_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(149_071_000 as u64) + // Standard Error: 30_000 + .saturating_add(Weight::from_ref_time(18_084_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(208_556_000 as u64) - // Standard Error: 125_000 - .saturating_add(Weight::from_ref_time(40_328_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(222_918_000 as u64) + // Standard Error: 48_000 + .saturating_add(Weight::from_ref_time(34_848_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(268_886_000 as u64) + Weight::from_ref_time(280_543_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(9_627_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(9_599_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. - fn seal_return(_r: u32, ) -> Weight { - Weight::from_ref_time(203_591_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn seal_return(r: u32, ) -> Weight { + Weight::from_ref_time(219_310_000 as u64) + // Standard Error: 303_000 + .saturating_add(Weight::from_ref_time(1_187_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(204_258_000 as u64) + Weight::from_ref_time(221_399_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(183_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(182_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) // Storage: Contracts DeletionQueue (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. 
fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(206_625_000 as u64) - // Standard Error: 672_000 - .saturating_add(Weight::from_ref_time(59_377_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(r as u64))) + Weight::from_ref_time(222_285_000 as u64) + // Standard Error: 163_000 + .saturating_add(Weight::from_ref_time(52_104_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes((6 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(208_866_000 as u64) - // Standard Error: 164_000 - .saturating_add(Weight::from_ref_time(133_438_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(233_586_000 as u64) + // Standard Error: 90_000 + .saturating_add(Weight::from_ref_time(133_209_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(220_860_000 as u64) - // Standard Error: 209_000 - .saturating_add(Weight::from_ref_time(239_951_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(231_267_000 as u64) + // Standard Error: 118_000 + .saturating_add(Weight::from_ref_time(232_306_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:80 w:80) + // Storage: System EventTopics (r:2 w:2) /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. 
fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(439_782_000 as u64) - // Standard Error: 1_643_000 - .saturating_add(Weight::from_ref_time(264_687_000 as u64).saturating_mul(t as u64)) - // Standard Error: 323_000 - .saturating_add(Weight::from_ref_time(67_636_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) + Weight::from_ref_time(438_016_000 as u64) + // Standard Error: 538_000 + .saturating_add(Weight::from_ref_time(179_846_000 as u64).saturating_mul(t as u64)) + // Standard Error: 147_000 + .saturating_add(Weight::from_ref_time(66_893_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(t as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(140_280_000 as u64) - // Standard Error: 82_000 - .saturating_add(Weight::from_ref_time(32_717_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(156_730_000 as u64) + // Standard Error: 48_000 + .saturating_add(Weight::from_ref_time(29_019_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(161_247_000 as u64) - // Standard Error: 883_000 - .saturating_add(Weight::from_ref_time(423_997_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) + Weight::from_ref_time(185_733_000 as u64) + // Standard Error: 384_000 + .saturating_add(Weight::from_ref_time(427_052_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(529_247_000 as u64) - // Standard Error: 2_745_000 - .saturating_add(Weight::from_ref_time(85_282_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(55 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(53 as u64)) - .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(520_776_000 as u64) + // Standard Error: 1_346_000 + .saturating_add(Weight::from_ref_time(93_389_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(52 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(50 as u64)) + .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(529_812_000 as u64) - // Standard Error: 2_513_000 - .saturating_add(Weight::from_ref_time(74_554_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(55 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(53 as u64)) - .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(504_262_000 as u64) + // Standard Error: 1_198_000 + .saturating_add(Weight::from_ref_time(68_581_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(52 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(50 as u64)) + .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(184_803_000 as u64) - // Standard Error: 733_000 - .saturating_add(Weight::from_ref_time(404_933_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) + Weight::from_ref_time(188_507_000 as u64) + // Standard Error: 514_000 + .saturating_add(Weight::from_ref_time(416_965_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(500_958_000 as u64) - // Standard Error: 2_980_000 - .saturating_add(Weight::from_ref_time(75_996_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(55 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(52 as u64)) - .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(481_328_000 as u64) + // Standard Error: 1_304_000 + .saturating_add(Weight::from_ref_time(70_798_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(51 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(49 as u64)) + .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(177_682_000 as u64) - // Standard Error: 743_000 - .saturating_add(Weight::from_ref_time(338_172_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) + Weight::from_ref_time(200_040_000 as u64) + // Standard Error: 292_000 + .saturating_add(Weight::from_ref_time(341_568_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(465_285_000 as u64) - // Standard Error: 2_599_000 - .saturating_add(Weight::from_ref_time(155_106_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(55 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(443_969_000 as u64) + // Standard Error: 1_076_000 + .saturating_add(Weight::from_ref_time(154_674_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(51 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(179_118_000 as u64) - // Standard Error: 572_000 - .saturating_add(Weight::from_ref_time(311_083_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) + Weight::from_ref_time(194_849_000 as u64) + // Standard Error: 310_000 + .saturating_add(Weight::from_ref_time(320_113_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(423_056_000 as u64) - // Standard Error: 2_037_000 - .saturating_add(Weight::from_ref_time(69_665_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(54 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(416_712_000 as u64) + // Standard Error: 932_000 + .saturating_add(Weight::from_ref_time(63_903_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(51 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(188_884_000 as u64) - // Standard Error: 761_000 - .saturating_add(Weight::from_ref_time(432_781_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) + Weight::from_ref_time(194_760_000 as u64) + // Standard Error: 375_000 + .saturating_add(Weight::from_ref_time(438_855_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(532_408_000 as u64) - // Standard Error: 3_348_000 - .saturating_add(Weight::from_ref_time(164_943_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(55 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(53 as u64)) - .saturating_add(T::DbWeight::get().writes((5 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(509_485_000 as u64) + // Standard Error: 1_486_000 + .saturating_add(Weight::from_ref_time(162_250_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(52 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(49 as u64)) + .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(127_181_000 as u64) - // Standard Error: 1_495_000 - .saturating_add(Weight::from_ref_time(1_500_589_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) + Weight::from_ref_time(162_740_000 as u64) + // Standard Error: 691_000 + .saturating_add(Weight::from_ref_time(1_338_567_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 3_803_000 - .saturating_add(Weight::from_ref_time(14_860_909_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) + // Standard Error: 8_018_000 + .saturating_add(Weight::from_ref_time(16_181_274_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().reads((160 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes((160 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 6_045_000 - .saturating_add(Weight::from_ref_time(14_797_140_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads((79 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + // Standard Error: 8_806_000 + .saturating_add(Weight::from_ref_time(15_988_209_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads((158 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes((79 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:81 w:81) // Storage: Contracts CodeStorage (r:2 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:82 w:82) /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. 
fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(9_196_444_000 as u64) - // Standard Error: 20_486_000 - .saturating_add(Weight::from_ref_time(1_458_153_000 as u64).saturating_mul(t as u64)) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(9_718_000 as u64).saturating_mul(c as u64)) - .saturating_add(T::DbWeight::get().reads(85 as u64)) + Weight::from_ref_time(9_685_415_000 as u64) + // Standard Error: 6_048_000 + .saturating_add(Weight::from_ref_time(1_132_315_000 as u64).saturating_mul(t as u64)) + // Standard Error: 9_000 + .saturating_add(Weight::from_ref_time(9_753_000 as u64).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(167 as u64)) .saturating_add(T::DbWeight::get().reads((81 as u64).saturating_mul(t as u64))) - .saturating_add(T::DbWeight::get().writes(81 as u64)) + .saturating_add(T::DbWeight::get().writes(163 as u64)) .saturating_add(T::DbWeight::get().writes((81 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 36_253_000 - .saturating_add(Weight::from_ref_time(21_201_529_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((320 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - .saturating_add(T::DbWeight::get().writes((320 as u64).saturating_mul(r as u64))) + // Standard Error: 30_079_000 + .saturating_add(Weight::from_ref_time(21_155_990_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().reads((400 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(5 as u64)) + .saturating_add(T::DbWeight::get().writes((400 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:81 w:81) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -761,457 +797,467 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:0) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) + // Storage: System EventTopics (r:82 w:82) /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. 
fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(12_282_498_000 as u64) - // Standard Error: 48_112_000 - .saturating_add(Weight::from_ref_time(720_795_000 as u64).saturating_mul(t as u64)) - // Standard Error: 22_000 - .saturating_add(Weight::from_ref_time(124_274_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(167 as u64)) + Weight::from_ref_time(12_321_649_000 as u64) + // Standard Error: 77_000 + .saturating_add(Weight::from_ref_time(125_883_000 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(249 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(t as u64))) - .saturating_add(T::DbWeight::get().writes(165 as u64)) + .saturating_add(T::DbWeight::get().writes(247 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(203_959_000 as u64) - // Standard Error: 142_000 - .saturating_add(Weight::from_ref_time(61_311_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(224_020_000 as u64) + // Standard Error: 63_000 + .saturating_add(Weight::from_ref_time(57_966_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(349_915_000 as u64) - // Standard Error: 40_000 - .saturating_add(Weight::from_ref_time(320_652_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 86_000 + .saturating_add(Weight::from_ref_time(319_801_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
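The recurring change across these hunks is the added `System EventTopics (r:2 w:2)` access, which moves the fixed DB component of most host functions from `reads(4) + writes(1)` to `reads(6) + writes(3)`. A rough sketch of what that shift costs, assuming illustrative per-operation constants (the real values come from the runtime's configured `DbWeight`, not from this patch):

    // Illustrative only: READ/WRITE are assumptions standing in for the
    // runtime's configured DbWeight, not numbers taken from this patch.
    const READ: u64 = 25_000_000;   // assumed ref-time cost of one storage read
    const WRITE: u64 = 100_000_000; // assumed ref-time cost of one storage write

    fn db_ref_time(reads: u64, writes: u64) -> u64 {
        reads.saturating_mul(READ).saturating_add(writes.saturating_mul(WRITE))
    }

    fn main() {
        let before = db_ref_time(4, 1); // old fixed DB component
        let after = db_ref_time(6, 3);  // after adding System EventTopics (r:2 w:2)
        // The delta is exactly the two extra reads plus two extra writes.
        assert_eq!(after - before, 2 * READ + 2 * WRITE);
    }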
fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(209_219_000 as u64) - // Standard Error: 157_000 - .saturating_add(Weight::from_ref_time(73_728_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(222_308_000 as u64) + // Standard Error: 78_000 + .saturating_add(Weight::from_ref_time(70_579_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(208_860_000 as u64) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(245_718_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 97_000 + .saturating_add(Weight::from_ref_time(247_328_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(206_165_000 as u64) - // Standard Error: 138_000 - .saturating_add(Weight::from_ref_time(51_644_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(221_851_000 as u64) + // Standard Error: 57_000 + .saturating_add(Weight::from_ref_time(48_446_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(255_955_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_090_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 93_000 + .saturating_add(Weight::from_ref_time(97_258_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(208_153_000 as u64) - // Standard Error: 140_000 - .saturating_add(Weight::from_ref_time(51_264_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(221_346_000 as u64) + // Standard Error: 46_000 + .saturating_add(Weight::from_ref_time(47_910_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(278_368_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_006_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 116_000 + .saturating_add(Weight::from_ref_time(97_458_000 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(331_955_000 as u64) - // Standard Error: 1_155_000 - .saturating_add(Weight::from_ref_time(3_069_955_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(355_672_000 as u64) + // Standard Error: 678_000 + .saturating_add(Weight::from_ref_time(2_970_861_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(207_838_000 as u64) - // Standard Error: 783_000 - .saturating_add(Weight::from_ref_time(2_058_503_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + Weight::from_ref_time(232_179_000 as u64) + // Standard Error: 463_000 + .saturating_add(Weight::from_ref_time(2_059_953_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. 
fn seal_set_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 1_567_000 - .saturating_add(Weight::from_ref_time(774_380_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads((79 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes((79 as u64).saturating_mul(r as u64))) + // Standard Error: 1_725_000 + .saturating_add(Weight::from_ref_time(1_107_351_000 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads((158 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes((158 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(73_955_000 as u64) + Weight::from_ref_time(70_023_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(612_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(848_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(74_057_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_335_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_761_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(74_137_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_427_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_875_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(2_289_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(73_844_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_773_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_128_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(2_288_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(73_979_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_952_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(68_850_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(2_558_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(73_924_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(941_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_585_000 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_293_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(73_574_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_439_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_492_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
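Unlike the host-function weights above, the `instr_*` rows carry no `DbWeight` terms at all: executing Wasm instructions touches no storage, so the whole cost is the ref-time line `base + slope * r`. A small sketch with the new `instr_i64const` numbers from this hunk (plain `u64`; names are illustrative):

    const BASE: u64 = 70_023_000; // new instr_i64const base from the hunk above
    const PER_REP: u64 = 848_000; // new per-repetition slope

    fn instr_i64const_ref_time(r: u64) -> u64 {
        BASE.saturating_add(PER_REP.saturating_mul(r))
    }

    fn main() {
        // r = 50 is the top of the benchmarked range `[0, 50]`.
        assert_eq!(instr_i64const_ref_time(50), 112_423_000);
    }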
fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(73_343_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_603_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_344_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(2_089_000 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(76_267_000 as u64) + Weight::from_ref_time(72_833_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(e as u64)) + .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(74_877_000 as u64) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(7_144_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_722_000 as u64) + // Standard Error: 10_000 + .saturating_add(Weight::from_ref_time(6_545_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(88_665_000 as u64) - // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(9_142_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(84_465_000 as u64) + // Standard Error: 13_000 + .saturating_add(Weight::from_ref_time(8_307_000 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(98_600_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(469_000 as u64).saturating_mul(p as u64)) + Weight::from_ref_time(93_185_000 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(599_000 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(74_555_000 as u64) + Weight::from_ref_time(70_473_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(624_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(875_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(74_329_000 as u64) + Weight::from_ref_time(70_352_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(688_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(907_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(74_612_000 as u64) + Weight::from_ref_time(70_239_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(909_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_219_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(76_906_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_192_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_049_000 as u64) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(1_543_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(76_979_000 as u64) + Weight::from_ref_time(72_551_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_361_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_693_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(74_370_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(661_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_228_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(889_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(73_584_000 as u64) - // Standard Error: 353_000 - .saturating_add(Weight::from_ref_time(187_114_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_987_000 as u64) + // Standard Error: 88_000 + .saturating_add(Weight::from_ref_time(186_348_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(74_206_000 as u64) + Weight::from_ref_time(70_186_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(884_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_304_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(73_992_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(893_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_796_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(73_985_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(891_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_184_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_315_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(74_117_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(901_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_059_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_316_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(73_981_000 as u64) + Weight::from_ref_time(70_066_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(866_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_282_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(74_104_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(868_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_100_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_285_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(74_293_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(878_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_992_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_330_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(74_055_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_350_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_935_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_829_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(73_710_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_024_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(1_820_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(73_917_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_355_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_794_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_831_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(74_048_000 as u64) + Weight::from_ref_time(69_997_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_817_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(74_029_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_349_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_610_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_804_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(74_267_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_353_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_759_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_846_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(73_952_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_350_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_405_000 as u64) + // Standard Error: 8_000 + .saturating_add(Weight::from_ref_time(1_808_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(73_851_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_368_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_968_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_838_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(74_034_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_348_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_825_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_854_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(73_979_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_353_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_973_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_822_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(74_000_000 as u64) + Weight::from_ref_time(69_907_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_328_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_824_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(73_883_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_331_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_280_000 as u64) + // Standard Error: 16_000 + .saturating_add(Weight::from_ref_time(1_756_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(74_216_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_827_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(73_989_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_998_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_995_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(2_461_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(73_857_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_073_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_627_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(2_453_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(73_801_000 as u64) + Weight::from_ref_time(70_040_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_027_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(2_456_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(74_130_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_064_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_754_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(2_450_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(74_071_000 as u64) + Weight::from_ref_time(69_848_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_327_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_836_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(74_201_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_330_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_904_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_837_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(74_241_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_321_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_912_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_839_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(74_331_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(1_347_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_656_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_858_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(73_674_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_359_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_927_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(73_807_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_025_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(73_725_000 as u64) + Weight::from_ref_time(69_916_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_843_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(73_755_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_988_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_842_000 as u64).saturating_mul(r as u64)) } } @@ -1219,25 +1265,25 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(1_654_000 as u64) + Weight::from_ref_time(2_948_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. 
fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(8_564_000 as u64) + Weight::from_ref_time(14_858_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(868_000 as u64).saturating_mul(k as u64)) + .saturating_add(Weight::from_ref_time(890_000 as u64).saturating_mul(k as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) } // Storage: Contracts DeletionQueue (r:1 w:0) - /// The range of component `q` is `[0, 1024]`. + /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_944_000 as u64).saturating_mul(q as u64)) + Weight::from_ref_time(14_778_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_228_000 as u64).saturating_mul(q as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -1245,9 +1291,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(19_016_000 as u64) + Weight::from_ref_time(21_622_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(49_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -1255,31 +1301,33 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) + // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(205_194_000 as u64) + Weight::from_ref_time(207_601_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(53_000 as u64).saturating_mul(c as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) + // Storage: System EventTopics (r:3 w:3) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. 
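This hunk also narrows the benchmarked deletion-queue range `q` from `[0, 1024]` to `[0, 128]`. One way numbers like `on_initialize_per_trie_key` get used (an assumption about usage, not code from the pallet) is to bound how many trie keys a lazy-deletion sweep may remove within a given `on_initialize` budget. A std-only sketch with the new constants from the hunk above; the budget value is purely illustrative:

    const BASE: u64 = 14_858_000;      // new on_initialize_per_trie_key base
    const PER_TRIE_KEY: u64 = 890_000; // new per-key slope

    // How many keys fit into a ref-time budget, after paying the base cost.
    fn max_keys_within(budget: u64) -> u64 {
        budget.saturating_sub(BASE) / PER_TRIE_KEY
    }

    fn main() {
        // Illustrative budget: 0.2 s of ref-time (ref-time units are picoseconds).
        let budget = 200_000_000_000;
        assert_eq!(max_keys_within(budget), 224_702);
    }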
fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(288_487_000 as u64) + Weight::from_ref_time(287_355_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(c as u64)) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().writes(9 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) // Storage: Contracts Nonce (r:1 w:1) @@ -1287,526 +1335,560 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) + // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(186_136_000 as u64) + Weight::from_ref_time(178_912_000 as u64) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().writes(7 as u64)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System Account (r:1 w:1) + // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - Weight::from_ref_time(149_232_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + Weight::from_ref_time(159_188_000 as u64) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Contracts CodeStorage (r:1 w:1) + // Storage: System EventTopics (r:1 w:1) // Storage: Contracts PristineCode (r:0 w:1) // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. 
fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(51_721_000 as u64) + Weight::from_ref_time(52_177_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(48_000 as u64).saturating_mul(c as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(Weight::from_ref_time(47_000 as u64).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Contracts OwnerInfoOf (r:1 w:1) + // Storage: System EventTopics (r:1 w:1) // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(30_016_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + Weight::from_ref_time(37_011_000 as u64) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:2 w:2) + // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - Weight::from_ref_time(27_192_000 as u64) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + Weight::from_ref_time(40_206_000 as u64) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(206_405_000 as u64) - // Standard Error: 112_000 - .saturating_add(Weight::from_ref_time(40_987_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(224_309_000 as u64) + // Standard Error: 53_000 + .saturating_add(Weight::from_ref_time(36_885_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(106_220_000 as u64) - // Standard Error: 710_000 - .saturating_add(Weight::from_ref_time(307_648_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) + Weight::from_ref_time(166_137_000 as u64) + // Standard Error: 428_000 + .saturating_add(Weight::from_ref_time(214_063_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(104_498_000 as u64) - // Standard Error: 633_000 - .saturating_add(Weight::from_ref_time(368_901_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) + Weight::from_ref_time(174_069_000 as u64) + // Standard Error: 443_000 + .saturating_add(Weight::from_ref_time(272_524_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(208_696_000 as u64) - // Standard Error: 101_000 - .saturating_add(Weight::from_ref_time(44_445_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(225_577_000 as u64) + // Standard Error: 39_000 + .saturating_add(Weight::from_ref_time(40_425_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(205_612_000 as u64) - // Standard Error: 68_000 - .saturating_add(Weight::from_ref_time(17_145_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(221_162_000 as u64) + // Standard Error: 20_000 + .saturating_add(Weight::from_ref_time(15_192_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(206_947_000 as u64) - // Standard Error: 107_000 - .saturating_add(Weight::from_ref_time(40_789_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(224_517_000 as u64) + // Standard Error: 38_000 + .saturating_add(Weight::from_ref_time(36_722_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(208_692_000 as u64) - // Standard Error: 109_000 - .saturating_add(Weight::from_ref_time(40_600_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(223_864_000 as u64) + // Standard Error: 35_000 + .saturating_add(Weight::from_ref_time(36_355_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(209_811_000 as u64) - // Standard Error: 208_000 - .saturating_add(Weight::from_ref_time(116_831_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(226_396_000 as u64) + // Standard Error: 86_000 + .saturating_add(Weight::from_ref_time(112_018_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(207_406_000 as u64) - // Standard Error: 117_000 - .saturating_add(Weight::from_ref_time(40_702_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(222_775_000 as u64) + // Standard Error: 45_000 + .saturating_add(Weight::from_ref_time(36_691_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(209_260_000 as u64) - // Standard Error: 130_000 - .saturating_add(Weight::from_ref_time(40_479_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(223_202_000 as u64) + // Standard Error: 42_000 + .saturating_add(Weight::from_ref_time(36_393_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(206_448_000 as u64) - // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(40_134_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(223_507_000 as u64) + // Standard Error: 44_000 + .saturating_add(Weight::from_ref_time(36_211_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(206_969_000 as u64) - // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(40_251_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(225_500_000 as u64) + // Standard Error: 58_000 + .saturating_add(Weight::from_ref_time(36_290_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(211_611_000 as u64) - // Standard Error: 175_000 - .saturating_add(Weight::from_ref_time(98_675_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(225_310_000 as u64) + // Standard Error: 86_000 + .saturating_add(Weight::from_ref_time(98_714_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(134_484_000 as u64) - // Standard Error: 57_000 - .saturating_add(Weight::from_ref_time(19_329_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(149_071_000 as u64) + // Standard Error: 30_000 + .saturating_add(Weight::from_ref_time(18_084_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(208_556_000 as u64) - // Standard Error: 125_000 - .saturating_add(Weight::from_ref_time(40_328_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(222_918_000 as u64) + // Standard Error: 48_000 + .saturating_add(Weight::from_ref_time(34_848_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(268_886_000 as u64) + Weight::from_ref_time(280_543_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(9_627_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(9_599_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. - fn seal_return(_r: u32, ) -> Weight { - Weight::from_ref_time(203_591_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn seal_return(r: u32, ) -> Weight { + Weight::from_ref_time(219_310_000 as u64) + // Standard Error: 303_000 + .saturating_add(Weight::from_ref_time(1_187_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(204_258_000 as u64) + Weight::from_ref_time(221_399_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(183_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(182_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) // Storage: Contracts DeletionQueue (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. 
fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(206_625_000 as u64) - // Standard Error: 672_000 - .saturating_add(Weight::from_ref_time(59_377_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(r as u64))) + Weight::from_ref_time(222_285_000 as u64) + // Standard Error: 163_000 + .saturating_add(Weight::from_ref_time(52_104_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes((6 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(208_866_000 as u64) - // Standard Error: 164_000 - .saturating_add(Weight::from_ref_time(133_438_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(233_586_000 as u64) + // Standard Error: 90_000 + .saturating_add(Weight::from_ref_time(133_209_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(220_860_000 as u64) - // Standard Error: 209_000 - .saturating_add(Weight::from_ref_time(239_951_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(231_267_000 as u64) + // Standard Error: 118_000 + .saturating_add(Weight::from_ref_time(232_306_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) - // Storage: System EventTopics (r:80 w:80) + // Storage: System EventTopics (r:2 w:2) /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. 
fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(439_782_000 as u64) - // Standard Error: 1_643_000 - .saturating_add(Weight::from_ref_time(264_687_000 as u64).saturating_mul(t as u64)) - // Standard Error: 323_000 - .saturating_add(Weight::from_ref_time(67_636_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) + Weight::from_ref_time(438_016_000 as u64) + // Standard Error: 538_000 + .saturating_add(Weight::from_ref_time(179_846_000 as u64).saturating_mul(t as u64)) + // Standard Error: 147_000 + .saturating_add(Weight::from_ref_time(66_893_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(t as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(140_280_000 as u64) - // Standard Error: 82_000 - .saturating_add(Weight::from_ref_time(32_717_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(156_730_000 as u64) + // Standard Error: 48_000 + .saturating_add(Weight::from_ref_time(29_019_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(161_247_000 as u64) - // Standard Error: 883_000 - .saturating_add(Weight::from_ref_time(423_997_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) + Weight::from_ref_time(185_733_000 as u64) + // Standard Error: 384_000 + .saturating_add(Weight::from_ref_time(427_052_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(529_247_000 as u64) - // Standard Error: 2_745_000 - .saturating_add(Weight::from_ref_time(85_282_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(55 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(53 as u64)) - .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(520_776_000 as u64) + // Standard Error: 1_346_000 + .saturating_add(Weight::from_ref_time(93_389_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(52 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(50 as u64)) + .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(529_812_000 as u64) - // Standard Error: 2_513_000 - .saturating_add(Weight::from_ref_time(74_554_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(55 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(53 as u64)) - .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(504_262_000 as u64) + // Standard Error: 1_198_000 + .saturating_add(Weight::from_ref_time(68_581_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(52 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(50 as u64)) + .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(184_803_000 as u64) - // Standard Error: 733_000 - .saturating_add(Weight::from_ref_time(404_933_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) + Weight::from_ref_time(188_507_000 as u64) + // Standard Error: 514_000 + .saturating_add(Weight::from_ref_time(416_965_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(500_958_000 as u64) - // Standard Error: 2_980_000 - .saturating_add(Weight::from_ref_time(75_996_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(55 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(52 as u64)) - .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(481_328_000 as u64) + // Standard Error: 1_304_000 + .saturating_add(Weight::from_ref_time(70_798_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(51 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(49 as u64)) + .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(177_682_000 as u64) - // Standard Error: 743_000 - .saturating_add(Weight::from_ref_time(338_172_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) + Weight::from_ref_time(200_040_000 as u64) + // Standard Error: 292_000 + .saturating_add(Weight::from_ref_time(341_568_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(465_285_000 as u64) - // Standard Error: 2_599_000 - .saturating_add(Weight::from_ref_time(155_106_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(55 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(443_969_000 as u64) + // Standard Error: 1_076_000 + .saturating_add(Weight::from_ref_time(154_674_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(51 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(179_118_000 as u64) - // Standard Error: 572_000 - .saturating_add(Weight::from_ref_time(311_083_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) + Weight::from_ref_time(194_849_000 as u64) + // Standard Error: 310_000 + .saturating_add(Weight::from_ref_time(320_113_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(423_056_000 as u64) - // Standard Error: 2_037_000 - .saturating_add(Weight::from_ref_time(69_665_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(54 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(416_712_000 as u64) + // Standard Error: 932_000 + .saturating_add(Weight::from_ref_time(63_903_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(51 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(188_884_000 as u64) - // Standard Error: 761_000 - .saturating_add(Weight::from_ref_time(432_781_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) + Weight::from_ref_time(194_760_000 as u64) + // Standard Error: 375_000 + .saturating_add(Weight::from_ref_time(438_855_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(532_408_000 as u64) - // Standard Error: 3_348_000 - .saturating_add(Weight::from_ref_time(164_943_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(55 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(53 as u64)) - .saturating_add(RocksDbWeight::get().writes((5 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(509_485_000 as u64) + // Standard Error: 1_486_000 + .saturating_add(Weight::from_ref_time(162_250_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(52 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(49 as u64)) + .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(127_181_000 as u64) - // Standard Error: 1_495_000 - .saturating_add(Weight::from_ref_time(1_500_589_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) + Weight::from_ref_time(162_740_000 as u64) + // Standard Error: 691_000 + .saturating_add(Weight::from_ref_time(1_338_567_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 3_803_000 - .saturating_add(Weight::from_ref_time(14_860_909_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) + // Standard Error: 8_018_000 + .saturating_add(Weight::from_ref_time(16_181_274_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().reads((160 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes((160 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 6_045_000 - .saturating_add(Weight::from_ref_time(14_797_140_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads((79 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + // Standard Error: 8_806_000 + .saturating_add(Weight::from_ref_time(15_988_209_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads((158 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes((79 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:81 w:81) // Storage: Contracts CodeStorage (r:2 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:82 w:82) /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. 
fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(9_196_444_000 as u64) - // Standard Error: 20_486_000 - .saturating_add(Weight::from_ref_time(1_458_153_000 as u64).saturating_mul(t as u64)) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(9_718_000 as u64).saturating_mul(c as u64)) - .saturating_add(RocksDbWeight::get().reads(85 as u64)) + Weight::from_ref_time(9_685_415_000 as u64) + // Standard Error: 6_048_000 + .saturating_add(Weight::from_ref_time(1_132_315_000 as u64).saturating_mul(t as u64)) + // Standard Error: 9_000 + .saturating_add(Weight::from_ref_time(9_753_000 as u64).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(167 as u64)) .saturating_add(RocksDbWeight::get().reads((81 as u64).saturating_mul(t as u64))) - .saturating_add(RocksDbWeight::get().writes(81 as u64)) + .saturating_add(RocksDbWeight::get().writes(163 as u64)) .saturating_add(RocksDbWeight::get().writes((81 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 36_253_000 - .saturating_add(Weight::from_ref_time(21_201_529_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((320 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - .saturating_add(RocksDbWeight::get().writes((320 as u64).saturating_mul(r as u64))) + // Standard Error: 30_079_000 + .saturating_add(Weight::from_ref_time(21_155_990_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().reads((400 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) + .saturating_add(RocksDbWeight::get().writes((400 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:81 w:81) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -1814,456 +1896,466 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:0) // Storage: Contracts Nonce (r:1 w:1) // Storage: Contracts OwnerInfoOf (r:1 w:1) + // Storage: System EventTopics (r:82 w:82) /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. 
fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(12_282_498_000 as u64) - // Standard Error: 48_112_000 - .saturating_add(Weight::from_ref_time(720_795_000 as u64).saturating_mul(t as u64)) - // Standard Error: 22_000 - .saturating_add(Weight::from_ref_time(124_274_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(167 as u64)) + Weight::from_ref_time(12_321_649_000 as u64) + // Standard Error: 77_000 + .saturating_add(Weight::from_ref_time(125_883_000 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(249 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(t as u64))) - .saturating_add(RocksDbWeight::get().writes(165 as u64)) + .saturating_add(RocksDbWeight::get().writes(247 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(t as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(203_959_000 as u64) - // Standard Error: 142_000 - .saturating_add(Weight::from_ref_time(61_311_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(224_020_000 as u64) + // Standard Error: 63_000 + .saturating_add(Weight::from_ref_time(57_966_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(349_915_000 as u64) - // Standard Error: 40_000 - .saturating_add(Weight::from_ref_time(320_652_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 86_000 + .saturating_add(Weight::from_ref_time(319_801_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(209_219_000 as u64) - // Standard Error: 157_000 - .saturating_add(Weight::from_ref_time(73_728_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(222_308_000 as u64) + // Standard Error: 78_000 + .saturating_add(Weight::from_ref_time(70_579_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(208_860_000 as u64) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(245_718_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 97_000 + .saturating_add(Weight::from_ref_time(247_328_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(206_165_000 as u64) - // Standard Error: 138_000 - .saturating_add(Weight::from_ref_time(51_644_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(221_851_000 as u64) + // Standard Error: 57_000 + .saturating_add(Weight::from_ref_time(48_446_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(255_955_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_090_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 93_000 + .saturating_add(Weight::from_ref_time(97_258_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(208_153_000 as u64) - // Standard Error: 140_000 - .saturating_add(Weight::from_ref_time(51_264_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(221_346_000 as u64) + // Standard Error: 46_000 + .saturating_add(Weight::from_ref_time(47_910_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(278_368_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(95_006_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 116_000 + .saturating_add(Weight::from_ref_time(97_458_000 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(331_955_000 as u64) - // Standard Error: 1_155_000 - .saturating_add(Weight::from_ref_time(3_069_955_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(355_672_000 as u64) + // Standard Error: 678_000 + .saturating_add(Weight::from_ref_time(2_970_861_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(207_838_000 as u64) - // Standard Error: 783_000 - .saturating_add(Weight::from_ref_time(2_058_503_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + Weight::from_ref_time(232_179_000 as u64) + // Standard Error: 463_000 + .saturating_add(Weight::from_ref_time(2_059_953_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) + // Storage: System EventTopics (r:2 w:2) // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. 
fn seal_set_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 1_567_000 - .saturating_add(Weight::from_ref_time(774_380_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads((79 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes((79 as u64).saturating_mul(r as u64))) + // Standard Error: 1_725_000 + .saturating_add(Weight::from_ref_time(1_107_351_000 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads((158 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes((158 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(73_955_000 as u64) + Weight::from_ref_time(70_023_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(612_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(848_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(74_057_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_335_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_761_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(74_137_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_427_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_875_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(2_289_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(73_844_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_773_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_128_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(2_288_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(73_979_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_952_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(68_850_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(2_558_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(73_924_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(941_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_585_000 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_293_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(73_574_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_439_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_492_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(73_343_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_603_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_344_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(2_089_000 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(76_267_000 as u64) + Weight::from_ref_time(72_833_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(e as u64)) + .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(74_877_000 as u64) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(7_144_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_722_000 as u64) + // Standard Error: 10_000 + .saturating_add(Weight::from_ref_time(6_545_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(88_665_000 as u64) - // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(9_142_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(84_465_000 as u64) + // Standard Error: 13_000 + .saturating_add(Weight::from_ref_time(8_307_000 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(98_600_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(469_000 as u64).saturating_mul(p as u64)) + Weight::from_ref_time(93_185_000 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(599_000 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(74_555_000 as u64) + Weight::from_ref_time(70_473_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(624_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(875_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(74_329_000 as u64) + Weight::from_ref_time(70_352_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(688_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(907_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(74_612_000 as u64) + Weight::from_ref_time(70_239_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(909_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_219_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(76_906_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_192_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_049_000 as u64) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(1_543_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(76_979_000 as u64) + Weight::from_ref_time(72_551_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_361_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_693_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(74_370_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(661_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_228_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(889_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(73_584_000 as u64) - // Standard Error: 353_000 - .saturating_add(Weight::from_ref_time(187_114_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_987_000 as u64) + // Standard Error: 88_000 + .saturating_add(Weight::from_ref_time(186_348_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(74_206_000 as u64) + Weight::from_ref_time(70_186_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(884_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_304_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(73_992_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(893_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_796_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(73_985_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(891_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_184_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_315_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(74_117_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(901_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_059_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_316_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(73_981_000 as u64) + Weight::from_ref_time(70_066_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(866_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_282_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(74_104_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(868_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_100_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_285_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(74_293_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(878_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_992_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_330_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(74_055_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_350_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_935_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_829_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(73_710_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_024_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(1_820_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(73_917_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_355_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_794_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_831_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(74_048_000 as u64) + Weight::from_ref_time(69_997_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_817_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(74_029_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_349_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_610_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_804_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(74_267_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_353_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_759_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_846_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(73_952_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_350_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_405_000 as u64) + // Standard Error: 8_000 + .saturating_add(Weight::from_ref_time(1_808_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(73_851_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_368_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_968_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_838_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(74_034_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_348_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_825_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_854_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(73_979_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_353_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_973_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_822_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(74_000_000 as u64) + Weight::from_ref_time(69_907_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_328_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_824_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(73_883_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_331_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_280_000 as u64) + // Standard Error: 16_000 + .saturating_add(Weight::from_ref_time(1_756_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(74_216_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_827_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(73_989_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_998_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_995_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(2_461_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(73_857_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_073_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_627_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(2_453_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(73_801_000 as u64) + Weight::from_ref_time(70_040_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_027_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(2_456_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(74_130_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_064_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_754_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(2_450_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(74_071_000 as u64) + Weight::from_ref_time(69_848_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_327_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_836_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(74_201_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_330_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_904_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_837_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(74_241_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_321_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_912_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_839_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(74_331_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(1_347_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_656_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_858_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(73_674_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_359_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_927_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(73_807_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_025_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(73_725_000 as u64) + Weight::from_ref_time(69_916_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_843_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(73_755_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_360_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_988_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_842_000 as u64).saturating_mul(r as u64)) } } From 5e2ffeb21b0864f7b78ef819a33cba5da357beb1 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Wed, 7 Sep 2022 23:41:45 +0800 Subject: [PATCH 104/348] Move Get and bounded types to sp-core (#12203) * Move Get and bounded types to sp-core * Fixes * cargo fmt * Fixes --- primitives/{runtime => core}/src/bounded.rs | 0 .../src/bounded/bounded_btree_map.rs | 4 +- .../src/bounded/bounded_btree_set.rs | 4 +- .../src/bounded/bounded_vec.rs | 4 +- .../src/bounded/weak_bounded_vec.rs | 4 +- primitives/core/src/lib.rs | 237 ++++++++++++++++++ primitives/runtime/src/lib.rs | 47 +--- primitives/runtime/src/traits.rs | 202 +-------------- 8 files changed, 254 insertions(+), 248 deletions(-) rename primitives/{runtime => core}/src/bounded.rs (100%) rename primitives/{runtime => core}/src/bounded/bounded_btree_map.rs (99%) rename primitives/{runtime => core}/src/bounded/bounded_btree_set.rs (99%) rename primitives/{runtime => core}/src/bounded/bounded_vec.rs (99%) rename primitives/{runtime => core}/src/bounded/weak_bounded_vec.rs (99%) diff --git a/primitives/runtime/src/bounded.rs b/primitives/core/src/bounded.rs similarity index 100% rename from primitives/runtime/src/bounded.rs rename to primitives/core/src/bounded.rs diff --git a/primitives/runtime/src/bounded/bounded_btree_map.rs b/primitives/core/src/bounded/bounded_btree_map.rs similarity index 99% rename from primitives/runtime/src/bounded/bounded_btree_map.rs rename to primitives/core/src/bounded/bounded_btree_map.rs index 725864b462694..32f8d85ca795b 100644 --- a/primitives/runtime/src/bounded/bounded_btree_map.rs +++ b/primitives/core/src/bounded/bounded_btree_map.rs @@ -17,7 +17,7 @@ //! Traits, types and structs to support a bounded BTreeMap. -use crate::traits::{Get, TryCollect}; +use crate::{Get, TryCollect}; use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::{borrow::Borrow, collections::btree_map::BTreeMap, marker::PhantomData, ops::Deref}; @@ -363,7 +363,7 @@ where #[cfg(test)] pub mod test { use super::*; - use crate::traits::ConstU32; + use crate::ConstU32; fn map_from_keys(keys: &[K]) -> BTreeMap where diff --git a/primitives/runtime/src/bounded/bounded_btree_set.rs b/primitives/core/src/bounded/bounded_btree_set.rs similarity index 99% rename from primitives/runtime/src/bounded/bounded_btree_set.rs rename to primitives/core/src/bounded/bounded_btree_set.rs index c19d176f11bef..5feac6b7150f0 100644 --- a/primitives/runtime/src/bounded/bounded_btree_set.rs +++ b/primitives/core/src/bounded/bounded_btree_set.rs @@ -17,7 +17,7 @@ //! Traits, types and structs to support a bounded `BTreeSet`. 
-use crate::traits::{Get, TryCollect}; +use crate::{Get, TryCollect}; use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::{borrow::Borrow, collections::btree_set::BTreeSet, marker::PhantomData, ops::Deref}; @@ -321,7 +321,7 @@ where #[cfg(test)] pub mod test { use super::*; - use crate::traits::ConstU32; + use crate::ConstU32; fn set_from_keys(keys: &[T]) -> BTreeSet where diff --git a/primitives/runtime/src/bounded/bounded_vec.rs b/primitives/core/src/bounded/bounded_vec.rs similarity index 99% rename from primitives/runtime/src/bounded/bounded_vec.rs rename to primitives/core/src/bounded/bounded_vec.rs index aed1a156ad699..85f2bed316793 100644 --- a/primitives/runtime/src/bounded/bounded_vec.rs +++ b/primitives/core/src/bounded/bounded_vec.rs @@ -19,7 +19,7 @@ //! or a double map. use super::WeakBoundedVec; -use crate::traits::{Get, TryCollect}; +use crate::{Get, TryCollect}; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use core::{ ops::{Deref, Index, IndexMut, RangeBounds}, @@ -884,7 +884,7 @@ where #[cfg(test)] pub mod test { use super::*; - use crate::{bounded_vec, traits::ConstU32}; + use crate::{bounded_vec, ConstU32}; #[test] fn slide_works() { diff --git a/primitives/runtime/src/bounded/weak_bounded_vec.rs b/primitives/core/src/bounded/weak_bounded_vec.rs similarity index 99% rename from primitives/runtime/src/bounded/weak_bounded_vec.rs rename to primitives/core/src/bounded/weak_bounded_vec.rs index a447e7285f906..5aff35f010c8b 100644 --- a/primitives/runtime/src/bounded/weak_bounded_vec.rs +++ b/primitives/core/src/bounded/weak_bounded_vec.rs @@ -19,7 +19,7 @@ //! or a double map. use super::{BoundedSlice, BoundedVec}; -use crate::traits::Get; +use crate::Get; use codec::{Decode, Encode, MaxEncodedLen}; use core::{ ops::{Deref, Index, IndexMut}, @@ -453,7 +453,7 @@ where #[cfg(test)] pub mod test { use super::*; - use crate::traits::ConstU32; + use crate::ConstU32; #[test] fn bound_returns_correct_value() { diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index f48adc274f524..fadbdada1f004 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -53,6 +53,7 @@ pub mod hashing; #[cfg(feature = "full_crypto")] pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64}; +pub mod bounded; pub mod crypto; pub mod hexdisplay; @@ -466,3 +467,239 @@ macro_rules! impl_maybe_marker { // The maximum possible allocation size was chosen rather arbitrary, 32 MiB should be enough for // everybody. pub const MAX_POSSIBLE_ALLOCATION: u32 = 33554432; // 2^25 bytes, 32 MiB + +/// A trait for querying a single value from a type defined in the trait. +/// +/// It is not required that the value is constant. +pub trait TypedGet { + /// The type which is returned. + type Type; + /// Return the current value. + fn get() -> Self::Type; +} + +/// A trait for querying a single value from a type. +/// +/// It is not required that the value is constant. +pub trait Get { + /// Return the current value. + fn get() -> T; +} + +impl Get for () { + fn get() -> T { + T::default() + } +} + +/// Implement Get by returning Default for any type that implements Default. +pub struct GetDefault; +impl Get for GetDefault { + fn get() -> T { + T::default() + } +} + +macro_rules! 
impl_const_get { + ($name:ident, $t:ty) => { + #[doc = "Const getter for a basic type."] + #[derive($crate::RuntimeDebug)] + pub struct $name; + impl Get<$t> for $name { + fn get() -> $t { + T + } + } + impl Get> for $name { + fn get() -> Option<$t> { + Some(T) + } + } + impl TypedGet for $name { + type Type = $t; + fn get() -> $t { + T + } + } + }; +} + +impl_const_get!(ConstBool, bool); +impl_const_get!(ConstU8, u8); +impl_const_get!(ConstU16, u16); +impl_const_get!(ConstU32, u32); +impl_const_get!(ConstU64, u64); +impl_const_get!(ConstU128, u128); +impl_const_get!(ConstI8, i8); +impl_const_get!(ConstI16, i16); +impl_const_get!(ConstI32, i32); +impl_const_get!(ConstI64, i64); +impl_const_get!(ConstI128, i128); + +/// Try and collect into a collection `C`. +pub trait TryCollect { + /// The error type that gets returned when a collection can't be made from `self`. + type Error; + /// Consume self and try to collect the results into `C`. + /// + /// This is useful in preventing the undesirable `.collect().try_into()` call chain on + /// collections that need to be converted into a bounded type (e.g. `BoundedVec`). + fn try_collect(self) -> Result; +} + +/// Create new implementations of the [`Get`](crate::Get) trait. +/// +/// The so-called parameter type can be created in four different ways: +/// +/// - Using `const` to create a parameter type that provides a `const` getter. It is required that +/// the `value` is const. +/// +/// - Declare the parameter type without `const` to have more freedom when creating the value. +/// +/// NOTE: A more substantial version of this macro is available in `frame_support` crate which +/// allows mutable and persistant variants. +/// +/// # Examples +/// +/// ``` +/// # use sp_core::Get; +/// # use sp_core::parameter_types; +/// // This function cannot be used in a const context. +/// fn non_const_expression() -> u64 { 99 } +/// +/// const FIXED_VALUE: u64 = 10; +/// parameter_types! { +/// pub const Argument: u64 = 42 + FIXED_VALUE; +/// /// Visibility of the type is optional +/// OtherArgument: u64 = non_const_expression(); +/// } +/// +/// trait Config { +/// type Parameter: Get; +/// type OtherParameter: Get; +/// } +/// +/// struct Runtime; +/// impl Config for Runtime { +/// type Parameter = Argument; +/// type OtherParameter = OtherArgument; +/// } +/// ``` +/// +/// # Invalid example: +/// +/// ```compile_fail +/// # use sp_core::Get; +/// # use sp_core::parameter_types; +/// // This function cannot be used in a const context. +/// fn non_const_expression() -> u64 { 99 } +/// +/// parameter_types! { +/// pub const Argument: u64 = non_const_expression(); +/// } +/// ``` +#[macro_export] +macro_rules! parameter_types { + ( + $( #[ $attr:meta ] )* + $vis:vis const $name:ident: $type:ty = $value:expr; + $( $rest:tt )* + ) => ( + $( #[ $attr ] )* + $vis struct $name; + $crate::parameter_types!(@IMPL_CONST $name , $type , $value); + $crate::parameter_types!( $( $rest )* ); + ); + ( + $( #[ $attr:meta ] )* + $vis:vis $name:ident: $type:ty = $value:expr; + $( $rest:tt )* + ) => ( + $( #[ $attr ] )* + $vis struct $name; + $crate::parameter_types!(@IMPL $name, $type, $value); + $crate::parameter_types!( $( $rest )* ); + ); + () => (); + (@IMPL_CONST $name:ident, $type:ty, $value:expr) => { + impl $name { + /// Returns the value of this parameter type. 
+ pub const fn get() -> $type { + $value + } + } + + impl> $crate::Get for $name { + fn get() -> I { + I::from(Self::get()) + } + } + + impl $crate::TypedGet for $name { + type Type = $type; + fn get() -> $type { + Self::get() + } + } + }; + (@IMPL $name:ident, $type:ty, $value:expr) => { + impl $name { + /// Returns the value of this parameter type. + pub fn get() -> $type { + $value + } + } + + impl> $crate::Get for $name { + fn get() -> I { + I::from(Self::get()) + } + } + + impl $crate::TypedGet for $name { + type Type = $type; + fn get() -> $type { + Self::get() + } + } + }; +} + +/// Build a bounded vec from the given literals. +/// +/// The type of the outcome must be known. +/// +/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding +/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. +#[macro_export] +#[cfg(feature = "std")] +macro_rules! bounded_vec { + ($ ($values:expr),* $(,)?) => { + { + $crate::sp_std::vec![$($values),*].try_into().unwrap() + } + }; + ( $value:expr ; $repetition:expr ) => { + { + $crate::sp_std::vec![$value ; $repetition].try_into().unwrap() + } + } +} + +/// Build a bounded btree-map from the given literals. +/// +/// The type of the outcome must be known. +/// +/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding +/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. +#[macro_export] +#[cfg(feature = "std")] +macro_rules! bounded_btree_map { + ($ ( $key:expr => $value:expr ),* $(,)?) => { + { + $crate::TryCollect::<$crate::bounded::BoundedBTreeMap<_, _, _>>::try_collect( + $crate::sp_std::vec![$(($key, $value)),*].into_iter() + ).unwrap() + } + }; +} diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 811ca33dbfa25..7006a13e415e4 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -55,7 +55,6 @@ use sp_std::prelude::*; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; -pub mod bounded; pub mod curve; pub mod generic; pub mod legacy; @@ -70,9 +69,6 @@ pub mod transaction_validity; pub use crate::runtime_string::*; -// Re-export bounded types -pub use bounded::{BoundedBTreeMap, BoundedBTreeSet, BoundedSlice, BoundedVec, WeakBoundedVec}; - // Re-export Multiaddress pub use multiaddress::MultiAddress; @@ -82,9 +78,13 @@ pub use generic::{Digest, DigestItem}; pub use sp_application_crypto::{BoundToRuntimeAppPublic, RuntimeAppPublic}; /// Re-export this since it's part of the API of this crate. pub use sp_core::{ + bounded::{BoundedBTreeMap, BoundedBTreeSet, BoundedSlice, BoundedVec, WeakBoundedVec}, crypto::{key_types, AccountId32, CryptoType, CryptoTypeId, KeyTypeId}, TypeId, }; +/// Re-export bounded_vec and bounded_btree_map macros only when std is enabled. +#[cfg(feature = "std")] +pub use sp_core::{bounded_btree_map, bounded_vec}; /// Re-export `RuntimeDebug`, to avoid dependency clutter. pub use sp_core::RuntimeDebug; @@ -834,45 +834,6 @@ macro_rules! assert_eq_error_rate { }; } -/// Build a bounded vec from the given literals. -/// -/// The type of the outcome must be known. -/// -/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding -/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. -#[macro_export] -#[cfg(feature = "std")] -macro_rules! bounded_vec { - ($ ($values:expr),* $(,)?) 
=> { - { - $crate::sp_std::vec![$($values),*].try_into().unwrap() - } - }; - ( $value:expr ; $repetition:expr ) => { - { - $crate::sp_std::vec![$value ; $repetition].try_into().unwrap() - } - } -} - -/// Build a bounded btree-map from the given literals. -/// -/// The type of the outcome must be known. -/// -/// Will not handle any errors and just panic if the given literals cannot fit in the corresponding -/// bounded vec type. Thus, this is only suitable for testing and non-consensus code. -#[macro_export] -#[cfg(feature = "std")] -macro_rules! bounded_btree_map { - ($ ( $key:expr => $value:expr ),* $(,)?) => { - { - $crate::traits::TryCollect::<$crate::BoundedBTreeMap<_, _, _>>::try_collect( - $crate::sp_std::vec![$(($key, $value)),*].into_iter() - ).unwrap() - } - }; -} - /// Simple blob to hold an extrinsic without committing to its format and ensure it is serialized /// correctly. #[derive(PartialEq, Eq, Clone, Default, Encode, Decode, TypeInfo)] diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index a82ae1d62f56a..2cf98b4e638d3 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -38,6 +38,11 @@ pub use sp_arithmetic::traits::{ }; use sp_core::{self, storage::StateVersion, Hasher, RuntimeDebug, TypeId}; #[doc(hidden)] +pub use sp_core::{ + parameter_types, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, + ConstU16, ConstU32, ConstU64, ConstU8, Get, GetDefault, TryCollect, TypedGet, +}; +#[doc(hidden)] pub use sp_std::marker::PhantomData; use sp_std::{self, fmt::Debug, prelude::*}; #[cfg(feature = "std")] @@ -276,203 +281,6 @@ where } } -/// A trait for querying a single value from a type defined in the trait. -/// -/// It is not required that the value is constant. -pub trait TypedGet { - /// The type which is returned. - type Type; - /// Return the current value. - fn get() -> Self::Type; -} - -/// A trait for querying a single value from a type. -/// -/// It is not required that the value is constant. -pub trait Get { - /// Return the current value. - fn get() -> T; -} - -impl Get for () { - fn get() -> T { - T::default() - } -} - -/// Implement Get by returning Default for any type that implements Default. -pub struct GetDefault; -impl Get for GetDefault { - fn get() -> T { - T::default() - } -} - -/// Try and collect into a collection `C`. -pub trait TryCollect { - /// The error type that gets returned when a collection can't be made from `self`. - type Error; - /// Consume self and try to collect the results into `C`. - /// - /// This is useful in preventing the undesirable `.collect().try_into()` call chain on - /// collections that need to be converted into a bounded type (e.g. `BoundedVec`). - fn try_collect(self) -> Result; -} - -macro_rules! 
impl_const_get { - ($name:ident, $t:ty) => { - #[doc = "Const getter for a basic type."] - #[derive($crate::RuntimeDebug)] - pub struct $name; - impl Get<$t> for $name { - fn get() -> $t { - T - } - } - impl Get> for $name { - fn get() -> Option<$t> { - Some(T) - } - } - impl TypedGet for $name { - type Type = $t; - fn get() -> $t { - T - } - } - }; -} - -impl_const_get!(ConstBool, bool); -impl_const_get!(ConstU8, u8); -impl_const_get!(ConstU16, u16); -impl_const_get!(ConstU32, u32); -impl_const_get!(ConstU64, u64); -impl_const_get!(ConstU128, u128); -impl_const_get!(ConstI8, i8); -impl_const_get!(ConstI16, i16); -impl_const_get!(ConstI32, i32); -impl_const_get!(ConstI64, i64); -impl_const_get!(ConstI128, i128); - -/// Create new implementations of the [`Get`](crate::traits::Get) trait. -/// -/// The so-called parameter type can be created in four different ways: -/// -/// - Using `const` to create a parameter type that provides a `const` getter. It is required that -/// the `value` is const. -/// -/// - Declare the parameter type without `const` to have more freedom when creating the value. -/// -/// NOTE: A more substantial version of this macro is available in `frame_support` crate which -/// allows mutable and persistant variants. -/// -/// # Examples -/// -/// ``` -/// # use sp_runtime::traits::Get; -/// # use sp_runtime::parameter_types; -/// // This function cannot be used in a const context. -/// fn non_const_expression() -> u64 { 99 } -/// -/// const FIXED_VALUE: u64 = 10; -/// parameter_types! { -/// pub const Argument: u64 = 42 + FIXED_VALUE; -/// /// Visibility of the type is optional -/// OtherArgument: u64 = non_const_expression(); -/// } -/// -/// trait Config { -/// type Parameter: Get; -/// type OtherParameter: Get; -/// } -/// -/// struct Runtime; -/// impl Config for Runtime { -/// type Parameter = Argument; -/// type OtherParameter = OtherArgument; -/// } -/// ``` -/// -/// # Invalid example: -/// -/// ```compile_fail -/// # use sp_runtime::traits::Get; -/// # use sp_runtime::parameter_types; -/// // This function cannot be used in a const context. -/// fn non_const_expression() -> u64 { 99 } -/// -/// parameter_types! { -/// pub const Argument: u64 = non_const_expression(); -/// } -/// ``` -#[macro_export] -macro_rules! parameter_types { - ( - $( #[ $attr:meta ] )* - $vis:vis const $name:ident: $type:ty = $value:expr; - $( $rest:tt )* - ) => ( - $( #[ $attr ] )* - $vis struct $name; - $crate::parameter_types!(@IMPL_CONST $name , $type , $value); - $crate::parameter_types!( $( $rest )* ); - ); - ( - $( #[ $attr:meta ] )* - $vis:vis $name:ident: $type:ty = $value:expr; - $( $rest:tt )* - ) => ( - $( #[ $attr ] )* - $vis struct $name; - $crate::parameter_types!(@IMPL $name, $type, $value); - $crate::parameter_types!( $( $rest )* ); - ); - () => (); - (@IMPL_CONST $name:ident, $type:ty, $value:expr) => { - impl $name { - /// Returns the value of this parameter type. - pub const fn get() -> $type { - $value - } - } - - impl> $crate::traits::Get for $name { - fn get() -> I { - I::from(Self::get()) - } - } - - impl $crate::traits::TypedGet for $name { - type Type = $type; - fn get() -> $type { - Self::get() - } - } - }; - (@IMPL $name:ident, $type:ty, $value:expr) => { - impl $name { - /// Returns the value of this parameter type. 
- pub fn get() -> $type { - $value - } - } - - impl> $crate::traits::Get for $name { - fn get() -> I { - I::from(Self::get()) - } - } - - impl $crate::traits::TypedGet for $name { - type Type = $type; - fn get() -> $type { - Self::get() - } - } - }; -} - /// Extensible conversion trait. Generic over only source type, with destination type being /// associated. pub trait Morph { From c0e1cdbff91bc0e1db54a2c369e2e1fc5fb17c2f Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 7 Sep 2022 23:16:54 +0200 Subject: [PATCH 105/348] Add `--header` to `benchmark overhead + storage` (#12204) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add header to 'overhead' command Signed-off-by: Oliver Tale-Yazdi * Add header to 'storage' command Signed-off-by: Oliver Tale-Yazdi * Update READMEs Signed-off-by: Oliver Tale-Yazdi * Apply suggestions from code review Co-authored-by: Bastian Köcher * .as_ref() and fmt Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher --- .../benchmarking-cli/src/overhead/README.md | 1 + .../frame/benchmarking-cli/src/overhead/cmd.rs | 8 +++++++- .../benchmarking-cli/src/overhead/template.rs | 9 +++++++++ .../benchmarking-cli/src/overhead/weights.hbs | 18 +----------------- .../benchmarking-cli/src/shared/README.md | 1 + .../benchmarking-cli/src/storage/README.md | 1 + .../frame/benchmarking-cli/src/storage/cmd.rs | 8 +++++++- .../benchmarking-cli/src/storage/template.rs | 16 +++++++++++++--- .../benchmarking-cli/src/storage/weights.hbs | 18 +----------------- 9 files changed, 41 insertions(+), 39 deletions(-) diff --git a/utils/frame/benchmarking-cli/src/overhead/README.md b/utils/frame/benchmarking-cli/src/overhead/README.md index 9d723c3424f7b..b21d051e9d44c 100644 --- a/utils/frame/benchmarking-cli/src/overhead/README.md +++ b/utils/frame/benchmarking-cli/src/overhead/README.md @@ -126,6 +126,7 @@ Minimizing this is important to have a large transaction throughput. - [`--add`](../shared/README.md#arguments) - [`--metric`](../shared/README.md#arguments) - [`--weight-path`](../shared/README.md#arguments) +- [`--header`](../shared/README.md#arguments) License: Apache-2.0 diff --git a/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/utils/frame/benchmarking-cli/src/overhead/cmd.rs index 4d0e2e52df17d..1c6753b4a20ea 100644 --- a/utils/frame/benchmarking-cli/src/overhead/cmd.rs +++ b/utils/frame/benchmarking-cli/src/overhead/cmd.rs @@ -28,7 +28,7 @@ use sp_runtime::{traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; use clap::{Args, Parser}; use log::info; use serde::Serialize; -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, path::PathBuf, sync::Arc}; use crate::{ extrinsic::{ @@ -69,6 +69,12 @@ pub struct OverheadParams { #[allow(missing_docs)] #[clap(flatten)] pub hostinfo: HostInfoParams, + + /// Add a header to the generated weight output file. + /// + /// Good for adding LICENSE headers. + #[clap(long, value_name = "PATH")] + pub header: Option, } /// Type of a benchmark. diff --git a/utils/frame/benchmarking-cli/src/overhead/template.rs b/utils/frame/benchmarking-cli/src/overhead/template.rs index aa82e45cf6db9..ceed34d1981f9 100644 --- a/utils/frame/benchmarking-cli/src/overhead/template.rs +++ b/utils/frame/benchmarking-cli/src/overhead/template.rs @@ -51,6 +51,8 @@ pub(crate) struct TemplateData { hostname: String, /// CPU name of the machine that executed the benchmarks. cpuname: String, + /// Header for the generated file. 
+ header: String, /// Command line arguments that were passed to the CLI. args: Vec, /// Params of the executed command. @@ -70,6 +72,12 @@ impl TemplateData { stats: &Stats, ) -> Result { let weight = params.weight.calc_weight(stats)?; + let header = params + .header + .as_ref() + .map(|p| std::fs::read_to_string(p)) + .transpose()? + .unwrap_or_default(); Ok(TemplateData { short_name: t.short_name().into(), @@ -79,6 +87,7 @@ impl TemplateData { date: chrono::Utc::now().format("%Y-%m-%d (Y/M/D)").to_string(), hostname: params.hostinfo.hostname(), cpuname: params.hostinfo.cpuname(), + header, args: env::args().collect::>(), params: params.clone(), stats: stats.clone(), diff --git a/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/utils/frame/benchmarking-cli/src/overhead/weights.hbs index bddaa642a5a29..5feac07b3a7d8 100644 --- a/utils/frame/benchmarking-cli/src/overhead/weights.hbs +++ b/utils/frame/benchmarking-cli/src/overhead/weights.hbs @@ -1,20 +1,4 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - +{{header}} //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}} //! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}` diff --git a/utils/frame/benchmarking-cli/src/shared/README.md b/utils/frame/benchmarking-cli/src/shared/README.md index 2a3719b85498c..08e25b0e08f76 100644 --- a/utils/frame/benchmarking-cli/src/shared/README.md +++ b/utils/frame/benchmarking-cli/src/shared/README.md @@ -11,5 +11,6 @@ Contains code that is shared among multiple sub-commands. - `--db` The database backend to use. This depends on your snapshot. - `--pruning` Set the pruning mode of the node. Some benchmarks require you to set this to `archive`. - `--base-path` The location on the disk that should be used for the benchmarks. You can try this on different disks or even on a mounted RAM-disk. It is important to use the same location that will later-on be used to store the chain data to get the correct results. +- `--header` Optional file header which will be prepended to the weight output file. Can be used for adding LICENSE headers. License: Apache-2.0 diff --git a/utils/frame/benchmarking-cli/src/storage/README.md b/utils/frame/benchmarking-cli/src/storage/README.md index 820785f7ea20c..ecaf4edadab38 100644 --- a/utils/frame/benchmarking-cli/src/storage/README.md +++ b/utils/frame/benchmarking-cli/src/storage/README.md @@ -97,6 +97,7 @@ write: 71_347 * constants::WEIGHT_PER_NANOS, - [`--weight-path`](../shared/README.md#arguments) - `--json-read-path` Write the raw 'read' results to this file or directory. - `--json-write-path` Write the raw 'write' results to this file or directory. 
+- [`--header`](../shared/README.md#arguments) License: Apache-2.0 diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index de5e189b40db0..d28ac4102152f 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -79,6 +79,12 @@ pub struct StorageParams { #[clap(long)] pub template_path: Option, + /// Add a header to the generated weight output file. + /// + /// Good for adding LICENSE headers. + #[clap(long, value_name = "PATH")] + pub header: Option, + /// Path to write the raw 'read' results in JSON format to. Can be a file or directory. #[clap(long)] pub json_read_path: Option, @@ -122,7 +128,7 @@ impl StorageCmd { Block: BlockT, C: UsageProvider + StorageProvider + HeaderBackend, { - let mut template = TemplateData::new(&cfg, &self.params); + let mut template = TemplateData::new(&cfg, &self.params)?; let block_id = BlockId::::Number(client.usage_info().chain.best_number); template.set_block_number(block_id.to_string()); diff --git a/utils/frame/benchmarking-cli/src/storage/template.rs b/utils/frame/benchmarking-cli/src/storage/template.rs index 20fbd58134f20..ebc415ccb8189 100644 --- a/utils/frame/benchmarking-cli/src/storage/template.rs +++ b/utils/frame/benchmarking-cli/src/storage/template.rs @@ -45,6 +45,8 @@ pub(crate) struct TemplateData { hostname: String, /// CPU name of the machine that executed the benchmarks. cpuname: String, + /// Header for the generated file. + header: String, /// Command line arguments that were passed to the CLI. args: Vec, /// Storage params of the executed command. @@ -63,18 +65,26 @@ pub(crate) struct TemplateData { impl TemplateData { /// Returns a new [`Self`] from the given configuration. - pub fn new(cfg: &Configuration, params: &StorageParams) -> Self { - TemplateData { + pub fn new(cfg: &Configuration, params: &StorageParams) -> Result { + let header = params + .header + .as_ref() + .map(|p| std::fs::read_to_string(p)) + .transpose()? + .unwrap_or_default(); + + Ok(TemplateData { db_name: format!("{}", cfg.database), runtime_name: cfg.chain_spec.name().into(), version: VERSION.into(), date: chrono::Utc::now().format("%Y-%m-%d (Y/M/D)").to_string(), hostname: params.hostinfo.hostname(), cpuname: params.hostinfo.cpuname(), + header, args: env::args().collect::>(), params: params.clone(), ..Default::default() - } + }) } /// Sets the stats and calculates the final weights. diff --git a/utils/frame/benchmarking-cli/src/storage/weights.hbs b/utils/frame/benchmarking-cli/src/storage/weights.hbs index 8c19aaa0dff36..3aa4703e29eb2 100644 --- a/utils/frame/benchmarking-cli/src/storage/weights.hbs +++ b/utils/frame/benchmarking-cli/src/storage/weights.hbs @@ -1,20 +1,4 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - +{{header}} //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}} //! HOSTNAME: `{{hostname}}`, CPU: `{{cpuname}}` From 26193945883c773f2f14f976da3c3bd11dc0eb79 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 8 Sep 2022 12:22:41 +0800 Subject: [PATCH 106/348] Remove Ord impl for Weights V2 and add comparison fns (#12183) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove Ord impl for Weights V2 and add comparison fns * Remove TODO * Update frame/multisig/src/lib.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/election-provider-multi-phase/src/unsigned.rs Co-authored-by: Oliver Tale-Yazdi * Remove unused import * cargo fmt * Fix tests * Fix more tests * cargo fmt * Fix more tests * Update frame/contracts/src/wasm/mod.rs Co-authored-by: Alexander Theißen * Update weight benchmarking templates Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Alexander Theißen --- frame/babe/src/tests.rs | 4 +- frame/collective/src/lib.rs | 2 +- frame/contracts/src/gas.rs | 2 +- frame/contracts/src/wasm/mod.rs | 6 +- .../election-provider-multi-phase/src/lib.rs | 6 +- .../src/unsigned.rs | 29 +++++----- frame/examples/basic/src/tests.rs | 4 +- frame/executive/src/lib.rs | 2 +- frame/grandpa/src/tests.rs | 4 +- frame/multisig/src/lib.rs | 5 +- frame/scheduler/src/lib.rs | 2 +- frame/staking/src/pallet/mod.rs | 10 ++-- frame/staking/src/tests.rs | 16 ++--- frame/support/src/weights/block_weights.rs | 10 +++- .../support/src/weights/extrinsic_weights.rs | 10 +++- frame/support/src/weights/paritydb_weights.rs | 8 +-- frame/support/src/weights/rocksdb_weights.rs | 8 +-- frame/support/src/weights/weight_v2.rs | 58 ++++++++++++++++++- frame/system/src/extensions/check_weight.rs | 16 ++--- frame/system/src/limits.rs | 18 +++--- frame/whitelist/src/lib.rs | 2 +- .../benchmarking-cli/src/overhead/weights.hbs | 8 +-- .../benchmarking-cli/src/storage/weights.hbs | 8 +-- 23 files changed, 156 insertions(+), 82 deletions(-) diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 2f967b658e396..065105eda1c32 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -823,7 +823,7 @@ fn report_equivocation_has_valid_weight() { .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) - .all(|w| w[0] < w[1])); + .all(|w| w[0].ref_time() < w[1].ref_time())); } #[test] @@ -852,7 +852,7 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight > Weight::zero()); + assert!(info.weight.all_gt(Weight::zero())); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. 
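A minimal sketch (not part of the diff) of how the comparison helpers introduced in this patch are meant to be used at call sites like the ones above; it assumes the `Weight::from_ref_time` constructor used throughout this series and the `all_*`/`any_*` methods defined later in `weight_v2.rs`. With the current single `ref_time` field the `all_*` and `any_*` variants coincide, but they diverge once `Weight` gains further components:

    // Hypothetical usage mirroring the updated assertions above.
    let small = Weight::from_ref_time(1_000);
    let big = Weight::from_ref_time(2_000);
    assert!(small.all_lt(big) && big.all_gt(small));
    assert!(small.all_gte(Weight::zero()) && small.any_lte(big));
    // `Ord`/`PartialOrd` are no longer derived, so `small < big` does not compile;
    // the explicit helpers make the intended multi-dimensional semantics unambiguous.
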
diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 0323be1382392..02faf876806e1 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -881,7 +881,7 @@ impl, I: 'static> Pallet { ensure!(proposal_len <= length_bound, Error::::WrongProposalLength); let proposal = ProposalOf::::get(hash).ok_or(Error::::ProposalMissing)?; let proposal_weight = proposal.get_dispatch_info().weight; - ensure!(proposal_weight <= weight_bound, Error::::WrongProposalWeight); + ensure!(proposal_weight.all_lte(weight_bound), Error::::WrongProposalWeight); Ok((proposal, proposal_len as usize)) } diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index ae20e4eeb0def..215b4d42daa06 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -111,7 +111,7 @@ where // NOTE that it is ok to allocate all available gas since it still ensured // by `charge` that it doesn't reach zero. - if self.gas_left < amount { + if self.gas_left.any_lt(amount) { Err(>::OutOfGas.into()) } else { self.gas_left -= amount; diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 6790830b97e02..3718be756e39d 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -1555,8 +1555,8 @@ mod tests { let gas_left = Weight::decode(&mut &*output.data).unwrap(); let actual_left = ext.gas_meter.gas_left(); - assert!(gas_left < gas_limit, "gas_left must be less than initial"); - assert!(gas_left > actual_left, "gas_left must be greater than final"); + assert!(gas_left.all_lt(gas_limit), "gas_left must be less than initial"); + assert!(gas_left.all_gt(actual_left), "gas_left must be greater than final"); } const CODE_VALUE_TRANSFERRED: &str = r#" @@ -1953,7 +1953,7 @@ mod tests { )] ); - assert!(mock_ext.gas_meter.gas_left() > Weight::zero()); + assert!(mock_ext.gas_meter.gas_left().all_gt(Weight::zero())); } const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 84949080aa58c..bfe5cb2e96728 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -985,7 +985,7 @@ pub mod pallet { let size = Self::snapshot_metadata().ok_or(Error::::MissingSnapshotMetadata)?; ensure!( - Self::solution_weight_of(&raw_solution, size) < T::SignedMaxWeight::get(), + Self::solution_weight_of(&raw_solution, size).all_lt(T::SignedMaxWeight::get()), Error::::SignedTooMuchWeight, ); @@ -2299,8 +2299,8 @@ mod tests { }; let mut active = 1; - while weight_with(active) <= - ::BlockWeights::get().max_block || + while weight_with(active) + .all_lte(::BlockWeights::get().max_block) || active == all_voters { active += 1; diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 8ef7d87473159..e99f2e9cef028 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -34,7 +34,7 @@ use sp_runtime::{ offchain::storage::{MutateStorageError, StorageValueRef}, DispatchError, SaturatedConversion, }; -use sp_std::{cmp::Ordering, prelude::*}; +use sp_std::prelude::*; /// Storage key used to store the last block number at which offchain worker ran. 
pub(crate) const OFFCHAIN_LAST_BLOCK: &[u8] = b"parity/multi-phase-unsigned-election"; @@ -638,16 +638,17 @@ impl Miner { }; let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { - match current_weight.cmp(&max_weight) { - Ordering::Less => { - let next_voters = voters.checked_add(step); - match next_voters { - Some(voters) if voters < max_voters => Ok(voters), - _ => Err(()), - } - }, - Ordering::Greater => voters.checked_sub(step).ok_or(()), - Ordering::Equal => Ok(voters), + if current_weight.all_lt(max_weight) { + let next_voters = voters.checked_add(step); + match next_voters { + Some(voters) if voters < max_voters => Ok(voters), + _ => Err(()), + } + } else if current_weight.any_gt(max_weight) { + voters.checked_sub(step).ok_or(()) + } else { + // If any of the constituent weights is equal to the max weight, we're at max + Ok(voters) } }; @@ -672,16 +673,16 @@ impl Miner { // Time to finish. We might have reduced less than expected due to rounding error. Increase // one last time if we have any room left, the reduce until we are sure we are below limit. - while voters < max_voters && weight_with(voters + 1) < max_weight { + while voters < max_voters && weight_with(voters + 1).all_lt(max_weight) { voters += 1; } - while voters.checked_sub(1).is_some() && weight_with(voters) > max_weight { + while voters.checked_sub(1).is_some() && weight_with(voters).any_gt(max_weight) { voters -= 1; } let final_decision = voters.min(size.voters); debug_assert!( - weight_with(final_decision) <= max_weight, + weight_with(final_decision).all_lte(max_weight), "weight_with({}) <= {}", final_decision, max_weight, diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index f6afb7a0c77f1..567117e621921 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -190,11 +190,11 @@ fn weights_work() { let default_call = pallet_example_basic::Call::::accumulate_dummy { increase_by: 10 }; let info1 = default_call.get_dispatch_info(); // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - assert!(info1.weight > Weight::zero()); + assert!(info1.weight.all_gt(Weight::zero())); // `set_dummy` is simpler than `accumulate_dummy`, and the weight // should be less. let custom_call = pallet_example_basic::Call::::set_dummy { new_value: 20 }; let info2 = custom_call.get_dispatch_info(); - assert!(info1.weight > info2.weight); + assert!(info1.weight.all_gt(info2.weight)); } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 5d3954ded0998..87ed8b93bbf8e 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -462,7 +462,7 @@ where let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); - if remaining_weight > Weight::zero() { + if remaining_weight.all_gt(Weight::zero()) { let used_weight = >::on_idle( block_number, remaining_weight, diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 9c39069bf9538..07c4fcad3078d 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -823,7 +823,7 @@ fn report_equivocation_has_valid_weight() { .map(::WeightInfo::report_equivocation) .collect::>() .windows(2) - .all(|w| w[0] < w[1])); + .all(|w| w[0].ref_time() < w[1].ref_time())); } #[test] @@ -856,7 +856,7 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. 
- assert!(info.weight > Weight::zero()); + assert!(info.weight.all_gt(Weight::zero())); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 10184ce84a9a8..01904de9a96fa 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -563,7 +563,10 @@ impl Pallet { if let Some((call, call_len)) = maybe_approved_call { // verify weight - ensure!(call.get_dispatch_info().weight <= max_weight, Error::::MaxWeightTooLow); + ensure!( + call.get_dispatch_info().weight.all_lte(max_weight), + Error::::MaxWeightTooLow + ); // Clean up storage before executing call to avoid an possibility of reentrancy // attack. diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index a005c051a1abc..7746b7d0123f4 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -368,7 +368,7 @@ pub mod pallet { let hard_deadline = s.priority <= schedule::HARD_DEADLINE; let test_weight = total_weight.saturating_add(call_weight).saturating_add(item_weight); - if !hard_deadline && order > 0 && test_weight > limit { + if !hard_deadline && order > 0 && test_weight.any_gt(limit) { // Cannot be scheduled this block - postpone until next. total_weight.saturating_accrue(T::WeightInfo::item(false, named, None)); if let Some(ref id) = s.maybe_id { diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 74374cc586a23..f651cc06b1cb0 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -33,7 +33,7 @@ use sp_runtime::{ Perbill, Percent, }; use sp_staking::{EraIndex, SessionIndex}; -use sp_std::{cmp::max, prelude::*}; +use sp_std::prelude::*; mod impls; @@ -1571,10 +1571,10 @@ pub mod pallet { /// to kick people under the new limits, `chill_other` should be called. // We assume the worst case for this call is either: all items are set or all items are // removed. 
- #[pallet::weight(max( - T::WeightInfo::set_staking_configs_all_set(), - T::WeightInfo::set_staking_configs_all_remove() - ))] + #[pallet::weight( + T::WeightInfo::set_staking_configs_all_set() + .max(T::WeightInfo::set_staking_configs_all_remove()) + )] pub fn set_staking_configs( origin: OriginFor, min_nominator_bond: ConfigOp>, diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 0d00f5a5e61df..448fbd2382454 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -3759,9 +3759,9 @@ fn payout_stakers_handles_weight_refund() { let half_max_nom_rewarded_weight = ::WeightInfo::payout_stakers_alive_staked(half_max_nom_rewarded); let zero_nom_payouts_weight = ::WeightInfo::payout_stakers_alive_staked(0); - assert!(zero_nom_payouts_weight > Weight::zero()); - assert!(half_max_nom_rewarded_weight > zero_nom_payouts_weight); - assert!(max_nom_rewarded_weight > half_max_nom_rewarded_weight); + assert!(zero_nom_payouts_weight.any_gt(Weight::zero())); + assert!(half_max_nom_rewarded_weight.any_gt(zero_nom_payouts_weight)); + assert!(max_nom_rewarded_weight.any_gt(half_max_nom_rewarded_weight)); let balance = 1000; bond_validator(11, 10, balance); @@ -4238,7 +4238,7 @@ fn do_not_die_when_active_is_ed() { fn on_finalize_weight_is_nonzero() { ExtBuilder::default().build_and_execute(|| { let on_finalize_weight = ::DbWeight::get().reads(1); - assert!(>::on_initialize(1) >= on_finalize_weight); + assert!(>::on_initialize(1).all_gte(on_finalize_weight)); }) } @@ -4249,8 +4249,8 @@ mod election_data_provider { #[test] fn targets_2sec_block() { let mut validators = 1000; - while ::WeightInfo::get_npos_targets(validators) < - 2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND + while ::WeightInfo::get_npos_targets(validators) + .all_lt(2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND) { validators += 1; } @@ -4267,8 +4267,8 @@ mod election_data_provider { let slashing_spans = validators; let mut nominators = 1000; - while ::WeightInfo::get_npos_voters(validators, nominators, slashing_spans) < - 2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND + while ::WeightInfo::get_npos_voters(validators, nominators, slashing_spans) + .all_lt(2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND) { nominators += 1; } diff --git a/frame/support/src/weights/block_weights.rs b/frame/support/src/weights/block_weights.rs index 51f707b2b0df5..57978f60a4ecb 100644 --- a/frame/support/src/weights/block_weights.rs +++ b/frame/support/src/weights/block_weights.rs @@ -68,8 +68,14 @@ mod test_weights { let w = super::BlockExecutionWeight::get(); // At least 100 µs. - assert!(w >= 100u32 * constants::WEIGHT_PER_MICROS, "Weight should be at least 100 µs."); + assert!( + w.ref_time() >= 100u64 * constants::WEIGHT_PER_MICROS.ref_time(), + "Weight should be at least 100 µs." + ); // At most 50 ms. - assert!(w <= 50u32 * constants::WEIGHT_PER_MILLIS, "Weight should be at most 50 ms."); + assert!( + w.ref_time() <= 50u64 * constants::WEIGHT_PER_MILLIS.ref_time(), + "Weight should be at most 50 ms." + ); } } diff --git a/frame/support/src/weights/extrinsic_weights.rs b/frame/support/src/weights/extrinsic_weights.rs index 2bee1059b0dad..54f70a063cdec 100644 --- a/frame/support/src/weights/extrinsic_weights.rs +++ b/frame/support/src/weights/extrinsic_weights.rs @@ -68,8 +68,14 @@ mod test_weights { let w = super::ExtrinsicBaseWeight::get(); // At least 10 µs. 
- assert!(w >= 10u32 * constants::WEIGHT_PER_MICROS, "Weight should be at least 10 µs."); + assert!( + w.ref_time() >= 10u64 * constants::WEIGHT_PER_MICROS.ref_time(), + "Weight should be at least 10 µs." + ); // At most 1 ms. - assert!(w <= constants::WEIGHT_PER_MILLIS, "Weight should be at most 1 ms."); + assert!( + w.ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + "Weight should be at most 1 ms." + ); } } diff --git a/frame/support/src/weights/paritydb_weights.rs b/frame/support/src/weights/paritydb_weights.rs index f498991729d7a..9f6ba82dbbff6 100644 --- a/frame/support/src/weights/paritydb_weights.rs +++ b/frame/support/src/weights/paritydb_weights.rs @@ -42,20 +42,20 @@ pub mod constants { fn sane() { // At least 1 µs. assert!( - W::get().reads(1) >= constants::WEIGHT_PER_MICROS, + W::get().reads(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), "Read weight should be at least 1 µs." ); assert!( - W::get().writes(1) >= constants::WEIGHT_PER_MICROS, + W::get().writes(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), "Write weight should be at least 1 µs." ); // At most 1 ms. assert!( - W::get().reads(1) <= constants::WEIGHT_PER_MILLIS, + W::get().reads(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), "Read weight should be at most 1 ms." ); assert!( - W::get().writes(1) <= constants::WEIGHT_PER_MILLIS, + W::get().writes(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), "Write weight should be at most 1 ms." ); } diff --git a/frame/support/src/weights/rocksdb_weights.rs b/frame/support/src/weights/rocksdb_weights.rs index 67571c4723f94..cab983de0ad0a 100644 --- a/frame/support/src/weights/rocksdb_weights.rs +++ b/frame/support/src/weights/rocksdb_weights.rs @@ -42,20 +42,20 @@ pub mod constants { fn sane() { // At least 1 µs. assert!( - W::get().reads(1) >= constants::WEIGHT_PER_MICROS, + W::get().reads(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), "Read weight should be at least 1 µs." ); assert!( - W::get().writes(1) >= constants::WEIGHT_PER_MICROS, + W::get().writes(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), "Write weight should be at least 1 µs." ); // At most 1 ms. assert!( - W::get().reads(1) <= constants::WEIGHT_PER_MILLIS, + W::get().reads(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), "Read weight should be at most 1 ms." ); assert!( - W::get().writes(1) <= constants::WEIGHT_PER_MILLIS, + W::get().writes(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), "Write weight should be at most 1 ms." ); } diff --git a/frame/support/src/weights/weight_v2.rs b/frame/support/src/weights/weight_v2.rs index 4bfab36663394..2d50f07add596 100644 --- a/frame/support/src/weights/weight_v2.rs +++ b/frame/support/src/weights/weight_v2.rs @@ -35,8 +35,6 @@ use super::*; Clone, RuntimeDebug, Default, - Ord, - PartialOrd, CompactAs, )] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] @@ -163,6 +161,62 @@ impl Weight { pub const fn zero() -> Self { Self { ref_time: 0 } } + + /// Returns true if any of `self`'s constituent weights is strictly greater than that of the + /// `other`'s, otherwise returns false. + pub const fn any_gt(self, other: Self) -> bool { + self.ref_time > other.ref_time + } + + /// Returns true if all of `self`'s constituent weights is strictly greater than that of the + /// `other`'s, otherwise returns false. 
+ pub const fn all_gt(self, other: Self) -> bool { + self.ref_time > other.ref_time + } + + /// Returns true if any of `self`'s constituent weights is strictly less than that of the + /// `other`'s, otherwise returns false. + pub const fn any_lt(self, other: Self) -> bool { + self.ref_time < other.ref_time + } + + /// Returns true if all of `self`'s constituent weights is strictly less than that of the + /// `other`'s, otherwise returns false. + pub const fn all_lt(self, other: Self) -> bool { + self.ref_time < other.ref_time + } + + /// Returns true if any of `self`'s constituent weights is greater than or equal to that of the + /// `other`'s, otherwise returns false. + pub const fn any_gte(self, other: Self) -> bool { + self.ref_time >= other.ref_time + } + + /// Returns true if all of `self`'s constituent weights is greater than or equal to that of the + /// `other`'s, otherwise returns false. + pub const fn all_gte(self, other: Self) -> bool { + self.ref_time >= other.ref_time + } + + /// Returns true if any of `self`'s constituent weights is less than or equal to that of the + /// `other`'s, otherwise returns false. + pub const fn any_lte(self, other: Self) -> bool { + self.ref_time <= other.ref_time + } + + /// Returns true if all of `self`'s constituent weights is less than or equal to that of the + /// `other`'s, otherwise returns false. + pub const fn all_lte(self, other: Self) -> bool { + self.ref_time <= other.ref_time + } + + /// Returns true if any of `self`'s constituent weights is equal to that of the `other`'s, + /// otherwise returns false. + pub const fn any_eq(self, other: Self) -> bool { + self.ref_time == other.ref_time + } + + // NOTE: `all_eq` does not exist, as it's simply the `eq` method from the `PartialEq` trait. } impl Zero for Weight { diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index d6434b6c63d8a..6e73a9e40179f 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -49,7 +49,8 @@ where ) -> Result<(), TransactionValidityError> { let max = T::BlockWeights::get().get(info.class).max_extrinsic; match max { - Some(max) if info.weight > max => Err(InvalidTransaction::ExhaustsResources.into()), + Some(max) if info.weight.any_gt(max) => + Err(InvalidTransaction::ExhaustsResources.into()), _ => Ok(()), } } @@ -144,7 +145,8 @@ where // Check if we don't exceed per-class allowance match limit_per_class.max_total { - Some(max) if per_class > max => return Err(InvalidTransaction::ExhaustsResources.into()), + Some(max) if per_class.any_gt(max) => + return Err(InvalidTransaction::ExhaustsResources.into()), // There is no `max_total` limit (`None`), // or we are below the limit. _ => {}, @@ -152,10 +154,10 @@ where // In cases total block weight is exceeded, we need to fall back // to `reserved` pool if there is any. - if all_weight.total() > maximum_weight.max_block { + if all_weight.total().any_gt(maximum_weight.max_block) { match limit_per_class.reserved { // We are over the limit in reserved pool. - Some(reserved) if per_class > reserved => + Some(reserved) if per_class.any_gt(reserved) => return Err(InvalidTransaction::ExhaustsResources.into()), // There is either no limit in reserved pool (`None`), // or we are below the limit. 
@@ -238,7 +240,7 @@ where } let unspent = post_info.calc_unspent(info); - if unspent > Weight::zero() { + if unspent.any_gt(Weight::zero()) { crate::BlockWeight::::mutate(|current_weight| { current_weight.sub(unspent, info.class); }) @@ -310,7 +312,7 @@ mod tests { check(|max, len| { assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); assert_eq!(System::block_weight().total(), Weight::MAX); - assert!(System::block_weight().total() > block_weight_limit()); + assert!(System::block_weight().total().all_gt(block_weight_limit())); }); check(|max, len| { assert_ok!(CheckWeight::::do_validate(max, len)); @@ -367,7 +369,7 @@ mod tests { new_test_ext().execute_with(|| { System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Normal); assert_eq!(System::block_weight().total(), Weight::MAX); - assert!(System::block_weight().total() > block_weight_limit()); + assert!(System::block_weight().total().all_gt(block_weight_limit())); }); } diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index 7626c7e8665e6..ba73d29806bba 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -229,7 +229,7 @@ impl BlockWeights { // Make sure that if total is set it's greater than base_block && // base_for_class error_assert!( - (max_for_class > self.base_block && max_for_class > base_for_class) + (max_for_class.all_gt(self.base_block) && max_for_class.all_gt(base_for_class)) || max_for_class == Weight::zero(), &mut error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", @@ -237,8 +237,10 @@ impl BlockWeights { ); // Max extrinsic can't be greater than max_for_class. error_assert!( - weights.max_extrinsic.unwrap_or(Weight::zero()) <= - max_for_class.saturating_sub(base_for_class), + weights + .max_extrinsic + .unwrap_or(Weight::zero()) + .all_lte(max_for_class.saturating_sub(base_for_class)), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, @@ -247,14 +249,14 @@ impl BlockWeights { ); // Max extrinsic should not be 0 error_assert!( - weights.max_extrinsic.unwrap_or_else(Weight::max_value) > Weight::zero(), + weights.max_extrinsic.unwrap_or_else(Weight::max_value).all_gt(Weight::zero()), &mut error, "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. error_assert!( - reserved > base_for_class || reserved == Weight::zero(), + reserved.all_gt(base_for_class) || reserved == Weight::zero(), &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, @@ -263,7 +265,7 @@ impl BlockWeights { ); // Make sure max block is greater than max_total if it's set. error_assert!( - self.max_block >= weights.max_total.unwrap_or(Weight::zero()), + self.max_block.all_gte(weights.max_total.unwrap_or(Weight::zero())), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, @@ -272,7 +274,7 @@ impl BlockWeights { ); // Make sure we can fit at least one extrinsic. error_assert!( - self.max_block > base_for_class + self.base_block, + self.max_block.all_gt(base_for_class + self.base_block), &mut error, "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", class, @@ -400,7 +402,7 @@ impl BlockWeightsBuilder { // compute max block size. 
for class in DispatchClass::all() { weights.max_block = match weights.per_class.get(*class).max_total { - Some(max) if max > weights.max_block => max, + Some(max) => max.max(weights.max_block), _ => weights.max_block, }; } diff --git a/frame/whitelist/src/lib.rs b/frame/whitelist/src/lib.rs index 5fb4e3aeb9ba6..a3e02b6caf740 100644 --- a/frame/whitelist/src/lib.rs +++ b/frame/whitelist/src/lib.rs @@ -172,7 +172,7 @@ pub mod pallet { .map_err(|_| Error::::UndecodableCall)?; ensure!( - call.get_dispatch_info().weight <= call_weight_witness, + call.get_dispatch_info().weight.all_lte(call_weight_witness), Error::::InvalidCallWeightWitness ); diff --git a/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/utils/frame/benchmarking-cli/src/overhead/weights.hbs index 5feac07b3a7d8..0a4100953401f 100644 --- a/utils/frame/benchmarking-cli/src/overhead/weights.hbs +++ b/utils/frame/benchmarking-cli/src/overhead/weights.hbs @@ -53,14 +53,14 @@ mod test_weights { {{#if (eq short_name "block")}} // At least 100 µs. - assert!(w >= Weight::from_ref_time(100 * constants::WEIGHT_PER_MICROS), "Weight should be at least 100 µs."); + assert!(w.ref_time() >= 100u64 * constants::WEIGHT_PER_MICROS.ref_time(), "Weight should be at least 100 µs."); // At most 50 ms. - assert!(w <= Weight::from_ref_time(50 * constants::WEIGHT_PER_MILLIS), "Weight should be at most 50 ms."); + assert!(w.ref_time() <= 50u64 * constants::WEIGHT_PER_MILLIS.ref_time(), "Weight should be at most 50 ms."); {{else}} // At least 10 µs. - assert!(w >= Weight::from_ref_time(10 * constants::WEIGHT_PER_MICROS), "Weight should be at least 10 µs."); + assert!(w.ref_time() >= 10u64 * constants::WEIGHT_PER_MICROS.ref_time(), "Weight should be at least 10 µs."); // At most 1 ms. - assert!(w <= Weight::from_ref_time(constants::WEIGHT_PER_MILLIS), "Weight should be at most 1 ms."); + assert!(w.ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), "Weight should be at most 1 ms."); {{/if}} } } diff --git a/utils/frame/benchmarking-cli/src/storage/weights.hbs b/utils/frame/benchmarking-cli/src/storage/weights.hbs index 3aa4703e29eb2..95f42c411d1fd 100644 --- a/utils/frame/benchmarking-cli/src/storage/weights.hbs +++ b/utils/frame/benchmarking-cli/src/storage/weights.hbs @@ -75,20 +75,20 @@ pub mod constants { fn bound() { // At least 1 µs. assert!( - W::get().reads(1) >= constants::WEIGHT_PER_MICROS, + W::get().reads(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), "Read weight should be at least 1 µs." ); assert!( - W::get().writes(1) >= constants::WEIGHT_PER_MICROS, + W::get().writes(1).ref_time() >= constants::WEIGHT_PER_MICROS.ref_time(), "Write weight should be at least 1 µs." ); // At most 1 ms. assert!( - W::get().reads(1) <= constants::WEIGHT_PER_MILLIS, + W::get().reads(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), "Read weight should be at most 1 ms." ); assert!( - W::get().writes(1) <= constants::WEIGHT_PER_MILLIS, + W::get().writes(1).ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), "Write weight should be at most 1 ms." ); } From 8075aa489e6cebf5f5855fe614e5107db2b1b1aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 8 Sep 2022 09:52:25 +0100 Subject: [PATCH 107/348] pallet-identity: Be more paranoid ;) (#12170) * pallet-identity: Be more paranoid ;) Check that a registrar is providing judgement for the correct identity. * Fixes * Fix alliance * :facepalm: * Fixes * ... 
--- frame/alliance/src/mock.rs | 56 +++++++++++++++++--- frame/benchmarking/src/lib.rs | 4 +- frame/identity/src/benchmarking.rs | 46 +++++++++------- frame/identity/src/lib.rs | 12 ++++- frame/identity/src/tests.rs | 85 ++++++++++++++++++++++++++---- 5 files changed, 163 insertions(+), 40 deletions(-) diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index c140c040fbd0f..45c13b76cc525 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -297,20 +297,62 @@ pub fn new_test_ext() -> sp_io::TestExternalities { twitter: Data::default(), }; assert_ok!(Identity::set_identity(Origin::signed(1), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 1, Judgement::KnownGood)); + assert_ok!(Identity::provide_judgement( + Origin::signed(1), + 0, + 1, + Judgement::KnownGood, + BlakeTwo256::hash_of(&info) + )); assert_ok!(Identity::set_identity(Origin::signed(2), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 2, Judgement::KnownGood)); + assert_ok!(Identity::provide_judgement( + Origin::signed(1), + 0, + 2, + Judgement::KnownGood, + BlakeTwo256::hash_of(&info) + )); assert_ok!(Identity::set_identity(Origin::signed(3), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 3, Judgement::KnownGood)); + assert_ok!(Identity::provide_judgement( + Origin::signed(1), + 0, + 3, + Judgement::KnownGood, + BlakeTwo256::hash_of(&info) + )); assert_ok!(Identity::set_identity(Origin::signed(4), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 4, Judgement::KnownGood)); + assert_ok!(Identity::provide_judgement( + Origin::signed(1), + 0, + 4, + Judgement::KnownGood, + BlakeTwo256::hash_of(&info) + )); assert_ok!(Identity::set_identity(Origin::signed(5), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 5, Judgement::KnownGood)); + assert_ok!(Identity::provide_judgement( + Origin::signed(1), + 0, + 5, + Judgement::KnownGood, + BlakeTwo256::hash_of(&info) + )); assert_ok!(Identity::set_identity(Origin::signed(6), Box::new(info.clone()))); assert_ok!(Identity::set_identity(Origin::signed(8), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 8, Judgement::KnownGood)); + assert_ok!(Identity::provide_judgement( + Origin::signed(1), + 0, + 8, + Judgement::KnownGood, + BlakeTwo256::hash_of(&info) + )); assert_ok!(Identity::set_identity(Origin::signed(9), Box::new(info.clone()))); - assert_ok!(Identity::provide_judgement(Origin::signed(1), 0, 9, Judgement::KnownGood)); + assert_ok!(Identity::provide_judgement( + Origin::signed(1), + 0, + 9, + Judgement::KnownGood, + BlakeTwo256::hash_of(&info) + )); // Joining before init should fail. assert_noop!( diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 18472595f15b9..614216a96940a 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -86,12 +86,12 @@ macro_rules! whitelist { /// ``` /// /// Note that due to parsing restrictions, if the `from` expression is not a single token (i.e. a -/// literal or constant), then it must be parenthesised. +/// literal or constant), then it must be parenthesized. /// /// The macro allows for a number of "arms", each representing an individual benchmark. Using the /// simple syntax, the associated dispatchable function maps 1:1 with the benchmark and the name of /// the benchmark is the same as that of the associated function. 
However, extended syntax allows -/// for arbitrary expresions to be evaluated in a benchmark (including for example, +/// for arbitrary expressions to be evaluated in a benchmark (including for example, /// `on_initialize`). /// /// Note that the ranges are *inclusive* on both sides. This is in contrast to ranges in Rust which diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 5d409f48bf567..6596f227383f2 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -76,9 +76,11 @@ fn create_sub_accounts( } // Set identity so `set_subs` does not fail. - let _ = T::Currency::make_free_balance_be(who, BalanceOf::::max_value() / 2u32.into()); - let info = create_identity_info::(1); - Identity::::set_identity(who_origin.into(), Box::new(info))?; + if IdentityOf::::get(who).is_none() { + let _ = T::Currency::make_free_balance_be(who, BalanceOf::::max_value() / 2u32.into()); + let info = create_identity_info::(1); + Identity::::set_identity(who_origin.into(), Box::new(info))?; + } Ok(subs) } @@ -138,7 +140,7 @@ benchmarks! { // Add an initial identity let initial_info = create_identity_info::(1); - Identity::::set_identity(caller_origin.clone(), Box::new(initial_info))?; + Identity::::set_identity(caller_origin.clone(), Box::new(initial_info.clone()))?; // User requests judgement from all the registrars, and they approve for i in 0..r { @@ -147,7 +149,8 @@ benchmarks! { RawOrigin::Signed(account("registrar", i, SEED)).into(), i, caller_lookup.clone(), - Judgement::Reasonable + Judgement::Reasonable, + T::Hashing::hash_of(&initial_info), )?; } caller @@ -200,13 +203,13 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let _ = add_sub_accounts::(&caller, s)?; }; - let x in 1 .. T::MaxAdditionalFields::get() => { - // Create their main identity with x additional fields - let info = create_identity_info::(x); - let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); - Identity::::set_identity(caller_origin, Box::new(info))?; - }; + let x in 1 .. T::MaxAdditionalFields::get(); + + // Create their main identity with x additional fields + let info = create_identity_info::(x); + let caller: T::AccountId = whitelisted_caller(); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + Identity::::set_identity(caller_origin.clone(), Box::new(info.clone()))?; // User requests judgement from all the registrars, and they approve for i in 0..r { @@ -215,7 +218,8 @@ benchmarks! { RawOrigin::Signed(account("registrar", i, SEED)).into(), i, caller_lookup.clone(), - Judgement::Reasonable + Judgement::Reasonable, + T::Hashing::hash_of(&info), )?; } ensure!(IdentityOf::::contains_key(&caller), "Identity does not exist."); @@ -328,15 +332,16 @@ benchmarks! { let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - let x in 1 .. T::MaxAdditionalFields::get() => { - let info = create_identity_info::(x); - Identity::::set_identity(user_origin.clone(), Box::new(info))?; - }; + let x in 1 .. 
T::MaxAdditionalFields::get(); + + let info = create_identity_info::(x); + let info_hash = T::Hashing::hash_of(&info); + Identity::::set_identity(user_origin.clone(), Box::new(info))?; let registrar_origin = T::RegistrarOrigin::successful_origin(); Identity::::add_registrar(registrar_origin, caller_lookup)?; Identity::::request_judgement(user_origin, r, 10u32.into())?; - }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) + }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable, info_hash) verify { assert_last_event::(Event::::JudgementGiven { target: user, registrar_index: r }.into()) } @@ -352,7 +357,7 @@ benchmarks! { let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); let info = create_identity_info::(x); - Identity::::set_identity(target_origin.clone(), Box::new(info))?; + Identity::::set_identity(target_origin.clone(), Box::new(info.clone()))?; let _ = add_sub_accounts::(&target, s)?; // User requests judgement from all the registrars, and they approve @@ -362,7 +367,8 @@ benchmarks! { RawOrigin::Signed(account("registrar", i, SEED)).into(), i, target_lookup.clone(), - Judgement::Reasonable + Judgement::Reasonable, + T::Hashing::hash_of(&info), )?; } ensure!(IdentityOf::::contains_key(&target), "Identity not set"); diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 0f80acceb949c..d24b17eb86ee0 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -79,7 +79,7 @@ mod types; pub mod weights; use frame_support::traits::{BalanceStatus, Currency, OnUnbalanced, ReservableCurrency}; -use sp_runtime::traits::{AppendZerosInput, Saturating, StaticLookup, Zero}; +use sp_runtime::traits::{AppendZerosInput, Hash, Saturating, StaticLookup, Zero}; use sp_std::prelude::*; pub use weights::WeightInfo; @@ -236,6 +236,8 @@ pub mod pallet { NotSub, /// Sub-account isn't owned by sender. NotOwned, + /// The provided judgement was for a different identity. + JudgementForDifferentIdentity, } #[pallet::event] @@ -746,6 +748,7 @@ pub mod pallet { /// - `target`: the account whose identity the judgement is upon. This must be an account /// with a registered identity. /// - `judgement`: the judgement of the registrar of index `reg_index` about `target`. + /// - `identity`: The hash of the [`IdentityInfo`] for that the judgement is provided. /// /// Emits `JudgementGiven` if successful. 
/// @@ -765,6 +768,7 @@ pub mod pallet { #[pallet::compact] reg_index: RegistrarIndex, target: AccountIdLookupOf, judgement: Judgement>, + identity: T::Hash, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let target = T::Lookup::lookup(target)?; @@ -772,10 +776,14 @@ pub mod pallet { >::get() .get(reg_index as usize) .and_then(Option::as_ref) - .and_then(|r| if r.account == sender { Some(r) } else { None }) + .filter(|r| r.account == sender) .ok_or(Error::::InvalidIndex)?; let mut id = >::get(&target).ok_or(Error::::InvalidTarget)?; + if T::Hashing::hash_of(&id.info) != identity { + return Err(Error::::JudgementForDifferentIdentity.into()) + } + let item = (reg_index, judgement); match id.judgements.binary_search_by_key(®_index, |x| x.0) { Ok(position) => { diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index a0773e9904a1c..4c1ced0a83324 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -279,27 +279,70 @@ fn registration_should_work() { fn uninvited_judgement_should_work() { new_test_ext().execute_with(|| { assert_noop!( - Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), + Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Reasonable, + H256::random() + ), Error::::InvalidIndex ); assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); assert_noop!( - Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), + Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Reasonable, + H256::random() + ), Error::::InvalidTarget ); assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); assert_noop!( - Identity::provide_judgement(Origin::signed(10), 0, 10, Judgement::Reasonable), + Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Reasonable, + H256::random() + ), + Error::::JudgementForDifferentIdentity + ); + + let identity_hash = BlakeTwo256::hash_of(&ten()); + + assert_noop!( + Identity::provide_judgement( + Origin::signed(10), + 0, + 10, + Judgement::Reasonable, + identity_hash + ), Error::::InvalidIndex ); assert_noop!( - Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::FeePaid(1)), + Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::FeePaid(1), + identity_hash + ), Error::::InvalidJudgement ); - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); + assert_ok!(Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Reasonable, + identity_hash + )); assert_eq!(Identity::identity(10).unwrap().judgements, vec![(0, Judgement::Reasonable)]); }); } @@ -309,7 +352,13 @@ fn clearing_judgement_should_work() { new_test_ext().execute_with(|| { assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); + assert_ok!(Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Reasonable, + BlakeTwo256::hash_of(&ten()) + )); assert_ok!(Identity::clear_identity(Origin::signed(10))); assert_eq!(Identity::identity(10), None); }); @@ -411,7 +460,13 @@ fn cancelling_requested_judgement_should_work() { assert_eq!(Balances::free_balance(10), 90); assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NotFound); - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); + 
assert_ok!(Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Reasonable, + BlakeTwo256::hash_of(&ten()) + )); assert_noop!( Identity::cancel_request(Origin::signed(10), 0), Error::::JudgementGiven @@ -438,7 +493,13 @@ fn requesting_judgement_should_work() { Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement ); - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Erroneous)); + assert_ok!(Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Erroneous, + BlakeTwo256::hash_of(&ten()) + )); // Registrar got their payment now. assert_eq!(Balances::free_balance(3), 20); @@ -453,7 +514,13 @@ fn requesting_judgement_should_work() { assert_ok!(Identity::request_judgement(Origin::signed(10), 1, 10)); // Re-requesting after the judgement has been reduced works. - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::OutOfDate)); + assert_ok!(Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::OutOfDate, + BlakeTwo256::hash_of(&ten()) + )); assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); }); } From e20209240e7a5e7b32b91e9fcbe0e78a7397c8d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 8 Sep 2022 09:57:23 +0100 Subject: [PATCH 108/348] Upgrade wasmtime to 0.40.0 (#12096) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Upgrade wasmtime to 0.40.0 * ".git/.scripts/bench-bot.sh" pallet dev pallet_contracts * Update in the other Cargo.toml * ".git/.scripts/bench-bot.sh" pallet dev pallet_contracts Co-authored-by: command-bot <> Co-authored-by: Alexander Theißen --- Cargo.lock | 237 ++--- client/executor/wasmtime/Cargo.toml | 9 +- client/executor/wasmtime/src/runtime.rs | 8 +- frame/contracts/src/weights.rs | 1254 +++++++++++------------ primitives/wasm-interface/Cargo.toml | 2 +- 5 files changed, 739 insertions(+), 771 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f6c2df81d6ee1..223a14efd73b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1137,11 +1137,11 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.85.0" +version = "0.87.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "899dc8d22f7771e7f887fb8bafa0c0d3ac1dea0c7f2c0ded6e20a855a7a1e890" +checksum = "9f91425bea5a5ac6d76b788477064944a7e21f0e240fd93f6f368a774a3efdd1" dependencies = [ - "cranelift-entity 0.85.0", + "cranelift-entity 0.87.1", ] [[package]] @@ -1163,14 +1163,14 @@ dependencies = [ [[package]] name = "cranelift-codegen" -version = "0.85.0" +version = "0.87.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dbdc03f695cf67e7bc45da57155528274f47390b85060af8107eb304ef167c4" +checksum = "8b83b4bbf7bc96db77b7b5b5e41fafc4001536e9f0cbfd702ed7d4d8f848dc06" dependencies = [ - "cranelift-bforest 0.85.0", - "cranelift-codegen-meta 0.85.0", - "cranelift-codegen-shared 0.85.0", - "cranelift-entity 0.85.0", + "cranelift-bforest 0.87.1", + "cranelift-codegen-meta 0.87.1", + "cranelift-codegen-shared 0.87.1", + "cranelift-entity 0.87.1", "cranelift-isle", "gimli 0.26.1", "log", @@ -1191,11 +1191,11 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.85.0" +version = "0.87.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea66cbba3eb7fcb3ec9f42839a6d381bd40cf97780397e7167daf9725d4ffa0" +checksum = 
"da02e8fff048c381b313a3dfef4deb2343976fb6d7acc8e7d9c86d4c93e3fa06" dependencies = [ - "cranelift-codegen-shared 0.85.0", + "cranelift-codegen-shared 0.87.1", ] [[package]] @@ -1206,9 +1206,9 @@ checksum = "9dabb5fe66e04d4652e434195b45ae65b5c8172d520247b8f66d8df42b2b45dc" [[package]] name = "cranelift-codegen-shared" -version = "0.85.0" +version = "0.87.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712fbebd119a476f59122b4ba51fdce893a66309b5c92bd5506bfb11a0587496" +checksum = "9abc2a06e8fc29e36660ebbc9e2503e18a051057072acbb1e75e7f7cf19cb95e" [[package]] name = "cranelift-entity" @@ -1218,9 +1218,9 @@ checksum = "3329733e4d4b8e91c809efcaa4faee80bf66f20164e3dd16d707346bd3494799" [[package]] name = "cranelift-entity" -version = "0.85.0" +version = "0.87.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cb8b95859c4e14c9e860db78d596a904fdbe9261990233b62bd526346cb56cb" +checksum = "aeced7874890fc25d85cacc5e626c4d67931c7c25aad1c2ad521684744c1ff5c" dependencies = [ "serde", ] @@ -1239,11 +1239,11 @@ dependencies = [ [[package]] name = "cranelift-frontend" -version = "0.85.0" +version = "0.87.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7b91b19a7d1221a73f190c0e865c12be77a84f661cac89abfd4ab5820142886" +checksum = "fc1d301ccad6fce05d9c9793d433d225fafdd57661b98d268d8d162e9291ff2e" dependencies = [ - "cranelift-codegen 0.85.0", + "cranelift-codegen 0.87.1", "log", "smallvec", "target-lexicon", @@ -1251,34 +1251,34 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.85.0" +version = "0.87.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d4f53bc86fb458e59c695c6a95ce8346e6a8377ee7ffc058e3ac08b5f94cb1" +checksum = "bd7b100db19320848986b4df1da19501dbddeb706a799f502222f72f889b0fab" [[package]] name = "cranelift-native" -version = "0.85.0" +version = "0.87.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "592f035d0ed41214dfeeb37abd536233536a27be6b4c2d39f380cd402f0cff4f" +checksum = "7be18d8b976cddc822e52343f328b7593d26dd2f1aeadd90da071596a210d524" dependencies = [ - "cranelift-codegen 0.85.0", + "cranelift-codegen 0.87.1", "libc", "target-lexicon", ] [[package]] name = "cranelift-wasm" -version = "0.85.0" +version = "0.87.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "295add6bf0b527a8bc50d02e31ff878585d2d2db53cb7e8754d6d82b84480086" +checksum = "2f9e48bb632a2e189b38a9fa89fa5a6eea687a5a4c613bbef7c2b7522c3ad0e0" dependencies = [ - "cranelift-codegen 0.85.0", - "cranelift-entity 0.85.0", - "cranelift-frontend 0.85.0", + "cranelift-codegen 0.87.1", + "cranelift-entity 0.87.1", + "cranelift-frontend 0.87.1", "itertools", "log", "smallvec", - "wasmparser 0.85.0", + "wasmparser 0.88.0", "wasmtime-types", ] @@ -3087,12 +3087,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "io-lifetimes" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec58677acfea8a15352d42fc87d11d63596ade9239e0a7c9352914417515dbe6" - [[package]] name = "io-lifetimes" version = "0.7.2" @@ -4154,12 +4148,6 @@ dependencies = [ "statrs", ] -[[package]] -name = "linux-raw-sys" -version = "0.0.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5284f00d480e1c39af34e72f8ad60b94f47007e3481cd3b731c1d67190ddc7b7" - [[package]] name = "linux-raw-sys" version = "0.0.46" @@ -4315,11 +4303,11 @@ 
checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memfd" -version = "0.4.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6627dc657574b49d6ad27105ed671822be56e0d2547d413bfbf3e8d8fa92e7a" +checksum = "480b5a5de855d11ff13195950bdc8b98b5e942ef47afc447f6615cdcc4e15d80" dependencies = [ - "libc", + "rustix", ] [[package]] @@ -5096,6 +5084,18 @@ dependencies = [ "memchr", ] +[[package]] +name = "object" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +dependencies = [ + "crc32fast", + "hashbrown 0.12.3", + "indexmap", + "memchr", +] + [[package]] name = "once_cell" version = "1.12.0" @@ -7378,9 +7378,9 @@ dependencies = [ [[package]] name = "regalloc2" -version = "0.2.2" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d37148700dbb38f994cd99a1431613057f37ed934d7e4d799b7ab758c482461" +checksum = "d43a209257d978ef079f3d446331d0f1794f5e0fc19b306a199983857833a779" dependencies = [ "fxhash", "log", @@ -7415,18 +7415,6 @@ version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" -[[package]] -name = "region" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0" -dependencies = [ - "bitflags", - "libc", - "mach", - "winapi", -] - [[package]] name = "region" version = "3.0.0" @@ -7610,29 +7598,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.33.7" +version = "0.35.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938a344304321a9da4973b9ff4f9f8db9caf4597dfd9dda6a60b523340a0fff0" +checksum = "72c825b8aa8010eb9ee99b75f05e10180b9278d161583034d7574c9d617aeada" dependencies = [ "bitflags", "errno", - "io-lifetimes 0.5.3", + "io-lifetimes", "libc", - "linux-raw-sys 0.0.42", - "winapi", -] - -[[package]] -name = "rustix" -version = "0.35.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef258c11e17f5c01979a10543a30a4e12faef6aab217a74266e747eefa3aed88" -dependencies = [ - "bitflags", - "errno", - "io-lifetimes 0.7.2", - "libc", - "linux-raw-sys 0.0.46", + "linux-raw-sys", "windows-sys 0.36.1", ] @@ -8264,8 +8238,7 @@ dependencies = [ "parity-scale-codec", "parity-wasm 0.42.2", "paste 1.0.6", - "rustix 0.33.7", - "rustix 0.35.6", + "rustix", "sc-allocator", "sc-executor-common", "sc-runtime-test", @@ -11620,7 +11593,7 @@ dependencies = [ "enumset", "leb128", "loupe", - "region 3.0.0", + "region", "rkyv", "wasmer-compiler", "wasmer-engine", @@ -11669,7 +11642,7 @@ dependencies = [ "loupe", "memoffset", "more-asserts", - "region 3.0.0", + "region", "rkyv", "serde", "thiserror", @@ -11711,49 +11684,55 @@ checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65" [[package]] name = "wasmparser" -version = "0.85.0" +version = "0.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "570460c58b21e9150d2df0eaaedbb7816c34bcec009ae0dcc976e40ba81463e7" +checksum = "fb8cf7dd82407fe68161bedcd57fde15596f32ebf6e9b3bdbf3ae1da20e38e5e" dependencies = [ "indexmap", ] [[package]] name = "wasmtime" -version = "0.38.0" +version = "0.40.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c842f9c8e190fe01300fc8d715e9368c775670fb9856247c67abffdb5236d6db" +checksum = "a020a3f6587fa7a7d98a021156177735ebb07212a6239a85ab5f14b2f728508f" dependencies = [ "anyhow", - "backtrace", "bincode", "cfg-if 1.0.0", "indexmap", - "lazy_static", "libc", "log", - "object 0.28.3", + "object 0.29.0", "once_cell", "paste 1.0.6", "psm", "rayon", - "region 2.2.0", "serde", "target-lexicon", - "wasmparser 0.85.0", + "wasmparser 0.88.0", "wasmtime-cache", "wasmtime-cranelift", "wasmtime-environ", "wasmtime-jit", "wasmtime-runtime", - "winapi", + "windows-sys 0.36.1", +] + +[[package]] +name = "wasmtime-asm-macros" +version = "0.40.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed4ada1fdd4d9a2aa37be652abcc31ae3188ad0efcefb4571ef4f785be2d777" +dependencies = [ + "cfg-if 1.0.0", ] [[package]] name = "wasmtime-cache" -version = "0.38.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce2aa752e864a33eef2a6629edc59554e75f0bc1719431dac5e49eed516af69" +checksum = "d96a03a5732ef39b83943d9d72de8ac2d58623d3bfaaea4d9a92aea5fcd9acf5" dependencies = [ "anyhow", "base64", @@ -11761,61 +11740,59 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix 0.33.7", + "rustix", "serde", "sha2 0.9.8", "toml", - "winapi", + "windows-sys 0.36.1", "zstd", ] [[package]] name = "wasmtime-cranelift" -version = "0.38.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "922361eb8c03cea8909bc922471202f6c6bc2f0c682fac2fe473740441c86b3b" +checksum = "1fc59c28fe895112db09e262fb9c483f9e7b82c78a82a6ded69567ccc0e9795b" dependencies = [ "anyhow", - "cranelift-codegen 0.85.0", - "cranelift-entity 0.85.0", - "cranelift-frontend 0.85.0", + "cranelift-codegen 0.87.1", + "cranelift-entity 0.87.1", + "cranelift-frontend 0.87.1", "cranelift-native", "cranelift-wasm", "gimli 0.26.1", "log", - "more-asserts", - "object 0.28.3", + "object 0.29.0", "target-lexicon", "thiserror", - "wasmparser 0.85.0", + "wasmparser 0.88.0", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.38.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e602f1120fc40a3f016f1f69d08c86cfeff7b867bed1462901953e6871f85167" +checksum = "11086e573d2635a45ac0d44697a8e4586e058cf1b190f76bea466ca2ec36c30a" dependencies = [ "anyhow", - "cranelift-entity 0.85.0", + "cranelift-entity 0.87.1", "gimli 0.26.1", "indexmap", "log", - "more-asserts", - "object 0.28.3", + "object 0.29.0", "serde", "target-lexicon", "thiserror", - "wasmparser 0.85.0", + "wasmparser 0.88.0", "wasmtime-types", ] [[package]] name = "wasmtime-jit" -version = "0.38.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49af1445759a8e797a92f27dd0983c155615648263052e0b80d69e7d223896b7" +checksum = "d5444a78b74144718633f8642eccd7c4858f4c6f0c98ae6a3668998adf177ba2" dependencies = [ "addr2line", "anyhow", @@ -11824,38 +11801,36 @@ dependencies = [ "cpp_demangle", "gimli 0.26.1", "log", - "object 0.28.3", - "region 2.2.0", + "object 0.29.0", "rustc-demangle", - "rustix 0.33.7", + "rustix", "serde", "target-lexicon", "thiserror", "wasmtime-environ", "wasmtime-jit-debug", "wasmtime-runtime", - "winapi", + "windows-sys 0.36.1", ] [[package]] name = "wasmtime-jit-debug" -version = "0.38.0" +version = "0.40.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5dd480cc6dc0a401653e45b79796a3317f8228990d84bc2271bdaf0810071" +checksum = "c2bf6a667d2a29b2b0ed42bcf7564f00c595d92c24acb4d241c7c4d950b1910c" dependencies = [ - "lazy_static", - "object 0.28.3", - "rustix 0.33.7", + "object 0.29.0", + "once_cell", + "rustix", ] [[package]] name = "wasmtime-runtime" -version = "0.38.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e875bcd02d1ecfc7d099dd58354d55d73467652eb2b103ff470fe3aecb7d0381" +checksum = "ee064ce7b563cc201cdf3bb1cc4b233f386d8c57a96e55f4c4afe6103f4bd6a1" dependencies = [ "anyhow", - "backtrace", "cc", "cfg-if 1.0.0", "indexmap", @@ -11864,26 +11839,26 @@ dependencies = [ "mach", "memfd", "memoffset", - "more-asserts", + "paste 1.0.6", "rand 0.8.4", - "region 2.2.0", - "rustix 0.33.7", + "rustix", "thiserror", + "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", - "winapi", + "windows-sys 0.36.1", ] [[package]] name = "wasmtime-types" -version = "0.38.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd63a19ba61ac7448add4dc1fecb8d78304812af2a52dad04b89f887791b156" +checksum = "01e104bd9e625181d53ead85910bbc0863aa5f0c6ef96836fe9a5cc65da11b69" dependencies = [ - "cranelift-entity 0.85.0", + "cranelift-entity 0.87.1", "serde", "thiserror", - "wasmparser 0.85.0", + "wasmparser 0.88.0", ] [[package]] diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index e75f2d9d043d0..e4df2791c2ff8 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -21,7 +21,7 @@ parity-wasm = "0.42.0" # When bumping wasmtime do not forget to also bump rustix # to exactly the same version as used by wasmtime! -wasmtime = { version = "0.38.0", default-features = false, features = [ +wasmtime = { version = "0.40.1", default-features = false, features = [ "cache", "cranelift", "jitdump", @@ -35,16 +35,13 @@ sp-runtime-interface = { version = "6.0.0", path = "../../../primitives/runtime- sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } sp-wasm-interface = { version = "6.0.0", features = ["wasmtime"], path = "../../../primitives/wasm-interface" } -[target.'cfg(target_os = "linux")'.dependencies] -rustix = { version = "0.35.6", default-features = false, features = ["std", "mm", "fs", "param", "use-libc"] } -once_cell = "1.12.0" - # Here we include the rustix crate used by wasmtime just to enable its 'use-libc' flag. # # By default rustix directly calls the appropriate syscalls completely bypassing libc; # this doesn't have any actual benefits for us besides making it harder to debug memory # problems (since then `mmap` etc. cannot be easily hooked into). 
-rustix_wasmtime = { package = "rustix", version = "0.33.7", default-features = false, features = ["std", "use-libc"] } +rustix = { version = "0.35.9", default-features = false, features = ["std", "mm", "fs", "param", "use-libc"] } +once_cell = "1.12.0" [dev-dependencies] wat = "1.0" diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 871de4e2300d2..5925a1792aef2 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -320,9 +320,7 @@ fn common_config(semantics: &Semantics) -> std::result::Result native_stack_max, @@ -334,9 +332,7 @@ fn common_config(semantics: &Semantics) -> std::result::Result 1024 * 1024, }; - config - .max_wasm_stack(native_stack_max as usize) - .map_err(|e| WasmError::Other(format!("cannot set max wasm stack: {:#}", e)))?; + config.max_wasm_stack(native_stack_max as usize); config.parallel_compilation(semantics.parallel_compilation); diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 4ec30437c57f1..6ec10b3590349 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-06, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-09-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -166,15 +166,15 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(2_948_000 as u64) + Weight::from_ref_time(2_985_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(14_858_000 as u64) + Weight::from_ref_time(14_335_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(890_000 as u64).saturating_mul(k as u64)) + .saturating_add(Weight::from_ref_time(891_000 as u64).saturating_mul(k as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) @@ -182,9 +182,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(14_778_000 as u64) + Weight::from_ref_time(13_905_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_228_000 as u64).saturating_mul(q as u64)) + .saturating_add(Weight::from_ref_time(1_257_000 as u64).saturating_mul(q as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -192,9 +192,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. 
fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(21_622_000 as u64) + Weight::from_ref_time(26_333_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(44_000 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -205,9 +205,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(207_601_000 as u64) + Weight::from_ref_time(238_784_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -222,9 +222,9 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(287_355_000 as u64) + Weight::from_ref_time(325_497_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(107_000 as u64).saturating_mul(c as u64)) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) @@ -239,7 +239,7 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(178_912_000 as u64) + Weight::from_ref_time(201_129_000 as u64) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) @@ -251,7 +251,7 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - Weight::from_ref_time(159_188_000 as u64) + Weight::from_ref_time(176_645_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -261,9 +261,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. 
fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(52_177_000 as u64) + Weight::from_ref_time(53_613_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(47_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -272,7 +272,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(37_011_000 as u64) + Weight::from_ref_time(37_441_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -280,7 +280,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:2 w:2) // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - Weight::from_ref_time(40_206_000 as u64) + Weight::from_ref_time(40_030_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -291,9 +291,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(224_309_000 as u64) - // Standard Error: 53_000 - .saturating_add(Weight::from_ref_time(36_885_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_426_000 as u64) + // Standard Error: 46_000 + .saturating_add(Weight::from_ref_time(37_834_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -304,9 +304,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(166_137_000 as u64) - // Standard Error: 428_000 - .saturating_add(Weight::from_ref_time(214_063_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(181_406_000 as u64) + // Standard Error: 474_000 + .saturating_add(Weight::from_ref_time(206_995_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -318,9 +318,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(174_069_000 as u64) - // Standard Error: 443_000 - .saturating_add(Weight::from_ref_time(272_524_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(198_301_000 as u64) + // Standard Error: 432_000 + .saturating_add(Weight::from_ref_time(264_877_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -332,9 +332,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(225_577_000 as u64) - // Standard Error: 39_000 - .saturating_add(Weight::from_ref_time(40_425_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(243_754_000 as u64) + // Standard Error: 51_000 + .saturating_add(Weight::from_ref_time(41_219_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -345,9 +345,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(221_162_000 as u64) - // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(15_192_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_352_000 as u64) + // Standard Error: 33_000 + .saturating_add(Weight::from_ref_time(15_228_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -358,9 +358,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(224_517_000 as u64) - // Standard Error: 38_000 - .saturating_add(Weight::from_ref_time(36_722_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_637_000 as u64) + // Standard Error: 45_000 + .saturating_add(Weight::from_ref_time(37_717_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -371,9 +371,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(223_864_000 as u64) - // Standard Error: 35_000 - .saturating_add(Weight::from_ref_time(36_355_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_127_000 as u64) + // Standard Error: 45_000 + .saturating_add(Weight::from_ref_time(37_481_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -384,9 +384,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(226_396_000 as u64) - // Standard Error: 86_000 - .saturating_add(Weight::from_ref_time(112_018_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(246_236_000 as u64) + // Standard Error: 97_000 + .saturating_add(Weight::from_ref_time(110_095_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -397,9 +397,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(222_775_000 as u64) - // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(36_691_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_552_000 as u64) + // Standard Error: 38_000 + .saturating_add(Weight::from_ref_time(37_189_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -410,9 +410,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(223_202_000 as u64) - // Standard Error: 42_000 - .saturating_add(Weight::from_ref_time(36_393_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_622_000 as u64) + // Standard Error: 56_000 + .saturating_add(Weight::from_ref_time(37_630_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -423,9 +423,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(223_507_000 as u64) - // Standard Error: 44_000 - .saturating_add(Weight::from_ref_time(36_211_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_284_000 as u64) + // Standard Error: 49_000 + .saturating_add(Weight::from_ref_time(37_375_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -436,9 +436,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(225_500_000 as u64) - // Standard Error: 58_000 - .saturating_add(Weight::from_ref_time(36_290_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_463_000 as u64) + // Standard Error: 43_000 + .saturating_add(Weight::from_ref_time(37_313_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -450,9 +450,9 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(225_310_000 as u64) - // Standard Error: 86_000 - .saturating_add(Weight::from_ref_time(98_714_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(245_287_000 as u64) + // Standard Error: 88_000 + .saturating_add(Weight::from_ref_time(99_890_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -463,9 +463,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(149_071_000 as u64) - // Standard Error: 30_000 - .saturating_add(Weight::from_ref_time(18_084_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(168_156_000 as u64) + // Standard Error: 14_000 + .saturating_add(Weight::from_ref_time(17_991_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -476,9 +476,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(222_918_000 as u64) - // Standard Error: 48_000 - .saturating_add(Weight::from_ref_time(34_848_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_833_000 as u64) + // Standard Error: 50_000 + .saturating_add(Weight::from_ref_time(35_622_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -489,9 +489,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(280_543_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(9_599_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(330_765_000 as u64) + // Standard Error: 8_000 + .saturating_add(Weight::from_ref_time(8_609_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -502,9 +502,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { - Weight::from_ref_time(219_310_000 as u64) - // Standard Error: 303_000 - .saturating_add(Weight::from_ref_time(1_187_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(237_729_000 as u64) + // Standard Error: 488_000 + .saturating_add(Weight::from_ref_time(2_053_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -515,9 +515,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(221_399_000 as u64) + Weight::from_ref_time(240_005_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(182_000 as u64).saturating_mul(n as u64)) + .saturating_add(Weight::from_ref_time(169_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -530,9 +530,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. 
fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(222_285_000 as u64) - // Standard Error: 163_000 - .saturating_add(Weight::from_ref_time(52_104_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_524_000 as u64) + // Standard Error: 426_000 + .saturating_add(Weight::from_ref_time(51_863_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -546,9 +546,9 @@ impl WeightInfo for SubstrateWeight { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(233_586_000 as u64) - // Standard Error: 90_000 - .saturating_add(Weight::from_ref_time(133_209_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(250_710_000 as u64) + // Standard Error: 95_000 + .saturating_add(Weight::from_ref_time(132_741_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -559,9 +559,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(231_267_000 as u64) - // Standard Error: 118_000 - .saturating_add(Weight::from_ref_time(232_306_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(245_816_000 as u64) + // Standard Error: 115_000 + .saturating_add(Weight::from_ref_time(235_313_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -573,11 +573,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(438_016_000 as u64) - // Standard Error: 538_000 - .saturating_add(Weight::from_ref_time(179_846_000 as u64).saturating_mul(t as u64)) - // Standard Error: 147_000 - .saturating_add(Weight::from_ref_time(66_893_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(469_886_000 as u64) + // Standard Error: 463_000 + .saturating_add(Weight::from_ref_time(177_286_000 as u64).saturating_mul(t as u64)) + // Standard Error: 127_000 + .saturating_add(Weight::from_ref_time(64_646_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -590,18 +590,18 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(156_730_000 as u64) - // Standard Error: 48_000 - .saturating_add(Weight::from_ref_time(29_019_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(174_414_000 as u64) + // Standard Error: 26_000 + .saturating_add(Weight::from_ref_time(29_037_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(185_733_000 as u64) - // Standard Error: 384_000 - .saturating_add(Weight::from_ref_time(427_052_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(196_960_000 as u64) + // Standard Error: 414_000 + .saturating_add(Weight::from_ref_time(412_810_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -610,9 +610,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(520_776_000 as u64) - // Standard Error: 1_346_000 - .saturating_add(Weight::from_ref_time(93_389_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(529_493_000 as u64) + // Standard Error: 1_251_000 + .saturating_add(Weight::from_ref_time(88_835_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(52 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(50 as u64)) @@ -621,9 +621,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(504_262_000 as u64) - // Standard Error: 1_198_000 - .saturating_add(Weight::from_ref_time(68_581_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(512_531_000 as u64) + // Standard Error: 1_107_000 + .saturating_add(Weight::from_ref_time(67_926_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(52 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(50 as u64)) @@ -632,9 +632,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(188_507_000 as u64) - // Standard Error: 514_000 - .saturating_add(Weight::from_ref_time(416_965_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(208_848_000 as u64) + // Standard Error: 398_000 + .saturating_add(Weight::from_ref_time(400_478_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -643,9 +643,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(481_328_000 as u64) - // Standard Error: 1_304_000 - .saturating_add(Weight::from_ref_time(70_798_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(488_903_000 as u64) + // Standard Error: 1_226_000 + .saturating_add(Weight::from_ref_time(69_519_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(49 as u64)) @@ -654,9 +654,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(200_040_000 as u64) - // Standard Error: 292_000 - .saturating_add(Weight::from_ref_time(341_568_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(211_825_000 as u64) + // Standard Error: 401_000 + .saturating_add(Weight::from_ref_time(328_607_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -664,9 +664,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(443_969_000 as u64) - // Standard Error: 1_076_000 - .saturating_add(Weight::from_ref_time(154_674_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(452_324_000 as u64) + // Standard Error: 1_026_000 + .saturating_add(Weight::from_ref_time(150_657_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -674,9 +674,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(194_849_000 as u64) - // Standard Error: 310_000 - .saturating_add(Weight::from_ref_time(320_113_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(218_730_000 as u64) + // Standard Error: 318_000 + .saturating_add(Weight::from_ref_time(302_022_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -684,9 +684,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(416_712_000 as u64) - // Standard Error: 932_000 - .saturating_add(Weight::from_ref_time(63_903_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(425_128_000 as u64) + // Standard Error: 875_000 + .saturating_add(Weight::from_ref_time(60_095_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -694,9 +694,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(194_760_000 as u64) - // Standard Error: 375_000 - .saturating_add(Weight::from_ref_time(438_855_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(208_039_000 as u64) + // Standard Error: 445_000 + .saturating_add(Weight::from_ref_time(425_933_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -705,9 +705,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
 	fn seal_take_storage_per_kb(n: u32, ) -> Weight {
-		Weight::from_ref_time(509_485_000 as u64)
-		// Standard Error: 1_486_000
-		.saturating_add(Weight::from_ref_time(162_250_000 as u64).saturating_mul(n as u64))
+		Weight::from_ref_time(518_112_000 as u64)
+		// Standard Error: 1_406_000
+		.saturating_add(Weight::from_ref_time(159_006_000 as u64).saturating_mul(n as u64))
 		.saturating_add(T::DbWeight::get().reads(52 as u64))
 		.saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64)))
 		.saturating_add(T::DbWeight::get().writes(49 as u64))
@@ -720,9 +720,9 @@ impl WeightInfo for SubstrateWeight {
 	// Storage: System EventTopics (r:2 w:2)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_transfer(r: u32, ) -> Weight {
-		Weight::from_ref_time(162_740_000 as u64)
-		// Standard Error: 691_000
-		.saturating_add(Weight::from_ref_time(1_338_567_000 as u64).saturating_mul(r as u64))
+		Weight::from_ref_time(169_573_000 as u64)
+		// Standard Error: 701_000
+		.saturating_add(Weight::from_ref_time(1_345_943_000 as u64).saturating_mul(r as u64))
 		.saturating_add(T::DbWeight::get().reads(7 as u64))
 		.saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64)))
 		.saturating_add(T::DbWeight::get().writes(4 as u64))
@@ -736,8 +736,8 @@ impl WeightInfo for SubstrateWeight {
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_call(r: u32, ) -> Weight {
 		Weight::from_ref_time(0 as u64)
-		// Standard Error: 8_018_000
-		.saturating_add(Weight::from_ref_time(16_181_274_000 as u64).saturating_mul(r as u64))
+		// Standard Error: 8_472_000
+		.saturating_add(Weight::from_ref_time(17_569_902_000 as u64).saturating_mul(r as u64))
 		.saturating_add(T::DbWeight::get().reads(7 as u64))
 		.saturating_add(T::DbWeight::get().reads((160 as u64).saturating_mul(r as u64)))
 		.saturating_add(T::DbWeight::get().writes(3 as u64))
@@ -751,8 +751,8 @@ impl WeightInfo for SubstrateWeight {
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_delegate_call(r: u32, ) -> Weight {
 		Weight::from_ref_time(0 as u64)
-		// Standard Error: 8_806_000
-		.saturating_add(Weight::from_ref_time(15_988_209_000 as u64).saturating_mul(r as u64))
+		// Standard Error: 7_932_000
+		.saturating_add(Weight::from_ref_time(17_368_706_000 as u64).saturating_mul(r as u64))
 		.saturating_add(T::DbWeight::get().reads((158 as u64).saturating_mul(r as u64)))
 		.saturating_add(T::DbWeight::get().writes((79 as u64).saturating_mul(r as u64)))
 	}
@@ -764,11 +764,11 @@ impl WeightInfo for SubstrateWeight {
 	/// The range of component `t` is `[0, 1]`.
 	/// The range of component `c` is `[0, 1024]`.
 	fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight {
-		Weight::from_ref_time(9_685_415_000 as u64)
-		// Standard Error: 6_048_000
-		.saturating_add(Weight::from_ref_time(1_132_315_000 as u64).saturating_mul(t as u64))
-		// Standard Error: 9_000
-		.saturating_add(Weight::from_ref_time(9_753_000 as u64).saturating_mul(c as u64))
+		Weight::from_ref_time(11_036_172_000 as u64)
+		// Standard Error: 5_683_000
+		.saturating_add(Weight::from_ref_time(1_154_945_000 as u64).saturating_mul(t as u64))
+		// Standard Error: 8_000
+		.saturating_add(Weight::from_ref_time(8_670_000 as u64).saturating_mul(c as u64))
 		.saturating_add(T::DbWeight::get().reads(167 as u64))
 		.saturating_add(T::DbWeight::get().reads((81 as u64).saturating_mul(t as u64)))
 		.saturating_add(T::DbWeight::get().writes(163 as u64))
@@ -784,8 +784,8 @@ impl WeightInfo for SubstrateWeight {
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_instantiate(r: u32, ) -> Weight {
 		Weight::from_ref_time(0 as u64)
-		// Standard Error: 30_079_000
-		.saturating_add(Weight::from_ref_time(21_155_990_000 as u64).saturating_mul(r as u64))
+		// Standard Error: 25_721_000
+		.saturating_add(Weight::from_ref_time(22_827_927_000 as u64).saturating_mul(r as u64))
 		.saturating_add(T::DbWeight::get().reads(8 as u64))
 		.saturating_add(T::DbWeight::get().reads((400 as u64).saturating_mul(r as u64)))
 		.saturating_add(T::DbWeight::get().writes(5 as u64))
@@ -801,9 +801,9 @@ impl WeightInfo for SubstrateWeight {
 	/// The range of component `t` is `[0, 1]`.
 	/// The range of component `s` is `[0, 960]`.
 	fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight {
-		Weight::from_ref_time(12_321_649_000 as u64)
-		// Standard Error: 77_000
-		.saturating_add(Weight::from_ref_time(125_883_000 as u64).saturating_mul(s as u64))
+		Weight::from_ref_time(13_991_911_000 as u64)
+		// Standard Error: 70_000
+		.saturating_add(Weight::from_ref_time(124_789_000 as u64).saturating_mul(s as u64))
 		.saturating_add(T::DbWeight::get().reads(249 as u64))
 		.saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(t as u64)))
 		.saturating_add(T::DbWeight::get().writes(247 as u64))
@@ -816,9 +816,9 @@ impl WeightInfo for SubstrateWeight {
 	// Storage: System EventTopics (r:2 w:2)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_hash_sha2_256(r: u32, ) -> Weight {
-		Weight::from_ref_time(224_020_000 as u64)
-		// Standard Error: 63_000
-		.saturating_add(Weight::from_ref_time(57_966_000 as u64).saturating_mul(r as u64))
+		Weight::from_ref_time(244_026_000 as u64)
+		// Standard Error: 69_000
+		.saturating_add(Weight::from_ref_time(58_077_000 as u64).saturating_mul(r as u64))
 		.saturating_add(T::DbWeight::get().reads(6 as u64))
 		.saturating_add(T::DbWeight::get().writes(3 as u64))
 	}
@@ -830,8 +830,8 @@ impl WeightInfo for SubstrateWeight {
 	/// The range of component `n` is `[0, 1024]`.
 	fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight {
 		Weight::from_ref_time(0 as u64)
-		// Standard Error: 86_000
-		.saturating_add(Weight::from_ref_time(319_801_000 as u64).saturating_mul(n as u64))
+		// Standard Error: 104_000
+		.saturating_add(Weight::from_ref_time(320_432_000 as u64).saturating_mul(n as u64))
 		.saturating_add(T::DbWeight::get().reads(6 as u64))
 		.saturating_add(T::DbWeight::get().writes(3 as u64))
 	}
@@ -842,9 +842,9 @@ impl WeightInfo for SubstrateWeight {
 	// Storage: System EventTopics (r:2 w:2)
 	/// The range of component `r` is `[0, 20]`.
 	fn seal_hash_keccak_256(r: u32, ) -> Weight {
-		Weight::from_ref_time(222_308_000 as u64)
-		// Standard Error: 78_000
-		.saturating_add(Weight::from_ref_time(70_579_000 as u64).saturating_mul(r as u64))
+		Weight::from_ref_time(242_548_000 as u64)
+		// Standard Error: 88_000
+		.saturating_add(Weight::from_ref_time(70_774_000 as u64).saturating_mul(r as u64))
 		.saturating_add(T::DbWeight::get().reads(6 as u64))
 		.saturating_add(T::DbWeight::get().writes(3 as u64))
 	}
@@ -856,8 +856,8 @@ impl WeightInfo for SubstrateWeight {
 	/// The range of component `n` is `[0, 1024]`.
fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 97_000 - .saturating_add(Weight::from_ref_time(247_328_000 as u64).saturating_mul(n as u64)) + // Standard Error: 152_000 + .saturating_add(Weight::from_ref_time(246_268_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -868,9 +868,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(221_851_000 as u64) - // Standard Error: 57_000 - .saturating_add(Weight::from_ref_time(48_446_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(242_368_000 as u64) + // Standard Error: 68_000 + .saturating_add(Weight::from_ref_time(48_368_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -882,8 +882,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 93_000 - .saturating_add(Weight::from_ref_time(97_258_000 as u64).saturating_mul(n as u64)) + // Standard Error: 94_000 + .saturating_add(Weight::from_ref_time(95_913_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -894,9 +894,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(221_346_000 as u64) - // Standard Error: 46_000 - .saturating_add(Weight::from_ref_time(47_910_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_100_000 as u64) + // Standard Error: 56_000 + .saturating_add(Weight::from_ref_time(48_043_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -908,8 +908,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(97_458_000 as u64).saturating_mul(n as u64)) + // Standard Error: 93_000 + .saturating_add(Weight::from_ref_time(96_000_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -920,9 +920,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(355_672_000 as u64) - // Standard Error: 678_000 - .saturating_add(Weight::from_ref_time(2_970_861_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(375_902_000 as u64) + // Standard Error: 613_000 + .saturating_add(Weight::from_ref_time(2_972_514_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -933,9 +933,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(232_179_000 as u64) - // Standard Error: 463_000 - .saturating_add(Weight::from_ref_time(2_059_953_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(260_758_000 as u64) + // Standard Error: 396_000 + .saturating_add(Weight::from_ref_time(2_069_937_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -948,316 +948,316 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 1_725_000 - .saturating_add(Weight::from_ref_time(1_107_351_000 as u64).saturating_mul(r as u64)) + // Standard Error: 1_629_000 + .saturating_add(Weight::from_ref_time(1_093_632_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads((158 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes((158 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(70_023_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(848_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_130_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(798_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(70_335_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_761_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_439_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_717_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(69_875_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_289_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_647_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(2_112_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(71_128_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(2_288_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_045_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(2_228_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(68_850_000 as u64) + Weight::from_ref_time(71_013_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_558_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(2_284_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(69_585_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_293_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_846_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_278_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_492_000 as u64) + Weight::from_ref_time(69_471_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_774_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(69_344_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_089_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(68_967_000 as u64) + // Standard Error: 6_000 + .saturating_add(Weight::from_ref_time(1_984_000 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(72_833_000 as u64) + Weight::from_ref_time(72_756_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(e as u64)) + .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(70_722_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(6_545_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_426_000 as u64) + // Standard Error: 13_000 + .saturating_add(Weight::from_ref_time(7_095_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(84_465_000 as u64) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(8_307_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(85_090_000 as u64) + // Standard Error: 34_000 + .saturating_add(Weight::from_ref_time(8_897_000 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(93_185_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(599_000 as u64).saturating_mul(p as u64)) + Weight::from_ref_time(94_306_000 as u64) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(585_000 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(70_473_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(875_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_494_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(857_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(70_352_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(907_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_467_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(890_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(70_239_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_219_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_451_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_196_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(72_049_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(1_543_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_857_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_442_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(72_551_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_693_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_589_000 as u64) + // Standard Error: 6_000 + .saturating_add(Weight::from_ref_time(1_682_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(70_228_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(889_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_230_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(820_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(70_987_000 as u64) - // Standard Error: 88_000 - .saturating_add(Weight::from_ref_time(186_348_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_027_000 as u64) + // Standard Error: 296_000 + .saturating_add(Weight::from_ref_time(177_139_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(70_186_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_304_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_270_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_250_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(69_796_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_088_000 as u64) + // Standard Error: 14_000 + .saturating_add(Weight::from_ref_time(1_281_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(70_184_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_315_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_538_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(1_283_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(70_059_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_316_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_842_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_266_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(70_066_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_282_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_754_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_230_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(70_100_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_285_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_842_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_193_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(69_992_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_330_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_670_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_281_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(69_935_000 as u64) + Weight::from_ref_time(69_728_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_829_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_770_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(70_024_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_820_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_522_000 as u64) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(1_767_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(69_794_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_831_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_224_000 as u64) + // Standard Error: 10_000 + .saturating_add(Weight::from_ref_time(1_752_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(69_997_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_817_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_625_000 as u64) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(1_763_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(70_610_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_804_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_816_000 as u64) + // Standard Error: 31_000 + .saturating_add(Weight::from_ref_time(1_852_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(69_759_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_846_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_019_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_780_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(70_405_000 as u64) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(1_808_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_881_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(1_783_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(69_968_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_838_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_021_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_766_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(69_825_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_854_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_156_000 as u64) + // Standard Error: 17_000 + .saturating_add(Weight::from_ref_time(1_712_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(69_973_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_822_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_004_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_820_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(69_907_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_824_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_028_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_772_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(72_280_000 as u64) - // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(1_756_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_159_000 as u64) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(1_813_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(69_827_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_033_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_760_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(69_995_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_461_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_702_000 as u64) + // Standard Error: 6_000 + .saturating_add(Weight::from_ref_time(2_512_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(69_627_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_453_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(68_933_000 as u64) + // Standard Error: 23_000 + .saturating_add(Weight::from_ref_time(2_501_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(70_040_000 as u64) + Weight::from_ref_time(69_447_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_456_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(2_555_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(69_754_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_450_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_872_000 as u64) + // Standard Error: 6_000 + .saturating_add(Weight::from_ref_time(2_459_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(69_848_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_836_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_867_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_783_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(69_904_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_837_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_982_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_772_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(69_912_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_839_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_822_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_779_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(69_656_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_858_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_511_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_806_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(69_927_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_956_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_788_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(70_025_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_069_000 as u64) + // Standard Error: 11_000 + .saturating_add(Weight::from_ref_time(1_789_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(69_916_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_843_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_719_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_792_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(69_988_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_842_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_709_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_800_000 as u64).saturating_mul(r as u64)) } } @@ -1265,15 +1265,15 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(2_948_000 as u64) + Weight::from_ref_time(2_985_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(14_858_000 as u64) + Weight::from_ref_time(14_335_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(890_000 as u64).saturating_mul(k as u64)) + .saturating_add(Weight::from_ref_time(891_000 as u64).saturating_mul(k as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) @@ -1281,9 +1281,9 @@ impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(14_778_000 as u64) + Weight::from_ref_time(13_905_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_228_000 as u64).saturating_mul(q as u64)) + .saturating_add(Weight::from_ref_time(1_257_000 as u64).saturating_mul(q as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -1291,9 +1291,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(21_622_000 as u64) + Weight::from_ref_time(26_333_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(44_000 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -1304,9 +1304,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(207_601_000 as u64) + Weight::from_ref_time(238_784_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1321,9 +1321,9 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. 
fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(287_355_000 as u64) + Weight::from_ref_time(325_497_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(107_000 as u64).saturating_mul(c as u64)) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) @@ -1338,7 +1338,7 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(178_912_000 as u64) + Weight::from_ref_time(201_129_000 as u64) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) @@ -1350,7 +1350,7 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - Weight::from_ref_time(159_188_000 as u64) + Weight::from_ref_time(176_645_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1360,9 +1360,9 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(52_177_000 as u64) + Weight::from_ref_time(53_613_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(47_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1371,7 +1371,7 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(37_011_000 as u64) + Weight::from_ref_time(37_441_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1379,7 +1379,7 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:2 w:2) // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - Weight::from_ref_time(40_206_000 as u64) + Weight::from_ref_time(40_030_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -1390,9 +1390,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(224_309_000 as u64) - // Standard Error: 53_000 - .saturating_add(Weight::from_ref_time(36_885_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_426_000 as u64) + // Standard Error: 46_000 + .saturating_add(Weight::from_ref_time(37_834_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1403,9 +1403,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(166_137_000 as u64) - // Standard Error: 428_000 - .saturating_add(Weight::from_ref_time(214_063_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(181_406_000 as u64) + // Standard Error: 474_000 + .saturating_add(Weight::from_ref_time(206_995_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1417,9 +1417,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(174_069_000 as u64) - // Standard Error: 443_000 - .saturating_add(Weight::from_ref_time(272_524_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(198_301_000 as u64) + // Standard Error: 432_000 + .saturating_add(Weight::from_ref_time(264_877_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1431,9 +1431,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(225_577_000 as u64) - // Standard Error: 39_000 - .saturating_add(Weight::from_ref_time(40_425_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(243_754_000 as u64) + // Standard Error: 51_000 + .saturating_add(Weight::from_ref_time(41_219_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1444,9 +1444,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(221_162_000 as u64) - // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(15_192_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_352_000 as u64) + // Standard Error: 33_000 + .saturating_add(Weight::from_ref_time(15_228_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1457,9 +1457,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(224_517_000 as u64) - // Standard Error: 38_000 - .saturating_add(Weight::from_ref_time(36_722_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_637_000 as u64) + // Standard Error: 45_000 + .saturating_add(Weight::from_ref_time(37_717_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1470,9 +1470,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(223_864_000 as u64) - // Standard Error: 35_000 - .saturating_add(Weight::from_ref_time(36_355_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_127_000 as u64) + // Standard Error: 45_000 + .saturating_add(Weight::from_ref_time(37_481_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1483,9 +1483,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(226_396_000 as u64) - // Standard Error: 86_000 - .saturating_add(Weight::from_ref_time(112_018_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(246_236_000 as u64) + // Standard Error: 97_000 + .saturating_add(Weight::from_ref_time(110_095_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1496,9 +1496,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(222_775_000 as u64) - // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(36_691_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_552_000 as u64) + // Standard Error: 38_000 + .saturating_add(Weight::from_ref_time(37_189_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1509,9 +1509,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(223_202_000 as u64) - // Standard Error: 42_000 - .saturating_add(Weight::from_ref_time(36_393_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_622_000 as u64) + // Standard Error: 56_000 + .saturating_add(Weight::from_ref_time(37_630_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1522,9 +1522,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(223_507_000 as u64) - // Standard Error: 44_000 - .saturating_add(Weight::from_ref_time(36_211_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_284_000 as u64) + // Standard Error: 49_000 + .saturating_add(Weight::from_ref_time(37_375_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1535,9 +1535,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(225_500_000 as u64) - // Standard Error: 58_000 - .saturating_add(Weight::from_ref_time(36_290_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_463_000 as u64) + // Standard Error: 43_000 + .saturating_add(Weight::from_ref_time(37_313_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1549,9 +1549,9 @@ impl WeightInfo for () { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(225_310_000 as u64) - // Standard Error: 86_000 - .saturating_add(Weight::from_ref_time(98_714_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(245_287_000 as u64) + // Standard Error: 88_000 + .saturating_add(Weight::from_ref_time(99_890_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1562,9 +1562,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(149_071_000 as u64) - // Standard Error: 30_000 - .saturating_add(Weight::from_ref_time(18_084_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(168_156_000 as u64) + // Standard Error: 14_000 + .saturating_add(Weight::from_ref_time(17_991_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1575,9 +1575,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(222_918_000 as u64) - // Standard Error: 48_000 - .saturating_add(Weight::from_ref_time(34_848_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_833_000 as u64) + // Standard Error: 50_000 + .saturating_add(Weight::from_ref_time(35_622_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1588,9 +1588,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(280_543_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(9_599_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(330_765_000 as u64) + // Standard Error: 8_000 + .saturating_add(Weight::from_ref_time(8_609_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1601,9 +1601,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. 
fn seal_return(r: u32, ) -> Weight { - Weight::from_ref_time(219_310_000 as u64) - // Standard Error: 303_000 - .saturating_add(Weight::from_ref_time(1_187_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(237_729_000 as u64) + // Standard Error: 488_000 + .saturating_add(Weight::from_ref_time(2_053_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1614,9 +1614,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(221_399_000 as u64) + Weight::from_ref_time(240_005_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(182_000 as u64).saturating_mul(n as u64)) + .saturating_add(Weight::from_ref_time(169_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1629,9 +1629,9 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(222_285_000 as u64) - // Standard Error: 163_000 - .saturating_add(Weight::from_ref_time(52_104_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_524_000 as u64) + // Standard Error: 426_000 + .saturating_add(Weight::from_ref_time(51_863_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1645,9 +1645,9 @@ impl WeightInfo for () { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(233_586_000 as u64) - // Standard Error: 90_000 - .saturating_add(Weight::from_ref_time(133_209_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(250_710_000 as u64) + // Standard Error: 95_000 + .saturating_add(Weight::from_ref_time(132_741_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1658,9 +1658,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(231_267_000 as u64) - // Standard Error: 118_000 - .saturating_add(Weight::from_ref_time(232_306_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(245_816_000 as u64) + // Standard Error: 115_000 + .saturating_add(Weight::from_ref_time(235_313_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1672,11 +1672,11 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. 
fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(438_016_000 as u64) - // Standard Error: 538_000 - .saturating_add(Weight::from_ref_time(179_846_000 as u64).saturating_mul(t as u64)) - // Standard Error: 147_000 - .saturating_add(Weight::from_ref_time(66_893_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(469_886_000 as u64) + // Standard Error: 463_000 + .saturating_add(Weight::from_ref_time(177_286_000 as u64).saturating_mul(t as u64)) + // Standard Error: 127_000 + .saturating_add(Weight::from_ref_time(64_646_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1689,18 +1689,18 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(156_730_000 as u64) - // Standard Error: 48_000 - .saturating_add(Weight::from_ref_time(29_019_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(174_414_000 as u64) + // Standard Error: 26_000 + .saturating_add(Weight::from_ref_time(29_037_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(185_733_000 as u64) - // Standard Error: 384_000 - .saturating_add(Weight::from_ref_time(427_052_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(196_960_000 as u64) + // Standard Error: 414_000 + .saturating_add(Weight::from_ref_time(412_810_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1709,9 +1709,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(520_776_000 as u64) - // Standard Error: 1_346_000 - .saturating_add(Weight::from_ref_time(93_389_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(529_493_000 as u64) + // Standard Error: 1_251_000 + .saturating_add(Weight::from_ref_time(88_835_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(52 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(50 as u64)) @@ -1720,9 +1720,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(504_262_000 as u64) - // Standard Error: 1_198_000 - .saturating_add(Weight::from_ref_time(68_581_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(512_531_000 as u64) + // Standard Error: 1_107_000 + .saturating_add(Weight::from_ref_time(67_926_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(52 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(50 as u64)) @@ -1731,9 +1731,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(188_507_000 as u64) - // Standard Error: 514_000 - .saturating_add(Weight::from_ref_time(416_965_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(208_848_000 as u64) + // Standard Error: 398_000 + .saturating_add(Weight::from_ref_time(400_478_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -1742,9 +1742,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(481_328_000 as u64) - // Standard Error: 1_304_000 - .saturating_add(Weight::from_ref_time(70_798_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(488_903_000 as u64) + // Standard Error: 1_226_000 + .saturating_add(Weight::from_ref_time(69_519_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(49 as u64)) @@ -1753,9 +1753,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(200_040_000 as u64) - // Standard Error: 292_000 - .saturating_add(Weight::from_ref_time(341_568_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(211_825_000 as u64) + // Standard Error: 401_000 + .saturating_add(Weight::from_ref_time(328_607_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1763,9 +1763,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(443_969_000 as u64) - // Standard Error: 1_076_000 - .saturating_add(Weight::from_ref_time(154_674_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(452_324_000 as u64) + // Standard Error: 1_026_000 + .saturating_add(Weight::from_ref_time(150_657_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1773,9 +1773,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(194_849_000 as u64) - // Standard Error: 310_000 - .saturating_add(Weight::from_ref_time(320_113_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(218_730_000 as u64) + // Standard Error: 318_000 + .saturating_add(Weight::from_ref_time(302_022_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1783,9 +1783,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(416_712_000 as u64) - // Standard Error: 932_000 - .saturating_add(Weight::from_ref_time(63_903_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(425_128_000 as u64) + // Standard Error: 875_000 + .saturating_add(Weight::from_ref_time(60_095_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1793,9 +1793,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(194_760_000 as u64) - // Standard Error: 375_000 - .saturating_add(Weight::from_ref_time(438_855_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(208_039_000 as u64) + // Standard Error: 445_000 + .saturating_add(Weight::from_ref_time(425_933_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -1804,9 +1804,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(509_485_000 as u64) - // Standard Error: 1_486_000 - .saturating_add(Weight::from_ref_time(162_250_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(518_112_000 as u64) + // Standard Error: 1_406_000 + .saturating_add(Weight::from_ref_time(159_006_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(52 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(49 as u64)) @@ -1819,9 +1819,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(162_740_000 as u64) - // Standard Error: 691_000 - .saturating_add(Weight::from_ref_time(1_338_567_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(169_573_000 as u64) + // Standard Error: 701_000 + .saturating_add(Weight::from_ref_time(1_345_943_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -1835,8 +1835,8 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 20]`. 
fn seal_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 8_018_000 - .saturating_add(Weight::from_ref_time(16_181_274_000 as u64).saturating_mul(r as u64)) + // Standard Error: 8_472_000 + .saturating_add(Weight::from_ref_time(17_569_902_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((160 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1850,8 +1850,8 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 8_806_000 - .saturating_add(Weight::from_ref_time(15_988_209_000 as u64).saturating_mul(r as u64)) + // Standard Error: 7_932_000 + .saturating_add(Weight::from_ref_time(17_368_706_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads((158 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes((79 as u64).saturating_mul(r as u64))) } @@ -1863,11 +1863,11 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(9_685_415_000 as u64) - // Standard Error: 6_048_000 - .saturating_add(Weight::from_ref_time(1_132_315_000 as u64).saturating_mul(t as u64)) - // Standard Error: 9_000 - .saturating_add(Weight::from_ref_time(9_753_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(11_036_172_000 as u64) + // Standard Error: 5_683_000 + .saturating_add(Weight::from_ref_time(1_154_945_000 as u64).saturating_mul(t as u64)) + // Standard Error: 8_000 + .saturating_add(Weight::from_ref_time(8_670_000 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(167 as u64)) .saturating_add(RocksDbWeight::get().reads((81 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(163 as u64)) @@ -1883,8 +1883,8 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 30_079_000 - .saturating_add(Weight::from_ref_time(21_155_990_000 as u64).saturating_mul(r as u64)) + // Standard Error: 25_721_000 + .saturating_add(Weight::from_ref_time(22_827_927_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().reads((400 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(5 as u64)) @@ -1900,9 +1900,9 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(12_321_649_000 as u64) - // Standard Error: 77_000 - .saturating_add(Weight::from_ref_time(125_883_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(13_991_911_000 as u64) + // Standard Error: 70_000 + .saturating_add(Weight::from_ref_time(124_789_000 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(249 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(247 as u64)) @@ -1915,9 +1915,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(224_020_000 as u64) - // Standard Error: 63_000 - .saturating_add(Weight::from_ref_time(57_966_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_026_000 as u64) + // Standard Error: 69_000 + .saturating_add(Weight::from_ref_time(58_077_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1929,8 +1929,8 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 86_000 - .saturating_add(Weight::from_ref_time(319_801_000 as u64).saturating_mul(n as u64)) + // Standard Error: 104_000 + .saturating_add(Weight::from_ref_time(320_432_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1941,9 +1941,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(222_308_000 as u64) - // Standard Error: 78_000 - .saturating_add(Weight::from_ref_time(70_579_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(242_548_000 as u64) + // Standard Error: 88_000 + .saturating_add(Weight::from_ref_time(70_774_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1955,8 +1955,8 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 97_000 - .saturating_add(Weight::from_ref_time(247_328_000 as u64).saturating_mul(n as u64)) + // Standard Error: 152_000 + .saturating_add(Weight::from_ref_time(246_268_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1967,9 +1967,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(221_851_000 as u64) - // Standard Error: 57_000 - .saturating_add(Weight::from_ref_time(48_446_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(242_368_000 as u64) + // Standard Error: 68_000 + .saturating_add(Weight::from_ref_time(48_368_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1981,8 +1981,8 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 93_000 - .saturating_add(Weight::from_ref_time(97_258_000 as u64).saturating_mul(n as u64)) + // Standard Error: 94_000 + .saturating_add(Weight::from_ref_time(95_913_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1993,9 +1993,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(221_346_000 as u64) - // Standard Error: 46_000 - .saturating_add(Weight::from_ref_time(47_910_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_100_000 as u64) + // Standard Error: 56_000 + .saturating_add(Weight::from_ref_time(48_043_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2007,8 +2007,8 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(97_458_000 as u64).saturating_mul(n as u64)) + // Standard Error: 93_000 + .saturating_add(Weight::from_ref_time(96_000_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2019,9 +2019,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(355_672_000 as u64) - // Standard Error: 678_000 - .saturating_add(Weight::from_ref_time(2_970_861_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(375_902_000 as u64) + // Standard Error: 613_000 + .saturating_add(Weight::from_ref_time(2_972_514_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2032,9 +2032,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(232_179_000 as u64) - // Standard Error: 463_000 - .saturating_add(Weight::from_ref_time(2_059_953_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(260_758_000 as u64) + // Standard Error: 396_000 + .saturating_add(Weight::from_ref_time(2_069_937_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2047,315 +2047,315 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 1_725_000 - .saturating_add(Weight::from_ref_time(1_107_351_000 as u64).saturating_mul(r as u64)) + // Standard Error: 1_629_000 + .saturating_add(Weight::from_ref_time(1_093_632_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads((158 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes((158 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(70_023_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(848_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_130_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(798_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(70_335_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_761_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_439_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_717_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(69_875_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_289_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_647_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(2_112_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(71_128_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(2_288_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_045_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(2_228_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(68_850_000 as u64) + Weight::from_ref_time(71_013_000 as u64) // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_558_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(2_284_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(69_585_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_293_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_846_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_278_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_492_000 as u64) + Weight::from_ref_time(69_471_000 as u64) // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_774_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(69_344_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_089_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(68_967_000 as u64) + // Standard Error: 6_000 + .saturating_add(Weight::from_ref_time(1_984_000 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(72_833_000 as u64) + Weight::from_ref_time(72_756_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(e as u64)) + .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(70_722_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(6_545_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_426_000 as u64) + // Standard Error: 13_000 + .saturating_add(Weight::from_ref_time(7_095_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(84_465_000 as u64) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(8_307_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(85_090_000 as u64) + // Standard Error: 34_000 + .saturating_add(Weight::from_ref_time(8_897_000 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(93_185_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(599_000 as u64).saturating_mul(p as u64)) + Weight::from_ref_time(94_306_000 as u64) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(585_000 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(70_473_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(875_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_494_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(857_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(70_352_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(907_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_467_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(890_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(70_239_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_219_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_451_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_196_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(72_049_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(1_543_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_857_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_442_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(72_551_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_693_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_589_000 as u64) + // Standard Error: 6_000 + .saturating_add(Weight::from_ref_time(1_682_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(70_228_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(889_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_230_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(820_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. 
fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(70_987_000 as u64) - // Standard Error: 88_000 - .saturating_add(Weight::from_ref_time(186_348_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_027_000 as u64) + // Standard Error: 296_000 + .saturating_add(Weight::from_ref_time(177_139_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(70_186_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_304_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_270_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_250_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(69_796_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_324_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_088_000 as u64) + // Standard Error: 14_000 + .saturating_add(Weight::from_ref_time(1_281_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(70_184_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_315_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_538_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(1_283_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(70_059_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_316_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_842_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_266_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(70_066_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_282_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_754_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_230_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(70_100_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_285_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_842_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_193_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(69_992_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_330_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_670_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_281_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(69_935_000 as u64) + Weight::from_ref_time(69_728_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_829_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_770_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(70_024_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_820_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_522_000 as u64) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(1_767_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(69_794_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_831_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_224_000 as u64) + // Standard Error: 10_000 + .saturating_add(Weight::from_ref_time(1_752_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(69_997_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_817_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_625_000 as u64) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(1_763_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(70_610_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_804_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_816_000 as u64) + // Standard Error: 31_000 + .saturating_add(Weight::from_ref_time(1_852_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(69_759_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_846_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_019_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_780_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(70_405_000 as u64) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(1_808_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_881_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(1_783_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(69_968_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_838_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_021_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_766_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(69_825_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_854_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_156_000 as u64) + // Standard Error: 17_000 + .saturating_add(Weight::from_ref_time(1_712_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(69_973_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_822_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_004_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_820_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(69_907_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_824_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_028_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_772_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(72_280_000 as u64) - // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(1_756_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_159_000 as u64) + // Standard Error: 7_000 + .saturating_add(Weight::from_ref_time(1_813_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(69_827_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_033_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_760_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(69_995_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_461_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_702_000 as u64) + // Standard Error: 6_000 + .saturating_add(Weight::from_ref_time(2_512_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(69_627_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_453_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(68_933_000 as u64) + // Standard Error: 23_000 + .saturating_add(Weight::from_ref_time(2_501_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(70_040_000 as u64) + Weight::from_ref_time(69_447_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_456_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(2_555_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(69_754_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_450_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_872_000 as u64) + // Standard Error: 6_000 + .saturating_add(Weight::from_ref_time(2_459_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(69_848_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_836_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_867_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_783_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(69_904_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_837_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_982_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_772_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(69_912_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_839_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_822_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_779_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(69_656_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_858_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_511_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_806_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(69_927_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_956_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_788_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(70_025_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_069_000 as u64) + // Standard Error: 11_000 + .saturating_add(Weight::from_ref_time(1_789_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(69_916_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_843_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_719_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_792_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(69_988_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_842_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_709_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_800_000 as u64).saturating_mul(r as u64)) } } diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index ee6ce51efdc02..89542d8229a0c 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", optional = true } wasmi = { version = "0.9.1", optional = true } -wasmtime = { version = "0.38.0", default-features = false, optional = true } +wasmtime = { version = "0.40.1", default-features = false, optional = true } sp-std = { version = "4.0.0", default-features = false, path = "../std" } [features] From 047853550b19efacf48fffc87077381bc6fb8bb5 Mon Sep 17 00:00:00 2001 From: Boluwatife Bakre Date: Thu, 8 Sep 2022 11:46:25 +0100 Subject: [PATCH 109/348] Use parameter_types instead of thread_local for test-setup (#12036) * Edit to Assets. parameter_types * fixes * Test Fixes. WIP * Edits to pallet-aura * Camel Case Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Implementation of mutate fn * update to pallet-aura * Update to frame-system. Fixes * Update to frame-support-test. 
CamelCases * Updates to frame- contracts, offences, staking, bounties, child bounties * Edit to mutate fn. Changes to frame-contracts. CamelCase pallet-aura * Edits to frame-contracts & executive * cargo +nightly fmt * unused import removed * unused import removed * cargo +nightly fmt * minor adjustment * updates * updates * cargo +nightly fmt * cargo +nightly fmt * take fn implemented * update * update * Fixes to CallFilter * cargo +nightly fmt * final fixes * Default changed to $value * Update frame/support/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- frame/assets/src/mock.rs | 29 +-- frame/aura/src/mock.rs | 14 +- frame/bounties/src/tests.rs | 4 - frame/child-bounties/src/tests.rs | 4 - frame/contracts/src/exec.rs | 59 +++--- frame/contracts/src/storage/meter.rs | 183 ++++++++---------- frame/contracts/src/tests.rs | 45 +++-- .../election-provider-multi-phase/src/mock.rs | 3 +- frame/executive/src/lib.rs | 50 +++-- frame/im-online/src/mock.rs | 34 ++-- frame/im-online/src/tests.rs | 41 ++-- frame/merkle-mountain-range/src/mock.rs | 13 +- frame/merkle-mountain-range/src/tests.rs | 2 +- frame/offences/src/mock.rs | 15 +- frame/scheduler/src/mock.rs | 17 +- frame/scored-pool/src/mock.rs | 11 +- frame/scored-pool/src/tests.rs | 10 +- frame/session/src/historical/mod.rs | 10 +- frame/session/src/historical/offchain.rs | 11 +- frame/session/src/mock.rs | 87 +++++---- frame/session/src/tests.rs | 12 +- frame/staking/src/mock.rs | 9 +- frame/staking/src/tests.rs | 7 +- frame/support/src/lib.rs | 15 ++ frame/support/test/tests/construct_runtime.rs | 16 +- frame/system/src/mock.rs | 7 +- frame/system/src/tests.rs | 4 +- frame/timestamp/src/mock.rs | 11 +- frame/tips/src/tests.rs | 13 +- .../asset-tx-payment/src/tests.rs | 9 +- frame/transaction-payment/src/lib.rs | 30 ++- frame/treasury/src/tests.rs | 5 - 32 files changed, 378 insertions(+), 402 deletions(-) diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 0fd4dd3281516..5429c89084e78 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_assets; use frame_support::{ - construct_runtime, + construct_runtime, parameter_types, traits::{ConstU32, ConstU64, GenesisBuild}, }; use sp_core::H256; @@ -101,44 +101,49 @@ impl Config for Test { type Extra = (); } -use std::{cell::RefCell, collections::HashMap}; +use std::collections::HashMap; #[derive(Copy, Clone, Eq, PartialEq, Debug)] -pub(crate) enum Hook { +pub enum Hook { Died(u32, u64), } -thread_local! { - static FROZEN: RefCell> = RefCell::new(Default::default()); - static HOOKS: RefCell> = RefCell::new(Default::default()); +parameter_types! { + static Frozen: HashMap<(u32, u64), u64> = Default::default(); + static Hooks: Vec = Default::default(); } pub struct TestFreezer; impl FrozenBalance for TestFreezer { fn frozen_balance(asset: u32, who: &u64) -> Option { - FROZEN.with(|f| f.borrow().get(&(asset, *who)).cloned()) + Frozen::get().get(&(asset, *who)).cloned() } fn died(asset: u32, who: &u64) { - HOOKS.with(|h| h.borrow_mut().push(Hook::Died(asset, *who))); + Hooks::mutate(|v| v.push(Hook::Died(asset, *who))); + // Sanity check: dead accounts have no balance. 
assert!(Assets::balance(asset, *who).is_zero()); } } pub(crate) fn set_frozen_balance(asset: u32, who: u64, amount: u64) { - FROZEN.with(|f| f.borrow_mut().insert((asset, who), amount)); + Frozen::mutate(|v| { + v.insert((asset, who), amount); + }); } pub(crate) fn clear_frozen_balance(asset: u32, who: u64) { - FROZEN.with(|f| f.borrow_mut().remove(&(asset, who))); + Frozen::mutate(|v| { + v.remove(&(asset, who)); + }); } pub(crate) fn hooks() -> Vec { - HOOKS.with(|h| h.borrow().clone()) + Hooks::get().clone() } pub(crate) fn take_hooks() -> Vec { - HOOKS.with(|h| h.take()) + Hooks::take() } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 5023feeaf8aea..5fa298bb008b0 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -30,7 +30,6 @@ use sp_runtime::{ testing::{Header, UintAuthorityId}, traits::IdentityLookup, }; -use sp_std::cell::RefCell; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -86,18 +85,17 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } -thread_local! { - static DISABLED_VALIDATORS: RefCell> = RefCell::new(Default::default()); +parameter_types! { + static DisabledValidatorTestValue: Vec = Default::default(); } pub struct MockDisabledValidators; impl MockDisabledValidators { pub fn disable_validator(index: AuthorityIndex) { - DISABLED_VALIDATORS.with(|v| { - let mut disabled = v.borrow_mut(); - if let Err(i) = disabled.binary_search(&index) { - disabled.insert(i, index); + DisabledValidatorTestValue::mutate(|v| { + if let Err(i) = v.binary_search(&index) { + v.insert(i, index); } }) } @@ -105,7 +103,7 @@ impl MockDisabledValidators { impl DisabledValidators for MockDisabledValidators { fn is_disabled(index: AuthorityIndex) -> bool { - DISABLED_VALIDATORS.with(|v| v.borrow().binary_search(&index).is_ok()) + DisabledValidatorTestValue::get().binary_search(&index).is_ok() } } diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index b4ce039b35fbc..cf4b8d23b0458 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -21,7 +21,6 @@ use super::*; use crate as pallet_bounties; -use std::cell::RefCell; use frame_support::{ assert_noop, assert_ok, @@ -102,9 +101,6 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); } -thread_local! { - static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); -} parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); pub static Burn: Permill = Permill::from_percent(50); diff --git a/frame/child-bounties/src/tests.rs b/frame/child-bounties/src/tests.rs index f42715f14bbee..0edee8468b86f 100644 --- a/frame/child-bounties/src/tests.rs +++ b/frame/child-bounties/src/tests.rs @@ -21,7 +21,6 @@ use super::*; use crate as pallet_child_bounties; -use std::cell::RefCell; use frame_support::{ assert_noop, assert_ok, @@ -105,9 +104,6 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); } -thread_local! { - static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); -} parameter_types! 
{ pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 4ae336a907001..f91be3a9e491c 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1376,7 +1376,7 @@ mod tests { }; use assert_matches::assert_matches; use codec::{Decode, Encode}; - use frame_support::{assert_err, assert_ok}; + use frame_support::{assert_err, assert_ok, parameter_types}; use frame_system::{EventRecord, Phase}; use hex_literal::hex; use pallet_contracts_primitives::ReturnFlags; @@ -1393,8 +1393,8 @@ mod tests { type MockStack<'a> = Stack<'a, Test, MockExecutable>; - thread_local! { - static LOADER: RefCell = RefCell::new(MockLoader::default()); + parameter_types! { + static Loader: MockLoader = MockLoader::default(); } fn events() -> Vec> { @@ -1420,8 +1420,8 @@ mod tests { refcount: u64, } - #[derive(Default)] - struct MockLoader { + #[derive(Default, Clone)] + pub struct MockLoader { map: HashMap, MockExecutable>, counter: u64, } @@ -1431,8 +1431,7 @@ mod tests { func_type: ExportedFunction, f: impl Fn(MockCtx, &MockExecutable) -> ExecResult + 'static, ) -> CodeHash { - LOADER.with(|loader| { - let mut loader = loader.borrow_mut(); + Loader::mutate(|loader| { // Generate code hashes as monotonically increasing values. let hash = ::Hash::from_low_u64_be(loader.counter); loader.counter += 1; @@ -1445,8 +1444,7 @@ mod tests { } fn increment_refcount(code_hash: CodeHash) -> Result<(), DispatchError> { - LOADER.with(|loader| { - let mut loader = loader.borrow_mut(); + Loader::mutate(|loader| { match loader.map.entry(code_hash) { Entry::Vacant(_) => Err(>::CodeNotFound)?, Entry::Occupied(mut entry) => entry.get_mut().refcount += 1, @@ -1457,8 +1455,7 @@ mod tests { fn decrement_refcount(code_hash: CodeHash) { use std::collections::hash_map::Entry::Occupied; - LOADER.with(|loader| { - let mut loader = loader.borrow_mut(); + Loader::mutate(|loader| { let mut entry = match loader.map.entry(code_hash) { Occupied(e) => e, _ => panic!("code_hash does not exist"), @@ -1478,13 +1475,8 @@ mod tests { _schedule: &Schedule, _gas_meter: &mut GasMeter, ) -> Result { - LOADER.with(|loader| { - loader - .borrow_mut() - .map - .get(&code_hash) - .cloned() - .ok_or(Error::::CodeNotFound.into()) + Loader::mutate(|loader| { + loader.map.get(&code_hash).cloned().ok_or(Error::::CodeNotFound.into()) }) } @@ -1531,14 +1523,14 @@ mod tests { #[test] fn it_works() { - thread_local! { - static TEST_DATA: RefCell> = RefCell::new(vec![0]); + parameter_types! { + static TestData: Vec = vec![0]; } let value = Default::default(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); let exec_ch = MockLoader::insert(Call, |_ctx, _executable| { - TEST_DATA.with(|data| data.borrow_mut().push(1)); + TestData::mutate(|data| data.push(1)); exec_success() }); @@ -1562,7 +1554,7 @@ mod tests { ); }); - TEST_DATA.with(|data| assert_eq!(*data.borrow(), vec![0, 1])); + assert_eq!(TestData::get(), vec![0, 1]); } #[test] @@ -1841,16 +1833,15 @@ mod tests { fn max_depth() { // This test verifies that when we reach the maximal depth creation of an // yet another context fails. - thread_local! { - static REACHED_BOTTOM: RefCell = RefCell::new(false); + parameter_types! { + static ReachedBottom: bool = false; } let value = Default::default(); let recurse_ch = MockLoader::insert(Call, |ctx, _| { // Try to call into yourself. 
let r = ctx.ext.call(Weight::zero(), BOB, 0, vec![], true); - REACHED_BOTTOM.with(|reached_bottom| { - let mut reached_bottom = reached_bottom.borrow_mut(); + ReachedBottom::mutate(|reached_bottom| { if !*reached_bottom { // We are first time here, it means we just reached bottom. // Verify that we've got proper error and set `reached_bottom`. @@ -1891,15 +1882,14 @@ mod tests { let origin = ALICE; let dest = BOB; - thread_local! { - static WITNESSED_CALLER_BOB: RefCell>> = RefCell::new(None); - static WITNESSED_CALLER_CHARLIE: RefCell>> = RefCell::new(None); + parameter_types! { + static WitnessedCallerBob: Option> = None; + static WitnessedCallerCharlie: Option> = None; } let bob_ch = MockLoader::insert(Call, |ctx, _| { // Record the caller for bob. - WITNESSED_CALLER_BOB - .with(|caller| *caller.borrow_mut() = Some(ctx.ext.caller().clone())); + WitnessedCallerBob::mutate(|caller| *caller = Some(ctx.ext.caller().clone())); // Call into CHARLIE contract. assert_matches!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), Ok(_)); @@ -1907,8 +1897,7 @@ mod tests { }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { // Record the caller for charlie. - WITNESSED_CALLER_CHARLIE - .with(|caller| *caller.borrow_mut() = Some(ctx.ext.caller().clone())); + WitnessedCallerCharlie::mutate(|caller| *caller = Some(ctx.ext.caller().clone())); exec_success() }); @@ -1932,8 +1921,8 @@ mod tests { assert_matches!(result, Ok(_)); }); - WITNESSED_CALLER_BOB.with(|caller| assert_eq!(*caller.borrow(), Some(origin))); - WITNESSED_CALLER_CHARLIE.with(|caller| assert_eq!(*caller.borrow(), Some(dest))); + assert_eq!(WitnessedCallerBob::get(), Some(origin)); + assert_eq!(WitnessedCallerCharlie::get(), Some(dest)); } #[test] diff --git a/frame/contracts/src/storage/meter.rs b/frame/contracts/src/storage/meter.rs index b06f7ea4aedb5..d89208812bcac 100644 --- a/frame/contracts/src/storage/meter.rs +++ b/frame/contracts/src/storage/meter.rs @@ -441,23 +441,23 @@ mod tests { exec::AccountIdOf, tests::{Test, ALICE, BOB, CHARLIE}, }; + use frame_support::parameter_types; use pretty_assertions::assert_eq; - use std::cell::RefCell; type TestMeter = RawMeter; - thread_local! { - static TEST_EXT: RefCell = RefCell::new(Default::default()); + parameter_types! 
{ + static TestExtTestValue: TestExt = Default::default(); } - #[derive(Debug, PartialEq, Eq)] + #[derive(Debug, PartialEq, Eq, Clone)] struct LimitCheck { origin: AccountIdOf, limit: BalanceOf, min_leftover: BalanceOf, } - #[derive(Debug, PartialEq, Eq)] + #[derive(Debug, PartialEq, Eq, Clone)] struct Charge { origin: AccountIdOf, contract: AccountIdOf, @@ -465,8 +465,8 @@ mod tests { terminated: bool, } - #[derive(Default, Debug, PartialEq, Eq)] - struct TestExt { + #[derive(Default, Debug, PartialEq, Eq, Clone)] + pub struct TestExt { limit_checks: Vec, charges: Vec, } @@ -485,12 +485,9 @@ mod tests { min_leftover: BalanceOf, ) -> Result, DispatchError> { let limit = limit.unwrap_or(42); - TEST_EXT.with(|ext| { - ext.borrow_mut().limit_checks.push(LimitCheck { - origin: origin.clone(), - limit, - min_leftover, - }) + TestExtTestValue::mutate(|ext| { + ext.limit_checks + .push(LimitCheck { origin: origin.clone(), limit, min_leftover }) }); Ok(limit) } @@ -501,8 +498,8 @@ mod tests { amount: &DepositOf, terminated: bool, ) { - TEST_EXT.with(|ext| { - ext.borrow_mut().charges.push(Charge { + TestExtTestValue::mutate(|ext| { + ext.charges.push(Charge { origin: origin.clone(), contract: contract.clone(), amount: amount.clone(), @@ -513,7 +510,7 @@ mod tests { } fn clear_ext() { - TEST_EXT.with(|ext| ext.borrow_mut().clear()) + TestExtTestValue::mutate(|ext| ext.clear()) } fn new_info(deposit: BalanceOf) -> ContractInfo { @@ -533,15 +530,13 @@ mod tests { TestMeter::new(&ALICE, Some(1_000), 0).unwrap(); - TEST_EXT.with(|ext| { - assert_eq!( - *ext.borrow(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - ..Default::default() - } - ) - }); + assert_eq!( + TestExtTestValue::get(), + TestExt { + limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + ..Default::default() + } + ) } #[test] @@ -556,15 +551,13 @@ mod tests { nested0.charge(&Default::default()).unwrap(); meter.absorb(nested0, &ALICE, &BOB, None); - TEST_EXT.with(|ext| { - assert_eq!( - *ext.borrow(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - ..Default::default() - } - ) - }); + assert_eq!( + TestExtTestValue::get(), + TestExt { + limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + ..Default::default() + } + ) } #[test] @@ -582,20 +575,18 @@ mod tests { .unwrap(); meter.absorb(nested0, &ALICE, &BOB, None); - TEST_EXT.with(|ext| { - assert_eq!( - *ext.borrow(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - charges: vec![Charge { - origin: ALICE, - contract: BOB, - amount: Deposit::Charge(::Currency::minimum_balance() * 2), - terminated: false, - }] - } - ) - }); + assert_eq!( + TestExtTestValue::get(), + TestExt { + limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + charges: vec![Charge { + origin: ALICE, + contract: BOB, + amount: Deposit::Charge(::Currency::minimum_balance() * 2), + terminated: false, + }] + } + ) } #[test] @@ -638,34 +629,32 @@ mod tests { assert_eq!(nested1_info.storage_deposit, 40); assert_eq!(nested2_info.storage_deposit, min_balance); - TEST_EXT.with(|ext| { - assert_eq!( - *ext.borrow(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - charges: vec![ - Charge { - origin: ALICE, - contract: CHARLIE, - amount: Deposit::Refund(10), - terminated: false - }, - Charge { - origin: ALICE, - contract: CHARLIE, - amount: 
Deposit::Refund(4), - terminated: false - }, - Charge { - origin: ALICE, - contract: BOB, - amount: Deposit::Charge(2), - terminated: false - } - ] - } - ) - }); + assert_eq!( + TestExtTestValue::get(), + TestExt { + limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + charges: vec![ + Charge { + origin: ALICE, + contract: CHARLIE, + amount: Deposit::Refund(10), + terminated: false + }, + Charge { + origin: ALICE, + contract: CHARLIE, + amount: Deposit::Refund(4), + terminated: false + }, + Charge { + origin: ALICE, + contract: BOB, + amount: Deposit::Charge(2), + terminated: false + } + ] + } + ) } #[test] @@ -697,27 +686,25 @@ mod tests { meter.absorb(nested0, &ALICE, &BOB, None); drop(meter); - TEST_EXT.with(|ext| { - assert_eq!( - *ext.borrow(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - charges: vec![ - Charge { - origin: ALICE, - contract: CHARLIE, - amount: Deposit::Refund(400), - terminated: true - }, - Charge { - origin: ALICE, - contract: BOB, - amount: Deposit::Charge(12), - terminated: false - } - ] - } - ) - }); + assert_eq!( + TestExtTestValue::get(), + TestExt { + limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + charges: vec![ + Charge { + origin: ALICE, + contract: CHARLIE, + amount: Deposit::Refund(400), + terminated: true + }, + Charge { + origin: ALICE, + contract: BOB, + amount: Deposit::Charge(12), + terminated: false + } + ] + } + ) } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index f65f845f7b9fe..8bca18c5d1c42 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -51,7 +51,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Convert, Hash, IdentityLookup}, AccountId32, }; -use std::{cell::RefCell, sync::Arc}; +use std::sync::Arc; use crate as pallet_contracts; @@ -113,10 +113,11 @@ pub mod test_utils { } } -thread_local! { - static TEST_EXTENSION: RefCell = Default::default(); +parameter_types! 
{ + static TestExtensionTestValue: TestExtension = Default::default(); } +#[derive(Clone)] pub struct TestExtension { enabled: bool, last_seen_buffer: Vec, @@ -136,15 +137,15 @@ pub struct TempStorageExtension { impl TestExtension { fn disable() { - TEST_EXTENSION.with(|e| e.borrow_mut().enabled = false) + TestExtensionTestValue::mutate(|e| e.enabled = false) } fn last_seen_buffer() -> Vec { - TEST_EXTENSION.with(|e| e.borrow().last_seen_buffer.clone()) + TestExtensionTestValue::get().last_seen_buffer.clone() } fn last_seen_inputs() -> (u32, u32, u32, u32) { - TEST_EXTENSION.with(|e| e.borrow().last_seen_inputs) + TestExtensionTestValue::get().last_seen_inputs } } @@ -167,14 +168,13 @@ impl ChainExtension for TestExtension { let mut env = env.buf_in_buf_out(); let input = env.read(8)?; env.write(&input, false, None)?; - TEST_EXTENSION.with(|e| e.borrow_mut().last_seen_buffer = input); + TestExtensionTestValue::mutate(|e| e.last_seen_buffer = input); Ok(RetVal::Converging(id)) }, 0x8001 => { let env = env.only_in(); - TEST_EXTENSION.with(|e| { - e.borrow_mut().last_seen_inputs = - (env.val0(), env.val1(), env.val2(), env.val3()) + TestExtensionTestValue::mutate(|e| { + e.last_seen_inputs = (env.val0(), env.val1(), env.val2(), env.val3()) }); Ok(RetVal::Converging(id)) }, @@ -192,7 +192,7 @@ impl ChainExtension for TestExtension { } fn enabled() -> bool { - TEST_EXTENSION.with(|e| e.borrow().enabled) + TestExtensionTestValue::get().enabled } } @@ -210,7 +210,7 @@ impl ChainExtension for RevertingExtension { } fn enabled() -> bool { - TEST_EXTENSION.with(|e| e.borrow().enabled) + TestExtensionTestValue::get().enabled } } @@ -259,7 +259,7 @@ impl ChainExtension for TempStorageExtension { } fn enabled() -> bool { - TEST_EXTENSION.with(|e| e.borrow().enabled) + TestExtensionTestValue::get().enabled } } @@ -344,19 +344,30 @@ impl Convert> for Test { /// A filter whose filter function can be swapped at runtime. pub struct TestFilter; -thread_local! { - static CALL_FILTER: RefCell bool> = RefCell::new(|_| true); +#[derive(Clone)] +pub struct Filters { + filter: fn(&Call) -> bool, +} + +impl Default for Filters { + fn default() -> Self { + Filters { filter: (|_| true) } + } +} + +parameter_types! 
{ + static CallFilter: Filters = Default::default(); } impl TestFilter { pub fn set_filter(filter: fn(&Call) -> bool) { - CALL_FILTER.with(|fltr| *fltr.borrow_mut() = filter); + CallFilter::mutate(|fltr| fltr.filter = filter); } } impl Contains for TestFilter { fn contains(call: &Call) -> bool { - CALL_FILTER.with(|fltr| fltr.borrow()(call)) + (CallFilter::get().filter)(call) } } diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 48b76bbb98a8f..4e9672fc1d4f6 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -241,8 +241,9 @@ impl pallet_balances::Config for Runtime { type WeightInfo = (); } -#[derive(Eq, PartialEq, Debug, Clone, Copy)] +#[derive(Default, Eq, PartialEq, Debug, Clone, Copy)] pub enum MockedWeightInfo { + #[default] Basic, Complex, Real, diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 87ed8b93bbf8e..b23342800f8b9 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -835,12 +835,12 @@ mod tests { pub struct RuntimeVersion; impl frame_support::traits::Get for RuntimeVersion { fn get() -> sp_version::RuntimeVersion { - RUNTIME_VERSION.with(|v| v.borrow().clone()) + RuntimeVersionTestValues::get().clone() } } - thread_local! { - pub static RUNTIME_VERSION: std::cell::RefCell = + parameter_types! { + pub static RuntimeVersionTestValues: sp_version::RuntimeVersion = Default::default(); } @@ -1239,14 +1239,13 @@ mod tests { #[test] fn runtime_upgraded_should_work() { new_test_ext(1).execute_with(|| { - RUNTIME_VERSION.with(|v| *v.borrow_mut() = Default::default()); + RuntimeVersionTestValues::mutate(|v| *v = Default::default()); // It should be added at genesis assert!(frame_system::LastRuntimeUpgrade::::exists()); assert!(!Executive::runtime_upgraded()); - RUNTIME_VERSION.with(|v| { - *v.borrow_mut() = - sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); assert!(Executive::runtime_upgraded()); assert_eq!( @@ -1254,8 +1253,8 @@ mod tests { frame_system::LastRuntimeUpgrade::::get(), ); - RUNTIME_VERSION.with(|v| { - *v.borrow_mut() = sp_version::RuntimeVersion { + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, spec_name: "test".into(), ..Default::default() @@ -1267,8 +1266,8 @@ mod tests { frame_system::LastRuntimeUpgrade::::get(), ); - RUNTIME_VERSION.with(|v| { - *v.borrow_mut() = sp_version::RuntimeVersion { + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, spec_name: "test".into(), impl_version: 2, @@ -1316,9 +1315,8 @@ mod tests { fn custom_runtime_upgrade_is_called_before_modules() { new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called. - RUNTIME_VERSION.with(|v| { - *v.borrow_mut() = - sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); Executive::initialize_block(&Header::new( @@ -1338,9 +1336,8 @@ mod tests { fn event_from_runtime_upgrade_is_included() { new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called. 
- RUNTIME_VERSION.with(|v| { - *v.borrow_mut() = - sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); // set block number to non zero so events are not excluded @@ -1369,9 +1366,8 @@ mod tests { let header = new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called. - RUNTIME_VERSION.with(|v| { - *v.borrow_mut() = - sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); // Let's build some fake block. @@ -1389,15 +1385,14 @@ mod tests { }); // Reset to get the correct new genesis below. - RUNTIME_VERSION.with(|v| { - *v.borrow_mut() = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() } + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 0, ..Default::default() } }); new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called. - RUNTIME_VERSION.with(|v| { - *v.borrow_mut() = - sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); >>::execute_block(Block::new(header, vec![xt])); @@ -1411,9 +1406,8 @@ mod tests { fn all_weights_are_recorded_correctly() { new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called for maximum complexity - RUNTIME_VERSION.with(|v| { - *v.borrow_mut() = - sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } + RuntimeVersionTestValues::mutate(|v| { + *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); let block_number = 1; diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index b734bd37b6fd4..6ea5a16d64f58 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -19,8 +19,6 @@ #![cfg(test)] -use std::cell::RefCell; - use frame_support::{ parameter_types, traits::{ConstU32, ConstU64}, @@ -57,18 +55,18 @@ frame_support::construct_runtime!( } ); -thread_local! { - pub static VALIDATORS: RefCell>> = RefCell::new(Some(vec![ +parameter_types! { + pub static Validators: Option> = Some(vec![ 1, 2, 3, - ])); + ]); } pub struct TestSessionManager; impl pallet_session::SessionManager for TestSessionManager { fn new_session(_new_index: SessionIndex) -> Option> { - VALIDATORS.with(|l| l.borrow_mut().take()) + Validators::mutate(|l| l.take()) } fn end_session(_: SessionIndex) {} fn start_session(_: SessionIndex) {} @@ -76,10 +74,8 @@ impl pallet_session::SessionManager for TestSessionManager { impl pallet_session::historical::SessionManager for TestSessionManager { fn new_session(_new_index: SessionIndex) -> Option> { - VALIDATORS.with(|l| { - l.borrow_mut() - .take() - .map(|validators| validators.iter().map(|v| (*v, *v)).collect()) + Validators::mutate(|l| { + l.take().map(|validators| validators.iter().map(|v| (*v, *v)).collect()) }) } fn end_session(_: SessionIndex) {} @@ -91,15 +87,15 @@ pub type Extrinsic = TestXt; type IdentificationTuple = (u64, u64); type Offence = crate::UnresponsivenessOffence; -thread_local! { - pub static OFFENCES: RefCell, Offence)>> = RefCell::new(vec![]); +parameter_types! { + pub static Offences: Vec<(Vec, Offence)> = vec![]; } /// A mock offence report handler. 
pub struct OffenceHandler; impl ReportOffence for OffenceHandler { fn report_offence(reporters: Vec, offence: Offence) -> Result<(), OffenceError> { - OFFENCES.with(|l| l.borrow_mut().push((reporters, offence))); + Offences::mutate(|l| l.push((reporters, offence))); Ok(()) } @@ -183,12 +179,12 @@ impl pallet_authorship::Config for Runtime { type EventHandler = ImOnline; } -thread_local! { - pub static MOCK_CURRENT_SESSION_PROGRESS: RefCell>> = RefCell::new(None); +parameter_types! { + pub static MockCurrentSessionProgress: Option> = None; } -thread_local! { - pub static MOCK_AVERAGE_SESSION_LENGTH: RefCell> = RefCell::new(None); +parameter_types! { + pub static MockAverageSessionLength: Option = None; } pub struct TestNextSessionRotation; @@ -196,7 +192,7 @@ pub struct TestNextSessionRotation; impl frame_support::traits::EstimateNextSessionRotation for TestNextSessionRotation { fn average_session_length() -> u64 { // take the mock result if any and return it - let mock = MOCK_AVERAGE_SESSION_LENGTH.with(|p| p.borrow_mut().take()); + let mock = MockAverageSessionLength::mutate(|p| p.take()); mock.unwrap_or(pallet_session::PeriodicSessions::::average_session_length()) } @@ -208,7 +204,7 @@ impl frame_support::traits::EstimateNextSessionRotation for TestNextSession ); // take the mock result if any and return it - let mock = MOCK_CURRENT_SESSION_PROGRESS.with(|p| p.borrow_mut().take()); + let mock = MockCurrentSessionProgress::mutate(|p| p.take()); (mock.unwrap_or(estimate), weight) } diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 05e1af169dba9..6ef7cce7c353b 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -68,7 +68,7 @@ fn should_report_offline_validators() { advance_session(); // enact the change and buffer another one let validators = vec![1, 2, 3, 4, 5, 6]; - VALIDATORS.with(|l| *l.borrow_mut() = Some(validators.clone())); + Validators::mutate(|l| *l = Some(validators.clone())); advance_session(); // when @@ -76,7 +76,7 @@ fn should_report_offline_validators() { advance_session(); // then - let offences = OFFENCES.with(|l| l.replace(vec![])); + let offences = Offences::take(); assert_eq!( offences, vec![( @@ -96,7 +96,7 @@ fn should_report_offline_validators() { advance_session(); // then - let offences = OFFENCES.with(|l| l.replace(vec![])); + let offences = Offences::take(); assert_eq!( offences, vec![( @@ -149,7 +149,7 @@ fn should_mark_online_validator_when_heartbeat_is_received() { new_test_ext().execute_with(|| { advance_session(); // given - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); + Validators::mutate(|l| *l = Some(vec![1, 2, 3, 4, 5, 6])); assert_eq!(Session::validators(), Vec::::new()); // enact the change and buffer another one advance_session(); @@ -184,7 +184,7 @@ fn late_heartbeat_and_invalid_keys_len_should_fail() { new_test_ext().execute_with(|| { advance_session(); // given - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); + Validators::mutate(|l| *l = Some(vec![1, 2, 3, 4, 5, 6])); assert_eq!(Session::validators(), Vec::::new()); // enact the change and buffer another one advance_session(); @@ -226,7 +226,7 @@ fn should_generate_heartbeats() { // buffer new validators Session::rotate_session(); // enact the change and buffer another one - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); + Validators::mutate(|l| *l = Some(vec![1, 2, 3, 4, 5, 6])); Session::rotate_session(); // when @@ -262,7 +262,7 @@ fn 
should_cleanup_received_heartbeats_on_session_end() { new_test_ext().execute_with(|| { advance_session(); - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3])); + Validators::mutate(|l| *l = Some(vec![1, 2, 3])); assert_eq!(Session::validators(), Vec::::new()); // enact the change and buffer another one @@ -293,7 +293,7 @@ fn should_mark_online_validator_when_block_is_authored() { new_test_ext().execute_with(|| { advance_session(); // given - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); + Validators::mutate(|l| *l = Some(vec![1, 2, 3, 4, 5, 6])); assert_eq!(Session::validators(), Vec::::new()); // enact the change and buffer another one advance_session(); @@ -330,7 +330,7 @@ fn should_not_send_a_report_if_already_online() { ext.execute_with(|| { advance_session(); // given - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); + Validators::mutate(|l| *l = Some(vec![1, 2, 3, 4, 5, 6])); assert_eq!(Session::validators(), Vec::::new()); // enact the change and buffer another one advance_session(); @@ -393,12 +393,12 @@ fn should_handle_missing_progress_estimates() { Session::rotate_session(); // enact the change and buffer another one - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![0, 1, 2])); + Validators::mutate(|l| *l = Some(vec![0, 1, 2])); Session::rotate_session(); // we will return `None` on the next call to `estimate_current_session_progress` // and the offchain worker should fallback to checking `HeartbeatAfter` - MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None)); + MockCurrentSessionProgress::mutate(|p| *p = Some(None)); ImOnline::offchain_worker(block); assert_eq!(state.read().transactions.len(), 3); @@ -427,26 +427,25 @@ fn should_handle_non_linear_session_progress() { // mock the session length as being 10 blocks long, // enact the change and buffer another one - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![0, 1, 2])); + Validators::mutate(|l| *l = Some(vec![0, 1, 2])); // mock the session length has being 10 which should make us assume the fallback for half // session will be reached by block 5. - MOCK_AVERAGE_SESSION_LENGTH.with(|p| *p.borrow_mut() = Some(10)); + MockAverageSessionLength::mutate(|p| *p = Some(10)); Session::rotate_session(); // if we don't have valid results for the current session progres then // we'll fallback to `HeartbeatAfter` and only heartbeat on block 5. - MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None)); + MockCurrentSessionProgress::mutate(|p| *p = Some(None)); assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly)); - MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None)); + MockCurrentSessionProgress::mutate(|p| *p = Some(None)); assert!(ImOnline::send_heartbeats(5).ok().is_some()); // if we have a valid current session progress then we'll heartbeat as soon // as we're past 80% of the session regardless of the block number - MOCK_CURRENT_SESSION_PROGRESS - .with(|p| *p.borrow_mut() = Some(Some(Permill::from_percent(81)))); + MockCurrentSessionProgress::mutate(|p| *p = Some(Some(Permill::from_percent(81)))); assert!(ImOnline::send_heartbeats(2).ok().is_some()); }); @@ -464,8 +463,7 @@ fn test_does_not_heartbeat_early_in_the_session() { ext.execute_with(|| { // mock current session progress as being 5%. we only randomly start // heartbeating after 10% of the session has elapsed. 
- MOCK_CURRENT_SESSION_PROGRESS - .with(|p| *p.borrow_mut() = Some(Some(Permill::from_float(0.05)))); + MockCurrentSessionProgress::mutate(|p| *p = Some(Some(Permill::from_float(0.05)))); assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly)); }); } @@ -483,9 +481,8 @@ fn test_probability_of_heartbeating_increases_with_session_progress() { let set_test = |progress, random: f64| { // the average session length is 100 blocks, therefore the residual // probability of sending a heartbeat is 1% - MOCK_AVERAGE_SESSION_LENGTH.with(|p| *p.borrow_mut() = Some(100)); - MOCK_CURRENT_SESSION_PROGRESS - .with(|p| *p.borrow_mut() = Some(Some(Permill::from_float(progress)))); + MockAverageSessionLength::mutate(|p| *p = Some(100)); + MockCurrentSessionProgress::mutate(|p| *p = Some(Some(Permill::from_float(progress)))); let mut seed = [0u8; 32]; let encoded = ((random * Permill::ACCURACY as f64) as u32).encode(); diff --git a/frame/merkle-mountain-range/src/mock.rs b/frame/merkle-mountain-range/src/mock.rs index b2b6821fcd054..30dd6da3d3b8d 100644 --- a/frame/merkle-mountain-range/src/mock.rs +++ b/frame/merkle-mountain-range/src/mock.rs @@ -19,14 +19,17 @@ use crate as pallet_mmr; use crate::*; use codec::{Decode, Encode}; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + parameter_types, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_mmr_primitives::{Compact, LeafDataProvider}; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup, Keccak256}, }; -use sp_std::{cell::RefCell, prelude::*}; +use sp_std::prelude::*; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -91,14 +94,14 @@ impl LeafData { } } -thread_local! { - pub static LEAF_DATA: RefCell = RefCell::new(Default::default()); +parameter_types! { + pub static LeafDataTestValue: LeafData = Default::default(); } impl LeafDataProvider for LeafData { type LeafData = Self; fn leaf_data() -> Self::LeafData { - LEAF_DATA.with(|r| r.borrow().clone()) + LeafDataTestValue::get().clone() } } diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index e13f89617bb9a..d6886f90a5da7 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -42,7 +42,7 @@ fn register_offchain_ext(ext: &mut sp_io::TestExternalities) { fn new_block() -> Weight { let number = frame_system::Pallet::::block_number() + 1; let hash = H256::repeat_byte(number as u8); - LEAF_DATA.with(|r| r.borrow_mut().a = number); + LeafDataTestValue::mutate(|r| r.a = number); frame_system::Pallet::::reset_events(); frame_system::Pallet::::initialize(&number, &hash, &Default::default()); diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index a578a6082cf3e..bf4daf0c89944 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -40,13 +40,12 @@ use sp_staking::{ offence::{self, DisableStrategy, Kind, OffenceDetails}, SessionIndex, }; -use std::cell::RefCell; pub struct OnOffenceHandler; -thread_local! { - pub static ON_OFFENCE_PERBILL: RefCell> = RefCell::new(Default::default()); - pub static OFFENCE_WEIGHT: RefCell = RefCell::new(Default::default()); +parameter_types! 
{ + pub static OnOffencePerbill: Vec = Default::default(); + pub static OffenceWeight: Weight = Default::default(); } impl offence::OnOffenceHandler @@ -58,16 +57,16 @@ impl offence::OnOffenceHandler _offence_session: SessionIndex, _disable_strategy: DisableStrategy, ) -> Weight { - ON_OFFENCE_PERBILL.with(|f| { - *f.borrow_mut() = slash_fraction.to_vec(); + OnOffencePerbill::mutate(|f| { + *f = slash_fraction.to_vec(); }); - OFFENCE_WEIGHT.with(|w| *w.borrow()) + OffenceWeight::get() } } pub fn with_on_offence_fractions) -> R>(f: F) -> R { - ON_OFFENCE_PERBILL.with(|fractions| f(&mut fractions.borrow_mut())) + OnOffencePerbill::mutate(|fractions| f(fractions)) } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 7bbc628604bc6..1639c9a046142 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -39,15 +39,14 @@ use sp_runtime::{ #[frame_support::pallet] pub mod logger { use super::{OriginCaller, OriginTrait}; - use frame_support::pallet_prelude::*; + use frame_support::{pallet_prelude::*, parameter_types}; use frame_system::pallet_prelude::*; - use std::cell::RefCell; - thread_local! { - static LOG: RefCell> = RefCell::new(Vec::new()); + parameter_types! { + static Log: Vec<(OriginCaller, u32)> = Vec::new(); } pub fn log() -> Vec<(OriginCaller, u32)> { - LOG.with(|log| log.borrow().clone()) + Log::get().clone() } #[pallet::pallet] @@ -76,8 +75,8 @@ pub mod logger { #[pallet::weight(*weight)] pub fn log(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { Self::deposit_event(Event::Logged(i, weight)); - LOG.with(|log| { - log.borrow_mut().push((origin.caller().clone(), i)); + Log::mutate(|log| { + log.push((origin.caller().clone(), i)); }); Ok(()) } @@ -85,8 +84,8 @@ pub mod logger { #[pallet::weight(*weight)] pub fn log_without_filter(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { Self::deposit_event(Event::Logged(i, weight)); - LOG.with(|log| { - log.borrow_mut().push((origin.caller().clone(), i)); + Log::mutate(|log| { + log.push((origin.caller().clone(), i)); }); Ok(()) } diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index e38e0a18b99c8..00c66c3bdefe8 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -30,7 +30,6 @@ use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; -use std::cell::RefCell; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -96,14 +95,14 @@ impl pallet_balances::Config for Test { type WeightInfo = (); } -thread_local! { - pub static MEMBERS: RefCell> = RefCell::new(vec![]); +parameter_types! 
{ + pub static MembersTestValue: Vec = vec![]; } pub struct TestChangeMembers; impl ChangeMembers for TestChangeMembers { fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { - let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); + let mut old_plus_incoming = MembersTestValue::get().to_vec(); old_plus_incoming.extend_from_slice(incoming); old_plus_incoming.sort(); @@ -113,13 +112,13 @@ impl ChangeMembers for TestChangeMembers { assert_eq!(old_plus_incoming, new_plus_outgoing); - MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); + MembersTestValue::mutate(|m| *m = new.to_vec()); } } impl InitializeMembers for TestChangeMembers { fn initialize_members(new_members: &[u64]) { - MEMBERS.with(|m| *m.borrow_mut() = new_members.to_vec()); + MembersTestValue::mutate(|m| *m = new_members.to_vec()); } } diff --git a/frame/scored-pool/src/tests.rs b/frame/scored-pool/src/tests.rs index 7b431160ddfe5..42f0b47e8b940 100644 --- a/frame/scored-pool/src/tests.rs +++ b/frame/scored-pool/src/tests.rs @@ -33,7 +33,7 @@ fn query_membership_works() { assert_eq!(ScoredPool::members(), vec![20, 40]); assert_eq!(Balances::reserved_balance(31), CandidateDeposit::get()); assert_eq!(Balances::reserved_balance(40), CandidateDeposit::get()); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), vec![20, 40]); + assert_eq!(MembersTestValue::get().clone(), vec![20, 40]); }); } @@ -128,7 +128,7 @@ fn kicking_works() { // then assert_eq!(find_in_pool(who), None); assert_eq!(ScoredPool::members(), vec![20, 31]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); + assert_eq!(MembersTestValue::get().clone(), ScoredPool::members()); assert_eq!(Balances::reserved_balance(who), 0); // deposit must have been returned }); } @@ -152,7 +152,7 @@ fn unscored_entities_must_not_be_used_for_filling_members() { // then // the `None` candidates should not have been filled in assert!(ScoredPool::members().is_empty()); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); + assert_eq!(MembersTestValue::get().clone(), ScoredPool::members()); }); } @@ -170,7 +170,7 @@ fn refreshing_works() { // then assert_eq!(ScoredPool::members(), vec![15, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); + assert_eq!(MembersTestValue::get().clone(), ScoredPool::members()); }); } @@ -190,7 +190,7 @@ fn refreshing_happens_every_period() { // then assert_eq!(ScoredPool::members(), vec![15, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); + assert_eq!(MembersTestValue::get().clone(), ScoredPool::members()); }); } diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index c72ab8c210d69..45b4ba3c0a799 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -375,7 +375,7 @@ impl> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let keys: Vec<_> = NEXT_VALIDATORS.with(|l| { - l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - }); + let keys: Vec<_> = NextValidators::get() + .iter() + .cloned() + .map(|i| (i, i, UintAuthorityId(i).into())) + .collect(); BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) 
in &keys { frame_system::Pallet::::inc_providers(k); diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index 95813d0a70272..ececb8af5ad58 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -141,7 +141,7 @@ mod tests { use super::*; use crate::{ historical::{onchain, Pallet}, - mock::{force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS}, + mock::{force_new_session, set_next_validators, NextValidators, Session, System, Test}, }; use codec::Encode; @@ -163,9 +163,12 @@ mod tests { .build_storage::() .expect("Failed to create test externalities."); - let keys: Vec<_> = NEXT_VALIDATORS.with(|l| { - l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - }); + let keys: Vec<_> = NextValidators::get() + .iter() + .cloned() + .map(|i| (i, i, UintAuthorityId(i).into())) + .collect(); + BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) in &keys { frame_system::Pallet::::inc_providers(k); diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 3044d6558e33e..c1858d0bff774 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -22,7 +22,7 @@ use crate as pallet_session; #[cfg(feature = "historical")] use crate::historical as pallet_session_historical; -use std::{cell::RefCell, collections::BTreeMap}; +use std::collections::BTreeMap; use sp_core::{crypto::key_types::DUMMY, H256}; use sp_runtime::{ @@ -103,29 +103,29 @@ frame_support::construct_runtime!( } ); -thread_local! { - pub static VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); - pub static NEXT_VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); - pub static AUTHORITIES: RefCell> = - RefCell::new(vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - pub static FORCE_SESSION_END: RefCell = RefCell::new(false); - pub static SESSION_LENGTH: RefCell = RefCell::new(2); - pub static SESSION_CHANGED: RefCell = RefCell::new(false); - pub static TEST_SESSION_CHANGED: RefCell = RefCell::new(false); - pub static DISABLED: RefCell = RefCell::new(false); +parameter_types! 
{ + pub static Validators: Vec = vec![1, 2, 3]; + pub static NextValidators: Vec = vec![1, 2, 3]; + pub static Authorities: Vec = + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]; + pub static ForceSessionEnd: bool = false; + pub static SessionLength: u64 = 2; + pub static SessionChanged: bool = false; + pub static TestSessionChanged: bool = false; + pub static Disabled: bool = false; // Stores if `on_before_session_end` was called - pub static BEFORE_SESSION_END_CALLED: RefCell = RefCell::new(false); - pub static VALIDATOR_ACCOUNTS: RefCell> = RefCell::new(BTreeMap::new()); + pub static BeforeSessionEndCalled: bool = false; + pub static ValidatorAccounts: BTreeMap = BTreeMap::new(); } pub struct TestShouldEndSession; impl ShouldEndSession for TestShouldEndSession { fn should_end_session(now: u64) -> bool { - let l = SESSION_LENGTH.with(|l| *l.borrow()); + let l = SessionLength::get(); now % l == 0 || - FORCE_SESSION_END.with(|l| { - let r = *l.borrow(); - *l.borrow_mut() = false; + ForceSessionEnd::mutate(|l| { + let r = *l; + *l = false; r }) } @@ -140,19 +140,19 @@ impl SessionHandler for TestSessionHandler { validators: &[(u64, T)], _queued_validators: &[(u64, T)], ) { - SESSION_CHANGED.with(|l| *l.borrow_mut() = changed); - AUTHORITIES.with(|l| { - *l.borrow_mut() = validators + SessionChanged::mutate(|l| *l = changed); + Authorities::mutate(|l| { + *l = validators .iter() .map(|(_, id)| id.get::(DUMMY).unwrap_or_default()) .collect() }); } fn on_disabled(_validator_index: u32) { - DISABLED.with(|l| *l.borrow_mut() = true) + Disabled::mutate(|l| *l = true) } fn on_before_session_ending() { - BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = true); + BeforeSessionEndCalled::mutate(|b| *b = true); } } @@ -161,16 +161,15 @@ impl SessionManager for TestSessionManager { fn end_session(_: SessionIndex) {} fn start_session(_: SessionIndex) {} fn new_session(_: SessionIndex) -> Option> { - if !TEST_SESSION_CHANGED.with(|l| *l.borrow()) { - VALIDATORS.with(|v| { - let mut v = v.borrow_mut(); - *v = NEXT_VALIDATORS.with(|l| l.borrow().clone()); + if !TestSessionChanged::get() { + Validators::mutate(|v| { + *v = NextValidators::get().clone(); Some(v.clone()) }) - } else if DISABLED.with(|l| std::mem::replace(&mut *l.borrow_mut(), false)) { + } else if Disabled::mutate(|l| std::mem::replace(&mut *l, false)) { // If there was a disabled validator, underlying conditions have changed // so we return `Some`. 
- Some(VALIDATORS.with(|v| v.borrow().clone())) + Some(Validators::get().clone()) } else { None } @@ -188,37 +187,40 @@ impl crate::historical::SessionManager for TestSessionManager { } pub fn authorities() -> Vec { - AUTHORITIES.with(|l| l.borrow().to_vec()) + Authorities::get().to_vec() } pub fn force_new_session() { - FORCE_SESSION_END.with(|l| *l.borrow_mut() = true) + ForceSessionEnd::mutate(|l| *l = true) } pub fn set_session_length(x: u64) { - SESSION_LENGTH.with(|l| *l.borrow_mut() = x) + SessionLength::mutate(|l| *l = x) } pub fn session_changed() -> bool { - SESSION_CHANGED.with(|l| *l.borrow()) + SessionChanged::get() } pub fn set_next_validators(next: Vec) { - NEXT_VALIDATORS.with(|v| *v.borrow_mut() = next); + NextValidators::mutate(|v| *v = next); } pub fn before_session_end_called() -> bool { - BEFORE_SESSION_END_CALLED.with(|b| *b.borrow()) + BeforeSessionEndCalled::get() } pub fn reset_before_session_end_called() { - BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = false); + BeforeSessionEndCalled::mutate(|b| *b = false); } pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let keys: Vec<_> = NEXT_VALIDATORS - .with(|l| l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect()); + let keys: Vec<_> = NextValidators::get() + .iter() + .cloned() + .map(|i| (i, i, UintAuthorityId(i).into())) + .collect(); BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) in &keys { frame_system::Pallet::::inc_providers(k); @@ -230,10 +232,9 @@ pub fn new_test_ext() -> sp_io::TestExternalities { pallet_session::GenesisConfig:: { keys } .assimilate_storage(&mut t) .unwrap(); - NEXT_VALIDATORS.with(|l| { - let v = l.borrow().iter().map(|&i| (i, i)).collect(); - VALIDATOR_ACCOUNTS.with(|m| *m.borrow_mut() = v); - }); + + let v = NextValidators::get().iter().map(|&i| (i, i)).collect(); + ValidatorAccounts::mutate(|m| *m = v); sp_io::TestExternalities::new(t) } @@ -279,12 +280,12 @@ impl pallet_timestamp::Config for Test { pub struct TestValidatorIdOf; impl TestValidatorIdOf { pub fn set(v: BTreeMap) { - VALIDATOR_ACCOUNTS.with(|m| *m.borrow_mut() = v); + ValidatorAccounts::mutate(|m| *m = v); } } impl Convert> for TestValidatorIdOf { fn convert(x: u64) -> Option { - VALIDATOR_ACCOUNTS.with(|m| m.borrow().get(&x).cloned()) + ValidatorAccounts::get().get(&x).cloned() } } diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index c9d2dbb53d9ba..7947a8a670b5a 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -21,8 +21,8 @@ use super::*; use crate::mock::{ authorities, before_session_end_called, force_new_session, new_test_ext, reset_before_session_end_called, session_changed, set_next_validators, set_session_length, - Origin, PreUpgradeMockSessionKeys, Session, System, Test, TestValidatorIdOf, SESSION_CHANGED, - TEST_SESSION_CHANGED, + Origin, PreUpgradeMockSessionKeys, Session, SessionChanged, System, Test, TestSessionChanged, + TestValidatorIdOf, }; use codec::Decode; @@ -35,7 +35,7 @@ use frame_support::{ }; fn initialize_block(block: u64) { - SESSION_CHANGED.with(|l| *l.borrow_mut() = false); + SessionChanged::mutate(|l| *l = false); System::set_block_number(block); Session::on_initialize(block); } @@ -235,7 +235,7 @@ fn session_changed_flag_works() { new_test_ext().execute_with(|| { TestValidatorIdOf::set(vec![(1, 1), (2, 2), (3, 3), (69, 69)].into_iter().collect()); - TEST_SESSION_CHANGED.with(|l| *l.borrow_mut() = true); + 
TestSessionChanged::mutate(|l| *l = true); force_new_session(); initialize_block(1); @@ -384,8 +384,8 @@ fn upgrade_keys() { use sp_core::crypto::key_types::DUMMY; // This test assumes certain mocks. - assert_eq!(mock::NEXT_VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); - assert_eq!(mock::VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); + assert_eq!(mock::NextValidators::get().clone(), vec![1, 2, 3]); + assert_eq!(mock::Validators::get().clone(), vec![1, 2, 3]); new_test_ext().execute_with(|| { let pre_one = PreUpgradeMockSessionKeys { a: [1u8; 32], b: [1u8; 64] }; diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 7911428b3337c..cdad11589f743 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -37,7 +37,6 @@ use sp_runtime::{ traits::{IdentityLookup, Zero}, }; use sp_staking::offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}; -use std::cell::RefCell; pub const INIT_TIMESTAMP: u64 = 30_000; pub const BLOCK_TIME: u64 = 1000; @@ -216,16 +215,16 @@ parameter_types! { pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); } -thread_local! { - pub static REWARD_REMAINDER_UNBALANCED: RefCell = RefCell::new(0); +parameter_types! { + pub static RewardRemainderUnbalanced: u128 = 0; } pub struct RewardRemainderMock; impl OnUnbalanced> for RewardRemainderMock { fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { - REWARD_REMAINDER_UNBALANCED.with(|v| { - *v.borrow_mut() += amount.peek(); + RewardRemainderUnbalanced::mutate(|v| { + *v += amount.peek(); }); drop(amount); } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 448fbd2382454..969ff8e4e2881 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -301,10 +301,7 @@ fn rewards_should_work() { start_session(3); assert_eq!(active_era(), 1); - assert_eq!( - mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), - maximum_payout - total_payout_0, - ); + assert_eq!(mock::RewardRemainderUnbalanced::get(), maximum_payout - total_payout_0,); assert_eq!( *mock::staking_events().last().unwrap(), Event::EraPaid(0, total_payout_0, maximum_payout - total_payout_0) @@ -340,7 +337,7 @@ fn rewards_should_work() { mock::start_active_era(2); assert_eq!( - mock::REWARD_REMAINDER_UNBALANCED.with(|v| *v.borrow()), + mock::RewardRemainderUnbalanced::get(), maximum_payout * 2 - total_payout_0 - total_payout_1, ); assert_eq!( diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 3b2a8b3b62fc2..55ded6da9c5a1 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -446,6 +446,21 @@ macro_rules! parameter_types_impl_thread_local { pub fn set(t: $type) { [<$name:snake:upper>].with(|v| *v.borrow_mut() = t); } + + /// Mutate the internal value in place. + pub fn mutate R>(mutate: F) -> R{ + let mut current = Self::get(); + let result = mutate(&mut current); + Self::set(current); + result + } + + /// Get current value and replace with initial value of the parameter type. 
+ pub fn take() -> $type { + let current = Self::get(); + Self::set($value); + current + } } )* } diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index aed98579a0fd8..04b3dbcedcbf2 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -22,7 +22,10 @@ #![recursion_limit = "128"] use codec::MaxEncodedLen; -use frame_support::traits::{CrateVersion, PalletInfo as _}; +use frame_support::{ + parameter_types, + traits::{CrateVersion, PalletInfo as _}, +}; use scale_info::TypeInfo; use sp_core::{sr25519, H256}; use sp_runtime::{ @@ -30,14 +33,13 @@ use sp_runtime::{ traits::{BlakeTwo256, Verify}, DispatchError, ModuleError, }; -use sp_std::cell::RefCell; mod system; pub trait Currency {} -thread_local! { - pub static INTEGRITY_TEST_EXEC: RefCell = RefCell::new(0); +parameter_types! { + pub static IntegrityTestExec: u32 = 0; } mod module1 { @@ -95,7 +97,7 @@ mod module2 { } fn integrity_test() { - INTEGRITY_TEST_EXEC.with(|i| *i.borrow_mut() += 1); + IntegrityTestExec::mutate(|i| *i += 1); } } } @@ -140,7 +142,7 @@ mod nested { } fn integrity_test() { - INTEGRITY_TEST_EXEC.with(|i| *i.borrow_mut() += 1); + IntegrityTestExec::mutate(|i| *i += 1); } } } @@ -377,7 +379,7 @@ fn check_modules_error_type() { #[test] fn integrity_test_works() { __construct_runtime_integrity_test::runtime_integrity_tests(); - assert_eq!(INTEGRITY_TEST_EXEC.with(|i| *i.borrow()), 2); + assert_eq!(IntegrityTestExec::get(), 2); } #[test] diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index 23ab3c2af20b0..09a018e2be70e 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -26,7 +26,6 @@ use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; -use sp_std::cell::RefCell; type UncheckedExtrinsic = mocking::MockUncheckedExtrinsic; type Block = mocking::MockBlock; @@ -79,14 +78,14 @@ parameter_types! { limits::BlockLength::max_with_normal_ratio(1024, NORMAL_DISPATCH_RATIO); } -thread_local! { - pub static KILLED: RefCell> = RefCell::new(vec![]); +parameter_types! { + pub static Killed: Vec = vec![]; } pub struct RecordKilled; impl OnKilledAccount for RecordKilled { fn on_killed_account(who: &u64) { - KILLED.with(|r| r.borrow_mut().push(*who)) + Killed::mutate(|r| r.push(*who)) } } diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index f82ea338bd146..641cda13b07a4 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -55,9 +55,9 @@ fn stored_map_works() { System::dec_consumers(&0); assert!(!System::is_provider_required(&0)); - assert!(KILLED.with(|r| r.borrow().is_empty())); + assert!(Killed::get().is_empty()); assert_ok!(System::remove(&0)); - assert_eq!(KILLED.with(|r| r.borrow().clone()), vec![0u64]); + assert_eq!(Killed::get(), vec![0u64]); }); } diff --git a/frame/timestamp/src/mock.rs b/frame/timestamp/src/mock.rs index b4c377cfa30ef..1ea553aa548ec 100644 --- a/frame/timestamp/src/mock.rs +++ b/frame/timestamp/src/mock.rs @@ -19,7 +19,6 @@ use super::*; use crate as pallet_timestamp; -use sp_std::cell::RefCell; use frame_support::{ parameter_types, @@ -78,14 +77,14 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } -thread_local! { - pub static CAPTURED_MOMENT: RefCell> = RefCell::new(None); +parameter_types! 
{ + pub static CapturedMoment: Option = None; } pub struct MockOnTimestampSet; impl OnTimestampSet for MockOnTimestampSet { fn on_timestamp_set(moment: Moment) { - CAPTURED_MOMENT.with(|x| *x.borrow_mut() = Some(moment)); + CapturedMoment::mutate(|x| *x = Some(moment)); } } @@ -97,11 +96,11 @@ impl Config for Test { } pub(crate) fn clear_captured_moment() { - CAPTURED_MOMENT.with(|x| *x.borrow_mut() = None); + CapturedMoment::mutate(|x| *x = None); } pub(crate) fn get_captured_moment() -> Option { - CAPTURED_MOMENT.with(|x| *x.borrow()) + CapturedMoment::get() } pub(crate) fn new_test_ext() -> TestExternalities { diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index bcaa99285d2e7..b12885fc59f16 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -19,8 +19,6 @@ #![cfg(test)] -use std::cell::RefCell; - use sp_core::H256; use sp_runtime::{ testing::Header, @@ -100,18 +98,17 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); } -thread_local! { - static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); +parameter_types! { + static TenToFourteenTestValue: Vec = vec![10,11,12,13,14]; } pub struct TenToFourteen; impl SortedMembers for TenToFourteen { fn sorted_members() -> Vec { - TEN_TO_FOURTEEN.with(|v| v.borrow().clone()) + TenToFourteenTestValue::get().clone() } #[cfg(feature = "runtime-benchmarks")] fn add(new: &u128) { - TEN_TO_FOURTEEN.with(|v| { - let mut members = v.borrow_mut(); + TenToFourteenTestValue::mutate(|members| { members.push(*new); members.sort(); }) @@ -119,7 +116,7 @@ impl SortedMembers for TenToFourteen { } impl ContainsLengthBound for TenToFourteen { fn max_len() -> usize { - TEN_TO_FOURTEEN.with(|v| v.borrow().len()) + TenToFourteenTestValue::get().len() } fn min_len() -> usize { 0 diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index a17d564d7a334..dd7c086a82469 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -33,7 +33,6 @@ use sp_runtime::{ testing::Header, traits::{BlakeTwo256, ConvertInto, IdentityLookup, SaturatedConversion, StaticLookup}, }; -use std::cell::RefCell; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -58,8 +57,8 @@ frame_support::construct_runtime!( const CALL: &::Call = &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); -thread_local! { - static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(Weight::zero()); +parameter_types! 
{ + static ExtrinsicBaseWeight: Weight = Weight::zero(); } pub struct BlockWeights; @@ -68,7 +67,7 @@ impl Get for BlockWeights { frame_system::limits::BlockWeights::builder() .base_block(Weight::zero()) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); + weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { weights.max_total = Weight::from_ref_time(1024).into(); @@ -235,7 +234,7 @@ impl ExtBuilder { self } fn set_constants(&self) { - EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow_mut() = self.base_weight); + ExtrinsicBaseWeight::mutate(|v| *v = self.base_weight); TRANSACTION_BYTE_FEE.with(|v| *v.borrow_mut() = self.byte_fee); WEIGHT_TO_FEE.with(|v| *v.borrow_mut() = self.weight_to_fee); } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 21d516cda1aa6..cbdc9dcb1e846 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -810,8 +810,6 @@ mod tests { use super::*; use crate as pallet_transaction_payment; - use std::cell::RefCell; - use codec::Encode; use sp_core::H256; @@ -850,8 +848,8 @@ mod tests { const CALL: &::Call = &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); - thread_local! { - static EXTRINSIC_BASE_WEIGHT: RefCell = RefCell::new(Weight::zero()); + parameter_types! { + static ExtrinsicBaseWeight: Weight = Weight::zero(); } pub struct BlockWeights; @@ -860,7 +858,7 @@ mod tests { frame_system::limits::BlockWeights::builder() .base_block(Weight::zero()) .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow()).into(); + weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { weights.max_total = Weight::from_ref_time(1024).into(); @@ -932,9 +930,9 @@ mod tests { } } - thread_local! { - static TIP_UNBALANCED_AMOUNT: RefCell = RefCell::new(0); - static FEE_UNBALANCED_AMOUNT: RefCell = RefCell::new(0); + parameter_types! 
{ + static TipUnbalancedAmount: u64 = 0; + static FeeUnbalancedAmount: u64 = 0; } pub struct DealWithFees; @@ -943,9 +941,9 @@ mod tests { mut fees_then_tips: impl Iterator>, ) { if let Some(fees) = fees_then_tips.next() { - FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() += fees.peek()); + FeeUnbalancedAmount::mutate(|a| *a += fees.peek()); if let Some(tips) = fees_then_tips.next() { - TIP_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() += tips.peek()); + TipUnbalancedAmount::mutate(|a| *a += tips.peek()); } } } @@ -1002,7 +1000,7 @@ mod tests { self } fn set_constants(&self) { - EXTRINSIC_BASE_WEIGHT.with(|v| *v.borrow_mut() = self.base_weight); + ExtrinsicBaseWeight::mutate(|v| *v = self.base_weight); TRANSACTION_BYTE_FEE.with(|v| *v.borrow_mut() = self.byte_fee); WEIGHT_TO_FEE.with(|v| *v.borrow_mut() = self.weight_to_fee); } @@ -1074,10 +1072,10 @@ mod tests { &Ok(()) )); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 5 + 5 + 10); - assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 0); + assert_eq!(FeeUnbalancedAmount::get(), 5 + 5 + 10); + assert_eq!(TipUnbalancedAmount::get(), 0); - FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() = 0); + FeeUnbalancedAmount::mutate(|a| *a = 0); let pre = ChargeTransactionPayment::::from(5 /* tipped */) .pre_dispatch(&2, CALL, &info_from_weight(Weight::from_ref_time(100)), len) @@ -1092,8 +1090,8 @@ mod tests { &Ok(()) )); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); - assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 5 + 10 + 50); - assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| *a.borrow()), 5); + assert_eq!(FeeUnbalancedAmount::get(), 5 + 10 + 50); + assert_eq!(TipUnbalancedAmount::get(), 5); }); } diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index bec96daf576e3..c9c6d01f129e6 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -19,8 +19,6 @@ #![cfg(test)] -use std::cell::RefCell; - use sp_core::H256; use sp_runtime::{ testing::Header, @@ -94,9 +92,6 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); } -thread_local! { - static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); -} parameter_types! 
{ pub const ProposalBond: Permill = Permill::from_percent(5); pub const Burn: Permill = Permill::from_percent(50); From cd66db5d1867fc37247d6622d893ac05557bc9bb Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 8 Sep 2022 15:35:44 +0300 Subject: [PATCH 110/348] Fully remove `rusty-cachier` from the `node-bench-regression-guard` job (#11904) Co-authored-by: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> --- scripts/ci/gitlab/pipeline/test.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index acd2cb1fa7855..8036e2cec26bb 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -110,6 +110,8 @@ node-bench-regression-guard: - job: cargo-check-benches artifacts: true # polls artifact from master to compare with current result + # need to specify both parallel jobs from master because of the bug + # https://gitlab.com/gitlab-org/gitlab/-/issues/39063 - project: $CI_PROJECT_PATH job: "cargo-check-benches 1/2" ref: master @@ -127,6 +129,7 @@ node-bench-regression-guard: - echo "In case of this job failure, check your pipeline's cargo-check-benches" - 'node-bench-regression-guard --reference artifacts/benches/master-* --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA' + after_script: [""] cargo-check-subkey: stage: test From b8c3d8cc2301a46070ddc9dbd90c135f9db5cd0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 8 Sep 2022 13:48:19 +0100 Subject: [PATCH 111/348] Upgrade wasm crate dependencies (#12173) * Upgrade wasm crate dependencies * New wasmi version changed error output a bit * ".git/.scripts/bench-bot.sh" pallet dev pallet_contracts * ".git/.scripts/bench-bot.sh" pallet dev pallet_contracts Co-authored-by: command-bot <> --- Cargo.lock | 69 +- bin/node/cli/Cargo.toml | 1 - bin/node/executor/Cargo.toml | 1 - client/executor/Cargo.toml | 3 +- client/executor/common/Cargo.toml | 4 +- .../common/src/sandbox/wasmi_backend.rs | 17 +- client/executor/src/integration_tests/mod.rs | 6 +- client/executor/wasmi/Cargo.toml | 2 +- client/executor/wasmi/src/lib.rs | 2 + client/executor/wasmtime/Cargo.toml | 2 +- frame/contracts/Cargo.toml | 4 +- frame/contracts/src/benchmarking/code.rs | 10 +- frame/contracts/src/wasm/prepare.rs | 2 +- frame/contracts/src/weights.rs | 1250 ++++++++--------- primitives/core/Cargo.toml | 2 +- primitives/sandbox/Cargo.toml | 10 +- primitives/sandbox/src/embedded_executor.rs | 4 +- primitives/version/Cargo.toml | 2 +- primitives/wasm-interface/Cargo.toml | 2 +- 19 files changed, 708 insertions(+), 685 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 223a14efd73b7..38439947258a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4360,9 +4360,9 @@ dependencies = [ [[package]] name = "memory_units" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" +checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" [[package]] name = "merlin" @@ -4991,6 +4991,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-complex" version = "0.4.0" @@ -5027,7 +5038,7 @@ source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ "autocfg", - "num-bigint", + "num-bigint 0.2.6", "num-integer", "num-traits", ] @@ -5039,6 +5050,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" dependencies = [ "autocfg", + "num-bigint 0.4.3", "num-integer", "num-traits", ] @@ -6617,9 +6629,9 @@ dependencies = [ [[package]] name = "parity-wasm" -version = "0.42.2" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" +checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" [[package]] name = "parking" @@ -7980,7 +7992,7 @@ dependencies = [ "futures", "log", "merlin", - "num-bigint", + "num-bigint 0.2.6", "num-rational 0.2.4", "num-traits", "parity-scale-codec", @@ -8236,7 +8248,7 @@ dependencies = [ "log", "once_cell", "parity-scale-codec", - "parity-wasm 0.42.2", + "parity-wasm 0.45.0", "paste 1.0.6", "rustix", "sc-allocator", @@ -9486,7 +9498,7 @@ name = "sp-arithmetic-fuzzer" version = "2.0.0" dependencies = [ "honggfuzz", - "num-bigint", + "num-bigint 0.2.6", "primitive-types", "sp-arithmetic", ] @@ -10194,7 +10206,7 @@ version = "5.0.0" dependencies = [ "impl-serde", "parity-scale-codec", - "parity-wasm 0.42.2", + "parity-wasm 0.45.0", "scale-info", "serde", "sp-core-hashing-proc-macro", @@ -11415,11 +11427,11 @@ dependencies = [ [[package]] name = "wasm-instrument" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "962e5b0401bbb6c887f54e69b8c496ea36f704df65db73e81fd5ff8dc3e63a9f" +checksum = "8bca81f5279342b38b17d9acbf007a46ddeb73144e2bd5f0a21bfa9fc5d4ab3e" dependencies = [ - "parity-wasm 0.42.2", + "parity-wasm 0.45.0", ] [[package]] @@ -11652,28 +11664,35 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.9.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" +checksum = "fc13b3c219ca9aafeec59150d80d89851df02e0061bc357b4d66fc55a8d38787" dependencies = [ - "downcast-rs", - "errno", - "libc", - "libm", - "memory_units", - "num-rational 0.2.4", - "num-traits", - "parity-wasm 0.42.2", + "parity-wasm 0.45.0", "wasmi-validation", + "wasmi_core", ] [[package]] name = "wasmi-validation" -version = "0.4.0" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ff416ad1ff0c42e5a926ed5d5fab74c0f098749aa0ad8b2a34b982ce0e867b" +dependencies = [ + "parity-wasm 0.45.0", +] + +[[package]] +name = "wasmi_core" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb8e860796d8be48efef530b60eebf84e74a88bce107374fffb0da97d504b8" +checksum = "e0a088e8c4c59c6f2b9eae169bf86328adccc477c00b56d3661e3e9fb397b184" dependencies = [ - "parity-wasm 0.42.2", + "downcast-rs", + "libm", + "memory_units", + "num-rational 0.4.0", + "num-traits", ] [[package]] diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index d84b8491cb162..247a61d835dc3 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -149,7 +149,6 @@ pallet-balances = { version = "4.0.0-dev", path = "../../../frame/balances" } [features] default = ["cli"] cli = [ - 
"node-executor/wasmi-errno", "node-inspect", "sc-cli", "frame-benchmarking-cli", diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 71865783da8ac..651e4657dde32 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -46,7 +46,6 @@ sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [features] wasmtime = ["sc-executor/wasmtime"] -wasmi-errno = ["sc-executor/wasmi-errno"] stress-test = [] [[bench]] diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 6a4b2a44a2d44..2fe7e822b36c3 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -18,7 +18,7 @@ lazy_static = "1.4.0" lru = "0.7.5" parking_lot = "0.12.1" tracing = "0.1.29" -wasmi = "0.9.1" +wasmi = "0.13" codec = { package = "parity-scale-codec", version = "3.0.0" } sc-executor-common = { version = "0.10.0-dev", path = "common" } @@ -63,5 +63,4 @@ default = ["std"] std = [] wasm-extern-trace = [] wasmtime = ["sc-executor-wasmtime"] -wasmi-errno = ["wasmi/errno"] wasmer-sandbox = ["sc-executor-common/wasmer-sandbox"] diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index b16ed7e8552ce..1af75c7e31610 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } environmental = "1.1.3" thiserror = "1.0.30" -wasm-instrument = "0.1" +wasm-instrument = "0.2" wasmer = { version = "2.2", features = ["singlepass"], optional = true } -wasmi = "0.9.1" +wasmi = "0.13" sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../../primitives/maybe-compressed-blob" } sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } diff --git a/client/executor/common/src/sandbox/wasmi_backend.rs b/client/executor/common/src/sandbox/wasmi_backend.rs index 03fa5dc06dea8..2ba133f5f15b1 100644 --- a/client/executor/common/src/sandbox/wasmi_backend.rs +++ b/client/executor/common/src/sandbox/wasmi_backend.rs @@ -18,14 +18,14 @@ //! 
Wasmi specific impls for sandbox -use std::rc::Rc; +use std::{fmt, rc::Rc}; use codec::{Decode, Encode}; use sp_sandbox::HostError; use sp_wasm_interface::{FunctionContext, Pointer, ReturnValue, Value, WordSize}; use wasmi::{ memory_units::Pages, ImportResolver, MemoryInstance, Module, ModuleInstance, RuntimeArgs, - RuntimeValue, Trap, TrapKind, + RuntimeValue, Trap, }; use crate::{ @@ -39,9 +39,20 @@ use crate::{ environmental::environmental!(SandboxContextStore: trait SandboxContext); +#[derive(Debug)] +struct CustomHostError(String); + +impl fmt::Display for CustomHostError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "HostError: {}", self.0) + } +} + +impl wasmi::HostError for CustomHostError {} + /// Construct trap error from specified message fn trap(msg: &'static str) -> Trap { - TrapKind::Host(Box::new(Error::Other(msg.into()))).into() + Trap::host(CustomHostError(msg.into())) } impl ImportResolver for Imports { diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index ba0c93630cf6c..25280f856f2a5 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -253,7 +253,7 @@ fn call_not_existing_function(wasm_method: WasmExecutionMethod) { match call_in_wasm("test_calling_missing_external", &[], wasm_method, &mut ext).unwrap_err() { Error::AbortedDueToTrap(error) => { let expected = match wasm_method { - WasmExecutionMethod::Interpreted => "Trap: Host(Other(\"Function `missing_external` is only a stub. Calling a stub is not allowed.\"))", + WasmExecutionMethod::Interpreted => "Other: Function `missing_external` is only a stub. Calling a stub is not allowed.", #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled { .. } => "call to a missing function env:missing_external" }; @@ -273,7 +273,7 @@ fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) { { Error::AbortedDueToTrap(error) => { let expected = match wasm_method { - WasmExecutionMethod::Interpreted => "Trap: Host(Other(\"Function `yet_another_missing_external` is only a stub. Calling a stub is not allowed.\"))", + WasmExecutionMethod::Interpreted => "Other: Function `yet_another_missing_external` is only a stub. Calling a stub is not allowed.", #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled { .. } => "call to a missing function env:yet_another_missing_external" }; @@ -909,7 +909,7 @@ fn unreachable_intrinsic(wasm_method: WasmExecutionMethod) { match call_in_wasm("test_unreachable_intrinsic", &[], wasm_method, &mut ext).unwrap_err() { Error::AbortedDueToTrap(error) => { let expected = match wasm_method { - WasmExecutionMethod::Interpreted => "Trap: Unreachable", + WasmExecutionMethod::Interpreted => "unreachable", #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled { .. 
} => "wasm trap: wasm `unreachable` instruction executed", }; diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 46bacf54a42c6..879af677ca042 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } log = "0.4.17" -wasmi = "0.9.1" +wasmi = "0.13" sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } sc-executor-common = { version = "0.10.0-dev", path = "../common" } sp-runtime-interface = { version = "6.0.0", path = "../../../primitives/runtime-interface" } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index e17707e158321..1284cc23e4c96 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -181,6 +181,7 @@ impl Sandbox for FunctionExecutor { let len = val_len as usize; + #[allow(deprecated)] let buffer = match self.memory.get(val_ptr.into(), len) { Err(_) => return Ok(sandbox_env::ERR_OUT_OF_BOUNDS), Ok(buffer) => buffer, @@ -568,6 +569,7 @@ fn call_in_wasm_module( match result { Ok(Some(I64(r))) => { let (ptr, length) = unpack_ptr_and_len(r as u64); + #[allow(deprecated)] memory.get(ptr, length as usize).map_err(|_| Error::Runtime) }, Err(e) => { diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index e4df2791c2ff8..94ad25bdc948c 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -17,7 +17,7 @@ cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.0.0" } libc = "0.2.121" log = "0.4.17" -parity-wasm = "0.42.0" +parity-wasm = "0.45" # When bumping wasmtime do not forget to also bump rustix # to exactly the same version as used by wasmtime! 
diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index ac85c469354fe..019f111302a55 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -20,12 +20,12 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = ] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } log = { version = "0.4", default-features = false } -wasm-instrument = { version = "0.1", default-features = false } +wasm-instrument = { version = "0.2", default-features = false } serde = { version = "1", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false, features = [ "const_generics", ] } -wasmi-validation = { version = "0.4", default-features = false } +wasmi-validation = { version = "0.5", default-features = false } impl-trait-for-tuples = "0.2" # Only used in benchmarking to generate random contract code diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index 5f9b43d3e3b7a..32ee2dbf93914 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -195,7 +195,7 @@ where for func in def.imported_functions { let sig = builder::signature() .with_params(func.params) - .with_results(func.return_type.into_iter().collect()) + .with_results(func.return_type) .build_sig(); let sig = contract.push_signature(sig); contract = contract @@ -254,9 +254,9 @@ where code = inject_stack_metering::(code); } - let code = code.to_bytes().unwrap(); + let code = code.into_bytes().unwrap(); let hash = T::Hashing::hash(&code); - Self { code, hash, memory: def.memory } + Self { code: code.into(), hash, memory: def.memory } } } @@ -285,11 +285,11 @@ where .find_map(|e| if let External::Memory(mem) = e.external() { Some(mem) } else { None }) .unwrap() .limits(); - let code = module.to_bytes().unwrap(); + let code = module.into_bytes().unwrap(); let hash = T::Hashing::hash(&code); let memory = ImportedMemory { min_pages: limits.initial(), max_pages: limits.maximum().unwrap() }; - Self { code, hash, memory: Some(memory) } + Self { code: code.into(), hash, memory: Some(memory) } } /// Creates a wasm module with an empty `call` and `deploy` function and nothing else. diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 7b81c1c55b3bd..9c130b562c275 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -54,7 +54,7 @@ impl<'a, T: Config> ContractModule<'a, T> { elements::deserialize_buffer(original_code).map_err(|_| "Can't decode wasm code")?; // Make sure that the module is valid. - validate_module::(&module).map_err(|_| "Module is not valid")?; + validate_module::(&module, ()).map_err(|_| "Module is not valid")?; // Return a `ContractModule` instance with // __valid__ module. diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 6ec10b3590349..4e4a6b6e6a2b7 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-09-08, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -166,15 +166,15 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(2_985_000 as u64) + Weight::from_ref_time(3_089_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(14_335_000 as u64) + Weight::from_ref_time(13_917_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(891_000 as u64).saturating_mul(k as u64)) + .saturating_add(Weight::from_ref_time(901_000 as u64).saturating_mul(k as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) @@ -182,9 +182,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(13_905_000 as u64) + Weight::from_ref_time(14_172_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_257_000 as u64).saturating_mul(q as u64)) + .saturating_add(Weight::from_ref_time(1_301_000 as u64).saturating_mul(q as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -192,9 +192,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(26_333_000 as u64) + Weight::from_ref_time(21_644_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(44_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -205,9 +205,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(238_784_000 as u64) + Weight::from_ref_time(234_349_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -222,9 +222,9 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(325_497_000 as u64) + Weight::from_ref_time(294_077_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(107_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(c as u64)) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) @@ -239,7 +239,7 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. 
fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(201_129_000 as u64) + Weight::from_ref_time(199_028_000 as u64) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) @@ -251,7 +251,7 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - Weight::from_ref_time(176_645_000 as u64) + Weight::from_ref_time(176_247_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -261,9 +261,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(53_613_000 as u64) + Weight::from_ref_time(54_917_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -272,7 +272,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(37_441_000 as u64) + Weight::from_ref_time(37_611_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -280,7 +280,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:2 w:2) // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - Weight::from_ref_time(40_030_000 as u64) + Weight::from_ref_time(40_121_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -291,9 +291,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(241_426_000 as u64) - // Standard Error: 46_000 - .saturating_add(Weight::from_ref_time(37_834_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_129_000 as u64) + // Standard Error: 54_000 + .saturating_add(Weight::from_ref_time(35_413_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -304,9 +304,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(181_406_000 as u64) - // Standard Error: 474_000 - .saturating_add(Weight::from_ref_time(206_995_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(189_418_000 as u64) + // Standard Error: 419_000 + .saturating_add(Weight::from_ref_time(207_107_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -318,9 +318,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(198_301_000 as u64) - // Standard Error: 432_000 - .saturating_add(Weight::from_ref_time(264_877_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(203_928_000 as u64) + // Standard Error: 439_000 + .saturating_add(Weight::from_ref_time(268_983_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -332,9 +332,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(243_754_000 as u64) - // Standard Error: 51_000 - .saturating_add(Weight::from_ref_time(41_219_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(243_800_000 as u64) + // Standard Error: 40_000 + .saturating_add(Weight::from_ref_time(38_797_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -345,9 +345,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(239_352_000 as u64) - // Standard Error: 33_000 - .saturating_add(Weight::from_ref_time(15_228_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_667_000 as u64) + // Standard Error: 27_000 + .saturating_add(Weight::from_ref_time(15_826_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -358,9 +358,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(240_637_000 as u64) - // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(37_717_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_116_000 as u64) + // Standard Error: 41_000 + .saturating_add(Weight::from_ref_time(35_402_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -371,9 +371,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(241_127_000 as u64) - // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(37_481_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_515_000 as u64) + // Standard Error: 50_000 + .saturating_add(Weight::from_ref_time(35_144_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -384,9 +384,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(246_236_000 as u64) - // Standard Error: 97_000 - .saturating_add(Weight::from_ref_time(110_095_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_087_000 as u64) + // Standard Error: 87_000 + .saturating_add(Weight::from_ref_time(110_236_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -397,9 +397,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(241_552_000 as u64) - // Standard Error: 38_000 - .saturating_add(Weight::from_ref_time(37_189_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_774_000 as u64) + // Standard Error: 50_000 + .saturating_add(Weight::from_ref_time(35_216_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -410,9 +410,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(239_622_000 as u64) - // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(37_630_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_146_000 as u64) + // Standard Error: 54_000 + .saturating_add(Weight::from_ref_time(35_101_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -423,9 +423,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(240_284_000 as u64) - // Standard Error: 49_000 - .saturating_add(Weight::from_ref_time(37_375_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_096_000 as u64) + // Standard Error: 55_000 + .saturating_add(Weight::from_ref_time(34_612_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -436,9 +436,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(241_463_000 as u64) - // Standard Error: 43_000 - .saturating_add(Weight::from_ref_time(37_313_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(242_978_000 as u64) + // Standard Error: 53_000 + .saturating_add(Weight::from_ref_time(34_780_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -450,9 +450,9 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(245_287_000 as u64) - // Standard Error: 88_000 - .saturating_add(Weight::from_ref_time(99_890_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(246_175_000 as u64) + // Standard Error: 86_000 + .saturating_add(Weight::from_ref_time(99_827_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -463,9 +463,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(168_156_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(17_991_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(168_655_000 as u64) + // Standard Error: 16_000 + .saturating_add(Weight::from_ref_time(15_635_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -476,9 +476,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(240_833_000 as u64) - // Standard Error: 50_000 - .saturating_add(Weight::from_ref_time(35_622_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_729_000 as u64) + // Standard Error: 52_000 + .saturating_add(Weight::from_ref_time(33_477_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -489,9 +489,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(330_765_000 as u64) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(8_609_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(296_718_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(9_616_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -502,9 +502,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { - Weight::from_ref_time(237_729_000 as u64) - // Standard Error: 488_000 - .saturating_add(Weight::from_ref_time(2_053_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(237_666_000 as u64) + // Standard Error: 497_000 + .saturating_add(Weight::from_ref_time(2_090_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -515,9 +515,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. 
fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(240_005_000 as u64) + Weight::from_ref_time(239_842_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(169_000 as u64).saturating_mul(n as u64)) + .saturating_add(Weight::from_ref_time(184_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -530,9 +530,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(240_524_000 as u64) - // Standard Error: 426_000 - .saturating_add(Weight::from_ref_time(51_863_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_563_000 as u64) + // Standard Error: 519_000 + .saturating_add(Weight::from_ref_time(52_855_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -546,9 +546,9 @@ impl WeightInfo for SubstrateWeight { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(250_710_000 as u64) - // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(132_741_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(248_136_000 as u64) + // Standard Error: 94_000 + .saturating_add(Weight::from_ref_time(137_406_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -559,9 +559,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(245_816_000 as u64) - // Standard Error: 115_000 - .saturating_add(Weight::from_ref_time(235_313_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(253_433_000 as u64) + // Standard Error: 105_000 + .saturating_add(Weight::from_ref_time(242_337_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -573,11 +573,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(469_886_000 as u64) - // Standard Error: 463_000 - .saturating_add(Weight::from_ref_time(177_286_000 as u64).saturating_mul(t as u64)) - // Standard Error: 127_000 - .saturating_add(Weight::from_ref_time(64_646_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(478_106_000 as u64) + // Standard Error: 557_000 + .saturating_add(Weight::from_ref_time(176_325_000 as u64).saturating_mul(t as u64)) + // Standard Error: 153_000 + .saturating_add(Weight::from_ref_time(67_413_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -590,18 +590,18 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(174_414_000 as u64) - // Standard Error: 26_000 - .saturating_add(Weight::from_ref_time(29_037_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(172_751_000 as u64) + // Standard Error: 37_000 + .saturating_add(Weight::from_ref_time(26_536_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(196_960_000 as u64) - // Standard Error: 414_000 - .saturating_add(Weight::from_ref_time(412_810_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(196_276_000 as u64) + // Standard Error: 428_000 + .saturating_add(Weight::from_ref_time(416_783_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -610,9 +610,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(529_493_000 as u64) - // Standard Error: 1_251_000 - .saturating_add(Weight::from_ref_time(88_835_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(532_439_000 as u64) + // Standard Error: 1_323_000 + .saturating_add(Weight::from_ref_time(93_843_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(52 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(50 as u64)) @@ -621,9 +621,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(512_531_000 as u64) - // Standard Error: 1_107_000 - .saturating_add(Weight::from_ref_time(67_926_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(511_358_000 as u64) + // Standard Error: 1_144_000 + .saturating_add(Weight::from_ref_time(68_754_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(52 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(50 as u64)) @@ -632,9 +632,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(208_848_000 as u64) - // Standard Error: 398_000 - .saturating_add(Weight::from_ref_time(400_478_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(204_133_000 as u64) + // Standard Error: 498_000 + .saturating_add(Weight::from_ref_time(406_798_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -643,9 +643,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(488_903_000 as u64) - // Standard Error: 1_226_000 - .saturating_add(Weight::from_ref_time(69_519_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(489_339_000 as u64) + // Standard Error: 1_269_000 + .saturating_add(Weight::from_ref_time(70_700_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(49 as u64)) @@ -654,9 +654,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(211_825_000 as u64) - // Standard Error: 401_000 - .saturating_add(Weight::from_ref_time(328_607_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(211_344_000 as u64) + // Standard Error: 399_000 + .saturating_add(Weight::from_ref_time(330_244_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -664,9 +664,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(452_324_000 as u64) - // Standard Error: 1_026_000 - .saturating_add(Weight::from_ref_time(150_657_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(449_353_000 as u64) + // Standard Error: 1_027_000 + .saturating_add(Weight::from_ref_time(153_022_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -674,9 +674,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(218_730_000 as u64) - // Standard Error: 318_000 - .saturating_add(Weight::from_ref_time(302_022_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(216_197_000 as u64) + // Standard Error: 341_000 + .saturating_add(Weight::from_ref_time(305_401_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -684,9 +684,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(425_128_000 as u64) - // Standard Error: 875_000 - .saturating_add(Weight::from_ref_time(60_095_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(423_033_000 as u64) + // Standard Error: 878_000 + .saturating_add(Weight::from_ref_time(61_940_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -694,9 +694,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(208_039_000 as u64) - // Standard Error: 445_000 - .saturating_add(Weight::from_ref_time(425_933_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(204_244_000 as u64) + // Standard Error: 448_000 + .saturating_add(Weight::from_ref_time(429_399_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -705,9 +705,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(518_112_000 as u64) - // Standard Error: 1_406_000 - .saturating_add(Weight::from_ref_time(159_006_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(516_945_000 as u64) + // Standard Error: 1_412_000 + .saturating_add(Weight::from_ref_time(162_098_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(52 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(49 as u64)) @@ -720,9 +720,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(169_573_000 as u64) - // Standard Error: 701_000 - .saturating_add(Weight::from_ref_time(1_345_943_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(170_412_000 as u64) + // Standard Error: 761_000 + .saturating_add(Weight::from_ref_time(1_367_307_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -736,8 +736,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 8_472_000 - .saturating_add(Weight::from_ref_time(17_569_902_000 as u64).saturating_mul(r as u64)) + // Standard Error: 9_269_000 + .saturating_add(Weight::from_ref_time(17_505_281_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((160 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -751,8 +751,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 7_932_000 - .saturating_add(Weight::from_ref_time(17_368_706_000 as u64).saturating_mul(r as u64)) + // Standard Error: 8_780_000 + .saturating_add(Weight::from_ref_time(17_368_867_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads((158 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes((79 as u64).saturating_mul(r as u64))) } @@ -764,11 +764,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. 
fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(11_036_172_000 as u64) - // Standard Error: 5_683_000 - .saturating_add(Weight::from_ref_time(1_154_945_000 as u64).saturating_mul(t as u64)) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(8_670_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(11_076_579_000 as u64) + // Standard Error: 6_568_000 + .saturating_add(Weight::from_ref_time(1_158_818_000 as u64).saturating_mul(t as u64)) + // Standard Error: 9_000 + .saturating_add(Weight::from_ref_time(9_731_000 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(167 as u64)) .saturating_add(T::DbWeight::get().reads((81 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(163 as u64)) @@ -784,8 +784,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 25_721_000 - .saturating_add(Weight::from_ref_time(22_827_927_000 as u64).saturating_mul(r as u64)) + // Standard Error: 24_125_000 + .saturating_add(Weight::from_ref_time(22_830_521_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().reads((400 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(5 as u64)) @@ -801,9 +801,9 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(13_991_911_000 as u64) - // Standard Error: 70_000 - .saturating_add(Weight::from_ref_time(124_789_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(13_739_440_000 as u64) + // Standard Error: 79_000 + .saturating_add(Weight::from_ref_time(126_148_000 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(249 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(247 as u64)) @@ -816,9 +816,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(244_026_000 as u64) - // Standard Error: 69_000 - .saturating_add(Weight::from_ref_time(58_077_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_753_000 as u64) + // Standard Error: 60_000 + .saturating_add(Weight::from_ref_time(55_067_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -830,8 +830,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 104_000 - .saturating_add(Weight::from_ref_time(320_432_000 as u64).saturating_mul(n as u64)) + // Standard Error: 95_000 + .saturating_add(Weight::from_ref_time(320_367_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -842,9 +842,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(242_548_000 as u64) - // Standard Error: 88_000 - .saturating_add(Weight::from_ref_time(70_774_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_849_000 as u64) + // Standard Error: 80_000 + .saturating_add(Weight::from_ref_time(67_626_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -856,8 +856,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 152_000 - .saturating_add(Weight::from_ref_time(246_268_000 as u64).saturating_mul(n as u64)) + // Standard Error: 106_000 + .saturating_add(Weight::from_ref_time(247_771_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -868,9 +868,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(242_368_000 as u64) - // Standard Error: 68_000 - .saturating_add(Weight::from_ref_time(48_368_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(242_162_000 as u64) + // Standard Error: 58_000 + .saturating_add(Weight::from_ref_time(45_169_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -882,8 +882,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 94_000 - .saturating_add(Weight::from_ref_time(95_913_000 as u64).saturating_mul(n as u64)) + // Standard Error: 95_000 + .saturating_add(Weight::from_ref_time(97_479_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -894,9 +894,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(239_100_000 as u64) - // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(48_043_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_072_000 as u64) + // Standard Error: 53_000 + .saturating_add(Weight::from_ref_time(44_847_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -908,8 +908,8 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 93_000 - .saturating_add(Weight::from_ref_time(96_000_000 as u64).saturating_mul(n as u64)) + // Standard Error: 95_000 + .saturating_add(Weight::from_ref_time(97_432_000 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -920,9 +920,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(375_902_000 as u64) - // Standard Error: 613_000 - .saturating_add(Weight::from_ref_time(2_972_514_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(374_614_000 as u64) + // Standard Error: 634_000 + .saturating_add(Weight::from_ref_time(2_968_637_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -933,9 +933,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(260_758_000 as u64) - // Standard Error: 396_000 - .saturating_add(Weight::from_ref_time(2_069_937_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(249_022_000 as u64) + // Standard Error: 408_000 + .saturating_add(Weight::from_ref_time(2_062_013_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -948,316 +948,316 @@ impl WeightInfo for SubstrateWeight { /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 1_629_000 - .saturating_add(Weight::from_ref_time(1_093_632_000 as u64).saturating_mul(r as u64)) + // Standard Error: 1_536_000 + .saturating_add(Weight::from_ref_time(1_099_219_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads((158 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes((158 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(70_130_000 as u64) + Weight::from_ref_time(70_276_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(798_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(933_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(70_439_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_717_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_309_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(2_977_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(70_647_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_112_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_165_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(2_686_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(70_045_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_228_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_872_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(2_374_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(71_013_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_284_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_891_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(2_629_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(69_846_000 as u64) + Weight::from_ref_time(69_747_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_278_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_639_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_471_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_774_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_262_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(2_142_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(68_967_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(1_984_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(68_808_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(2_342_000 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(72_756_000 as u64) + Weight::from_ref_time(73_245_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(e as u64)) + .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(70_426_000 as u64) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(7_095_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_308_000 as u64) + // Standard Error: 10_000 + .saturating_add(Weight::from_ref_time(7_333_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(85_090_000 as u64) - // Standard Error: 34_000 - .saturating_add(Weight::from_ref_time(8_897_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(83_967_000 as u64) + // Standard Error: 12_000 + .saturating_add(Weight::from_ref_time(9_205_000 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(94_306_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(585_000 as u64).saturating_mul(p as u64)) + Weight::from_ref_time(93_600_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(546_000 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(70_494_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(857_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_449_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_052_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(70_467_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(890_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_326_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(998_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(70_451_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_196_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_525_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_467_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(72_857_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_442_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(73_703_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_495_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(72_589_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(1_682_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(73_578_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(1_546_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(70_230_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(820_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_379_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(934_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(71_027_000 as u64) - // Standard Error: 296_000 - .saturating_add(Weight::from_ref_time(177_139_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_069_000 as u64) + // Standard Error: 114_000 + .saturating_add(Weight::from_ref_time(182_540_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(70_270_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_250_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_188_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(70_088_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(1_281_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_970_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_366_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(69_538_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_283_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_352_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_356_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(69_842_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_266_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_229_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_354_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(69_754_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_230_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_202_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_355_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(70_842_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_193_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_065_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(69_670_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_281_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_252_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_356_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(69_728_000 as u64) + Weight::from_ref_time(70_049_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_770_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_823_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(70_522_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(1_767_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_519_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_815_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(71_224_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(1_752_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_953_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_834_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(70_625_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(1_763_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_299_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_818_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(71_816_000 as u64) - // Standard Error: 31_000 - .saturating_add(Weight::from_ref_time(1_852_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_141_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_825_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(70_019_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_780_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_209_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_827_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(69_881_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_783_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_980_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_831_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(70_021_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_766_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_022_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_829_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(72_156_000 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(1_712_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_030_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_826_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(70_004_000 as u64) + Weight::from_ref_time(70_170_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_820_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_833_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(70_028_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_772_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_895_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_826_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(69_159_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(1_813_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_932_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_830_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(70_033_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_760_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_091_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_825_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(69_702_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(2_512_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_025_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(2_556_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(68_933_000 as u64) - // Standard Error: 23_000 - .saturating_add(Weight::from_ref_time(2_501_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_910_000 as u64) + // Standard Error: 19_000 + .saturating_add(Weight::from_ref_time(2_290_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(69_447_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_555_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_268_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(2_550_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(69_872_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(2_459_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_126_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(2_340_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(69_867_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_783_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_381_000 as u64) + // Standard Error: 9_000 + .saturating_add(Weight::from_ref_time(1_844_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(69_982_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_772_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_095_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_844_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(69_822_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_779_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_471_000 as u64) + // Standard Error: 8_000 + .saturating_add(Weight::from_ref_time(1_836_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(69_511_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_806_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_302_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(69_956_000 as u64) + Weight::from_ref_time(70_097_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_788_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(70_069_000 as u64) - // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(1_789_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_166_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_845_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(69_719_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_792_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_630_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_879_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(69_709_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_800_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_101_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_861_000 as u64).saturating_mul(r as u64)) } } @@ -1265,15 +1265,15 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(2_985_000 as u64) + Weight::from_ref_time(3_089_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(14_335_000 as u64) + Weight::from_ref_time(13_917_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(891_000 as u64).saturating_mul(k as u64)) + .saturating_add(Weight::from_ref_time(901_000 as u64).saturating_mul(k as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) @@ -1281,9 +1281,9 @@ impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(13_905_000 as u64) + Weight::from_ref_time(14_172_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_257_000 as u64).saturating_mul(q as u64)) + .saturating_add(Weight::from_ref_time(1_301_000 as u64).saturating_mul(q as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -1291,9 +1291,9 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(26_333_000 as u64) + Weight::from_ref_time(21_644_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(44_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -1304,9 +1304,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. 
fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(238_784_000 as u64) + Weight::from_ref_time(234_349_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1321,9 +1321,9 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(325_497_000 as u64) + Weight::from_ref_time(294_077_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(107_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(c as u64)) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) @@ -1338,7 +1338,7 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(201_129_000 as u64) + Weight::from_ref_time(199_028_000 as u64) // Standard Error: 0 .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) @@ -1350,7 +1350,7 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - Weight::from_ref_time(176_645_000 as u64) + Weight::from_ref_time(176_247_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1360,9 +1360,9 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(53_613_000 as u64) + Weight::from_ref_time(54_917_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) + .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1371,7 +1371,7 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(37_441_000 as u64) + Weight::from_ref_time(37_611_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1379,7 +1379,7 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:2 w:2) // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - Weight::from_ref_time(40_030_000 as u64) + Weight::from_ref_time(40_121_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -1390,9 +1390,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(241_426_000 as u64) - // Standard Error: 46_000 - .saturating_add(Weight::from_ref_time(37_834_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_129_000 as u64) + // Standard Error: 54_000 + .saturating_add(Weight::from_ref_time(35_413_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1403,9 +1403,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(181_406_000 as u64) - // Standard Error: 474_000 - .saturating_add(Weight::from_ref_time(206_995_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(189_418_000 as u64) + // Standard Error: 419_000 + .saturating_add(Weight::from_ref_time(207_107_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1417,9 +1417,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(198_301_000 as u64) - // Standard Error: 432_000 - .saturating_add(Weight::from_ref_time(264_877_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(203_928_000 as u64) + // Standard Error: 439_000 + .saturating_add(Weight::from_ref_time(268_983_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1431,9 +1431,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(243_754_000 as u64) - // Standard Error: 51_000 - .saturating_add(Weight::from_ref_time(41_219_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(243_800_000 as u64) + // Standard Error: 40_000 + .saturating_add(Weight::from_ref_time(38_797_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1444,9 +1444,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(239_352_000 as u64) - // Standard Error: 33_000 - .saturating_add(Weight::from_ref_time(15_228_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_667_000 as u64) + // Standard Error: 27_000 + .saturating_add(Weight::from_ref_time(15_826_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1457,9 +1457,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(240_637_000 as u64) - // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(37_717_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_116_000 as u64) + // Standard Error: 41_000 + .saturating_add(Weight::from_ref_time(35_402_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1470,9 +1470,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(241_127_000 as u64) - // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(37_481_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_515_000 as u64) + // Standard Error: 50_000 + .saturating_add(Weight::from_ref_time(35_144_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1483,9 +1483,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(246_236_000 as u64) - // Standard Error: 97_000 - .saturating_add(Weight::from_ref_time(110_095_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_087_000 as u64) + // Standard Error: 87_000 + .saturating_add(Weight::from_ref_time(110_236_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1496,9 +1496,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(241_552_000 as u64) - // Standard Error: 38_000 - .saturating_add(Weight::from_ref_time(37_189_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_774_000 as u64) + // Standard Error: 50_000 + .saturating_add(Weight::from_ref_time(35_216_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1509,9 +1509,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(239_622_000 as u64) - // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(37_630_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_146_000 as u64) + // Standard Error: 54_000 + .saturating_add(Weight::from_ref_time(35_101_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1522,9 +1522,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(240_284_000 as u64) - // Standard Error: 49_000 - .saturating_add(Weight::from_ref_time(37_375_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_096_000 as u64) + // Standard Error: 55_000 + .saturating_add(Weight::from_ref_time(34_612_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1535,9 +1535,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(241_463_000 as u64) - // Standard Error: 43_000 - .saturating_add(Weight::from_ref_time(37_313_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(242_978_000 as u64) + // Standard Error: 53_000 + .saturating_add(Weight::from_ref_time(34_780_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1549,9 +1549,9 @@ impl WeightInfo for () { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(245_287_000 as u64) - // Standard Error: 88_000 - .saturating_add(Weight::from_ref_time(99_890_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(246_175_000 as u64) + // Standard Error: 86_000 + .saturating_add(Weight::from_ref_time(99_827_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1562,9 +1562,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(168_156_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(17_991_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(168_655_000 as u64) + // Standard Error: 16_000 + .saturating_add(Weight::from_ref_time(15_635_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1575,9 +1575,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(240_833_000 as u64) - // Standard Error: 50_000 - .saturating_add(Weight::from_ref_time(35_622_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_729_000 as u64) + // Standard Error: 52_000 + .saturating_add(Weight::from_ref_time(33_477_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1588,9 +1588,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. 
fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(330_765_000 as u64) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(8_609_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(296_718_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(9_616_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1601,9 +1601,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { - Weight::from_ref_time(237_729_000 as u64) - // Standard Error: 488_000 - .saturating_add(Weight::from_ref_time(2_053_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(237_666_000 as u64) + // Standard Error: 497_000 + .saturating_add(Weight::from_ref_time(2_090_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1614,9 +1614,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(240_005_000 as u64) + Weight::from_ref_time(239_842_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(169_000 as u64).saturating_mul(n as u64)) + .saturating_add(Weight::from_ref_time(184_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1629,9 +1629,9 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(240_524_000 as u64) - // Standard Error: 426_000 - .saturating_add(Weight::from_ref_time(51_863_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_563_000 as u64) + // Standard Error: 519_000 + .saturating_add(Weight::from_ref_time(52_855_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1645,9 +1645,9 @@ impl WeightInfo for () { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(250_710_000 as u64) - // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(132_741_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(248_136_000 as u64) + // Standard Error: 94_000 + .saturating_add(Weight::from_ref_time(137_406_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1658,9 +1658,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(245_816_000 as u64) - // Standard Error: 115_000 - .saturating_add(Weight::from_ref_time(235_313_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(253_433_000 as u64) + // Standard Error: 105_000 + .saturating_add(Weight::from_ref_time(242_337_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1672,11 +1672,11 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(469_886_000 as u64) - // Standard Error: 463_000 - .saturating_add(Weight::from_ref_time(177_286_000 as u64).saturating_mul(t as u64)) - // Standard Error: 127_000 - .saturating_add(Weight::from_ref_time(64_646_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(478_106_000 as u64) + // Standard Error: 557_000 + .saturating_add(Weight::from_ref_time(176_325_000 as u64).saturating_mul(t as u64)) + // Standard Error: 153_000 + .saturating_add(Weight::from_ref_time(67_413_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1689,18 +1689,18 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(174_414_000 as u64) - // Standard Error: 26_000 - .saturating_add(Weight::from_ref_time(29_037_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(172_751_000 as u64) + // Standard Error: 37_000 + .saturating_add(Weight::from_ref_time(26_536_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(196_960_000 as u64) - // Standard Error: 414_000 - .saturating_add(Weight::from_ref_time(412_810_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(196_276_000 as u64) + // Standard Error: 428_000 + .saturating_add(Weight::from_ref_time(416_783_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1709,9 +1709,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(529_493_000 as u64) - // Standard Error: 1_251_000 - .saturating_add(Weight::from_ref_time(88_835_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(532_439_000 as u64) + // Standard Error: 1_323_000 + .saturating_add(Weight::from_ref_time(93_843_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(52 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(50 as u64)) @@ -1720,9 +1720,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(512_531_000 as u64) - // Standard Error: 1_107_000 - .saturating_add(Weight::from_ref_time(67_926_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(511_358_000 as u64) + // Standard Error: 1_144_000 + .saturating_add(Weight::from_ref_time(68_754_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(52 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(50 as u64)) @@ -1731,9 +1731,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(208_848_000 as u64) - // Standard Error: 398_000 - .saturating_add(Weight::from_ref_time(400_478_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(204_133_000 as u64) + // Standard Error: 498_000 + .saturating_add(Weight::from_ref_time(406_798_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -1742,9 +1742,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(488_903_000 as u64) - // Standard Error: 1_226_000 - .saturating_add(Weight::from_ref_time(69_519_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(489_339_000 as u64) + // Standard Error: 1_269_000 + .saturating_add(Weight::from_ref_time(70_700_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(49 as u64)) @@ -1753,9 +1753,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(211_825_000 as u64) - // Standard Error: 401_000 - .saturating_add(Weight::from_ref_time(328_607_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(211_344_000 as u64) + // Standard Error: 399_000 + .saturating_add(Weight::from_ref_time(330_244_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1763,9 +1763,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(452_324_000 as u64) - // Standard Error: 1_026_000 - .saturating_add(Weight::from_ref_time(150_657_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(449_353_000 as u64) + // Standard Error: 1_027_000 + .saturating_add(Weight::from_ref_time(153_022_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1773,9 +1773,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(218_730_000 as u64) - // Standard Error: 318_000 - .saturating_add(Weight::from_ref_time(302_022_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(216_197_000 as u64) + // Standard Error: 341_000 + .saturating_add(Weight::from_ref_time(305_401_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1783,9 +1783,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(425_128_000 as u64) - // Standard Error: 875_000 - .saturating_add(Weight::from_ref_time(60_095_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(423_033_000 as u64) + // Standard Error: 878_000 + .saturating_add(Weight::from_ref_time(61_940_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1793,9 +1793,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(208_039_000 as u64) - // Standard Error: 445_000 - .saturating_add(Weight::from_ref_time(425_933_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(204_244_000 as u64) + // Standard Error: 448_000 + .saturating_add(Weight::from_ref_time(429_399_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -1804,9 +1804,9 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(518_112_000 as u64) - // Standard Error: 1_406_000 - .saturating_add(Weight::from_ref_time(159_006_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(516_945_000 as u64) + // Standard Error: 1_412_000 + .saturating_add(Weight::from_ref_time(162_098_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(52 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(49 as u64)) @@ -1819,9 +1819,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(169_573_000 as u64) - // Standard Error: 701_000 - .saturating_add(Weight::from_ref_time(1_345_943_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(170_412_000 as u64) + // Standard Error: 761_000 + .saturating_add(Weight::from_ref_time(1_367_307_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -1835,8 +1835,8 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 20]`. 
fn seal_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 8_472_000 - .saturating_add(Weight::from_ref_time(17_569_902_000 as u64).saturating_mul(r as u64)) + // Standard Error: 9_269_000 + .saturating_add(Weight::from_ref_time(17_505_281_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((160 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1850,8 +1850,8 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 7_932_000 - .saturating_add(Weight::from_ref_time(17_368_706_000 as u64).saturating_mul(r as u64)) + // Standard Error: 8_780_000 + .saturating_add(Weight::from_ref_time(17_368_867_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads((158 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes((79 as u64).saturating_mul(r as u64))) } @@ -1863,11 +1863,11 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(11_036_172_000 as u64) - // Standard Error: 5_683_000 - .saturating_add(Weight::from_ref_time(1_154_945_000 as u64).saturating_mul(t as u64)) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(8_670_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(11_076_579_000 as u64) + // Standard Error: 6_568_000 + .saturating_add(Weight::from_ref_time(1_158_818_000 as u64).saturating_mul(t as u64)) + // Standard Error: 9_000 + .saturating_add(Weight::from_ref_time(9_731_000 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(167 as u64)) .saturating_add(RocksDbWeight::get().reads((81 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(163 as u64)) @@ -1883,8 +1883,8 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 25_721_000 - .saturating_add(Weight::from_ref_time(22_827_927_000 as u64).saturating_mul(r as u64)) + // Standard Error: 24_125_000 + .saturating_add(Weight::from_ref_time(22_830_521_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().reads((400 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(5 as u64)) @@ -1900,9 +1900,9 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(13_991_911_000 as u64) - // Standard Error: 70_000 - .saturating_add(Weight::from_ref_time(124_789_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(13_739_440_000 as u64) + // Standard Error: 79_000 + .saturating_add(Weight::from_ref_time(126_148_000 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(249 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(247 as u64)) @@ -1915,9 +1915,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(244_026_000 as u64) - // Standard Error: 69_000 - .saturating_add(Weight::from_ref_time(58_077_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(241_753_000 as u64) + // Standard Error: 60_000 + .saturating_add(Weight::from_ref_time(55_067_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1929,8 +1929,8 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 104_000 - .saturating_add(Weight::from_ref_time(320_432_000 as u64).saturating_mul(n as u64)) + // Standard Error: 95_000 + .saturating_add(Weight::from_ref_time(320_367_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1941,9 +1941,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(242_548_000 as u64) - // Standard Error: 88_000 - .saturating_add(Weight::from_ref_time(70_774_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(239_849_000 as u64) + // Standard Error: 80_000 + .saturating_add(Weight::from_ref_time(67_626_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1955,8 +1955,8 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 152_000 - .saturating_add(Weight::from_ref_time(246_268_000 as u64).saturating_mul(n as u64)) + // Standard Error: 106_000 + .saturating_add(Weight::from_ref_time(247_771_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1967,9 +1967,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(242_368_000 as u64) - // Standard Error: 68_000 - .saturating_add(Weight::from_ref_time(48_368_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(242_162_000 as u64) + // Standard Error: 58_000 + .saturating_add(Weight::from_ref_time(45_169_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1981,8 +1981,8 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 94_000 - .saturating_add(Weight::from_ref_time(95_913_000 as u64).saturating_mul(n as u64)) + // Standard Error: 95_000 + .saturating_add(Weight::from_ref_time(97_479_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1993,9 +1993,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(239_100_000 as u64) - // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(48_043_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(240_072_000 as u64) + // Standard Error: 53_000 + .saturating_add(Weight::from_ref_time(44_847_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2007,8 +2007,8 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 93_000 - .saturating_add(Weight::from_ref_time(96_000_000 as u64).saturating_mul(n as u64)) + // Standard Error: 95_000 + .saturating_add(Weight::from_ref_time(97_432_000 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2019,9 +2019,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(375_902_000 as u64) - // Standard Error: 613_000 - .saturating_add(Weight::from_ref_time(2_972_514_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(374_614_000 as u64) + // Standard Error: 634_000 + .saturating_add(Weight::from_ref_time(2_968_637_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2032,9 +2032,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(260_758_000 as u64) - // Standard Error: 396_000 - .saturating_add(Weight::from_ref_time(2_069_937_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(249_022_000 as u64) + // Standard Error: 408_000 + .saturating_add(Weight::from_ref_time(2_062_013_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2047,315 +2047,315 @@ impl WeightInfo for () { /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { Weight::from_ref_time(0 as u64) - // Standard Error: 1_629_000 - .saturating_add(Weight::from_ref_time(1_093_632_000 as u64).saturating_mul(r as u64)) + // Standard Error: 1_536_000 + .saturating_add(Weight::from_ref_time(1_099_219_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads((158 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes((158 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(70_130_000 as u64) + Weight::from_ref_time(70_276_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(798_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(933_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(70_439_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_717_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_309_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(2_977_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(70_647_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_112_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_165_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(2_686_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(70_045_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_228_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_872_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(2_374_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(71_013_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_284_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_891_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(2_629_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(69_846_000 as u64) + Weight::from_ref_time(69_747_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_278_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_639_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_471_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_774_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_262_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(2_142_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(68_967_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(1_984_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(68_808_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(2_342_000 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(72_756_000 as u64) + Weight::from_ref_time(73_245_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(e as u64)) + .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(70_426_000 as u64) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(7_095_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_308_000 as u64) + // Standard Error: 10_000 + .saturating_add(Weight::from_ref_time(7_333_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(85_090_000 as u64) - // Standard Error: 34_000 - .saturating_add(Weight::from_ref_time(8_897_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(83_967_000 as u64) + // Standard Error: 12_000 + .saturating_add(Weight::from_ref_time(9_205_000 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(94_306_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(585_000 as u64).saturating_mul(p as u64)) + Weight::from_ref_time(93_600_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(546_000 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(70_494_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(857_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_449_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_052_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(70_467_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(890_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_326_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(998_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(70_451_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_196_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_525_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_467_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(72_857_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_442_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(73_703_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_495_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(72_589_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(1_682_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(73_578_000 as u64) + // Standard Error: 5_000 + .saturating_add(Weight::from_ref_time(1_546_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(70_230_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(820_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_379_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(934_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. 
fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(71_027_000 as u64) - // Standard Error: 296_000 - .saturating_add(Weight::from_ref_time(177_139_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_069_000 as u64) + // Standard Error: 114_000 + .saturating_add(Weight::from_ref_time(182_540_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(70_270_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_250_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_188_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(70_088_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(1_281_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_970_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_366_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(69_538_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_283_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_352_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_356_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(69_842_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_266_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_229_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_354_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(69_754_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_230_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_202_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_355_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(70_842_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_193_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_065_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(69_670_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_281_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_252_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_356_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(69_728_000 as u64) + Weight::from_ref_time(70_049_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_770_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_823_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(70_522_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(1_767_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_519_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_815_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(71_224_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(1_752_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_953_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_834_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(70_625_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(1_763_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_299_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_818_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(71_816_000 as u64) - // Standard Error: 31_000 - .saturating_add(Weight::from_ref_time(1_852_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_141_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_825_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(70_019_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_780_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_209_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_827_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(69_881_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_783_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_980_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_831_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(70_021_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_766_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_022_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_829_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(72_156_000 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(1_712_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_030_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_826_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(70_004_000 as u64) + Weight::from_ref_time(70_170_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_820_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_833_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(70_028_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_772_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_895_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_826_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(69_159_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(1_813_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_932_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_830_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(70_033_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_760_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_091_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_825_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(69_702_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(2_512_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_025_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(2_556_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(68_933_000 as u64) - // Standard Error: 23_000 - .saturating_add(Weight::from_ref_time(2_501_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_910_000 as u64) + // Standard Error: 19_000 + .saturating_add(Weight::from_ref_time(2_290_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(69_447_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_555_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_268_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(2_550_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(69_872_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(2_459_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_126_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(2_340_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(69_867_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_783_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_381_000 as u64) + // Standard Error: 9_000 + .saturating_add(Weight::from_ref_time(1_844_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(69_982_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_772_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_095_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_844_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(69_822_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_779_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_471_000 as u64) + // Standard Error: 8_000 + .saturating_add(Weight::from_ref_time(1_836_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(69_511_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_806_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_302_000 as u64) + // Standard Error: 2_000 + .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(69_956_000 as u64) + Weight::from_ref_time(70_097_000 as u64) // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_788_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(70_069_000 as u64) - // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(1_789_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_166_000 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_845_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(69_719_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_792_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_630_000 as u64) + // Standard Error: 4_000 + .saturating_add(Weight::from_ref_time(1_879_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(69_709_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_800_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_101_000 as u64) + // Standard Error: 3_000 + .saturating_add(Weight::from_ref_time(1_861_000 as u64).saturating_mul(r as u64)) } } diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index d591657ee4bd9..27865f0e61a4f 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -23,7 +23,7 @@ serde = { version = "1.0.136", optional = true, features = ["derive"] } byteorder = { version = "1.3.2", default-features = false } primitive-types = { version = "0.11.1", default-features = false, features = ["codec", "scale-info"] } impl-serde = { version = "0.3.0", optional = true } -wasmi = { version = "0.9.1", optional = true } +wasmi = { version = "0.13", optional = true } hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } base58 = { version = "0.2.0", optional = true } diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index 6a83e20a94618..90b7df105ecde 100644 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -12,16 +12,10 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[target.'cfg(target_arch = "wasm32")'.dependencies] -wasmi = { version = "0.9.1", default-features = false, features = ["core"] } - -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] -wasmi = "0.9.0" - [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } log = { version = "0.4", default-features = false } -wasmi = { version = "0.9.0", optional = true } +wasmi = { version = "0.13", default-features = false } sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-io = { version = "6.0.0", default-features = false, path = "../io" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } @@ -40,7 +34,7 @@ std = [ "sp-io/std", "sp-std/std", "sp-wasm-interface/std", - "wasmi", + "wasmi/std", ] strict = [] wasmer-sandbox = [] diff --git a/primitives/sandbox/src/embedded_executor.rs b/primitives/sandbox/src/embedded_executor.rs index 4410e26c8d122..115c3192f3d89 100644 --- a/primitives/sandbox/src/embedded_executor.rs +++ b/primitives/sandbox/src/embedded_executor.rs @@ -22,7 +22,7 @@ use alloc::string::String; use wasmi::{ memory_units::Pages, Externals, FuncInstance, FuncRef, GlobalDescriptor, GlobalRef, ImportResolver, MemoryDescriptor, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, - RuntimeArgs, RuntimeValue, Signature, TableDescriptor, TableRef, Trap, TrapKind, + RuntimeArgs, RuntimeValue, Signature, TableDescriptor, TableRef, Trap, }; use sp_std::{ @@ -113,7 +113,7 @@ impl<'a, T> Externals for GuestExternals<'a, T> { ReturnValue::Value(v) => Some(to_wasmi(v)), ReturnValue::Unit => None, }), - Err(HostError) => Err(TrapKind::Host(Box::new(DummyHostError)).into()), + Err(HostError) => Err(Trap::host(DummyHostError)), } } } diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 0ca78940fbbbc..1750ebb8cd90b 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } impl-serde = { 
version = "0.3.1", optional = true } -parity-wasm = { version = "0.42.2", optional = true } +parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } thiserror = { version = "1.0.30", optional = true } diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index 89542d8229a0c..05ccad88ec37a 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", optional = true } -wasmi = { version = "0.9.1", optional = true } +wasmi = { version = "0.13", optional = true } wasmtime = { version = "0.40.1", default-features = false, optional = true } sp-std = { version = "4.0.0", default-features = false, path = "../std" } From f9fdee10ef4b37dd820406491bb526d3c470e823 Mon Sep 17 00:00:00 2001 From: Hector Bulgarini Date: Thu, 8 Sep 2022 17:57:42 +0200 Subject: [PATCH 112/348] Removing without_storage_info from scored-pool pallet. (#11996) * Removing without_storage_info from scored-pool pallet. * Addressing PR feedback * typo * typo * Addressing PR comments and formatting code * Removing unwanted import * Adding a map_err * cargo fmt Co-authored-by: parity-processbot <> --- frame/scored-pool/src/lib.rs | 62 +++++++++++++++++++++++----------- frame/scored-pool/src/mock.rs | 39 ++++++++++----------- frame/scored-pool/src/tests.rs | 17 ++++++++++ 3 files changed, 80 insertions(+), 38 deletions(-) diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index dd96a5df2baf9..8d4c4715096b7 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -98,10 +98,11 @@ mod mock; #[cfg(test)] mod tests; -use codec::FullCodec; +use codec::{FullCodec, MaxEncodedLen}; use frame_support::{ ensure, traits::{ChangeMembers, Currency, Get, InitializeMembers, ReservableCurrency}, + BoundedVec, }; pub use pallet::*; use sp_runtime::traits::{AtLeast32Bit, StaticLookup, Zero}; @@ -109,7 +110,12 @@ use sp_std::{fmt::Debug, prelude::*}; type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; -type PoolT = Vec<(::AccountId, Option<>::Score>)>; +type PoolT = BoundedVec< + (::AccountId, Option<>::Score>), + >::MaximumMembers, +>; +type MembersT = + BoundedVec<::AccountId, >::MaximumMembers>; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// The enum is supplied when refreshing the members set. @@ -130,7 +136,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] pub struct Pallet(_); #[pallet::config] @@ -138,6 +143,10 @@ pub mod pallet { /// The currency used for deposits. type Currency: Currency + ReservableCurrency; + /// Maximum members length allowed. + #[pallet::constant] + type MaximumMembers: Get; + /// The score attributed to a member or candidate. type Score: AtLeast32Bit + Clone @@ -146,7 +155,8 @@ pub mod pallet { + FullCodec + MaybeSerializeDeserialize + Debug - + scale_info::TypeInfo; + + scale_info::TypeInfo + + MaxEncodedLen; /// The overarching event type. type Event: From> + IsType<::Event>; @@ -207,9 +217,11 @@ pub mod pallet { InvalidIndex, /// Index does not match requested account. 
WrongAccountIndex, + /// Number of members exceeds `MaximumMembers`. + TooManyMembers, } - /// The current pool of candidates, stored as an ordered Vec + /// The current pool of candidates, stored as an ordered Bounded Vec /// (ordered descending by score, `None` last, highest first). #[pallet::storage] #[pallet::getter(fn pool)] @@ -229,7 +241,7 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn members)] pub(crate) type Members, I: 'static = ()> = - StorageValue<_, Vec, ValueQuery>; + StorageValue<_, MembersT, ValueQuery>; /// Size of the `Members` set. #[pallet::storage] @@ -263,10 +275,10 @@ pub mod pallet { }); // Sorts the `Pool` by score in a descending order. Entities which - // have a score of `None` are sorted to the beginning of the vec. + // have a score of `None` are sorted to the end of the bounded vec. pool.sort_by_key(|(_, maybe_score)| Reverse(maybe_score.unwrap_or_default())); - - >::put(self.member_count); + >::update_member_count(self.member_count) + .expect("Number of allowed members exceeded"); >::put(&pool); >::refresh_members(pool, ChangeReceiver::MembershipInitialized); } @@ -308,7 +320,8 @@ pub mod pallet { // can be inserted as last element in pool, since entities with // `None` are always sorted to the end. - >::append((who.clone(), Option::<>::Score>::None)); + >::try_append((who.clone(), Option::<>::Score>::None)) + .map_err(|_| Error::::TooManyMembers)?; >::insert(&who, true); @@ -394,7 +407,7 @@ pub mod pallet { Reverse(maybe_score.unwrap_or_default()) }) .unwrap_or_else(|l| l); - pool.insert(location, item); + pool.try_insert(location, item).map_err(|_| Error::::TooManyMembers)?; >::put(&pool); Self::deposit_event(Event::::CandidateScored); @@ -410,8 +423,7 @@ pub mod pallet { #[pallet::weight(0)] pub fn change_member_count(origin: OriginFor, count: u32) -> DispatchResult { ensure_root(origin)?; - MemberCount::::put(&count); - Ok(()) + Self::update_member_count(count).map_err(Into::into) } } } @@ -424,23 +436,28 @@ impl, I: 'static> Pallet { /// type function to invoke at the end of the method. 
fn refresh_members(pool: PoolT, notify: ChangeReceiver) { let count = MemberCount::::get(); + let old_members = >::get(); - let mut new_members: Vec = pool + let new_members: Vec = pool .into_iter() .filter(|(_, score)| score.is_some()) .take(count as usize) .map(|(account_id, _)| account_id) .collect(); - new_members.sort(); - let old_members = >::get(); - >::put(&new_members); + // It's safe to truncate_from at this point since MemberCount + // is verified that it does not exceed the MaximumMembers value + let mut new_members_bounded: MembersT = BoundedVec::truncate_from(new_members); + + new_members_bounded.sort(); + + >::put(&new_members_bounded); match notify { ChangeReceiver::MembershipInitialized => - T::MembershipInitialized::initialize_members(&new_members), + T::MembershipInitialized::initialize_members(&new_members_bounded), ChangeReceiver::MembershipChanged => - T::MembershipChanged::set_members_sorted(&new_members[..], &old_members[..]), + T::MembershipChanged::set_members_sorted(&new_members_bounded[..], &old_members[..]), } } @@ -486,4 +503,11 @@ impl, I: 'static> Pallet { Ok(()) } + + /// Make sure the new member count value does not exceed the MaximumMembers + fn update_member_count(new_member_count: u32) -> Result<(), Error> { + ensure!(new_member_count <= T::MaximumMembers::get(), Error::::TooManyMembers); + >::put(new_member_count); + Ok(()) + } } diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 00c66c3bdefe8..74f084673e4c2 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_scored_pool; use frame_support::{ - ord_parameter_types, parameter_types, + bounded_vec, construct_runtime, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64, GenesisBuild}, }; use frame_system::EnsureSignedBy; @@ -34,7 +34,7 @@ use sp_runtime::{ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; -frame_support::construct_runtime!( +construct_runtime!( pub enum Test where Block = Block, NodeBlock = Block, @@ -96,13 +96,13 @@ impl pallet_balances::Config for Test { } parameter_types! 
{ - pub static MembersTestValue: Vec = vec![]; + pub static MembersTestValue: BoundedVec> = bounded_vec![0,10]; } pub struct TestChangeMembers; impl ChangeMembers for TestChangeMembers { fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { - let mut old_plus_incoming = MembersTestValue::get().to_vec(); + let mut old_plus_incoming = MembersTestValue::get().into_inner(); old_plus_incoming.extend_from_slice(incoming); old_plus_incoming.sort(); @@ -112,13 +112,15 @@ impl ChangeMembers for TestChangeMembers { assert_eq!(old_plus_incoming, new_plus_outgoing); - MembersTestValue::mutate(|m| *m = new.to_vec()); + MembersTestValue::set(>>::truncate_from(new.to_vec())); } } impl InitializeMembers for TestChangeMembers { fn initialize_members(new_members: &[u64]) { - MembersTestValue::mutate(|m| *m = new_members.to_vec()); + MembersTestValue::set(>>::truncate_from( + new_members.to_vec(), + )); } } @@ -132,25 +134,24 @@ impl Config for Test { type Period = ConstU64<4>; type Score = u64; type ScoreOrigin = EnsureSignedBy; + type MaximumMembers = ConstU32<10>; } pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![ - (5, 500_000), - (10, 500_000), - (15, 500_000), - (20, 500_000), - (31, 500_000), - (40, 500_000), - (99, 1), - ], + let mut balances = vec![]; + for i in 1..31 { + balances.push((i, 500_000)); } - .assimilate_storage(&mut t) - .unwrap(); + balances.push((31, 500_000)); + balances.push((40, 500_000)); + balances.push((99, 1)); + + pallet_balances::GenesisConfig:: { balances } + .assimilate_storage(&mut t) + .unwrap(); pallet_scored_pool::GenesisConfig:: { - pool: vec![(5, None), (10, Some(1)), (20, Some(2)), (31, Some(2)), (40, Some(3))], + pool: bounded_vec![(10, Some(1)), (20, Some(2)), (31, Some(2)), (40, Some(3)), (5, None)], member_count: 2, } .assimilate_storage(&mut t) diff --git a/frame/scored-pool/src/tests.rs b/frame/scored-pool/src/tests.rs index 42f0b47e8b940..1a68891be0f50 100644 --- a/frame/scored-pool/src/tests.rs +++ b/frame/scored-pool/src/tests.rs @@ -297,3 +297,20 @@ fn candidacy_resubmitting_works() { assert_eq!(ScoredPool::candidate_exists(who), true); }); } + +#[test] +fn pool_candidates_exceeded() { + new_test_ext().execute_with(|| { + for i in [1, 2, 3, 4, 6] { + let who = i as u64; + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + let index = find_in_pool(who).expect("entity must be in pool") as u32; + assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99)); + } + + assert_noop!( + ScoredPool::submit_candidacy(Origin::signed(8)), + Error::::TooManyMembers + ); + }); +} From c8bfc91737b8a9e94e06578a1fc5dec80129e025 Mon Sep 17 00:00:00 2001 From: MOZGIII Date: Thu, 8 Sep 2022 20:37:23 +0400 Subject: [PATCH 113/348] Update ss58-registry (#12220) --- Cargo.lock | 4 ++-- primitives/core/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38439947258a0..f47ac8ba408b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10247,9 +10247,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "ss58-registry" -version = "1.18.0" +version = "1.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceb8b72a924ccfe7882d0e26144c114503760a4d1248bb5cd06c8ab2d55404cc" +checksum = "b0837b5d62f42082c9d56cd946495ae273a3c68083b637b9153341d5e465146d" dependencies 
= [ "Inflector", "num-format", diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 27865f0e61a4f..bf54f8467d1ef 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -57,7 +57,7 @@ hex = { version = "0.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } merlin = { version = "2.0", default-features = false, optional = true } secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } -ss58-registry = { version = "1.18.0", default-features = false } +ss58-registry = { version = "1.29.0", default-features = false } sp-core-hashing = { version = "4.0.0", path = "./hashing", default-features = false, optional = true } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../runtime-interface" } From 5a48536a49859f83dbbbab1285311f14c192e7b1 Mon Sep 17 00:00:00 2001 From: Mira Ressel Date: Thu, 8 Sep 2022 18:50:31 +0200 Subject: [PATCH 114/348] Add missing metadata specs to crate manifests (#12221) Some crates were missing repository or description specs, which are required for publishing crates with cargo unleash. --- bin/node/bench/Cargo.toml | 2 ++ bin/node/inspect/Cargo.toml | 1 + bin/node/primitives/Cargo.toml | 1 + bin/node/rpc/Cargo.toml | 1 + bin/node/runtime/Cargo.toml | 1 + 5 files changed, 6 insertions(+) diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index c7a747c30ed2e..878c5d07c8e78 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -5,6 +5,8 @@ authors = ["Parity Technologies "] description = "Substrate node integration benchmarks." edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index c41681d11be1e..ede96a76206ce 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -2,6 +2,7 @@ name = "node-inspect" version = "0.9.0-dev" authors = ["Parity Technologies "] +description = "Substrate node block inspection tool." edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" homepage = "https://substrate.io" diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index bc6fa669ca4ea..65a4223a7fb9f 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -2,6 +2,7 @@ name = "node-primitives" version = "2.0.0" authors = ["Parity Technologies "] +description = "Substrate node low-level primitives." edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 07b25085b9d10..851eb2cfc5104 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -2,6 +2,7 @@ name = "node-rpc" version = "3.0.0-dev" authors = ["Parity Technologies "] +description = "Substrate node rpc methods." 
edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index ebec9b8b755f6..eb9c91a3baa05 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -2,6 +2,7 @@ name = "kitchensink-runtime" version = "3.0.0-dev" authors = ["Parity Technologies "] +description = "Substrate node kitchensink runtime." edition = "2021" build = "build.rs" license = "Apache-2.0" From 51f60d00db606e508f76e2b5f67f5eebe8647f2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 10 Sep 2022 20:50:59 +0100 Subject: [PATCH 115/348] construct_runtime: Fix generation of types behind features (#12229) * construct_runtime: Fix generation of types behind features With the recent addition of supporting features in `construct_runtime!` there was a bug overseen. The `AllPalletsWithSystem` etc type declarations would be declared twice when a certain was enabled. The problem was that in the macro we didn't feature gate the types that should be declared when there is no feature enabled. This pull request now takes care of feature gating this type behind `all(#( not(feature) ))`. So, these types will only be enabled if no of the configured features is enabled. * Fix tests * FMT --- .../procedural/src/construct_runtime/mod.rs | 138 +++++++++--------- frame/support/test/Cargo.toml | 7 +- frame/support/test/tests/pallet.rs | 78 +++++++--- scripts/ci/gitlab/pipeline/test.yml | 2 +- 4 files changed, 133 insertions(+), 92 deletions(-) diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index cfd582d0e4472..e20cb61b7aec1 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -156,10 +156,7 @@ use parse::{ use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::quote; -use std::{ - collections::{HashMap, HashSet}, - str::FromStr, -}; +use std::{collections::HashSet, str::FromStr}; use syn::{Ident, Result}; /// The fixed name of the system pallet. @@ -324,11 +321,15 @@ fn decl_all_pallets<'a>( features: &HashSet<&str>, ) -> TokenStream2 { let mut types = TokenStream2::new(); - let mut names_by_feature = features + + // Every feature set to the pallet names that should be included by this feature set. 
+ let mut features_to_names = features .iter() + .map(|f| *f) .powerset() - .map(|feat| (feat, Vec::new())) - .collect::>(); + .map(|feat| (HashSet::from_iter(feat), Vec::new())) + .collect::, Vec<_>)>>(); + for pallet_declaration in pallet_declarations { let type_name = &pallet_declaration.name; let pallet = &pallet_declaration.path; @@ -346,21 +347,22 @@ fn decl_all_pallets<'a>( types.extend(type_decl); if pallet_declaration.cfg_pattern.is_empty() { - for names in names_by_feature.values_mut() { + for (_, names) in features_to_names.iter_mut() { names.push(&pallet_declaration.name); } } else { - for (feature_set, names) in &mut names_by_feature { + for (feature_set, names) in &mut features_to_names { // Rust tidbit: if we have multiple `#[cfg]` feature on the same item, then the // predicates listed in all `#[cfg]` attributes are effectively joined by `and()`, // meaning that all of them must match in order to activate the item let is_feature_active = pallet_declaration.cfg_pattern.iter().all(|expr| { expr.eval(|pred| match pred { - Predicate::Feature(f) => feature_set.contains(&f), - Predicate::Test => feature_set.contains(&&"test"), + Predicate::Feature(f) => feature_set.contains(f), + Predicate::Test => feature_set.contains(&"test"), _ => false, }) }); + if is_feature_active { names.push(&pallet_declaration.name); } @@ -368,11 +370,34 @@ fn decl_all_pallets<'a>( } } - let all_pallets_without_system = names_by_feature.iter().map(|(feature_set, names)| { - let mut feature_set = feature_set.iter().collect::>(); - let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); - let feature_set = feature_set.into_iter(); - let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + // All possible features. This will be used below for the empty feature set. + let mut all_features = features_to_names + .iter() + .flat_map(|f| f.0.iter().cloned()) + .collect::>(); + let attribute_to_names = features_to_names + .into_iter() + .map(|(mut features, names)| { + // If this is the empty feature set, it needs to be changed to negate all available + // features. So, we ensure that there is some type declared when all features are not + // enabled. + if features.is_empty() { + let test_cfg = all_features.remove("test").then_some(quote!(test)).into_iter(); + let features = all_features.iter(); + let attr = quote!(#[cfg(all( #(not(#test_cfg)),* #(not(feature = #features)),* ))]); + + (attr, names) + } else { + let test_cfg = features.remove("test").then_some(quote!(test)).into_iter(); + let features = features.iter(); + let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #features),* ))]); + + (attr, names) + } + }) + .collect::>(); + + let all_pallets_without_system = attribute_to_names.iter().map(|(attr, names)| { let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME); quote! { #attr @@ -382,11 +407,7 @@ fn decl_all_pallets<'a>( } }); - let all_pallets_with_system = names_by_feature.iter().map(|(feature_set, names)| { - let mut feature_set = feature_set.iter().collect::>(); - let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); - let feature_set = feature_set.into_iter(); - let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + let all_pallets_with_system = attribute_to_names.iter().map(|(attr, names)| { quote! { #attr /// All pallets included in the runtime as a nested tuple of types. 
@@ -394,28 +415,19 @@ fn decl_all_pallets<'a>( } }); - let all_pallets_without_system_reversed = - names_by_feature.iter().map(|(feature_set, names)| { - let mut feature_set = feature_set.iter().collect::>(); - let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); - let feature_set = feature_set.into_iter(); - let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); - let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME).rev(); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// Excludes the System pallet. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsWithoutSystemReversed = ( #(#names,)* ); - } - }); + let all_pallets_without_system_reversed = attribute_to_names.iter().map(|(attr, names)| { + let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME).rev(); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types in reversed order. + /// Excludes the System pallet. + #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsWithoutSystemReversed = ( #(#names,)* ); + } + }); - let all_pallets_with_system_reversed = names_by_feature.iter().map(|(feature_set, names)| { - let mut feature_set = feature_set.iter().collect::>(); - let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); - let feature_set = feature_set.into_iter(); - let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); + let all_pallets_with_system_reversed = attribute_to_names.iter().map(|(attr, names)| { let names = names.iter().rev(); quote! { #attr @@ -426,35 +438,19 @@ fn decl_all_pallets<'a>( } }); - let system_pallet = - match names_by_feature[&Vec::new()].iter().find(|n| **n == SYSTEM_PALLET_NAME) { - Some(name) => name, - None => - return syn::Error::new( - proc_macro2::Span::call_site(), - "`System` pallet declaration is missing. \ - Please add this line: `System: frame_system,`", - ) - .into_compile_error(), - }; - - let all_pallets_reversed_with_system_first = - names_by_feature.iter().map(|(feature_set, names)| { - let mut feature_set = feature_set.iter().collect::>(); - let test_cfg = feature_set.remove(&&&"test").then_some(quote!(test)).into_iter(); - let feature_set = feature_set.into_iter(); - let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #feature_set),* ))]); - let names = std::iter::once(system_pallet) - .chain(names.iter().rev().filter(|n| **n != SYSTEM_PALLET_NAME)); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// With the system pallet first. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsReversedWithSystemFirst = ( #(#names,)* ); - } - }); + let all_pallets_reversed_with_system_first = attribute_to_names.iter().map(|(attr, names)| { + let system = quote::format_ident!("{}", SYSTEM_PALLET_NAME); + let names = std::iter::once(&system) + .chain(names.iter().rev().filter(|n| **n != SYSTEM_PALLET_NAME).cloned()); + quote! { + #attr + /// All pallets included in the runtime as a nested tuple of types in reversed order. + /// With the system pallet first. + #[deprecated(note = "Using reverse pallet orders is deprecated. 
use only \ + `AllPalletWithSystem or AllPalletsWithoutSystem`")] + pub type AllPalletsReversedWithSystemFirst = ( #(#names,)* ); + } + }); quote!( #types diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index dd23d7e6b0d96..d7d3bfc98f3d7 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -47,9 +47,10 @@ std = [ "sp-version/std", ] try-runtime = ["frame-support/try-runtime"] -# WARNING: CI only execute pallet test with this feature, -# if the feature intended to be used outside, CI and this message need to be updated. -conditional-storage = [] +# WARNING: +# Only CI runs with this feature enabled. This feature is for testing stuff related to the FRAME macros +# in conjunction with rust features. +frame-feature-testing = [] # Disable ui tests disable-ui-tests = [] no-metadata-docs = ["frame-support/no-metadata-docs"] diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 907a52f1184cc..d683399482bf4 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -334,22 +334,22 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn conditional_value)] - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] pub type ConditionalValue = StorageValue<_, u32>; - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] #[pallet::storage] #[pallet::getter(fn conditional_map)] pub type ConditionalMap = StorageMap<_, Twox64Concat, u16, u32, OptionQuery, GetDefault, ConstU32<12>>; - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] #[pallet::storage] #[pallet::getter(fn conditional_double_map)] pub type ConditionalDoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] #[pallet::storage] #[pallet::getter(fn conditional_nmap)] pub type ConditionalNMap = @@ -550,7 +550,9 @@ pub mod pallet2 { #[frame_support::pallet] pub mod pallet3 { #[pallet::config] - pub trait Config: frame_system::Config {} + pub trait Config: frame_system::Config::Origin> { + type Origin; + } #[pallet::pallet] pub struct Pallet(_); @@ -612,6 +614,11 @@ impl pallet2::Config for Runtime { impl pallet4::Config for Runtime {} +#[cfg(feature = "frame-feature-testing")] +impl pallet3::Config for Runtime { + type Origin = Origin; +} + pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; @@ -626,7 +633,7 @@ frame_support::construct_runtime!( System: frame_system exclude_parts { Pallet, Storage }, Example: pallet, Example2: pallet2 exclude_parts { Call }, - #[cfg(feature = "example3")] + #[cfg(feature = "frame-feature-testing")] Example3: pallet3, Example4: pallet4 use_parts { Call }, } @@ -1024,7 +1031,7 @@ fn storage_expand() { Err(pallet::Error::::NonExistentStorageValue), ); - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] { pallet::ConditionalValue::::put(1); pallet::ConditionalMap::::insert(1, 2); @@ -1164,8 +1171,10 @@ fn migrate_from_pallet_version_to_storage_version() { AllPalletsWithSystem, >(&db_weight); - // 4 pallets, 2 writes and every write costs 5 weight. - assert_eq!(Weight::from_ref_time(4 * 2 * 5), weight); + let pallet_num = if cfg!(feature = "frame-feature-testing") { 5 } else { 4 }; + + // `pallet_num` pallets, 2 writes and every write costs 5 weight. 
+ assert_eq!(Weight::from_ref_time(pallet_num * 2 * 5), weight); // All pallet versions should be removed assert!(sp_io::storage::get(&pallet_version_key(Example::name())).is_none()); @@ -1332,7 +1341,7 @@ fn metadata() { default: vec![1, 1], docs: vec![], }, - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] StorageEntryMetadata { name: "ConditionalValue", modifier: StorageEntryModifier::Optional, @@ -1340,7 +1349,7 @@ fn metadata() { default: vec![0], docs: vec![], }, - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] StorageEntryMetadata { name: "ConditionalMap", modifier: StorageEntryModifier::Optional, @@ -1352,7 +1361,7 @@ fn metadata() { default: vec![0], docs: vec![], }, - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] StorageEntryMetadata { name: "ConditionalDoubleMap", modifier: StorageEntryModifier::Optional, @@ -1367,7 +1376,7 @@ fn metadata() { default: vec![0], docs: vec![], }, - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] StorageEntryMetadata { name: "ConditionalNMap", modifier: StorageEntryModifier::Optional, @@ -1489,6 +1498,16 @@ fn metadata() { constants: vec![], error: None, }, + #[cfg(feature = "frame-feature-testing")] + PalletMetadata { + index: 3, + name: "Example3", + storage: None, + calls: None, + event: None, + constants: vec![], + error: None, + }, ]; let empty_doc = pallets[0].event.as_ref().unwrap().ty.type_info().docs().is_empty() && @@ -1633,7 +1652,7 @@ fn test_storage_info() { max_values: None, max_size: Some(16 + 1 + 8 + 2 + 16), }, - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] { StorageInfo { pallet_name: b"Example".to_vec(), @@ -1643,7 +1662,7 @@ fn test_storage_info() { max_size: Some(4), } }, - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] { StorageInfo { pallet_name: b"Example".to_vec(), @@ -1653,7 +1672,7 @@ fn test_storage_info() { max_size: Some(8 + 2 + 4), } }, - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] { StorageInfo { pallet_name: b"Example".to_vec(), @@ -1663,7 +1682,7 @@ fn test_storage_info() { max_size: Some(16 + 1 + 8 + 2 + 4), } }, - #[cfg(feature = "conditional-storage")] + #[cfg(feature = "frame-feature-testing")] { StorageInfo { pallet_name: b"Example".to_vec(), @@ -1730,27 +1749,42 @@ fn assert_type_all_pallets_reversed_with_system_first_is_correct() { // Just ensure the 2 types are same. #[allow(deprecated)] fn _a(_t: AllPalletsReversedWithSystemFirst) {} + #[cfg(not(feature = "frame-feature-testing"))] fn _b(t: (System, Example4, Example2, Example)) { _a(t) } + #[cfg(feature = "frame-feature-testing")] + fn _b(t: (System, Example4, Example3, Example2, Example)) { + _a(t) + } } #[test] fn assert_type_all_pallets_with_system_is_correct() { // Just ensure the 2 types are same. fn _a(_t: AllPalletsWithSystem) {} + #[cfg(not(feature = "frame-feature-testing"))] fn _b(t: (System, Example, Example2, Example4)) { _a(t) } + #[cfg(feature = "frame-feature-testing")] + fn _b(t: (System, Example, Example2, Example3, Example4)) { + _a(t) + } } #[test] fn assert_type_all_pallets_without_system_is_correct() { // Just ensure the 2 types are same. 
fn _a(_t: AllPalletsWithoutSystem) {} + #[cfg(not(feature = "frame-feature-testing"))] fn _b(t: (Example, Example2, Example4)) { _a(t) } + #[cfg(feature = "frame-feature-testing")] + fn _b(t: (Example, Example2, Example3, Example4)) { + _a(t) + } } #[test] @@ -1758,9 +1792,14 @@ fn assert_type_all_pallets_with_system_reversed_is_correct() { // Just ensure the 2 types are same. #[allow(deprecated)] fn _a(_t: AllPalletsWithSystemReversed) {} + #[cfg(not(feature = "frame-feature-testing"))] fn _b(t: (Example4, Example2, Example, System)) { _a(t) } + #[cfg(feature = "frame-feature-testing")] + fn _b(t: (Example4, Example3, Example2, Example, System)) { + _a(t) + } } #[test] @@ -1768,9 +1807,14 @@ fn assert_type_all_pallets_without_system_reversed_is_correct() { // Just ensure the 2 types are same. #[allow(deprecated)] fn _a(_t: AllPalletsWithoutSystemReversed) {} + #[cfg(not(feature = "frame-feature-testing"))] fn _b(t: (Example4, Example2, Example)) { _a(t) } + #[cfg(feature = "frame-feature-testing")] + fn _b(t: (Example4, Example3, Example2, Example)) { + _a(t) + } } #[test] diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index 8036e2cec26bb..50916a37fcf55 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -251,7 +251,7 @@ test-frame-support: RUN_UI_TESTS: 1 script: - rusty-cachier snapshot create - - time cargo test -p frame-support-test --features=conditional-storage,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec + - time cargo test -p frame-support-test --features=frame-feature-testing,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout - rusty-cachier cache upload From 7de3d7870f63a525977fd4e91cda16039e7b1d60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 12 Sep 2022 11:25:56 +0100 Subject: [PATCH 116/348] Remove native call (#12201) * Remove native call With the recent introduction of staging runtime apis the native call wasn't supported anymore. This removes the entire support for this as it is not used anymore. 
* FMT * Fix benchmarks * FIX ui tests --- bin/node/executor/benches/bench.rs | 44 +--- bin/node/executor/tests/basic.rs | 204 ++++------------ bin/node/executor/tests/common.rs | 51 +--- bin/node/executor/tests/fees.rs | 59 +---- client/api/src/call_executor.rs | 15 +- client/api/src/execution_extensions.rs | 4 +- client/executor/src/native_executor.rs | 82 +++---- client/service/src/client/call_executor.rs | 40 +--- client/service/src/client/client.rs | 24 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 4 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 4 +- .../api/proc-macro/src/impl_runtime_apis.rs | 40 ++-- primitives/api/src/lib.rs | 22 +- primitives/core/src/lib.rs | 81 ------- primitives/core/src/traits.rs | 9 +- primitives/state-machine/src/lib.rs | 220 +++++------------- test-utils/runtime/src/system.rs | 19 +- 17 files changed, 214 insertions(+), 708 deletions(-) diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index a1d31a5a966db..3ad5dc61d528f 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -31,7 +31,6 @@ use sc_executor::{Externalities, NativeElseWasmExecutor, RuntimeVersionOf, WasmE use sp_core::{ storage::well_known_keys, traits::{CodeExecutor, RuntimeCode}, - NativeOrEncoded, NeverNativeValue, }; use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; @@ -112,46 +111,24 @@ fn construct_block( // execute the block to get the real header. executor - .call:: _>( - ext, - &runtime_code, - "Core_initialize_block", - &header.encode(), - true, - None, - ) + .call(ext, &runtime_code, "Core_initialize_block", &header.encode(), true) .0 .unwrap(); for i in extrinsics.iter() { executor - .call:: _>( - ext, - &runtime_code, - "BlockBuilder_apply_extrinsic", - &i.encode(), - true, - None, - ) + .call(ext, &runtime_code, "BlockBuilder_apply_extrinsic", &i.encode(), true) .0 .unwrap(); } - let header = match executor - .call:: _>( - ext, - &runtime_code, - "BlockBuilder_finalize_block", - &[0u8; 0], - true, - None, - ) - .0 - .unwrap() - { - NativeOrEncoded::Native(_) => unreachable!(), - NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), - }; + let header = Header::decode( + &mut &executor + .call(ext, &runtime_code, "BlockBuilder_finalize_block", &[0u8; 0], true) + .0 + .unwrap()[..], + ) + .unwrap(); let hash = header.blake2_256(); (Block { header, extrinsics }.encode(), hash.into()) @@ -218,13 +195,12 @@ fn bench_execute_block(c: &mut Criterion) { |test_ext| { for block in blocks.iter() { executor - .call:: _>( + .call( &mut test_ext.ext(), &runtime_code, "Core_execute_block", &block.0, use_native, - None, ) .0 .unwrap(); diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 3426df5872c35..35063f6933c98 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -21,7 +21,7 @@ use frame_support::{ weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Weight}, }; use frame_system::{self, AccountInfo, EventRecord, Phase}; -use sp_core::{storage::well_known_keys, traits::Externalities, NeverNativeValue}; +use sp_core::{storage::well_known_keys, traits::Externalities}; use sp_runtime::{ traits::Hash as HashT, transaction_validity::InvalidTransaction, ApplyExtrinsicResult, }; @@ -187,25 +187,14 @@ fn panic_execution_with_foreign_code_gives_error() { t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call:: _>( - 
&mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - true, - None, - ) - .0; + let r = + executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) + .0; assert!(r.is_ok()); - let v = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ) - .0 - .unwrap(); - let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); + let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true) + .0 + .unwrap(); + let r = ApplyExtrinsicResult::decode(&mut &v[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -219,25 +208,14 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - true, - None, - ) - .0; + let r = + executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) + .0; assert!(r.is_ok()); - let v = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ) - .0 - .unwrap(); - let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); + let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true) + .0 + .unwrap(); + let r = ApplyExtrinsicResult::decode(&mut &v[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -266,26 +244,14 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - true, - None, - ) - .0; + let r = + executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) + .0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ) - .0; + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; assert!(r.is_ok()); t.execute_with(|| { @@ -319,26 +285,14 @@ fn successful_execution_with_foreign_code_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - true, - None, - ) - .0; + let r = + executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) + .0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ) - .0; + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; assert!(r.is_ok()); t.execute_with(|| { @@ -361,15 +315,7 @@ fn full_native_block_import_works() { .get_dispatch_info() .weight; - executor_call:: _>( - &mut t, - "Core_execute_block", - &block1.0, - true, - None, - ) - .0 - .unwrap(); + executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -441,15 +387,7 @@ fn full_native_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); - executor_call:: _>( - &mut t, - "Core_execute_block", - &block2.0, - true, - None, - ) - .0 - .unwrap(); + executor_call(&mut t, 
"Core_execute_block", &block2.0, true).0.unwrap(); t.execute_with(|| { assert_eq!( @@ -579,15 +517,7 @@ fn full_wasm_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); - executor_call:: _>( - &mut t, - "Core_execute_block", - &block1.0, - false, - None, - ) - .0 - .unwrap(); + executor_call(&mut t, "Core_execute_block", &block1.0, false).0.unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -597,15 +527,7 @@ fn full_wasm_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); - executor_call:: _>( - &mut t, - "Core_execute_block", - &block2.0, - false, - None, - ) - .0 - .unwrap(); + executor_call(&mut t, "Core_execute_block", &block2.0, false).0.unwrap(); t.execute_with(|| { assert_eq!( @@ -757,9 +679,7 @@ fn deploying_wasm_contract_should_work() { let mut t = new_test_ext(compact_code_unwrap()); - executor_call:: _>(&mut t, "Core_execute_block", &b.0, false, None) - .0 - .unwrap(); + executor_call(&mut t, "Core_execute_block", &b.0, false).0.unwrap(); t.execute_with(|| { // Verify that the contract does exist by querying some of its storage items @@ -778,14 +698,8 @@ fn wasm_big_block_import_fails() { set_heap_pages(&mut t.ext(), 4); - let result = executor_call:: _>( - &mut t, - "Core_execute_block", - &block_with_size(42, 0, 120_000).0, - false, - None, - ) - .0; + let result = + executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, false).0; assert!(result.is_err()); // Err(Wasmi(Trap(Trap { kind: Host(AllocatorOutOfSpace) }))) } @@ -793,15 +707,9 @@ fn wasm_big_block_import_fails() { fn native_big_block_import_succeeds() { let mut t = new_test_ext(compact_code_unwrap()); - executor_call:: _>( - &mut t, - "Core_execute_block", - &block_with_size(42, 0, 120_000).0, - true, - None, - ) - .0 - .unwrap(); + executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, true) + .0 + .unwrap(); } #[test] @@ -812,15 +720,11 @@ fn native_big_block_import_fails_on_fallback() { // block. 
set_heap_pages(&mut t.ext(), 8); - assert!(executor_call:: _>( - &mut t, - "Core_execute_block", - &block_with_size(42, 0, 120_000).0, - false, - None, - ) - .0 - .is_err()); + assert!( + executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, false,) + .0 + .is_err() + ); } #[test] @@ -837,25 +741,17 @@ fn panic_execution_gives_error() { t.insert(>::hashed_key().to_vec(), 0_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call:: _>( + let r = executor_call( &mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), false, - None, ) .0; assert!(r.is_ok()); - let r = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - false, - None, - ) - .0 - .unwrap() - .into_encoded(); + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), false) + .0 + .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &r[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -885,12 +781,11 @@ fn successful_execution_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call:: _>( + let r = executor_call( &mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), false, - None, ) .0; assert!(r.is_ok()); @@ -900,16 +795,9 @@ fn successful_execution_gives_ok() { let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - false, - None, - ) - .0 - .unwrap() - .into_encoded(); + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), false) + .0 + .unwrap(); ApplyExtrinsicResult::decode(&mut &r[..]) .unwrap() .expect("Extrinsic could not be applied") diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index 407a1e09f8efb..803ec78329eea 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -27,7 +27,6 @@ use sp_core::{ crypto::KeyTypeId, sr25519::Signature, traits::{CodeExecutor, RuntimeCode}, - NativeOrEncoded, NeverNativeValue, }; use sp_runtime::{ traits::{BlakeTwo256, Header as HeaderT}, @@ -99,17 +98,12 @@ pub fn executor() -> NativeElseWasmExecutor { NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8, 2) } -pub fn executor_call< - R: Decode + Encode + PartialEq, - NC: FnOnce() -> std::result::Result> - + std::panic::UnwindSafe, ->( +pub fn executor_call( t: &mut TestExternalities, method: &str, data: &[u8], use_native: bool, - native_call: Option, -) -> (Result>, bool) { +) -> (Result>, bool) { let mut t = t.ext(); let code = t.storage(sp_core::storage::well_known_keys::CODE).unwrap(); @@ -120,7 +114,7 @@ pub fn executor_call< heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()), }; sp_tracing::try_init_simple(); - executor().call::(&mut t, &runtime_code, method, data, use_native, native_call) + executor().call(&mut t, &runtime_code, method, data, use_native) } pub fn new_test_ext(code: &[u8]) -> TestExternalities { @@ -171,29 +165,15 @@ pub fn construct_block( }; // execute the block to get the real header. - executor_call:: _>( - env, - "Core_initialize_block", - &header.encode(), - true, - None, - ) - .0 - .unwrap(); + executor_call(env, "Core_initialize_block", &header.encode(), true).0.unwrap(); for extrinsic in extrinsics.iter() { // Try to apply the `extrinsic`. It should be valid, in the sense that it passes // all pre-inclusion checks. 
- let r = executor_call:: _>( - env, - "BlockBuilder_apply_extrinsic", - &extrinsic.encode(), - true, - None, - ) - .0 - .expect("application of an extrinsic failed") - .into_encoded(); + let r = executor_call(env, "BlockBuilder_apply_extrinsic", &extrinsic.encode(), true) + .0 + .expect("application of an extrinsic failed"); + match ApplyExtrinsicResult::decode(&mut &r[..]) .expect("apply result deserialization failed") { @@ -202,19 +182,10 @@ pub fn construct_block( } } - let header = match executor_call:: _>( - env, - "BlockBuilder_finalize_block", - &[0u8; 0], - true, - None, + let header = Header::decode( + &mut &executor_call(env, "BlockBuilder_finalize_block", &[0u8; 0], true).0.unwrap()[..], ) - .0 - .unwrap() - { - NativeOrEncoded::Native(_) => unreachable!(), - NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), - }; + .unwrap(); let hash = header.blake2_256(); (Block { header, extrinsics }.encode(), hash.into()) diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index dd1318254d9b7..c5c2d01ca5c13 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -26,7 +26,6 @@ use kitchensink_runtime::{ }; use node_primitives::Balance; use node_testing::keyring::*; -use sp_core::NeverNativeValue; use sp_runtime::{traits::One, Perbill}; pub mod common; @@ -94,15 +93,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { ); // execute a big block. - executor_call:: _>( - &mut t, - "Core_execute_block", - &block1.0, - true, - None, - ) - .0 - .unwrap(); + executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); // weight multiplier is increased for next block. t.execute_with(|| { @@ -113,15 +104,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { }); // execute a big block. - executor_call:: _>( - &mut t, - "Core_execute_block", - &block2.0, - true, - None, - ) - .0 - .unwrap(); + executor_call(&mut t, "Core_execute_block", &block2.0, true).0.unwrap(); // weight multiplier is increased for next block. t.execute_with(|| { @@ -166,24 +149,12 @@ fn transaction_fee_is_correct() { function: Call::Balances(default_transfer_call()), }); - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - true, - None, - ) - .0; + let r = + executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) + .0; assert!(r.is_ok()); - let r = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt.clone()), - true, - None, - ) - .0; + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt.clone()), true).0; assert!(r.is_ok()); t.execute_with(|| { @@ -274,14 +245,7 @@ fn block_weight_capacity_report() { len / 1024 / 1024, ); - let r = executor_call:: _>( - &mut t, - "Core_execute_block", - &block.0, - true, - None, - ) - .0; + let r = executor_call(&mut t, "Core_execute_block", &block.0, true).0; println!(" || Result = {:?}", r); assert!(r.is_ok()); @@ -342,14 +306,7 @@ fn block_length_capacity_report() { len / 1024 / 1024, ); - let r = executor_call:: _>( - &mut t, - "Core_execute_block", - &block.0, - true, - None, - ) - .0; + let r = executor_call(&mut t, "Core_execute_block", &block.0, true).0; println!(" || Result = {:?}", r); assert!(r.is_ok()); diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 738f932a47bf0..949fd16a30704 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -18,13 +18,11 @@ //! 
A method call executor interface. -use codec::{Decode, Encode}; use sc_executor::{RuntimeVersion, RuntimeVersionOf}; -use sp_core::NativeOrEncoded; use sp_externalities::Extensions; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_state_machine::{ExecutionManager, ExecutionStrategy, OverlayedChanges, StorageProof}; -use std::{cell::RefCell, panic::UnwindSafe, result}; +use std::cell::RefCell; use crate::execution_extensions::ExecutionExtensions; use sp_api::{ProofRecorder, StorageTransactionCache}; @@ -68,11 +66,9 @@ pub trait CallExecutor: RuntimeVersionOf { /// of the execution context. fn contextual_call< EM: Fn( - Result, Self::Error>, - Result, Self::Error>, - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, >( &self, at: &BlockId, @@ -85,10 +81,9 @@ pub trait CallExecutor: RuntimeVersionOf { >, >, execution_manager: ExecutionManager, - native_call: Option, proof_recorder: &Option>, extensions: Option, - ) -> sp_blockchain::Result> + ) -> sp_blockchain::Result> where ExecutionManager: Clone; diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 574687312c82b..07a483bc3eaf2 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -201,11 +201,11 @@ impl ExecutionExtensions { /// /// Based on the execution context and capabilities it produces /// the right manager and extensions object to support desired set of APIs. - pub fn manager_and_extensions( + pub fn manager_and_extensions( &self, at: &BlockId, context: ExecutionContext, - ) -> (ExecutionManager>, Extensions) { + ) -> (ExecutionManager>, Extensions) { let manager = match context { ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(), ExecutionContext::Syncing => self.strategies.syncing.get_manager(), diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 7610c4c8f32e0..b164b427e306d 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -27,22 +27,18 @@ use std::{ marker::PhantomData, panic::{AssertUnwindSafe, UnwindSafe}, path::PathBuf, - result, sync::{ atomic::{AtomicU64, Ordering}, mpsc, Arc, }, }; -use codec::{Decode, Encode}; +use codec::Encode; use sc_executor_common::{ runtime_blob::RuntimeBlob, wasm_runtime::{AllocationStats, InvokeMethod, WasmInstance, WasmModule}, }; -use sp_core::{ - traits::{CodeExecutor, Externalities, RuntimeCode, RuntimeSpawn, RuntimeSpawnExt}, - NativeOrEncoded, -}; +use sp_core::traits::{CodeExecutor, Externalities, RuntimeCode, RuntimeSpawn, RuntimeSpawnExt}; use sp_externalities::ExternalitiesExt as _; use sp_tasks::new_async_externalities; use sp_version::{GetNativeVersion, NativeVersion, RuntimeVersion}; @@ -339,18 +335,14 @@ where { type Error = Error; - fn call< - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result> + UnwindSafe, - >( + fn call( &self, ext: &mut dyn Externalities, runtime_code: &RuntimeCode, method: &str, data: &[u8], _use_native: bool, - _native_call: Option, - ) -> (Result>, bool) { + ) -> (Result>, bool) { tracing::trace!( target: "executor", %method, @@ -363,7 +355,7 @@ where |module, mut instance, _onchain_version, mut ext| { with_externalities_safe(&mut **ext, move || { preregister_builtin_ext(module.clone()); - instance.call_export(method, data).map(NativeOrEncoded::Encoded) + 
instance.call_export(method, data) }) }, ); @@ -594,18 +586,14 @@ fn preregister_builtin_ext(module: Arc) { impl CodeExecutor for NativeElseWasmExecutor { type Error = Error; - fn call< - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result> + UnwindSafe, - >( + fn call( &self, ext: &mut dyn Externalities, runtime_code: &RuntimeCode, method: &str, data: &[u8], use_native: bool, - native_call: Option, - ) -> (Result>, bool) { + ) -> (Result>, bool) { tracing::trace!( target: "executor", function = %method, @@ -623,49 +611,31 @@ impl CodeExecutor for NativeElseWasmExecut let can_call_with = onchain_version.can_call_with(&self.native_version.runtime_version); - match (use_native, can_call_with, native_call) { - (_, false, _) | (false, _, _) => { - if !can_call_with { - tracing::trace!( - target: "executor", - native = %self.native_version.runtime_version, - chain = %onchain_version, - "Request for native execution failed", - ); - } - - with_externalities_safe(&mut **ext, move || { - preregister_builtin_ext(module.clone()); - instance.call_export(method, data).map(NativeOrEncoded::Encoded) - }) - }, - (true, true, Some(call)) => { - tracing::trace!( - target: "executor", - native = %self.native_version.runtime_version, - chain = %onchain_version, - "Request for native execution with native call succeeded" - ); - - used_native = true; - let res = with_externalities_safe(&mut **ext, move || (call)()) - .and_then(|r| r.map(NativeOrEncoded::Native).map_err(Error::ApiError)); - - Ok(res) - }, - _ => { + if use_native && can_call_with { + tracing::trace!( + target: "executor", + native = %self.native_version.runtime_version, + chain = %onchain_version, + "Request for native execution succeeded", + ); + + used_native = true; + Ok(with_externalities_safe(&mut **ext, move || D::dispatch(method, data))? + .ok_or_else(|| Error::MethodNotFound(method.to_owned()))) + } else { + if !can_call_with { tracing::trace!( target: "executor", native = %self.native_version.runtime_version, chain = %onchain_version, - "Request for native execution succeeded", + "Request for native execution failed", ); + } - used_native = true; - Ok(with_externalities_safe(&mut **ext, move || D::dispatch(method, data))? - .map(NativeOrEncoded::Encoded) - .ok_or_else(|| Error::MethodNotFound(method.to_owned()))) - }, + with_externalities_safe(&mut **ext, move || { + preregister_builtin_ext(module.clone()); + instance.call_export(method, data) + }) } }, ); diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index de851ac848919..e39436ec641d7 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -17,21 +17,17 @@ // along with this program. If not, see . 
use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; -use codec::{Decode, Encode}; use sc_client_api::{backend, call_executor::CallExecutor, HeaderBackend}; use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sp_api::{ProofRecorder, StorageTransactionCache}; -use sp_core::{ - traits::{CodeExecutor, RuntimeCode, SpawnNamed}, - NativeOrEncoded, NeverNativeValue, -}; +use sp_core::traits::{CodeExecutor, RuntimeCode, SpawnNamed}; use sp_externalities::Extensions; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_state_machine::{ backend::AsTrieBackend, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, StateMachine, StorageProof, }; -use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; +use std::{cell::RefCell, sync::Arc}; /// Call executor that executes methods locally, querying all required /// data from local backend. @@ -162,7 +158,7 @@ where sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) })?; - let return_data = StateMachine::new( + let mut sm = StateMachine::new( &state, &mut changes, &self.executor, @@ -172,22 +168,17 @@ where &runtime_code, self.spawn_handle.clone(), ) - .set_parent_hash(at_hash) - .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - strategy.get_manager(), - None, - )?; + .set_parent_hash(at_hash); - Ok(return_data.into_encoded()) + sm.execute_using_consensus_failure_handler(strategy.get_manager()) + .map_err(Into::into) } fn contextual_call< EM: Fn( - Result, Self::Error>, - Result, Self::Error>, - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, >( &self, at: &BlockId, @@ -196,10 +187,9 @@ where changes: &RefCell, storage_transaction_cache: Option<&RefCell>>, execution_manager: ExecutionManager, - native_call: Option, recorder: &Option>, extensions: Option, - ) -> Result, sp_blockchain::Error> + ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone, { @@ -242,10 +232,7 @@ where ) .with_storage_transaction_cache(storage_transaction_cache.as_deref_mut()) .set_parent_hash(at_hash); - state_machine.execute_using_consensus_failure_handler( - execution_manager, - native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), - ) + state_machine.execute_using_consensus_failure_handler(execution_manager) }, None => { let mut state_machine = StateMachine::new( @@ -260,10 +247,7 @@ where ) .with_storage_transaction_cache(storage_transaction_cache.as_deref_mut()) .set_parent_hash(at_hash); - state_machine.execute_using_consensus_failure_handler( - execution_manager, - native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), - ) + state_machine.execute_using_consensus_failure_handler(execution_manager) }, } .map_err(Into::into) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 9d6a2e0457a84..27561046c3481 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -22,7 +22,6 @@ use super::{ block_rules::{BlockRules, LookupResult as BlockLookupResult}, genesis, }; -use codec::{Decode, Encode}; use log::{info, trace, warn}; use parking_lot::{Mutex, RwLock}; use prometheus_endpoint::Registry; @@ -59,12 +58,9 @@ use sp_blockchain::{ use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; -use sp_core::{ - storage::{ - 
well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, Storage, StorageChild, - StorageData, StorageKey, - }, - NativeOrEncoded, +use sp_core::storage::{ + well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, Storage, StorageChild, StorageData, + StorageKey, }; #[cfg(feature = "test-helpers")] use sp_keystore::SyncCryptoStorePtr; @@ -85,9 +81,7 @@ use sp_trie::{CompactProof, StorageProof}; use std::{ collections::{hash_map::DefaultHasher, HashMap, HashSet}, marker::PhantomData, - panic::UnwindSafe, path::PathBuf, - result, sync::Arc, }; @@ -1659,27 +1653,23 @@ where { type StateBackend = B::State; - fn call_api_at< - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( + fn call_api_at( &self, - params: CallApiAtParams, - ) -> Result, sp_api::ApiError> { + params: CallApiAtParams, + ) -> Result, sp_api::ApiError> { let at = params.at; let (manager, extensions) = self.execution_extensions.manager_and_extensions(at, params.context); self.executor - .contextual_call:: _, _, _>( + .contextual_call( at, params.function, ¶ms.arguments, params.overlayed_changes, Some(params.storage_transaction_cache), manager, - params.native_call, params.recorder, Some(extensions), ) diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index e674e49eddbe5..5d159ec961c7f 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index ecdc18263432e..4671855431b27 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 5ab8fa0521699..7a26ead0f586a 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -449,28 +449,30 @@ impl<'a> ApiRuntimeImplToApiRuntimeApiImpl<'a> { } let res = (|| { - let version = #crate_::CallApiAt::<__SR_API_BLOCK__>::runtime_version_at(self.call, at)?; - - let params = #crate_::CallApiAtParams::<_, fn() -> _, _> { - at, - function: (*fn_name)(version), - native_call: None, - arguments: params, - overlayed_changes: &self.changes, - storage_transaction_cache: &self.storage_transaction_cache, - context, - recorder: &self.recorder, - }; - - #crate_::CallApiAt::<__SR_API_BLOCK__>::call_api_at::<#crate_::NeverNativeValue, _>( - self.call, - params, - ) - })(); + let version = #crate_::CallApiAt::<__SR_API_BLOCK__>::runtime_version_at( + self.call, + at, + )?; + + let params = #crate_::CallApiAtParams { + at, + function: (*fn_name)(version), + arguments: params, + overlayed_changes: &self.changes, + storage_transaction_cache: &self.storage_transaction_cache, + context, + recorder: &self.recorder, + }; + + #crate_::CallApiAt::<__SR_API_BLOCK__>::call_api_at( + self.call, + params, + ) + })(); self.commit_or_rollback(std::result::Result::is_ok(&res)); - res.map(#crate_::NativeOrEncoded::into_encoded) + res } }); diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 353ebd91cf5df..91d4b07a1cefc 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -83,8 +83,6 @@ use sp_core::OpaqueMetadata; pub use sp_core::{offchain, ExecutionContext}; #[doc(hidden)] #[cfg(feature = "std")] -pub use sp_core::{NativeOrEncoded, NeverNativeValue}; -#[cfg(feature = "std")] pub use sp_runtime::StateVersion; #[doc(hidden)] pub use sp_runtime::{ @@ -102,14 +100,12 @@ pub use sp_state_machine::{ backend::AsTrieBackend, Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, TrieBackend, TrieBackendBuilder, }; -#[cfg(feature = "std")] -use sp_std::result; #[doc(hidden)] pub use sp_std::{mem, slice}; #[doc(hidden)] pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; #[cfg(feature = "std")] -use std::{cell::RefCell, panic::UnwindSafe}; +use std::cell::RefCell; /// Maximum nesting level for extrinsics. pub const MAX_EXTRINSIC_DEPTH: u32 = 256; @@ -590,16 +586,11 @@ pub trait ApiExt { /// Parameters for [`CallApiAt::call_api_at`]. #[cfg(feature = "std")] -pub struct CallApiAtParams<'a, Block: BlockT, NC, Backend: StateBackend>> { +pub struct CallApiAtParams<'a, Block: BlockT, Backend: StateBackend>> { /// The block id that determines the state that should be setup when calling the function. pub at: &'a BlockId, /// The name of the function that should be called. pub function: &'static str, - /// An optional native call that calls the `function`. 
This is an optimization to call into a - /// native runtime without requiring to encode/decode the parameters. The native runtime can - /// still be called when this value is `None`, we then just fallback to encoding/decoding the - /// parameters. - pub native_call: Option, /// The encoded arguments of the function. pub arguments: Vec, /// The overlayed changes that are on top of the state. @@ -620,13 +611,10 @@ pub trait CallApiAt { /// Calls the given api function with the given encoded arguments at the given block and returns /// the encoded result. - fn call_api_at< - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( + fn call_api_at( &self, - params: CallApiAtParams, - ) -> Result, ApiError>; + params: CallApiAtParams, + ) -> Result, ApiError>; /// Returns the runtime version at the given block. fn runtime_version_at(&self, at: &BlockId) -> Result; diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index fadbdada1f004..fda7604d5337f 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -40,8 +40,6 @@ pub use serde; use serde::{Deserialize, Serialize}; use sp_runtime_interface::pass_by::{PassByEnum, PassByInner}; use sp_std::{ops::Deref, prelude::*}; -#[cfg(feature = "std")] -use std::borrow::Cow; pub use sp_debug_derive::RuntimeDebug; @@ -209,85 +207,6 @@ impl OpaquePeerId { } } -/// Something that is either a native or an encoded value. -#[cfg(feature = "std")] -pub enum NativeOrEncoded { - /// The native representation. - Native(R), - /// The encoded representation. - Encoded(Vec), -} - -#[cfg(feature = "std")] -impl From for NativeOrEncoded { - fn from(val: R) -> Self { - Self::Native(val) - } -} - -#[cfg(feature = "std")] -impl sp_std::fmt::Debug for NativeOrEncoded { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - hexdisplay::HexDisplay::from(&self.as_encoded().as_ref()).fmt(f) - } -} - -#[cfg(feature = "std")] -impl NativeOrEncoded { - /// Return the value as the encoded format. - pub fn as_encoded(&self) -> Cow<'_, [u8]> { - match self { - NativeOrEncoded::Encoded(e) => Cow::Borrowed(e.as_slice()), - NativeOrEncoded::Native(n) => Cow::Owned(n.encode()), - } - } - - /// Return the value as the encoded format. - pub fn into_encoded(self) -> Vec { - match self { - NativeOrEncoded::Encoded(e) => e, - NativeOrEncoded::Native(n) => n.encode(), - } - } -} - -#[cfg(feature = "std")] -impl PartialEq for NativeOrEncoded { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (NativeOrEncoded::Native(l), NativeOrEncoded::Native(r)) => l == r, - (NativeOrEncoded::Native(n), NativeOrEncoded::Encoded(e)) | - (NativeOrEncoded::Encoded(e), NativeOrEncoded::Native(n)) => - Some(n) == codec::Decode::decode(&mut &e[..]).ok().as_ref(), - (NativeOrEncoded::Encoded(l), NativeOrEncoded::Encoded(r)) => l == r, - } - } -} - -/// A value that is never in a native representation. -/// This is type is useful in conjunction with `NativeOrEncoded`. -#[cfg(feature = "std")] -#[derive(PartialEq)] -pub enum NeverNativeValue {} - -#[cfg(feature = "std")] -impl codec::Encode for NeverNativeValue { - fn encode(&self) -> Vec { - // The enum is not constructable, so this function should never be callable! 
- unreachable!() - } -} - -#[cfg(feature = "std")] -impl codec::EncodeLike for NeverNativeValue {} - -#[cfg(feature = "std")] -impl codec::Decode for NeverNativeValue { - fn decode(_: &mut I) -> Result { - Err("`NeverNativeValue` should never be decoded".into()) - } -} - /// Provide a simple 4 byte identifier for a type. pub trait TypeId { /// Simple 4 byte identifier. diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index 80e8963a2909d..c5149cc48c074 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -20,7 +20,6 @@ use std::{ borrow::Cow, fmt::{Debug, Display}, - panic::UnwindSafe, }; pub use sp_externalities::{Externalities, ExternalitiesExt}; @@ -32,18 +31,14 @@ pub trait CodeExecutor: Sized + Send + Sync + ReadRuntimeVersion + Clone + 'stat /// Call a given method in the runtime. Returns a tuple of the result (either the output data /// or an execution error) together with a `bool`, which is true if native execution was used. - fn call< - R: codec::Codec + PartialEq, - NC: FnOnce() -> Result> + UnwindSafe, - >( + fn call( &self, ext: &mut dyn Externalities, runtime_code: &RuntimeCode, method: &str, data: &[u8], use_native: bool, - native_call: Option, - ) -> (Result, Self::Error>, bool); + ) -> (Result, Self::Error>, bool); } /// Something that can fetch the runtime `:code`. diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 59488599883e9..4126aea478d5b 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -157,31 +157,28 @@ mod execution { use crate::backend::AsTrieBackend; use super::*; - use codec::{Codec, Decode, Encode}; + use codec::Codec; use hash_db::Hasher; use smallvec::SmallVec; use sp_core::{ hexdisplay::HexDisplay, storage::{ChildInfo, ChildType, PrefixedStorageKey}, traits::{CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed}, - NativeOrEncoded, NeverNativeValue, }; use sp_externalities::Extensions; use std::{ collections::{HashMap, HashSet}, fmt, - panic::UnwindSafe, - result, }; const PROOF_CLOSE_TRANSACTION: &str = "\ Closing a transaction that was started in this function. Client initiated transactions are protected from being closed by the runtime. qed"; - pub(crate) type CallResult = Result, E>; + pub(crate) type CallResult = Result, E>; /// Default handler of the execution manager. - pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; + pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H>; @@ -243,9 +240,7 @@ mod execution { impl ExecutionStrategy { /// Gets the corresponding manager for the execution strategy. - pub fn get_manager( - self, - ) -> ExecutionManager> { + pub fn get_manager(self) -> ExecutionManager> { match self { ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), @@ -265,19 +260,19 @@ mod execution { } /// Evaluate to ExecutionManager::NativeElseWasm, without having to figure out the type. - pub fn native_else_wasm() -> ExecutionManager> { + pub fn native_else_wasm() -> ExecutionManager> { ExecutionManager::NativeElseWasm } /// Evaluate to ExecutionManager::AlwaysWasm with trusted backend, without having to figure out /// the type. 
- fn always_wasm() -> ExecutionManager> { + fn always_wasm() -> ExecutionManager> { ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) } /// Evaluate ExecutionManager::AlwaysWasm with untrusted backend, without having to figure out /// the type. - fn always_untrusted_wasm() -> ExecutionManager> { + fn always_untrusted_wasm() -> ExecutionManager> { ExecutionManager::AlwaysWasm(BackendTrustLevel::Untrusted) } @@ -379,23 +374,10 @@ mod execution { pub fn execute(&mut self, strategy: ExecutionStrategy) -> Result, Box> { // We are not giving a native call and thus we are sure that the result can never be a // native value. - self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - strategy.get_manager(), - None, - ) - .map(NativeOrEncoded::into_encoded) + self.execute_using_consensus_failure_handler(strategy.get_manager()) } - fn execute_aux( - &mut self, - use_native: bool, - native_call: Option, - ) -> (CallResult, bool) - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result> - + UnwindSafe, - { + fn execute_aux(&mut self, use_native: bool) -> (CallResult, bool) { let mut cache = StorageTransactionCache::default(); let cache = match self.storage_transaction_cache.as_mut() { @@ -426,7 +408,6 @@ mod execution { self.method, self.call_data, use_native, - native_call, ); self.overlay @@ -444,26 +425,20 @@ mod execution { (result, was_native) } - fn execute_call_with_both_strategy( + fn execute_call_with_both_strategy( &mut self, - mut native_call: Option, on_consensus_failure: Handler, - ) -> CallResult + ) -> CallResult where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result> - + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult, - ) -> CallResult, + Handler: + FnOnce(CallResult, CallResult) -> CallResult, { self.overlay.start_transaction(); - let (result, was_native) = self.execute_aux(true, native_call.take()); + let (result, was_native) = self.execute_aux(true); if was_native { self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - let (wasm_result, _) = self.execute_aux(false, native_call); + let (wasm_result, _) = self.execute_aux(false); if (result.is_ok() && wasm_result.is_ok() && result.as_ref().ok() == wasm_result.as_ref().ok()) || @@ -479,25 +454,16 @@ mod execution { } } - fn execute_call_with_native_else_wasm_strategy( - &mut self, - mut native_call: Option, - ) -> CallResult - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result> - + UnwindSafe, - { + fn execute_call_with_native_else_wasm_strategy(&mut self) -> CallResult { self.overlay.start_transaction(); - let (result, was_native) = self.execute_aux(true, native_call.take()); + let (result, was_native) = self.execute_aux(true); if !was_native || result.is_ok() { self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); result } else { self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - let (wasm_result, _) = self.execute_aux(false, native_call); - wasm_result + self.execute_aux(false).0 } } @@ -510,35 +476,29 @@ mod execution { /// /// Returns the result of the executed function either in native representation `R` or /// in SCALE encoded representation. 
- pub fn execute_using_consensus_failure_handler( + pub fn execute_using_consensus_failure_handler( &mut self, manager: ExecutionManager, - mut native_call: Option, - ) -> Result, Box> + ) -> Result, Box> where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result> - + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult, - ) -> CallResult, + Handler: + FnOnce(CallResult, CallResult) -> CallResult, { let result = { match manager { - ExecutionManager::Both(on_consensus_failure) => self - .execute_call_with_both_strategy(native_call.take(), on_consensus_failure), + ExecutionManager::Both(on_consensus_failure) => + self.execute_call_with_both_strategy(on_consensus_failure), ExecutionManager::NativeElseWasm => - self.execute_call_with_native_else_wasm_strategy(native_call.take()), + self.execute_call_with_native_else_wasm_strategy(), ExecutionManager::AlwaysWasm(trust_level) => { let _abort_guard = match trust_level { BackendTrustLevel::Trusted => None, BackendTrustLevel::Untrusted => Some(sp_panic_handler::AbortGuard::never_abort()), }; - self.execute_aux(false, native_call).0 + self.execute_aux(false).0 }, - ExecutionManager::NativeWhenPossible => self.execute_aux(true, native_call).0, + ExecutionManager::NativeWhenPossible => self.execute_aux(true).0, } }; @@ -603,29 +563,23 @@ mod execution { let proving_backend = TrieBackendBuilder::wrap(trie_backend).with_recorder(Default::default()).build(); - let result = { - let mut sm = StateMachine::<_, H, Exec>::new( - &proving_backend, - overlay, - exec, - method, - call_data, - Extensions::default(), - runtime_code, - spawn_handle, - ); - - sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_wasm(), - None, - )? - }; + let result = StateMachine::<_, H, Exec>::new( + &proving_backend, + overlay, + exec, + method, + call_data, + Extensions::default(), + runtime_code, + spawn_handle, + ) + .execute_using_consensus_failure_handler::<_>(always_wasm())?; let proof = proving_backend .extract_proof() .expect("A recorder was set and thus, a storage proof can be extracted; qed"); - Ok((result.into_encoded(), proof)) + Ok((result, proof)) } /// Check execution proof, generated by `prove_execution` call. @@ -673,7 +627,7 @@ mod execution { Exec: CodeExecutor + Clone + 'static, Spawn: SpawnNamed + Send + 'static, { - let mut sm = StateMachine::<_, H, Exec>::new( + StateMachine::<_, H, Exec>::new( trie_backend, overlay, exec, @@ -682,13 +636,8 @@ mod execution { Extensions::default(), runtime_code, spawn_handle, - ); - - sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_untrusted_wasm(), - None, ) - .map(NativeOrEncoded::into_encoded) + .execute_using_consensus_failure_handler(always_untrusted_wasm()) } /// Generate storage read proof. 
@@ -1362,21 +1311,16 @@ mod tests { use super::{backend::AsTrieBackend, ext::Ext, *}; use crate::{execution::CallResult, in_memory_backend::new_in_mem_hash_key}; use assert_matches::assert_matches; - use codec::{Decode, Encode}; + use codec::Encode; use sp_core::{ map, storage::{ChildInfo, StateVersion}, testing::TaskExecutor, traits::{CodeExecutor, Externalities, RuntimeCode}, - NativeOrEncoded, NeverNativeValue, }; use sp_runtime::traits::BlakeTwo256; use sp_trie::trie_types::{TrieDBMutBuilderV0, TrieDBMutBuilderV1}; - use std::{ - collections::{BTreeMap, HashMap}, - panic::UnwindSafe, - result, - }; + use std::collections::{BTreeMap, HashMap}; #[derive(Clone)] struct DummyCodeExecutor { @@ -1388,28 +1332,20 @@ mod tests { impl CodeExecutor for DummyCodeExecutor { type Error = u8; - fn call< - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result> + UnwindSafe, - >( + fn call( &self, ext: &mut dyn Externalities, _: &RuntimeCode, _method: &str, _data: &[u8], use_native: bool, - native_call: Option, - ) -> (CallResult, bool) { + ) -> (CallResult, bool) { let using_native = use_native && self.native_available; - match (using_native, self.native_succeeds, self.fallback_succeeds, native_call) { - (true, true, _, Some(call)) => { - let res = sp_externalities::set_and_run_with_externalities(ext, call); - (res.map(NativeOrEncoded::Native).map_err(|_| 0), true) - }, - (true, true, _, None) | (false, _, true, None) => ( - Ok(NativeOrEncoded::Encoded(vec![ + match (using_native, self.native_succeeds, self.fallback_succeeds) { + (true, true, _) | (false, _, true) => ( + Ok(vec![ ext.storage(b"value1").unwrap()[0] + ext.storage(b"value2").unwrap()[0], - ])), + ]), using_native, ), _ => (Err(0), using_native), @@ -1510,13 +1446,10 @@ mod tests { ); assert!(state_machine - .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - ExecutionManager::Both(|we, _ne| { - consensus_failed = true; - we - }), - None, - ) + .execute_using_consensus_failure_handler(ExecutionManager::Both(|we, _ne| { + consensus_failed = true; + we + }),) .is_err()); assert!(consensus_failed); } @@ -2260,51 +2193,4 @@ mod tests { overlay.commit_transaction().unwrap(); assert_eq!(overlay.storage(b"ccc"), Some(None)); } - - #[test] - fn runtime_registered_extensions_are_removed_after_execution() { - let state_version = StateVersion::default(); - use sp_externalities::ExternalitiesExt; - sp_externalities::decl_extension! 
{ - struct DummyExt(u32); - } - - let backend = trie_backend::tests::test_trie(state_version, None, None); - let mut overlayed_changes = Default::default(); - let wasm_code = RuntimeCode::empty(); - - let mut state_machine = StateMachine::new( - &backend, - &mut overlayed_changes, - &DummyCodeExecutor { - native_available: true, - native_succeeds: true, - fallback_succeeds: false, - }, - "test", - &[], - Default::default(), - &wasm_code, - TaskExecutor::new(), - ); - - let run_state_machine = |state_machine: &mut StateMachine<_, _, _>| { - state_machine - .execute_using_consensus_failure_handler:: _, _, _>( - ExecutionManager::NativeWhenPossible, - Some(|| { - sp_externalities::with_externalities(|mut ext| { - ext.register_extension(DummyExt(2)).unwrap(); - }) - .unwrap(); - - Ok(()) - }), - ) - .unwrap(); - }; - - run_state_machine(&mut state_machine); - run_state_machine(&mut state_machine); - } } diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 77cd18c028364..45a6f66416d8c 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -322,7 +322,6 @@ mod tests { use sp_core::{ map, traits::{CodeExecutor, RuntimeCode}, - NeverNativeValue, }; use sp_io::{hashing::twox_128, TestExternalities}; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; @@ -406,14 +405,7 @@ mod tests { }; executor() - .call:: _>( - &mut ext, - &runtime_code, - "Core_execute_block", - &b.encode(), - false, - None, - ) + .call(&mut ext, &runtime_code, "Core_execute_block", &b.encode(), false) .0 .unwrap(); }) @@ -515,14 +507,7 @@ mod tests { }; executor() - .call:: _>( - &mut ext, - &runtime_code, - "Core_execute_block", - &b.encode(), - false, - None, - ) + .call(&mut ext, &runtime_code, "Core_execute_block", &b.encode(), false) .0 .unwrap(); }) From d5de897396d2e816b7eea94caecf5de179ed29a5 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 12 Sep 2022 15:27:11 +0100 Subject: [PATCH 117/348] Fuzz testing for nomination pools (#12002) * some additional tests and stuff * make sanity public * add some sort of fuzz test for pools * breaks every now and then * breaks every now and then * IT WORKS AND PASSES 100k TESTS * cleanup * safe id addition * fix assert_eq_error_rate * Update frame/nomination-pools/src/tests.rs Co-authored-by: Oliver Tale-Yazdi * Update frame/nomination-pools/src/tests.rs Co-authored-by: Oliver Tale-Yazdi * add some doc * Fix * ".git/.scripts/fmt.sh" 1 Co-authored-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> --- Cargo.lock | 77 ++- frame/nomination-pools/Cargo.toml | 2 + frame/nomination-pools/src/lib.rs | 49 +- frame/nomination-pools/src/mock.rs | 3 +- frame/nomination-pools/src/tests.rs | 537 ++++++++++++++++++ frame/support/src/traits/misc.rs | 15 +- primitives/runtime/src/lib.rs | 21 +- .../benchmarking-cli/src/machine/hardware.rs | 8 +- 8 files changed, 647 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f47ac8ba408b3..335e8b3526792 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -905,7 +905,7 @@ dependencies = [ "ansi_term", "clap 3.1.18", "node-cli", - "rand 0.8.4", + "rand 0.8.5", "sc-chain-spec", "sc-keystore", "sp-core", @@ -2038,7 +2038,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "parking_lot 0.12.1", - "rand 0.8.4", + "rand 0.8.5", "scale-info", ] @@ -2049,7 +2049,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - "rand 0.8.4", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -2143,7 +2143,7 @@ dependencies = [ "log", "memory-db", "parity-scale-codec", - "rand 0.8.4", + "rand 0.8.5", "rand_pcg 0.3.1", "sc-block-builder", "sc-cli", @@ -2215,7 +2215,7 @@ dependencies = [ "frame-support", "honggfuzz", "parity-scale-codec", - "rand 0.8.4", + "rand 0.8.5", "scale-info", "sp-arithmetic", "sp-npos-elections", @@ -3212,7 +3212,7 @@ dependencies = [ "jsonrpsee-types", "lazy_static", "parking_lot 0.12.1", - "rand 0.8.4", + "rand 0.8.5", "rustc-hash", "serde", "serde_json", @@ -3593,7 +3593,7 @@ dependencies = [ "log", "prost", "prost-build", - "rand 0.8.4", + "rand 0.8.5", ] [[package]] @@ -3620,7 +3620,7 @@ dependencies = [ "pin-project", "prost", "prost-build", - "rand 0.8.4", + "rand 0.8.5", "ring", "rw-stream-sink", "sha2 0.10.2", @@ -3767,7 +3767,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "rand 0.8.4", + "rand 0.8.5", "smallvec", "socket2", "void", @@ -3821,7 +3821,7 @@ dependencies = [ "log", "prost", "prost-build", - "rand 0.8.4", + "rand 0.8.5", "sha2 0.10.2", "snow", "static_assertions", @@ -3895,7 +3895,7 @@ dependencies = [ "prost", "prost-build", "prost-codec", - "rand 0.8.4", + "rand 0.8.5", "smallvec", "static_assertions", "thiserror", @@ -3918,7 +3918,7 @@ dependencies = [ "log", "prost", "prost-build", - "rand 0.8.4", + "rand 0.8.5", "sha2 0.10.2", "thiserror", "unsigned-varint", @@ -4076,7 +4076,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand 0.8.4", + "rand 0.8.5", "serde", "sha2 0.9.8", "typenum", @@ -4502,7 +4502,7 @@ dependencies = [ "num-complex", "num-rational 0.4.0", "num-traits", - "rand 0.8.4", + "rand 0.8.5", "rand_distr", "simba", "typenum", @@ -4525,7 +4525,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7d66043b25d4a6cccb23619d10c19c25304b355a7dccd4a8e11423dd2382146" dependencies = [ - "rand 0.8.4", + "rand 0.8.5", ] [[package]] @@ -4685,7 +4685,7 @@ dependencies = [ "pallet-transaction-payment", "parity-scale-codec", "platforms", - "rand 0.8.4", + "rand 0.8.5", "regex", "remote-externalities", "sc-authority-discovery", @@ -5341,7 +5341,7 @@ dependencies = [ "frame-election-provider-support", "honggfuzz", "pallet-bags-list", - "rand 0.8.4", + "rand 0.8.5", ] [[package]] @@ -5495,7 +5495,7 @@ dependencies = [ "pallet-utility", "parity-scale-codec", "pretty_assertions", - "rand 0.8.4", + "rand 0.8.5", "rand_pcg 0.3.1", "scale-info", "serde", @@ -5931,6 +5931,7 @@ dependencies = [ "log", "pallet-balances", "parity-scale-codec", + "rand 0.8.5", "scale-info", "sp-core", "sp-io", @@ -6554,7 +6555,7 @@ dependencies = [ "lz4", "memmap2 0.2.1", "parking_lot 0.11.2", - "rand 0.8.4", + "rand 0.8.5", "snap", ] @@ -7168,7 +7169,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ - "rand 0.8.4", + "rand 0.8.5", ] [[package]] @@ -7207,20 +7208,19 @@ dependencies = [ "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc 0.2.0", + "rand_hc", "rand_pcg 0.2.1", ] [[package]] name = "rand" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = 
"34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.0", "rand_core 0.6.2", - "rand_hc 0.3.0", ] [[package]] @@ -7268,7 +7268,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand 0.8.4", + "rand 0.8.5", ] [[package]] @@ -7280,15 +7280,6 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_hc" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" -dependencies = [ - "rand_core 0.6.2", -] - [[package]] name = "rand_pcg" version = "0.2.1" @@ -7907,7 +7898,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", "quickcheck", - "rand 0.8.4", + "rand 0.8.5", "sc-client-api", "sc-state-db", "sp-arithmetic", @@ -8279,7 +8270,7 @@ dependencies = [ "log", "parity-scale-codec", "parking_lot 0.12.1", - "rand 0.8.4", + "rand 0.8.5", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -9397,7 +9388,7 @@ dependencies = [ "futures", "httparse", "log", - "rand 0.8.4", + "rand 0.8.5", "sha-1 0.9.4", ] @@ -9877,7 +9868,7 @@ dependencies = [ "clap 3.1.18", "honggfuzz", "parity-scale-codec", - "rand 0.8.4", + "rand 0.8.5", "scale-info", "sp-npos-elections", "sp-runtime", @@ -10282,7 +10273,7 @@ dependencies = [ "lazy_static", "nalgebra", "num-traits", - "rand 0.8.4", + "rand 0.8.5", ] [[package]] @@ -11053,7 +11044,7 @@ dependencies = [ "ipnet", "lazy_static", "log", - "rand 0.8.4", + "rand 0.8.5", "smallvec", "thiserror", "tinyvec", @@ -11141,7 +11132,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if 1.0.0", "digest 0.10.3", - "rand 0.8.4", + "rand 0.7.3", "static_assertions", ] @@ -11859,7 +11850,7 @@ dependencies = [ "memfd", "memoffset", "paste 1.0.6", - "rand 0.8.4", + "rand 0.8.5", "rustix", "thiserror", "wasmtime-asm-macros", @@ -12151,7 +12142,7 @@ dependencies = [ "log", "nohash-hasher", "parking_lot 0.12.1", - "rand 0.8.4", + "rand 0.8.5", "static_assertions", ] diff --git a/frame/nomination-pools/Cargo.toml b/frame/nomination-pools/Cargo.toml index 1d613511eacec..ef8465e670bf6 100644 --- a/frame/nomination-pools/Cargo.toml +++ b/frame/nomination-pools/Cargo.toml @@ -29,10 +29,12 @@ log = { version = "0.4.0", default-features = false } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } +rand = { version = "0.8.5", features = ["small_rng"] } [features] runtime-benchmarks = [] try-runtime = [ "frame-support/try-runtime" ] +fuzz-test = [] default = ["std"] std = [ "codec/std", diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index a8c4e50daa0b5..9e872955669f8 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -1453,7 +1453,7 @@ pub mod pallet { PartialUnbondNotAllowedPermissionlessly, } - #[derive(Encode, Decode, PartialEq, TypeInfo, frame_support::PalletError)] + #[derive(Encode, Decode, PartialEq, TypeInfo, frame_support::PalletError, RuntimeDebug)] pub enum DefensiveError { /// There isn't enough space in the unbond pool. 
NotEnoughSpaceInUnbondPool, @@ -1758,8 +1758,8 @@ pub mod pallet { let bonded_pool = BondedPool::::get(member.pool_id) .defensive_ok_or::>(DefensiveError::PoolNotFound.into())?; - let mut sub_pools = SubPoolsStorage::::get(member.pool_id) - .defensive_ok_or::>(DefensiveError::SubPoolsNotFound.into())?; + let mut sub_pools = + SubPoolsStorage::::get(member.pool_id).ok_or(Error::::SubPoolsNotFound)?; bonded_pool.ok_to_withdraw_unbonded_with(&caller, &member_account)?; @@ -1887,10 +1887,10 @@ pub mod pallet { ); ensure!(!PoolMembers::::contains_key(&who), Error::::AccountBelongsToOtherPool); - let pool_id = LastPoolId::::mutate(|id| { - *id += 1; - *id - }); + let pool_id = LastPoolId::::try_mutate::<_, Error, _>(|id| { + *id = id.checked_add(1).ok_or(Error::::OverflowRisk)?; + Ok(*id) + })?; let mut bonded_pool = BondedPool::::new( pool_id, PoolRoles { @@ -2416,16 +2416,46 @@ impl Pallet { for id in reward_pools { let account = Self::create_reward_account(id); - assert!(T::Currency::free_balance(&account) >= T::Currency::minimum_balance()); + assert!( + T::Currency::free_balance(&account) >= T::Currency::minimum_balance(), + "reward pool of {id}: {:?} (ed = {:?})", + T::Currency::free_balance(&account), + T::Currency::minimum_balance() + ); } let mut pools_members = BTreeMap::::new(); + let mut pools_members_pending_rewards = BTreeMap::>::new(); let mut all_members = 0u32; PoolMembers::::iter().for_each(|(_, d)| { - assert!(BondedPools::::contains_key(d.pool_id)); + let bonded_pool = BondedPools::::get(d.pool_id).unwrap(); assert!(!d.total_points().is_zero(), "no member should have zero points: {:?}", d); *pools_members.entry(d.pool_id).or_default() += 1; all_members += 1; + + let reward_pool = RewardPools::::get(d.pool_id).unwrap(); + if !bonded_pool.points.is_zero() { + let current_rc = + reward_pool.current_reward_counter(d.pool_id, bonded_pool.points).unwrap(); + *pools_members_pending_rewards.entry(d.pool_id).or_default() += + d.pending_rewards(current_rc).unwrap(); + } // else this pool has been heavily slashed and cannot have any rewards anymore. + }); + + RewardPools::::iter_keys().for_each(|id| { + // the sum of the pending rewards must be less than the leftover balance. Since the + // reward math rounds down, we might accumulate some dust here. 
+ log!( + trace, + "pool {:?}, sum pending rewards = {:?}, remaining balance = {:?}", + id, + pools_members_pending_rewards.get(&id), + RewardPool::::current_balance(id) + ); + assert!( + RewardPool::::current_balance(id) >= + pools_members_pending_rewards.get(&id).map(|x| *x).unwrap_or_default() + ) }); BondedPools::::iter().for_each(|(id, inner)| { @@ -2470,6 +2500,7 @@ impl Pallet { sum_unbonding_balance ); } + Ok(()) } diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index 4428c28558f18..f1574b3684aab 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -4,6 +4,7 @@ use frame_support::{assert_ok, parameter_types, PalletId}; use frame_system::RawOrigin; use sp_runtime::FixedU128; +pub type BlockNumber = u64; pub type AccountId = u128; pub type Balance = u128; pub type RewardCounter = FixedU128; @@ -129,7 +130,7 @@ impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Origin = Origin; type Index = u64; - type BlockNumber = u64; + type BlockNumber = BlockNumber; type Call = Call; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 20ba2b76fe31a..362fece3f3daf 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -1774,6 +1774,54 @@ mod claim_payout { }) } + #[test] + fn bond_extra_pending_rewards_works() { + ExtBuilder::default().add_members(vec![(20, 20)]).build_and_execute(|| { + MaxPoolMembers::::set(None); + MaxPoolMembersPerPool::::set(None); + + // pool receives some rewards. + Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); + System::reset_events(); + + // 10 cashes it out, and bonds it. + { + assert_ok!(Pools::claim_payout(Origin::signed(10))); + let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap(); + // there is 30 points and 30 reward points in the system RC is 1. + assert_eq!(member.last_recorded_reward_counter, 1.into()); + assert_eq!(reward_pool.total_rewards_claimed, 10); + // these two are not updated -- only updated when the points change. + assert_eq!(reward_pool.last_recorded_total_payouts, 0); + assert_eq!(reward_pool.last_recorded_reward_counter, 0.into()); + + assert_eq!( + pool_events_since_last_call(), + vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10 }] + ); + } + + // 20 re-bonds it. + { + assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::Rewards)); + let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap(); + assert_eq!(member.last_recorded_reward_counter, 1.into()); + assert_eq!(reward_pool.total_rewards_claimed, 30); + // since points change, these two are updated. + assert_eq!(reward_pool.last_recorded_total_payouts, 30); + assert_eq!(reward_pool.last_recorded_reward_counter, 1.into()); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 20, pool_id: 1, payout: 20 }, + Event::Bonded { member: 20, pool_id: 1, bonded: 20, joined: false } + ] + ); + } + }) + } + #[test] fn unbond_updates_recorded_data() { ExtBuilder::default() @@ -3733,6 +3781,170 @@ mod withdraw_unbonded { }) } + #[test] + fn out_of_sync_unbonding_chunks() { + // the unbonding_eras in pool member are always fixed to the era at which they are unlocked, + // but the actual unbonding pools get pruned and might get combined in the no_era pool. 
+ // Pools are only merged when one unbonds, so we unbond a little bit on every era to + // simulate this. + ExtBuilder::default() + .add_members(vec![(20, 100), (30, 100)]) + .build_and_execute(|| { + System::reset_events(); + + // when + assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(Origin::signed(30), 30, 5)); + + // then member-local unbonding is pretty much in sync with the global pools. + assert_eq!( + PoolMembers::::get(20).unwrap().unbonding_eras, + member_unbonding_eras!(3 => 5) + ); + assert_eq!( + PoolMembers::::get(30).unwrap().unbonding_eras, + member_unbonding_eras!(3 => 5) + ); + assert_eq!( + SubPoolsStorage::::get(1).unwrap(), + SubPools { + no_era: Default::default(), + with_era: unbonding_pools_with_era! { + 3 => UnbondPool { points: 10, balance: 10 } + } + } + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5, era: 3 }, + Event::Unbonded { member: 30, pool_id: 1, points: 5, balance: 5, era: 3 }, + ] + ); + + // when + CurrentEra::set(1); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + + // then still member-local unbonding is pretty much in sync with the global pools. + assert_eq!( + PoolMembers::::get(20).unwrap().unbonding_eras, + member_unbonding_eras!(3 => 5, 4 => 5) + ); + assert_eq!( + SubPoolsStorage::::get(1).unwrap(), + SubPools { + no_era: Default::default(), + with_era: unbonding_pools_with_era! { + 3 => UnbondPool { points: 10, balance: 10 }, + 4 => UnbondPool { points: 5, balance: 5 } + } + } + ); + assert_eq!( + pool_events_since_last_call(), + vec![Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5, era: 4 }] + ); + + // when + CurrentEra::set(2); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + + // then still member-local unbonding is pretty much in sync with the global pools. + assert_eq!( + PoolMembers::::get(20).unwrap().unbonding_eras, + member_unbonding_eras!(3 => 5, 4 => 5, 5 => 5) + ); + assert_eq!( + SubPoolsStorage::::get(1).unwrap(), + SubPools { + no_era: Default::default(), + with_era: unbonding_pools_with_era! { + 3 => UnbondPool { points: 10, balance: 10 }, + 4 => UnbondPool { points: 5, balance: 5 }, + 5 => UnbondPool { points: 5, balance: 5 } + } + } + ); + assert_eq!( + pool_events_since_last_call(), + vec![Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5, era: 5 }] + ); + + // when + CurrentEra::set(5); + assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + + // then + assert_eq!( + PoolMembers::::get(20).unwrap().unbonding_eras, + member_unbonding_eras!(3 => 5, 4 => 5, 5 => 5, 8 => 5) + ); + assert_eq!( + SubPoolsStorage::::get(1).unwrap(), + SubPools { + // era 3 is merged into no_era. + no_era: UnbondPool { points: 10, balance: 10 }, + with_era: unbonding_pools_with_era! { + 4 => UnbondPool { points: 5, balance: 5 }, + 5 => UnbondPool { points: 5, balance: 5 }, + 8 => UnbondPool { points: 5, balance: 5 } + } + } + ); + assert_eq!( + pool_events_since_last_call(), + vec![Event::Unbonded { member: 20, pool_id: 1, points: 5, balance: 5, era: 8 }] + ); + + // now we start withdrawing unlocked bonds. + + // when + assert_ok!(Pools::withdraw_unbonded(Origin::signed(20), 20, 0)); + // then + assert_eq!( + PoolMembers::::get(20).unwrap().unbonding_eras, + member_unbonding_eras!(8 => 5) + ); + assert_eq!( + SubPoolsStorage::::get(1).unwrap(), + SubPools { + // era 3 is merged into no_era. + no_era: UnbondPool { points: 5, balance: 5 }, + with_era: unbonding_pools_with_era! 
{ + 8 => UnbondPool { points: 5, balance: 5 } + } + } + ); + assert_eq!( + pool_events_since_last_call(), + vec![Event::Withdrawn { member: 20, pool_id: 1, points: 15, balance: 15 }] + ); + + // when + assert_ok!(Pools::withdraw_unbonded(Origin::signed(30), 30, 0)); + // then + assert_eq!( + PoolMembers::::get(30).unwrap().unbonding_eras, + member_unbonding_eras!() + ); + assert_eq!( + SubPoolsStorage::::get(1).unwrap(), + SubPools { + // era 3 is merged into no_era. + no_era: Default::default(), + with_era: unbonding_pools_with_era! { + 8 => UnbondPool { points: 5, balance: 5 } + } + } + ); + assert_eq!( + pool_events_since_last_call(), + vec![Event::Withdrawn { member: 30, pool_id: 1, points: 5, balance: 5 }] + ); + }) + } + #[test] fn full_multi_step_withdrawing_depositor() { ExtBuilder::default().ed(1).build_and_execute(|| { @@ -4768,3 +4980,328 @@ mod reward_counter_precision { }); } } + +// NOTE: run this with debug_assertions, but in release mode. +#[cfg(feature = "fuzz-test")] +mod fuzz_test { + use super::*; + use crate::pallet::{Call as PoolsCall, Event as PoolsEvents}; + use frame_support::traits::UnfilteredDispatchable; + use rand::{seq::SliceRandom, thread_rng, Rng}; + use sp_runtime::{assert_eq_error_rate, Perquintill}; + + const ERA: BlockNumber = 1000; + const MAX_ED_MULTIPLE: Balance = 10_000; + const MIN_ED_MULTIPLE: Balance = 10; + + // not quite elegant, just to make it available in random_signed_origin. + const REWARD_AGENT_ACCOUNT: AccountId = 42; + + /// Grab random accounts, either known ones, or new ones. + fn random_signed_origin(rng: &mut R) -> (Origin, AccountId) { + let count = PoolMembers::::count(); + if rng.gen::() && count > 0 { + // take an existing account. + let skip = rng.gen_range(0..count as usize); + + // this is tricky: the account might be our reward agent, which we never want to be + // randomly chosen here. Try another one, or, if it is only our agent, return a random + // one nonetheless. + let candidate = PoolMembers::::iter_keys().skip(skip).take(1).next().unwrap(); + let acc = + if candidate == REWARD_AGENT_ACCOUNT { rng.gen::() } else { candidate }; + + (Origin::signed(acc), acc) + } else { + // create a new account + let acc = rng.gen::(); + (Origin::signed(acc), acc) + } + } + + fn random_ed_multiple(rng: &mut R) -> Balance { + let multiple = rng.gen_range(MIN_ED_MULTIPLE..MAX_ED_MULTIPLE); + ExistentialDeposit::get() * multiple + } + + fn fund_account(rng: &mut R, account: &AccountId) { + let target_amount = random_ed_multiple(rng); + if let Some(top_up) = target_amount.checked_sub(Balances::free_balance(account)) { + let _ = Balances::deposit_creating(account, top_up); + } + assert!(Balances::free_balance(account) >= target_amount); + } + + fn random_existing_pool(mut rng: &mut R) -> Option { + BondedPools::::iter_keys().collect::>().choose(&mut rng).map(|x| *x) + } + + fn random_call(mut rng: &mut R) -> (crate::pallet::Call, Origin) { + let op = rng.gen::(); + let mut op_count = + as frame_support::dispatch::GetCallName>::get_call_names() + .len(); + // Exclude set_state, set_metadata, set_configs, update_roles and chill. 
+ op_count -= 5; + + match op % op_count { + 0 => { + // join + let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); + let (origin, who) = random_signed_origin(&mut rng); + fund_account(&mut rng, &who); + let amount = random_ed_multiple(&mut rng); + (PoolsCall::::join { amount, pool_id }, origin) + }, + 1 => { + // bond_extra + let (origin, who) = random_signed_origin(&mut rng); + let extra = if rng.gen::() { + BondExtra::Rewards + } else { + fund_account(&mut rng, &who); + let amount = random_ed_multiple(&mut rng); + BondExtra::FreeBalance(amount) + }; + (PoolsCall::::bond_extra { extra }, origin) + }, + 2 => { + // claim_payout + let (origin, _) = random_signed_origin(&mut rng); + (PoolsCall::::claim_payout {}, origin) + }, + 3 => { + // unbond + let (origin, who) = random_signed_origin(&mut rng); + let amount = random_ed_multiple(&mut rng); + (PoolsCall::::unbond { member_account: who, unbonding_points: amount }, origin) + }, + 4 => { + // pool_withdraw_unbonded + let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); + let (origin, _) = random_signed_origin(&mut rng); + (PoolsCall::::pool_withdraw_unbonded { pool_id, num_slashing_spans: 0 }, origin) + }, + 5 => { + // withdraw_unbonded + let (origin, who) = random_signed_origin(&mut rng); + ( + PoolsCall::::withdraw_unbonded { + member_account: who, + num_slashing_spans: 0, + }, + origin, + ) + }, + 6 => { + // create + let (origin, who) = random_signed_origin(&mut rng); + let amount = random_ed_multiple(&mut rng); + fund_account(&mut rng, &who); + let root = who.clone(); + let state_toggler = who.clone(); + let nominator = who.clone(); + (PoolsCall::::create { amount, root, state_toggler, nominator }, origin) + }, + 7 => { + // nominate + let (origin, _) = random_signed_origin(&mut rng); + let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); + let validators = Default::default(); + (PoolsCall::::nominate { pool_id, validators }, origin) + }, + _ => unreachable!(), + } + } + + #[derive(Default)] + struct RewardAgent { + who: AccountId, + pool_id: Option, + expected_reward: Balance, + } + + // TODO: inject some slashes into the game. + impl RewardAgent { + fn new(who: AccountId) -> Self { + Self { who, ..Default::default() } + } + + fn join(&mut self) { + if self.pool_id.is_some() { + return + } + let pool_id = LastPoolId::::get(); + let amount = 10 * ExistentialDeposit::get(); + let origin = Origin::signed(self.who); + let _ = Balances::deposit_creating(&self.who, 10 * amount); + self.pool_id = Some(pool_id); + log::info!(target: "reward-agent", "🤖 reward agent joining in {} with {}", pool_id, amount); + assert_ok!(PoolsCall::join:: { amount, pool_id }.dispatch_bypass_filter(origin)); + } + + fn claim_payout(&mut self) { + // 10 era later, we claim our payout. We expect our income to be roughly what we + // calculated. 
+ if !PoolMembers::::contains_key(&self.who) { + log!(warn, "reward agent is not in the pool yet, cannot claim"); + return + } + let pre = Balances::free_balance(&42); + let origin = Origin::signed(42); + assert_ok!(PoolsCall::::claim_payout {}.dispatch_bypass_filter(origin)); + let post = Balances::free_balance(&42); + + let income = post - pre; + log::info!( + target: "reward-agent", "🤖 CLAIM: actual: {}, expected: {}", + income, + self.expected_reward, + ); + assert_eq_error_rate!(income, self.expected_reward, 10); + self.expected_reward = 0; + } + } + + #[test] + fn fuzz_test() { + let mut reward_agent = RewardAgent::new(42); + sp_tracing::try_init_simple(); + // NOTE: use this to get predictable (non)randomness: + // use::{rngs::SmallRng, SeedableRng}; + // let mut rng = SmallRng::from_seed([0u8; 32]); + let mut rng = thread_rng(); + let mut ext = sp_io::TestExternalities::new_empty(); + // NOTE: sadly events don't fulfill the requirements of hashmap or btreemap. + let mut events_histogram = Vec::<(PoolsEvents, u32)>::default(); + let mut iteration = 0 as BlockNumber; + let mut ok = 0; + let mut err = 0; + + ext.execute_with(|| { + MaxPoolMembers::::set(Some(10_000)); + MaxPoolMembersPerPool::::set(Some(1000)); + MaxPools::::set(Some(1_000)); + + MinCreateBond::::set(10 * ExistentialDeposit::get()); + MinJoinBond::::set(5 * ExistentialDeposit::get()); + System::set_block_number(1); + }); + + ExistentialDeposit::set(10u128.pow(12u32)); + BondingDuration::set(8); + + loop { + ext.execute_with(|| { + iteration += 1; + let (call, origin) = random_call(&mut rng); + let outcome = call.clone().dispatch_bypass_filter(origin.clone()); + + match outcome { + Ok(_) => ok += 1, + Err(_) => err += 1, + }; + + log!( + debug, + "iteration {}, call {:?}, origin {:?}, outcome: {:?}, so far {} ok {} err", + iteration, + call, + origin, + outcome, + ok, + err, + ); + + // possibly join the reward_agent + if iteration > ERA / 2 && BondedPools::::count() > 0 { + reward_agent.join(); + } + // and possibly roughly every 4 era, trigger payout for the agent. Doing this more + // frequent is also harmless. + if rng.gen_range(0..(4 * ERA)) == 0 { + reward_agent.claim_payout(); + } + + // execute sanity checks at a fixed interval, possibly on every block. + if iteration % + (std::env::var("SANITY_CHECK_INTERVAL") + .ok() + .and_then(|x| x.parse::().ok())) + .unwrap_or(1) == 0 + { + log!(info, "running sanity checks at {}", iteration); + Pools::do_try_state(u8::MAX).unwrap(); + } + + // collect and reset events. + System::events() + .into_iter() + .map(|r| r.event) + .filter_map( + |e| if let mock::Event::Pools(inner) = e { Some(inner) } else { None }, + ) + .for_each(|e| { + if let Some((_, c)) = events_histogram + .iter_mut() + .find(|(x, _)| std::mem::discriminant(x) == std::mem::discriminant(&e)) + { + *c += 1; + } else { + events_histogram.push((e, 1)) + } + }); + System::reset_events(); + + // trigger an era change, and check the status of the reward agent. + if iteration % ERA == 0 { + CurrentEra::mutate(|c| *c += 1); + BondedPools::::iter().for_each(|(id, _)| { + let amount = random_ed_multiple(&mut rng); + let _ = + Balances::deposit_creating(&Pools::create_reward_account(id), amount); + // if we just paid out the reward agent, let's calculate how much we expect + // our reward agent to have earned. 
+ if reward_agent.pool_id.map_or(false, |mid| mid == id) { + let all_points = BondedPool::::get(id).map(|p| p.points).unwrap(); + let member_points = + PoolMembers::::get(reward_agent.who).map(|m| m.points).unwrap(); + let agent_share = Perquintill::from_rational(member_points, all_points); + log::info!( + target: "reward-agent", + "🤖 REWARD = amount = {:?}, ratio: {:?}, share {:?}", + amount, + agent_share, + agent_share * amount, + ); + reward_agent.expected_reward += agent_share * amount; + } + }); + + log!( + info, + "iteration {}, {} pools, {} members, {} ok {} err, events = {:?}", + iteration, + BondedPools::::count(), + PoolMembers::::count(), + ok, + err, + events_histogram + .iter() + .map(|(x, c)| ( + format!("{:?}", x) + .split(" ") + .map(|x| x.to_string()) + .collect::>() + .first() + .cloned() + .unwrap(), + c, + )) + .collect::>(), + ); + } + }); + } + } +} diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 0f781d0bbd4cf..59d5ec067baed 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -151,10 +151,10 @@ pub trait DefensiveOption { /// Defensively transform this option to a result, mapping `None` to the return value of an /// error closure. - fn defensive_ok_or_else E>(self, err: F) -> Result; + fn defensive_ok_or_else E>(self, err: F) -> Result; /// Defensively transform this option to a result, mapping `None` to a default value. - fn defensive_ok_or(self, err: E) -> Result; + fn defensive_ok_or(self, err: E) -> Result; /// Exactly the same as `map`, but it prints the appropriate warnings if the value being mapped /// is `None`. @@ -318,16 +318,17 @@ impl DefensiveOption for Option { ) } - fn defensive_ok_or_else E>(self, err: F) -> Result { + fn defensive_ok_or_else E>(self, err: F) -> Result { self.ok_or_else(|| { - defensive!(); - err() + let err_value = err(); + defensive!(err_value); + err_value }) } - fn defensive_ok_or(self, err: E) -> Result { + fn defensive_ok_or(self, err: E) -> Result { self.ok_or_else(|| { - defensive!(); + defensive!(err); err }) } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 7006a13e415e4..9c2b691f64faa 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -36,6 +36,8 @@ pub use sp_std; #[doc(hidden)] pub use paste; +#[doc(hidden)] +pub use sp_arithmetic::traits::Saturating; #[doc(hidden)] pub use sp_application_crypto as app_crypto; @@ -825,7 +827,24 @@ pub fn verify_encoded_lazy( macro_rules! assert_eq_error_rate { ($x:expr, $y:expr, $error:expr $(,)?) => { assert!( - ($x) >= (($y) - ($error)) && ($x) <= (($y) + ($error)), + ($x >= $crate::Saturating::saturating_sub($y, $error)) && + ($x <= $crate::Saturating::saturating_add($y, $error)), + "{:?} != {:?} (with error rate {:?})", + $x, + $y, + $error, + ); + }; +} + +/// Same as [`assert_eq_error_rate`], but intended to be used with floating point number, or +/// generally those who do not have over/underflow potentials. +#[macro_export] +#[cfg(feature = "std")] +macro_rules! assert_eq_error_rate_float { + ($x:expr, $y:expr, $error:expr $(,)?) 
=> { + assert!( + ($x >= $y - $error) && ($x <= $y + $error), "{:?} != {:?} (with error rate {:?})", $x, $y, diff --git a/utils/frame/benchmarking-cli/src/machine/hardware.rs b/utils/frame/benchmarking-cli/src/machine/hardware.rs index 5c62660cc7cf4..97960de99c4bf 100644 --- a/utils/frame/benchmarking-cli/src/machine/hardware.rs +++ b/utils/frame/benchmarking-cli/src/machine/hardware.rs @@ -161,7 +161,7 @@ impl fmt::Display for Throughput { #[cfg(test)] mod tests { use super::*; - use sp_runtime::assert_eq_error_rate; + use sp_runtime::assert_eq_error_rate_float; /// `SUBSTRATE_REFERENCE_HARDWARE` can be en- and decoded. #[test] @@ -179,9 +179,9 @@ mod tests { const EPS: f64 = 0.1; let gib = Throughput::GiBs(14.324); - assert_eq_error_rate!(14.324, gib.to_gibs(), EPS); - assert_eq_error_rate!(14667.776, gib.to_mibs(), EPS); - assert_eq_error_rate!(14667.776 * 1024.0, gib.to_kibs(), EPS); + assert_eq_error_rate_float!(14.324, gib.to_gibs(), EPS); + assert_eq_error_rate_float!(14667.776, gib.to_mibs(), EPS); + assert_eq_error_rate_float!(14667.776 * 1024.0, gib.to_kibs(), EPS); assert_eq!("14.32 GiB/s", gib.to_string()); assert_eq!("14.32 GiB/s", gib.normalize().to_string()); From c997a62ee6e4efcfa0d7850700f315f4af39ffb4 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Mon, 12 Sep 2022 18:37:27 +0200 Subject: [PATCH 118/348] Add `ConstFeeMultiplier` to the transaction payment pallet (#12222) * fix: FeeMultiplierUpdate * fix: cargo fmt * fix: rustdoc * Revert "fix: rustdoc" This reverts commit 96b6ad80a4cd4d856cf5a830889858c4dd4c385b. * Revert "fix: cargo fmt" This reverts commit 13016527bdbc53d9484642d6b52430550c0efc55. * Revert "fix: FeeMultiplierUpdate" This reverts commit 2cbddd0b85e0293d0eeda859807ddf70cee29067. * feat: add ConstFeeMultiplier * fix: use cConstFeeMultiplier in the template node --- bin/node-template/runtime/src/lib.rs | 12 +++++++++--- frame/transaction-payment/src/lib.rs | 27 ++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index e28a3bb2adb9d..e9c2800ffa3f5 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -14,7 +14,9 @@ use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify}, + traits::{ + AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, One, Verify, + }, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; @@ -38,7 +40,7 @@ pub use frame_support::{ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; pub use pallet_timestamp::Call as TimestampCall; -use pallet_transaction_payment::CurrencyAdapter; +use pallet_transaction_payment::{ConstFeeMultiplier, CurrencyAdapter, Multiplier}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; @@ -251,13 +253,17 @@ impl pallet_balances::Config for Runtime { type WeightInfo = pallet_balances::weights::SubstrateWeight; } +parameter_types! 
{ + pub FeeMultiplier: Multiplier = Multiplier::one(); +} + impl pallet_transaction_payment::Config for Runtime { type Event = Event; type OnChargeTransaction = CurrencyAdapter; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; type LengthToFee = IdentityFee; - type FeeMultiplierUpdate = (); + type FeeMultiplierUpdate = ConstFeeMultiplier; } impl pallet_sudo::Config for Runtime { diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index cbdc9dcb1e846..b3c53cc048bad 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -226,6 +226,30 @@ where } } +/// A struct to make the fee multiplier a constant +pub struct ConstFeeMultiplier>(sp_std::marker::PhantomData); + +impl> MultiplierUpdate for ConstFeeMultiplier { + fn min() -> Multiplier { + M::get() + } + fn target() -> Perquintill { + Default::default() + } + fn variability() -> Multiplier { + Default::default() + } +} + +impl Convert for ConstFeeMultiplier +where + M: Get, +{ + fn convert(_previous: Multiplier) -> Multiplier { + Self::min() + } +} + /// Storage releases of the pallet. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] enum Releases { @@ -371,7 +395,8 @@ pub mod pallet { // add 1 percent; let addition = target / 100; if addition == Weight::zero() { - // this is most likely because in a test setup we set everything to (). + // this is most likely because in a test setup we set everything to () + // or to `ConstFeeMultiplier`. return } From 7cb78ff1b0df2ac947e8198890b73f5b7f741e0b Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 12 Sep 2022 20:48:10 +0300 Subject: [PATCH 119/348] Don't run `cargo-check-benches` for `master` based downstream staging pipelines (#12233) --- .gitlab-ci.yml | 12 ++++++++++++ scripts/ci/gitlab/pipeline/test.yml | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 765112bfa9e62..a639505b3bb03 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -150,6 +150,18 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs +# handle the specific case where benches could store incorrect bench data because of the downstream staging runs +# exclude cargo-check-benches from such runs +.test-refs-no-trigger-prs-only-check-benches: + rules: + - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "parent_pipeline" && $CI_IMAGE =~ /staging$/ + when: never + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" + - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + .test-refs-wasmer-sandbox: rules: - if: $CI_PIPELINE_SOURCE == "web" diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index 50916a37fcf55..7be445b0bca98 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -59,7 +59,7 @@ cargo-check-benches: CI_JOB_NAME: "cargo-check-benches" extends: - .docker-env - - .test-refs + - .test-refs-no-trigger-prs-only-check-benches - .collect-artifacts - .pipeline-stopper-artifacts before_script: From dd1f1b5f291aa2325eddf5f26037b84cfbbe0957 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 12 Sep 2022 19:10:32 +0100 Subject: [PATCH 120/348] Expose some of the staking miner types over metadata (#12245) * Expose some of the staking miner types over metadata * Update 
frame/election-provider-multi-phase/src/lib.rs Co-authored-by: Emily Ostbo <47109040+EmilyOstbo@users.noreply.github.com> Co-authored-by: Emily Ostbo <47109040+EmilyOstbo@users.noreply.github.com> --- .../election-provider-multi-phase/src/lib.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index bfe5cb2e96728..a9ce35499a329 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -708,6 +708,25 @@ pub mod pallet { type WeightInfo: WeightInfo; } + // Expose miner configs over the metadata such that they can be re-implemented. + #[pallet::extra_constants] + impl Pallet { + #[pallet::constant_name(MinerMaxLength)] + fn max_length() -> u32 { + ::MaxLength::get() + } + + #[pallet::constant_name(MinerMaxWeight)] + fn max_weight() -> Weight { + ::MaxWeight::get() + } + + #[pallet::constant_name(MinerMaxVotesPerVoter)] + fn max_votes_per_voter() -> u32 { + ::MaxVotesPerVoter::get() + } + } + #[pallet::hooks] impl Hooks> for Pallet { fn on_initialize(now: T::BlockNumber) -> Weight { From f1c60e529a6c1d158a0a37fe0a8daf887b2b9f74 Mon Sep 17 00:00:00 2001 From: yjh Date: Tue, 13 Sep 2022 05:34:43 +0800 Subject: [PATCH 121/348] extract some grandpa types to Primitives crate (#12208) * extract some grandpa types to primitives * fmt * fmt --- client/finality-grandpa/src/aux_schema.rs | 10 +-- .../src/communication/gossip.rs | 16 ++--- .../finality-grandpa/src/communication/mod.rs | 27 ++++---- client/finality-grandpa/src/environment.rs | 62 +++++++++++-------- client/finality-grandpa/src/finality_proof.rs | 9 ++- client/finality-grandpa/src/justification.rs | 57 ++++++++++++----- client/finality-grandpa/src/lib.rs | 53 +++------------- client/finality-grandpa/src/tests.rs | 2 +- client/finality-grandpa/src/until_imported.rs | 21 ++++--- frame/transaction-payment/rpc/src/lib.rs | 3 +- primitives/finality-grandpa/src/lib.rs | 62 ++++++++++++++++++- 11 files changed, 197 insertions(+), 125 deletions(-) diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 25ed4a3f490e0..235453ea35df1 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -191,7 +191,7 @@ where "state is for completed round; completed rounds must have a prevote ghost; qed.", ); - let mut current_rounds = CurrentRounds::new(); + let mut current_rounds = CurrentRounds::::new(); current_rounds.insert(last_round_number + 1, HasVoted::No); let set_state = VoterSetState::Live { @@ -255,7 +255,7 @@ where let base = set_state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - let mut current_rounds = CurrentRounds::new(); + let mut current_rounds = CurrentRounds::::new(); current_rounds.insert(last_round_number + 1, HasVoted::No); VoterSetState::Live { @@ -500,7 +500,7 @@ mod test { use super::*; use sp_core::{crypto::UncheckedFrom, H256}; use sp_finality_grandpa::AuthorityId; - use substrate_test_runtime_client; + use substrate_test_runtime_client::{self, runtime::Block}; fn dummy_id() -> AuthorityId { AuthorityId::unchecked_from([1; 32]) @@ -574,7 +574,7 @@ mod test { .unwrap(), ); - let mut current_rounds = CurrentRounds::new(); + let mut current_rounds = CurrentRounds::::new(); current_rounds.insert(round_number + 1, HasVoted::No); assert_eq!( @@ -667,7 +667,7 @@ mod test { .unwrap(), ); - let mut current_rounds 
= CurrentRounds::new(); + let mut current_rounds = CurrentRounds::::new(); current_rounds.insert(round_number + 1, HasVoted::No); assert_eq!( diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 5f94a4d1b65be..95efedf7b23b7 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -350,7 +350,7 @@ pub(super) struct VoteMessage { /// The voter set ID this message is from. pub(super) set_id: SetId, /// The message itself. - pub(super) message: SignedMessage, + pub(super) message: SignedMessage, } /// Network level commit message with topic information. @@ -361,7 +361,7 @@ pub(super) struct FullCommitMessage { /// The voter set ID this message is from. pub(super) set_id: SetId, /// The compact commit message. - pub(super) message: CompactCommit, + pub(super) message: CompactCommit, } /// V1 neighbor packet. Neighbor packets are sent from nodes to their peers @@ -406,7 +406,7 @@ pub(super) struct FullCatchUpMessage { /// The voter set ID this message is from. pub(super) set_id: SetId, /// The compact commit message. - pub(super) message: CatchUp, + pub(super) message: CatchUp, } /// Misbehavior that peers can perform. @@ -1071,7 +1071,7 @@ impl Inner { let (base_hash, base_number) = last_completed_round.base; - let catch_up = CatchUp:: { + let catch_up = CatchUp:: { round_number: last_completed_round.number, prevotes, precommits, @@ -1651,8 +1651,8 @@ mod tests { use crate::communication; use sc_network::config::Role; use sc_network_gossip::Validator as GossipValidatorT; - use sc_network_test::Block; use sp_core::{crypto::UncheckedFrom, H256}; + use substrate_test_runtime_client::runtime::{Block, Header}; // some random config (not really needed) fn config() -> crate::Config { @@ -1856,7 +1856,7 @@ mod tests { &VoteMessage { round: Round(1), set_id: SetId(set_id), - message: SignedMessage:: { + message: SignedMessage::
{ message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { target_hash: Default::default(), target_number: 10, @@ -1872,7 +1872,7 @@ mod tests { &VoteMessage { round: Round(1), set_id: SetId(set_id), - message: SignedMessage:: { + message: SignedMessage::
{ message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { target_hash: Default::default(), target_number: 10, @@ -1943,7 +1943,7 @@ mod tests { votes: Default::default(), }); - let mut current_rounds = environment::CurrentRounds::new(); + let mut current_rounds = environment::CurrentRounds::::new(); current_rounds.insert(3, environment::HasVoted::No); let set_state = diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 04d7ceaa05c8c..d211c10ae62be 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -312,8 +312,8 @@ impl> NetworkBridge { round: Round, set_id: SetId, voters: Arc>, - has_voted: HasVoted, - ) -> (impl Stream> + Unpin, OutgoingMessages) { + has_voted: HasVoted, + ) -> (impl Stream> + Unpin, OutgoingMessages) { self.note_round(round, set_id, &voters); let keystore = keystore.and_then(|ks| { @@ -675,15 +675,15 @@ pub(crate) struct OutgoingMessages { round: RoundNumber, set_id: SetIdNumber, keystore: Option, - sender: mpsc::Sender>, + sender: mpsc::Sender>, network: Arc>>, - has_voted: HasVoted, + has_voted: HasVoted, telemetry: Option, } impl Unpin for OutgoingMessages {} -impl Sink> for OutgoingMessages { +impl Sink> for OutgoingMessages { type Error = Error; fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { @@ -694,7 +694,10 @@ impl Sink> for OutgoingMessages { }) } - fn start_send(mut self: Pin<&mut Self>, mut msg: Message) -> Result<(), Self::Error> { + fn start_send( + mut self: Pin<&mut Self>, + mut msg: Message, + ) -> Result<(), Self::Error> { // if we've voted on this round previously under the same key, send that vote instead match &mut msg { finality_grandpa::Message::PrimaryPropose(ref mut vote) => { @@ -784,7 +787,7 @@ impl Sink> for OutgoingMessages { // checks a compact commit. returns the cost associated with processing it if // the commit was bad. fn check_compact_commit( - msg: &CompactCommit, + msg: &CompactCommit, voters: &VoterSet, round: Round, set_id: SetId, @@ -852,7 +855,7 @@ fn check_compact_commit( // checks a catch up. returns the cost associated with processing it if // the catch up was bad. 
fn check_catch_up( - msg: &CatchUp, + msg: &CatchUp, voters: &VoterSet, set_id: SetId, telemetry: Option, @@ -902,7 +905,7 @@ fn check_catch_up( ) -> Result where B: BlockT, - I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, + I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, { use crate::communication::gossip::Misbehavior; @@ -996,7 +999,7 @@ impl CommitsOut { } } -impl Sink<(RoundNumber, Commit)> for CommitsOut { +impl Sink<(RoundNumber, Commit)> for CommitsOut { type Error = Error; fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { @@ -1005,7 +1008,7 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut { fn start_send( self: Pin<&mut Self>, - input: (RoundNumber, Commit), + input: (RoundNumber, Commit), ) -> Result<(), Self::Error> { if !self.is_voter { return Ok(()) @@ -1027,7 +1030,7 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut { .map(|signed| (signed.precommit, (signed.signature, signed.id))) .unzip(); - let compact_commit = CompactCommit:: { + let compact_commit = CompactCommit:: { target_hash: commit.target_hash, target_number: commit.target_number, precommits, diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 63c8697053842..3d708a95f41cb 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -81,7 +81,7 @@ pub struct CompletedRound { /// The target block base used for voting in the round. pub base: (Block::Hash, NumberFor), /// All the votes observed in the round. - pub votes: Vec>, + pub votes: Vec>, } // Data about last completed rounds within a single voter set. Stores @@ -170,7 +170,7 @@ impl CompletedRounds { /// A map with voter status information for currently live rounds, /// which votes have we cast and what are they. -pub type CurrentRounds = BTreeMap>; +pub type CurrentRounds = BTreeMap::Header>>; /// The state of the current voter set, whether it is currently active or not /// and information related to the previously completed rounds. Current round @@ -214,7 +214,7 @@ impl VoterSetState { authority_set, ); - let mut current_rounds = CurrentRounds::new(); + let mut current_rounds = CurrentRounds::::new(); current_rounds.insert(1, HasVoted::No); VoterSetState::Live { completed_rounds, current_rounds } @@ -258,27 +258,27 @@ impl VoterSetState { /// Whether we've voted already during a prior run of the program. #[derive(Clone, Debug, Decode, Encode, PartialEq)] -pub enum HasVoted { +pub enum HasVoted { /// Has not voted already in this round. No, /// Has voted in this round. - Yes(AuthorityId, Vote), + Yes(AuthorityId, Vote
), } /// The votes cast by this voter already during a prior run of the program. #[derive(Debug, Clone, Decode, Encode, PartialEq)] -pub enum Vote { +pub enum Vote { /// Has cast a proposal. - Propose(PrimaryPropose), + Propose(PrimaryPropose
), /// Has cast a prevote. - Prevote(Option>, Prevote), + Prevote(Option>, Prevote
), /// Has cast a precommit (implies prevote.) - Precommit(Option>, Prevote, Precommit), + Precommit(Option>, Prevote
, Precommit
), } -impl HasVoted { +impl HasVoted
{ /// Returns the proposal we should vote with (if any.) - pub fn propose(&self) -> Option<&PrimaryPropose> { + pub fn propose(&self) -> Option<&PrimaryPropose
> { match self { HasVoted::Yes(_, Vote::Propose(propose)) => Some(propose), HasVoted::Yes(_, Vote::Prevote(propose, _)) | @@ -288,7 +288,7 @@ impl HasVoted { } /// Returns the prevote we should vote with (if any.) - pub fn prevote(&self) -> Option<&Prevote> { + pub fn prevote(&self) -> Option<&Prevote
> { match self { HasVoted::Yes(_, Vote::Prevote(_, prevote)) | HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote), @@ -297,7 +297,7 @@ impl HasVoted { } /// Returns the precommit we should vote with (if any.) - pub fn precommit(&self) -> Option<&Precommit> { + pub fn precommit(&self) -> Option<&Precommit
> { match self { HasVoted::Yes(_, Vote::Precommit(_, _, precommit)) => Some(precommit), _ => None, @@ -368,7 +368,7 @@ impl SharedVoterSetState { } /// Return vote status information for the current round. - pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted { + pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted { match &*self.inner.read() { VoterSetState::Live { current_rounds, .. } => current_rounds .get(&round) @@ -771,7 +771,7 @@ where fn proposed( &self, round: RoundNumber, - propose: PrimaryPropose, + propose: PrimaryPropose, ) -> Result<(), Self::Error> { let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, @@ -811,13 +811,17 @@ where Ok(()) } - fn prevoted(&self, round: RoundNumber, prevote: Prevote) -> Result<(), Self::Error> { + fn prevoted( + &self, + round: RoundNumber, + prevote: Prevote, + ) -> Result<(), Self::Error> { let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, None => return Ok(()), }; - let report_prevote_metrics = |prevote: &Prevote| { + let report_prevote_metrics = |prevote: &Prevote| { telemetry!( self.telemetry; CONSENSUS_DEBUG; @@ -873,14 +877,14 @@ where fn precommitted( &self, round: RoundNumber, - precommit: Precommit, + precommit: Precommit, ) -> Result<(), Self::Error> { let local_id = match self.voter_set_state.voting_on(round) { Some(id) => id, None => return Ok(()), }; - let report_precommit_metrics = |precommit: &Precommit| { + let report_precommit_metrics = |precommit: &Precommit| { telemetry!( self.telemetry; CONSENSUS_DEBUG; @@ -1065,7 +1069,7 @@ where hash: Block::Hash, number: NumberFor, round: RoundNumber, - commit: Commit, + commit: Commit, ) -> Result<(), Self::Error> { finalize_block( self.client.clone(), @@ -1092,7 +1096,11 @@ where fn prevote_equivocation( &self, _round: RoundNumber, - equivocation: finality_grandpa::Equivocation, Self::Signature>, + equivocation: finality_grandpa::Equivocation< + Self::Id, + Prevote, + Self::Signature, + >, ) { warn!(target: "afg", "Detected prevote equivocation in the finality worker: {:?}", equivocation); if let Err(err) = self.report_equivocation(equivocation.into()) { @@ -1103,7 +1111,11 @@ where fn precommit_equivocation( &self, _round: RoundNumber, - equivocation: finality_grandpa::Equivocation, Self::Signature>, + equivocation: finality_grandpa::Equivocation< + Self::Id, + Precommit, + Self::Signature, + >, ) { warn!(target: "afg", "Detected precommit equivocation in the finality worker: {:?}", equivocation); if let Err(err) = self.report_equivocation(equivocation.into()) { @@ -1114,11 +1126,11 @@ where pub(crate) enum JustificationOrCommit { Justification(GrandpaJustification), - Commit((RoundNumber, Commit)), + Commit((RoundNumber, Commit)), } -impl From<(RoundNumber, Commit)> for JustificationOrCommit { - fn from(commit: (RoundNumber, Commit)) -> JustificationOrCommit { +impl From<(RoundNumber, Commit)> for JustificationOrCommit { + fn from(commit: (RoundNumber, Commit)) -> JustificationOrCommit { JustificationOrCommit::Commit(commit) } } diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index ac243a1633ee1..e7578fa669463 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -380,8 +380,13 @@ mod tests { precommits: Vec::new(), }; - let grandpa_just = - GrandpaJustification:: { round: 8, votes_ancestries: Vec::new(), commit }; + let grandpa_just: GrandpaJustification = + 
sp_finality_grandpa::GrandpaJustification::<Header>
{ + round: 8, + votes_ancestries: Vec::new(), + commit, + } + .into(); let finality_proof = FinalityProof { block: header(2).hash(), diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index 6c3b6aa826ded..56b26c964ce9b 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -18,6 +18,7 @@ use std::{ collections::{HashMap, HashSet}, + marker::PhantomData, sync::Arc, }; @@ -42,9 +43,25 @@ use crate::{AuthorityList, Commit, Error}; /// nodes, and are used by syncing nodes to prove authority set handoffs. #[derive(Clone, Encode, Decode, PartialEq, Eq, Debug)] pub struct GrandpaJustification { - pub(crate) round: u64, - pub(crate) commit: Commit, - pub(crate) votes_ancestries: Vec, + /// The GRANDPA justification for block finality. + pub justification: sp_finality_grandpa::GrandpaJustification, + _block: PhantomData, +} + +impl From> + for GrandpaJustification +{ + fn from(justification: sp_finality_grandpa::GrandpaJustification) -> Self { + Self { justification, _block: Default::default() } + } +} + +impl Into> + for GrandpaJustification +{ + fn into(self) -> sp_finality_grandpa::GrandpaJustification { + self.justification + } } impl GrandpaJustification { @@ -53,8 +70,8 @@ impl GrandpaJustification { pub fn from_commit( client: &Arc, round: u64, - commit: Commit, - ) -> Result, Error> + commit: Commit, + ) -> Result where C: HeaderBackend, { @@ -108,7 +125,7 @@ impl GrandpaJustification { } } - Ok(GrandpaJustification { round, commit, votes_ancestries }) + Ok(sp_finality_grandpa::GrandpaJustification { round, commit, votes_ancestries }.into()) } /// Decode a GRANDPA justification and validate the commit and the votes' @@ -118,15 +135,17 @@ impl GrandpaJustification { finalized_target: (Block::Hash, NumberFor), set_id: u64, voters: &VoterSet, - ) -> Result, ClientError> + ) -> Result where NumberFor: finality_grandpa::BlockNumberOps, { let justification = GrandpaJustification::::decode(&mut &*encoded) .map_err(|_| ClientError::JustificationDecode)?; - if (justification.commit.target_hash, justification.commit.target_number) != - finalized_target + if ( + justification.justification.commit.target_hash, + justification.justification.commit.target_number, + ) != finalized_target { let msg = "invalid commit target in grandpa justification".to_string(); Err(ClientError::BadJustification(msg)) @@ -157,9 +176,10 @@ impl GrandpaJustification { { use finality_grandpa::Chain; - let ancestry_chain = AncestryChain::::new(&self.votes_ancestries); + let ancestry_chain = AncestryChain::::new(&self.justification.votes_ancestries); - match finality_grandpa::validate_commit(&self.commit, voters, &ancestry_chain) { + match finality_grandpa::validate_commit(&self.justification.commit, voters, &ancestry_chain) + { Ok(ref result) if result.is_valid() => {}, _ => { let msg = "invalid commit in grandpa justification".to_string(); @@ -171,6 +191,7 @@ impl GrandpaJustification { // should serve as the root block for populating ancestry (i.e. 
// collect all headers from all precommit blocks to the base) let base_hash = self + .justification .commit .precommits .iter() @@ -186,12 +207,12 @@ impl GrandpaJustification { let mut buf = Vec::new(); let mut visited_hashes = HashSet::new(); - for signed in self.commit.precommits.iter() { + for signed in self.justification.commit.precommits.iter() { if !sp_finality_grandpa::check_message_signature_with_buffer( &finality_grandpa::Message::Precommit(signed.precommit.clone()), &signed.id, &signed.signature, - self.round, + self.justification.round, set_id, &mut buf, ) { @@ -220,8 +241,12 @@ impl GrandpaJustification { } } - let ancestry_hashes: HashSet<_> = - self.votes_ancestries.iter().map(|h: &Block::Header| h.hash()).collect(); + let ancestry_hashes: HashSet<_> = self + .justification + .votes_ancestries + .iter() + .map(|h: &Block::Header| h.hash()) + .collect(); if visited_hashes != ancestry_hashes { return Err(ClientError::BadJustification( @@ -235,7 +260,7 @@ impl GrandpaJustification { /// The target block number and hash that this justifications proves finality for. pub fn target(&self) -> (NumberFor, Block::Hash) { - (self.commit.target_number, self.commit.target_hash) + (self.justification.commit.target_number, self.justification.commit.target_hash) } } diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 149cf1f89a222..7e47b70bd6b98 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -144,72 +144,35 @@ use sp_finality_grandpa::{AuthorityList, AuthoritySignature, SetId}; use until_imported::UntilGlobalMessageBlocksImported; // Re-export these two because it's just so damn convenient. -pub use sp_finality_grandpa::{AuthorityId, AuthorityPair, GrandpaApi, ScheduledChange}; +pub use sp_finality_grandpa::{ + AuthorityId, AuthorityPair, CatchUp, Commit, CompactCommit, GrandpaApi, Message, Precommit, + Prevote, PrimaryPropose, ScheduledChange, SignedMessage, +}; use std::marker::PhantomData; #[cfg(test)] mod tests; -/// A GRANDPA message for a substrate chain. -pub type Message = finality_grandpa::Message<::Hash, NumberFor>; - -/// A signed message. -pub type SignedMessage = finality_grandpa::SignedMessage< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, ->; - -/// A primary propose message for this chain's block type. -pub type PrimaryPropose = - finality_grandpa::PrimaryPropose<::Hash, NumberFor>; -/// A prevote message for this chain's block type. -pub type Prevote = finality_grandpa::Prevote<::Hash, NumberFor>; -/// A precommit message for this chain's block type. -pub type Precommit = finality_grandpa::Precommit<::Hash, NumberFor>; -/// A catch up message for this chain's block type. -pub type CatchUp = finality_grandpa::CatchUp< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, ->; -/// A commit message for this chain's block type. -pub type Commit = finality_grandpa::Commit< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, ->; -/// A compact commit message for this chain's block type. -pub type CompactCommit = finality_grandpa::CompactCommit< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, ->; /// A global communication input stream for commits and catch up messages. Not /// exposed publicly, used internally to simplify types in the communication /// layer. 
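// Illustrative sketch, not taken from the diff: with the vote and commit aliases now living in
// `sp_finality_grandpa` and keyed by the header type rather than the block, client code that
// previously wrote `SignedMessage<Block>` names the header explicitly, roughly:
//
//     use sp_finality_grandpa::SignedMessage;
//     use sp_runtime::traits::Block as BlockT;
//
//     type BlockSignedMessage<Block: BlockT> = SignedMessage<<Block as BlockT>::Header>;
//
// The signature and authority id parameters stay `AuthoritySignature` and `AuthorityId`, as the
// re-exports above show.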
-type CommunicationIn = finality_grandpa::voter::CommunicationIn< +type CommunicationIn = voter::CommunicationIn< ::Hash, NumberFor, AuthoritySignature, AuthorityId, >; - /// Global communication input stream for commits and catch up messages, with /// the hash type not being derived from the block, useful for forcing the hash /// to some type (e.g. `H256`) when the compiler can't do the inference. type CommunicationInH = - finality_grandpa::voter::CommunicationIn, AuthoritySignature, AuthorityId>; + voter::CommunicationIn, AuthoritySignature, AuthorityId>; /// Global communication sink for commits with the hash type not being derived /// from the block, useful for forcing the hash to some type (e.g. `H256`) when /// the compiler can't do the inference. type CommunicationOutH = - finality_grandpa::voter::CommunicationOut, AuthoritySignature, AuthorityId>; + voter::CommunicationOut, AuthoritySignature, AuthorityId>; /// Shared voter state for querying. pub struct SharedVoterState { @@ -233,7 +196,7 @@ impl SharedVoterState { } /// Get the inner `VoterState` instance. - pub fn voter_state(&self) -> Option> { + pub fn voter_state(&self) -> Option> { self.inner.read().as_ref().map(|vs| vs.get()) } } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 3dd21d51b6a2d..b1e46be5cabde 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -450,7 +450,7 @@ fn finalize_3_voters_1_full_observer() { let justification = crate::aux_schema::best_justification::<_, Block>(&*client).unwrap().unwrap(); - assert_eq!(justification.commit.target_number, 20); + assert_eq!(justification.justification.commit.target_number, 20); } } diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index fe7caf74422db..df0b63348e94b 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -354,7 +354,7 @@ fn warn_authority_wrong_target(hash: H, id: AuthorityId) ); } -impl BlockUntilImported for SignedMessage { +impl BlockUntilImported for SignedMessage { type Blocked = Self; fn needs_waiting>( @@ -389,8 +389,13 @@ impl BlockUntilImported for SignedMessage { /// Helper type definition for the stream which waits until vote targets for /// signed messages are imported. -pub(crate) type UntilVoteTargetImported = - UntilImported>; +pub(crate) type UntilVoteTargetImported = UntilImported< + Block, + BlockStatus, + BlockSyncRequester, + I, + SignedMessage<::Header>, +>; /// This blocks a global message import, i.e. a commit or catch up messages, /// until all blocks referenced in its votes are known. @@ -646,7 +651,7 @@ mod tests { // unwrap the commit from `CommunicationIn` returning its fields in a tuple, // panics if the given message isn't a commit - fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit) { + fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit
) { match msg { voter::CommunicationIn::Commit(round, commit, ..) => (round, commit), _ => panic!("expected commit"), @@ -655,7 +660,7 @@ mod tests { // unwrap the catch up from `CommunicationIn` returning its inner representation, // panics if the given message isn't a catch up - fn unapply_catch_up(msg: CommunicationIn) -> CatchUp { + fn unapply_catch_up(msg: CommunicationIn) -> CatchUp
{ match msg { voter::CommunicationIn::CatchUp(catch_up, ..) => catch_up, _ => panic!("expected catch up"), @@ -740,7 +745,7 @@ mod tests { let h2 = make_header(6); let h3 = make_header(7); - let unknown_commit = CompactCommit:: { + let unknown_commit = CompactCommit::
{ target_hash: h1.hash(), target_number: 5, precommits: vec![ @@ -768,7 +773,7 @@ mod tests { let h2 = make_header(6); let h3 = make_header(7); - let known_commit = CompactCommit:: { + let known_commit = CompactCommit::
{ target_hash: h1.hash(), target_number: 5, precommits: vec![ @@ -910,7 +915,7 @@ mod tests { // we create a commit message, with precommits for blocks 6 and 7 which // we haven't imported. - let unknown_commit = CompactCommit:: { + let unknown_commit = CompactCommit::
{ target_hash: h1.hash(), target_number: 5, precommits: vec![ diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index 75ec42321ef5e..0c7e26cec0f58 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -21,7 +21,7 @@ use std::{convert::TryInto, sync::Arc}; use codec::{Codec, Decode}; use jsonrpsee::{ - core::{async_trait, Error as JsonRpseeError, RpcResult}, + core::{Error as JsonRpseeError, RpcResult}, proc_macros::rpc, types::error::{CallError, ErrorCode, ErrorObject}, }; @@ -81,7 +81,6 @@ impl From for i32 { } } -#[async_trait] impl TransactionPaymentApiServer<::Hash, RuntimeDispatchInfo> for TransactionPayment diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index 4be42c3d19b6c..2adfce1bea241 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -29,7 +29,10 @@ use codec::{Codec, Decode, Encode, Input}; use scale_info::TypeInfo; #[cfg(feature = "std")] use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; -use sp_runtime::{traits::NumberFor, ConsensusEngineId, RuntimeDebug}; +use sp_runtime::{ + traits::{Header as HeaderT, NumberFor}, + ConsensusEngineId, RuntimeDebug, +}; use sp_std::{borrow::Cow, vec::Vec}; #[cfg(feature = "std")] @@ -76,6 +79,63 @@ pub type RoundNumber = u64; /// A list of Grandpa authorities with associated weights. pub type AuthorityList = Vec<(AuthorityId, AuthorityWeight)>; +/// A GRANDPA message for a substrate chain. +pub type Message
<Header> = grandpa::Message<<Header as HeaderT>::Hash, <Header as HeaderT>::Number>;
+
+/// A signed message.
+pub type SignedMessage<Header> = grandpa::SignedMessage<
+	<Header as HeaderT>::Hash,
+	<Header as HeaderT>::Number,
+	AuthoritySignature,
+	AuthorityId,
+>;
+
+/// A primary propose message for this chain's block type.
+pub type PrimaryPropose<Header> =
+	grandpa::PrimaryPropose<<Header as HeaderT>::Hash, <Header as HeaderT>::Number>;
+/// A prevote message for this chain's block type.
+pub type Prevote<Header> = grandpa::Prevote<<Header as HeaderT>::Hash, <Header as HeaderT>::Number>;
+/// A precommit message for this chain's block type.
+pub type Precommit<Header> =
+	grandpa::Precommit<<Header as HeaderT>::Hash, <Header as HeaderT>::Number>;
+/// A catch up message for this chain's block type.
+pub type CatchUp<Header> = grandpa::CatchUp<
+	<Header as HeaderT>::Hash,
+	<Header as HeaderT>::Number,
+	AuthoritySignature,
+	AuthorityId,
+>;
+/// A commit message for this chain's block type.
+pub type Commit<Header> = grandpa::Commit<
+	<Header as HeaderT>::Hash,
+	<Header as HeaderT>::Number,
+	AuthoritySignature,
+	AuthorityId,
+>;
+
+/// A compact commit message for this chain's block type.
+pub type CompactCommit<Header> = grandpa::CompactCommit<
+	<Header as HeaderT>::Hash,
+	<Header as HeaderT>::Number,
+	AuthoritySignature,
+	AuthorityId,
+>;
+
+/// A GRANDPA justification for block finality, it includes a commit message and
+/// an ancestry proof including all headers routing all precommit target blocks
+/// to the commit target block. Due to the current voting strategy the precommit
+/// targets should be the same as the commit target, since honest voters don't
+/// vote past authority set change blocks.
+///
+/// This is meant to be stored in the db and passed around the network to other
+/// nodes, and are used by syncing nodes to prove authority set handoffs.
+#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)]
+pub struct GrandpaJustification<Header: HeaderT> {
+	pub round: u64,
+	pub commit: Commit<Header>,
+	pub votes_ancestries: Vec<Header>
, +} + /// A scheduled change of authority set. #[cfg_attr(feature = "std", derive(Serialize))] #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] From 5527263978a763bafc78d60955c662c20f465d18 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Tue, 13 Sep 2022 00:03:31 +0200 Subject: [PATCH 122/348] BREAKING: Rename Call & Event (#11981) * rename Event to RuntimeEvent * rename Call * rename in runtimes * small fix * rename Event * small fix & rename RuntimeCall back to Call for now * small fixes * more renaming * a bit more renaming * fmt * small fix * commit * prep for renaming associated types * fix * rename associated Event type * rename to RuntimeEvent * commit * merge conflict fixes & fmt * additional renaming * fix. * fix decl_event * rename in tests * remove warnings * remove accidental rename * . * commit * update .stderr * fix in test * update .stderr * TRYBUILD=overwrite * docs * fmt * small change in docs * rename PalletEvent to Event * rename Call to RuntimeCall * renamed at wrong places :P * rename Call * rename * rename associated type * fix * fix & fmt * commit * frame-support-test * passing tests * update docs * rustdoc fix * update .stderr * wrong code in docs * merge fix * fix in error message * update .stderr * docs & error message * . * merge fix * merge fix * fmt * fmt * merge fix * more fixing * fmt * remove unused * fmt * fix Co-authored-by: Shawn Tabrizi --- bin/node-template/node/src/benchmarking.rs | 2 +- bin/node-template/pallets/template/src/lib.rs | 2 +- .../pallets/template/src/mock.rs | 6 +- bin/node-template/runtime/src/lib.rs | 29 +-- bin/node/cli/benches/block_production.rs | 2 +- bin/node/cli/src/service.rs | 10 +- bin/node/executor/benches/bench.rs | 8 +- bin/node/executor/tests/basic.rs | 94 +++++---- bin/node/executor/tests/fees.rs | 25 ++- bin/node/runtime/src/impls.rs | 9 +- bin/node/runtime/src/lib.rs | 188 ++++++++--------- bin/node/testing/src/bench.rs | 11 +- docs/Upgrading-2.0-to-3.0.md | 2 +- frame/alliance/src/benchmarking.rs | 2 +- frame/alliance/src/lib.rs | 5 +- frame/alliance/src/mock.rs | 36 ++-- frame/alliance/src/tests.rs | 50 ++--- frame/assets/src/benchmarking.rs | 4 +- frame/assets/src/lib.rs | 3 +- frame/assets/src/mock.rs | 8 +- frame/atomic-swap/src/lib.rs | 2 +- frame/atomic-swap/src/tests.rs | 8 +- frame/aura/src/mock.rs | 4 +- frame/authority-discovery/src/lib.rs | 6 +- frame/authorship/src/lib.rs | 4 +- frame/babe/src/mock.rs | 18 +- frame/bags-list/src/lib.rs | 3 +- frame/bags-list/src/mock.rs | 6 +- frame/balances/src/lib.rs | 3 +- frame/balances/src/tests.rs | 44 ++-- frame/balances/src/tests_composite.rs | 8 +- frame/balances/src/tests_local.rs | 25 ++- frame/balances/src/tests_reentrancy.rs | 33 +-- frame/beefy-mmr/src/mock.rs | 6 +- frame/beefy/src/mock.rs | 6 +- frame/benchmarking/src/baseline.rs | 4 +- frame/benchmarking/src/tests.rs | 4 +- frame/benchmarking/src/tests_instance.rs | 20 +- frame/bounties/src/benchmarking.rs | 2 +- frame/bounties/src/lib.rs | 3 +- frame/bounties/src/tests.rs | 16 +- frame/child-bounties/src/benchmarking.rs | 2 +- frame/child-bounties/src/lib.rs | 2 +- frame/child-bounties/src/tests.rs | 14 +- frame/collective/src/benchmarking.rs | 2 +- frame/collective/src/lib.rs | 3 +- frame/collective/src/tests.rs | 167 ++++++++------- frame/contracts/src/exec.rs | 18 +- frame/contracts/src/lib.rs | 12 +- frame/contracts/src/tests.rs | 195 +++++++++--------- frame/contracts/src/wasm/mod.rs | 12 +- frame/contracts/src/wasm/runtime.rs | 2 +- 
frame/conviction-voting/src/lib.rs | 3 +- frame/conviction-voting/src/tests.rs | 14 +- frame/democracy/src/benchmarking.rs | 2 +- frame/democracy/src/lib.rs | 2 +- frame/democracy/src/tests.rs | 30 +-- .../election-provider-multi-phase/src/lib.rs | 4 +- .../election-provider-multi-phase/src/mock.rs | 19 +- .../src/unsigned.rs | 4 +- .../election-provider-support/src/onchain.rs | 4 +- frame/elections-phragmen/src/lib.rs | 23 ++- frame/examples/basic/src/lib.rs | 6 +- frame/examples/basic/src/tests.rs | 8 +- frame/examples/offchain-worker/src/lib.rs | 5 +- frame/examples/offchain-worker/src/tests.rs | 30 +-- frame/examples/parallel/src/lib.rs | 5 +- frame/examples/parallel/src/tests.rs | 8 +- frame/executive/src/lib.rs | 40 ++-- frame/gilt/src/lib.rs | 2 +- frame/gilt/src/mock.rs | 8 +- frame/grandpa/src/lib.rs | 9 +- frame/grandpa/src/mock.rs | 21 +- frame/identity/src/benchmarking.rs | 2 +- frame/identity/src/lib.rs | 2 +- frame/identity/src/tests.rs | 8 +- frame/im-online/src/lib.rs | 2 +- frame/im-online/src/mock.rs | 14 +- frame/im-online/src/tests.rs | 6 +- frame/indices/src/lib.rs | 2 +- frame/indices/src/mock.rs | 8 +- frame/lottery/src/lib.rs | 26 ++- frame/lottery/src/mock.rs | 10 +- frame/lottery/src/tests.rs | 72 ++++--- frame/membership/src/lib.rs | 11 +- frame/merkle-mountain-range/src/mock.rs | 4 +- frame/multisig/src/benchmarking.rs | 4 +- frame/multisig/src/lib.rs | 10 +- frame/multisig/src/tests.rs | 24 +-- frame/nicks/src/lib.rs | 10 +- frame/node-authorization/src/lib.rs | 2 +- frame/node-authorization/src/mock.rs | 6 +- .../nomination-pools/benchmarking/src/mock.rs | 12 +- frame/nomination-pools/src/lib.rs | 2 +- frame/nomination-pools/src/mock.rs | 12 +- frame/nomination-pools/src/tests.rs | 2 +- .../nomination-pools/test-staking/src/mock.rs | 16 +- frame/offences/benchmarking/src/lib.rs | 14 +- frame/offences/benchmarking/src/mock.rs | 22 +- frame/offences/src/lib.rs | 2 +- frame/offences/src/mock.rs | 6 +- frame/offences/src/tests.rs | 8 +- frame/preimage/src/lib.rs | 2 +- frame/preimage/src/mock.rs | 8 +- frame/proxy/src/benchmarking.rs | 12 +- frame/proxy/src/lib.rs | 18 +- frame/proxy/src/tests.rs | 69 ++++--- frame/randomness-collective-flip/src/lib.rs | 4 +- frame/ranked-collective/src/benchmarking.rs | 2 +- frame/ranked-collective/src/lib.rs | 3 +- frame/ranked-collective/src/tests.rs | 6 +- frame/recovery/src/benchmarking.rs | 4 +- frame/recovery/src/lib.rs | 6 +- frame/recovery/src/mock.rs | 10 +- frame/recovery/src/tests.rs | 12 +- frame/referenda/src/benchmarking.rs | 2 +- frame/referenda/src/lib.rs | 5 +- frame/referenda/src/mock.rs | 32 +-- frame/referenda/src/tests.rs | 2 +- frame/referenda/src/types.rs | 2 +- frame/remark/src/benchmarking.rs | 4 +- frame/remark/src/lib.rs | 2 +- frame/remark/src/mock.rs | 6 +- frame/remark/src/tests.rs | 2 +- frame/scheduler/src/benchmarking.rs | 4 +- frame/scheduler/src/lib.rs | 54 ++--- frame/scheduler/src/mock.rs | 20 +- frame/scheduler/src/tests.rs | 183 ++++++++++------ frame/scored-pool/src/lib.rs | 3 +- frame/scored-pool/src/mock.rs | 8 +- frame/session/benchmarking/src/mock.rs | 10 +- frame/session/src/lib.rs | 2 +- frame/session/src/mock.rs | 6 +- frame/society/src/lib.rs | 3 +- frame/society/src/mock.rs | 8 +- frame/staking/src/mock.rs | 18 +- frame/staking/src/pallet/mod.rs | 2 +- frame/staking/src/tests.rs | 15 +- frame/state-trie-migration/src/lib.rs | 12 +- frame/sudo/src/lib.rs | 12 +- frame/sudo/src/mock.rs | 16 +- frame/sudo/src/tests.rs | 26 +-- .../src/construct_runtime/expand/call.rs | 24 +-- 
.../src/construct_runtime/expand/event.rs | 8 +- .../src/construct_runtime/expand/origin.rs | 6 +- .../src/construct_runtime/expand/unsigned.rs | 6 +- frame/support/procedural/src/lib.rs | 2 +- .../procedural/src/pallet/expand/call.rs | 2 +- .../procedural/src/pallet/expand/event.rs | 9 +- .../procedural/src/pallet/parse/config.rs | 24 ++- .../procedural/src/pallet/parse/mod.rs | 28 +-- frame/support/src/dispatch.rs | 16 +- frame/support/src/lib.rs | 12 +- frame/support/test/compile_pass/src/lib.rs | 6 +- frame/support/test/tests/construct_runtime.rs | 60 +++--- .../both_use_and_excluded_parts.rs | 2 +- .../both_use_and_excluded_parts.stderr | 25 +-- .../exclude_undefined_part.rs | 2 +- .../exclude_undefined_part.stderr | 25 +-- .../no_std_genesis_config.rs | 6 +- .../pallet_error_too_large.rs | 6 +- .../undefined_call_part.rs | 6 +- .../undefined_event_part.rs | 6 +- .../undefined_event_part.stderr | 33 --- .../undefined_genesis_config_part.rs | 6 +- .../undefined_inherent_part.rs | 6 +- .../undefined_origin_part.rs | 6 +- .../undefined_validate_unsigned_part.rs | 6 +- .../undefined_validate_unsigned_part.stderr | 4 +- .../use_undefined_part.rs | 2 +- .../use_undefined_part.stderr | 25 +-- frame/support/test/tests/instance.rs | 22 +- frame/support/test/tests/issue2219.rs | 6 +- frame/support/test/tests/origin.rs | 14 +- frame/support/test/tests/pallet.rs | 78 +++---- .../test/tests/pallet_compatibility.rs | 14 +- .../tests/pallet_compatibility_instance.rs | 23 ++- frame/support/test/tests/pallet_instance.rs | 36 ++-- .../tests/pallet_ui/event_field_not_member.rs | 2 +- .../tests/pallet_ui/event_not_in_trait.stderr | 2 +- .../pallet_ui/event_type_invalid_bound.rs | 2 +- .../pallet_ui/event_type_invalid_bound.stderr | 4 +- .../pallet_ui/event_type_invalid_bound_2.rs | 2 +- .../event_type_invalid_bound_2.stderr | 4 +- .../tests/pallet_with_name_trait_is_valid.rs | 10 +- frame/support/test/tests/storage_layers.rs | 14 +- frame/support/test/tests/system.rs | 8 +- frame/system/benches/bench.rs | 8 +- frame/system/benchmarking/src/mock.rs | 4 +- frame/system/src/extensions/check_genesis.rs | 2 +- .../system/src/extensions/check_mortality.rs | 2 +- .../src/extensions/check_non_zero_sender.rs | 4 +- frame/system/src/extensions/check_nonce.rs | 4 +- .../src/extensions/check_spec_version.rs | 2 +- .../system/src/extensions/check_tx_version.rs | 2 +- frame/system/src/extensions/check_weight.rs | 22 +- frame/system/src/lib.rs | 26 +-- frame/system/src/mock.rs | 8 +- frame/system/src/mocking.rs | 2 +- frame/system/src/offchain.rs | 8 +- frame/timestamp/src/mock.rs | 4 +- frame/tips/src/lib.rs | 3 +- frame/tips/src/tests.rs | 16 +- .../asset-tx-payment/src/lib.rs | 12 +- .../asset-tx-payment/src/payment.rs | 16 +- .../asset-tx-payment/src/tests.rs | 16 +- frame/transaction-payment/src/lib.rs | 84 ++++---- frame/transaction-payment/src/payment.rs | 16 +- frame/transaction-storage/src/benchmarking.rs | 4 +- frame/transaction-storage/src/lib.rs | 4 +- frame/transaction-storage/src/mock.rs | 10 +- frame/treasury/src/benchmarking.rs | 2 +- frame/treasury/src/lib.rs | 3 +- frame/treasury/src/tests.rs | 8 +- frame/uniques/src/benchmarking.rs | 4 +- frame/uniques/src/lib.rs | 3 +- frame/uniques/src/mock.rs | 8 +- frame/uniques/src/tests.rs | 6 +- frame/utility/src/benchmarking.rs | 8 +- frame/utility/src/lib.rs | 33 +-- frame/utility/src/tests.rs | 111 ++++++---- frame/vesting/src/lib.rs | 2 +- frame/vesting/src/mock.rs | 8 +- frame/whitelist/src/benchmarking.rs | 4 +- frame/whitelist/src/lib.rs | 10 +- 
frame/whitelist/src/mock.rs | 12 +- frame/whitelist/src/tests.rs | 13 +- test-utils/runtime/src/lib.rs | 10 +- 228 files changed, 1792 insertions(+), 1673 deletions(-) diff --git a/bin/node-template/node/src/benchmarking.rs b/bin/node-template/node/src/benchmarking.rs index f0e32104cd3ee..90fe06edf04b8 100644 --- a/bin/node-template/node/src/benchmarking.rs +++ b/bin/node-template/node/src/benchmarking.rs @@ -119,7 +119,7 @@ impl frame_benchmarking_cli::ExtrinsicBuilder for TransferKeepAliveBuilder { pub fn create_benchmark_extrinsic( client: &FullClient, sender: sp_core::sr25519::Pair, - call: runtime::Call, + call: runtime::RuntimeCall, nonce: u32, ) -> runtime::UncheckedExtrinsic { let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index a9209a9040b6d..0b55d7ae86fcf 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -27,7 +27,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// Because this pallet emits events, it depends on the runtime's definition of an event. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; } // The pallet's runtime storage items. diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index e03f37b2eea69..3289ec2da4952 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -28,7 +28,7 @@ impl system::Config for Test { type BlockLength = (); type DbWeight = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -36,7 +36,7 @@ impl system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -50,7 +50,7 @@ impl system::Config for Test { } impl pallet_template::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; } // Build genesis storage according to the mock runtime. diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index e9c2800ffa3f5..099654a12a420 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -158,7 +158,7 @@ impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; /// The aggregated dispatch type that is available for extrinsics. - type Call = Call; + type RuntimeCall = RuntimeCall; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = AccountIdLookup; /// The index type for storing how many extrinsics an account has signed. @@ -172,7 +172,7 @@ impl frame_system::Config for Runtime { /// The header type. type Header = generic::Header; /// The ubiquitous event type. - type Event = Event; + type RuntimeEvent = RuntimeEvent; /// The ubiquitous origin type. type Origin = Origin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). 
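// A schematic before/after, not taken verbatim from any single hunk: the rename in this patch
// is mechanical — the aggregated types generated by `construct_runtime!` change from
// `Call`/`Event` to `RuntimeCall`/`RuntimeEvent`, and the associated types on each pallet's
// `Config` follow suit:
//
//     // before:
//     impl pallet_sudo::Config for Runtime {
//         type Event = Event;
//         type Call = Call;
//     }
//
//     // after:
//     impl pallet_sudo::Config for Runtime {
//         type RuntimeEvent = RuntimeEvent;
//         type RuntimeCall = RuntimeCall;
//     }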
@@ -209,8 +209,7 @@ impl pallet_aura::Config for Runtime { } impl pallet_grandpa::Config for Runtime { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; type KeyOwnerProofSystem = (); @@ -246,7 +245,7 @@ impl pallet_balances::Config for Runtime { /// The type for recording an account's balance. type Balance = Balance; /// The ubiquitous event type. - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU128; type AccountStore = System; @@ -258,7 +257,7 @@ parameter_types! { } impl pallet_transaction_payment::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; @@ -267,13 +266,13 @@ impl pallet_transaction_payment::Config for Runtime { } impl pallet_sudo::Config for Runtime { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; } /// Configure the pallet-template in pallets/template. impl pallet_template::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; } // Create the runtime by composing the FRAME pallets that were previously configured. @@ -314,10 +313,12 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, ); + /// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -473,17 +474,17 @@ impl_runtime_apis! 
{ } } - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi for Runtime { fn query_call_info( - call: Call, + call: RuntimeCall, len: u32, ) -> pallet_transaction_payment::RuntimeDispatchInfo { TransactionPayment::query_call_info(call, len) } fn query_call_fee_details( - call: Call, + call: RuntimeCall, len: u32, ) -> pallet_transaction_payment::FeeDetails { TransactionPayment::query_call_fee_details(call, len) diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index c0f3b96e093cb..0a734fa447448 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -122,7 +122,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { kitchensink_runtime::UncheckedExtrinsic { signature: None, - function: kitchensink_runtime::Call::Timestamp(pallet_timestamp::Call::set { now }), + function: kitchensink_runtime::RuntimeCall::Timestamp(pallet_timestamp::Call::set { now }), } .into() } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 13003c1a7a41f..2152301ac2d42 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -69,7 +69,7 @@ pub fn fetch_nonce(client: &FullClient, account: sp_core::sr25519::Pair) -> u32 pub fn create_extrinsic( client: &FullClient, sender: sp_core::sr25519::Pair, - function: impl Into, + function: impl Into, nonce: Option, ) -> kitchensink_runtime::UncheckedExtrinsic { let function = function.into(); @@ -570,7 +570,7 @@ mod tests { use codec::Encode; use kitchensink_runtime::{ constants::{currency::CENTS, time::SLOT_DURATION}, - Address, BalancesCall, Call, UncheckedExtrinsic, + Address, BalancesCall, RuntimeCall, UncheckedExtrinsic, }; use node_primitives::{Block, DigestItem, Signature}; use sc_client_api::BlockBackend; @@ -759,8 +759,10 @@ mod tests { }; let signer = charlie.clone(); - let function = - Call::Balances(BalancesCall::transfer { dest: to.into(), value: amount }); + let function = RuntimeCall::Balances(BalancesCall::transfer { + dest: to.into(), + value: amount, + }); let check_non_zero_sender = frame_system::CheckNonZeroSender::new(); let check_spec_version = frame_system::CheckSpecVersion::new(); diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 3ad5dc61d528f..850be3e3c6281 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -19,8 +19,8 @@ use codec::{Decode, Encode}; use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use frame_support::Hashable; use kitchensink_runtime::{ - constants::currency::*, Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, - UncheckedExtrinsic, + constants::currency::*, Block, BuildStorage, CheckedExtrinsic, GenesisConfig, Header, + RuntimeCall, UncheckedExtrinsic, }; use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; @@ -141,11 +141,11 @@ fn test_blocks( let mut test_ext = new_test_ext(genesis_config); let mut block1_extrinsics = vec![CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set { now: 0 }), + function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: 0 }), }]; block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic { signed: Some((alice(), signed_extra(i, 0))), - function: Call::Balances(pallet_balances::Call::transfer { + function: 
RuntimeCall::Balances(pallet_balances::Call::transfer { dest: bob().into(), value: 1 * DOLLARS, }), diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 35063f6933c98..33f71568ec6c5 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -28,8 +28,8 @@ use sp_runtime::{ use kitchensink_runtime::{ constants::{currency::*, time::SLOT_DURATION}, - Balances, Call, CheckedExtrinsic, Event, Header, Runtime, System, TransactionPayment, - UncheckedExtrinsic, + Balances, CheckedExtrinsic, Header, Runtime, RuntimeCall, RuntimeEvent, System, + TransactionPayment, UncheckedExtrinsic, }; use node_primitives::{Balance, Hash}; use node_testing::keyring::*; @@ -67,7 +67,7 @@ fn transfer_fee(extrinsic: &E) -> Balance { fn xt() -> UncheckedExtrinsic { sign(CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: Call::Balances(default_transfer_call()), + function: RuntimeCall::Balances(default_transfer_call()), }) } @@ -84,11 +84,11 @@ fn changes_trie_block() -> (Vec, Hash) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set { now: time }), + function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer { dest: bob().into(), value: 69 * DOLLARS, }), @@ -111,11 +111,11 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set { now: time1 }), + function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer { dest: bob().into(), value: 69 * DOLLARS, }), @@ -131,18 +131,18 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set { now: time2 }), + function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { signed: Some((bob(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer { dest: alice().into(), value: 5 * DOLLARS, }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(1, 0))), - function: Call::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer { dest: bob().into(), value: 15 * DOLLARS, }), @@ -166,11 +166,11 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), + function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(nonce, 0))), - function: Call::System(frame_system::Call::remark { remark: vec![0; size] }), + function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; size] }), }, ], (time * 1000 / SLOT_DURATION).into(), @@ -324,7 +324,7 @@ fn full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: Event::System(frame_system::Event::ExtrinsicSuccess { + event: 
RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { dispatch_info: DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, @@ -335,7 +335,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Withdraw { + event: RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who: alice().into(), amount: fees, }), @@ -343,7 +343,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: alice().into(), to: bob().into(), amount: 69 * DOLLARS, @@ -352,7 +352,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Deposit { + event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), amount: fees * 8 / 10, }), @@ -360,12 +360,14 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Treasury(pallet_treasury::Event::Deposit { value: fees * 8 / 10 }), + event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { + value: fees * 8 / 10, + }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::TransactionPayment( + event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: alice().into(), actual_fee: fees, @@ -376,7 +378,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::System(frame_system::Event::ExtrinsicSuccess { + event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, }), topics: vec![], @@ -398,7 +400,7 @@ fn full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: Event::System(frame_system::Event::ExtrinsicSuccess { + event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { dispatch_info: DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, @@ -409,7 +411,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Withdraw { + event: RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who: bob().into(), amount: fees, }), @@ -417,7 +419,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: bob().into(), to: alice().into(), amount: 5 * DOLLARS, @@ -426,7 +428,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances(pallet_balances::Event::Deposit { + event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), amount: fees * 8 / 10, }), @@ -434,12 +436,14 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Treasury(pallet_treasury::Event::Deposit { value: fees * 8 / 10 }), + event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { + value: fees * 8 / 10, + }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::TransactionPayment( + event: RuntimeEvent::TransactionPayment( 
pallet_transaction_payment::Event::TransactionFeePaid { who: bob().into(), actual_fee: fees, @@ -450,14 +454,14 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::System(frame_system::Event::ExtrinsicSuccess { + event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::Balances(pallet_balances::Event::Withdraw { + event: RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who: alice().into(), amount: fees, }), @@ -465,7 +469,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: alice().into(), to: bob().into(), amount: 15 * DOLLARS, @@ -474,7 +478,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::Balances(pallet_balances::Event::Deposit { + event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), amount: fees * 8 / 10, }), @@ -482,12 +486,14 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::Treasury(pallet_treasury::Event::Deposit { value: fees * 8 / 10 }), + event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { + value: fees * 8 / 10, + }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::TransactionPayment( + event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: alice().into(), actual_fee: fees, @@ -498,7 +504,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::System(frame_system::Event::ExtrinsicSuccess { + event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { dispatch_info: DispatchInfo { weight: transfer_weight, ..Default::default() }, }), topics: vec![], @@ -648,24 +654,24 @@ fn deploying_wasm_contract_should_work() { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set { now: time }), + function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), - function: Call::Contracts( - pallet_contracts::Call::instantiate_with_code:: { - value: 0, - gas_limit: Weight::from_ref_time(500_000_000), - storage_deposit_limit: None, - code: transfer_code, - data: Vec::new(), - salt: Vec::new(), - }, - ), + function: RuntimeCall::Contracts(pallet_contracts::Call::instantiate_with_code::< + Runtime, + > { + value: 0, + gas_limit: Weight::from_ref_time(500_000_000), + storage_deposit_limit: None, + code: transfer_code, + data: Vec::new(), + salt: Vec::new(), + }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(1, 0))), - function: Call::Contracts(pallet_contracts::Call::call:: { + function: RuntimeCall::Contracts(pallet_contracts::Call::call:: { dest: sp_runtime::MultiAddress::Id(addr.clone()), value: 10, gas_limit: Weight::from_ref_time(500_000_000), diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index c5c2d01ca5c13..ae1bfa371469c 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -22,7 +22,8 @@ use frame_support::{ }; use kitchensink_runtime::{ constants::{currency::*, 
time::SLOT_DURATION}, - Balances, Call, CheckedExtrinsic, Multiplier, Runtime, TransactionByteFee, TransactionPayment, + Balances, CheckedExtrinsic, Multiplier, Runtime, RuntimeCall, TransactionByteFee, + TransactionPayment, }; use node_primitives::Balance; use node_testing::keyring::*; @@ -53,12 +54,12 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set { now: time1 }), + function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time1 }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), - function: Call::Sudo(pallet_sudo::Call::sudo { - call: Box::new(Call::System(frame_system::Call::fill_block { + function: RuntimeCall::Sudo(pallet_sudo::Call::sudo { + call: Box::new(RuntimeCall::System(frame_system::Call::fill_block { ratio: Perbill::from_percent(60), })), }), @@ -76,11 +77,11 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set { now: time2 }), + function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time2 }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(1, 0))), - function: Call::System(frame_system::Call::remark { remark: vec![0; 1] }), + function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1] }), }, ], (time2 / SLOT_DURATION).into(), @@ -146,7 +147,7 @@ fn transaction_fee_is_correct() { let tip = 1_000_000; let xt = sign(CheckedExtrinsic { signed: Some((alice(), signed_extra(0, tip))), - function: Call::Balances(default_transfer_call()), + function: RuntimeCall::Balances(default_transfer_call()), }); let r = @@ -212,7 +213,7 @@ fn block_weight_capacity_report() { let mut xts = (0..num_transfers) .map(|i| CheckedExtrinsic { signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), - function: Call::Balances(pallet_balances::Call::transfer { + function: RuntimeCall::Balances(pallet_balances::Call::transfer { dest: bob().into(), value: 0, }), @@ -223,7 +224,7 @@ fn block_weight_capacity_report() { 0, CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), + function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), }, ); @@ -286,11 +287,13 @@ fn block_length_capacity_report() { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set { now: time * 1000 }), + function: RuntimeCall::Timestamp(pallet_timestamp::Call::set { + now: time * 1000, + }), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(nonce, 0))), - function: Call::System(frame_system::Call::remark { + function: RuntimeCall::System(frame_system::Call::remark { remark: vec![0u8; (block_number * factor) as usize], }), }, diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index cedc945b0af7e..3509592ebf04a 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -18,7 +18,8 @@ //! Some configurable implementations as associated type for the substrate runtime. 
use crate::{ - AccountId, AllianceMotion, Assets, Authorship, Balances, Call, Hash, NegativeImbalance, Runtime, + AccountId, AllianceMotion, Assets, Authorship, Balances, Hash, NegativeImbalance, Runtime, + RuntimeCall, }; use frame_support::{ pallet_prelude::*, @@ -77,11 +78,11 @@ impl IdentityVerifier for AllianceIdentityVerifier { } pub struct AllianceProposalProvider; -impl ProposalProvider for AllianceProposalProvider { +impl ProposalProvider for AllianceProposalProvider { fn propose_proposal( who: AccountId, threshold: u32, - proposal: Box, + proposal: Box, length_bound: u32, ) -> Result<(u32, u32), DispatchError> { AllianceMotion::do_propose_proposed(who, threshold, proposal, length_bound) @@ -109,7 +110,7 @@ impl ProposalProvider for AllianceProposalProvider { AllianceMotion::do_close(proposal_hash, proposal_index, proposal_weight_bound, length_bound) } - fn proposal_of(proposal_hash: Hash) -> Option { + fn proposal_of(proposal_hash: Hash) -> Option { AllianceMotion::proposal_of(proposal_hash) } } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5aa488f328a5e..966a2e17ea475 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -205,7 +205,7 @@ impl frame_system::Config for Runtime { type BlockLength = RuntimeBlockLength; type DbWeight = RocksDbWeight; type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = Index; type BlockNumber = BlockNumber; type Hash = Hash; @@ -213,7 +213,7 @@ impl frame_system::Config for Runtime { type AccountId = AccountId; type Lookup = Indices; type Header = generic::Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type Version = Version; type PalletInfo = PalletInfo; @@ -229,8 +229,8 @@ impl frame_system::Config for Runtime { impl pallet_randomness_collective_flip::Config for Runtime {} impl pallet_utility::Config for Runtime { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type PalletsOrigin = OriginCaller; type WeightInfo = pallet_utility::weights::SubstrateWeight; } @@ -243,8 +243,8 @@ parameter_types! { } impl pallet_multisig::Config for Runtime { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; @@ -286,25 +286,28 @@ impl Default for ProxyType { Self::Any } } -impl InstanceFilter for ProxyType { - fn filter(&self, c: &Call) -> bool { +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { match self { ProxyType::Any => true, ProxyType::NonTransfer => !matches!( c, - Call::Balances(..) | - Call::Assets(..) | Call::Uniques(..) | - Call::Vesting(pallet_vesting::Call::vested_transfer { .. }) | - Call::Indices(pallet_indices::Call::transfer { .. }) + RuntimeCall::Balances(..) | + RuntimeCall::Assets(..) | + RuntimeCall::Uniques(..) | + RuntimeCall::Vesting(pallet_vesting::Call::vested_transfer { .. }) | + RuntimeCall::Indices(pallet_indices::Call::transfer { .. }) ), ProxyType::Governance => matches!( c, - Call::Democracy(..) | - Call::Council(..) | Call::Society(..) | - Call::TechnicalCommittee(..) | - Call::Elections(..) | Call::Treasury(..) + RuntimeCall::Democracy(..) | + RuntimeCall::Council(..) | + RuntimeCall::Society(..) | + RuntimeCall::TechnicalCommittee(..) | + RuntimeCall::Elections(..) | + RuntimeCall::Treasury(..) 
), - ProxyType::Staking => matches!(c, Call::Staking(..)), + ProxyType::Staking => matches!(c, RuntimeCall::Staking(..)), } } fn is_superset(&self, o: &Self) -> bool { @@ -319,8 +322,8 @@ impl InstanceFilter for ProxyType { } impl pallet_proxy::Config for Runtime { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type Currency = Balances; type ProxyType = ProxyType; type ProxyDepositBase = ProxyDepositBase; @@ -341,10 +344,10 @@ parameter_types! { } impl pallet_scheduler::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Origin = Origin; type PalletsOrigin = OriginCaller; - type Call = Call; + type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; type MaxScheduledPerBlock = ConstU32<50>; @@ -363,7 +366,7 @@ parameter_types! { impl pallet_preimage::Config for Runtime { type WeightInfo = pallet_preimage::weights::SubstrateWeight; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; type MaxSize = PreimageMaxSize; @@ -413,7 +416,7 @@ impl pallet_indices::Config for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = pallet_indices::weights::SubstrateWeight; } @@ -431,7 +434,7 @@ impl pallet_balances::Config for Runtime { type ReserveIdentifier = [u8; 8]; type Balance = Balance; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Pallet; type WeightInfo = pallet_balances::weights::SubstrateWeight; @@ -446,7 +449,7 @@ parameter_types! { } impl pallet_transaction_payment::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter; type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee; @@ -456,7 +459,7 @@ impl pallet_transaction_payment::Config for Runtime { } impl pallet_asset_tx_payment::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< pallet_assets::BalanceToAssetBalance, @@ -496,7 +499,7 @@ impl_opaque_keys! { } impl pallet_session::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; @@ -546,7 +549,7 @@ impl pallet_staking::Config for Runtime { type UnixTime = Timestamp; type CurrencyToVote = U128CurrencyToVote; type RewardRemainder = Treasury; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Slash = Treasury; // send the slashed funds to the treasury. type Reward = (); // rewards are minted from the void type SessionsPerEra = SessionsPerEra; @@ -686,7 +689,7 @@ impl pallet_election_provider_multi_phase::MinerConfig for Runtime { } impl pallet_election_provider_multi_phase::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type EstimateCallFee = TransactionPayment; type SignedPhase = SignedPhase; @@ -721,7 +724,7 @@ parameter_types! 
{ } impl pallet_bags_list::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ScoreProvider = Staking; type WeightInfo = pallet_bags_list::weights::SubstrateWeight; type BagThresholds = BagThresholds; @@ -750,7 +753,7 @@ impl Convert for U256ToBalance { impl pallet_nomination_pools::Config for Runtime { type WeightInfo = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = Balance; type RewardCounter = FixedU128; @@ -770,7 +773,7 @@ parameter_types! { impl pallet_conviction_voting::Config for Runtime { type WeightInfo = pallet_conviction_voting::weights::SubstrateWeight; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type VoteLockingPeriod = VoteLockingPeriod; type MaxVotes = ConstU32<512>; @@ -827,8 +830,8 @@ impl pallet_referenda::TracksInfo for TracksInfo { impl pallet_referenda::Config for Runtime { type WeightInfo = pallet_referenda::weights::SubstrateWeight; - type Call = Call; - type Event = Event; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type Scheduler = Scheduler; type Currency = pallet_balances::Pallet; type SubmitOrigin = EnsureSigned; @@ -846,8 +849,8 @@ impl pallet_referenda::Config for Runtime { impl pallet_referenda::Config for Runtime { type WeightInfo = pallet_referenda::weights::SubstrateWeight; - type Call = Call; - type Event = Event; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type Scheduler = Scheduler; type Currency = pallet_balances::Pallet; type SubmitOrigin = EnsureSigned; @@ -865,7 +868,7 @@ impl pallet_referenda::Config for Runtime { impl pallet_ranked_collective::Config for Runtime { type WeightInfo = pallet_ranked_collective::weights::SubstrateWeight; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type PromoteOrigin = EnsureRootWithSuccess>; type DemoteOrigin = EnsureRootWithSuccess>; type Polls = RankedPolls; @@ -875,7 +878,7 @@ impl pallet_ranked_collective::Config for Runtime { impl pallet_remark::Config for Runtime { type WeightInfo = pallet_remark::weights::SubstrateWeight; - type Event = Event; + type RuntimeEvent = RuntimeEvent; } parameter_types! { @@ -889,8 +892,8 @@ parameter_types! { } impl pallet_democracy::Config for Runtime { - type Proposal = Call; - type Event = Event; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type EnactmentPeriod = EnactmentPeriod; type LaunchPeriod = LaunchPeriod; @@ -948,8 +951,8 @@ parameter_types! { type CouncilCollective = pallet_collective::Instance1; impl pallet_collective::Config for Runtime { type Origin = Origin; - type Proposal = Call; - type Event = Event; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type MotionDuration = CouncilMotionDuration; type MaxProposals = CouncilMaxProposals; type MaxMembers = CouncilMaxMembers; @@ -975,7 +978,7 @@ parameter_types! { const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); impl pallet_elections_phragmen::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type PalletId = ElectionsPhragmenPalletId; type Currency = Balances; type ChangeMembers = Council; @@ -1005,8 +1008,8 @@ parameter_types! 
{ type TechnicalCollective = pallet_collective::Instance2; impl pallet_collective::Config for Runtime { type Origin = Origin; - type Proposal = Call; - type Event = Event; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type MotionDuration = TechnicalMotionDuration; type MaxProposals = TechnicalMaxProposals; type MaxMembers = TechnicalMaxMembers; @@ -1019,7 +1022,7 @@ type EnsureRootOrHalfCouncil = EitherOfDiverse< pallet_collective::EnsureProportionMoreThan, >; impl pallet_membership::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type AddOrigin = EnsureRootOrHalfCouncil; type RemoveOrigin = EnsureRootOrHalfCouncil; type SwapOrigin = EnsureRootOrHalfCouncil; @@ -1056,7 +1059,7 @@ impl pallet_treasury::Config for Runtime { EnsureRoot, pallet_collective::EnsureProportionMoreThan, >; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ProposalBondMinimum; @@ -1082,7 +1085,7 @@ parameter_types! { } impl pallet_bounties::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BountyDepositBase = BountyDepositBase; type BountyDepositPayoutDelay = BountyDepositPayoutDelay; type BountyUpdatePeriod = BountyUpdatePeriod; @@ -1101,14 +1104,14 @@ parameter_types! { } impl pallet_child_bounties::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type MaxActiveChildBountyCount = ConstU32<5>; type ChildBountyValueMinimum = ChildBountyValueMinimum; type WeightInfo = pallet_child_bounties::weights::SubstrateWeight; } impl pallet_tips::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DataDepositPerByte = DataDepositPerByte; type MaximumReasonLength = MaximumReasonLength; type Tippers = Elections; @@ -1136,8 +1139,8 @@ impl pallet_contracts::Config for Runtime { type Time = Timestamp; type Randomness = RandomnessCollectiveFlip; type Currency = Balances; - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; /// The safest default is to allow no calls at all. /// /// Runtimes should whitelist dispatchables that are allowed to be called from contracts @@ -1162,8 +1165,8 @@ impl pallet_contracts::Config for Runtime { } impl pallet_sudo::Config for Runtime { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; } parameter_types! { @@ -1178,14 +1181,14 @@ parameter_types! { impl frame_system::offchain::CreateSignedTransaction for Runtime where - Call: From, + RuntimeCall: From, { fn create_transaction>( - call: Call, + call: RuntimeCall, public: ::Signer, account: AccountId, nonce: Index, - ) -> Option<(Call, ::SignaturePayload)> { + ) -> Option<(RuntimeCall, ::SignaturePayload)> { let tip = 0; // take the biggest period possible. 
let period = @@ -1225,15 +1228,15 @@ impl frame_system::offchain::SigningTypes for Runtime { impl frame_system::offchain::SendTransactionTypes for Runtime where - Call: From, + RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = Call; + type OverarchingCall = RuntimeCall; } impl pallet_im_online::Config for Runtime { type AuthorityId = ImOnlineId; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type NextSessionRotation = Babe; type ValidatorSet = Historical; type ReportUnresponsiveness = Offences; @@ -1245,7 +1248,7 @@ impl pallet_im_online::Config for Runtime { } impl pallet_offences::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; } @@ -1255,8 +1258,7 @@ impl pallet_authority_discovery::Config for Runtime { } impl pallet_grandpa::Config for Runtime { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; type KeyOwnerProofSystem = Historical; @@ -1288,7 +1290,7 @@ parameter_types! { } impl pallet_identity::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type BasicDeposit = BasicDeposit; type FieldDeposit = FieldDeposit; @@ -1310,9 +1312,9 @@ parameter_types! { } impl pallet_recovery::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = pallet_recovery::weights::SubstrateWeight; - type Call = Call; + type RuntimeCall = RuntimeCall; type Currency = Balances; type ConfigDepositBase = ConfigDepositBase; type FriendDepositFactor = FriendDepositFactor; @@ -1333,7 +1335,7 @@ parameter_types! { } impl pallet_society::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type PalletId = SocietyPalletId; type Currency = Balances; type Randomness = RandomnessCollectiveFlip; @@ -1356,7 +1358,7 @@ parameter_types! { } impl pallet_vesting::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; @@ -1383,10 +1385,10 @@ parameter_types! { impl pallet_lottery::Config for Runtime { type PalletId = LotteryPalletId; - type Call = Call; + type RuntimeCall = RuntimeCall; type Currency = Balances; type Randomness = RandomnessCollectiveFlip; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ManagerOrigin = EnsureRoot; type MaxCalls = MaxCalls; type ValidateCall = Lottery; @@ -1403,7 +1405,7 @@ parameter_types! { } impl pallet_assets::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Balance = u128; type AssetId = u32; type Currency = Balances; @@ -1431,7 +1433,7 @@ parameter_types! { } impl pallet_gilt::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = Balance; type AdminOrigin = frame_system::EnsureRoot; @@ -1456,7 +1458,7 @@ parameter_types! 
{ } impl pallet_uniques::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type CollectionId = u32; type ItemId = u32; type Currency = Balances; @@ -1477,9 +1479,9 @@ impl pallet_uniques::Config for Runtime { } impl pallet_transaction_storage::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type Call = Call; + type RuntimeCall = RuntimeCall; type FeeDestination = (); type WeightInfo = pallet_transaction_storage::weights::SubstrateWeight; type MaxBlockTransactions = @@ -1489,8 +1491,8 @@ impl pallet_transaction_storage::Config for Runtime { } impl pallet_whitelist::Config for Runtime { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type WhitelistOrigin = EnsureRoot; type DispatchWhitelistedOrigin = EnsureRoot; type PreimageProvider = Preimage; @@ -1504,7 +1506,7 @@ parameter_types! { } impl pallet_state_trie_migration::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ControlOrigin = EnsureRoot; type Currency = Balances; type MaxKeyLen = MigrationMaxKeyLen; @@ -1529,8 +1531,8 @@ parameter_types! { type AllianceCollective = pallet_collective::Instance3; impl pallet_collective::Config for Runtime { type Origin = Origin; - type Proposal = Call; - type Event = Event; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type MotionDuration = AllianceMotionDuration; type MaxProposals = AllianceMaxProposals; type MaxMembers = AllianceMaxMembers; @@ -1547,8 +1549,8 @@ parameter_types! { } impl pallet_alliance::Config for Runtime { - type Event = Event; - type Proposal = Call; + type RuntimeEvent = RuntimeEvent; + type Proposal = RuntimeCall; type AdminOrigin = EitherOfDiverse< EnsureRoot, pallet_collective::EnsureProportionMoreThan, @@ -1673,12 +1675,14 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_asset_tx_payment::ChargeAssetTxPayment, ); + /// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; /// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; +pub type SignedPayload = generic::SignedPayload; /// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = generic::CheckedExtrinsic; +pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< Runtime, @@ -1977,13 +1981,13 @@ impl_runtime_apis! 
{ } } - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi for Runtime { - fn query_call_info(call: Call, len: u32) -> RuntimeDispatchInfo { + fn query_call_info(call: RuntimeCall, len: u32) -> RuntimeDispatchInfo { TransactionPayment::query_call_info(call, len) } - fn query_call_fee_details(call: Call, len: u32) -> FeeDetails { + fn query_call_fee_details(call: RuntimeCall, len: u32) -> FeeDetails { TransactionPayment::query_call_fee_details(call, len) } } @@ -2178,7 +2182,7 @@ mod tests { fn validate_transaction_submitter_bounds() { fn is_submit_signed_transaction() where - T: CreateSignedTransaction, + T: CreateSignedTransaction, { } @@ -2198,11 +2202,11 @@ mod tests { #[test] fn call_size() { - let size = core::mem::size_of::(); + let size = core::mem::size_of::(); assert!( size <= 208, - "size of Call {} is more than 208 bytes: some calls have too big arguments, use Box to reduce the - size of Call. + "size of RuntimeCall {} is more than 208 bytes: some calls have too big arguments, use Box to reduce the + size of RuntimeCall. If the limit is too strong, maybe consider increase the limit to 300.", size, ); diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 534f0a4f09732..7980cc102fb38 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -35,8 +35,8 @@ use crate::{ use codec::{Decode, Encode}; use futures::executor; use kitchensink_runtime::{ - constants::currency::DOLLARS, AccountId, BalancesCall, Call, CheckedExtrinsic, MinimumPeriod, - Signature, SystemCall, UncheckedExtrinsic, + constants::currency::DOLLARS, AccountId, BalancesCall, CheckedExtrinsic, MinimumPeriod, + RuntimeCall, Signature, SystemCall, UncheckedExtrinsic, }; use node_primitives::Block; use sc_block_builder::BlockBuilderProvider; @@ -308,12 +308,12 @@ impl<'a> Iterator for BlockContentIterator<'a> { )), function: match self.content.block_type { BlockType::RandomTransfersKeepAlive => - Call::Balances(BalancesCall::transfer_keep_alive { + RuntimeCall::Balances(BalancesCall::transfer_keep_alive { dest: sp_runtime::MultiAddress::Id(receiver), value: kitchensink_runtime::ExistentialDeposit::get() + 1, }), BlockType::RandomTransfersReaping => { - Call::Balances(BalancesCall::transfer { + RuntimeCall::Balances(BalancesCall::transfer { dest: sp_runtime::MultiAddress::Id(receiver), // Transfer so that ending balance would be 1 less than existential // deposit so that we kill the sender account. 
@@ -321,7 +321,8 @@ impl<'a> Iterator for BlockContentIterator<'a> { (kitchensink_runtime::ExistentialDeposit::get() - 1), }) }, - BlockType::Noop => Call::System(SystemCall::remark { remark: Vec::new() }), + BlockType::Noop => + RuntimeCall::System(SystemCall::remark { remark: Vec::new() }), }, }, self.runtime_version.spec_version, diff --git a/docs/Upgrading-2.0-to-3.0.md b/docs/Upgrading-2.0-to-3.0.md index bcba7d88047ce..210023c3aec5a 100644 --- a/docs/Upgrading-2.0-to-3.0.md +++ b/docs/Upgrading-2.0-to-3.0.md @@ -148,7 +148,7 @@ And update the overall definition for weights on frame and a few related types a + type BlockLength = RuntimeBlockLength; + type DbWeight = RocksDbWeight; type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = Index; @@ -171,25 +198,19 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index de2fc392db8f1..e07d7c44a97ff 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -35,7 +35,7 @@ const SEED: u32 = 0; const MAX_BYTES: u32 = 1_024; -fn assert_last_event, I: 'static>(generic_event: >::Event) { +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 8f39b6b51cabd..9d320c4215a70 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -240,7 +240,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// The outer call dispatch type. type Proposal: Parameter @@ -249,7 +250,7 @@ pub mod pallet { + From> + GetDispatchInfo + IsSubType> - + IsType<::Call>; + + IsType<::RuntimeCall>; /// Origin for admin-level operations, like setting the Alliance's rules. type AdminOrigin: EnsureOrigin; diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index 45c13b76cc525..42c97876d40b0 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -48,7 +48,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = BlockNumber; type Hash = H256; @@ -56,7 +56,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type DbWeight = (); type Version = (); @@ -77,7 +77,7 @@ parameter_types! { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -96,8 +96,8 @@ parameter_types! 
{ type AllianceCollective = pallet_collective::Instance1; impl pallet_collective::Config for Test { type Origin = Origin; - type Proposal = Call; - type Event = Event; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type MotionDuration = MotionDuration; type MaxProposals = MaxProposals; type MaxMembers = MaxMembers; @@ -124,7 +124,7 @@ type EnsureOneOrRoot = EitherOfDiverse, EnsureSignedBy type EnsureTwoOrRoot = EitherOfDiverse, EnsureSignedBy>; impl pallet_identity::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type BasicDeposit = BasicDeposit; type FieldDeposit = FieldDeposit; @@ -162,11 +162,11 @@ impl IdentityVerifier for AllianceIdentityVerifier { } pub struct AllianceProposalProvider; -impl ProposalProvider for AllianceProposalProvider { +impl ProposalProvider for AllianceProposalProvider { fn propose_proposal( who: u64, threshold: u32, - proposal: Box, + proposal: Box, length_bound: u32, ) -> Result<(u32, u32), DispatchError> { AllianceMotion::do_propose_proposed(who, threshold, proposal, length_bound) @@ -194,7 +194,7 @@ impl ProposalProvider for AllianceProposalProvider { AllianceMotion::do_close(proposal_hash, proposal_index, proposal_weight_bound, length_bound) } - fn proposal_of(proposal_hash: H256) -> Option { + fn proposal_of(proposal_hash: H256) -> Option { AllianceMotion::proposal_of(proposal_hash) } } @@ -207,8 +207,8 @@ parameter_types! { pub const RetirementPeriod: BlockNumber = MOTION_DURATION_IN_BLOCKS + 1; } impl Config for Test { - type Event = Event; - type Proposal = Call; + type RuntimeEvent = RuntimeEvent; + type Proposal = RuntimeCall; type AdminOrigin = EnsureSignedBy; type MembershipManager = EnsureSignedBy; type AnnouncementOrigin = EnsureSignedBy; @@ -380,19 +380,19 @@ pub fn test_cid() -> Cid { Cid::new_v0(&*result) } -pub fn make_remark_proposal(value: u64) -> (Call, u32, H256) { - make_proposal(Call::System(frame_system::Call::remark { remark: value.encode() })) +pub fn make_remark_proposal(value: u64) -> (RuntimeCall, u32, H256) { + make_proposal(RuntimeCall::System(frame_system::Call::remark { remark: value.encode() })) } -pub fn make_set_rule_proposal(rule: Cid) -> (Call, u32, H256) { - make_proposal(Call::Alliance(pallet_alliance::Call::set_rule { rule })) +pub fn make_set_rule_proposal(rule: Cid) -> (RuntimeCall, u32, H256) { + make_proposal(RuntimeCall::Alliance(pallet_alliance::Call::set_rule { rule })) } -pub fn make_kick_member_proposal(who: u64) -> (Call, u32, H256) { - make_proposal(Call::Alliance(pallet_alliance::Call::kick_member { who })) +pub fn make_kick_member_proposal(who: u64) -> (RuntimeCall, u32, H256) { + make_proposal(RuntimeCall::Alliance(pallet_alliance::Call::kick_member { who })) } -pub fn make_proposal(proposal: Call) -> (Call, u32, H256) { +pub fn make_proposal(proposal: RuntimeCall) -> (RuntimeCall, u32, H256) { let len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); (proposal, len, hash) diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs index 943c39a2d69c0..79fd33965edb5 100644 --- a/frame/alliance/src/tests.rs +++ b/frame/alliance/src/tests.rs @@ -63,7 +63,7 @@ fn init_members_works() { // assert a retiring member from previous Alliance not removed assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); - System::assert_last_event(mock::Event::Alliance(crate::Event::MembersInitialized { + System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MembersInitialized { 
founders: vec![5, 8], fellows: vec![4], allies: vec![2], @@ -117,7 +117,7 @@ fn disband_works() { // deposit unreserved assert_eq!(Balances::free_balance(9), 40); - System::assert_last_event(mock::Event::Alliance(crate::Event::AllianceDisbanded { + System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::AllianceDisbanded { voting_members: 2, ally_members: 1, unreserved: 1, @@ -154,7 +154,7 @@ fn propose_works() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { + event: mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, @@ -182,13 +182,13 @@ fn vote_works() { assert_eq!( System::events(), vec![ - record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Voted { account: 2, proposal_hash: hash, voted: true, @@ -236,19 +236,19 @@ fn veto_works() { assert_eq!( System::events(), vec![ - record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 1, proposal_hash: vetoable_hash, threshold: 3 })), - record(mock::Event::AllianceMotion(AllianceMotionEvent::Disapproved { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Disapproved { proposal_hash: vetoable_hash })), ] @@ -276,42 +276,42 @@ fn close_works() { assert_eq!( System::events(), vec![ - record(mock::Event::AllianceMotion(AllianceMotionEvent::Proposed { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0, })), - record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0, })), - record(mock::Event::AllianceMotion(AllianceMotionEvent::Voted { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Voted { account: 3, proposal_hash: hash, voted: true, yes: 3, no: 0, })), - record(mock::Event::AllianceMotion(AllianceMotionEvent::Closed { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Closed { proposal_hash: hash, yes: 3, no: 0, })), - record(mock::Event::AllianceMotion(AllianceMotionEvent::Approved { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Approved { proposal_hash: hash })), - record(mock::Event::AllianceMotion(AllianceMotionEvent::Executed { + record(mock::RuntimeEvent::AllianceMotion(AllianceMotionEvent::Executed { proposal_hash: hash, result: Err(DispatchError::BadOrigin), })) @@ -327,7 +327,9 @@ fn set_rule_works() { assert_ok!(Alliance::set_rule(Origin::signed(1), cid.clone())); assert_eq!(Alliance::rule(), Some(cid.clone())); - 
System::assert_last_event(mock::Event::Alliance(crate::Event::NewRuleSet { rule: cid })); + System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::NewRuleSet { + rule: cid, + })); }); } @@ -341,7 +343,7 @@ fn announce_works() { assert_ok!(Alliance::announce(Origin::signed(3), cid.clone())); assert_eq!(Alliance::announcements(), vec![cid.clone()]); - System::assert_last_event(mock::Event::Alliance(crate::Event::Announced { + System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { announcement: cid, })); }); @@ -353,7 +355,7 @@ fn remove_announcement_works() { let cid = test_cid(); assert_ok!(Alliance::announce(Origin::signed(3), cid.clone())); assert_eq!(Alliance::announcements(), vec![cid.clone()]); - System::assert_last_event(mock::Event::Alliance(crate::Event::Announced { + System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { announcement: cid.clone(), })); @@ -361,9 +363,9 @@ fn remove_announcement_works() { assert_ok!(Alliance::remove_announcement(Origin::signed(3), cid.clone())); assert_eq!(Alliance::announcements(), vec![]); - System::assert_last_event(mock::Event::Alliance(crate::Event::AnnouncementRemoved { - announcement: cid, - })); + System::assert_last_event(mock::RuntimeEvent::Alliance( + crate::Event::AnnouncementRemoved { announcement: cid }, + )); }); } @@ -498,7 +500,7 @@ fn give_retirement_notice_work() { assert_ok!(Alliance::give_retirement_notice(Origin::signed(3))); assert_eq!(Alliance::members(MemberRole::Fellow), Vec::::new()); assert_eq!(Alliance::members(MemberRole::Retiring), vec![3]); - System::assert_last_event(mock::Event::Alliance( + System::assert_last_event(mock::RuntimeEvent::Alliance( crate::Event::MemberRetirementPeriodStarted { member: (3) }, )); @@ -531,7 +533,7 @@ fn retire_works() { System::set_block_number(System::block_number() + RetirementPeriod::get()); assert_ok!(Alliance::retire(Origin::signed(3))); assert_eq!(Alliance::members(MemberRole::Fellow), Vec::::new()); - System::assert_last_event(mock::Event::Alliance(crate::Event::MemberRetired { + System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberRetired { member: (3), unreserved: None, })); @@ -585,7 +587,7 @@ fn kick_member_works() { assert_ok!(Alliance::kick_member(Origin::signed(2), 2)); assert_eq!(Alliance::members(MemberRole::Founder), vec![1]); assert_eq!(>::get(2), None); - System::assert_last_event(mock::Event::Alliance(crate::Event::MemberKicked { + System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberKicked { member: (2), slashed: Some(25), })); diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index 043d3f1a1aef4..ca891c2f42e4a 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -140,11 +140,11 @@ fn add_approvals, I: 'static>(minter: T::AccountId, n: u32) { } } -fn assert_last_event, I: 'static>(generic_event: >::Event) { +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } -fn assert_event, I: 'static>(generic_event: >::Event) { +fn assert_event, I: 'static>(generic_event: >::RuntimeEvent) { frame_system::Pallet::::assert_has_event(generic_event.into()); } diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index d7ca83c8a84e3..fbb4a3ea1568b 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -180,7 +180,8 @@ pub mod pallet { /// The module configuration trait. 
pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// The units in which we record balances. type Balance: Member diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 5429c89084e78..9344e5920d205 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -50,7 +50,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -58,7 +58,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -75,7 +75,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -85,7 +85,7 @@ impl pallet_balances::Config for Test { } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Balance = u64; type AssetId = u32; type Currency = Balances; diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 1b6c62c55ee20..9c6056497118c 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -166,7 +166,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Swap action. type SwapAction: SwapAction + Parameter + MaxEncodedLen; /// Limit of proof size. 
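For readers tracking the Event -> RuntimeEvent rename, a minimal sketch (illustrative only, not part of this patch; the helper name is hypothetical) of what the two bounds on the renamed associated type in the atomic-swap hunk above provide:

fn deposit_swap_event_sketch<T: pallet_atomic_swap::Config>(
    event: pallet_atomic_swap::Event<T>,
) {
    // The `From<Event<Self>>` bound lifts the pallet-local event into the
    // pallet's view of the overarching runtime event enum.
    let overarching: <T as pallet_atomic_swap::Config>::RuntimeEvent = event.into();
    // The `IsType<<Self as frame_system::Config>::RuntimeEvent>` bound says that
    // enum is exactly the one frame_system aggregates, so it can be recorded for
    // the block; this is roughly what the generated `deposit_event` does.
    frame_system::Pallet::<T>::deposit_event(overarching);
}
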
diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index ffb548a1f29ed..d3d8b37d82794 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -41,12 +41,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -65,14 +65,14 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type SwapAction = BalanceSwapAction; type ProofLimit = ConstU32<1024>; } diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 5fa298bb008b0..673190c95f2e1 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -59,13 +59,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index d5f9783f153c8..3505f20a05929 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -209,7 +209,7 @@ mod tests { type Keys = UintAuthorityId; type ShouldEndSession = pallet_session::PeriodicSessions; type SessionHandler = TestSessionHandler; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorId = AuthorityId; type ValidatorIdOf = ConvertInto; type NextSessionRotation = pallet_session::PeriodicSessions; @@ -238,13 +238,13 @@ mod tests { type Origin = Origin; type Index = u64; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AuthorityId; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index ff18e047db048..3f484ef24eec0 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -471,13 +471,13 @@ mod tests { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 3a6348404b9a7..0ac8214f45c23 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -77,14 +77,14 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type 
BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Version = (); type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = DummyValidatorId; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; @@ -98,10 +98,10 @@ impl frame_system::Config for Test { impl frame_system::offchain::SendTransactionTypes for Test where - Call: From, + RuntimeCall: From, { - type OverarchingCall = Call; - type Extrinsic = TestXt; + type OverarchingCall = RuntimeCall; + type Extrinsic = TestXt; } impl_opaque_keys! { @@ -111,7 +111,7 @@ impl_opaque_keys! { } impl pallet_session::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; @@ -147,7 +147,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; type WeightInfo = (); @@ -184,7 +184,7 @@ impl pallet_staking::Config for Test { type MaxNominations = ConstU32<16>; type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = ::Balance; type Slash = (); @@ -209,7 +209,7 @@ impl pallet_staking::Config for Test { } impl pallet_offences::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; } diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 70f8b2a32f817..02adc064790f2 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -105,7 +105,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. type WeightInfo: weights::WeightInfo; diff --git a/frame/bags-list/src/mock.rs b/frame/bags-list/src/mock.rs index 3bc1b34657262..1044e4c450a41 100644 --- a/frame/bags-list/src/mock.rs +++ b/frame/bags-list/src/mock.rs @@ -52,13 +52,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type DbWeight = (); type BlockLength = (); @@ -78,7 +78,7 @@ parameter_types! { } impl bags_list::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type BagThresholds = BagThresholds; type ScoreProvider = StakingMock; diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 0a45350366449..e425381172dab 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -218,7 +218,8 @@ pub mod pallet { type DustRemoval: OnUnbalanced>; /// The overarching event type. 
- type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// The minimum amount required to keep an account open. #[pallet::constant] diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 96bee9be1991c..9c641f6b38a22 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -38,15 +38,15 @@ macro_rules! decl_tests { const ID_1: LockIdentifier = *b"1 "; const ID_2: LockIdentifier = *b"2 "; - pub const CALL: &<$test as frame_system::Config>::Call = - &Call::Balances(pallet_balances::Call::transfer { dest: 0, value: 0 }); + pub const CALL: &<$test as frame_system::Config>::RuntimeCall = + &RuntimeCall::Balances(pallet_balances::Call::transfer { dest: 0, value: 0 }); /// create a transaction info struct from weight. Handy to avoid building the whole struct. pub fn info_from_weight(w: Weight) -> DispatchInfo { DispatchInfo { weight: w, ..Default::default() } } - fn events() -> Vec { + fn events() -> Vec { let evt = System::events().into_iter().map(|evt| evt.event).collect::>(); System::reset_events(); @@ -314,7 +314,7 @@ macro_rules! decl_tests { <$ext_builder>::default().monied(true).build().execute_with(|| { assert_eq!(Balances::total_balance(&1), 10); assert_ok!(Balances::deposit_into_existing(&1, 10).map(drop)); - System::assert_last_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 10 })); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Deposit { who: 1, amount: 10 })); assert_eq!(Balances::total_balance(&1), 20); assert_eq!(>::get(), 120); }); @@ -342,7 +342,7 @@ macro_rules! decl_tests { fn balance_works() { <$ext_builder>::default().build().execute_with(|| { let _ = Balances::deposit_creating(&1, 42); - System::assert_has_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 42 })); + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { who: 1, amount: 42 })); assert_eq!(Balances::free_balance(1), 42); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::total_balance(&1), 42); @@ -444,7 +444,7 @@ macro_rules! decl_tests { let _ = Balances::withdraw( &2, 11, WithdrawReasons::TRANSFER, ExistenceRequirement::KeepAlive ); - System::assert_last_event(Event::Balances(crate::Event::Withdraw { who: 2, amount: 11 })); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Withdraw { who: 2, amount: 11 })); assert_eq!(Balances::free_balance(2), 100); assert_eq!(>::get(), 100); }); @@ -505,7 +505,7 @@ macro_rules! decl_tests { assert_ok!(Balances::reserve(&1, 110)); assert_ok!(Balances::repatriate_reserved(&1, &2, 41, Status::Free), 0); System::assert_last_event( - Event::Balances(crate::Event::ReserveRepatriated { from: 1, to: 2, amount: 41, destination_status: Status::Free }) + RuntimeEvent::Balances(crate::Event::ReserveRepatriated { from: 1, to: 2, amount: 41, destination_status: Status::Free }) ); assert_eq!(Balances::reserved_balance(1), 69); assert_eq!(Balances::free_balance(1), 0); @@ -740,18 +740,18 @@ macro_rules! 
decl_tests { System::set_block_number(2); assert_ok!(Balances::reserve(&1, 10)); - System::assert_last_event(Event::Balances(crate::Event::Reserved { who: 1, amount: 10 })); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Reserved { who: 1, amount: 10 })); System::set_block_number(3); assert!(Balances::unreserve(&1, 5).is_zero()); - System::assert_last_event(Event::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); System::set_block_number(4); assert_eq!(Balances::unreserve(&1, 6), 1); // should only unreserve 5 - System::assert_last_event(Event::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Unreserved { who: 1, amount: 5 })); }); } @@ -766,9 +766,9 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::System(system::Event::NewAccount { account: 1 }), - Event::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), - Event::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), + RuntimeEvent::System(system::Event::NewAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), ] ); @@ -778,9 +778,9 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::System(system::Event::KilledAccount { account: 1 }), - Event::Balances(crate::Event::DustLost { account: 1, amount: 99 }), - Event::Balances(crate::Event::Slashed { who: 1, amount: 1 }), + RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::DustLost { account: 1, amount: 99 }), + RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 1 }), ] ); }); @@ -797,9 +797,9 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::System(system::Event::NewAccount { account: 1 }), - Event::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), - Event::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), + RuntimeEvent::System(system::Event::NewAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), ] ); @@ -809,8 +809,8 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::System(system::Event::KilledAccount { account: 1 }), - Event::Balances(crate::Event::Slashed { who: 1, amount: 100 }), + RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 100 }), ] ); }); @@ -830,7 +830,7 @@ macro_rules! decl_tests { assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); // Account is still alive assert!(System::account_exists(&1)); - System::assert_last_event(Event::Balances(crate::Event::Slashed { who: 1, amount: 900 })); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 900 })); // SCENARIO: Slash will kill account because not enough balance left. 
assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 266c37063d2f8..fa1d2db8b1e49 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -57,13 +57,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -77,7 +77,7 @@ impl frame_system::Config for Test { } impl pallet_transaction_payment::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter, ()>; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; @@ -88,7 +88,7 @@ impl pallet_transaction_payment::Config for Test { impl Config for Test { type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = frame_system::Pallet; type MaxLocks = (); diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index ebb82f41a545c..3301d90027682 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -58,13 +58,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -78,7 +78,7 @@ impl frame_system::Config for Test { } impl pallet_transaction_payment::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter, ()>; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; @@ -89,7 +89,7 @@ impl pallet_transaction_payment::Config for Test { impl Config for Test { type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = StorageMapShim, system::Provider, u64, super::AccountData>; @@ -158,9 +158,9 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!( events(), [ - Event::System(system::Event::NewAccount { account: 1 }), - Event::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), - Event::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), + RuntimeEvent::System(system::Event::NewAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::Endowed { account: 1, free_balance: 100 }), + RuntimeEvent::Balances(crate::Event::BalanceSet { who: 1, free: 100, reserved: 0 }), ] ); @@ -168,7 +168,10 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!(res, (NegativeImbalance::new(98), 0)); // no events - assert_eq!(events(), [Event::Balances(crate::Event::Slashed { who: 1, amount: 98 })]); + assert_eq!( + events(), + [RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 98 })] + ); let res = Balances::slash(&1, 1); 
assert_eq!(res, (NegativeImbalance::new(1), 0)); @@ -176,9 +179,9 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!( events(), [ - Event::System(system::Event::KilledAccount { account: 1 }), - Event::Balances(crate::Event::DustLost { account: 1, amount: 1 }), - Event::Balances(crate::Event::Slashed { who: 1, amount: 1 }) + RuntimeEvent::System(system::Event::KilledAccount { account: 1 }), + RuntimeEvent::Balances(crate::Event::DustLost { account: 1, amount: 1 }), + RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 1 }) ] ); }); diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index fbfd62eb63259..6fda1697dcfeb 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -62,13 +62,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -91,7 +91,7 @@ impl OnUnbalanced> for OnDustRemoval { impl Config for Test { type Balance = u64; type DustRemoval = OnDustRemoval; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = StorageMapShim, system::Provider, u64, super::AccountData>; @@ -157,16 +157,19 @@ fn transfer_dust_removal_tst1_should_work() { // Verify the events assert_eq!(System::events().len(), 12); - System::assert_has_event(Event::Balances(crate::Event::Transfer { + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { from: 2, to: 3, amount: 450, })); - System::assert_has_event(Event::Balances(crate::Event::DustLost { + System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { account: 2, amount: 50, })); - System::assert_has_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 50 })); + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { + who: 1, + amount: 50, + })); }); } @@ -192,16 +195,19 @@ fn transfer_dust_removal_tst2_should_work() { // Verify the events assert_eq!(System::events().len(), 10); - System::assert_has_event(Event::Balances(crate::Event::Transfer { + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Transfer { from: 2, to: 1, amount: 450, })); - System::assert_has_event(Event::Balances(crate::Event::DustLost { + System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { account: 2, amount: 50, })); - System::assert_has_event(Event::Balances(crate::Event::Deposit { who: 1, amount: 50 })); + System::assert_has_event(RuntimeEvent::Balances(crate::Event::Deposit { + who: 1, + amount: 50, + })); }); } @@ -236,18 +242,21 @@ fn repatriating_reserved_balance_dust_removal_should_work() { // Verify the events assert_eq!(System::events().len(), 11); - System::assert_has_event(Event::Balances(crate::Event::ReserveRepatriated { + System::assert_has_event(RuntimeEvent::Balances(crate::Event::ReserveRepatriated { from: 2, to: 1, amount: 450, destination_status: Status::Free, })); - System::assert_has_event(Event::Balances(crate::Event::DustLost { + System::assert_has_event(RuntimeEvent::Balances(crate::Event::DustLost { account: 2, amount: 50, })); - System::assert_last_event(Event::Balances(crate::Event::Deposit 
{ who: 1, amount: 50 })); + System::assert_last_event(RuntimeEvent::Balances(crate::Event::Deposit { + who: 1, + amount: 50, + })); }); } diff --git a/frame/beefy-mmr/src/mock.rs b/frame/beefy-mmr/src/mock.rs index 8a673c9d4e914..792b775b77998 100644 --- a/frame/beefy-mmr/src/mock.rs +++ b/frame/beefy-mmr/src/mock.rs @@ -71,12 +71,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -90,7 +90,7 @@ impl frame_system::Config for Test { } impl pallet_session::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type ShouldEndSession = pallet_session::PeriodicSessions, ConstU64<0>>; diff --git a/frame/beefy/src/mock.rs b/frame/beefy/src/mock.rs index 3bb59c7c39485..7c4cd1af01533 100644 --- a/frame/beefy/src/mock.rs +++ b/frame/beefy/src/mock.rs @@ -66,12 +66,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -95,7 +95,7 @@ parameter_types! { } impl pallet_session::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type ShouldEndSession = pallet_session::PeriodicSessions, ConstU64<0>>; diff --git a/frame/benchmarking/src/baseline.rs b/frame/benchmarking/src/baseline.rs index 1ceb9a4f8904c..38aa7b261cb01 100644 --- a/frame/benchmarking/src/baseline.rs +++ b/frame/benchmarking/src/baseline.rs @@ -179,13 +179,13 @@ pub mod mock { type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index b8a888767bf2c..fd7e0cce03c22 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -94,12 +94,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/benchmarking/src/tests_instance.rs b/frame/benchmarking/src/tests_instance.rs index ef8351d37e957..7617696a949cf 100644 --- a/frame/benchmarking/src/tests_instance.rs +++ b/frame/benchmarking/src/tests_instance.rs @@ -33,7 +33,7 @@ mod pallet_test { frame_support::decl_storage! 
{ trait Store for Module, I: Instance = DefaultInstance> as Test where - ::OtherEvent: Into<>::Event> + ::OtherEvent: Into<>::RuntimeEvent> { pub Value get(fn value): Option; } @@ -41,7 +41,7 @@ mod pallet_test { frame_support::decl_module! { pub struct Module, I: Instance = DefaultInstance> for enum Call where - origin: T::Origin, ::OtherEvent: Into<>::Event> + origin: T::Origin, ::OtherEvent: Into<>::RuntimeEvent> { #[weight = 0] fn set_value(origin, n: u32) -> frame_support::dispatch::DispatchResult { @@ -64,9 +64,9 @@ mod pallet_test { pub trait Config: frame_system::Config + OtherConfig where - Self::OtherEvent: Into<>::Event>, + Self::OtherEvent: Into<>::RuntimeEvent>, { - type Event; + type RuntimeEvent; type LowerBound: Get; type UpperBound: Get; } @@ -95,12 +95,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; @@ -114,13 +114,13 @@ impl frame_system::Config for Test { } impl pallet_test::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type LowerBound = ConstU32<1>; type UpperBound = ConstU32<100>; } impl pallet_test::OtherConfig for Test { - type OtherEvent = Event; + type OtherEvent = RuntimeEvent; } fn new_test_ext() -> sp_io::TestExternalities { @@ -142,8 +142,8 @@ mod benchmarks { where_clause { where ::OtherEvent: Clone - + Into<>::Event>, - >::Event: Clone, + + Into<>::RuntimeEvent>, + >::RuntimeEvent: Clone, } set_value { diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 0c8c875dc611c..9dc920f6cca28 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -82,7 +82,7 @@ fn setup_pot_account, I: 'static>() { let _ = T::Currency::make_free_balance_be(&pot_account, value); } -fn assert_last_event, I: 'static>(generic_event: >::Event) { +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index 74e67b7ea5b92..b95940a2835ce 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -230,7 +230,8 @@ pub mod pallet { type DataDepositPerByte: Get>; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// Maximum acceptable reason length. 
/// diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index cf4b8d23b0458..f1d794eb89dbe 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -71,13 +71,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -95,7 +95,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -113,7 +113,7 @@ impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -132,7 +132,7 @@ impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -155,7 +155,7 @@ parameter_types! { } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BountyDepositBase = ConstU64<80>; type BountyDepositPayoutDelay = ConstU64<3>; type BountyUpdatePeriod = ConstU64<20>; @@ -170,7 +170,7 @@ impl Config for Test { } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BountyDepositBase = ConstU64<80>; type BountyDepositPayoutDelay = ConstU64<3>; type BountyUpdatePeriod = ConstU64<20>; @@ -204,7 +204,7 @@ fn last_event() -> BountiesEvent { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let Event::Bounties(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let RuntimeEvent::Bounties(inner) = e { Some(inner) } else { None }) .last() .unwrap() } diff --git a/frame/child-bounties/src/benchmarking.rs b/frame/child-bounties/src/benchmarking.rs index ca5af50276b9d..697ed40e0071f 100644 --- a/frame/child-bounties/src/benchmarking.rs +++ b/frame/child-bounties/src/benchmarking.rs @@ -160,7 +160,7 @@ fn setup_pot_account() { let _ = T::Currency::make_free_balance_be(&pot_account, value); } -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } diff --git a/frame/child-bounties/src/lib.rs b/frame/child-bounties/src/lib.rs index 5f396cd2d4567..2dfe0660ad68e 100644 --- a/frame/child-bounties/src/lib.rs +++ b/frame/child-bounties/src/lib.rs @@ -146,7 +146,7 @@ pub mod pallet { type ChildBountyValueMinimum: Get>; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; diff --git a/frame/child-bounties/src/tests.rs b/frame/child-bounties/src/tests.rs index 0edee8468b86f..c9dbc8742c56a 100644 --- a/frame/child-bounties/src/tests.rs +++ b/frame/child-bounties/src/tests.rs @@ -74,13 +74,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -98,7 +98,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -115,7 +115,7 @@ impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -136,7 +136,7 @@ parameter_types! { } impl pallet_bounties::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BountyDepositBase = ConstU64<80>; type BountyDepositPayoutDelay = ConstU64<3>; type BountyUpdatePeriod = ConstU64<10>; @@ -150,7 +150,7 @@ impl pallet_bounties::Config for Test { type ChildBountyManager = ChildBounties; } impl pallet_child_bounties::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type MaxActiveChildBountyCount = ConstU32<2>; type ChildBountyValueMinimum = ConstU64<1>; type WeightInfo = (); @@ -172,7 +172,7 @@ fn last_event() -> ChildBountiesEvent { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let Event::ChildBounties(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let RuntimeEvent::ChildBounties(inner) = e { Some(inner) } else { None }) .last() .unwrap() } diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index b80a4aef28d38..7c444dbaa7b6a 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -30,7 +30,7 @@ const SEED: u32 = 0; const MAX_BYTES: u32 = 1_024; -fn assert_last_event, I: 'static>(generic_event: >::Event) { +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 02faf876806e1..7944f826255ec 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -187,7 +187,8 @@ pub mod pallet { + GetDispatchInfo; /// The outer event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// The time-out for council motions. 
type MotionDuration: Get; diff --git a/frame/collective/src/tests.rs b/frame/collective/src/tests.rs index e7b97a39e020a..b063cc21bd426 100644 --- a/frame/collective/src/tests.rs +++ b/frame/collective/src/tests.rs @@ -32,7 +32,7 @@ use sp_runtime::{ }; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test where @@ -61,7 +61,8 @@ mod mock_democracy { #[pallet::config] pub trait Config: frame_system::Config + Sized { - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; type ExternalMajorityOrigin: EnsureOrigin; } @@ -99,13 +100,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -119,8 +120,8 @@ impl frame_system::Config for Test { } impl Config for Test { type Origin = Origin; - type Proposal = Call; - type Event = Event; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type MotionDuration = ConstU64<3>; type MaxProposals = MaxProposals; type MaxMembers = MaxMembers; @@ -129,8 +130,8 @@ impl Config for Test { } impl Config for Test { type Origin = Origin; - type Proposal = Call; - type Event = Event; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type MotionDuration = ConstU64<3>; type MaxProposals = MaxProposals; type MaxMembers = MaxMembers; @@ -138,13 +139,13 @@ impl Config for Test { type WeightInfo = (); } impl mock_democracy::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExternalMajorityOrigin = EnsureProportionAtLeast; } impl Config for Test { type Origin = Origin; - type Proposal = Call; - type Event = Event; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type MotionDuration = ConstU64<3>; type MaxProposals = MaxProposals; type MaxMembers = MaxMembers; @@ -171,11 +172,13 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -fn make_proposal(value: u64) -> Call { - Call::System(frame_system::Call::remark_with_event { remark: value.to_be_bytes().to_vec() }) +fn make_proposal(value: u64) -> RuntimeCall { + RuntimeCall::System(frame_system::Call::remark_with_event { + remark: value.to_be_bytes().to_vec(), + }) } -fn record(event: Event) -> EventRecord { +fn record(event: RuntimeEvent) -> EventRecord { EventRecord { phase: Phase::Initialization, event, topics: vec![] } } @@ -216,32 +219,34 @@ fn close_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed { + record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(Event::Collective(CollectiveEvent::Closed { + record(RuntimeEvent::Collective(CollectiveEvent::Closed { 
proposal_hash: hash, yes: 2, no: 1 })), - record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) + record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { + proposal_hash: hash + })) ] ); }); @@ -250,7 +255,7 @@ fn close_works() { #[test] fn proposal_weight_limit_works_on_approve() { new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members { + let proposal = RuntimeCall::Collective(crate::Call::set_members { new_members: vec![1, 2, 3], prime: None, old_count: MaxMembers::get(), @@ -286,7 +291,7 @@ fn proposal_weight_limit_works_on_approve() { #[test] fn proposal_weight_limit_ignored_on_disapprove() { new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members { + let proposal = RuntimeCall::Collective(crate::Call::set_members { new_members: vec![1, 2, 3], prime: None, old_count: MaxMembers::get(), @@ -342,32 +347,34 @@ fn close_with_prime_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed { + record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(Event::Collective(CollectiveEvent::Closed { + record(RuntimeEvent::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 2, no: 1 })), - record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) + record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { + proposal_hash: hash + })) ] ); }); @@ -402,33 +409,33 @@ fn close_with_voting_prime_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed { + record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(Event::Collective(CollectiveEvent::Closed { + record(RuntimeEvent::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 3, no: 0 })), - record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), - record(Event::Collective(CollectiveEvent::Executed { + record(RuntimeEvent::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(RuntimeEvent::Collective(CollectiveEvent::Executed { proposal_hash: hash, result: Err(DispatchError::BadOrigin) })) @@ -473,42 +480,42 @@ fn close_with_no_prime_but_majority_works() { assert_eq!( System::events(), vec![ - record(Event::CollectiveMajority(CollectiveEvent::Proposed { + record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 5 })), - record(Event::CollectiveMajority(CollectiveEvent::Voted { + record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - 
record(Event::CollectiveMajority(CollectiveEvent::Voted { + record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(Event::CollectiveMajority(CollectiveEvent::Voted { + record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Voted { account: 3, proposal_hash: hash, voted: true, yes: 3, no: 0 })), - record(Event::CollectiveMajority(CollectiveEvent::Closed { + record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Closed { proposal_hash: hash, yes: 5, no: 0 })), - record(Event::CollectiveMajority(CollectiveEvent::Approved { + record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Approved { proposal_hash: hash })), - record(Event::CollectiveMajority(CollectiveEvent::Executed { + record(RuntimeEvent::CollectiveMajority(CollectiveEvent::Executed { proposal_hash: hash, result: Err(DispatchError::BadOrigin) })) @@ -635,7 +642,7 @@ fn propose_works() { assert_eq!( System::events(), - vec![record(Event::Collective(CollectiveEvent::Proposed { + vec![record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, @@ -670,7 +677,7 @@ fn limit_active_proposals() { #[test] fn correct_validate_and_get_proposal() { new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members { + let proposal = RuntimeCall::Collective(crate::Call::set_members { new_members: vec![1, 2, 3], prime: None, old_count: MaxMembers::get(), @@ -803,20 +810,20 @@ fn motions_vote_after_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed { + record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 2 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: false, @@ -915,7 +922,7 @@ fn motions_reproposing_disapproved_works() { #[test] fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { new_test_ext().execute_with(|| { - let proposal = Call::Democracy(mock_democracy::Call::external_propose_majority {}); + let proposal = RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {}); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); @@ -936,33 +943,33 @@ fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed { + record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 2 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(Event::Collective(CollectiveEvent::Closed { + record(RuntimeEvent::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 2, no: 0 })), - record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), - 
record(Event::Collective(CollectiveEvent::Executed { + record(RuntimeEvent::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(RuntimeEvent::Collective(CollectiveEvent::Executed { proposal_hash: hash, result: Err(DispatchError::BadOrigin) })), @@ -985,41 +992,43 @@ fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed { + record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 1, proposal_hash: hash, threshold: 2 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 3, proposal_hash: hash, voted: true, yes: 3, no: 0 })), - record(Event::Collective(CollectiveEvent::Closed { + record(RuntimeEvent::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 3, no: 0 })), - record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), - record(Event::Democracy(mock_democracy::pallet::Event::::ExternalProposed)), - record(Event::Collective(CollectiveEvent::Executed { + record(RuntimeEvent::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(RuntimeEvent::Democracy( + mock_democracy::pallet::Event::::ExternalProposed + )), + record(RuntimeEvent::Collective(CollectiveEvent::Executed { proposal_hash: hash, result: Ok(()) })), @@ -1048,32 +1057,34 @@ fn motions_disapproval_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed { + record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 3 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: false, yes: 1, no: 1 })), - record(Event::Collective(CollectiveEvent::Closed { + record(RuntimeEvent::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 1, no: 1 })), - record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })), + record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { + proposal_hash: hash + })), ] ); }); @@ -1099,33 +1110,33 @@ fn motions_approval_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed { + record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 2 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(Event::Collective(CollectiveEvent::Closed { + record(RuntimeEvent::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 2, no: 0 })), - 
record(Event::Collective(CollectiveEvent::Approved { proposal_hash: hash })), - record(Event::Collective(CollectiveEvent::Executed { + record(RuntimeEvent::Collective(CollectiveEvent::Approved { proposal_hash: hash })), + record(RuntimeEvent::Collective(CollectiveEvent::Executed { proposal_hash: hash, result: Err(DispatchError::BadOrigin) })), @@ -1149,7 +1160,7 @@ fn motion_with_no_votes_closes_with_disapproval() { )); assert_eq!( System::events()[0], - record(Event::Collective(CollectiveEvent::Proposed { + record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, @@ -1173,7 +1184,7 @@ fn motion_with_no_votes_closes_with_disapproval() { // Events show that the close ended in a disapproval. assert_eq!( System::events()[1], - record(Event::Collective(CollectiveEvent::Closed { + record(RuntimeEvent::Collective(CollectiveEvent::Closed { proposal_hash: hash, yes: 0, no: 3 @@ -1181,7 +1192,7 @@ fn motion_with_no_votes_closes_with_disapproval() { ); assert_eq!( System::events()[2], - record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) + record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })) ); }) } @@ -1241,27 +1252,29 @@ fn disapprove_proposal_works() { assert_eq!( System::events(), vec![ - record(Event::Collective(CollectiveEvent::Proposed { + record(RuntimeEvent::Collective(CollectiveEvent::Proposed { account: 1, proposal_index: 0, proposal_hash: hash, threshold: 2 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 1, proposal_hash: hash, voted: true, yes: 1, no: 0 })), - record(Event::Collective(CollectiveEvent::Voted { + record(RuntimeEvent::Collective(CollectiveEvent::Voted { account: 2, proposal_hash: hash, voted: true, yes: 2, no: 0 })), - record(Event::Collective(CollectiveEvent::Disapproved { proposal_hash: hash })), + record(RuntimeEvent::Collective(CollectiveEvent::Disapproved { + proposal_hash: hash + })), ] ); }) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index f91be3a9e491c..0fbf821b2e284 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -282,7 +282,7 @@ pub trait Ext: sealing::Sealed { fn append_debug_buffer(&mut self, msg: &str) -> bool; /// Call some dispatchable and return the result. - fn call_runtime(&self, call: ::Call) -> DispatchResultWithPostInfo; + fn call_runtime(&self, call: ::RuntimeCall) -> DispatchResultWithPostInfo; /// Recovers ECDSA compressed public key based on signature and message hash. 
fn ecdsa_recover(&self, signature: &[u8; 65], message_hash: &[u8; 32]) -> Result<[u8; 33], ()>; @@ -1305,7 +1305,7 @@ where } } - fn call_runtime(&self, call: ::Call) -> DispatchResultWithPostInfo { + fn call_runtime(&self, call: ::RuntimeCall) -> DispatchResultWithPostInfo { let mut origin: T::Origin = RawOrigin::Signed(self.address().clone()).into(); origin.add_filter(T::CallFilter::contains); call.dispatch(origin) @@ -1370,7 +1370,8 @@ mod tests { storage::Storage, tests::{ test_utils::{get_balance, hash, place_contract, set_balance}, - Call, Event as MetaEvent, ExtBuilder, Test, TestFilter, ALICE, BOB, CHARLIE, GAS_LIMIT, + ExtBuilder, RuntimeCall, RuntimeEvent as MetaEvent, Test, TestFilter, ALICE, BOB, + CHARLIE, GAS_LIMIT, }, Error, }; @@ -2575,7 +2576,7 @@ mod tests { #[test] fn call_runtime_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { - let call = Call::System(frame_system::Call::remark_with_event { + let call = RuntimeCall::System(frame_system::Call::remark_with_event { remark: b"Hello World".to_vec(), }); ctx.ext.call_runtime(call).unwrap(); @@ -2636,10 +2637,11 @@ mod tests { // remark should still be allowed let allowed_call = - Call::System(SysCall::remark_with_event { remark: b"Hello".to_vec() }); + RuntimeCall::System(SysCall::remark_with_event { remark: b"Hello".to_vec() }); // transfers are disallowed by the `TestFiler` (see below) - let forbidden_call = Call::Balances(BalanceCall::transfer { dest: CHARLIE, value: 22 }); + let forbidden_call = + RuntimeCall::Balances(BalanceCall::transfer { dest: CHARLIE, value: 22 }); // simple cases: direct call assert_err!( @@ -2648,7 +2650,7 @@ mod tests { ); // as part of a patch: return is OK (but it interrupted the batch) - assert_ok!(ctx.ext.call_runtime(Call::Utility(UtilCall::batch { + assert_ok!(ctx.ext.call_runtime(RuntimeCall::Utility(UtilCall::batch { calls: vec![allowed_call.clone(), forbidden_call, allowed_call] })),); @@ -2659,7 +2661,7 @@ mod tests { }); TestFilter::set_filter(|call| match call { - Call::Balances(pallet_balances::Call::transfer { .. }) => false, + RuntimeCall::Balances(pallet_balances::Call::transfer { .. }) => false, _ => true, }); diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 3c63ad86016f3..9890bd840cc3a 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -243,13 +243,13 @@ pub mod pallet { type Currency: ReservableCurrency; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The overarching call type. - type Call: Dispatchable + type RuntimeCall: Dispatchable + GetDispatchInfo + codec::Decode - + IsType<::Call>; + + IsType<::RuntimeCall>; /// Filter that is applied to calls dispatched by contracts. /// @@ -260,7 +260,7 @@ pub mod pallet { /// # Stability /// /// The runtime **must** make sure that all dispatchables that are callable by - /// contracts remain stable. In addition [`Self::Call`] itself must remain stable. + /// contracts remain stable. In addition [`Self::RuntimeCall`] itself must remain stable. /// This means that no existing variants are allowed to switch their positions. /// /// # Note @@ -270,7 +270,7 @@ pub mod pallet { /// Therefore please make sure to be restrictive about which dispatchables are allowed /// in order to not introduce a new DoS vector like memory allocation patterns that can /// be exploited to drive the runtime into a panic. 
- type CallFilter: Contains<::Call>; + type CallFilter: Contains<::RuntimeCall>; /// Used to answer contracts' queries regarding the current weight price. This is **not** /// used to calculate the actual fee and is only for informational purposes. @@ -1119,7 +1119,7 @@ where fn deposit_event(topics: Vec, event: Event) { >::deposit_event_indexed( &topics, - ::Event::from(event).into(), + ::RuntimeEvent::from(event).into(), ) } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 8bca18c5d1c42..1193d17676e1d 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -164,27 +164,27 @@ impl ChainExtension for TestExtension { let func_id = env.func_id(); let id = env.ext_id() as u32 | func_id as u32; match func_id { - 0x8000 => { + 0 => { let mut env = env.buf_in_buf_out(); let input = env.read(8)?; env.write(&input, false, None)?; TestExtensionTestValue::mutate(|e| e.last_seen_buffer = input); Ok(RetVal::Converging(id)) }, - 0x8001 => { + 1 => { let env = env.only_in(); TestExtensionTestValue::mutate(|e| { e.last_seen_inputs = (env.val0(), env.val1(), env.val2(), env.val3()) }); Ok(RetVal::Converging(id)) }, - 0x8002 => { + 2 => { let mut env = env.buf_in_buf_out(); let weight = Weight::from_ref_time(env.read(5)?[4].into()); env.charge_weight(weight)?; Ok(RetVal::Converging(id)) }, - 0x8003 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), + 3 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), _ => { panic!("Passed unknown id to test chain extension: {}", func_id); }, @@ -281,12 +281,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = AccountId32; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -304,7 +304,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -318,8 +318,8 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } impl pallet_utility::Config for Test { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type PalletsOrigin = OriginCaller; type WeightInfo = (); } @@ -346,7 +346,7 @@ pub struct TestFilter; #[derive(Clone)] pub struct Filters { - filter: fn(&Call) -> bool, + filter: fn(&RuntimeCall) -> bool, } impl Default for Filters { @@ -360,13 +360,13 @@ parameter_types! 
{ } impl TestFilter { - pub fn set_filter(filter: fn(&Call) -> bool) { + pub fn set_filter(filter: fn(&RuntimeCall) -> bool) { CallFilter::mutate(|fltr| fltr.filter = filter); } } -impl Contains for TestFilter { - fn contains(call: &Call) -> bool { +impl Contains for TestFilter { + fn contains(call: &RuntimeCall) -> bool { (CallFilter::get().filter)(call) } } @@ -379,8 +379,8 @@ impl Config for Test { type Time = Timestamp; type Randomness = Randomness; type Currency = Balances; - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type CallFilter = TestFilter; type CallStack = [Frame; 31]; type WeightPrice = Self; @@ -536,12 +536,14 @@ fn instantiate_and_call_and_deposit_event() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::System(frame_system::Event::NewAccount { account: addr.clone() }), + event: RuntimeEvent::System(frame_system::Event::NewAccount { + account: addr.clone() + }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Endowed { + event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { account: addr.clone(), free_balance: min_balance, }), @@ -549,7 +551,7 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: min_balance, @@ -558,7 +560,7 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: addr.clone(), amount: min_balance, }), @@ -566,7 +568,7 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: value, @@ -575,7 +577,7 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::ContractEmitted { + event: RuntimeEvent::Contracts(crate::Event::ContractEmitted { contract: addr.clone(), data: vec![1, 2, 3, 4] }), @@ -583,7 +585,7 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Instantiated { + event: RuntimeEvent::Contracts(crate::Event::Instantiated { deployer: ALICE, contract: addr.clone() }), @@ -828,14 +830,14 @@ fn deploy_and_call_other_contract() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::System(frame_system::Event::NewAccount { + event: RuntimeEvent::System(frame_system::Event::NewAccount { account: callee_addr.clone() }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Endowed { + event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { account: callee_addr.clone(), free_balance: min_balance, }), @@ -843,7 +845,7 @@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: callee_addr.clone(), amount: min_balance, @@ -852,7 +854,7 @@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - 
event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: callee_addr.clone(), amount: min_balance, }), @@ -860,7 +862,7 @@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: caller_addr.clone(), to: callee_addr.clone(), amount: 32768, // hard coded in wasm @@ -869,7 +871,7 @@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Instantiated { + event: RuntimeEvent::Contracts(crate::Event::Instantiated { deployer: caller_addr.clone(), contract: callee_addr.clone(), }), @@ -877,7 +879,7 @@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: caller_addr.clone(), to: callee_addr.clone(), amount: 32768, @@ -886,7 +888,7 @@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: caller_addr.clone(), contract: callee_addr.clone(), }), @@ -894,7 +896,7 @@ fn deploy_and_call_other_contract() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: ALICE, contract: caller_addr.clone(), }), @@ -1093,7 +1095,7 @@ fn cannot_self_destruct_by_refund_after_slash() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Slashed { + event: RuntimeEvent::Balances(pallet_balances::Event::Slashed { who: addr.clone(), amount: 90, }), @@ -1101,7 +1103,7 @@ fn cannot_self_destruct_by_refund_after_slash() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: ALICE, contract: addr.clone(), }), @@ -1109,7 +1111,7 @@ fn cannot_self_destruct_by_refund_after_slash() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::ReserveRepatriated { + event: RuntimeEvent::Balances(pallet_balances::Event::ReserveRepatriated { from: addr.clone(), to: ALICE, amount: 10, @@ -1201,7 +1203,7 @@ fn self_destruct_works() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: addr.clone(), to: DJANGO, amount: 100_000, @@ -1210,7 +1212,7 @@ fn self_destruct_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Terminated { + event: RuntimeEvent::Contracts(crate::Event::Terminated { contract: addr.clone(), beneficiary: DJANGO }), @@ -1218,7 +1220,7 @@ fn self_destruct_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: ALICE, contract: addr.clone(), }), @@ -1226,14 +1228,14 @@ fn self_destruct_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::System(frame_system::Event::KilledAccount { + event: RuntimeEvent::System(frame_system::Event::KilledAccount { account: addr.clone() }), topics: vec![], }, EventRecord { phase: 
Phase::Initialization, - event: Event::Balances(pallet_balances::Event::ReserveRepatriated { + event: RuntimeEvent::Balances(pallet_balances::Event::ReserveRepatriated { from: addr.clone(), to: ALICE, amount: 1_000, @@ -1698,22 +1700,21 @@ fn chain_extension_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - // 0x8000 = read input buffer and pass it through as output - let input: Vec = - ExtensionInput { extension_id: 0, func_id: 0x8000, extra: &[99] }.into(); + // 0 = read input buffer and pass it through as output + let input: Vec = ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into(); let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); assert_eq!(TestExtension::last_seen_buffer(), input); assert_eq!(result.result.unwrap().data, Bytes(input)); - // 0x8001 = treat inputs as integer primitives and store the supplied integers + // 1 = treat inputs as integer primitives and store the supplied integers Contracts::bare_call( ALICE, addr.clone(), 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 0x8001, extra: &[] }.into(), + ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into(), false, ) .result @@ -1721,14 +1722,14 @@ fn chain_extension_works() { // those values passed in the fixture assert_eq!(TestExtension::last_seen_inputs(), (4, 4, 16, 12)); - // 0x8002 = charge some extra weight (amount supplied in the fifth byte) + // 2 = charge some extra weight (amount supplied in the fifth byte) let result = Contracts::bare_call( ALICE, addr.clone(), 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[0] }.into(), + ExtensionInput { extension_id: 0, func_id: 2, extra: &[0] }.into(), false, ); assert_ok!(result.result); @@ -1739,7 +1740,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[42] }.into(), + ExtensionInput { extension_id: 0, func_id: 2, extra: &[42] }.into(), false, ); assert_ok!(result.result); @@ -1750,20 +1751,20 @@ fn chain_extension_works() { 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 0x8002, extra: &[95] }.into(), + ExtensionInput { extension_id: 0, func_id: 2, extra: &[95] }.into(), false, ); assert_ok!(result.result); assert_eq!(result.gas_consumed, gas_consumed + 95); - // 0x8003 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer + // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer let result = Contracts::bare_call( ALICE, addr.clone(), 0, GAS_LIMIT, None, - ExtensionInput { extension_id: 0, func_id: 0x8003, extra: &[] }.into(), + ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into(), false, ) .result @@ -2550,7 +2551,7 @@ fn gas_estimation_call_runtime() { // Call something trivial with a huge gas limit so that we can observe the effects // of pre-charging. This should create a difference between consumed and required. 
- let call = Call::Contracts(crate::Call::call { + let call = RuntimeCall::Contracts(crate::Call::call { dest: addr_callee, value: 0, gas_limit: GAS_LIMIT / 3, @@ -2660,7 +2661,7 @@ fn upload_code_works() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: ALICE, amount: 240, }), @@ -2668,7 +2669,7 @@ fn upload_code_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::CodeStored { code_hash }), + event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), topics: vec![code_hash], }, ] @@ -2739,7 +2740,7 @@ fn remove_code_works() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: ALICE, amount: 240, }), @@ -2747,12 +2748,12 @@ fn remove_code_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::CodeStored { code_hash }), + event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), topics: vec![code_hash], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Unreserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Unreserved { who: ALICE, amount: 240, }), @@ -2760,7 +2761,7 @@ fn remove_code_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::CodeRemoved { code_hash }), + event: RuntimeEvent::Contracts(crate::Event::CodeRemoved { code_hash }), topics: vec![code_hash], }, ] @@ -2794,7 +2795,7 @@ fn remove_code_wrong_origin() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: ALICE, amount: 240, }), @@ -2802,7 +2803,7 @@ fn remove_code_wrong_origin() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::CodeStored { code_hash }), + event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), topics: vec![code_hash], }, ] @@ -2895,12 +2896,14 @@ fn instantiate_with_zero_balance_works() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::System(frame_system::Event::NewAccount { account: addr.clone() }), + event: RuntimeEvent::System(frame_system::Event::NewAccount { + account: addr.clone() + }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Endowed { + event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { account: addr.clone(), free_balance: min_balance, }), @@ -2908,7 +2911,7 @@ fn instantiate_with_zero_balance_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: min_balance, @@ -2917,7 +2920,7 @@ fn instantiate_with_zero_balance_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: addr.clone(), amount: min_balance, }), @@ -2925,7 +2928,7 @@ fn instantiate_with_zero_balance_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: 
ALICE, amount: 240, }), @@ -2933,12 +2936,12 @@ fn instantiate_with_zero_balance_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::CodeStored { code_hash }), + event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), topics: vec![code_hash], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Instantiated { + event: RuntimeEvent::Contracts(crate::Event::Instantiated { deployer: ALICE, contract: addr.clone(), }), @@ -2986,12 +2989,14 @@ fn instantiate_with_below_existential_deposit_works() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::System(frame_system::Event::NewAccount { account: addr.clone() }), + event: RuntimeEvent::System(frame_system::Event::NewAccount { + account: addr.clone() + }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Endowed { + event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { account: addr.clone(), free_balance: min_balance, }), @@ -2999,7 +3004,7 @@ fn instantiate_with_below_existential_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: min_balance, @@ -3008,7 +3013,7 @@ fn instantiate_with_below_existential_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: addr.clone(), amount: min_balance, }), @@ -3016,7 +3021,7 @@ fn instantiate_with_below_existential_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: 50, @@ -3025,7 +3030,7 @@ fn instantiate_with_below_existential_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: ALICE, amount: 240, }), @@ -3033,12 +3038,12 @@ fn instantiate_with_below_existential_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::CodeStored { code_hash }), + event: RuntimeEvent::Contracts(crate::Event::CodeStored { code_hash }), topics: vec![code_hash], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Instantiated { + event: RuntimeEvent::Contracts(crate::Event::Instantiated { deployer: ALICE, contract: addr.clone(), }), @@ -3115,7 +3120,7 @@ fn storage_deposit_works() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: 42, @@ -3124,7 +3129,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: ALICE, contract: addr.clone(), }), @@ -3132,7 +3137,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: charged0, @@ -3141,7 +3146,7 @@ 
fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: addr.clone(), amount: charged0, }), @@ -3149,7 +3154,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: ALICE, contract: addr.clone(), }), @@ -3157,7 +3162,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: charged1, @@ -3166,7 +3171,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Reserved { + event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: addr.clone(), amount: charged1, }), @@ -3174,7 +3179,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: ALICE, contract: addr.clone(), }), @@ -3182,7 +3187,7 @@ fn storage_deposit_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::ReserveRepatriated { + event: RuntimeEvent::Balances(pallet_balances::Event::ReserveRepatriated { from: addr.clone(), to: ALICE, amount: refunded0, @@ -3264,7 +3269,7 @@ fn set_code_extrinsic() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Contracts(pallet_contracts::Event::ContractCodeUpdated { + event: RuntimeEvent::Contracts(pallet_contracts::Event::ContractCodeUpdated { contract: addr.clone(), new_code_hash, old_code_hash: code_hash, @@ -3343,14 +3348,14 @@ fn call_after_killed_account_needs_funding() { vec![ EventRecord { phase: Phase::Initialization, - event: Event::System(frame_system::Event::KilledAccount { + event: RuntimeEvent::System(frame_system::Event::KilledAccount { account: addr.clone() }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Slashed { + event: RuntimeEvent::Balances(pallet_balances::Event::Slashed { who: addr.clone(), amount: min_balance + 700 }), @@ -3358,7 +3363,7 @@ fn call_after_killed_account_needs_funding() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: ALICE, contract: addr.clone(), }), @@ -3366,12 +3371,14 @@ fn call_after_killed_account_needs_funding() { }, EventRecord { phase: Phase::Initialization, - event: Event::System(frame_system::Event::NewAccount { account: addr.clone() }), + event: RuntimeEvent::System(frame_system::Event::NewAccount { + account: addr.clone() + }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Endowed { + event: RuntimeEvent::Balances(pallet_balances::Event::Endowed { account: addr.clone(), free_balance: min_balance }), @@ -3379,7 +3386,7 @@ fn call_after_killed_account_needs_funding() { }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Transfer { + event: RuntimeEvent::Balances(pallet_balances::Event::Transfer { from: ALICE, to: addr.clone(), amount: min_balance @@ -3388,7 +3395,7 @@ fn 
call_after_killed_account_needs_funding() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: ALICE, contract: addr.clone(), }), @@ -3571,7 +3578,7 @@ fn set_code_hash() { &[ EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::ContractCodeUpdated { + event: RuntimeEvent::Contracts(crate::Event::ContractCodeUpdated { contract: contract_addr.clone(), new_code_hash, old_code_hash: code_hash, @@ -3580,7 +3587,7 @@ fn set_code_hash() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: ALICE, contract: contract_addr.clone(), }), @@ -3588,7 +3595,7 @@ fn set_code_hash() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Called { + event: RuntimeEvent::Contracts(crate::Event::Called { caller: ALICE, contract: contract_addr.clone(), }), diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 3718be756e39d..e850f458bb745 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -280,7 +280,7 @@ mod tests { }, gas::GasMeter, storage::WriteOutcome, - tests::{Call, Test, ALICE, BOB}, + tests::{RuntimeCall, Test, ALICE, BOB}, BalanceOf, CodeHash, Error, Pallet as Contracts, }; use assert_matches::assert_matches; @@ -339,7 +339,7 @@ mod tests { transfers: Vec, // (topics, data) events: Vec<(Vec, Vec)>, - runtime_calls: RefCell>, + runtime_calls: RefCell>, schedule: Schedule, gas_meter: GasMeter, debug_buffer: Vec, @@ -532,7 +532,10 @@ mod tests { self.debug_buffer.extend(msg.as_bytes()); true } - fn call_runtime(&self, call: ::Call) -> DispatchResultWithPostInfo { + fn call_runtime( + &self, + call: ::RuntimeCall, + ) -> DispatchResultWithPostInfo { self.runtime_calls.borrow_mut().push(call); Ok(Default::default()) } @@ -2299,7 +2302,8 @@ mod tests { #[test] #[cfg(feature = "unstable-interface")] fn call_runtime_works() { - let call = Call::System(frame_system::Call::remark { remark: b"Hello World".to_vec() }); + let call = + RuntimeCall::System(frame_system::Call::remark { remark: b"Hello World".to_vec() }); let mut ext = MockExt::default(); let result = execute(CODE_CALL_RUNTIME, call.encode(), &mut ext).unwrap(); assert_eq!(*ext.runtime_calls.borrow(), vec![call]); diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 334223e3b8432..5b94611843169 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -2316,7 +2316,7 @@ pub mod env { ) -> Result { use frame_support::{dispatch::GetDispatchInfo, weights::extract_actual_weight}; ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?; - let call: ::Call = + let call: ::RuntimeCall = ctx.read_sandbox_memory_as_unbounded(call_ptr, call_len)?; let dispatch_info = call.get_dispatch_info(); let charged = ctx.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index cd16fccd6661d..534941d6f7f66 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -98,7 +98,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + Sized { // System level stuff. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; /// Currency type with which voting happens. diff --git a/frame/conviction-voting/src/tests.rs b/frame/conviction-voting/src/tests.rs index cbd2b0619ac2b..7a97a46086398 100644 --- a/frame/conviction-voting/src/tests.rs +++ b/frame/conviction-voting/src/tests.rs @@ -50,9 +50,9 @@ frame_support::construct_runtime!( // Test that a fitlered call can be dispatched. pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(call: &Call) -> bool { - !matches!(call, &Call::Balances(pallet_balances::Call::set_balance { .. })) +impl Contains for BaseFilter { + fn contains(call: &RuntimeCall) -> bool { + !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::set_balance { .. })) } } @@ -68,13 +68,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -92,7 +92,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type MaxLocks = ConstU32<10>; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -187,7 +187,7 @@ impl Polling> for TestPolls { } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = pallet_balances::Pallet; type VoteLockingPeriod = ConstU64<3>; type MaxVotes = ConstU32<3>; diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index d5db82de3840d..ab4075bc82fc5 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -37,7 +37,7 @@ const MAX_REFERENDUMS: u32 = 99; const MAX_SECONDERS: u32 = 100; const MAX_BYTES: u32 = 16_384; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index bbc5b767e97ff..f6696065bfc87 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -254,7 +254,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + Sized { type Proposal: Parameter + Dispatchable + From>; - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Currency type for this pallet. type Currency: ReservableCurrency diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index def8f84067909..e3ed046813e23 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -70,9 +70,9 @@ frame_support::construct_runtime!( // Test that a fitlered call can be dispatched. pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(call: &Call) -> bool { - !matches!(call, &Call::Balances(pallet_balances::Call::set_balance { .. })) +impl Contains for BaseFilter { + fn contains(call: &RuntimeCall) -> bool { + !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::set_balance { .. 
})) } } @@ -88,13 +88,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -110,10 +110,10 @@ parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; } impl pallet_scheduler::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Origin = Origin; type PalletsOrigin = OriginCaller; - type Call = Call; + type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; type MaxScheduledPerBlock = (); @@ -128,7 +128,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type MaxLocks = ConstU32<10>; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -156,8 +156,8 @@ impl SortedMembers for OneToFive { } impl Config for Test { - type Proposal = Call; - type Event = Event; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type Currency = pallet_balances::Pallet; type EnactmentPeriod = ConstU64<2>; type LaunchPeriod = ConstU64<2>; @@ -217,14 +217,18 @@ fn params_should_work() { } fn set_balance_proposal(value: u64) -> Vec { - Call::Balances(pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0 }) - .encode() + RuntimeCall::Balances(pallet_balances::Call::set_balance { + who: 42, + new_free: value, + new_reserved: 0, + }) + .encode() } #[test] fn set_balance_proposal_is_correctly_filtered_out() { for i in 0..10 { - let call = Call::decode(&mut &set_balance_proposal(i)[..]).unwrap(); + let call = RuntimeCall::decode(&mut &set_balance_proposal(i)[..]).unwrap(); assert!(!::BaseCallFilter::contains(&call)); } } diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index a9ce35499a329..f0378ea7dc83f 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -570,8 +570,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + SendTransactionTypes> { - type Event: From> - + IsType<::Event> + type RuntimeEvent: From> + + IsType<::RuntimeEvent> + TryInto>; /// Currency type. 
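The democracy test hunks above also show why the rename reaches into test helpers: a proposal is nothing more than a SCALE-encoded `RuntimeCall`, and the filter that guards dispatch takes the decoded outer call. A condensed round trip, assuming the `Test` mock and `BaseFilter` defined in that file:

// A proposal is just the SCALE encoding of an outer call.
fn set_balance_proposal(value: u64) -> Vec<u8> {
    RuntimeCall::Balances(pallet_balances::Call::set_balance {
        who: 42,
        new_free: value,
        new_reserved: 0,
    })
    .encode()
}

// Decoding it back yields a RuntimeCall that the runtime's base filter rejects,
// because BaseFilter blacklists `set_balance`.
let call = RuntimeCall::decode(&mut &set_balance_proposal(0)[..]).unwrap();
assert!(!<Test as frame_system::Config>::BaseCallFilter::contains(&call));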
diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 4e9672fc1d4f6..efe9d8ea587cb 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -50,7 +50,8 @@ use sp_runtime::{ use std::sync::Arc; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = + sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime where @@ -85,7 +86,7 @@ pub(crate) fn multi_phase_events() -> Vec> { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let Event::MultiPhase(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let RuntimeEvent::MultiPhase(inner) = e { Some(inner) } else { None }) .collect::>() } @@ -201,13 +202,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type DbWeight = (); type BlockLength = (); @@ -231,7 +232,7 @@ parameter_types! { impl pallet_balances::Config for Runtime { type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -362,7 +363,7 @@ impl MinerConfig for Runtime { } impl crate::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type EstimateCallFee = frame_support::traits::ConstU32<8>; type SignedPhase = SignedPhase; @@ -394,13 +395,13 @@ impl crate::Config for Runtime { impl frame_system::offchain::SendTransactionTypes for Runtime where - Call: From, + RuntimeCall: From, { - type OverarchingCall = Call; + type OverarchingCall = RuntimeCall; type Extrinsic = Extrinsic; } -pub type Extrinsic = sp_runtime::testing::TestXt; +pub type Extrinsic = sp_runtime::testing::TestXt; parameter_types! 
{ pub MaxNominations: u32 = ::LIMIT as u32; diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index e99f2e9cef028..7dadd320abdf8 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -855,8 +855,8 @@ mod tests { use super::*; use crate::{ mock::{ - roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, Call as OuterCall, - ExtBuilder, Extrinsic, MinerMaxWeight, MultiPhase, Origin, Runtime, System, + roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, + MinerMaxWeight, MultiPhase, Origin, Runtime, RuntimeCall as OuterCall, System, TestNposSolution, TrimHelpers, UnsignedPhase, }, CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index 62e76c3888822..c899e021f974a 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -214,13 +214,13 @@ mod tests { type Origin = Origin; type Index = AccountId; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = (); + type RuntimeEvent = (); type BlockHashCount = (); type DbWeight = (); type BlockLength = (); diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 539e90d0179ff..18210513f8ab6 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -195,7 +195,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Identifier for the elections-phragmen pallet's lock #[pallet::constant] @@ -1185,13 +1185,13 @@ mod tests { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -1206,7 +1206,7 @@ mod tests { impl pallet_balances::Config for Test { type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = frame_system::Pallet; @@ -1279,7 +1279,7 @@ mod tests { impl Config for Test { type PalletId = ElectionsPhragmenPalletId; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type ChangeMembers = TestChangeMembers; @@ -1298,7 +1298,8 @@ mod tests { } pub type Block = sp_runtime::generic::Block; - pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + pub type UncheckedExtrinsic = + sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test where @@ -2201,7 +2202,7 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); - System::assert_last_event(Event::Elections(super::Event::EmptyTerm)); + 
System::assert_last_event(RuntimeEvent::Elections(super::Event::EmptyTerm)); }) } @@ -2217,7 +2218,7 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); - System::assert_last_event(Event::Elections(super::Event::NewTerm { + System::assert_last_event(RuntimeEvent::Elections(super::Event::NewTerm { new_members: vec![(4, 35), (5, 45)], })); @@ -2230,7 +2231,7 @@ mod tests { System::set_block_number(10); Elections::on_initialize(System::block_number()); - System::assert_last_event(Event::Elections(super::Event::NewTerm { + System::assert_last_event(RuntimeEvent::Elections(super::Event::NewTerm { new_members: vec![], })); @@ -2302,7 +2303,7 @@ mod tests { assert_eq!(Elections::election_rounds(), 1); assert!(members_ids().is_empty()); - System::assert_last_event(Event::Elections(super::Event::NewTerm { + System::assert_last_event(RuntimeEvent::Elections(super::Event::NewTerm { new_members: vec![], })); }); @@ -2610,7 +2611,7 @@ mod tests { // 5 is an outgoing loser. will also get slashed. assert_eq!(balances(&5), (45, 2)); - System::assert_has_event(Event::Elections(super::Event::NewTerm { + System::assert_has_event(RuntimeEvent::Elections(super::Event::NewTerm { new_members: vec![(4, 35), (5, 45)], })); }) diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index ad46bdc4185bd..756333b197090 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -370,7 +370,7 @@ pub mod pallet { type MagicNumber: Get; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Type representing the weight of this pallet type WeightInfo: WeightInfo; @@ -719,11 +719,11 @@ impl sp_std::fmt::Debug for WatchDummy { impl SignedExtension for WatchDummy where - ::Call: IsSubType>, + ::RuntimeCall: IsSubType>, { const IDENTIFIER: &'static str = "WatchDummy"; type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::RuntimeCall; type AdditionalSigned = (); type Pre = (); diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index 567117e621921..78cdb3794f8d3 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -63,12 +63,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -87,7 +87,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -95,7 +95,7 @@ impl pallet_balances::Config for Test { impl Config for Test { type MagicNumber = ConstU64<1_000_000_000>; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); } diff --git a/frame/examples/offchain-worker/src/lib.rs b/frame/examples/offchain-worker/src/lib.rs index b40311051594b..fdf8b61a01acd 100644 --- a/frame/examples/offchain-worker/src/lib.rs +++ b/frame/examples/offchain-worker/src/lib.rs @@ -126,10 +126,7 @@ pub mod pallet { type AuthorityId: AppCrypto; /// The overarching event type. 
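In the basic example pallet above, the `WatchDummy` signed extension now names the outer enum explicitly: its `Call` associated type is `<T as frame_system::Config>::RuntimeCall`, and the `IsSubType` bound is what lets it look back down into the pallet-local `Call<T>`. A rough sketch of how such an extension inspects a call (illustrative only; the concrete validation body lives in the example pallet, not in this hunk):

fn validate(
    &self,
    _who: &Self::AccountId,
    call: &Self::Call, // i.e. &<T as frame_system::Config>::RuntimeCall
    _info: &DispatchInfoOf<Self::Call>,
    _len: usize,
) -> TransactionValidity {
    // Try to down-cast the outer call into this pallet's own `Call` enum.
    match call.is_sub_type() {
        // Bump the priority of the pallet's `set_dummy` calls.
        Some(Call::set_dummy { .. }) =>
            Ok(ValidTransaction { priority: 100, ..Default::default() }),
        // Anything else passes through unchanged.
        _ => Ok(Default::default()),
    }
}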
- type Event: From> + IsType<::Event>; - - /// The overarching dispatch call type. - type Call: From>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; // Configuration parameters diff --git a/frame/examples/offchain-worker/src/tests.rs b/frame/examples/offchain-worker/src/tests.rs index 5b03614333cc9..2c189935ac19d 100644 --- a/frame/examples/offchain-worker/src/tests.rs +++ b/frame/examples/offchain-worker/src/tests.rs @@ -61,7 +61,7 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -69,7 +69,7 @@ impl frame_system::Config for Test { type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -82,7 +82,7 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } -type Extrinsic = TestXt; +type Extrinsic = TestXt; type AccountId = <::Signer as IdentifyAccount>::AccountId; impl frame_system::offchain::SigningTypes for Test { @@ -92,22 +92,22 @@ impl frame_system::offchain::SigningTypes for Test { impl frame_system::offchain::SendTransactionTypes for Test where - Call: From, + RuntimeCall: From, { - type OverarchingCall = Call; + type OverarchingCall = RuntimeCall; type Extrinsic = Extrinsic; } impl frame_system::offchain::CreateSignedTransaction for Test where - Call: From, + RuntimeCall: From, { fn create_transaction>( - call: Call, + call: RuntimeCall, _public: ::Signer, _account: AccountId, nonce: u64, - ) -> Option<(Call, ::SignaturePayload)> { + ) -> Option<(RuntimeCall, ::SignaturePayload)> { Some((call, (nonce, ()))) } } @@ -117,9 +117,8 @@ parameter_types! 
{ } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type AuthorityId = crypto::TestAuthId; - type Call = Call; type GracePeriod = ConstU64<5>; type UnsignedInterval = ConstU64<128>; type UnsignedPriority = UnsignedPriority; @@ -233,7 +232,7 @@ fn should_submit_signed_transaction_on_chain() { assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature.unwrap().0, 0); - assert_eq!(tx.call, Call::Example(crate::Call::submit_price { price: 15523 })); + assert_eq!(tx.call, RuntimeCall::Example(crate::Call::submit_price { price: 15523 })); }); } @@ -278,7 +277,7 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload { + if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, }) = tx.call @@ -337,7 +336,7 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload { + if let RuntimeCall::Example(crate::Call::submit_price_unsigned_with_signed_payload { price_payload: body, signature, }) = tx.call @@ -379,7 +378,10 @@ fn should_submit_raw_unsigned_transaction_on_chain() { assert_eq!(tx.signature, None); assert_eq!( tx.call, - Call::Example(crate::Call::submit_price_unsigned { block_number: 1, price: 15523 }) + RuntimeCall::Example(crate::Call::submit_price_unsigned { + block_number: 1, + price: 15523 + }) ); }); } diff --git a/frame/examples/parallel/src/lib.rs b/frame/examples/parallel/src/lib.rs index 7b8948c2ebd09..3432a79638664 100644 --- a/frame/examples/parallel/src/lib.rs +++ b/frame/examples/parallel/src/lib.rs @@ -41,10 +41,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching dispatch call type. - type Call: From>; - } + pub trait Config: frame_system::Config {} #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] diff --git a/frame/examples/parallel/src/tests.rs b/frame/examples/parallel/src/tests.rs index 67d823d8b204b..80c8213fdfb1c 100644 --- a/frame/examples/parallel/src/tests.rs +++ b/frame/examples/parallel/src/tests.rs @@ -46,7 +46,7 @@ parameter_types! 
{ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type PalletInfo = PalletInfo; type Index = u64; type BlockNumber = u64; @@ -55,7 +55,7 @@ impl frame_system::Config for Test { type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU64<250>; type DbWeight = (); type BlockWeights = (); @@ -70,9 +70,7 @@ impl frame_system::Config for Test { type MaxConsumers = frame_support::traits::ConstU32<16>; } -impl Config for Test { - type Call = Call; -} +impl Config for Test {} fn test_pub(n: u8) -> sp_core::sr25519::Public { sp_core::sr25519::Public::from_raw([n; 32]) diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index b23342800f8b9..7b71224904378 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -786,14 +786,14 @@ mod tests { type DbWeight = (); type Origin = Origin; type Index = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type BlockNumber = u64; type Hash = sp_core::H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = RuntimeVersion; type PalletInfo = PalletInfo; @@ -809,7 +809,7 @@ mod tests { type Balance = u64; impl pallet_balances::Config for Runtime { type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -823,7 +823,7 @@ mod tests { pub const TransactionByteFee: Balance = 0; } impl pallet_transaction_payment::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter; type OperationalFeeMultiplier = ConstU8<5>; type WeightToFee = IdentityFee; @@ -850,7 +850,7 @@ mod tests { frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, ); - type TestXt = sp_runtime::testing::TestXt; + type TestXt = sp_runtime::testing::TestXt; type TestBlock = Block; type TestUncheckedExtrinsic = TestXt; @@ -889,8 +889,8 @@ mod tests { Some((who, extra(nonce, fee))) } - fn call_transfer(dest: u64, value: u64) -> Call { - Call::Balances(BalancesCall::transfer { dest, value }) + fn call_transfer(dest: u64, value: u64) -> RuntimeCall { + RuntimeCall::Balances(BalancesCall::transfer { dest, value }) } #[test] @@ -1033,7 +1033,7 @@ mod tests { let mut t = new_test_ext(10000); // given: TestXt uses the encoded len as fixed Len: let xt = TestXt::new( - Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); let encoded = xt.encode(); @@ -1056,7 +1056,7 @@ mod tests { for nonce in 0..=num_to_exhaust_block { let xt = TestXt::new( - Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, nonce.into(), 0), ); let res = Executive::apply_extrinsic(xt); @@ -1081,15 +1081,15 @@ mod tests { #[test] fn block_weight_and_size_is_stored_per_tx() { let xt = TestXt::new( - Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); let x1 = TestXt::new( 
- Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 1, 0), ); let x2 = TestXt::new( - Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 2, 0), ); let len = xt.clone().encode().len() as u32; @@ -1145,8 +1145,8 @@ mod tests { #[test] fn validate_unsigned() { - let valid = TestXt::new(Call::Custom(custom::Call::allowed_unsigned {}), None); - let invalid = TestXt::new(Call::Custom(custom::Call::unallowed_unsigned {}), None); + let valid = TestXt::new(RuntimeCall::Custom(custom::Call::allowed_unsigned {}), None); + let invalid = TestXt::new(RuntimeCall::Custom(custom::Call::unallowed_unsigned {}), None); let mut t = new_test_ext(1); t.execute_with(|| { @@ -1184,7 +1184,7 @@ mod tests { id, &1, 110, lock, ); let xt = TestXt::new( - Call::System(SystemCall::remark { remark: vec![1u8] }), + RuntimeCall::System(SystemCall::remark { remark: vec![1u8] }), sign_extra(1, 0, 0), ); let weight = xt.get_dispatch_info().weight + @@ -1360,7 +1360,7 @@ mod tests { #[test] fn custom_runtime_upgrade_is_called_when_using_execute_block_trait() { let xt = TestXt::new( - Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); @@ -1459,7 +1459,7 @@ mod tests { #[test] fn calculating_storage_root_twice_works() { - let call = Call::Custom(custom::Call::calculate_storage_root {}); + let call = RuntimeCall::Custom(custom::Call::calculate_storage_root {}); let xt = TestXt::new(call, sign_extra(1, 0, 0)); let header = new_test_ext(1).execute_with(|| { @@ -1486,10 +1486,10 @@ mod tests { #[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] fn invalid_inherent_position_fail() { let xt1 = TestXt::new( - Call::Balances(BalancesCall::transfer { dest: 33, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 33, value: 0 }), sign_extra(1, 0, 0), ); - let xt2 = TestXt::new(Call::Custom(custom::Call::inherent_call {}), None); + let xt2 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); let header = new_test_ext(1).execute_with(|| { // Let's build some fake block. @@ -1514,7 +1514,7 @@ mod tests { #[test] fn valid_inherents_position_works() { - let xt1 = TestXt::new(Call::Custom(custom::Call::inherent_call {}), None); + let xt1 = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None); let xt2 = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0)); let header = new_test_ext(1).execute_with(|| { diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index b94b7d164f04c..157aa8284f2f1 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -98,7 +98,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// Overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Currency type that this works on. 
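Across the executive test hunks above, extrinsics are all built the same way: an outer `RuntimeCall` plus an optional signature, wrapped in `sp_runtime::testing::TestXt`. Condensed from those hunks (`sign_extra` and the `custom` pallet are the helpers defined in that test module):

fn call_transfer(dest: u64, value: u64) -> RuntimeCall {
    RuntimeCall::Balances(BalancesCall::transfer { dest, value })
}

// A signed extrinsic: (who, (nonce, extra)) comes from `sign_extra`.
let signed = TestXt::new(call_transfer(33, 0), sign_extra(1, 0, 0));

// An unsigned/inherent extrinsic simply carries `None` instead of a signature.
let inherent = TestXt::new(RuntimeCall::Custom(custom::Call::inherent_call {}), None);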
type Currency: ReservableCurrency; diff --git a/frame/gilt/src/mock.rs b/frame/gilt/src/mock.rs index 369b34ba77f44..a9cca0ff22780 100644 --- a/frame/gilt/src/mock.rs +++ b/frame/gilt/src/mock.rs @@ -50,7 +50,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -58,7 +58,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -75,7 +75,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = frame_support::traits::ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -92,7 +92,7 @@ ord_parameter_types! { } impl pallet_gilt::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = ::Balance; type AdminOrigin = frame_system::EnsureSignedBy; diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 1781f0a8e40a2..1fcf5c67150b5 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -87,12 +87,9 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The event type of this module. - type Event: From - + Into<::Event> - + IsType<::Event>; - - /// The function call. - type Call: From>; + type RuntimeEvent: From + + Into<::RuntimeEvent> + + IsType<::RuntimeEvent>; /// The proof of key ownership, used for validating equivocation reports /// The proof must include the session index and validator count of the diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index d246466cf0db4..5aae751de9ea6 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -82,13 +82,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -103,10 +103,10 @@ impl frame_system::Config for Test { impl frame_system::offchain::SendTransactionTypes for Test where - Call: From, + RuntimeCall: From, { - type OverarchingCall = Call; - type Extrinsic = TestXt; + type OverarchingCall = RuntimeCall; + type Extrinsic = TestXt; } parameter_types! { @@ -116,7 +116,7 @@ parameter_types! { /// Custom `SessionHandler` since we use `TestSessionKeys` as `Keys`. 
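The grandpa hunks above summarise the second half of this rename: the pallet-level `type Call` associated types are dropped entirely, and pallets that submit their own extrinsics reach the outer enum through `frame_system::offchain::SendTransactionTypes` instead. The runtime-side shape, as in the mocks above (`LocalCall` is just the generic parameter name):

impl<LocalCall> frame_system::offchain::SendTransactionTypes<LocalCall> for Test
where
    RuntimeCall: From<LocalCall>,
{
    // Every pallet-local call converts into the one runtime-wide call enum.
    type OverarchingCall = RuntimeCall;
    type Extrinsic = TestXt<RuntimeCall, ()>;
}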
impl pallet_session::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorId = u64; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = pallet_session::PeriodicSessions, ConstU64<0>>; @@ -145,7 +145,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU128<1>; type AccountStore = System; type WeightInfo = (); @@ -188,7 +188,7 @@ impl pallet_staking::Config for Test { type MaxNominations = ConstU32<16>; type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type CurrencyBalance = ::Balance; type Slash = (); @@ -213,7 +213,7 @@ impl pallet_staking::Config for Test { } impl pallet_offences::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; } @@ -224,8 +224,7 @@ parameter_types! { } impl Config for Test { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; type KeyOwnerProofSystem = Historical; diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 6596f227383f2..7ab5d416eb533 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -32,7 +32,7 @@ use sp_runtime::traits::Bounded; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index d24b17eb86ee0..c4f35a06e830a 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -105,7 +105,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The currency trait. type Currency: ReservableCurrency; diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 4c1ced0a83324..0fb5d4587af16 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -60,12 +60,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -81,7 +81,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -103,7 +103,7 @@ ord_parameter_types! 
{ type EnsureOneOrRoot = EitherOfDiverse, EnsureSignedBy>; type EnsureTwoOrRoot = EitherOfDiverse, EnsureSignedBy>; impl pallet_identity::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type Slashed = (); type BasicDeposit = ConstU64<10>; diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 34c1c70d79f75..342522ff29b19 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -337,7 +337,7 @@ pub mod pallet { type MaxPeerDataEncodingSize: Get; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// A type for retrieving the validators supposed to be online in a session. type ValidatorSet: ValidatorSetWithIdentification; diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 6ea5a16d64f58..a193843a3d661 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -83,7 +83,7 @@ impl pallet_session::historical::SessionManager for TestSessionManager } /// An extrinsic type used for tests. -pub type Extrinsic = TestXt; +pub type Extrinsic = TestXt; type IdentificationTuple = (u64, u64); type Offence = crate::UnresponsivenessOffence; @@ -130,13 +130,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -162,7 +162,7 @@ impl pallet_session::Config for Runtime { type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type Keys = UintAuthorityId; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type NextSessionRotation = pallet_session::PeriodicSessions; type WeightInfo = (); } @@ -216,7 +216,7 @@ impl frame_support::traits::EstimateNextSessionRotation for TestNextSession impl Config for Runtime { type AuthorityId = UintAuthorityId; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorSet = Historical; type NextSessionRotation = TestNextSessionRotation; type ReportUnresponsiveness = OffenceHandler; @@ -229,9 +229,9 @@ impl Config for Runtime { impl frame_system::offchain::SendTransactionTypes for Runtime where - Call: From, + RuntimeCall: From, { - type OverarchingCall = Call; + type OverarchingCall = RuntimeCall; type Extrinsic = Extrinsic; } diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 6ef7cce7c353b..e6154a634f4a4 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -240,7 +240,8 @@ fn should_generate_heartbeats() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { - crate::mock::Call::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => heartbeat, + crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => + heartbeat, e => panic!("Unexpected call: {:?}", e), }; @@ -355,7 +356,8 @@ fn should_not_send_a_report_if_already_online() { // check stuff about the transaction. let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { - crate::mock::Call::ImOnline(crate::Call::heartbeat { heartbeat, .. }) => heartbeat, + crate::mock::RuntimeCall::ImOnline(crate::Call::heartbeat { heartbeat, .. 
}) => + heartbeat, e => panic!("Unexpected call: {:?}", e), }; diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index 49380f18e24db..41893254c3c97 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -68,7 +68,7 @@ pub mod pallet { type Deposit: Get>; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index 693296a3b1064..255080f345eee 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -53,7 +53,7 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -61,7 +61,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = Indices; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -80,7 +80,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -90,7 +90,7 @@ impl Config for Test { type AccountIndex = u64; type Currency = Balances; type Deposit = ConstU64<1>; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); } diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index 02df65a3336bf..505dd1bb98caa 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -96,17 +96,17 @@ pub struct LotteryConfig { } pub trait ValidateCall { - fn validate_call(call: &::Call) -> bool; + fn validate_call(call: &::RuntimeCall) -> bool; } impl ValidateCall for () { - fn validate_call(_: &::Call) -> bool { + fn validate_call(_: &::RuntimeCall) -> bool { false } } impl ValidateCall for Pallet { - fn validate_call(call: &::Call) -> bool { + fn validate_call(call: &::RuntimeCall) -> bool { let valid_calls = CallIndices::::get(); let call_index = match Self::call_to_index(call) { Ok(call_index) => call_index, @@ -134,7 +134,7 @@ pub mod pallet { type PalletId: Get; /// A dispatchable call. - type Call: Parameter + type RuntimeCall: Parameter + Dispatchable + GetDispatchInfo + From>; @@ -146,7 +146,7 @@ pub mod pallet { type Randomness: Randomness; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The manager origin. type ManagerOrigin: EnsureOrigin; @@ -300,7 +300,10 @@ pub mod pallet { T::WeightInfo::buy_ticket() .saturating_add(call.get_dispatch_info().weight) )] - pub fn buy_ticket(origin: OriginFor, call: Box<::Call>) -> DispatchResult { + pub fn buy_ticket( + origin: OriginFor, + call: Box<::RuntimeCall>, + ) -> DispatchResult { let caller = ensure_signed(origin.clone())?; call.clone().dispatch(origin).map_err(|e| e.error)?; @@ -315,7 +318,10 @@ pub mod pallet { /// /// This extrinsic must be called by the Manager origin. 
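The lottery pallet is the one place in this patch where the outer call type is more than a passthrough: as the `call_to_index` helper in the next hunk shows, a call's identity for ticketing purposes is just its first two SCALE-encoded bytes (pallet index, call index). That is why, in the tests further down, two transfers with different arguments count as the same ticket. A small check of that property against the mock runtime:

// Different arguments, same (pallet index, call index) prefix.
let a = RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 20 });
let b = RuntimeCall::Balances(BalancesCall::transfer { dest: 3, value: 30 });
assert_eq!(&a.encode()[..2], &b.encode()[..2]);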
#[pallet::weight(T::WeightInfo::set_calls(calls.len() as u32))] - pub fn set_calls(origin: OriginFor, calls: Vec<::Call>) -> DispatchResult { + pub fn set_calls( + origin: OriginFor, + calls: Vec<::RuntimeCall>, + ) -> DispatchResult { T::ManagerOrigin::ensure_origin(origin)?; ensure!(calls.len() <= T::MaxCalls::get() as usize, Error::::TooManyCalls); if calls.is_empty() { @@ -404,7 +410,7 @@ impl Pallet { /// Converts a vector of calls into a vector of call indices. fn calls_to_indices( - calls: &[::Call], + calls: &[::RuntimeCall], ) -> Result, DispatchError> { let mut indices = BoundedVec::::with_bounded_capacity(calls.len()); for c in calls.iter() { @@ -415,7 +421,7 @@ impl Pallet { } /// Convert a call to it's call index by encoding the call and taking the first two bytes. - fn call_to_index(call: &::Call) -> Result { + fn call_to_index(call: &::RuntimeCall) -> Result { let encoded_call = call.encode(); if encoded_call.len() < 2 { return Err(Error::::EncodingFailed.into()) @@ -424,7 +430,7 @@ impl Pallet { } /// Logic for buying a ticket. - fn do_buy_ticket(caller: &T::AccountId, call: &::Call) -> DispatchResult { + fn do_buy_ticket(caller: &T::AccountId, call: &::RuntimeCall) -> DispatchResult { // Check the call is valid lottery let config = Lottery::::get().ok_or(Error::::NotConfigured)?; let block_number = frame_system::Pallet::::block_number(); diff --git a/frame/lottery/src/mock.rs b/frame/lottery/src/mock.rs index 592551fb6b93f..b521b83e7ce2b 100644 --- a/frame/lottery/src/mock.rs +++ b/frame/lottery/src/mock.rs @@ -59,14 +59,14 @@ impl frame_system::Config for Test { type DbWeight = (); type Origin = Origin; type Index = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type BlockNumber = u64; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -84,7 +84,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -97,10 +97,10 @@ parameter_types! 
{ impl Config for Test { type PalletId = LotteryPalletId; - type Call = Call; + type RuntimeCall = RuntimeCall; type Currency = Balances; type Randomness = TestRandomness; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ManagerOrigin = EnsureRoot; type MaxCalls = ConstU32<2>; type ValidateCall = Lottery; diff --git a/frame/lottery/src/tests.rs b/frame/lottery/src/tests.rs index d8dd6e4b7fe6c..ff3111b52db77 100644 --- a/frame/lottery/src/tests.rs +++ b/frame/lottery/src/tests.rs @@ -20,7 +20,8 @@ use super::*; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; use mock::{ - new_test_ext, run_to_block, Balances, BalancesCall, Call, Lottery, Origin, SystemCall, Test, + new_test_ext, run_to_block, Balances, BalancesCall, Lottery, Origin, RuntimeCall, SystemCall, + Test, }; use pallet_balances::Error as BalancesError; use sp_runtime::traits::BadOrigin; @@ -43,8 +44,8 @@ fn basic_end_to_end_works() { let length = 20; let delay = 5; let calls = vec![ - Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; // Set calls for the lottery @@ -55,7 +56,7 @@ fn basic_end_to_end_works() { assert!(crate::Lottery::::get().is_some()); assert_eq!(Balances::free_balance(&1), 100); - let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 20 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 20 })); assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); // 20 from the transfer, 10 from buying a ticket assert_eq!(Balances::free_balance(&1), 100 - 20 - 10); @@ -127,17 +128,17 @@ fn set_calls_works() { assert!(!CallIndices::::exists()); let calls = vec![ - Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; assert_ok!(Lottery::set_calls(Origin::root(), calls)); assert!(CallIndices::::exists()); let too_many_calls = vec![ - Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), - Call::System(SystemCall::remark { remark: vec![] }), + RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::System(SystemCall::remark { remark: vec![] }), ]; assert_noop!( @@ -155,8 +156,8 @@ fn set_calls_works() { fn call_to_indices_works() { new_test_ext().execute_with(|| { let calls = vec![ - Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; let indices = Lottery::calls_to_indices(&calls).unwrap().into_inner(); // Only comparing the length since it is otherwise dependant on the API @@ -164,9 +165,9 @@ fn call_to_indices_works() { assert_eq!(indices.len(), calls.len()); let too_many_calls = vec![ - Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - 
Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), - Call::System(SystemCall::remark { remark: vec![] }), + RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::System(SystemCall::remark { remark: vec![] }), ]; assert_noop!(Lottery::calls_to_indices(&too_many_calls), Error::::TooManyCalls); }); @@ -202,7 +203,7 @@ fn buy_ticket_works_as_simple_passthrough() { // as a simple passthrough to the real call. new_test_ext().execute_with(|| { // No lottery set up - let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 20 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 20 })); // This is just a basic transfer then assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); assert_eq!(Balances::free_balance(&1), 100 - 20); @@ -210,8 +211,8 @@ fn buy_ticket_works_as_simple_passthrough() { // Lottery is set up, but too expensive to enter, so `do_buy_ticket` fails. let calls = vec![ - Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), - Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; assert_ok!(Lottery::set_calls(Origin::root(), calls)); @@ -222,24 +223,28 @@ fn buy_ticket_works_as_simple_passthrough() { assert_eq!(TicketsCount::::get(), 0); // If call would fail, the whole thing still fails the same - let fail_call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 1000 })); + let fail_call = + Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1000 })); assert_noop!( Lottery::buy_ticket(Origin::signed(1), fail_call), BalancesError::::InsufficientBalance, ); - let bad_origin_call = - Box::new(Call::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 })); + let bad_origin_call = Box::new(RuntimeCall::Balances(BalancesCall::force_transfer { + source: 0, + dest: 0, + value: 0, + })); assert_noop!(Lottery::buy_ticket(Origin::signed(1), bad_origin_call), BadOrigin,); // User can call other txs, but doesn't get a ticket let remark_call = - Box::new(Call::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); + Box::new(RuntimeCall::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); assert_ok!(Lottery::buy_ticket(Origin::signed(2), remark_call)); assert_eq!(TicketsCount::::get(), 0); let successful_call = - Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 1 })); + Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1 })); assert_ok!(Lottery::buy_ticket(Origin::signed(2), successful_call)); assert_eq!(TicketsCount::::get(), 1); }); @@ -250,13 +255,13 @@ fn buy_ticket_works() { new_test_ext().execute_with(|| { // Set calls for the lottery. 
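The passthrough behaviour exercised in the tests above follows from the shape of `buy_ticket`: the boxed `RuntimeCall` is always dispatched with the caller's origin, and only afterwards is a ticket purchase attempted on a best-effort basis. A paraphrased sketch of that extrinsic body (only the dispatch line is quoted verbatim from the hunk earlier):

pub fn buy_ticket(
    origin: OriginFor<T>,
    call: Box<<T as Config>::RuntimeCall>,
) -> DispatchResult {
    let caller = ensure_signed(origin.clone())?;
    // The inner call always runs (and can fail the whole extrinsic)...
    call.clone().dispatch(origin).map_err(|e| e.error)?;
    // ...while a failed ticket purchase is swallowed, so the call still succeeds.
    let _ = Self::do_buy_ticket(&caller, &call);
    Ok(())
}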
let calls = vec![ - Call::System(SystemCall::remark { remark: vec![] }), - Call::Balances(BalancesCall::transfer { dest: 0, value: 0 }), + RuntimeCall::System(SystemCall::remark { remark: vec![] }), + RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; assert_ok!(Lottery::set_calls(Origin::root(), calls)); // Can't buy ticket before start - let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 2, value: 1 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1 })); assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); assert_eq!(TicketsCount::::get(), 0); @@ -269,12 +274,13 @@ fn buy_ticket_works() { assert_eq!(TicketsCount::::get(), 1); // Can't buy another of the same ticket (even if call is slightly changed) - let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 3, value: 30 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 3, value: 30 })); assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); assert_eq!(TicketsCount::::get(), 1); // Buy ticket for remark - let call = Box::new(Call::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); + let call = + Box::new(RuntimeCall::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); assert_eq!(TicketsCount::::get(), 2); @@ -296,7 +302,7 @@ fn buy_ticket_works() { #[test] fn do_buy_ticket_already_participating() { new_test_ext().execute_with(|| { - let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); assert_ok!(Lottery::start_lottery(Origin::root(), 1, 10, 10, false)); @@ -311,7 +317,7 @@ fn do_buy_ticket_already_participating() { #[test] fn buy_ticket_already_participating() { new_test_ext().execute_with(|| { - let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); assert_ok!(Lottery::start_lottery(Origin::root(), 1, 10, 10, false)); @@ -331,7 +337,7 @@ fn buy_ticket_already_participating() { #[test] fn buy_ticket_insufficient_balance() { new_test_ext().execute_with(|| { - let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); // Price set to 100. assert_ok!(Lottery::start_lottery(Origin::root(), 100, 10, 10, false)); @@ -346,7 +352,7 @@ fn buy_ticket_insufficient_balance() { #[test] fn do_buy_ticket_insufficient_balance() { new_test_ext().execute_with(|| { - let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); // Price set to 101. 
assert_ok!(Lottery::start_lottery(Origin::root(), 101, 10, 10, false)); @@ -363,7 +369,7 @@ fn do_buy_ticket_insufficient_balance() { #[test] fn do_buy_ticket_keep_alive() { new_test_ext().execute_with(|| { - let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); // Price set to 100. assert_ok!(Lottery::start_lottery(Origin::root(), 100, 10, 10, false)); @@ -415,7 +421,7 @@ fn choose_ticket_trivial_cases() { #[test] fn choose_account_one_participant() { new_test_ext().execute_with(|| { - let calls = vec![Call::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; + let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); assert_ok!(Lottery::start_lottery(Origin::root(), 10, 10, 10, false)); let call = Box::new(calls[0].clone()); diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index b4c9c3b38f1da..a3f79ab2052e9 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -55,7 +55,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// Required origin for adding a member (though can always be Root). type AddOrigin: EnsureOrigin; @@ -147,7 +148,7 @@ pub mod pallet { /// One of the members' keys changed. KeyChanged, /// Phantom member, never used. - Dummy { _phantom_data: PhantomData<(T::AccountId, >::Event)> }, + Dummy { _phantom_data: PhantomData<(T::AccountId, >::RuntimeEvent)> }, } #[pallet::error] @@ -546,12 +547,12 @@ mod tests { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -600,7 +601,7 @@ mod tests { } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type AddOrigin = EnsureSignedBy; type RemoveOrigin = EnsureSignedBy; type SwapOrigin = EnsureSignedBy; diff --git a/frame/merkle-mountain-range/src/mock.rs b/frame/merkle-mountain-range/src/mock.rs index 30dd6da3d3b8d..abaa7b106d539 100644 --- a/frame/merkle-mountain-range/src/mock.rs +++ b/frame/merkle-mountain-range/src/mock.rs @@ -48,7 +48,7 @@ frame_support::construct_runtime!( impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -56,7 +56,7 @@ impl frame_system::Config for Test { type AccountId = sp_core::sr25519::Public; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); type BlockWeights = (); diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index dafc421a7b72d..8d0651002305b 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -42,7 +42,7 @@ fn setup_multi( } signatories.sort(); // Must first convert to outer call type. 
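The multisig hunks that follow keep the two views of a call the pallet has always used, just renamed: the opaque, still-encoded form (`WrapperKeepOpaque<RuntimeCall>`) and the 32-byte hash the signatories actually approve. In miniature, mirroring the benchmark setup below (`T` is any runtime satisfying the multisig `Config`):

// The outer call, built from a system remark and converted via the `From` bound.
let call: <T as Config>::RuntimeCall =
    frame_system::Call::<T>::remark { remark: vec![0; 32] }.into();

// What gets stored and passed around: the call kept in its encoded, opaque form...
let call_data = OpaqueCall::<T>::from_encoded(call.encode());

// ...and what the signatories approve: the blake2_256 hash of that encoding.
let call_hash: [u8; 32] = call.using_encoded(blake2_256);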
- let call: ::Call = + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); let call_data = OpaqueCall::::from_encoded(call.encode()); Ok((signatories, call_data)) @@ -54,7 +54,7 @@ benchmarks! { let z in 0 .. 10_000; let max_signatories = T::MaxSignatories::get().into(); let (mut signatories, _) = setup_multi::(max_signatories, z)?; - let call: ::Call = frame_system::Call::::remark { + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); let call_hash = call.using_encoded(blake2_256); diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 01904de9a96fa..4593d6dade690 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -99,7 +99,7 @@ pub struct Multisig { approvals: Vec, } -type OpaqueCall = WrapperKeepOpaque<::Call>; +type OpaqueCall = WrapperKeepOpaque<::RuntimeCall>; type CallHash = [u8; 32]; @@ -117,10 +117,10 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The overarching call type. - type Call: Parameter + type RuntimeCall: Parameter + Dispatchable + GetDispatchInfo + From>; @@ -266,7 +266,7 @@ pub mod pallet { pub fn as_multi_threshold_1( origin: OriginFor, other_signatories: Vec, - call: Box<::Call>, + call: Box<::RuntimeCall>, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let max_sigs = T::MaxSignatories::get() as usize; @@ -698,7 +698,7 @@ impl Pallet { fn get_call( hash: &[u8; 32], maybe_known: Option<&OpaqueCall>, - ) -> Option<(::Call, usize)> { + ) -> Option<(::RuntimeCall, usize)> { maybe_known.map_or_else( || { Calls::::get(hash) diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index ddf5197ab734c..7eb9a9c3aae32 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -61,12 +61,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -84,7 +84,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -92,19 +92,19 @@ impl pallet_balances::Config for Test { } pub struct TestBaseCallFilter; -impl Contains for TestBaseCallFilter { - fn contains(c: &Call) -> bool { +impl Contains for TestBaseCallFilter { + fn contains(c: &RuntimeCall) -> bool { match *c { - Call::Balances(_) => true, + RuntimeCall::Balances(_) => true, // Needed for benchmarking - Call::System(frame_system::Call::remark { .. }) => true, + RuntimeCall::System(frame_system::Call::remark { .. 
}) => true, _ => false, } } } impl Config for Test { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type Currency = Balances; type DepositBase = ConstU64<1>; type DepositFactor = ConstU64<1>; @@ -130,8 +130,8 @@ fn now() -> Timepoint { Multisig::timepoint() } -fn call_transfer(dest: u64, value: u64) -> Call { - Call::Balances(BalancesCall::transfer { dest, value }) +fn call_transfer(dest: u64, value: u64) -> RuntimeCall { + RuntimeCall::Balances(BalancesCall::transfer { dest, value }) } #[test] @@ -850,7 +850,7 @@ fn multisig_1_of_3_works() { #[test] fn multisig_filters() { new_test_ext().execute_with(|| { - let call = Box::new(Call::System(frame_system::Call::set_code { code: vec![] })); + let call = Box::new(RuntimeCall::System(frame_system::Call::set_code { code: vec![] })); assert_noop!( Multisig::as_multi_threshold_1(Origin::signed(1), vec![2], call.clone()), DispatchError::from(frame_system::Error::::CallFiltered), diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index cf5af649b7843..0ab9a509a568f 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -58,7 +58,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The currency trait. type Currency: ReservableCurrency; @@ -284,12 +284,12 @@ mod tests { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -307,7 +307,7 @@ mod tests { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -318,7 +318,7 @@ mod tests { pub const One: u64 = 1; } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ReservationFee = ConstU64<2>; type Slashed = (); diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 427e71af6e8c4..42f1fcf7b55e6 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -67,7 +67,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The maximum number of well known nodes that are allowed to set #[pallet::constant] diff --git a/frame/node-authorization/src/mock.rs b/frame/node-authorization/src/mock.rs index d959d1b8610f5..448f8cef80e28 100644 --- a/frame/node-authorization/src/mock.rs +++ b/frame/node-authorization/src/mock.rs @@ -56,12 +56,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -82,7 +82,7 @@ ord_parameter_types! 
{ } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type MaxWellKnownNodes = ConstU32<4>; type MaxPeerIdLength = ConstU32<2>; type AddOrigin = EnsureSignedBy; diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index d239d4f072b80..aad38ee8475d2 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -35,13 +35,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; @@ -69,7 +69,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -96,7 +96,7 @@ impl pallet_staking::Config for Runtime { type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Slash = (); type Reward = (); type SessionsPerEra = (); @@ -123,7 +123,7 @@ parameter_types! { } impl pallet_bags_list::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type BagThresholds = BagThresholds; type ScoreProvider = Staking; @@ -151,7 +151,7 @@ parameter_types! { } impl pallet_nomination_pools::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type Currency = Balances; type CurrencyBalance = Balance; diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 9e872955669f8..dfe4bf1931068 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -1151,7 +1151,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. 
type WeightInfo: weights::WeightInfo; diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index f1574b3684aab..13df09c47fa92 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -131,13 +131,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = sp_runtime::traits::IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type DbWeight = (); type BlockLength = (); @@ -161,7 +161,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -189,7 +189,7 @@ parameter_types! { pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); } impl pools::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type Currency = Balances; type CurrencyBalance = Balance; @@ -329,7 +329,7 @@ pub(crate) fn pool_events_since_last_call() -> Vec> { let events = System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let Event::Pools(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let RuntimeEvent::Pools(inner) = e { Some(inner) } else { None }) .collect::>(); let already_seen = PoolsEvents::get(); PoolsEvents::set(&(events.len() as u32)); @@ -341,7 +341,7 @@ pub(crate) fn balances_events_since_last_call() -> Vec>(); let already_seen = BalancesEvents::get(); BalancesEvents::set(&(events.len() as u32)); diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 362fece3f3daf..bcc735a9cef99 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -4147,7 +4147,7 @@ mod create { Balances::make_free_balance_be(&11, 5 + 20); // Then - let create = Call::Pools(crate::Call::::create { + let create = RuntimeCall::Pools(crate::Call::::create { amount: 20, root: 11, nominator: 11, diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index 055ba7b4b3c06..d616ee1ffc180 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -46,13 +46,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; @@ -81,7 +81,7 @@ impl pallet_balances::Config for Runtime { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -111,7 +111,7 @@ impl pallet_staking::Config for Runtime { type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = 
frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Slash = (); type Reward = (); type SessionsPerEra = (); @@ -138,7 +138,7 @@ parameter_types! { } impl pallet_bags_list::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type BagThresholds = BagThresholds; type ScoreProvider = Staking; @@ -165,7 +165,7 @@ parameter_types! { } impl pallet_nomination_pools::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type Currency = Balances; type CurrencyBalance = Balance; @@ -248,7 +248,7 @@ pub(crate) fn pool_events_since_last_call() -> Vec>(); let already_seen = ObservedEventsPools::get(); ObservedEventsPools::set(events.len()); @@ -259,7 +259,7 @@ pub(crate) fn staking_events_since_last_call() -> Vec>(); let already_seen = ObservedEventsStaking::get(); ObservedEventsStaking::set(events.len()); diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 5d81a71d4c47c..f73dd9caac9c6 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -216,7 +216,7 @@ fn make_offenders_im_online( } #[cfg(test)] -fn check_events::Event>>(expected: I) { +fn check_events::RuntimeEvent>>(expected: I) { let events = System::::events() .into_iter() .map(|frame_system::EventRecord { event, .. }| event) @@ -308,16 +308,16 @@ benchmarks! { let reward_amount = slash_amount.saturating_mul(1 + n) / 2; let reward = reward_amount / r; let slash = |id| core::iter::once( - ::Event::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) + ::RuntimeEvent::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) ); let balance_slash = |id| core::iter::once( - ::Event::from(pallet_balances::Event::::Slashed{who: id, amount: slash_amount.into()}) + ::RuntimeEvent::from(pallet_balances::Event::::Slashed{who: id, amount: slash_amount.into()}) ); let chill = |id| core::iter::once( - ::Event::from(StakingEvent::::Chilled(id)) + ::RuntimeEvent::from(StakingEvent::::Chilled(id)) ); let balance_deposit = |id, amount: u32| - ::Event::from(pallet_balances::Event::::Deposit{who: id, amount: amount.into()}); + ::RuntimeEvent::from(pallet_balances::Event::::Deposit{who: id, amount: amount.into()}); let mut first = true; let slash_events = raw_offenders.into_iter() .flat_map(|offender| { @@ -339,7 +339,7 @@ benchmarks! { .flat_map(|reporter| vec![ balance_deposit(reporter.clone(), reward).into(), frame_system::Event::::NewAccount { account: reporter.clone() }.into(), - ::Event::from( + ::RuntimeEvent::from( pallet_balances::Event::::Endowed{account: reporter, free_balance: reward.into()} ).into(), ]) @@ -366,7 +366,7 @@ benchmarks! 
{ check_events::( std::iter::empty() .chain(slash_events.into_iter().map(Into::into)) - .chain(std::iter::once(::Event::from( + .chain(std::iter::once(::RuntimeEvent::from( pallet_offences::Event::Offence{ kind: UnresponsivenessOffence::::ID, timeslot: 0_u32.to_le_bytes().to_vec(), diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 6d66f5251c78c..3483f5ec240a5 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -53,13 +53,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; @@ -77,7 +77,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; @@ -128,7 +128,7 @@ impl pallet_session::Config for Test { type ShouldEndSession = pallet_session::PeriodicSessions; type NextSessionRotation = pallet_session::PeriodicSessions; type SessionHandler = TestSessionHandler; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type WeightInfo = (); @@ -148,7 +148,7 @@ parameter_types! { pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; } -pub type Extrinsic = sp_runtime::testing::TestXt; +pub type Extrinsic = sp_runtime::testing::TestXt; pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { @@ -165,7 +165,7 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Slash = (); type Reward = (); type SessionsPerEra = (); @@ -188,7 +188,7 @@ impl pallet_staking::Config for Test { impl pallet_im_online::Config for Test { type AuthorityId = UintAuthorityId; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorSet = Historical; type NextSessionRotation = pallet_session::PeriodicSessions; type ReportUnresponsiveness = Offences; @@ -200,23 +200,23 @@ impl pallet_im_online::Config for Test { } impl pallet_offences::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; } impl frame_system::offchain::SendTransactionTypes for Test where - Call: From, + RuntimeCall: From, { type Extrinsic = Extrinsic; - type OverarchingCall = Call; + type OverarchingCall = RuntimeCall; } impl crate::Config for Test {} pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Test where diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index ae454d6b06740..7858b02719c4c 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs 
@@ -57,7 +57,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + IsType<::Event>; + type RuntimeEvent: From + IsType<::RuntimeEvent>; /// Full identification of the validator. type IdentificationTuple: Parameter; /// A handler called for every offence report. diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index bf4daf0c89944..5b07aab5bd85e 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -95,13 +95,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -115,7 +115,7 @@ impl frame_system::Config for Runtime { } impl Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type IdentificationTuple = u64; type OnOffenceHandler = OnOffenceHandler; } diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index 49bd2fb5a6923..266e05debf050 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -21,8 +21,8 @@ use super::*; use crate::mock::{ - new_test_ext, offence_reports, report_id, with_on_offence_fractions, Event, Offence, Offences, - System, KIND, + new_test_ext, offence_reports, report_id, with_on_offence_fractions, Offence, Offences, + RuntimeEvent, System, KIND, }; use frame_system::{EventRecord, Phase}; use sp_runtime::Perbill; @@ -114,7 +114,7 @@ fn should_deposit_event() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Offences(crate::Event::Offence { + event: RuntimeEvent::Offences(crate::Event::Offence { kind: KIND, timeslot: time_slot.encode() }), @@ -148,7 +148,7 @@ fn doesnt_deposit_event_for_dups() { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Offences(crate::Event::Offence { + event: RuntimeEvent::Offences(crate::Event::Offence { kind: KIND, timeslot: time_slot.encode() }), diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index 09f6ecd52f9ad..e4616078b0761 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -76,7 +76,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The Weight information for this pallet. 
type WeightInfo: weights::WeightInfo; diff --git a/frame/preimage/src/mock.rs b/frame/preimage/src/mock.rs index 1a1f05cb9c088..75e8767f3e59f 100644 --- a/frame/preimage/src/mock.rs +++ b/frame/preimage/src/mock.rs @@ -58,7 +58,7 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = RocksDbWeight; type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -66,7 +66,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -81,7 +81,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<5>; type AccountStore = System; @@ -102,7 +102,7 @@ ord_parameter_types! { impl Config for Test { type WeightInfo = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureSignedBy; type MaxSize = ConstU32<1024>; diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index c01a8da000c09..f7e440d1f7d6f 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -27,7 +27,7 @@ use sp_runtime::traits::Bounded; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } @@ -88,7 +88,7 @@ benchmarks! { // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); }: _(RawOrigin::Signed(caller), real_lookup, Some(T::ProxyType::default()), Box::new(call)) verify { assert_last_event::(Event::ProxyExecuted { result: Ok(()) }.into()) @@ -105,7 +105,7 @@ benchmarks! { // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(delegate.clone()).into(), real_lookup.clone(), @@ -126,7 +126,7 @@ benchmarks! { // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real); - let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real_lookup.clone(), @@ -149,7 +149,7 @@ benchmarks! { // ... and "real" is the traditional caller. This is not a typo. let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real.clone()); - let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::announce( RawOrigin::Signed(caller.clone()).into(), real_lookup, @@ -172,7 +172,7 @@ benchmarks! 
{ let real: T::AccountId = whitelisted_caller(); let real_lookup = T::Lookup::unlookup(real.clone()); add_announcements::(a, Some(caller.clone()), None)?; - let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); let call_hash = T::CallHasher::hash_of(&call); }: _(RawOrigin::Signed(caller.clone()), real_lookup, call_hash) verify { diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 2d8dfc238bcd0..7467fb0988dd6 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -110,15 +110,15 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The overarching call type. - type Call: Parameter + type RuntimeCall: Parameter + Dispatchable + GetDispatchInfo + From> + IsSubType> - + IsType<::Call>; + + IsType<::RuntimeCall>; /// The currency mechanism. type Currency: ReservableCurrency; @@ -131,7 +131,7 @@ pub mod pallet { + Member + Ord + PartialOrd - + InstanceFilter<::Call> + + InstanceFilter<::RuntimeCall> + Default + MaxEncodedLen; @@ -208,7 +208,7 @@ pub mod pallet { origin: OriginFor, real: AccountIdLookupOf, force_proxy_type: Option, - call: Box<::Call>, + call: Box<::RuntimeCall>, ) -> DispatchResult { let who = ensure_signed(origin)?; let real = T::Lookup::lookup(real)?; @@ -539,7 +539,7 @@ pub mod pallet { delegate: AccountIdLookupOf, real: AccountIdLookupOf, force_proxy_type: Option, - call: Box<::Call>, + call: Box<::RuntimeCall>, ) -> DispatchResult { ensure_signed(origin)?; let delegate = T::Lookup::lookup(delegate)?; @@ -817,12 +817,12 @@ impl Pallet { fn do_proxy( def: ProxyDefinition, real: T::AccountId, - call: ::Call, + call: ::RuntimeCall, ) { // This is a freshly authenticated new account, the origin restrictions doesn't apply. let mut origin: T::Origin = frame_system::RawOrigin::Signed(real).into(); - origin.add_filter(move |c: &::Call| { - let c = ::Call::from_ref(c); + origin.add_filter(move |c: &::RuntimeCall| { + let c = ::RuntimeCall::from_ref(c); // We make sure the proxy call does access this pallet to change modify proxies. 
match c.is_sub_type() { // Proxy call cannot add or remove a proxy with more permissions than it already diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index b8d5a55705efa..0025b1510526e 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -65,12 +65,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -88,15 +88,15 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); } impl pallet_utility::Config for Test { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type PalletsOrigin = OriginCaller; type WeightInfo = (); } @@ -124,14 +124,14 @@ impl Default for ProxyType { Self::Any } } -impl InstanceFilter for ProxyType { - fn filter(&self, c: &Call) -> bool { +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { match self { ProxyType::Any => true, ProxyType::JustTransfer => { - matches!(c, Call::Balances(pallet_balances::Call::transfer { .. })) + matches!(c, RuntimeCall::Balances(pallet_balances::Call::transfer { .. })) }, - ProxyType::JustUtility => matches!(c, Call::Utility { .. }), + ProxyType::JustUtility => matches!(c, RuntimeCall::Utility { .. }), } } fn is_superset(&self, o: &Self) -> bool { @@ -139,19 +139,19 @@ impl InstanceFilter for ProxyType { } } pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(c: &Call) -> bool { +impl Contains for BaseFilter { + fn contains(c: &RuntimeCall) -> bool { match *c { // Remark is used as a no-op call in the benchmarking - Call::System(SystemCall::remark { .. }) => true, - Call::System(_) => false, + RuntimeCall::System(SystemCall::remark { .. 
}) => true, + RuntimeCall::System(_) => false, _ => true, } } } impl Config for Test { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type Currency = Balances; type ProxyType = ProxyType; type ProxyDepositBase = ConstU64<1>; @@ -183,7 +183,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -fn last_events(n: usize) -> Vec { +fn last_events(n: usize) -> Vec { system::Pallet::::events() .into_iter() .rev() @@ -193,12 +193,12 @@ fn last_events(n: usize) -> Vec { .collect() } -fn expect_events(e: Vec) { +fn expect_events(e: Vec) { assert_eq!(last_events(e.len()), e); } -fn call_transfer(dest: u64, value: u64) -> Call { - Call::Balances(BalancesCall::transfer { dest, value }) +fn call_transfer(dest: u64, value: u64) -> RuntimeCall { + RuntimeCall::Balances(BalancesCall::transfer { dest, value }) } #[test] @@ -346,8 +346,10 @@ fn filtering_works() { assert!(Balances::mutate_account(&derivative_id, |a| a.free = 1000).is_ok()); let inner = Box::new(call_transfer(6, 1)); - let call = - Box::new(Call::Utility(UtilityCall::as_derivative { index: 0, call: inner.clone() })); + let call = Box::new(RuntimeCall::Utility(UtilityCall::as_derivative { + index: 0, + call: inner.clone(), + })); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); @@ -359,7 +361,7 @@ fn filtering_works() { ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - let call = Box::new(Call::Utility(UtilityCall::batch { calls: vec![*inner] })); + let call = Box::new(RuntimeCall::Utility(UtilityCall::batch { calls: vec![*inner] })); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchCompleted.into(), @@ -376,9 +378,12 @@ fn filtering_works() { ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); - let inner = - Box::new(Call::Proxy(ProxyCall::new_call_variant_add_proxy(5, ProxyType::Any, 0))); - let call = Box::new(Call::Utility(UtilityCall::batch { calls: vec![*inner] })); + let inner = Box::new(RuntimeCall::Proxy(ProxyCall::new_call_variant_add_proxy( + 5, + ProxyType::Any, + 0, + ))); + let call = Box::new(RuntimeCall::Utility(UtilityCall::batch { calls: vec![*inner] })); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchCompleted.into(), @@ -395,7 +400,7 @@ fn filtering_works() { ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); - let call = Box::new(Call::Proxy(ProxyCall::remove_proxies {})); + let call = Box::new(RuntimeCall::Proxy(ProxyCall::remove_proxies {})); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), @@ -517,15 +522,17 @@ fn proxying_works() { System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); - let call = Box::new(Call::System(SystemCall::set_code { code: vec![] })); + let call = Box::new(RuntimeCall::System(SystemCall::set_code { code: vec![] })); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - let call = - Box::new(Call::Balances(BalancesCall::transfer_keep_alive { dest: 6, value: 1 })); - 
assert_ok!(Call::Proxy(super::Call::new_call_variant_proxy(1, None, call.clone())) + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer_keep_alive { + dest: 6, + value: 1, + })); + assert_ok!(RuntimeCall::Proxy(super::Call::new_call_variant_proxy(1, None, call.clone())) .dispatch(Origin::signed(2))); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), @@ -572,7 +579,7 @@ fn anonymous_works() { System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); - let call = Box::new(Call::Proxy(ProxyCall::new_call_variant_kill_anonymous( + let call = Box::new(RuntimeCall::Proxy(ProxyCall::new_call_variant_kill_anonymous( 1, ProxyType::Any, 0, diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 467cae9728fae..bd86cc0d4029d 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -200,13 +200,13 @@ mod tests { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/ranked-collective/src/benchmarking.rs b/frame/ranked-collective/src/benchmarking.rs index 611a4ce7334a9..a4d074450e836 100644 --- a/frame/ranked-collective/src/benchmarking.rs +++ b/frame/ranked-collective/src/benchmarking.rs @@ -27,7 +27,7 @@ use frame_system::RawOrigin as SystemOrigin; const SEED: u32 = 0; -fn assert_last_event, I: 'static>(generic_event: >::Event) { +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index b8eaac9823634..7f212b3859776 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -363,7 +363,8 @@ pub mod pallet { type WeightInfo: WeightInfo; /// The outer event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// The origin required to add or promote a mmember. The success value indicates the /// maximum rank *to which* the promotion may be. diff --git a/frame/ranked-collective/src/tests.rs b/frame/ranked-collective/src/tests.rs index b4173b30b0c2e..25413b490483c 100644 --- a/frame/ranked-collective/src/tests.rs +++ b/frame/ranked-collective/src/tests.rs @@ -61,13 +61,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -171,7 +171,7 @@ impl Polling> for TestPolls { impl Config for Test { type WeightInfo = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type PromoteOrigin = EitherOf< // Root can promote arbitrarily. 
frame_system::EnsureRootWithSuccess>, diff --git a/frame/recovery/src/benchmarking.rs b/frame/recovery/src/benchmarking.rs index 56d5df22d49c5..870543d9bd290 100644 --- a/frame/recovery/src/benchmarking.rs +++ b/frame/recovery/src/benchmarking.rs @@ -28,7 +28,7 @@ use sp_runtime::traits::Bounded; const SEED: u32 = 0; const DEFAULT_DELAY: u32 = 0; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } @@ -107,7 +107,7 @@ benchmarks! { let caller: T::AccountId = whitelisted_caller(); let recovered_account: T::AccountId = account("recovered_account", 0, SEED); let recovered_account_lookup = T::Lookup::unlookup(recovered_account.clone()); - let call: ::Call = frame_system::Call::::remark { remark: vec![] }.into(); + let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![] }.into(); Proxy::::insert(&caller, &recovered_account); }: _( diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 45260b577f700..dd9e7208320c4 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -226,13 +226,13 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// The overarching call type. - type Call: Parameter + type RuntimeCall: Parameter + Dispatchable + GetDispatchInfo + From>; @@ -384,7 +384,7 @@ pub mod pallet { pub fn as_recovered( origin: OriginFor, account: AccountIdLookupOf, - call: Box<::Call>, + call: Box<::RuntimeCall>, ) -> DispatchResult { let who = ensure_signed(origin)?; let account = T::Lookup::lookup(account)?; diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 5dc49fff09b32..6496ceb253b77 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -56,7 +56,7 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -64,7 +64,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -87,7 +87,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); @@ -102,9 +102,9 @@ parameter_types! 
{ } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); - type Call = Call; + type RuntimeCall = RuntimeCall; type Currency = Balances; type ConfigDepositBase = ConfigDepositBase; type FriendDepositFactor = FriendDepositFactor; diff --git a/frame/recovery/src/tests.rs b/frame/recovery/src/tests.rs index a900a5b6bfa2a..a3f19e5a61bc8 100644 --- a/frame/recovery/src/tests.rs +++ b/frame/recovery/src/tests.rs @@ -20,8 +20,8 @@ use super::*; use frame_support::{assert_noop, assert_ok, bounded_vec, traits::Currency}; use mock::{ - new_test_ext, run_to_block, Balances, BalancesCall, Call, MaxFriends, Origin, Recovery, - RecoveryCall, Test, + new_test_ext, run_to_block, Balances, BalancesCall, MaxFriends, Origin, Recovery, RecoveryCall, + RuntimeCall, Test, }; use sp_runtime::traits::BadOrigin; @@ -45,7 +45,7 @@ fn set_recovered_works() { // Root can set a recovered account though assert_ok!(Recovery::set_recovered(Origin::root(), 5, 1)); // Account 1 should now be able to make a call through account 5 - let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 1, value: 100 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 1, value: 100 })); assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // Account 1 has successfully drained the funds from account 5 assert_eq!(Balances::free_balance(1), 200); @@ -77,15 +77,15 @@ fn recovery_life_cycle_works() { assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); // Account 1 can use account 5 to close the active recovery process, claiming the deposited // funds used to initiate the recovery process into account 5. - let call = Box::new(Call::Recovery(RecoveryCall::close_recovery { rescuer: 1 })); + let call = Box::new(RuntimeCall::Recovery(RecoveryCall::close_recovery { rescuer: 1 })); assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // Account 1 can then use account 5 to remove the recovery configuration, claiming the // deposited funds used to create the recovery configuration into account 5. - let call = Box::new(Call::Recovery(RecoveryCall::remove_recovery {})); + let call = Box::new(RuntimeCall::Recovery(RecoveryCall::remove_recovery {})); assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // Account 1 should now be able to make a call through account 5 to get all of their funds assert_eq!(Balances::free_balance(5), 110); - let call = Box::new(Call::Balances(BalancesCall::transfer { dest: 1, value: 110 })); + let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 1, value: 110 })); assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); // All funds have been fully recovered! 
assert_eq!(Balances::free_balance(1), 200); diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index 4a64a9aa03b77..2116d56f4647b 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -32,7 +32,7 @@ use sp_runtime::traits::{Bounded, Hash}; const SEED: u32 = 0; #[allow(dead_code)] -fn assert_last_event, I: 'static>(generic_event: >::Event) { +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index e9e14e1d4a96a..92e5ecf5e539b 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -123,8 +123,9 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + Sized { // System level stuff. - type Call: Parameter + Dispatchable + From>; - type Event: From> + IsType<::Event>; + type RuntimeCall: Parameter + Dispatchable + From>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// The Scheduler. diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs index 698bea8cc9f67..e046b29898e7b 100644 --- a/frame/referenda/src/mock.rs +++ b/frame/referenda/src/mock.rs @@ -55,9 +55,9 @@ frame_support::construct_runtime!( // Test that a fitlered call can be dispatched. pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(call: &Call) -> bool { - !matches!(call, &Call::Balances(pallet_balances::Call::set_balance { .. })) +impl Contains for BaseFilter { + fn contains(call: &RuntimeCall) -> bool { + !matches!(call, &RuntimeCall::Balances(pallet_balances::Call::set_balance { .. })) } } @@ -74,13 +74,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -93,7 +93,7 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } impl pallet_preimage::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type Currency = Balances; type ManagerOrigin = EnsureRoot; @@ -102,10 +102,10 @@ impl pallet_preimage::Config for Test { type ByteDeposit = (); } impl pallet_scheduler::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Origin = Origin; type PalletsOrigin = OriginCaller; - type Call = Call; + type RuntimeCall = RuntimeCall; type MaximumWeight = MaxWeight; type ScheduleOrigin = EnsureRoot; type MaxScheduledPerBlock = ConstU32<100>; @@ -119,7 +119,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type MaxLocks = ConstU32<10>; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -213,8 +213,8 @@ impl TracksInfo for TestTracksInfo { impl Config for Test { type WeightInfo = (); - type Call = Call; - type Event = Event; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; type Scheduler = Scheduler; type Currency = pallet_balances::Pallet; type SubmitOrigin = frame_system::EnsureSigned; @@ -297,12 +297,16 @@ impl VoteTally for 
Tally { } pub fn set_balance_proposal(value: u64) -> Vec { - Call::Balances(pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0 }) - .encode() + RuntimeCall::Balances(pallet_balances::Call::set_balance { + who: 42, + new_free: value, + new_reserved: 0, + }) + .encode() } pub fn set_balance_proposal_hash(value: u64) -> H256 { - let c = Call::Balances(pallet_balances::Call::set_balance { + let c = RuntimeCall::Balances(pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0, diff --git a/frame/referenda/src/tests.rs b/frame/referenda/src/tests.rs index d5435daf185bd..69cd62f79159a 100644 --- a/frame/referenda/src/tests.rs +++ b/frame/referenda/src/tests.rs @@ -500,7 +500,7 @@ fn kill_errors_works() { #[test] fn set_balance_proposal_is_correctly_filtered_out() { for i in 0..10 { - let call = crate::mock::Call::decode(&mut &set_balance_proposal(i)[..]).unwrap(); + let call = crate::mock::RuntimeCall::decode(&mut &set_balance_proposal(i)[..]).unwrap(); assert!(!::BaseCallFilter::contains(&call)); } } diff --git a/frame/referenda/src/types.rs b/frame/referenda/src/types.rs index 3eba783246e10..984bd0642d959 100644 --- a/frame/referenda/src/types.rs +++ b/frame/referenda/src/types.rs @@ -30,7 +30,7 @@ pub type BalanceOf = pub type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; -pub type CallOf = >::Call; +pub type CallOf = >::RuntimeCall; pub type VotesOf = >::Votes; pub type TallyOf = >::Tally; pub type PalletsOriginOf = <::Origin as OriginTrait>::PalletsOrigin; diff --git a/frame/remark/src/benchmarking.rs b/frame/remark/src/benchmarking.rs index d30a8aa5df07d..c0db8d5d3d59b 100644 --- a/frame/remark/src/benchmarking.rs +++ b/frame/remark/src/benchmarking.rs @@ -27,9 +27,9 @@ use sp_std::*; #[cfg(test)] use crate::Pallet as Remark; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::RuntimeEvent) { let events = System::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::RuntimeEvent = generic_event.into(); let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } diff --git a/frame/remark/src/lib.rs b/frame/remark/src/lib.rs index c69f95907019f..b61c79f7f273d 100644 --- a/frame/remark/src/lib.rs +++ b/frame/remark/src/lib.rs @@ -43,7 +43,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; } diff --git a/frame/remark/src/mock.rs b/frame/remark/src/mock.rs index 67a0399e9c386..6ce7115345e57 100644 --- a/frame/remark/src/mock.rs +++ b/frame/remark/src/mock.rs @@ -46,7 +46,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -54,7 +54,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -69,7 +69,7 @@ impl frame_system::Config for Test { } impl pallet_remark::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); } diff --git a/frame/remark/src/tests.rs b/frame/remark/src/tests.rs index 580b64cc64997..2278e3817b48a 100644 --- a/frame/remark/src/tests.rs +++ b/frame/remark/src/tests.rs @@ -31,7 +31,7 @@ fn generates_event() { assert_ok!(Remark::::store(RawOrigin::Signed(caller).into(), data.clone(),)); let events = System::events(); // this one we create as we expect it - let system_event: ::Event = Event::Stored { + let system_event: ::RuntimeEvent = Event::Stored { content_hash: sp_io::hashing::blake2_256(&data).into(), sender: caller, } diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index a8e36b72814b7..fbd59555b5ebc 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -77,9 +77,9 @@ fn fill_schedule( Ok(()) } -fn call_and_hash(i: u32) -> (::Call, T::Hash) { +fn call_and_hash(i: u32) -> (::RuntimeCall, T::Hash) { // Essentially a no-op call. - let call: ::Call = frame_system::Call::remark { remark: i.encode() }.into(); + let call: ::RuntimeCall = frame_system::Call::remark { remark: i.encode() }.into(); let hash = T::Hashing::hash_of(&call); (call, hash) } diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 7746b7d0123f4..39dbac17cedd2 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -82,7 +82,8 @@ pub type PeriodicIndex = u32; /// The location of a scheduled task that can be used to remove it. pub type TaskAddress = (BlockNumber, u32); -pub type CallOrHashOf = MaybeHashed<::Call, ::Hash>; +pub type CallOrHashOf = + MaybeHashed<::RuntimeCall, ::Hash>; #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] #[derive(Clone, RuntimeDebug, Encode, Decode)] @@ -113,7 +114,7 @@ pub struct ScheduledV3 { use crate::ScheduledV3 as ScheduledV2; pub type ScheduledV2Of = ScheduledV3< - ::Call, + ::RuntimeCall, ::BlockNumber, ::PalletsOrigin, ::AccountId, @@ -198,7 +199,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The aggregated origin which the dispatch will take. type Origin: OriginTrait @@ -209,7 +210,7 @@ pub mod pallet { type PalletsOrigin: From> + Codec + Clone + Eq + TypeInfo; /// The aggregated call type. 
- type Call: Parameter + type RuntimeCall: Parameter + Dispatchable::Origin, PostInfo = PostDispatchInfo> + GetDispatchInfo + From>; @@ -543,27 +544,28 @@ impl Pallet { pub fn migrate_v1_to_v3() -> Weight { let mut weight = T::DbWeight::get().reads_writes(1, 1); - Agenda::::translate::::Call, T::BlockNumber>>>, _>( - |_, agenda| { - Some( - agenda - .into_iter() - .map(|schedule| { - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - - schedule.map(|schedule| ScheduledV3 { - maybe_id: schedule.maybe_id, - priority: schedule.priority, - call: schedule.call.into(), - maybe_periodic: schedule.maybe_periodic, - origin: system::RawOrigin::Root.into(), - _phantom: Default::default(), - }) + Agenda::::translate::< + Vec::RuntimeCall, T::BlockNumber>>>, + _, + >(|_, agenda| { + Some( + agenda + .into_iter() + .map(|schedule| { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + schedule.map(|schedule| ScheduledV3 { + maybe_id: schedule.maybe_id, + priority: schedule.priority, + call: schedule.call.into(), + maybe_periodic: schedule.maybe_periodic, + origin: system::RawOrigin::Root.into(), + _phantom: Default::default(), }) - .collect::>(), - ) - }, - ); + }) + .collect::>(), + ) + }); #[allow(deprecated)] frame_support::storage::migration::remove_storage_prefix( @@ -859,7 +861,7 @@ impl Pallet { } } -impl schedule::v2::Anon::Call, T::PalletsOrigin> +impl schedule::v2::Anon::RuntimeCall, T::PalletsOrigin> for Pallet { type Address = TaskAddress; @@ -891,7 +893,7 @@ impl schedule::v2::Anon::Call, T::Palle } } -impl schedule::v2::Named::Call, T::PalletsOrigin> +impl schedule::v2::Named::RuntimeCall, T::PalletsOrigin> for Pallet { type Address = TaskAddress; diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 1639c9a046142..6460d0f4c1cd1 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -58,7 +58,7 @@ pub mod logger { #[pallet::config] pub trait Config: frame_system::Config { - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; } #[pallet::event] @@ -110,9 +110,9 @@ frame_support::construct_runtime!( // Scheduler must dispatch with root and no filter, this tests base filter is indeed not used. pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(call: &Call) -> bool { - !matches!(call, Call::Logger(LoggerCall::log { .. })) +impl Contains for BaseFilter { + fn contains(call: &RuntimeCall) -> bool { + !matches!(call, RuntimeCall::Logger(LoggerCall::log { .. })) } } @@ -126,7 +126,7 @@ impl system::Config for Test { type BlockLength = (); type DbWeight = RocksDbWeight; type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -134,7 +134,7 @@ impl system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -147,7 +147,7 @@ impl system::Config for Test { type MaxConsumers = ConstU32<16>; } impl logger::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; } parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; @@ -158,7 +158,7 @@ ord_parameter_types! 
{ } impl pallet_preimage::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type Currency = (); type ManagerOrigin = EnsureRoot; @@ -168,10 +168,10 @@ impl pallet_preimage::Config for Test { } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Origin = Origin; type PalletsOrigin = OriginCaller; - type Call = Call; + type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EitherOfDiverse, EnsureSignedBy>; type MaxScheduledPerBlock = ConstU32<10>; diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index 999a810d3b7c1..f1168e6e9d26f 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -18,7 +18,9 @@ //! # Scheduler tests. use super::*; -use crate::mock::{logger, new_test_ext, root, run_to_block, Call, LoggerCall, Scheduler, Test, *}; +use crate::mock::{ + logger, new_test_ext, root, run_to_block, LoggerCall, RuntimeCall, Scheduler, Test, *, +}; use frame_support::{ assert_err, assert_noop, assert_ok, traits::{Contains, GetStorageVersion, OnInitialize, PreimageProvider}, @@ -30,7 +32,8 @@ use substrate_test_utils::assert_eq_uvec; #[test] fn basic_scheduling_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.into())); run_to_block(3); @@ -45,7 +48,8 @@ fn basic_scheduling_works() { #[test] fn scheduling_with_preimages_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); let hash = ::Hashing::hash_of(&call); let hashed = MaybeHashed::Hash(hash); assert_ok!(Preimage::note_preimage(Origin::signed(0), call.encode())); @@ -65,7 +69,8 @@ fn scheduling_with_preimages_works() { #[test] fn scheduling_with_preimage_postpones_correctly() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); let hash = ::Hashing::hash_of(&call); let hashed = MaybeHashed::Hash(hash); @@ -98,7 +103,8 @@ fn scheduling_with_preimage_postpones_correctly() { fn schedule_after_works() { new_test_ext().execute_with(|| { run_to_block(2); - let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); // This will schedule the call 3 blocks after the next block... 
so block 3 + 3 = 6 assert_ok!(Scheduler::do_schedule(DispatchTime::After(3), None, 127, root(), call.into())); @@ -115,7 +121,8 @@ fn schedule_after_works() { fn schedule_after_zero_works() { new_test_ext().execute_with(|| { run_to_block(2); - let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::After(0), None, 127, root(), call.into())); // Will trigger on the next block. @@ -135,7 +142,8 @@ fn periodic_scheduling_works() { Some((3, 3)), 127, root(), - Call::Logger(logger::Call::log { i: 42, weight: Weight::from_ref_time(1000) }).into() + RuntimeCall::Logger(logger::Call::log { i: 42, weight: Weight::from_ref_time(1000) }) + .into() )); run_to_block(3); assert!(logger::log().is_empty()); @@ -157,7 +165,8 @@ fn periodic_scheduling_works() { #[test] fn reschedule_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.into()).unwrap(), @@ -188,7 +197,8 @@ fn reschedule_works() { #[test] fn reschedule_named_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( @@ -230,7 +240,8 @@ fn reschedule_named_works() { #[test] fn reschedule_named_perodic_works() { new_test_ext().execute_with(|| { - let call = Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( @@ -292,7 +303,8 @@ fn cancel_named_scheduling_works_with_normal_cancel() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) + .into(), ) .unwrap(); let i = Scheduler::do_schedule( @@ -300,7 +312,8 @@ fn cancel_named_scheduling_works_with_normal_cancel() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) + .into(), ) .unwrap(); run_to_block(3); @@ -322,7 +335,8 @@ fn cancel_named_periodic_scheduling_works() { Some((3, 3)), 127, root(), - Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) + .into(), ) .unwrap(); // same id results in error. @@ -332,7 +346,8 @@ fn cancel_named_periodic_scheduling_works() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) + .into(), ) .is_err()); // different id is ok. 
@@ -342,7 +357,8 @@ fn cancel_named_periodic_scheduling_works() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) + .into(), ) .unwrap(); run_to_block(3); @@ -364,16 +380,22 @@ fn scheduler_respects_weight_limits() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 2 }) - .into(), + RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: MaximumSchedulerWeight::get() / 2 + }) + .into(), )); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) - .into(), + RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: MaximumSchedulerWeight::get() / 2 + }) + .into(), )); // 69 and 42 do not fit together run_to_block(4); @@ -391,16 +413,22 @@ fn scheduler_respects_hard_deadlines_more() { None, 0, root(), - Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 2 }) - .into(), + RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: MaximumSchedulerWeight::get() / 2 + }) + .into(), )); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 0, root(), - Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) - .into(), + RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: MaximumSchedulerWeight::get() / 2 + }) + .into(), )); // With base weights, 69 and 42 should not fit together, but do because of hard // deadlines @@ -417,16 +445,22 @@ fn scheduler_respects_priority_ordering() { None, 1, root(), - Call::Logger(LoggerCall::log { i: 42, weight: MaximumSchedulerWeight::get() / 2 }) - .into(), + RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: MaximumSchedulerWeight::get() / 2 + }) + .into(), )); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 0, root(), - Call::Logger(LoggerCall::log { i: 69, weight: MaximumSchedulerWeight::get() / 2 }) - .into(), + RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: MaximumSchedulerWeight::get() / 2 + }) + .into(), )); run_to_block(4); assert_eq!(logger::log(), vec![(root(), 69u32), (root(), 42u32)]); @@ -444,21 +478,23 @@ fn scheduler_respects_priority_ordering_with_soft_deadlines() { None, 255, root(), - Call::Logger(LoggerCall::log { i: 42, weight: max_weight / 2 - item_weight }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 2 - item_weight }) + .into(), )); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - Call::Logger(LoggerCall::log { i: 69, weight: max_weight / 2 - item_weight }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 2 - item_weight }) + .into(), )); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 126, root(), - Call::Logger(LoggerCall::log { + RuntimeCall::Logger(LoggerCall::log { i: 2600, weight: max_weight / 2 - item_weight + Weight::from_ref_time(1) }) @@ -487,8 +523,11 @@ fn on_initialize_weight_is_correct() { None, 255, root(), - Call::Logger(LoggerCall::log { i: 3, weight: call_weight + Weight::from_ref_time(1) }) - .into(), + RuntimeCall::Logger(LoggerCall::log { + i: 3, + weight: call_weight + Weight::from_ref_time(1) + }) + .into(), )); // Anon Periodic assert_ok!(Scheduler::do_schedule( @@ -496,8 +535,11 @@ fn on_initialize_weight_is_correct() { Some((1000, 3)), 128, root(), - Call::Logger(LoggerCall::log { i: 42, weight: call_weight + 
Weight::from_ref_time(2) }) - .into(), + RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: call_weight + Weight::from_ref_time(2) + }) + .into(), )); // Anon assert_ok!(Scheduler::do_schedule( @@ -505,8 +547,11 @@ fn on_initialize_weight_is_correct() { None, 127, root(), - Call::Logger(LoggerCall::log { i: 69, weight: call_weight + Weight::from_ref_time(3) }) - .into(), + RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: call_weight + Weight::from_ref_time(3) + }) + .into(), )); // Named Periodic assert_ok!(Scheduler::do_schedule_named( @@ -515,7 +560,7 @@ fn on_initialize_weight_is_correct() { Some((1000, 3)), 126, root(), - Call::Logger(LoggerCall::log { + RuntimeCall::Logger(LoggerCall::log { i: 2600, weight: call_weight + Weight::from_ref_time(4) }) @@ -567,10 +612,12 @@ fn on_initialize_weight_is_correct() { fn root_calls_works() { new_test_ext().execute_with(|| { let call = Box::new( - Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) + .into(), ); let call2 = Box::new( - Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) + .into(), ); assert_ok!(Scheduler::schedule_named(Origin::root(), 1u32.encode(), 4, None, 127, call,)); assert_ok!(Scheduler::schedule(Origin::root(), 4, None, 127, call2)); @@ -592,15 +639,17 @@ fn fails_to_schedule_task_in_the_past() { run_to_block(3); let call1 = Box::new( - Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) + .into(), ); let call2 = Box::new( - Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) + .into(), ); let call3 = Box::new( - Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) + .into(), ); - assert_err!( Scheduler::schedule_named(Origin::root(), 1u32.encode(), 2, None, 127, call1), Error::::TargetBlockNumberInPast, @@ -622,10 +671,12 @@ fn fails_to_schedule_task_in_the_past() { fn should_use_orign() { new_test_ext().execute_with(|| { let call = Box::new( - Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) + .into(), ); let call2 = Box::new( - Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) + .into(), ); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), @@ -652,10 +703,12 @@ fn should_use_orign() { fn should_check_orign() { new_test_ext().execute_with(|| { let call = Box::new( - Call::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) + .into(), ); let call2 = Box::new( - Call::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }).into(), + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) + .into(), ); assert_noop!( Scheduler::schedule_named( @@ -679,14 +732,14 @@ fn should_check_orign() { fn 
should_check_orign_for_cancel() { new_test_ext().execute_with(|| { let call = Box::new( - Call::Logger(LoggerCall::log_without_filter { + RuntimeCall::Logger(LoggerCall::log_without_filter { i: 69, weight: Weight::from_ref_time(1000), }) .into(), ); let call2 = Box::new( - Call::Logger(LoggerCall::log_without_filter { + RuntimeCall::Logger(LoggerCall::log_without_filter { i: 42, weight: Weight::from_ref_time(1000), }) @@ -735,7 +788,7 @@ fn migration_to_v3_works() { Some(ScheduledV1 { maybe_id: None, priority: i as u8 + 10, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100), }), @@ -745,7 +798,7 @@ fn migration_to_v3_works() { Some(ScheduledV1 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000), }), @@ -766,7 +819,7 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: None, priority: 10, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) }) @@ -779,7 +832,7 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) @@ -796,7 +849,7 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: None, priority: 11, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) }) @@ -809,7 +862,7 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) @@ -826,7 +879,7 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: None, priority: 12, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) }) @@ -839,7 +892,7 @@ fn migration_to_v3_works() { Some(ScheduledV3Of:: { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) @@ -866,7 +919,7 @@ fn test_migrate_origin() { Some(Scheduled { maybe_id: None, priority: i as u8 + 10, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100), }) @@ -880,7 +933,7 @@ fn test_migrate_origin() { maybe_id: Some(b"test".to_vec()), priority: 123, origin: 2u32, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000), }) @@ -913,7 +966,7 @@ fn test_migrate_origin() { Some(ScheduledV2::, u64, OriginCaller, u64> { maybe_id: None, priority: 10, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) }) @@ -926,7 +979,7 @@ fn test_migrate_origin() { Some(ScheduledV2 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) @@ -943,7 +996,7 @@ fn test_migrate_origin() { Some(ScheduledV2 { maybe_id: None, priority: 11, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 96, weight: 
Weight::from_ref_time(100) }) @@ -956,7 +1009,7 @@ fn test_migrate_origin() { Some(ScheduledV2 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) @@ -973,7 +1026,7 @@ fn test_migrate_origin() { Some(ScheduledV2 { maybe_id: None, priority: 12, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) }) @@ -986,7 +1039,7 @@ fn test_migrate_origin() { Some(ScheduledV2 { maybe_id: Some(b"test".to_vec()), priority: 123, - call: Call::Logger(LoggerCall::log { + call: RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index 8d4c4715096b7..c9ba30df80b8b 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -159,7 +159,8 @@ pub mod pallet { + MaxEncodedLen; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; // The deposit which is reserved from candidates if they want to // start a candidacy. The deposit gets returned when the candidacy is diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 74f084673e4c2..20fa47baf639f 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -65,12 +65,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -88,7 +88,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -125,7 +125,7 @@ impl InitializeMembers for TestChangeMembers { } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type KickOrigin = EnsureSignedBy; type MembershipInitialized = TestChangeMembers; type MembershipChanged = TestChangeMembers; diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 2181493f72947..01be3605601f4 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -55,13 +55,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; @@ -79,7 +79,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; @@ -125,7 +125,7 @@ impl pallet_session::Config for Test { type ShouldEndSession = pallet_session::PeriodicSessions<(), 
()>; type NextSessionRotation = pallet_session::PeriodicSessions<(), ()>; type SessionHandler = TestSessionHandler; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; type WeightInfo = (); @@ -159,7 +159,7 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Slash = (); type Reward = (); type SessionsPerEra = (); diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 34c560984661d..7b97a20860175 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -379,7 +379,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + IsType<::Event>; + type RuntimeEvent: From + IsType<::RuntimeEvent>; /// A stable ID for a validator. type ValidatorId: Member diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index c1858d0bff774..f6d9e65c1e0b0 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -251,13 +251,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -299,7 +299,7 @@ impl Config for Test { type ValidatorId = u64; type ValidatorIdOf = TestValidatorIdOf; type Keys = MockSessionKeys; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type NextSessionRotation = (); type WeightInfo = (); } diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 9717486644761..122dedabbcf0d 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -378,7 +378,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. 
- type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// The societies's pallet id #[pallet::constant] diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index ed668e79269fd..1d9eebdda65ce 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -67,12 +67,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u128; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -90,7 +90,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -98,7 +98,7 @@ impl pallet_balances::Config for Test { } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = pallet_balances::Pallet; type Randomness = TestRandomness; type CandidateDeposit = ConstU64<25>; diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index cdad11589f743..b8f6254d9a8e8 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -134,13 +134,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -157,7 +157,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; @@ -174,7 +174,7 @@ impl pallet_session::Config for Test { type Keys = SessionKeys; type ShouldEndSession = pallet_session::PeriodicSessions; type SessionHandler = (OtherSessionHandler,); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; type ValidatorIdOf = crate::StashOf; type NextSessionRotation = pallet_session::PeriodicSessions; @@ -241,7 +241,7 @@ parameter_types! 
{ } impl pallet_bags_list::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type ScoreProvider = Staking; type BagThresholds = BagThresholds; @@ -281,7 +281,7 @@ impl crate::pallet::pallet::Config for Test { type UnixTime = Timestamp; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type RewardRemainder = RewardRemainderMock; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Slash = (); type Reward = MockReward; type SessionsPerEra = SessionsPerEra; @@ -304,7 +304,7 @@ impl crate::pallet::pallet::Config for Test { } pub(crate) type StakingCall = crate::Call; -pub(crate) type TestRuntimeCall = ::Call; +pub(crate) type TestCall = ::RuntimeCall; pub struct ExtBuilder { nominate: bool, @@ -871,7 +871,7 @@ pub(crate) fn staking_events() -> Vec> { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let Event::Staking(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let RuntimeEvent::Staking(inner) = e { Some(inner) } else { None }) .collect() } @@ -882,7 +882,7 @@ parameter_types! { pub(crate) fn staking_events_since_last_call() -> Vec> { let all: Vec<_> = System::events() .into_iter() - .filter_map(|r| if let Event::Staking(inner) = r.event { Some(inner) } else { None }) + .filter_map(|r| if let RuntimeEvent::Staking(inner) = r.event { Some(inner) } else { None }) .collect(); let seen = StakingEventsIndex::get(); StakingEventsIndex::set(all.len()); diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index f651cc06b1cb0..1b38f236fc6f3 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -129,7 +129,7 @@ pub mod pallet { type RewardRemainder: OnUnbalanced>; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Handler for the unbalanced reduction when slashing a staker. type Slash: OnUnbalanced>; diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 969ff8e4e2881..9807cffd43909 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -3779,8 +3779,7 @@ fn payout_stakers_handles_weight_refund() { start_active_era(2); // Collect payouts when there are no nominators - let call = - TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 1 }); + let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 1 }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(20)); assert_ok!(result); @@ -3793,8 +3792,7 @@ fn payout_stakers_handles_weight_refund() { start_active_era(3); // Collect payouts for an era where the validator did not receive any points. - let call = - TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 2 }); + let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 2 }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(20)); assert_ok!(result); @@ -3807,8 +3805,7 @@ fn payout_stakers_handles_weight_refund() { start_active_era(4); // Collect payouts when the validator has `half_max_nom_rewarded` nominators. 
- let call = - TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 3 }); + let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 3 }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(20)); assert_ok!(result); @@ -3831,16 +3828,14 @@ fn payout_stakers_handles_weight_refund() { start_active_era(6); // Collect payouts when the validator had `half_max_nom_rewarded` nominators. - let call = - TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); + let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), max_nom_rewarded_weight); // Try and collect payouts for an era that has already been collected. - let call = - TestRuntimeCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); + let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(20)); assert!(result.is_err()); diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index e29115382819a..8c73f99be3aea 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -452,7 +452,7 @@ pub mod pallet { type SignedFilter: EnsureOrigin; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The currency provider type. type Currency: Currency; @@ -964,7 +964,7 @@ mod benchmarks { ); frame_system::Pallet::::assert_last_event( - ::Event::from(crate::Event::Slashed { + ::RuntimeEvent::from(crate::Event::Slashed { who: caller.clone(), amount: T::SignedDepositBase::get() .saturating_add(T::SignedDepositPerItem::get().saturating_mul(1u32.into())), @@ -1085,7 +1085,7 @@ mod mock { type BlockWeights = (); type BlockLength = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u32; type Hash = H256; @@ -1093,7 +1093,7 @@ mod mock { type AccountId = u64; type Lookup = IdentityLookup; type Header = sp_runtime::generic::Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type DbWeight = (); type Version = (); @@ -1115,7 +1115,7 @@ mod mock { impl pallet_balances::Config for Test { type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -1153,7 +1153,7 @@ mod mock { } impl pallet_state_trie_migration::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ControlOrigin = EnsureRoot; type Currency = Balances; type MaxKeyLen = MigrationMaxKeyLen; diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index bde69f11106dc..96cd2d1615449 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -115,10 +115,12 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// A sudo-able call. 
- type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; + type RuntimeCall: Parameter + + UnfilteredDispatchable + + GetDispatchInfo; } #[pallet::pallet] @@ -143,7 +145,7 @@ pub mod pallet { })] pub fn sudo( origin: OriginFor, - call: Box<::Call>, + call: Box<::RuntimeCall>, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; @@ -168,7 +170,7 @@ pub mod pallet { #[pallet::weight((*_weight, call.get_dispatch_info().class))] pub fn sudo_unchecked_weight( origin: OriginFor, - call: Box<::Call>, + call: Box<::RuntimeCall>, _weight: Weight, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. @@ -230,7 +232,7 @@ pub mod pallet { pub fn sudo_as( origin: OriginFor, who: AccountIdLookupOf, - call: Box<::Call>, + call: Box<::RuntimeCall>, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index c895eaf830136..67c221b234de1 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -40,7 +40,7 @@ pub mod logger { #[pallet::config] pub trait Config: frame_system::Config { - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; } #[pallet::pallet] @@ -114,8 +114,8 @@ parameter_types! { } pub struct BlockEverything; -impl Contains for BlockEverything { - fn contains(_: &Call) -> bool { +impl Contains for BlockEverything { + fn contains(_: &RuntimeCall) -> bool { false } } @@ -126,7 +126,7 @@ impl frame_system::Config for Test { type BlockLength = (); type DbWeight = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -134,7 +134,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -149,13 +149,13 @@ impl frame_system::Config for Test { // Implement the logger module's `Config` on the Test runtime. impl logger::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; } // Implement the sudo module's `Config` on the Test runtime. impl Config for Test { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; } // New types for dispatchable functions. diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index 0508772cc88ec..fbf4063c992e4 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -20,8 +20,8 @@ use super::*; use frame_support::{assert_noop, assert_ok, weights::Weight}; use mock::{ - new_test_ext, Call, Event as TestEvent, Logger, LoggerCall, Origin, Sudo, SudoCall, System, - Test, + new_test_ext, Logger, LoggerCall, Origin, RuntimeCall, RuntimeEvent as TestEvent, Sudo, + SudoCall, System, Test, }; #[test] @@ -39,7 +39,7 @@ fn sudo_basics() { // Configure a default test environment and set the root `key` to 1. new_test_ext(1).execute_with(|| { // A privileged function should work when `sudo` is passed the root `key` as `origin`. 
- let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::from_ref_time(1_000), })); @@ -47,7 +47,7 @@ fn sudo_basics() { assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when `sudo` is passed a non-root `key` as `origin`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::from_ref_time(1_000), })); @@ -62,7 +62,7 @@ fn sudo_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::from_ref_time(1), })); @@ -75,7 +75,7 @@ fn sudo_emits_events_correctly() { fn sudo_unchecked_weight_basics() { new_test_ext(1).execute_with(|| { // A privileged function should work when `sudo` is passed the root `key` as origin. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::from_ref_time(1_000), })); @@ -87,7 +87,7 @@ fn sudo_unchecked_weight_basics() { assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when called with a non-root `key`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::from_ref_time(1_000), })); @@ -99,7 +99,7 @@ fn sudo_unchecked_weight_basics() { assert_eq!(Logger::i32_log(), vec![42i32]); // Controls the dispatched weight. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::from_ref_time(1), })); @@ -117,7 +117,7 @@ fn sudo_unchecked_weight_emits_events_correctly() { System::set_block_number(1); // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::from_ref_time(1), })); @@ -164,7 +164,7 @@ fn set_key_emits_events_correctly() { fn sudo_as_basics() { new_test_ext(1).execute_with(|| { // A privileged function will not work when passed to `sudo_as`. - let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, weight: Weight::from_ref_time(1_000), })); @@ -173,14 +173,14 @@ fn sudo_as_basics() { assert!(Logger::account_log().is_empty()); // A non-privileged function should not work when called with a non-root `key`. - let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::from_ref_time(1), })); assert_noop!(Sudo::sudo_as(Origin::signed(3), 2, call), Error::::RequireSudo); // A non-privileged function will work when passed to `sudo_as` with the root `key`. 
- let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::from_ref_time(1), })); @@ -198,7 +198,7 @@ fn sudo_as_emits_events_correctly() { System::set_block_number(1); // A non-privileged function will work when passed to `sudo_as` with the root `key`. - let call = Box::new(Call::Logger(LoggerCall::non_privileged_log { + let call = Box::new(RuntimeCall::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::from_ref_time(1), })); diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs b/frame/support/procedural/src/construct_runtime/expand/call.rs index 028e68f0198d1..b415132ab0426 100644 --- a/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -55,7 +55,7 @@ pub fn expand_outer_dispatch( #[codec(index = #index)] #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ), }); - variant_patterns.push(quote!(Call::#name(call))); + variant_patterns.push(quote!(RuntimeCall::#name(call))); pallet_names.push(name); pallet_attrs.push(attr); query_call_part_macros.push(quote! { @@ -73,11 +73,11 @@ pub fn expand_outer_dispatch( #scrate::scale_info::TypeInfo, #scrate::RuntimeDebug, )] - pub enum Call { + pub enum RuntimeCall { #variant_defs } #[cfg(test)] - impl Call { + impl RuntimeCall { /// Return a list of the module names together with their size in memory. pub const fn sizes() -> &'static [( &'static str, usize )] { use #scrate::dispatch::Callable; @@ -86,7 +86,7 @@ pub fn expand_outer_dispatch( #pallet_attrs ( stringify!(#pallet_names), - size_of::< <#pallet_names as Callable<#runtime>>::Call >(), + size_of::< <#pallet_names as Callable<#runtime>>::RuntimeCall >(), ), )*] } @@ -113,7 +113,7 @@ pub fn expand_outer_dispatch( } } } - impl #scrate::dispatch::GetDispatchInfo for Call { + impl #scrate::dispatch::GetDispatchInfo for RuntimeCall { fn get_dispatch_info(&self) -> #scrate::dispatch::DispatchInfo { match self { #( @@ -123,7 +123,7 @@ pub fn expand_outer_dispatch( } } } - impl #scrate::dispatch::GetCallMetadata for Call { + impl #scrate::dispatch::GetCallMetadata for RuntimeCall { fn get_call_metadata(&self) -> #scrate::dispatch::CallMetadata { use #scrate::dispatch::GetCallName; match self { @@ -151,16 +151,16 @@ pub fn expand_outer_dispatch( #( #pallet_attrs stringify!(#pallet_names) => - <<#pallet_names as Callable<#runtime>>::Call + <<#pallet_names as Callable<#runtime>>::RuntimeCall as GetCallName>::get_call_names(), )* _ => unreachable!(), } } } - impl #scrate::dispatch::Dispatchable for Call { + impl #scrate::dispatch::Dispatchable for RuntimeCall { type Origin = Origin; - type Config = Call; + type Config = RuntimeCall; type Info = #scrate::weights::DispatchInfo; type PostInfo = #scrate::weights::PostDispatchInfo; fn dispatch(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { @@ -173,7 +173,7 @@ pub fn expand_outer_dispatch( #scrate::traits::UnfilteredDispatchable::dispatch_bypass_filter(self, origin) } } - impl #scrate::traits::UnfilteredDispatchable for Call { + impl #scrate::traits::UnfilteredDispatchable for RuntimeCall { type Origin = Origin; fn dispatch_bypass_filter(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { match self { @@ -188,7 +188,7 @@ pub fn expand_outer_dispatch( #( #pallet_attrs - impl #scrate::traits::IsSubType<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { + impl 
#scrate::traits::IsSubType<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for RuntimeCall { #[allow(unreachable_patterns)] fn is_sub_type(&self) -> Option<&#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> { match self { @@ -200,7 +200,7 @@ pub fn expand_outer_dispatch( } #pallet_attrs - impl From<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for Call { + impl From<#scrate::dispatch::CallableCallFor<#pallet_names, #runtime>> for RuntimeCall { fn from(call: #scrate::dispatch::CallableCallFor<#pallet_names, #runtime>) -> Self { #variant_patterns } diff --git a/frame/support/procedural/src/construct_runtime/expand/event.rs b/frame/support/procedural/src/construct_runtime/expand/event.rs index f145327d37af5..b11fcef1bfd53 100644 --- a/frame/support/procedural/src/construct_runtime/expand/event.rs +++ b/frame/support/procedural/src/construct_runtime/expand/event.rs @@ -80,7 +80,7 @@ pub fn expand_outer_event( #scrate::RuntimeDebug, )] #[allow(non_camel_case_types)] - pub enum Event { + pub enum RuntimeEvent { #event_variants } @@ -148,13 +148,13 @@ fn expand_event_conversion( quote! { #attr - impl From<#pallet_event> for Event { + impl From<#pallet_event> for RuntimeEvent { fn from(x: #pallet_event) -> Self { - Event::#variant_name(x) + RuntimeEvent::#variant_name(x) } } #attr - impl TryInto<#pallet_event> for Event { + impl TryInto<#pallet_event> for RuntimeEvent { type Error = (); fn try_into(self) -> #scrate::sp_std::result::Result<#pallet_event, Self::Error> { diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index a4bb0d9cbaa02..d234ca6415115 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -105,7 +105,7 @@ pub fn expand_outer_origin( #[derive(Clone)] pub struct Origin { caller: OriginCaller, - filter: #scrate::sp_std::rc::Rc::Call) -> bool>>, + filter: #scrate::sp_std::rc::Rc::RuntimeCall) -> bool>>, } #[cfg(not(feature = "std"))] @@ -132,7 +132,7 @@ pub fn expand_outer_origin( } impl #scrate::traits::OriginTrait for Origin { - type Call = <#runtime as #system_path::Config>::Call; + type Call = <#runtime as #system_path::Config>::RuntimeCall; type PalletsOrigin = OriginCaller; type AccountId = <#runtime as #system_path::Config>::AccountId; @@ -147,7 +147,7 @@ pub fn expand_outer_origin( fn reset_filter(&mut self) { let filter = < <#runtime as #system_path::Config>::BaseCallFilter - as #scrate::traits::Contains<<#runtime as #system_path::Config>::Call> + as #scrate::traits::Contains<<#runtime as #system_path::Config>::RuntimeCall> >::contains; self.filter = #scrate::sp_std::rc::Rc::new(Box::new(filter)); diff --git a/frame/support/procedural/src/construct_runtime/expand/unsigned.rs b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs index 310516793472f..1d528779423a7 100644 --- a/frame/support/procedural/src/construct_runtime/expand/unsigned.rs +++ b/frame/support/procedural/src/construct_runtime/expand/unsigned.rs @@ -55,14 +55,14 @@ pub fn expand_outer_validate_unsigned( #( #query_validate_unsigned_part_macros )* impl #scrate::unsigned::ValidateUnsigned for #runtime { - type Call = Call; + type Call = RuntimeCall; fn pre_dispatch(call: &Self::Call) -> Result<(), #scrate::unsigned::TransactionValidityError> { #[allow(unreachable_patterns)] match call { #( #pallet_attrs - Call::#pallet_names(inner_call) => 
#pallet_names::pre_dispatch(inner_call), + RuntimeCall::#pallet_names(inner_call) => #pallet_names::pre_dispatch(inner_call), )* // pre-dispatch should not stop inherent extrinsics, validation should prevent // including arbitrary (non-inherent) extrinsics to blocks. @@ -79,7 +79,7 @@ pub fn expand_outer_validate_unsigned( match call { #( #pallet_attrs - Call::#pallet_names(inner_call) => #pallet_names::validate_unsigned(source, inner_call), + RuntimeCall::#pallet_names(inner_call) => #pallet_names::validate_unsigned(source, inner_call), )* _ => #scrate::unsigned::UnknownTransaction::NoUnsignedValidator.into(), } diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 00204b7a4d906..0f72b28748cee 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -311,7 +311,7 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// System: frame_system::{Pallet, Call, Event, Config} = 0, /// Test: path::to::test::{Pallet, Call} = 1, /// -/// // Pallets with instances +/// // Pallets with instances. /// Test2_Instance1: test2::::{Pallet, Call, Storage, Event, Config, Origin}, /// Test2_DefaultInstance: test2::{Pallet, Call, Storage, Event, Config, Origin} = 4, /// diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index a9468451ad1d4..8b2922b5fa4f2 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -300,7 +300,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { impl<#type_impl_gen> #frame_support::dispatch::Callable for #pallet_ident<#type_use_gen> #where_clause { - type Call = #call_ident<#type_use_gen>; + type RuntimeCall = #call_ident<#type_use_gen>; } impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause { diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index 791f930302207..abed680eb245e 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -61,7 +61,8 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { let event_where_clause = &event.where_clause; // NOTE: actually event where clause must be a subset of config where clause because of - // `type Event: From>`. But we merge either way for potential better error message + // `type RuntimeEvent: From>`. 
But we merge either way for potential better error + // message let completed_where_clause = super::merge_where_clauses(&[&event.where_clause, &def.config.where_clause]); @@ -136,13 +137,13 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { impl<#type_impl_gen> Pallet<#type_use_gen> #completed_where_clause { #fn_vis fn deposit_event(event: Event<#event_use_gen>) { let event = < - ::Event as + ::RuntimeEvent as From> >::from(event); let event = < - ::Event as - Into<::Event> + ::RuntimeEvent as + Into<::RuntimeEvent> >::into(event); <#frame_system::Pallet>::deposit_event(event) diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs index 60888fc5dd357..0f3aa69b170ce 100644 --- a/frame/support/procedural/src/pallet/parse/config.rs +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -28,6 +28,7 @@ mod keyword { syn::custom_keyword!(I); syn::custom_keyword!(config); syn::custom_keyword!(IsType); + syn::custom_keyword!(RuntimeEvent); syn::custom_keyword!(Event); syn::custom_keyword!(constant); syn::custom_keyword!(frame_system); @@ -42,8 +43,9 @@ pub struct ConfigDef { pub has_instance: bool, /// Const associated type. pub consts_metadata: Vec, - /// Whether the trait has the associated type `Event`, note that those bounds are checked: - /// * `IsType::Event` + /// Whether the trait has the associated type `Event`, note that those bounds are + /// checked: + /// * `IsType::RuntimeEvent` /// * `From` or `From>` or `From>` pub has_event_type: bool, /// The where clause on trait definition but modified so `Self` is `T`. @@ -159,7 +161,7 @@ impl syn::parse::Parse for ConfigBoundParse { } } -/// Parse for `IsType<::Event>` and retrieve `$ident` +/// Parse for `IsType<::RuntimeEvent>` and retrieve `$ident` pub struct IsTypeBoundEventParse(syn::Ident); impl syn::parse::Parse for IsTypeBoundEventParse { @@ -174,7 +176,7 @@ impl syn::parse::Parse for IsTypeBoundEventParse { input.parse::()?; input.parse::]>()?; input.parse::()?; - input.parse::()?; + input.parse::()?; input.parse::]>()?; Ok(Self(ident)) @@ -212,7 +214,7 @@ impl syn::parse::Parse for FromEventParse { } } -/// Check if trait_item is `type Event`, if so checks its bounds are those expected. +/// Check if trait_item is `type RuntimeEvent`, if so checks its bounds are those expected. 
/// (Event type is reserved type) fn check_event_type( frame_system: &syn::Ident, @@ -220,10 +222,10 @@ fn check_event_type( trait_has_instance: bool, ) -> syn::Result { if let syn::TraitItem::Type(type_) = trait_item { - if type_.ident == "Event" { + if type_.ident == "RuntimeEvent" { // Check event has no generics if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() { - let msg = "Invalid `type Event`, associated type `Event` is reserved and must have\ + let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must have\ no generics nor where_clause"; return Err(syn::Error::new(trait_item.span(), msg)) } @@ -236,8 +238,8 @@ fn check_event_type( if !has_is_type_bound { let msg = format!( - "Invalid `type Event`, associated type `Event` is reserved and must \ - bound: `IsType<::Event>`", + "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ + bound: `IsType<::RuntimeEvent>`", frame_system, ); return Err(syn::Error::new(type_.span(), msg)) @@ -251,14 +253,14 @@ fn check_event_type( let from_event_bound = if let Some(b) = from_event_bound { b } else { - let msg = "Invalid `type Event`, associated type `Event` is reserved and must \ + let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must \ bound: `From` or `From>` or `From>`"; return Err(syn::Error::new(type_.span(), msg)) }; if from_event_bound.is_generic && (from_event_bound.has_instance != trait_has_instance) { - let msg = "Invalid `type Event`, associated type `Event` bounds inconsistent \ + let msg = "Invalid `type RuntimeEvent`, associated type `RuntimeEvent` bounds inconsistent \ `From`. Config and generic Event must be both with instance or \ without instance"; return Err(syn::Error::new(type_.span(), msg)) diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs index a436f7e09c1d7..f1e520952d872 100644 --- a/frame/support/procedural/src/pallet/parse/mod.rs +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -105,11 +105,11 @@ impl Def { let m = hooks::HooksDef::try_from(span, index, item)?; hooks = Some(m); }, - Some(PalletAttr::Call(span)) if call.is_none() => + Some(PalletAttr::RuntimeCall(span)) if call.is_none() => call = Some(call::CallDef::try_from(span, index, item)?), Some(PalletAttr::Error(span)) if error.is_none() => error = Some(error::ErrorDef::try_from(span, index, item)?), - Some(PalletAttr::Event(span)) if event.is_none() => + Some(PalletAttr::RuntimeEvent(span)) if event.is_none() => event = Some(event::EventDef::try_from(span, index, item)?), Some(PalletAttr::GenesisConfig(_)) if genesis_config.is_none() => { let g = genesis_config::GenesisConfigDef::try_from(index, item)?; @@ -182,19 +182,19 @@ impl Def { } /// Check that usage of trait `Event` is consistent with the definition, i.e. it is declared - /// and trait defines type Event, or not declared and no trait associated type. + /// and trait defines type RuntimeEvent, or not declared and no trait associated type. fn check_event_usage(&self) -> syn::Result<()> { match (self.config.has_event_type, self.event.is_some()) { (true, false) => { - let msg = "Invalid usage of Event, `Config` contains associated type `Event`, \ + let msg = "Invalid usage of RuntimeEvent, `Config` contains associated type `RuntimeEvent`, \ but enum `Event` is not declared (i.e. no use of `#[pallet::event]`). 
\ - Note that type `Event` in trait is reserved to work alongside pallet event."; + Note that type `RuntimeEvent` in trait is reserved to work alongside pallet event."; Err(syn::Error::new(proc_macro2::Span::call_site(), msg)) }, (false, true) => { - let msg = "Invalid usage of Event, `Config` contains no associated type \ - `Event`, but enum `Event` is declared (in use of `#[pallet::event]`). \ - An Event associated type must be declare on trait `Config`."; + let msg = "Invalid usage of RuntimeEvent, `Config` contains no associated type \ + `RuntimeEvent`, but enum `Event` is declared (in use of `#[pallet::event]`). \ + An RuntimeEvent associated type must be declare on trait `Config`."; Err(syn::Error::new(proc_macro2::Span::call_site(), msg)) }, _ => Ok(()), @@ -391,9 +391,9 @@ enum PalletAttr { Config(proc_macro2::Span), Pallet(proc_macro2::Span), Hooks(proc_macro2::Span), - Call(proc_macro2::Span), + RuntimeCall(proc_macro2::Span), Error(proc_macro2::Span), - Event(proc_macro2::Span), + RuntimeEvent(proc_macro2::Span), Origin(proc_macro2::Span), Inherent(proc_macro2::Span), Storage(proc_macro2::Span), @@ -410,9 +410,9 @@ impl PalletAttr { Self::Config(span) => *span, Self::Pallet(span) => *span, Self::Hooks(span) => *span, - Self::Call(span) => *span, + Self::RuntimeCall(span) => *span, Self::Error(span) => *span, - Self::Event(span) => *span, + Self::RuntimeEvent(span) => *span, Self::Origin(span) => *span, Self::Inherent(span) => *span, Self::Storage(span) => *span, @@ -441,11 +441,11 @@ impl syn::parse::Parse for PalletAttr { } else if lookahead.peek(keyword::hooks) { Ok(PalletAttr::Hooks(content.parse::()?.span())) } else if lookahead.peek(keyword::call) { - Ok(PalletAttr::Call(content.parse::()?.span())) + Ok(PalletAttr::RuntimeCall(content.parse::()?.span())) } else if lookahead.peek(keyword::error) { Ok(PalletAttr::Error(content.parse::()?.span())) } else if lookahead.peek(keyword::event) { - Ok(PalletAttr::Event(content.parse::()?.span())) + Ok(PalletAttr::RuntimeEvent(content.parse::()?.span())) } else if lookahead.peek(keyword::origin) { Ok(PalletAttr::Origin(content.parse::()?.span())) } else if lookahead.peek(keyword::inherent) { diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index fc976b03556a4..9a6654d5524d4 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -56,12 +56,12 @@ pub type DispatchErrorWithPostInfo = /// Serializable version of pallet dispatchable. pub trait Callable { - type Call: UnfilteredDispatchable + Codec + Clone + PartialEq + Eq; + type RuntimeCall: UnfilteredDispatchable + Codec + Clone + PartialEq + Eq; } // dirty hack to work around serde_derive issue // https://github.com/rust-lang/rust/issues/51331 -pub type CallableCallFor = >::Call; +pub type CallableCallFor = >::RuntimeCall; /// Origin for the System pallet. #[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] @@ -1497,7 +1497,7 @@ macro_rules! decl_module { { /// Deposits an event using `frame_system::Pallet::deposit_event`. $vis fn deposit_event( - event: impl Into<< $trait_instance as $trait_name $(<$instance>)? >::Event> + event: impl Into<< $trait_instance as $trait_name $(<$instance>)? >::RuntimeEvent> ) { <$system::Pallet<$trait_instance>>::deposit_event(event.into()) } @@ -2376,7 +2376,7 @@ macro_rules! 
decl_module { impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::dispatch::Callable<$trait_instance> for $mod_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { - type Call = $call_type<$trait_instance $(, $instance)?>; + type RuntimeCall = $call_type<$trait_instance $(, $instance)?>; } $crate::__dispatch_impl_metadata! { @@ -2648,9 +2648,9 @@ mod tests { pub trait Config: 'static { type AccountId; - type Call; + type RuntimeCall; type BaseCallFilter; - type Origin: crate::traits::OriginTrait; + type Origin: crate::traits::OriginTrait; type BlockNumber: Into; type PalletInfo: crate::traits::PalletInfo; type DbWeight: Get; @@ -2751,7 +2751,7 @@ mod tests { } impl crate::traits::OriginTrait for OuterOrigin { - type Call = ::Call; + type Call = ::RuntimeCall; type PalletsOrigin = OuterOrigin; type AccountId = ::AccountId; @@ -2799,7 +2799,7 @@ mod tests { impl system::Config for TraitImpl { type Origin = OuterOrigin; type AccountId = u32; - type Call = (); + type RuntimeCall = (); type BaseCallFilter = frame_support::traits::Everything; type BlockNumber = u32; type PalletInfo = Self; diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 55ded6da9c5a1..315d856e5d0f8 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1452,9 +1452,9 @@ pub mod pallet_prelude { /// I.e. a regular trait definition named `Config`, with supertrait `frame_system::Config`, /// optionally other supertrait and where clause. /// -/// The associated type `Event` is reserved, if defined it must bounds `From` and -/// `IsType<::Event>`, see `#[pallet::event]` for more -/// information. +/// The associated type `RuntimeEvent` is reserved, if defined it must bounds +/// `From` and `IsType<::RuntimeEvent>`, see +/// `#[pallet::event]` for more information. /// /// To put `Get` associated type into metadatas, use the attribute `#[pallet::constant]`, e.g.: /// ```ignore @@ -2078,7 +2078,7 @@ pub mod pallet_prelude { /// #[pallet::constant] // put the constant in metadata /// type MyGetParam: Get; /// type Balance: Parameter + MaxEncodedLen + From; -/// type Event: From> + IsType<::Event>; +/// type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// } /// /// // Define some additional constant to put into the constant metadata. @@ -2267,7 +2267,7 @@ pub mod pallet_prelude { /// #[pallet::constant] /// type MyGetParam: Get; /// type Balance: Parameter + MaxEncodedLen + From; -/// type Event: From> + IsType<::Event>; +/// type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// } /// /// #[pallet::extra_constants] @@ -2423,7 +2423,7 @@ pub mod pallet_prelude { /// ``` /// 5. **migrate Config**: move trait into the module with /// * all const in decl_module to `#[pallet::constant]` -/// * add bound `IsType<::Event>` to `type Event` +/// * add bound `IsType<::RuntimeEvent>` to `type RuntimeEvent` /// 7. 
**migrate decl_module**: write: /// ```ignore /// #[pallet::hooks] diff --git a/frame/support/test/compile_pass/src/lib.rs b/frame/support/test/compile_pass/src/lib.rs index 7850726048546..6e4d2363b2f7c 100644 --- a/frame/support/test/compile_pass/src/lib.rs +++ b/frame/support/test/compile_pass/src/lib.rs @@ -68,9 +68,9 @@ impl frame_system::Config for Runtime { type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type Call = Call; + type RuntimeCall = RuntimeCall; type DbWeight = (); type OnNewAccount = (); type OnKilledAccount = (); @@ -82,7 +82,7 @@ impl frame_system::Config for Runtime { pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; construct_runtime!( pub enum Runtime where diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 04b3dbcedcbf2..4f3c783252e00 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -250,9 +250,9 @@ impl system::Config for Runtime { type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type Call = Call; + type RuntimeCall = RuntimeCall; type DbWeight = (); } @@ -280,7 +280,7 @@ frame_support::construct_runtime!( pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; #[test] fn check_modules_error_type() { @@ -373,7 +373,7 @@ fn check_modules_error_type() { message: Some("Something") })), ); - }); + }) } #[test] @@ -422,53 +422,53 @@ fn event_codec() { use codec::Encode; let event = system::Event::::ExtrinsicSuccess; - assert_eq!(Event::from(event).encode()[0], 30); + assert_eq!(RuntimeEvent::from(event).encode()[0], 30); let event = module1::Event::::A(test_pub()); - assert_eq!(Event::from(event).encode()[0], 31); + assert_eq!(RuntimeEvent::from(event).encode()[0], 31); let event = module2::Event::A; - assert_eq!(Event::from(event).encode()[0], 32); + assert_eq!(RuntimeEvent::from(event).encode()[0], 32); let event = module1::Event::::A(test_pub()); - assert_eq!(Event::from(event).encode()[0], 33); + assert_eq!(RuntimeEvent::from(event).encode()[0], 33); let event = nested::module3::Event::A; - assert_eq!(Event::from(event).encode()[0], 34); + assert_eq!(RuntimeEvent::from(event).encode()[0], 34); let event = module3::Event::A; - assert_eq!(Event::from(event).encode()[0], 35); + assert_eq!(RuntimeEvent::from(event).encode()[0], 35); let event = module1::Event::::A(test_pub()); - assert_eq!(Event::from(event).encode()[0], 4); + assert_eq!(RuntimeEvent::from(event).encode()[0], 4); let event = module1::Event::::A(test_pub()); - assert_eq!(Event::from(event).encode()[0], 1); + assert_eq!(RuntimeEvent::from(event).encode()[0], 1); let event = module1::Event::::A(test_pub()); - assert_eq!(Event::from(event).encode()[0], 2); + assert_eq!(RuntimeEvent::from(event).encode()[0], 2); let event = module1::Event::::A(test_pub()); - assert_eq!(Event::from(event).encode()[0], 12); + assert_eq!(RuntimeEvent::from(event).encode()[0], 12); let event = module1::Event::::A(test_pub()); - assert_eq!(Event::from(event).encode()[0], 13); + 
assert_eq!(RuntimeEvent::from(event).encode()[0], 13); } #[test] fn call_codec() { use codec::Encode; - assert_eq!(Call::System(system::Call::noop {}).encode()[0], 30); - assert_eq!(Call::Module1_1(module1::Call::fail {}).encode()[0], 31); - assert_eq!(Call::Module2(module2::Call::fail {}).encode()[0], 32); - assert_eq!(Call::Module1_2(module1::Call::fail {}).encode()[0], 33); - assert_eq!(Call::NestedModule3(nested::module3::Call::fail {}).encode()[0], 34); - assert_eq!(Call::Module3(module3::Call::fail {}).encode()[0], 35); - assert_eq!(Call::Module1_4(module1::Call::fail {}).encode()[0], 3); - assert_eq!(Call::Module1_6(module1::Call::fail {}).encode()[0], 1); - assert_eq!(Call::Module1_7(module1::Call::fail {}).encode()[0], 2); - assert_eq!(Call::Module1_8(module1::Call::fail {}).encode()[0], 12); - assert_eq!(Call::Module1_9(module1::Call::fail {}).encode()[0], 13); + assert_eq!(RuntimeCall::System(system::Call::noop {}).encode()[0], 30); + assert_eq!(RuntimeCall::Module1_1(module1::Call::fail {}).encode()[0], 31); + assert_eq!(RuntimeCall::Module2(module2::Call::fail {}).encode()[0], 32); + assert_eq!(RuntimeCall::Module1_2(module1::Call::fail {}).encode()[0], 33); + assert_eq!(RuntimeCall::NestedModule3(nested::module3::Call::fail {}).encode()[0], 34); + assert_eq!(RuntimeCall::Module3(module3::Call::fail {}).encode()[0], 35); + assert_eq!(RuntimeCall::Module1_4(module1::Call::fail {}).encode()[0], 3); + assert_eq!(RuntimeCall::Module1_6(module1::Call::fail {}).encode()[0], 1); + assert_eq!(RuntimeCall::Module1_7(module1::Call::fail {}).encode()[0], 2); + assert_eq!(RuntimeCall::Module1_8(module1::Call::fail {}).encode()[0], 12); + assert_eq!(RuntimeCall::Module1_9(module1::Call::fail {}).encode()[0], 13); } #[test] @@ -537,7 +537,7 @@ fn call_name() { #[test] fn call_metadata() { use frame_support::dispatch::{CallMetadata, GetCallMetadata}; - let call = Call::Module3(module3::Call::::aux_4 {}); + let call = RuntimeCall::Module3(module3::Call::::aux_4 {}); let metadata = call.get_call_metadata(); let expected = CallMetadata { function_name: "aux_4".into(), pallet_name: "Module3".into() }; assert_eq!(metadata, expected); @@ -553,7 +553,7 @@ fn get_call_names() { #[test] fn get_module_names() { use frame_support::dispatch::GetCallMetadata; - let module_names = Call::get_module_names(); + let module_names = RuntimeCall::get_module_names(); assert_eq!( [ "System", @@ -575,13 +575,13 @@ fn get_module_names() { #[test] fn call_subtype_conversion() { use frame_support::{dispatch::CallableCallFor, traits::IsSubType}; - let call = Call::Module3(module3::Call::::fail {}); + let call = RuntimeCall::Module3(module3::Call::::fail {}); let subcall: Option<&CallableCallFor> = call.is_sub_type(); let subcall_none: Option<&CallableCallFor> = call.is_sub_type(); assert_eq!(Some(&module3::Call::::fail {}), subcall); assert_eq!(None, subcall_none); - let from = Call::from(subcall.unwrap().clone()); + let from = RuntimeCall::from(subcall.unwrap().clone()); assert_eq!(from, call); } diff --git a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs index 98cd1f197f619..7a074db9986a2 100644 --- a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs +++ b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.rs @@ -15,7 +15,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u64; pub type Header = generic::Header; pub type Block = generic::Block; 
-pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} diff --git a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr index 608d57d6a97fc..5f1fccd43c549 100644 --- a/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr +++ b/frame/support/test/tests/construct_runtime_ui/both_use_and_excluded_parts.stderr @@ -1,28 +1,19 @@ error: Unexpected tokens, expected one of `=`, `,` - --> $DIR/both_use_and_excluded_parts.rs:29:43 + --> tests/construct_runtime_ui/both_use_and_excluded_parts.rs:29:43 | 29 | Pallet: pallet exclude_parts { Pallet } use_parts { Pallet }, | ^^^^^^^^^ -error[E0412]: cannot find type `Call` in this scope - --> $DIR/both_use_and_excluded_parts.rs:18:64 - | -18 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | ^^^^ not found in this scope - | -help: consider importing one of these items - | -1 | use crate::pallet::Call; - | -1 | use frame_support_test::Call; - | -1 | use frame_system::Call; - | -1 | use test_pallet::Call; +error[E0412]: cannot find type `RuntimeCall` in this scope + --> tests/construct_runtime_ui/both_use_and_excluded_parts.rs:18:64 | +18 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | - ^^^^^^^^^^^ not found in this scope + | | + | help: you might be missing a type parameter: `` error[E0412]: cannot find type `Runtime` in this scope - --> $DIR/both_use_and_excluded_parts.rs:20:25 + --> tests/construct_runtime_ui/both_use_and_excluded_parts.rs:20:25 | 20 | impl pallet::Config for Runtime {} | ^^^^^^^ not found in this scope diff --git a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs index 51be7e30bd3eb..302b5af7ecd26 100644 --- a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.rs @@ -20,7 +20,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u64; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} diff --git a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr index 4e31cfb75c074..c623ecfbf4cdf 100644 --- a/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/exclude_undefined_part.stderr @@ -1,28 +1,19 @@ error: Invalid pallet part specified, the pallet `Pallet` doesn't have the `Call` part. Available parts are: `Pallet`, `Storage`. 
- --> $DIR/exclude_undefined_part.rs:34:34 + --> tests/construct_runtime_ui/exclude_undefined_part.rs:34:34 | 34 | Pallet: pallet exclude_parts { Call }, | ^^^^ -error[E0412]: cannot find type `Call` in this scope - --> $DIR/exclude_undefined_part.rs:23:64 - | -23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | ^^^^ not found in this scope - | -help: consider importing one of these items - | -1 | use crate::pallet::Call; - | -1 | use frame_support_test::Call; - | -1 | use frame_system::Call; - | -1 | use test_pallet::Call; +error[E0412]: cannot find type `RuntimeCall` in this scope + --> tests/construct_runtime_ui/exclude_undefined_part.rs:23:64 | +23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | - ^^^^^^^^^^^ not found in this scope + | | + | help: you might be missing a type parameter: `` error[E0412]: cannot find type `Runtime` in this scope - --> $DIR/exclude_undefined_part.rs:25:25 + --> tests/construct_runtime_ui/exclude_undefined_part.rs:25:25 | 25 | impl pallet::Config for Runtime {} | ^^^^^^^ not found in this scope diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs index c06333795e3c5..fd6b51db07672 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs @@ -6,7 +6,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl test_pallet::Config for Runtime {} @@ -15,13 +15,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs index 827d8a58af733..f73c611d29686 100644 --- a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs +++ b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs @@ -40,7 +40,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} @@ -49,13 +49,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs 
b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs index 1653e830f0b4f..f90af88d23a9b 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs @@ -15,7 +15,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} @@ -24,13 +24,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs index b8f91cf4bc690..60d4b166c41fd 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs @@ -15,7 +15,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} @@ -24,13 +24,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index 2af4d3fb15000..920e627d43c31 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -45,36 +45,3 @@ help: if you import `Event`, refer to it directly 57 - } 58 - } | - -error[E0412]: cannot find type `Event` in module `pallet` - --> tests/construct_runtime_ui/undefined_event_part.rs:49:1 - | -49 | / construct_runtime! { -50 | | pub enum Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } - | |_^ not found in `pallet` - | - = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider importing one of these items - | -1 | use crate::Event; - | -1 | use frame_system::Event; - | -help: if you import `Event`, refer to it directly - | -49 - construct_runtime! 
{ -50 - pub enum Runtime where -51 - Block = Block, -52 - NodeBlock = Block, -53 - UncheckedExtrinsic = UncheckedExtrinsic -54 - { -55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, -56 - Pallet: pallet::{Pallet, Event}, -57 - } -58 - } - | diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs index a61d545b3279e..5f7beed686339 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs @@ -15,7 +15,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} @@ -24,13 +24,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs index 6e4764286ab41..fb866c161ccb8 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs @@ -15,7 +15,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} @@ -24,13 +24,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs index 9233404a865b9..26ca5d6eb4cb0 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs @@ -15,7 +15,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} @@ -24,13 +24,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; 
type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs index 621683aca3754..81c040c660451 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs @@ -15,7 +15,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u32; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} @@ -24,13 +24,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = frame_support::traits::ConstU32<250>; type BlockWeights = (); type BlockLength = (); diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index 94226075d9a4b..a5e4fe3c1cd5a 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -15,14 +15,14 @@ error: `Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you | = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) -error[E0599]: no variant or associated item named `Pallet` found for enum `Call` in the current scope +error[E0599]: no variant or associated item named `Pallet` found for enum `RuntimeCall` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:56:3 | 49 | construct_runtime! { | ------------------ variant or associated item `Pallet` not found here ... 
56 | Pallet: pallet::{Pallet, ValidateUnsigned}, - | ^^^^^^ variant or associated item not found in `Call` + | ^^^^^^ variant or associated item not found in `RuntimeCall` error[E0599]: no function or associated item named `pre_dispatch` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 diff --git a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs index 1664dcc42b755..c74e29bc05469 100644 --- a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.rs @@ -20,7 +20,7 @@ pub type Signature = sr25519::Signature; pub type BlockNumber = u64; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl pallet::Config for Runtime {} diff --git a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr index ed41f0ce673a4..e289c75fb008a 100644 --- a/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/use_undefined_part.stderr @@ -1,28 +1,19 @@ error: Invalid pallet part specified, the pallet `Pallet` doesn't have the `Call` part. Available parts are: `Pallet`, `Storage`. - --> $DIR/use_undefined_part.rs:34:30 + --> tests/construct_runtime_ui/use_undefined_part.rs:34:30 | 34 | Pallet: pallet use_parts { Call }, | ^^^^ -error[E0412]: cannot find type `Call` in this scope - --> $DIR/use_undefined_part.rs:23:64 - | -23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; - | ^^^^ not found in this scope - | -help: consider importing one of these items - | -1 | use crate::pallet::Call; - | -1 | use frame_support_test::Call; - | -1 | use frame_system::Call; - | -1 | use test_pallet::Call; +error[E0412]: cannot find type `RuntimeCall` in this scope + --> tests/construct_runtime_ui/use_undefined_part.rs:23:64 | +23 | pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; + | - ^^^^^^^^^^^ not found in this scope + | | + | help: you might be missing a type parameter: `` error[E0412]: cannot find type `Runtime` in this scope - --> $DIR/use_undefined_part.rs:25:25 + --> tests/construct_runtime_ui/use_undefined_part.rs:25:25 | 25 | impl pallet::Config for Runtime {} | ^^^^^^^ not found in this scope diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 75a96f628245a..c9eebdaff6bc4 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -50,7 +50,7 @@ mod module1 { where ::BlockNumber: From, { - type Event: From> + Into<::Event>; + type RuntimeEvent: From> + Into<::RuntimeEvent>; type Origin: From>; type SomeParameter: Get; type GenericType: Default + Clone + Codec + EncodeLike + TypeInfo; @@ -154,7 +154,7 @@ mod module2 { pub trait Config: system::Config { type Amount: Parameter + Default; - type Event: From> + Into<::Event>; + type RuntimeEvent: From> + Into<::RuntimeEvent>; type Origin: From>; } @@ -233,35 +233,35 @@ mod module3 { } impl module1::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Origin = Origin; type SomeParameter = ConstU32<100>; type GenericType = u32; } impl module1::Config for Runtime { - type Event = Event; + type RuntimeEvent = 
RuntimeEvent; type Origin = Origin; type SomeParameter = ConstU32<100>; type GenericType = u32; } impl module2::Config for Runtime { type Amount = u16; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Origin = Origin; } impl module2::Config for Runtime { type Amount = u32; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Origin = Origin; } impl module2::Config for Runtime { type Amount = u32; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Origin = Origin; } impl module2::Config for Runtime { type Amount = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Origin = Origin; } impl module3::Config for Runtime { @@ -280,9 +280,9 @@ impl system::Config for Runtime { type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type Call = Call; + type RuntimeCall = RuntimeCall; type DbWeight = (); } @@ -315,7 +315,7 @@ frame_support::construct_runtime!( pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; fn new_test_ext() -> sp_io::TestExternalities { GenesisConfig { diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs index d7e3d2cb5b135..0b24a6aa89828 100644 --- a/frame/support/test/tests/issue2219.rs +++ b/frame/support/test/tests/issue2219.rs @@ -155,7 +155,7 @@ pub type BlockNumber = u64; pub type Index = u64; pub type Header = generic::Header; pub type Block = generic::Block; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; @@ -163,9 +163,9 @@ impl system::Config for Runtime { type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = AccountId; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type Call = Call; + type RuntimeCall = RuntimeCall; type DbWeight = (); } diff --git a/frame/support/test/tests/origin.rs b/frame/support/test/tests/origin.rs index cff531ff2e529..53124059e6353 100644 --- a/frame/support/test/tests/origin.rs +++ b/frame/support/test/tests/origin.rs @@ -134,10 +134,10 @@ impl nested::module::Config for RuntimeOriginTest {} impl module::Config for RuntimeOriginTest {} pub struct BaseCallFilter; -impl Contains for BaseCallFilter { - fn contains(c: &Call) -> bool { +impl Contains for BaseCallFilter { + fn contains(c: &RuntimeCall) -> bool { match c { - Call::NestedModule(_) => true, + RuntimeCall::NestedModule(_) => true, _ => false, } } @@ -149,9 +149,9 @@ impl system::Config for RuntimeOriginTest { type Origin = Origin; type BlockNumber = BlockNumber; type AccountId = u32; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type PalletInfo = PalletInfo; - type Call = Call; + type RuntimeCall = RuntimeCall; type DbWeight = (); } @@ -170,7 +170,7 @@ frame_support::construct_runtime!( pub type Signature = sr25519::Signature; pub type BlockNumber = u64; pub type Header = generic::Header; -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; pub type Block = generic::Block; #[test] @@ -192,7 +192,7 @@ fn origin_default_filter() { assert_eq!(Origin::from(nested::module::Origin).filter_call(&rejected_call), false); let mut origin = Origin::from(Some(0)); 
- origin.add_filter(|c| matches!(c, Call::Module(_))); + origin.add_filter(|c| matches!(c, RuntimeCall::Module(_))); assert_eq!(origin.filter_call(&accepted_call), false); assert_eq!(origin.filter_call(&rejected_call), false); diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index d683399482bf4..420617a9e4f24 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -125,7 +125,7 @@ pub mod pallet { type Balance: Parameter + Default + TypeInfo; - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; } #[pallet::extra_constants] @@ -477,7 +477,7 @@ pub mod pallet2 { where ::AccountId: From + SomeAssociation1, { - type Event: From + IsType<::Event>; + type RuntimeEvent: From + IsType<::RuntimeEvent>; } #[pallet::pallet] @@ -579,13 +579,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); @@ -601,7 +601,7 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } impl pallet::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type MyGetParam = ConstU32<10>; type MyGetParam2 = ConstU32<11>; type MyGetParam3 = MyGetParam3; @@ -609,7 +609,7 @@ impl pallet::Config for Runtime { } impl pallet2::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; } impl pallet4::Config for Runtime {} @@ -621,7 +621,7 @@ impl pallet3::Config for Runtime { pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime where @@ -639,10 +639,10 @@ frame_support::construct_runtime!( } ); -// Test that the part `Call` is excluded from Example2 and included in Example4. -fn _ensure_call_is_correctly_excluded_and_included(call: Call) { +// Test that the part `RuntimeCall` is excluded from Example2 and included in Example4. 
+fn _ensure_call_is_correctly_excluded_and_included(call: RuntimeCall) { match call { - Call::System(_) | Call::Example(_) | Call::Example4(_) => (), + RuntimeCall::System(_) | RuntimeCall::Example(_) | RuntimeCall::Example4(_) => (), } } @@ -665,7 +665,7 @@ fn transactional_works() { .iter() .map(|e| &e.event) .collect::>(), - vec![&Event::Example(pallet::Event::Something(0))], + vec![&RuntimeEvent::Example(pallet::Event::Something(0))], ); }) } @@ -730,7 +730,7 @@ fn inherent_expand() { let inherents = InherentData::new().create_extrinsics(); let expected = vec![UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo_no_post_info {}), + function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), signature: None, }]; assert_eq!(expected, inherents); @@ -745,11 +745,11 @@ fn inherent_expand() { ), vec![ UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo_no_post_info {}), + function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), signature: None, }, UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo { foo: 1, bar: 0 }), + function: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), signature: None, }, ], @@ -767,11 +767,11 @@ fn inherent_expand() { ), vec![ UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo_no_post_info {}), + function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), signature: None, }, UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo { foo: 0, bar: 0 }), + function: RuntimeCall::Example(pallet::Call::foo { foo: 0, bar: 0 }), signature: None, }, ], @@ -788,7 +788,7 @@ fn inherent_expand() { Digest::default(), ), vec![UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo_storage_layer { foo: 0 }), + function: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), signature: None, }], ); @@ -806,7 +806,7 @@ fn inherent_expand() { Digest::default(), ), vec![UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo_no_post_info {}), + function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), signature: Some((1, (), ())), }], ); @@ -825,11 +825,11 @@ fn inherent_expand() { ), vec![ UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo { foo: 1, bar: 1 }), + function: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), signature: None, }, UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo_storage_layer { foo: 0 }), + function: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), signature: None, }, ], @@ -847,15 +847,15 @@ fn inherent_expand() { ), vec![ UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo { foo: 1, bar: 1 }), + function: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), signature: None, }, UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo_storage_layer { foo: 0 }), + function: RuntimeCall::Example(pallet::Call::foo_storage_layer { foo: 0 }), signature: None, }, UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo_no_post_info {}), + function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), signature: None, }, ], @@ -873,15 +873,15 @@ fn inherent_expand() { ), vec![ UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo { foo: 1, bar: 1 }), + function: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 1 }), signature: None, }, UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo { foo: 1, bar: 0 }), + function: RuntimeCall::Example(pallet::Call::foo { foo: 1, bar: 0 }), signature: Some((1, (), 
())), }, UncheckedExtrinsic { - function: Call::Example(pallet::Call::foo_no_post_info {}), + function: RuntimeCall::Example(pallet::Call::foo_no_post_info {}), signature: None, }, ], @@ -925,7 +925,7 @@ fn pallet_expand_deposit_event() { .unwrap(); assert_eq!( frame_system::Pallet::::events()[0].event, - Event::Example(pallet::Event::Something(3)), + RuntimeEvent::Example(pallet::Event::Something(3)), ); }) } @@ -1064,27 +1064,27 @@ fn pallet_hooks_expand() { assert_eq!( frame_system::Pallet::::events()[0].event, - Event::Example(pallet::Event::Something(10)), + RuntimeEvent::Example(pallet::Event::Something(10)), ); assert_eq!( frame_system::Pallet::::events()[1].event, - Event::Example2(pallet2::Event::Something(11)), + RuntimeEvent::Example2(pallet2::Event::Something(11)), ); assert_eq!( frame_system::Pallet::::events()[2].event, - Event::Example(pallet::Event::Something(20)), + RuntimeEvent::Example(pallet::Event::Something(20)), ); assert_eq!( frame_system::Pallet::::events()[3].event, - Event::Example2(pallet2::Event::Something(21)), + RuntimeEvent::Example2(pallet2::Event::Something(21)), ); assert_eq!( frame_system::Pallet::::events()[4].event, - Event::Example(pallet::Event::Something(30)), + RuntimeEvent::Example(pallet::Event::Something(30)), ); assert_eq!( frame_system::Pallet::::events()[5].event, - Event::Example2(pallet2::Event::Something(31)), + RuntimeEvent::Example2(pallet2::Event::Something(31)), ); }) } @@ -1110,27 +1110,27 @@ fn all_pallets_type_reversed_order_is_correct() { assert_eq!( frame_system::Pallet::::events()[0].event, - Event::Example2(pallet2::Event::Something(11)), + RuntimeEvent::Example2(pallet2::Event::Something(11)), ); assert_eq!( frame_system::Pallet::::events()[1].event, - Event::Example(pallet::Event::Something(10)), + RuntimeEvent::Example(pallet::Event::Something(10)), ); assert_eq!( frame_system::Pallet::::events()[2].event, - Event::Example2(pallet2::Event::Something(21)), + RuntimeEvent::Example2(pallet2::Event::Something(21)), ); assert_eq!( frame_system::Pallet::::events()[3].event, - Event::Example(pallet::Event::Something(20)), + RuntimeEvent::Example(pallet::Event::Something(20)), ); assert_eq!( frame_system::Pallet::::events()[4].event, - Event::Example2(pallet2::Event::Something(31)), + RuntimeEvent::Example2(pallet2::Event::Something(31)), ); assert_eq!( frame_system::Pallet::::events()[5].event, - Event::Example(pallet::Event::Something(30)), + RuntimeEvent::Example(pallet::Event::Something(30)), ); }) } diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index dc107b345c2b8..2342f25fb98b9 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -43,7 +43,7 @@ mod pallet_old { + Into + Default + SomeAssociation; - type Event: From> + Into<::Event>; + type RuntimeEvent: From> + Into<::RuntimeEvent>; } decl_storage! 
{ @@ -120,7 +120,7 @@ pub mod pallet { + scale_info::StaticTypeInfo; #[pallet::constant] type SomeConst: Get; - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; } #[pallet::pallet] @@ -229,13 +229,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); @@ -251,19 +251,19 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } impl pallet::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet_old::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type SomeConst = ConstU64<10>; type Balance = u64; } pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime where diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index 428de08864f29..b89ab410621e5 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -30,7 +30,7 @@ mod pallet_old { pub trait Config: frame_system::Config { type SomeConst: Get; type Balance: Parameter + codec::HasCompact + From + Into + Default; - type Event: From> + Into<::Event>; + type RuntimeEvent: From> + Into<::RuntimeEvent>; } decl_storage! 
{ @@ -105,7 +105,8 @@ pub mod pallet { + scale_info::StaticTypeInfo; #[pallet::constant] type SomeConst: Get; - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; } #[pallet::pallet] @@ -212,13 +213,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type Version = (); type PalletInfo = PalletInfo; @@ -231,39 +232,39 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } impl pallet::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet_old::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet_old::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type SomeConst = ConstU64<10>; type Balance = u64; } impl pallet_old::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type SomeConst = ConstU64<10>; type Balance = u64; } pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime where diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 2ae910e73d87e..f01ce17d71bf9 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -42,7 +42,8 @@ pub mod pallet { #[pallet::constant] type MyGetParam: Get; type Balance: Parameter + Default + scale_info::StaticTypeInfo; - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; } #[pallet::pallet] @@ -253,7 +254,8 @@ pub mod pallet2 { #[pallet::config] pub trait Config: frame_system::Config { - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; } #[pallet::pallet] @@ -288,13 +290,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type BlockWeights = (); type BlockLength = (); @@ -310,25 +312,25 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } impl pallet::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type MyGetParam = ConstU32<10>; type Balance = u64; } impl pallet::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; 
type MyGetParam = ConstU32<10>; type Balance = u64; } impl pallet2::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; } impl pallet2::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; } pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; frame_support::construct_runtime!( pub enum Runtime where @@ -433,7 +435,7 @@ fn pallet_expand_deposit_event() { .unwrap(); assert_eq!( frame_system::Pallet::::events()[0].event, - Event::Example(pallet::Event::Something(3)), + RuntimeEvent::Example(pallet::Event::Something(3)), ); }); @@ -444,7 +446,7 @@ fn pallet_expand_deposit_event() { .unwrap(); assert_eq!( frame_system::Pallet::::events()[0].event, - Event::Instance1Example(pallet::Event::Something(3)), + RuntimeEvent::Instance1Example(pallet::Event::Something(3)), ); }); } @@ -668,27 +670,27 @@ fn pallet_hooks_expand() { assert_eq!( frame_system::Pallet::::events()[0].event, - Event::Example(pallet::Event::Something(10)), + RuntimeEvent::Example(pallet::Event::Something(10)), ); assert_eq!( frame_system::Pallet::::events()[1].event, - Event::Instance1Example(pallet::Event::Something(11)), + RuntimeEvent::Instance1Example(pallet::Event::Something(11)), ); assert_eq!( frame_system::Pallet::::events()[2].event, - Event::Example(pallet::Event::Something(20)), + RuntimeEvent::Example(pallet::Event::Something(20)), ); assert_eq!( frame_system::Pallet::::events()[3].event, - Event::Instance1Example(pallet::Event::Something(21)), + RuntimeEvent::Instance1Example(pallet::Event::Something(21)), ); assert_eq!( frame_system::Pallet::::events()[4].event, - Event::Example(pallet::Event::Something(30)), + RuntimeEvent::Example(pallet::Event::Something(30)), ); assert_eq!( frame_system::Pallet::::events()[5].event, - Event::Instance1Example(pallet::Event::Something(31)), + RuntimeEvent::Instance1Example(pallet::Event::Something(31)), ); }) } diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.rs b/frame/support/test/tests/pallet_ui/event_field_not_member.rs index 0ecde4c130878..2b45a971788fb 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.rs +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.rs @@ -6,7 +6,7 @@ mod pallet { #[pallet::config] pub trait Config: frame_system::Config { type Bar; - type Event: IsType<::Event> + From>; + type RuntimeEvent: IsType<::RuntimeEvent> + From>; } #[pallet::pallet] diff --git a/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr index e3126ad6a85dc..2eda72eb5f72f 100644 --- a/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr +++ b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr @@ -1,4 +1,4 @@ -error: Invalid usage of Event, `Config` contains no associated type `Event`, but enum `Event` is declared (in use of `#[pallet::event]`). An Event associated type must be declare on trait `Config`. +error: Invalid usage of RuntimeEvent, `Config` contains no associated type `RuntimeEvent`, but enum `Event` is declared (in use of `#[pallet::event]`). An RuntimeEvent associated type must be declare on trait `Config`. 
--> $DIR/event_not_in_trait.rs:1:1 | 1 | #[frame_support::pallet] diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs index fa3bf04d3530d..a02cc9b9de883 100644 --- a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.rs @@ -6,7 +6,7 @@ mod pallet { #[pallet::config] pub trait Config: frame_system::Config { type Bar; - type Event; + type RuntimeEvent; } #[pallet::pallet] diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr index 1f58a37576d0d..d54149d719a3b 100644 --- a/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound.stderr @@ -1,5 +1,5 @@ -error: Invalid `type Event`, associated type `Event` is reserved and must bound: `IsType<::Event>` +error: Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must bound: `IsType<::RuntimeEvent>` --> $DIR/event_type_invalid_bound.rs:9:3 | -9 | type Event; +9 | type RuntimeEvent; | ^^^^ diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs index 564a539b89f57..99df89d67278c 100644 --- a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.rs @@ -6,7 +6,7 @@ mod pallet { #[pallet::config] pub trait Config: frame_system::Config { type Bar; - type Event: IsType<::Event>; + type RuntimeEvent: IsType<::RuntimeEvent>; } #[pallet::pallet] diff --git a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr index 8b8946f3b25eb..ea8b2ff000ceb 100644 --- a/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/event_type_invalid_bound_2.stderr @@ -1,5 +1,5 @@ -error: Invalid `type Event`, associated type `Event` is reserved and must bound: `From` or `From>` or `From>` +error: Invalid `type RuntimeEvent`, associated type `RuntimeEvent` is reserved and must bound: `From` or `From>` or `From>` --> $DIR/event_type_invalid_bound_2.rs:9:3 | -9 | type Event: IsType<::Event>; +9 | type RuntimeEvent: IsType<::RuntimeEvent>; | ^^^^ diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs index 6415c3c0d2c07..788e852ba58e9 100644 --- a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -18,7 +18,7 @@ pub trait Trait: frame_system::Config { type Balance: frame_support::dispatch::Parameter; /// The overarching event type. - type Event: From> + Into<::Event>; + type RuntimeEvent: From> + Into<::RuntimeEvent>; } frame_support::decl_storage! 
{ @@ -108,7 +108,7 @@ mod tests { type TestHeader = sp_runtime::generic::Header; type TestUncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< ::AccountId, - ::Call, + ::RuntimeCall, (), SignedExtra, >; @@ -130,12 +130,12 @@ mod tests { type Index = u64; type BlockNumber = u64; type Hash = sp_core::H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = TestHeader; - type Event = (); + type RuntimeEvent = (); type BlockHashCount = ConstU64<250>; type DbWeight = (); type BlockWeights = (); @@ -153,6 +153,6 @@ mod tests { impl pallet_test::Trait for Runtime { type Balance = u32; - type Event = (); + type RuntimeEvent = (); } } diff --git a/frame/support/test/tests/storage_layers.rs b/frame/support/test/tests/storage_layers.rs index 7404ccace2c09..a3e512302fe4f 100644 --- a/frame/support/test/tests/storage_layers.rs +++ b/frame/support/test/tests/storage_layers.rs @@ -79,7 +79,7 @@ pub type BlockNumber = u64; pub type Index = u64; pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; -pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; impl frame_system::Config for Runtime { type BlockWeights = (); @@ -89,13 +89,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u32; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_runtime::testing::H256; type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = u64; type Lookup = sp_runtime::traits::IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU32<250>; type Version = (); type PalletInfo = PalletInfo; @@ -255,11 +255,11 @@ fn storage_layer_commit_then_rollback() { fn storage_layer_in_pallet_call() { TestExternalities::default().execute_with(|| { use sp_runtime::traits::Dispatchable; - let call1 = Call::MyPallet(pallet::Call::set_value { value: 2 }); + let call1 = RuntimeCall::MyPallet(pallet::Call::set_value { value: 2 }); assert_ok!(call1.dispatch(Origin::signed(0))); assert_eq!(Value::::get(), 2); - let call2 = Call::MyPallet(pallet::Call::set_value { value: 1 }); + let call2 = RuntimeCall::MyPallet(pallet::Call::set_value { value: 1 }); assert_noop!(call2.dispatch(Origin::signed(0)), Error::::Revert); }); } @@ -270,11 +270,11 @@ fn storage_layer_in_decl_pallet_call() { use frame_support::StorageValue; use sp_runtime::traits::Dispatchable; - let call1 = Call::DeclPallet(decl_pallet::Call::set_value { value: 2 }); + let call1 = RuntimeCall::DeclPallet(decl_pallet::Call::set_value { value: 2 }); assert_ok!(call1.dispatch(Origin::signed(0))); assert_eq!(decl_pallet::DeclValue::get(), 2); - let call2 = Call::DeclPallet(decl_pallet::Call::set_value { value: 1 }); + let call2 = RuntimeCall::DeclPallet(decl_pallet::Call::set_value { value: 1 }); assert_noop!(call2.dispatch(Origin::signed(0)), "Revert!"); // Calling the function directly also works with storage layers. 
assert_noop!(decl_pallet::Module::::set_value(Origin::signed(1), 1), "Revert!"); diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index b30fd8d5ec561..9736c74dc236d 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -25,12 +25,12 @@ pub trait Config: 'static + Eq + Clone { type Origin: Into, Self::Origin>> + From>; - type BaseCallFilter: frame_support::traits::Contains; + type BaseCallFilter: frame_support::traits::Contains; type BlockNumber: Decode + Encode + EncodeLike + Clone + Default + scale_info::TypeInfo; type Hash; type AccountId: Encode + EncodeLike + Decode + scale_info::TypeInfo; - type Call; - type Event: From>; + type RuntimeCall; + type RuntimeEvent: From>; type PalletInfo: frame_support::traits::PalletInfo; type DbWeight: Get; } @@ -43,7 +43,7 @@ frame_support::decl_module! { } impl Module { - pub fn deposit_event(_event: impl Into) {} + pub fn deposit_event(_event: impl Into) {} } frame_support::decl_event!( diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 017298dab3928..03d3a65812d56 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -37,7 +37,7 @@ mod module { #[pallet::config] pub trait Config: frame_system::Config { - type Event: From + IsType<::Event>; + type RuntimeEvent: From + IsType<::RuntimeEvent>; } #[pallet::event] @@ -79,13 +79,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -99,7 +99,7 @@ impl frame_system::Config for Runtime { } impl module::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; } fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index 08b043ae62741..1c0a7efa03735 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -46,13 +46,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = AccountIndex; type BlockNumber = BlockNumber; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = sp_core::H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = sp_runtime::testing::Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/system/src/extensions/check_genesis.rs b/frame/system/src/extensions/check_genesis.rs index a0679b11487f6..f5811f306cfe3 100644 --- a/frame/system/src/extensions/check_genesis.rs +++ b/frame/system/src/extensions/check_genesis.rs @@ -54,7 +54,7 @@ impl CheckGenesis { impl SignedExtension for CheckGenesis { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::RuntimeCall; type AdditionalSigned = T::Hash; type Pre = (); const IDENTIFIER: &'static str = "CheckGenesis"; diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index 59a6e14aca175..f515f1ae0fa27 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -56,7 +56,7 @@ 
impl sp_std::fmt::Debug for CheckMortality { impl SignedExtension for CheckMortality { type AccountId = T::AccountId; - type Call = T::Call; + type Call = T::RuntimeCall; type AdditionalSigned = T::Hash; type Pre = (); const IDENTIFIER: &'static str = "CheckMortality"; diff --git a/frame/system/src/extensions/check_non_zero_sender.rs b/frame/system/src/extensions/check_non_zero_sender.rs index 9a6c4007b3779..093424999aab3 100644 --- a/frame/system/src/extensions/check_non_zero_sender.rs +++ b/frame/system/src/extensions/check_non_zero_sender.rs @@ -53,10 +53,10 @@ impl CheckNonZeroSender { impl SignedExtension for CheckNonZeroSender where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { type AccountId = T::AccountId; - type Call = T::Call; + type Call = T::RuntimeCall; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "CheckNonZeroSender"; diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index 476aa2fb7478c..7259b80013ae5 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -60,10 +60,10 @@ impl sp_std::fmt::Debug for CheckNonce { impl SignedExtension for CheckNonce where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { type AccountId = T::AccountId; - type Call = T::Call; + type Call = T::RuntimeCall; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "CheckNonce"; diff --git a/frame/system/src/extensions/check_spec_version.rs b/frame/system/src/extensions/check_spec_version.rs index 0280d31f657ae..ef5f40402692c 100644 --- a/frame/system/src/extensions/check_spec_version.rs +++ b/frame/system/src/extensions/check_spec_version.rs @@ -54,7 +54,7 @@ impl CheckSpecVersion { impl SignedExtension for CheckSpecVersion { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::RuntimeCall; type AdditionalSigned = u32; type Pre = (); const IDENTIFIER: &'static str = "CheckSpecVersion"; diff --git a/frame/system/src/extensions/check_tx_version.rs b/frame/system/src/extensions/check_tx_version.rs index b92d8978bde01..be0b8fe2354aa 100644 --- a/frame/system/src/extensions/check_tx_version.rs +++ b/frame/system/src/extensions/check_tx_version.rs @@ -54,7 +54,7 @@ impl CheckTxVersion { impl SignedExtension for CheckTxVersion { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::RuntimeCall; type AdditionalSigned = u32; type Pre = (); const IDENTIFIER: &'static str = "CheckTxVersion"; diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 6e73a9e40179f..9af79e480b394 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -40,12 +40,12 @@ pub struct CheckWeight(sp_std::marker::PhantomData); impl CheckWeight where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { /// Checks if the current extrinsic does not exceed the maximum weight a single extrinsic /// with given `DispatchClass` can have. fn check_extrinsic_weight( - info: &DispatchInfoOf, + info: &DispatchInfoOf, ) -> Result<(), TransactionValidityError> { let max = T::BlockWeights::get().get(info.class).max_extrinsic; match max { @@ -59,18 +59,18 @@ where /// /// Upon successes, it returns the new block weight as a `Result`. 
fn check_block_weight( - info: &DispatchInfoOf, + info: &DispatchInfoOf, ) -> Result { let maximum_weight = T::BlockWeights::get(); let all_weight = Pallet::::block_weight(); - calculate_consumed_weight::(maximum_weight, all_weight, info) + calculate_consumed_weight::(maximum_weight, all_weight, info) } /// Checks if the current extrinsic can fit into the block with respect to block length limits. /// /// Upon successes, it returns the new block length as a `Result`. fn check_block_length( - info: &DispatchInfoOf, + info: &DispatchInfoOf, len: usize, ) -> Result { let length_limit = T::BlockLength::get(); @@ -93,7 +93,7 @@ where /// /// It checks and notes the new weight and length. pub fn do_pre_dispatch( - info: &DispatchInfoOf, + info: &DispatchInfoOf, len: usize, ) -> Result<(), TransactionValidityError> { let next_len = Self::check_block_length(info, len)?; @@ -108,7 +108,7 @@ where /// Do the validate checks. This can be applied to both signed and unsigned. /// /// It only checks that the block weight and length limit will not exceed. - pub fn do_validate(info: &DispatchInfoOf, len: usize) -> TransactionValidity { + pub fn do_validate(info: &DispatchInfoOf, len: usize) -> TransactionValidity { // ignore the next length. If they return `Ok`, then it is below the limit. let _ = Self::check_block_length(info, len)?; // during validation we skip block limit check. Since the `validate_transaction` @@ -170,10 +170,10 @@ where impl SignedExtension for CheckWeight where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { type AccountId = T::AccountId; - type Call = T::Call; + type Call = T::RuntimeCall; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "CheckWeight"; @@ -713,13 +713,13 @@ mod tests { }; // when - assert_ok!(calculate_consumed_weight::<::Call>( + assert_ok!(calculate_consumed_weight::<::RuntimeCall>( maximum_weight.clone(), all_weight.clone(), &mandatory1 )); assert_err!( - calculate_consumed_weight::<::Call>( + calculate_consumed_weight::<::RuntimeCall>( maximum_weight, all_weight, &mandatory2 diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index ba076d963bda7..550e3939f995a 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -205,7 +205,7 @@ pub mod pallet { pub trait Config: 'static + Eq + Clone { /// The basic call filter to use in Origin. All origins are built with this filter as base, /// except Root. - type BaseCallFilter: Contains; + type BaseCallFilter: Contains; /// Block & extrinsics weights: base values and limits. #[pallet::constant] @@ -219,10 +219,10 @@ pub mod pallet { type Origin: Into, Self::Origin>> + From> + Clone - + OriginTrait; + + OriginTrait; - /// The aggregated `Call` type. - type Call: Dispatchable + Debug; + /// The aggregated `RuntimeCall` type. + type RuntimeCall: Dispatchable + Debug; /// Account index (aka nonce) type. This stores the number of previous transactions /// associated with a sender account. @@ -293,11 +293,11 @@ pub mod pallet { type Header: Parameter + traits::Header; /// The aggregated event type of the runtime. - type Event: Parameter + type RuntimeEvent: Parameter + Member + From> + Debug - + IsType<::Event>; + + IsType<::RuntimeEvent>; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). 
#[pallet::constant] @@ -608,7 +608,7 @@ pub mod pallet { #[pallet::storage] #[pallet::unbounded] pub(super) type Events = - StorageValue<_, Vec>>, ValueQuery>; + StorageValue<_, Vec>>, ValueQuery>; /// The number of events in the `Events` list. #[pallet::storage] @@ -1216,7 +1216,7 @@ impl Pallet { } /// Deposits an event into this block's event record. - pub fn deposit_event(event: impl Into) { + pub fn deposit_event(event: impl Into) { Self::deposit_event_indexed(&[], event.into()); } @@ -1225,7 +1225,7 @@ impl Pallet { /// /// This will update storage entries that correspond to the specified topics. /// It is expected that light-clients could subscribe to this topics. - pub fn deposit_event_indexed(topics: &[T::Hash], event: T::Event) { + pub fn deposit_event_indexed(topics: &[T::Hash], event: T::RuntimeEvent) { let block_number = Self::block_number(); // Don't populate events on genesis. if block_number.is_zero() { @@ -1415,7 +1415,7 @@ impl Pallet { /// impact on the PoV size of a block. Users should use alternative and well bounded storage /// items for any behavior like this. #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] - pub fn events() -> Vec> { + pub fn events() -> Vec> { // Dereferencing the events here is fine since we are not in the // memory-restricted runtime. Self::read_events_no_consensus().into_iter().map(|e| *e).collect() @@ -1425,7 +1425,7 @@ impl Pallet { /// /// Should only be called if you know what you are doing and outside of the runtime block /// execution else it can have a large impact on the PoV size of a block. - pub fn read_events_no_consensus() -> Vec>> { + pub fn read_events_no_consensus() -> Vec>> { Events::::get() } @@ -1470,13 +1470,13 @@ impl Pallet { /// Assert the given `event` exists. #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] - pub fn assert_has_event(event: T::Event) { + pub fn assert_has_event(event: T::RuntimeEvent) { assert!(Self::events().iter().any(|record| record.event == event)) } /// Assert the last event equal to the given `event`. #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] - pub fn assert_last_event(event: T::Event) { + pub fn assert_last_event(event: T::RuntimeEvent) { assert_eq!(Self::events().last().expect("events expected").event, event); } diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index 09a018e2be70e..fdaaceaf8abe0 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -94,7 +94,7 @@ impl Config for Test { type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -102,7 +102,7 @@ impl Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<10>; type DbWeight = DbWeight; type Version = Version; @@ -119,8 +119,8 @@ impl Config for Test { pub type SysEvent = frame_system::Event; /// A simple call, which one doesn't matter. -pub const CALL: &::Call = - &Call::System(frame_system::Call::set_heap_pages { pages: 0u64 }); +pub const CALL: &::RuntimeCall = + &RuntimeCall::System(frame_system::Call::set_heap_pages { pages: 0u64 }); /// Create new externalities for `System` module tests. 
pub fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/system/src/mocking.rs b/frame/system/src/mocking.rs index ccb63f9bb236c..d8cfcb9baf268 100644 --- a/frame/system/src/mocking.rs +++ b/frame/system/src/mocking.rs @@ -22,7 +22,7 @@ use sp_runtime::generic; /// An unchecked extrinsic type to be used in tests. pub type MockUncheckedExtrinsic = generic::UncheckedExtrinsic< ::AccountId, - ::Call, + ::RuntimeCall, Signature, Extra, >; diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index 86440188a765c..99a4c1541d30f 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -617,7 +617,7 @@ pub trait SignedPayload: Encode { #[cfg(test)] mod tests { use super::*; - use crate::mock::{Call, Test as TestRuntime, CALL}; + use crate::mock::{RuntimeCall, Test as TestRuntime, CALL}; use codec::Decode; use sp_core::offchain::{testing, TransactionPoolExt}; use sp_runtime::testing::{TestSignature, TestXt, UintAuthorityId}; @@ -627,11 +627,11 @@ mod tests { type Signature = TestSignature; } - type Extrinsic = TestXt; + type Extrinsic = TestXt; - impl SendTransactionTypes for TestRuntime { + impl SendTransactionTypes for TestRuntime { type Extrinsic = Extrinsic; - type OverarchingCall = Call; + type OverarchingCall = RuntimeCall; } #[derive(codec::Encode, codec::Decode)] diff --git a/frame/timestamp/src/mock.rs b/frame/timestamp/src/mock.rs index 1ea553aa548ec..7d3da0e8a0b83 100644 --- a/frame/timestamp/src/mock.rs +++ b/frame/timestamp/src/mock.rs @@ -58,13 +58,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs index a4697284e38a0..9313a26e52e00 100644 --- a/frame/tips/src/lib.rs +++ b/frame/tips/src/lib.rs @@ -127,7 +127,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + pallet_treasury::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// Maximum acceptable reason length. 
/// diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index b12885fc59f16..9b721bd77a8bc 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -68,13 +68,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -92,7 +92,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -133,7 +133,7 @@ impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -152,7 +152,7 @@ impl pallet_treasury::Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; @@ -176,7 +176,7 @@ impl Config for Test { type TipFindersFee = TipFindersFee; type TipReportDepositBase = ConstU64<1>; type DataDepositPerByte = ConstU64<1>; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); } @@ -187,7 +187,7 @@ impl Config for Test { type TipFindersFee = TipFindersFee; type TipReportDepositBase = ConstU64<1>; type DataDepositPerByte = ConstU64<1>; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type WeightInfo = (); } @@ -209,7 +209,7 @@ fn last_event() -> TipEvent { System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let Event::Tips(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let RuntimeEvent::Tips(inner) = e { Some(inner) } else { None }) .last() .unwrap() } diff --git a/frame/transaction-payment/asset-tx-payment/src/lib.rs b/frame/transaction-payment/asset-tx-payment/src/lib.rs index 08561375247ae..c14c7ba11b546 100644 --- a/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -114,7 +114,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + pallet_transaction_payment::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The fungibles instance used to pay for transactions in assets. type Fungibles: Balanced; /// The actual transaction charging logic that charges the fees. 
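For orientation only (not part of this patch): after the rename, a pallet's `Config` exposes the overarching event type as `RuntimeEvent`, while the pallet-local `Event` enum keeps its name. The following is a minimal sketch of the post-rename pattern in a hypothetical pallet; the module name, variant, and helper are illustrative assumptions, not code from this series.

```rust
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::config]
	pub trait Config: frame_system::Config {
		/// Renamed from `type Event` in earlier FRAME releases.
		type RuntimeEvent: From<Event<Self>>
			+ IsType<<Self as frame_system::Config>::RuntimeEvent>;
	}

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}

	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T: Config> {
		/// Purely illustrative variant.
		SomethingHappened,
	}

	impl<T: Config> Pallet<T> {
		/// Pallet code keeps calling the generated `deposit_event`; the
		/// conversion into the runtime-wide `RuntimeEvent` happens via the
		/// `From` / `IsType` bounds declared on `Config` above.
		pub fn note_something() {
			Self::deposit_event(Event::SomethingHappened);
		}
	}
}
```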
@@ -154,7 +154,7 @@ pub struct ChargeAssetTxPayment { impl ChargeAssetTxPayment where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, AssetBalanceOf: Send + Sync + FixedPointOperand, BalanceOf: Send + Sync + FixedPointOperand + IsType>, ChargeAssetIdOf: Send + Sync, @@ -170,8 +170,8 @@ where fn withdraw_fee( &self, who: &T::AccountId, - call: &T::Call, - info: &DispatchInfoOf, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, ) -> Result<(BalanceOf, InitialPayment), TransactionValidityError> { let fee = pallet_transaction_payment::Pallet::::compute_fee(len as u32, info, self.tip); @@ -211,7 +211,7 @@ impl sp_std::fmt::Debug for ChargeAssetTxPayment { impl SignedExtension for ChargeAssetTxPayment where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, AssetBalanceOf: Send + Sync + FixedPointOperand, BalanceOf: Send + Sync + From + FixedPointOperand + IsType>, ChargeAssetIdOf: Send + Sync, @@ -219,7 +219,7 @@ where { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; type AccountId = T::AccountId; - type Call = T::Call; + type Call = T::RuntimeCall; type AdditionalSigned = (); type Pre = ( // tip diff --git a/frame/transaction-payment/asset-tx-payment/src/payment.rs b/frame/transaction-payment/asset-tx-payment/src/payment.rs index 394696cc18929..80ff4e40dcffa 100644 --- a/frame/transaction-payment/asset-tx-payment/src/payment.rs +++ b/frame/transaction-payment/asset-tx-payment/src/payment.rs @@ -54,8 +54,8 @@ pub trait OnChargeAssetTransaction { /// Note: The `fee` already includes the `tip`. fn withdraw_fee( who: &T::AccountId, - call: &T::Call, - dispatch_info: &DispatchInfoOf, + call: &T::RuntimeCall, + dispatch_info: &DispatchInfoOf, asset_id: Self::AssetId, fee: Self::Balance, tip: Self::Balance, @@ -68,8 +68,8 @@ pub trait OnChargeAssetTransaction { /// Note: The `fee` already includes the `tip`. fn correct_and_deposit_fee( who: &T::AccountId, - dispatch_info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + dispatch_info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, corrected_fee: Self::Balance, tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, @@ -114,8 +114,8 @@ where /// Note: The `fee` already includes the `tip`. fn withdraw_fee( who: &T::AccountId, - _call: &T::Call, - _info: &DispatchInfoOf, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, asset_id: Self::AssetId, fee: Self::Balance, _tip: Self::Balance, @@ -142,8 +142,8 @@ where /// Note: The `corrected_fee` already includes the `tip`. fn correct_and_deposit_fee( who: &T::AccountId, - _dispatch_info: &DispatchInfoOf, - _post_info: &PostDispatchInfoOf, + _dispatch_info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, corrected_fee: Self::Balance, _tip: Self::Balance, paid: Self::LiquidityInfo, diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index dd7c086a82469..dfce28fda43d2 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -54,8 +54,8 @@ frame_support::construct_runtime!( } ); -const CALL: &::Call = - &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); +const CALL: &::RuntimeCall = + &RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); parameter_types! 
{ static ExtrinsicBaseWeight: Weight = Weight::zero(); @@ -89,13 +89,13 @@ impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -114,7 +114,7 @@ parameter_types! { impl pallet_balances::Config for Runtime { type Balance = Balance; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<10>; type AccountStore = System; @@ -143,7 +143,7 @@ impl WeightToFeeT for TransactionByteFee { } impl pallet_transaction_payment::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter; type WeightToFee = WeightToFee; type LengthToFee = TransactionByteFee; @@ -152,7 +152,7 @@ impl pallet_transaction_payment::Config for Runtime { } impl pallet_assets::Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Balance = Balance; type AssetId = u32; type Currency = Balances; @@ -198,7 +198,7 @@ impl HandleCredit for CreditToBlockAuthor { } impl Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; type OnChargeAssetTransaction = FungiblesAdapter< pallet_assets::BalanceToAssetBalance, diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index b3c53cc048bad..5f1f8321456be 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -282,7 +282,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Handler for withdrawing, refunding and depositing the transaction fee. /// Transaction fees are withdrawn before the transaction is executed. @@ -440,7 +440,7 @@ where len: u32, ) -> RuntimeDispatchInfo> where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { // NOTE: we can actually make it understand `ChargeTransactionPayment`, but would be some // hassle for sure. We have to make it aware of the index of `ChargeTransactionPayment` in @@ -467,7 +467,7 @@ where len: u32, ) -> FeeDetails> where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); @@ -482,11 +482,11 @@ where } /// Query information of a dispatch class, weight, and fee of a given encoded `Call`. - pub fn query_call_info(call: T::Call, len: u32) -> RuntimeDispatchInfo> + pub fn query_call_info(call: T::RuntimeCall, len: u32) -> RuntimeDispatchInfo> where - T::Call: Dispatchable + GetDispatchInfo, + T::RuntimeCall: Dispatchable + GetDispatchInfo, { - let dispatch_info = ::get_dispatch_info(&call); + let dispatch_info = ::get_dispatch_info(&call); let DispatchInfo { weight, class, .. } = dispatch_info; RuntimeDispatchInfo { @@ -497,20 +497,24 @@ where } /// Query fee details of a given encoded `Call`. 
- pub fn query_call_fee_details(call: T::Call, len: u32) -> FeeDetails> + pub fn query_call_fee_details(call: T::RuntimeCall, len: u32) -> FeeDetails> where - T::Call: Dispatchable + GetDispatchInfo, + T::RuntimeCall: Dispatchable + GetDispatchInfo, { - let dispatch_info = ::get_dispatch_info(&call); + let dispatch_info = ::get_dispatch_info(&call); let tip = 0u32.into(); Self::compute_fee_details(len, &dispatch_info, tip) } /// Compute the final fee value for a particular transaction. - pub fn compute_fee(len: u32, info: &DispatchInfoOf, tip: BalanceOf) -> BalanceOf + pub fn compute_fee( + len: u32, + info: &DispatchInfoOf, + tip: BalanceOf, + ) -> BalanceOf where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { Self::compute_fee_details(len, info, tip).final_fee() } @@ -518,11 +522,11 @@ where /// Compute the fee details for a particular transaction. pub fn compute_fee_details( len: u32, - info: &DispatchInfoOf, + info: &DispatchInfoOf, tip: BalanceOf, ) -> FeeDetails> where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { Self::compute_fee_raw(len, info.weight, tip, info.pays_fee, info.class) } @@ -533,12 +537,12 @@ where /// weight is used for the weight fee calculation. pub fn compute_actual_fee( len: u32, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, tip: BalanceOf, ) -> BalanceOf where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { Self::compute_actual_fee_details(len, info, post_info, tip).final_fee() } @@ -546,12 +550,12 @@ where /// Compute the actual post dispatch fee details for a particular transaction. pub fn compute_actual_fee_details( len: u32, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, tip: BalanceOf, ) -> FeeDetails> where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { Self::compute_fee_raw( len, @@ -632,7 +636,7 @@ pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); impl ChargeTransactionPayment where - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, BalanceOf: Send + Sync + FixedPointOperand, { /// utility constructor. Used only in client/factory code. @@ -648,8 +652,8 @@ where fn withdraw_fee( &self, who: &T::AccountId, - call: &T::Call, - info: &DispatchInfoOf, + call: &T::RuntimeCall, + info: &DispatchInfoOf, len: usize, ) -> Result< ( @@ -681,7 +685,7 @@ where /// state of-the-art blockchains, number of per-block transactions is expected to be in a /// range reasonable enough to not saturate the `Balance` type while multiplying by the tip. 
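As a usage illustration only (not from this patch set): with the bounds above now written against `T::RuntimeCall`, computing the fee for a concrete call in a mock runtime looks roughly like the sketch below. `Runtime`, `RuntimeCall` and `BalancesCall` are assumed to come from a `construct_runtime!`-based test setup like the ones in this series, the helper name is invented, and import paths may differ slightly between releases.

```rust
use codec::Encode;
use frame_support::dispatch::GetDispatchInfo;

fn example_fee() -> u64 {
	// Build a concrete runtime call and ask it for its dispatch info.
	let call = RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 });
	let info = call.get_dispatch_info();
	let len = call.encode().len() as u32;

	// Since this change `compute_fee` is bounded on
	// `T::RuntimeCall: Dispatchable<Info = DispatchInfo>`; the tip is passed explicitly.
	pallet_transaction_payment::Pallet::<Runtime>::compute_fee(len, &info, 0)
}
```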
pub fn get_priority( - info: &DispatchInfoOf, + info: &DispatchInfoOf, len: usize, tip: BalanceOf, final_fee: BalanceOf, @@ -755,11 +759,11 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment { impl SignedExtension for ChargeTransactionPayment where BalanceOf: Send + Sync + From + FixedPointOperand, - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { const IDENTIFIER: &'static str = "ChargeTransactionPayment"; type AccountId = T::AccountId; - type Call = T::Call; + type Call = T::RuntimeCall; type AdditionalSigned = (); type Pre = ( // tip @@ -821,7 +825,7 @@ impl EstimateCallFee where BalanceOf: FixedPointOperand, - T::Call: Dispatchable, + T::RuntimeCall: Dispatchable, { fn estimate_call_fee(call: &AnyCall, post_info: PostDispatchInfo) -> BalanceOf { let len = call.encoded_size() as u32; @@ -870,8 +874,8 @@ mod tests { } ); - const CALL: &::Call = - &Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + const CALL: &::RuntimeCall = + &RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); parameter_types! { static ExtrinsicBaseWeight: Weight = Weight::zero(); @@ -906,13 +910,13 @@ mod tests { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -927,7 +931,7 @@ mod tests { impl pallet_balances::Config for Runtime { type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -975,7 +979,7 @@ mod tests { } impl Config for Runtime { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnChargeTransaction = CurrencyAdapter; type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = WeightToFee; @@ -1238,7 +1242,7 @@ mod tests { #[test] fn query_info_and_fee_details_works() { - let call = Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + let call = RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); let origin = 111111; let extra = (); let xt = TestXt::new(call.clone(), Some((origin, extra))); @@ -1301,7 +1305,7 @@ mod tests { #[test] fn query_call_info_and_fee_details_works() { - let call = Call::Balances(BalancesCall::transfer { dest: 2, value: 69 }); + let call = RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 69 }); let info = call.get_dispatch_info(); let encoded_call = call.encode(); let len = encoded_call.len() as u32; @@ -1495,13 +1499,11 @@ mod tests { )); assert_eq!(Balances::free_balance(2), 0); // Transfer Event - System::assert_has_event(Event::Balances(pallet_balances::Event::Transfer { - from: 2, - to: 3, - amount: 80, - })); + System::assert_has_event(RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from: 2, to: 3, amount: 80 }, + )); // Killed Event - System::assert_has_event(Event::System(system::Event::KilledAccount { + System::assert_has_event(RuntimeEvent::System(system::Event::KilledAccount { account: 2, })); }); @@ -1560,7 +1562,7 @@ mod tests { )); assert_eq!(Balances::total_balance(&user), 0); // TransactionFeePaid Event - System::assert_has_event(Event::TransactionPayment( + System::assert_has_event(RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: user, actual_fee: 
0, diff --git a/frame/transaction-payment/src/payment.rs b/frame/transaction-payment/src/payment.rs index 3a5fad0d66a52..ebc9c5c5afd62 100644 --- a/frame/transaction-payment/src/payment.rs +++ b/frame/transaction-payment/src/payment.rs @@ -37,8 +37,8 @@ pub trait OnChargeTransaction { /// Note: The `fee` already includes the `tip`. fn withdraw_fee( who: &T::AccountId, - call: &T::Call, - dispatch_info: &DispatchInfoOf, + call: &T::RuntimeCall, + dispatch_info: &DispatchInfoOf, fee: Self::Balance, tip: Self::Balance, ) -> Result; @@ -50,8 +50,8 @@ pub trait OnChargeTransaction { /// Note: The `fee` already includes the `tip`. fn correct_and_deposit_fee( who: &T::AccountId, - dispatch_info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, + dispatch_info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, corrected_fee: Self::Balance, tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, @@ -92,8 +92,8 @@ where /// Note: The `fee` already includes the `tip`. fn withdraw_fee( who: &T::AccountId, - _call: &T::Call, - _info: &DispatchInfoOf, + _call: &T::RuntimeCall, + _info: &DispatchInfoOf, fee: Self::Balance, tip: Self::Balance, ) -> Result { @@ -120,8 +120,8 @@ where /// Note: The `corrected_fee` already includes the `tip`. fn correct_and_deposit_fee( who: &T::AccountId, - _dispatch_info: &DispatchInfoOf, - _post_info: &PostDispatchInfoOf, + _dispatch_info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, corrected_fee: Self::Balance, tip: Self::Balance, already_withdrawn: Self::LiquidityInfo, diff --git a/frame/transaction-storage/src/benchmarking.rs b/frame/transaction-storage/src/benchmarking.rs index 83dd37922a31f..cab4c92317bb5 100644 --- a/frame/transaction-storage/src/benchmarking.rs +++ b/frame/transaction-storage/src/benchmarking.rs @@ -105,9 +105,9 @@ const PROOF: &[u8] = &hex_literal::hex!( type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::RuntimeEvent) { let events = System::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::RuntimeEvent = generic_event.into(); let EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); } diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index 681cd29af8222..e623079ac863b 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -92,9 +92,9 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// A dispatchable call. 
- type Call: Parameter + type RuntimeCall: Parameter + Dispatchable + GetDispatchInfo + From>; diff --git a/frame/transaction-storage/src/mock.rs b/frame/transaction-storage/src/mock.rs index 771387ef705be..1cb725e63e83b 100644 --- a/frame/transaction-storage/src/mock.rs +++ b/frame/transaction-storage/src/mock.rs @@ -52,7 +52,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -60,7 +60,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -77,7 +77,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -87,8 +87,8 @@ impl pallet_balances::Config for Test { } impl pallet_transaction_storage::Config for Test { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type Currency = Balances; type FeeDestination = (); type WeightInfo = (); diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index b2b670d86f07c..b3787e2063c45 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -61,7 +61,7 @@ fn setup_pot_account, I: 'static>() { let _ = T::Currency::make_free_balance_be(&pot_account, value); } -fn assert_last_event, I: 'static>(generic_event: >::Event) { +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index cd4d9cf036abe..bebbcda2effb6 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -155,7 +155,8 @@ pub mod pallet { type RejectOrigin: EnsureOrigin; /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// Handler for the unbalanced decrease when slashing for a rejected proposal or bounty. 
type OnSlash: OnUnbalanced>; diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index c9c6d01f129e6..af77b419d67d2 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -63,13 +63,13 @@ impl frame_system::Config for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u128; // u64 is not enough to hold bytes used to generate bounty account type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -86,7 +86,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -121,7 +121,7 @@ impl Config for Test { type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type OnSlash = (); type ProposalBond = ProposalBond; type ProposalBondMinimum = ConstU64<1>; diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs index 3e3148b5b5fc2..ab34558f95eb3 100644 --- a/frame/uniques/src/benchmarking.rs +++ b/frame/uniques/src/benchmarking.rs @@ -126,9 +126,9 @@ fn add_item_attribute, I: 'static>( (key, caller, caller_lookup) } -fn assert_last_event, I: 'static>(generic_event: >::Event) { +fn assert_last_event, I: 'static>(generic_event: >::RuntimeEvent) { let events = frame_system::Pallet::::events(); - let system_event: ::Event = generic_event.into(); + let system_event: ::RuntimeEvent = generic_event.into(); // compare to the last event record let frame_system::EventRecord { event, .. } = &events[events.len() - 1]; assert_eq!(event, &system_event); diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index bbecb08e51b3b..e25607e67a224 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -91,7 +91,8 @@ pub mod pallet { /// The module configuration trait. pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; /// Identifier for the collection of item. 
type CollectionId: Member + Parameter + MaxEncodedLen + Copy; diff --git a/frame/uniques/src/mock.rs b/frame/uniques/src/mock.rs index ff7b791de4950..6808c48007a92 100644 --- a/frame/uniques/src/mock.rs +++ b/frame/uniques/src/mock.rs @@ -50,7 +50,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type Origin = Origin; - type Call = Call; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -58,7 +58,7 @@ impl frame_system::Config for Test { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type DbWeight = (); type Version = (); @@ -75,7 +75,7 @@ impl frame_system::Config for Test { impl pallet_balances::Config for Test { type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -85,7 +85,7 @@ impl pallet_balances::Config for Test { } impl Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type CollectionId = u32; type ItemId = u32; type Currency = Balances; diff --git a/frame/uniques/src/tests.rs b/frame/uniques/src/tests.rs index 85d1bec574cf0..5ac677758316a 100644 --- a/frame/uniques/src/tests.rs +++ b/frame/uniques/src/tests.rs @@ -74,7 +74,7 @@ fn events() -> Vec> { let result = System::events() .into_iter() .map(|r| r.event) - .filter_map(|e| if let mock::Event::Uniques(inner) = e { Some(inner) } else { None }) + .filter_map(|e| if let mock::RuntimeEvent::Uniques(inner) = e { Some(inner) } else { None }) .collect::>(); System::reset_events(); @@ -887,7 +887,7 @@ fn buy_item_should_work() { // freeze collection assert_ok!(Uniques::freeze_collection(Origin::signed(user_1), collection_id)); - let buy_item_call = mock::Call::Uniques(crate::Call::::buy_item { + let buy_item_call = mock::RuntimeCall::Uniques(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1, @@ -899,7 +899,7 @@ fn buy_item_should_work() { // freeze item assert_ok!(Uniques::freeze(Origin::signed(user_1), collection_id, item_3)); - let buy_item_call = mock::Call::Uniques(crate::Call::::buy_item { + let buy_item_call = mock::RuntimeCall::Uniques(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1, diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 018280f69baeb..c29eded25aa5b 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -25,7 +25,7 @@ use frame_system::RawOrigin; const SEED: u32 = 0; -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event(generic_event: ::RuntimeEvent) { frame_system::Pallet::::assert_last_event(generic_event.into()); } @@ -33,7 +33,7 @@ benchmarks! { where_clause { where ::PalletsOrigin: Clone } batch { let c in 0 .. 1000; - let mut calls: Vec<::Call> = Vec::new(); + let mut calls: Vec<::RuntimeCall> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark { remark: vec![] }.into(); calls.push(call); @@ -54,7 +54,7 @@ benchmarks! { batch_all { let c in 0 .. 1000; - let mut calls: Vec<::Call> = Vec::new(); + let mut calls: Vec<::RuntimeCall> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark { remark: vec![] }.into(); calls.push(call); @@ -75,7 +75,7 @@ benchmarks! { force_batch { let c in 0 .. 
1000; - let mut calls: Vec<::Call> = Vec::new(); + let mut calls: Vec<::RuntimeCall> = Vec::new(); for i in 0 .. c { let call = frame_system::Call::remark { remark: vec![] }.into(); calls.push(call); diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 2209bc65d4958..b5bd686b59f00 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -84,16 +84,16 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From + IsType<::Event>; + type RuntimeEvent: From + IsType<::RuntimeEvent>; /// The overarching call type. - type Call: Parameter + type RuntimeCall: Parameter + Dispatchable + GetDispatchInfo + From> + UnfilteredDispatchable + IsSubType> - + IsType<::Call>; + + IsType<::RuntimeCall>; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin: Parameter + @@ -133,8 +133,9 @@ pub mod pallet { /// The limit on the number of batched calls. fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; - let call_size = ((sp_std::mem::size_of::<::Call>() as u32 + CALL_ALIGN - - 1) / CALL_ALIGN) * CALL_ALIGN; + let call_size = ((sp_std::mem::size_of::<::RuntimeCall>() as u32 + + CALL_ALIGN - 1) / CALL_ALIGN) * + CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; @@ -147,7 +148,7 @@ pub mod pallet { fn integrity_test() { // If you hit this error, you need to try to `Box` big dispatchable parameters. assert!( - sp_std::mem::size_of::<::Call>() as u32 <= CALL_ALIGN, + sp_std::mem::size_of::<::RuntimeCall>() as u32 <= CALL_ALIGN, "Call enum size should be smaller than {} bytes.", CALL_ALIGN, ); @@ -201,7 +202,7 @@ pub mod pallet { })] pub fn batch( origin: OriginFor, - calls: Vec<::Call>, + calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); @@ -262,7 +263,7 @@ pub mod pallet { pub fn as_derivative( origin: OriginFor, index: u16, - call: Box<::Call>, + call: Box<::RuntimeCall>, ) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; @@ -317,7 +318,7 @@ pub mod pallet { })] pub fn batch_all( origin: OriginFor, - calls: Vec<::Call>, + calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); @@ -333,10 +334,12 @@ pub mod pallet { } else { let mut filtered_origin = origin.clone(); // Don't allow users to nest `batch_all` calls. - filtered_origin.add_filter(move |c: &::Call| { - let c = ::Call::from_ref(c); - !matches!(c.is_sub_type(), Some(Call::batch_all { .. })) - }); + filtered_origin.add_filter( + move |c: &::RuntimeCall| { + let c = ::RuntimeCall::from_ref(c); + !matches!(c.is_sub_type(), Some(Call::batch_all { .. })) + }, + ); call.dispatch(filtered_origin) }; // Add the weight of this call. 
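A quick sanity check of the `batched_calls_limit` arithmetic touched above (illustrative only; the constants are assumptions matching this tree: `CALL_ALIGN = 1024`, `sp_core::MAX_POSSIBLE_ALLOCATION` = 32 MiB, and a margin factor of 3):

```rust
const CALL_ALIGN: u32 = 1024;
const ALLOCATOR_LIMIT: u32 = 32 * 1024 * 1024; // assumed value of MAX_POSSIBLE_ALLOCATION
const MARGIN_FACTOR: u32 = 3; // headroom for `Vec` capacity doubling

fn batched_calls_limit(runtime_call_size: u32) -> u32 {
	// Round the call size up to a multiple of CALL_ALIGN, as the pallet does.
	let call_size = ((runtime_call_size + CALL_ALIGN - 1) / CALL_ALIGN) * CALL_ALIGN;
	ALLOCATOR_LIMIT / MARGIN_FACTOR / call_size
}

fn main() {
	// `integrity_test` enforces `size_of::<RuntimeCall>() <= CALL_ALIGN`, so in
	// practice the limit evaluates to 33_554_432 / 3 / 1024 = 10_922 calls.
	assert_eq!(batched_calls_limit(200), 10_922);
}
```

That limit of roughly ten thousand calls is what the `batch_limit` test further down exercises: a batch of 40_000 `remark` calls is rejected with `Error::<T>::TooManyCalls`.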
@@ -376,7 +379,7 @@ pub mod pallet { pub fn dispatch_as( origin: OriginFor, as_origin: Box, - call: Box<::Call>, + call: Box<::RuntimeCall>, ) -> DispatchResult { ensure_root(origin)?; @@ -422,7 +425,7 @@ pub mod pallet { })] pub fn force_batch( origin: OriginFor, - calls: Vec<::Call>, + calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 09a2c6a4e4d38..1985cc3022114 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -110,12 +110,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -134,7 +134,7 @@ impl pallet_balances::Config for Test { type ReserveIdentifier = [u8; 8]; type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ConstU64<1>; type AccountStore = System; type WeightInfo = (); @@ -148,23 +148,23 @@ parameter_types! { impl example::Config for Test {} pub struct TestBaseCallFilter; -impl Contains for TestBaseCallFilter { - fn contains(c: &Call) -> bool { +impl Contains for TestBaseCallFilter { + fn contains(c: &RuntimeCall) -> bool { match *c { // Transfer works. Use `transfer_keep_alive` for a call that doesn't pass the filter. - Call::Balances(pallet_balances::Call::transfer { .. }) => true, - Call::Utility(_) => true, + RuntimeCall::Balances(pallet_balances::Call::transfer { .. }) => true, + RuntimeCall::Utility(_) => true, // For benchmarking, this acts as a noop call - Call::System(frame_system::Call::remark { .. }) => true, + RuntimeCall::System(frame_system::Call::remark { .. 
}) => true, // For tests - Call::Example(_) => true, + RuntimeCall::Example(_) => true, _ => false, } } } impl Config for Test { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type PalletsOrigin = OriginCaller; type WeightInfo = (); } @@ -187,12 +187,12 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -fn call_transfer(dest: u64, value: u64) -> Call { - Call::Balances(BalancesCall::transfer { dest, value }) +fn call_transfer(dest: u64, value: u64) -> RuntimeCall { + RuntimeCall::Balances(BalancesCall::transfer { dest, value }) } -fn call_foobar(err: bool, start_weight: Weight, end_weight: Option) -> Call { - Call::Example(ExampleCall::foobar { err, start_weight, end_weight }) +fn call_foobar(err: bool, start_weight: Weight, end_weight: Option) -> RuntimeCall { + RuntimeCall::Example(ExampleCall::foobar { err, start_weight, end_weight }) } #[test] @@ -219,8 +219,10 @@ fn as_derivative_handles_weight_refund() { // Full weight when ok let inner_call = call_foobar(false, start_weight, None); - let call = - Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); + let call = RuntimeCall::Utility(UtilityCall::as_derivative { + index: 0, + call: Box::new(inner_call), + }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); @@ -228,8 +230,10 @@ fn as_derivative_handles_weight_refund() { // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); - let call = - Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); + let call = RuntimeCall::Utility(UtilityCall::as_derivative { + index: 0, + call: Box::new(inner_call), + }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); @@ -238,8 +242,10 @@ fn as_derivative_handles_weight_refund() { // Full weight when err let inner_call = call_foobar(true, start_weight, None); - let call = - Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); + let call = RuntimeCall::Utility(UtilityCall::as_derivative { + index: 0, + call: Box::new(inner_call), + }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_noop!( @@ -256,8 +262,10 @@ fn as_derivative_handles_weight_refund() { // Refund weight when err let inner_call = call_foobar(true, start_weight, Some(end_weight)); - let call = - Call::Utility(UtilityCall::as_derivative { index: 0, call: Box::new(inner_call) }); + let call = RuntimeCall::Utility(UtilityCall::as_derivative { + index: 0, + call: Box::new(inner_call), + }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_noop!( @@ -281,7 +289,7 @@ fn as_derivative_filters() { Utility::as_derivative( Origin::signed(1), 1, - Box::new(Call::Balances(pallet_balances::Call::transfer_keep_alive { + Box::new(RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { dest: 2, value: 1 })), @@ -295,16 +303,25 @@ fn as_derivative_filters() { fn batch_with_root_works() { new_test_ext().execute_with(|| { let k = b"a".to_vec(); - let call = - Call::System(frame_system::Call::set_storage { items: vec![(k.clone(), k.clone())] }); + let call = RuntimeCall::System(frame_system::Call::set_storage { + items: vec![(k.clone(), k.clone())], + }); assert!(!TestBaseCallFilter::contains(&call)); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); 
assert_ok!(Utility::batch( Origin::root(), vec![ - Call::Balances(BalancesCall::force_transfer { source: 1, dest: 2, value: 5 }), - Call::Balances(BalancesCall::force_transfer { source: 1, dest: 2, value: 5 }), + RuntimeCall::Balances(BalancesCall::force_transfer { + source: 1, + dest: 2, + value: 5 + }), + RuntimeCall::Balances(BalancesCall::force_transfer { + source: 1, + dest: 2, + value: 5 + }), call, // Check filters are correctly bypassed ] )); @@ -333,7 +350,10 @@ fn batch_with_signed_filters() { new_test_ext().execute_with(|| { assert_ok!(Utility::batch( Origin::signed(1), - vec![Call::Balances(pallet_balances::Call::transfer_keep_alive { dest: 2, value: 1 })] + vec![RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { + dest: 2, + value: 1 + })] ),); System::assert_last_event( utility::Event::BatchInterrupted { @@ -363,11 +383,12 @@ fn batch_early_exit_works() { fn batch_weight_calculation_doesnt_overflow() { use sp_runtime::Perbill; new_test_ext().execute_with(|| { - let big_call = Call::System(SystemCall::fill_block { ratio: Perbill::from_percent(50) }); + let big_call = + RuntimeCall::System(SystemCall::fill_block { ratio: Perbill::from_percent(50) }); assert_eq!(big_call.get_dispatch_info().weight, Weight::MAX / 2); // 3 * 50% saturates to 100% - let batch_call = Call::Utility(crate::Call::batch { + let batch_call = RuntimeCall::Utility(crate::Call::batch { calls: vec![big_call.clone(), big_call.clone(), big_call.clone()], }); @@ -386,7 +407,7 @@ fn batch_handles_weight_refund() { // Full weight when ok let inner_call = call_foobar(false, start_weight, None); let batch_calls = vec![inner_call; batch_len as usize]; - let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); + let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); @@ -395,7 +416,7 @@ fn batch_handles_weight_refund() { // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); let batch_calls = vec![inner_call; batch_len as usize]; - let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); + let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); @@ -406,7 +427,7 @@ fn batch_handles_weight_refund() { let good_call = call_foobar(false, start_weight, None); let bad_call = call_foobar(true, start_weight, None); let batch_calls = vec![good_call, bad_call]; - let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); + let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); @@ -421,7 +442,7 @@ fn batch_handles_weight_refund() { let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call]; let batch_len = batch_calls.len() as u64; - let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); + let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); @@ -434,7 +455,7 @@ fn batch_handles_weight_refund() { let good_call = call_foobar(false, start_weight, Some(end_weight)); let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call.clone(), 
bad_call]; - let call = Call::Utility(UtilityCall::batch { calls: batch_calls }); + let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); @@ -471,7 +492,7 @@ fn batch_all_revert() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); - let batch_all_calls = Call::Utility(crate::Call::::batch_all { + let batch_all_calls = RuntimeCall::Utility(crate::Call::::batch_all { calls: vec![call_transfer(2, 5), call_transfer(2, 10), call_transfer(2, 5)], }); assert_noop!( @@ -502,7 +523,7 @@ fn batch_all_handles_weight_refund() { // Full weight when ok let inner_call = call_foobar(false, start_weight, None); let batch_calls = vec![inner_call; batch_len as usize]; - let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); + let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); @@ -511,7 +532,7 @@ fn batch_all_handles_weight_refund() { // Refund weight when ok let inner_call = call_foobar(false, start_weight, Some(end_weight)); let batch_calls = vec![inner_call; batch_len as usize]; - let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); + let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); @@ -522,7 +543,7 @@ fn batch_all_handles_weight_refund() { let good_call = call_foobar(false, start_weight, None); let bad_call = call_foobar(true, start_weight, None); let batch_calls = vec![good_call, bad_call]; - let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); + let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); @@ -534,7 +555,7 @@ fn batch_all_handles_weight_refund() { let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call]; let batch_len = batch_calls.len() as u64; - let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); + let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); @@ -544,7 +565,7 @@ fn batch_all_handles_weight_refund() { let good_call = call_foobar(false, start_weight, Some(end_weight)); let bad_call = call_foobar(true, start_weight, Some(end_weight)); let batch_calls = vec![good_call, bad_call.clone(), bad_call]; - let call = Call::Utility(UtilityCall::batch_all { calls: batch_calls }); + let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); @@ -559,7 +580,7 @@ fn batch_all_handles_weight_refund() { #[test] fn batch_all_does_not_nest() { new_test_ext().execute_with(|| { - let batch_all = Call::Utility(UtilityCall::batch_all { + let batch_all = RuntimeCall::Utility(UtilityCall::batch_all { calls: vec![call_transfer(2, 1), call_transfer(2, 1), call_transfer(2, 1)], }); @@ -582,7 +603,7 @@ fn batch_all_does_not_nest() { // And for those who want to get a 
little fancy, we check that the filter persists across // other kinds of dispatch wrapping functions... in this case // `batch_all(batch(batch_all(..)))` - let batch_nested = Call::Utility(UtilityCall::batch { calls: vec![batch_all] }); + let batch_nested = RuntimeCall::Utility(UtilityCall::batch { calls: vec![batch_all] }); // Batch will end with `Ok`, but does not actually execute as we can see from the event // and balances. assert_ok!(Utility::batch_all(Origin::signed(1), vec![batch_nested])); @@ -601,7 +622,7 @@ fn batch_all_does_not_nest() { #[test] fn batch_limit() { new_test_ext().execute_with(|| { - let calls = vec![Call::System(SystemCall::remark { remark: vec![] }); 40_000]; + let calls = vec![RuntimeCall::System(SystemCall::remark { remark: vec![] }); 40_000]; assert_noop!(Utility::batch(Origin::signed(1), calls.clone()), Error::::TooManyCalls); assert_noop!(Utility::batch_all(Origin::signed(1), calls), Error::::TooManyCalls); }); diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 8ac625e775e4f..1ca8d41f9a41c 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -155,7 +155,7 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The currency trait. type Currency: LockableCurrency; diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs index 8875404f03fa2..cc4a3523459d7 100644 --- a/frame/vesting/src/mock.rs +++ b/frame/vesting/src/mock.rs @@ -55,9 +55,9 @@ impl frame_system::Config for Test { type BlockLength = (); type BlockNumber = u64; type BlockWeights = (); - type Call = Call; + type RuntimeCall = RuntimeCall; type DbWeight = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Hash = H256; type Hashing = BlakeTwo256; type Header = Header; @@ -78,7 +78,7 @@ impl pallet_balances::Config for Test { type AccountStore = System; type Balance = u64; type DustRemoval = (); - type Event = Event; + type RuntimeEvent = RuntimeEvent; type ExistentialDeposit = ExistentialDeposit; type MaxLocks = ConstU32<10>; type MaxReserves = (); @@ -92,7 +92,7 @@ parameter_types! { impl Config for Test { type BlockNumberToBalance = Identity; type Currency = Balances; - type Event = Event; + type RuntimeEvent = RuntimeEvent; const MAX_VESTING_SCHEDULES: u32 = 3; type MinVestedTransfer = MinVestedTransfer; type WeightInfo = (); diff --git a/frame/whitelist/src/benchmarking.rs b/frame/whitelist/src/benchmarking.rs index cafd1668819dd..9e55d38fd8140 100644 --- a/frame/whitelist/src/benchmarking.rs +++ b/frame/whitelist/src/benchmarking.rs @@ -71,7 +71,7 @@ benchmarks! { let remark_len = >::MaxSize::get() - 10; let remark = sp_std::vec![1u8; remark_len as usize]; - let call: ::Call = frame_system::Call::remark { remark }.into(); + let call: ::RuntimeCall = frame_system::Call::remark { remark }.into(); let call_weight = call.get_dispatch_info().weight; let encoded_call = call.encode(); let call_hash = T::Hashing::hash(&encoded_call[..]); @@ -100,7 +100,7 @@ benchmarks! 
{ let origin = T::DispatchWhitelistedOrigin::successful_origin(); let remark = sp_std::vec![1u8; n as usize]; - let call: ::Call = frame_system::Call::remark { remark }.into(); + let call: ::RuntimeCall = frame_system::Call::remark { remark }.into(); let call_hash = T::Hashing::hash_of(&call); Pallet::::whitelist_call(origin.clone(), call_hash) diff --git a/frame/whitelist/src/lib.rs b/frame/whitelist/src/lib.rs index a3e02b6caf740..19e53f2491614 100644 --- a/frame/whitelist/src/lib.rs +++ b/frame/whitelist/src/lib.rs @@ -61,10 +61,10 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The overarching call type. - type Call: IsType<::Call> + type RuntimeCall: IsType<::RuntimeCall> + Dispatchable + GetDispatchInfo + FullCodec @@ -165,7 +165,7 @@ pub mod pallet { let call = T::PreimageProvider::get_preimage(&call_hash) .ok_or(Error::::UnavailablePreImage)?; - let call = ::Call::decode_all_with_depth_limit( + let call = ::RuntimeCall::decode_all_with_depth_limit( sp_api::MAX_EXTRINSIC_DEPTH, &mut &call[..], ) @@ -191,7 +191,7 @@ pub mod pallet { })] pub fn dispatch_whitelisted_call_with_preimage( origin: OriginFor, - call: Box<::Call>, + call: Box<::RuntimeCall>, ) -> DispatchResultWithPostInfo { T::DispatchWhitelistedOrigin::ensure_origin(origin)?; @@ -216,7 +216,7 @@ impl Pallet { /// Clean whitelisting/preimage and dispatch call. /// /// Return the call actual weight of the dispatched call if there is some. - fn clean_and_dispatch(call_hash: T::Hash, call: ::Call) -> Option { + fn clean_and_dispatch(call_hash: T::Hash, call: ::RuntimeCall) -> Option { WhitelistedCall::::remove(call_hash); T::PreimageProvider::unrequest_preimage(&call_hash); diff --git a/frame/whitelist/src/mock.rs b/frame/whitelist/src/mock.rs index b18236099d445..b6eac316bc113 100644 --- a/frame/whitelist/src/mock.rs +++ b/frame/whitelist/src/mock.rs @@ -62,12 +62,12 @@ impl frame_system::Config for Test { type Index = u64; type BlockNumber = u64; type Hash = H256; - type Call = Call; + type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<250>; type Version = (); type PalletInfo = PalletInfo; @@ -85,7 +85,7 @@ impl pallet_balances::Config for Test { type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u64; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type DustRemoval = (); type ExistentialDeposit = ConstU64<1>; type AccountStore = System; @@ -93,7 +93,7 @@ impl pallet_balances::Config for Test { } impl pallet_preimage::Config for Test { - type Event = Event; + type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; type MaxSize = ConstU32<{ 4096 * 1024 }>; // PreimageMaxSize Taken from Polkadot as reference. 
@@ -103,8 +103,8 @@ impl pallet_preimage::Config for Test { } impl pallet_whitelist::Config for Test { - type Event = Event; - type Call = Call; + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; type WhitelistOrigin = EnsureRoot; type DispatchWhitelistedOrigin = EnsureRoot; type PreimageProvider = Preimage; diff --git a/frame/whitelist/src/tests.rs b/frame/whitelist/src/tests.rs index 24974bd726d43..ebb111149fd42 100644 --- a/frame/whitelist/src/tests.rs +++ b/frame/whitelist/src/tests.rs @@ -27,7 +27,7 @@ use sp_runtime::{traits::Hash, DispatchError}; #[test] fn test_whitelist_call_and_remove() { new_test_ext().execute_with(|| { - let call = Call::System(frame_system::Call::remark { remark: vec![] }); + let call = RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); let encoded_call = call.encode(); let call_hash = ::Hashing::hash(&encoded_call[..]); @@ -69,7 +69,7 @@ fn test_whitelist_call_and_remove() { #[test] fn test_whitelist_call_and_execute() { new_test_ext().execute_with(|| { - let call = Call::System(frame_system::Call::remark_with_event { remark: vec![1] }); + let call = RuntimeCall::System(frame_system::Call::remark_with_event { remark: vec![1] }); let call_weight = call.get_dispatch_info().weight; let encoded_call = call.encode(); let call_hash = ::Hashing::hash(&encoded_call[..]); @@ -118,7 +118,7 @@ fn test_whitelist_call_and_execute() { #[test] fn test_whitelist_call_and_execute_failing_call() { new_test_ext().execute_with(|| { - let call = Call::Whitelist(crate::Call::dispatch_whitelisted_call { + let call = RuntimeCall::Whitelist(crate::Call::dispatch_whitelisted_call { call_hash: Default::default(), call_weight_witness: Weight::zero(), }); @@ -137,8 +137,9 @@ fn test_whitelist_call_and_execute_failing_call() { #[test] fn test_whitelist_call_and_execute_without_note_preimage() { new_test_ext().execute_with(|| { - let call = - Box::new(Call::System(frame_system::Call::remark_with_event { remark: vec![1] })); + let call = Box::new(RuntimeCall::System(frame_system::Call::remark_with_event { + remark: vec![1], + })); let call_hash = ::Hashing::hash_of(&call); assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); @@ -161,7 +162,7 @@ fn test_whitelist_call_and_execute_without_note_preimage() { #[test] fn test_whitelist_call_and_execute_decode_consumes_all() { new_test_ext().execute_with(|| { - let call = Call::System(frame_system::Call::remark_with_event { remark: vec![1] }); + let call = RuntimeCall::System(frame_system::Call::remark_with_event { remark: vec![1] }); let call_weight = call.get_dispatch_info().weight; let mut call = call.encode(); // Appending something does not make the encoded call invalid. 
diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index bb526010dddd5..4ed159fd7e2ed 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -455,7 +455,7 @@ impl From for Result, Origin> { } impl frame_support::traits::OriginTrait for Origin { - type Call = ::Call; + type Call = ::RuntimeCall; type PalletsOrigin = Origin; type AccountId = ::AccountId; @@ -501,9 +501,9 @@ impl frame_support::traits::OriginTrait for Origin { } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub struct Event; +pub struct RuntimeEvent; -impl From> for Event { +impl From> for RuntimeEvent { fn from(_evt: frame_system::Event) -> Self { unimplemented!("Not required in tests!") } @@ -585,7 +585,7 @@ impl frame_system::Config for Runtime { type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type Origin = Origin; - type Call = Extrinsic; + type RuntimeCall = Extrinsic; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -593,7 +593,7 @@ impl frame_system::Config for Runtime { type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; + type RuntimeEvent = RuntimeEvent; type BlockHashCount = ConstU64<2400>; type DbWeight = (); type Version = (); From 379813cafc7aa60deeda79ceccdcc31d83d47876 Mon Sep 17 00:00:00 2001 From: Dan Shields <35669742+NukeManDan@users.noreply.github.com> Date: Mon, 12 Sep 2022 18:01:05 -0600 Subject: [PATCH 123/348] Update README (#12247) * Update README - rm Matrix chat banner reads as `inaccessible` and the channel is read only (not chat) - add SE banner - update links & call to action to SE * Update README.md * Update README.md * Update README.md --- README.md | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index c609641af7ce2..d7e6650290fba 100644 --- a/README.md +++ b/README.md @@ -1,20 +1,24 @@ -# Substrate · [![GitHub license](https://img.shields.io/badge/license-GPL3%2FApache2-blue)](#LICENSE) [![GitLab Status](https://gitlab.parity.io/parity/substrate/badges/master/pipeline.svg)](https://gitlab.parity.io/parity/substrate/pipelines) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](docs/CONTRIBUTING.adoc) [![Matrix](https://img.shields.io/matrix/substrate-technical:matrix.org)](https://matrix.to/#/#substrate-technical:matrix.org) - +# Substrate · [![GitHub license](https://img.shields.io/badge/license-GPL3%2FApache2-blue)](#LICENSE) [![GitLab Status](https://gitlab.parity.io/parity/substrate/badges/master/pipeline.svg)](https://gitlab.parity.io/parity/substrate/pipelines) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](docs/CONTRIBUTING.adoc) [![Stack Exchange](https://img.shields.io/badge/Substrate-Community%20&%20Support-24CC85?logo=stackexchange)](https://substrate.stackexchange.com/)

Substrate is a next-generation framework for blockchain innovation 🚀. -## Trying it out +## Getting Started + +Head to [docs.substrate.io](https://docs.substrate.io) and follow the [installation](https://docs.substrate.io/install/) instructions. +Then try out one of the [tutorials](https://docs.substrate.io/tutorials/). + +## Community & Support -Simply go to [docs.substrate.io](https://docs.substrate.io) and follow the -[installation](https://docs.substrate.io/main-docs/install/) instructions. You can -also try out one of the [tutorials](https://docs.substrate.io/tutorials/). +Join the highly active and supportive community on the [Substrate Stack Exchange](https://substrate.stackexchange.com/) to ask questions about use and problems you run into using this software. +Please do report bugs and [issues here](https://github.com/paritytech/substrate/issues) for anything you suspect requires action in the source. ## Contributions & Code of Conduct -Please follow the contributions guidelines as outlined in [`docs/CONTRIBUTING.adoc`](docs/CONTRIBUTING.adoc). In all communications and contributions, this project follows the [Contributor Covenant Code of Conduct](docs/CODE_OF_CONDUCT.md). +Please follow the contributions guidelines as outlined in [`docs/CONTRIBUTING.adoc`](docs/CONTRIBUTING.adoc). +In all communications and contributions, this project follows the [Contributor Covenant Code of Conduct](docs/CODE_OF_CONDUCT.md). ## Security From 5daa20c69049bcfa6df290124e5b6a9da88b71c3 Mon Sep 17 00:00:00 2001 From: girazoki Date: Tue, 13 Sep 2022 02:12:09 +0200 Subject: [PATCH 124/348] Add inspect trait for asset roles (#11738) Co-authored-by: Shawn Tabrizi --- frame/assets/src/impl_fungibles.rs | 20 +++++++++++++ frame/assets/src/tests.rs | 22 ++++++++++++++ frame/support/src/traits/tokens/fungibles.rs | 1 + .../src/traits/tokens/fungibles/roles.rs | 29 +++++++++++++++++++ 4 files changed, 72 insertions(+) create mode 100644 frame/support/src/traits/tokens/fungibles/roles.rs diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs index 6b263bc0c7bef..842ee5c102c1d 100644 --- a/frame/assets/src/impl_fungibles.rs +++ b/frame/assets/src/impl_fungibles.rs @@ -263,3 +263,23 @@ impl, I: 'static> fungibles::approvals::Mutate<: Self::do_transfer_approved(asset, owner, delegate, dest, amount) } } + +impl, I: 'static> fungibles::roles::Inspect<::AccountId> + for Pallet +{ + fn owner(asset: T::AssetId) -> Option<::AccountId> { + Asset::::get(asset).map(|x| x.owner) + } + + fn issuer(asset: T::AssetId) -> Option<::AccountId> { + Asset::::get(asset).map(|x| x.issuer) + } + + fn admin(asset: T::AssetId) -> Option<::AccountId> { + Asset::::get(asset).map(|x| x.admin) + } + + fn freezer(asset: T::AssetId) -> Option<::AccountId> { + Asset::::get(asset).map(|x| x.freezer) + } +} diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index 50ab04111edff..598b6049b3d57 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -977,3 +977,25 @@ fn transfer_large_asset() { assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, amount - 1)); }) } + +#[test] +fn querying_roles_should_work() { + new_test_ext().execute_with(|| { + use frame_support::traits::tokens::fungibles::roles::Inspect; + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::set_team( + Origin::signed(1), + 0, + // Issuer + 2, + // Admin + 3, + // Freezer + 4, + )); + assert_eq!(Assets::owner(0), Some(1)); + assert_eq!(Assets::issuer(0), Some(2)); +
assert_eq!(Assets::admin(0), Some(3)); + assert_eq!(Assets::freezer(0), Some(4)); + }); +} diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs index dab50d56962f6..e4108b7f80a98 100644 --- a/frame/support/src/traits/tokens/fungibles.rs +++ b/frame/support/src/traits/tokens/fungibles.rs @@ -31,6 +31,7 @@ pub mod metadata; pub use balanced::{Balanced, Unbalanced}; mod imbalance; pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; +pub mod roles; /// Trait for providing balance-inspection access to a set of named fungible assets. pub trait Inspect { diff --git a/frame/support/src/traits/tokens/fungibles/roles.rs b/frame/support/src/traits/tokens/fungibles/roles.rs new file mode 100644 index 0000000000000..18fd1cc801210 --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/roles.rs @@ -0,0 +1,29 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Inspect traits for Asset roles + +pub trait Inspect: super::Inspect { + // Get owner for an AssetId. + fn owner(asset: Self::AssetId) -> Option; + // Get issuer for an AssetId. + fn issuer(asset: Self::AssetId) -> Option; + // Get admin for an AssetId. + fn admin(asset: Self::AssetId) -> Option; + // Get freezer for an AssetId. + fn freezer(asset: Self::AssetId) -> Option; +} From c1103fd803a974ee4d4098eb631a8839f6cb8618 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Tue, 13 Sep 2022 11:02:31 +0300 Subject: [PATCH 125/348] Move bitswap to its own crate (#12242) * Introduce `sc-network-bitswap` Move the bitswap protocol out of `sc-network` to its own crate. 
* Improve test coverage * Remove mention of bitswap from `sc-network` * Fix documentation * Fix clippy and remove the void dependency * Remove unneeded trait bound and bump prost to 0.11 --- Cargo.lock | 152 +++-- Cargo.toml | 1 + client/network/Cargo.toml | 4 - client/network/bitswap/Cargo.toml | 40 ++ client/network/{ => bitswap}/build.rs | 2 +- client/network/bitswap/src/lib.rs | 524 ++++++++++++++++++ client/network/{ => bitswap}/src/schema.rs | 2 +- .../src/schema/bitswap.v1.2.0.proto | 0 client/network/src/behaviour.rs | 18 +- client/network/src/bitswap.rs | 416 -------------- client/network/src/config.rs | 8 +- client/network/src/lib.rs | 2 - client/network/src/service.rs | 11 +- client/network/src/service/tests.rs | 2 +- client/network/test/src/lib.rs | 2 +- client/service/Cargo.toml | 1 + client/service/src/builder.rs | 17 +- 17 files changed, 708 insertions(+), 494 deletions(-) create mode 100644 client/network/bitswap/Cargo.toml rename client/network/{ => bitswap}/build.rs (56%) create mode 100644 client/network/bitswap/src/lib.rs rename client/network/{ => bitswap}/src/schema.rs (97%) rename client/network/{ => bitswap}/src/schema/bitswap.v1.2.0.proto (100%) delete mode 100644 client/network/src/bitswap.rs diff --git a/Cargo.lock b/Cargo.lock index 335e8b3526792..08915196d5374 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -927,9 +927,9 @@ dependencies = [ [[package]] name = "cid" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a52cffa791ce5cf490ac3b2d6df970dc04f931b04e727be3c3e220e17164dfc4" +checksum = "f6ed9c8b2d17acb8110c46f1da5bf4a696d745e1474a16db0cd2b49cd0249bf2" dependencies = [ "core2", "multibase", @@ -3591,8 +3591,8 @@ dependencies = [ "libp2p-request-response", "libp2p-swarm", "log", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "rand 0.8.5", ] @@ -3618,8 +3618,8 @@ dependencies = [ "multistream-select", "parking_lot 0.12.1", "pin-project", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "rand 0.8.5", "ring", "rw-stream-sink", @@ -3669,8 +3669,8 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "rand 0.7.3", "smallvec", ] @@ -3693,8 +3693,8 @@ dependencies = [ "libp2p-swarm", "log", "prometheus-client", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -3716,8 +3716,8 @@ dependencies = [ "libp2p-swarm", "log", "lru", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "prost-codec", "smallvec", "thiserror", @@ -3741,8 +3741,8 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "rand 0.7.3", "sha2 0.10.2", "smallvec", @@ -3819,8 +3819,8 @@ dependencies = [ "lazy_static", "libp2p-core", "log", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3856,8 +3856,8 @@ dependencies = [ "futures", "libp2p-core", "log", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "unsigned-varint", "void", ] @@ -3892,8 +3892,8 @@ dependencies = [ "libp2p-swarm", "log", "pin-project", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "prost-codec", "rand 0.8.5", "smallvec", @@ -3916,8 +3916,8 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "rand 0.8.5", "sha2 0.10.2", 
"thiserror", @@ -7061,7 +7061,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc03e116981ff7d8da8e5c220e374587b98d294af7ba7dd7fda761158f00086f" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.10.1", +] + +[[package]] +name = "prost" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" +dependencies = [ + "bytes", + "prost-derive 0.11.0", ] [[package]] @@ -7079,8 +7089,28 @@ dependencies = [ "log", "multimap", "petgraph", - "prost", - "prost-types", + "prost 0.10.3", + "prost-types 0.10.1", + "regex", + "tempfile", + "which", +] + +[[package]] +name = "prost-build" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" +dependencies = [ + "bytes", + "heck", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prost 0.11.0", + "prost-types 0.11.1", "regex", "tempfile", "which", @@ -7094,7 +7124,7 @@ checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" dependencies = [ "asynchronous-codec", "bytes", - "prost", + "prost 0.10.3", "thiserror", "unsigned-varint", ] @@ -7112,6 +7142,19 @@ dependencies = [ "syn", ] +[[package]] +name = "prost-derive" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "prost-types" version = "0.10.1" @@ -7119,7 +7162,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" dependencies = [ "bytes", - "prost", + "prost 0.10.3", +] + +[[package]] +name = "prost-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" +dependencies = [ + "bytes", + "prost 0.11.0", ] [[package]] @@ -7728,8 +7781,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "quickcheck", "rand 0.7.3", "sc-client-api", @@ -8384,8 +8437,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", "pin-project", - "prost", - "prost-build", + "prost 0.10.3", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8411,10 +8463,35 @@ dependencies = [ "tempfile", "thiserror", "unsigned-varint", - "void", "zeroize", ] +[[package]] +name = "sc-network-bitswap" +version = "0.10.0-dev" +dependencies = [ + "cid", + "futures", + "libp2p", + "log", + "prost 0.11.0", + "prost-build 0.11.1", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-network-common", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-runtime", + "substrate-test-runtime", + "substrate-test-runtime-client", + "thiserror", + "tokio", + "unsigned-varint", + "void", +] + [[package]] name = "sc-network-common" version = "0.10.0-dev" @@ -8425,7 +8502,7 @@ dependencies = [ "futures", "libp2p", "parity-scale-codec", - "prost-build", + "prost-build 0.10.4", "sc-consensus", "sc-peerset", "serde", @@ -8466,8 +8543,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "sc-client-api", 
"sc-network-common", "sc-peerset", @@ -8488,8 +8565,8 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "prost", - "prost-build", + "prost 0.10.3", + "prost-build 0.10.4", "quickcheck", "sc-block-builder", "sc-client-api", @@ -8712,6 +8789,7 @@ dependencies = [ "sc-informant", "sc-keystore", "sc-network", + "sc-network-bitswap", "sc-network-common", "sc-network-light", "sc-network-sync", diff --git a/Cargo.toml b/Cargo.toml index e2907716ca9f2..9c71dbdf4b52c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,6 +43,7 @@ members = [ "client/keystore", "client/network", "client/network-gossip", + "client/network/bitswap", "client/network/common", "client/network/light", "client/network/sync", diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 82f77fa44450f..ae115220d6843 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -13,9 +13,6 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -[build-dependencies] -prost-build = "0.10" - [dependencies] async-trait = "0.1" asynchronous-codec = "0.6" @@ -43,7 +40,6 @@ serde_json = "1.0.85" smallvec = "1.8.0" thiserror = "1.0" unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } -void = "1.0.2" zeroize = "1.4.3" fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/network/bitswap/Cargo.toml b/client/network/bitswap/Cargo.toml new file mode 100644 index 0000000000000..9fa34aab17dfb --- /dev/null +++ b/client/network/bitswap/Cargo.toml @@ -0,0 +1,40 @@ +[package] +description = "Substrate bitswap protocol" +name = "sc-network-bitswap" +version = "0.10.0-dev" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors = ["Parity Technologies "] +edition = "2021" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-network-bitswap" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[build-dependencies] +prost-build = "0.11" + +[dependencies] +cid = "0.8.6" +futures = "0.3.21" +libp2p = "0.46.1" +log = "0.4.17" +prost = "0.11" +thiserror = "1.0" +unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } +void = "1.0.2" +sc-client-api = { version = "4.0.0-dev", path = "../../api" } +sc-network-common = { version = "0.10.0-dev", path = "../common" } +sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } + +[dev-dependencies] +tokio = { version = "1", features = ["full"] } +sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } +sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } +sp-core = { version = "6.0.0", path = "../../../primitives/core" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/network/build.rs b/client/network/bitswap/build.rs similarity index 56% rename from client/network/build.rs rename to client/network/bitswap/build.rs index 671277230a774..bd6222d546851 100644 --- a/client/network/build.rs +++ 
b/client/network/bitswap/build.rs @@ -1,4 +1,4 @@ -const PROTOS: &[&str] = &["src/schema/bitswap.v1.2.0.proto"]; +const PROTOS: &[&str] = &["bitswap.v1.2.0.proto"]; fn main() { prost_build::compile_protos(PROTOS, &["src/schema"]).unwrap(); diff --git a/client/network/bitswap/src/lib.rs b/client/network/bitswap/src/lib.rs new file mode 100644 index 0000000000000..aba7f40ce632f --- /dev/null +++ b/client/network/bitswap/src/lib.rs @@ -0,0 +1,524 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Bitswap server for Substrate. +//! +//! Allows querying transactions by hash over standard bitswap protocol +//! Only supports bitswap 1.2.0. +//! CID is expected to reference 256-bit Blake2b transaction hash. + +use cid::{self, Version}; +use futures::{channel::mpsc, StreamExt}; +use libp2p::core::PeerId; +use log::{debug, error, trace}; +use prost::Message; +use sc_client_api::BlockBackend; +use sc_network_common::{ + protocol::ProtocolName, + request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, +}; +use schema::bitswap::{ + message::{wantlist::WantType, Block as MessageBlock, BlockPresence, BlockPresenceType}, + Message as BitswapMessage, +}; +use sp_runtime::traits::Block as BlockT; +use std::{io, sync::Arc, time::Duration}; +use unsigned_varint::encode as varint_encode; + +mod schema; + +const LOG_TARGET: &str = "bitswap"; + +// Undocumented, but according to JS the bitswap messages have a max size of 512*1024 bytes +// https://github.com/ipfs/js-ipfs-bitswap/blob/ +// d8f80408aadab94c962f6b88f343eb9f39fa0fcc/src/decision-engine/index.js#L16 +// We set it to the same value as max substrate protocol message +const MAX_PACKET_SIZE: u64 = 16 * 1024 * 1024; + +/// Max number of queued responses before denying requests. +const MAX_REQUEST_QUEUE: usize = 20; + +/// Max number of blocks per wantlist +const MAX_WANTED_BLOCKS: usize = 16; + +/// Bitswap protocol name +const PROTOCOL_NAME: &'static str = "/ipfs/bitswap/1.2.0"; + +/// Prefix represents all metadata of a CID, without the actual content. +#[derive(PartialEq, Eq, Clone, Debug)] +struct Prefix { + /// The version of CID. + pub version: Version, + /// The codec of CID. + pub codec: u64, + /// The multihash type of CID. + pub mh_type: u64, + /// The multihash length of CID. + pub mh_len: u8, +} + +impl Prefix { + /// Convert the prefix to encoded bytes. 
+ pub fn to_bytes(&self) -> Vec { + let mut res = Vec::with_capacity(4); + let mut buf = varint_encode::u64_buffer(); + let version = varint_encode::u64(self.version.into(), &mut buf); + res.extend_from_slice(version); + let mut buf = varint_encode::u64_buffer(); + let codec = varint_encode::u64(self.codec, &mut buf); + res.extend_from_slice(codec); + let mut buf = varint_encode::u64_buffer(); + let mh_type = varint_encode::u64(self.mh_type, &mut buf); + res.extend_from_slice(mh_type); + let mut buf = varint_encode::u64_buffer(); + let mh_len = varint_encode::u64(self.mh_len as u64, &mut buf); + res.extend_from_slice(mh_len); + res + } +} + +/// Bitswap request handler +pub struct BitswapRequestHandler { + client: Arc + Send + Sync>, + request_receiver: mpsc::Receiver, +} + +impl BitswapRequestHandler { + /// Create a new [`BitswapRequestHandler`]. + pub fn new(client: Arc + Send + Sync>) -> (Self, ProtocolConfig) { + let (tx, request_receiver) = mpsc::channel(MAX_REQUEST_QUEUE); + + let config = ProtocolConfig { + name: ProtocolName::from(PROTOCOL_NAME), + fallback_names: vec![], + max_request_size: MAX_PACKET_SIZE, + max_response_size: MAX_PACKET_SIZE, + request_timeout: Duration::from_secs(15), + inbound_queue: Some(tx), + }; + + (Self { client, request_receiver }, config) + } + + /// Run [`BitswapRequestHandler`]. + pub async fn run(mut self) { + while let Some(request) = self.request_receiver.next().await { + let IncomingRequest { peer, payload, pending_response } = request; + + match self.handle_message(&peer, &payload) { + Ok(response) => { + let response = OutgoingResponse { + result: Ok(response), + reputation_changes: Vec::new(), + sent_feedback: None, + }; + + match pending_response.send(response) { + Ok(()) => + trace!(target: LOG_TARGET, "Handled bitswap request from {peer}.",), + Err(_) => debug!( + target: LOG_TARGET, + "Failed to handle light client request from {peer}: {}", + BitswapError::SendResponse, + ), + } + }, + Err(err) => { + error!(target: LOG_TARGET, "Failed to process request from {peer}: {err}"); + + // TODO: adjust reputation? 
+ + let response = OutgoingResponse { + result: Err(()), + reputation_changes: vec![], + sent_feedback: None, + }; + + if pending_response.send(response).is_err() { + debug!( + target: LOG_TARGET, + "Failed to handle bitswap request from {peer}: {}", + BitswapError::SendResponse, + ); + } + }, + } + } + } + + /// Handle received Bitswap request + fn handle_message( + &mut self, + peer: &PeerId, + payload: &Vec, + ) -> Result, BitswapError> { + let request = schema::bitswap::Message::decode(&payload[..])?; + + trace!(target: LOG_TARGET, "Received request: {:?} from {}", request, peer); + + let mut response = BitswapMessage::default(); + + let wantlist = match request.wantlist { + Some(wantlist) => wantlist, + None => { + debug!(target: LOG_TARGET, "Unexpected bitswap message from {}", peer); + return Err(BitswapError::InvalidWantList) + }, + }; + + if wantlist.entries.len() > MAX_WANTED_BLOCKS { + trace!(target: LOG_TARGET, "Ignored request: too many entries"); + return Err(BitswapError::TooManyEntries) + } + + for entry in wantlist.entries { + let cid = match cid::Cid::read_bytes(entry.block.as_slice()) { + Ok(cid) => cid, + Err(e) => { + trace!(target: LOG_TARGET, "Bad CID {:?}: {:?}", entry.block, e); + continue + }, + }; + + if cid.version() != cid::Version::V1 || + cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) || + cid.hash().size() != 32 + { + debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid); + continue + } + + let mut hash = B::Hash::default(); + hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]); + let transaction = match self.client.indexed_transaction(&hash) { + Ok(ex) => ex, + Err(e) => { + error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e); + None + }, + }; + + match transaction { + Some(transaction) => { + trace!(target: LOG_TARGET, "Found CID {:?}, hash {:?}", cid, hash); + + if entry.want_type == WantType::Block as i32 { + let prefix = Prefix { + version: cid.version(), + codec: cid.codec(), + mh_type: cid.hash().code(), + mh_len: cid.hash().size(), + }; + response + .payload + .push(MessageBlock { prefix: prefix.to_bytes(), data: transaction }); + } else { + response.block_presences.push(BlockPresence { + r#type: BlockPresenceType::Have as i32, + cid: cid.to_bytes(), + }); + } + }, + None => { + trace!(target: LOG_TARGET, "Missing CID {:?}, hash {:?}", cid, hash); + + if entry.send_dont_have { + response.block_presences.push(BlockPresence { + r#type: BlockPresenceType::DontHave as i32, + cid: cid.to_bytes(), + }); + } + }, + } + } + + Ok(response.encode_to_vec()) + } +} + +/// Bitswap protocol error. +#[derive(Debug, thiserror::Error)] +pub enum BitswapError { + /// Protobuf decoding error. + #[error("Failed to decode request: {0}.")] + DecodeProto(#[from] prost::DecodeError), + + /// Protobuf encoding error. + #[error("Failed to encode response: {0}.")] + EncodeProto(#[from] prost::EncodeError), + + /// Client backend error. + #[error(transparent)] + Client(#[from] sp_blockchain::Error), + + /// Error parsing CID + #[error(transparent)] + BadCid(#[from] cid::Error), + + /// Packet read error. + #[error(transparent)] + Read(#[from] io::Error), + + /// Error sending response. + #[error("Failed to send response.")] + SendResponse, + + /// Message doesn't have a WANT list. + #[error("Invalid WANT list.")] + InvalidWantList, + + /// Too many blocks requested. 
+ #[error("Too many block entries in the request.")] + TooManyEntries, +} + +#[cfg(test)] +mod tests { + use super::*; + use futures::{channel::oneshot, SinkExt}; + use sc_block_builder::BlockBuilderProvider; + use schema::bitswap::{ + message::{wantlist::Entry, Wantlist}, + Message as BitswapMessage, + }; + use sp_consensus::BlockOrigin; + use sp_runtime::codec::Encode; + use substrate_test_runtime::Extrinsic; + use substrate_test_runtime_client::{self, prelude::*, TestClientBuilder}; + + #[tokio::test] + async fn undecodeable_message() { + let client = substrate_test_runtime_client::new(); + let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client)); + + tokio::spawn(async move { bitswap.run().await }); + + let (tx, rx) = oneshot::channel(); + config + .inbound_queue + .unwrap() + .send(IncomingRequest { + peer: PeerId::random(), + payload: vec![0x13, 0x37, 0x13, 0x38], + pending_response: tx, + }) + .await + .unwrap(); + + if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { + assert_eq!(result, Err(())); + assert_eq!(reputation_changes, Vec::new()); + assert!(sent_feedback.is_none()); + } else { + panic!("invalid event received"); + } + } + + #[tokio::test] + async fn empty_want_list() { + let client = substrate_test_runtime_client::new(); + let (bitswap, mut config) = BitswapRequestHandler::new(Arc::new(client)); + + tokio::spawn(async move { bitswap.run().await }); + + let (tx, rx) = oneshot::channel(); + config + .inbound_queue + .as_mut() + .unwrap() + .send(IncomingRequest { + peer: PeerId::random(), + payload: BitswapMessage { wantlist: None, ..Default::default() }.encode_to_vec(), + pending_response: tx, + }) + .await + .unwrap(); + + if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { + assert_eq!(result, Err(())); + assert_eq!(reputation_changes, Vec::new()); + assert!(sent_feedback.is_none()); + } else { + panic!("invalid event received"); + } + + // Empty WANT list should not cause an error + let (tx, rx) = oneshot::channel(); + config + .inbound_queue + .unwrap() + .send(IncomingRequest { + peer: PeerId::random(), + payload: BitswapMessage { + wantlist: Some(Default::default()), + ..Default::default() + } + .encode_to_vec(), + pending_response: tx, + }) + .await + .unwrap(); + + if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { + assert_eq!(result, Ok(BitswapMessage::default().encode_to_vec())); + assert_eq!(reputation_changes, Vec::new()); + assert!(sent_feedback.is_none()); + } else { + panic!("invalid event received"); + } + } + + #[tokio::test] + async fn too_long_want_list() { + let client = substrate_test_runtime_client::new(); + let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client)); + + tokio::spawn(async move { bitswap.run().await }); + + let (tx, rx) = oneshot::channel(); + config + .inbound_queue + .unwrap() + .send(IncomingRequest { + peer: PeerId::random(), + payload: BitswapMessage { + wantlist: Some(Wantlist { + entries: (0..MAX_WANTED_BLOCKS + 1) + .map(|_| Entry::default()) + .collect::>(), + full: false, + }), + ..Default::default() + } + .encode_to_vec(), + pending_response: tx, + }) + .await + .unwrap(); + + if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { + assert_eq!(result, Err(())); + assert_eq!(reputation_changes, Vec::new()); + assert!(sent_feedback.is_none()); + } else { + panic!("invalid event received"); + } + } + + #[tokio::test] + async fn transaction_not_found() { + let client 
= TestClientBuilder::with_tx_storage(u32::MAX).build(); + + let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client)); + tokio::spawn(async move { bitswap.run().await }); + + let (tx, rx) = oneshot::channel(); + config + .inbound_queue + .unwrap() + .send(IncomingRequest { + peer: PeerId::random(), + payload: BitswapMessage { + wantlist: Some(Wantlist { + entries: vec![Entry { + block: cid::Cid::new_v1( + 0x70, + cid::multihash::Multihash::wrap( + u64::from(cid::multihash::Code::Blake2b256), + &[0u8; 32], + ) + .unwrap(), + ) + .to_bytes(), + ..Default::default() + }], + full: false, + }), + ..Default::default() + } + .encode_to_vec(), + pending_response: tx, + }) + .await + .unwrap(); + + if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { + assert_eq!(result, Ok(vec![])); + assert_eq!(reputation_changes, Vec::new()); + assert!(sent_feedback.is_none()); + } else { + panic!("invalid event received"); + } + } + + #[tokio::test] + async fn transaction_found() { + let mut client = TestClientBuilder::with_tx_storage(u32::MAX).build(); + let mut block_builder = client.new_block(Default::default()).unwrap(); + + let ext = Extrinsic::Store(vec![0x13, 0x37, 0x13, 0x38]); + + block_builder.push(ext.clone()).unwrap(); + let block = block_builder.build().unwrap().block; + + client.import(BlockOrigin::File, block).await.unwrap(); + + let (bitswap, config) = BitswapRequestHandler::new(Arc::new(client)); + + tokio::spawn(async move { bitswap.run().await }); + + let (tx, rx) = oneshot::channel(); + config + .inbound_queue + .unwrap() + .send(IncomingRequest { + peer: PeerId::random(), + payload: BitswapMessage { + wantlist: Some(Wantlist { + entries: vec![Entry { + block: cid::Cid::new_v1( + 0x70, + cid::multihash::Multihash::wrap( + u64::from(cid::multihash::Code::Blake2b256), + &sp_core::hashing::blake2_256(&ext.encode()[2..]), + ) + .unwrap(), + ) + .to_bytes(), + ..Default::default() + }], + full: false, + }), + ..Default::default() + } + .encode_to_vec(), + pending_response: tx, + }) + .await + .unwrap(); + + if let Ok(OutgoingResponse { result, reputation_changes, sent_feedback }) = rx.await { + assert_eq!(reputation_changes, Vec::new()); + assert!(sent_feedback.is_none()); + + let response = + schema::bitswap::Message::decode(&result.expect("fetch to succeed")[..]).unwrap(); + assert_eq!(response.payload[0].data, vec![0x13, 0x37, 0x13, 0x38]); + } else { + panic!("invalid event received"); + } + } +} diff --git a/client/network/src/schema.rs b/client/network/bitswap/src/schema.rs similarity index 97% rename from client/network/src/schema.rs rename to client/network/bitswap/src/schema.rs index 4893bc28a7355..362e59aca68f9 100644 --- a/client/network/src/schema.rs +++ b/client/network/bitswap/src/schema.rs @@ -18,6 +18,6 @@ //! Include sources generated from protobuf definitions. -pub mod bitswap { +pub(crate) mod bitswap { include!(concat!(env!("OUT_DIR"), "/bitswap.message.rs")); } diff --git a/client/network/src/schema/bitswap.v1.2.0.proto b/client/network/bitswap/src/schema/bitswap.v1.2.0.proto similarity index 100% rename from client/network/src/schema/bitswap.v1.2.0.proto rename to client/network/bitswap/src/schema/bitswap.v1.2.0.proto diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 24df47db6803f..88571647fb0e3 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -17,7 +17,6 @@ // along with this program. If not, see . 
use crate::{ - bitswap::Bitswap, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, peer_info, protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, @@ -32,8 +31,7 @@ use libp2p::{ identify::IdentifyInfo, kad::record, swarm::{ - behaviour::toggle::Toggle, NetworkBehaviour, NetworkBehaviourAction, - NetworkBehaviourEventProcess, PollParameters, + NetworkBehaviour, NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters, }, NetworkBehaviour, }; @@ -79,8 +77,6 @@ where peer_info: peer_info::PeerInfoBehaviour, /// Discovers nodes of the network. discovery: DiscoveryBehaviour, - /// Bitswap server for blockchain data. - bitswap: Toggle>, /// Generic request-response protocols. request_responses: request_responses::RequestResponsesBehaviour, @@ -214,7 +210,6 @@ where block_request_protocol_config: ProtocolConfig, state_request_protocol_config: ProtocolConfig, warp_sync_protocol_config: Option, - bitswap: Option>, light_client_request_protocol_config: ProtocolConfig, // All remaining request protocol configs. mut request_response_protocols: Vec, @@ -239,7 +234,6 @@ where substrate, peer_info: peer_info::PeerInfoBehaviour::new(user_agent, local_public_key), discovery: disco_config.finish(), - bitswap: bitswap.into(), request_responses: request_responses::RequestResponsesBehaviour::new( request_response_protocols.into_iter(), peerset, @@ -338,16 +332,6 @@ fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { } } -impl NetworkBehaviourEventProcess for Behaviour -where - B: BlockT, - Client: HeaderBackend + 'static, -{ - fn inject_event(&mut self, event: void::Void) { - void::unreachable(event) - } -} - impl NetworkBehaviourEventProcess> for Behaviour where B: BlockT, diff --git a/client/network/src/bitswap.rs b/client/network/src/bitswap.rs deleted file mode 100644 index 52fa0c36caedf..0000000000000 --- a/client/network/src/bitswap.rs +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Bitswap server for substrate. -//! -//! Allows querying transactions by hash over standard bitswap protocol -//! Only supports bitswap 1.2.0. -//! CID is expected to reference 256-bit Blake2b transaction hash. 
- -use crate::schema::bitswap::{ - message::{wantlist::WantType, Block as MessageBlock, BlockPresence, BlockPresenceType}, - Message as BitswapMessage, -}; -use cid::Version; -use core::pin::Pin; -use futures::{ - io::{AsyncRead, AsyncWrite}, - Future, -}; -use libp2p::{ - core::{ - connection::ConnectionId, upgrade, InboundUpgrade, Multiaddr, OutboundUpgrade, PeerId, - UpgradeInfo, - }, - swarm::{ - NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, OneShotHandler, PollParameters, - }, -}; -use log::{debug, error, trace}; -use prost::Message; -use sc_client_api::BlockBackend; -use sp_runtime::traits::Block as BlockT; -use std::{ - collections::VecDeque, - io, - marker::PhantomData, - sync::Arc, - task::{Context, Poll}, -}; -use unsigned_varint::encode as varint_encode; - -const LOG_TARGET: &str = "bitswap"; - -// Undocumented, but according to JS the bitswap messages have a max size of 512*1024 bytes -// https://github.com/ipfs/js-ipfs-bitswap/blob/ -// d8f80408aadab94c962f6b88f343eb9f39fa0fcc/src/decision-engine/index.js#L16 -// We set it to the same value as max substrate protocol message -const MAX_PACKET_SIZE: usize = 16 * 1024 * 1024; - -// Max number of queued responses before denying requests. -const MAX_RESPONSE_QUEUE: usize = 20; -// Max number of blocks per wantlist -const MAX_WANTED_BLOCKS: usize = 16; - -const PROTOCOL_NAME: &[u8] = b"/ipfs/bitswap/1.2.0"; - -type FutureResult = Pin> + Send>>; - -/// Bitswap protocol config -#[derive(Clone, Copy, Debug, Default)] -pub struct BitswapConfig; - -impl UpgradeInfo for BitswapConfig { - type Info = &'static [u8]; - type InfoIter = std::iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - std::iter::once(PROTOCOL_NAME) - } -} - -impl InboundUpgrade for BitswapConfig -where - TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, -{ - type Output = BitswapMessage; - type Error = BitswapError; - type Future = FutureResult; - - fn upgrade_inbound(self, mut socket: TSocket, _info: Self::Info) -> Self::Future { - Box::pin(async move { - let packet = upgrade::read_length_prefixed(&mut socket, MAX_PACKET_SIZE).await?; - let message: BitswapMessage = Message::decode(packet.as_slice())?; - Ok(message) - }) - } -} - -impl UpgradeInfo for BitswapMessage { - type Info = &'static [u8]; - type InfoIter = std::iter::Once; - - fn protocol_info(&self) -> Self::InfoIter { - std::iter::once(PROTOCOL_NAME) - } -} - -impl OutboundUpgrade for BitswapMessage -where - TSocket: AsyncRead + AsyncWrite + Send + Unpin + 'static, -{ - type Output = (); - type Error = io::Error; - type Future = FutureResult; - - fn upgrade_outbound(self, mut socket: TSocket, _info: Self::Info) -> Self::Future { - Box::pin(async move { - let data = self.encode_to_vec(); - upgrade::write_length_prefixed(&mut socket, data).await - }) - } -} - -/// Internal protocol handler event. -#[derive(Debug)] -pub enum HandlerEvent { - /// We received a `BitswapMessage` from a remote. - Request(BitswapMessage), - /// We successfully sent a `BitswapMessage`. - ResponseSent, -} - -impl From for HandlerEvent { - fn from(message: BitswapMessage) -> Self { - Self::Request(message) - } -} - -impl From<()> for HandlerEvent { - fn from(_: ()) -> Self { - Self::ResponseSent - } -} - -/// Prefix represents all metadata of a CID, without the actual content. -#[derive(PartialEq, Eq, Clone, Debug)] -struct Prefix { - /// The version of CID. - pub version: Version, - /// The codec of CID. - pub codec: u64, - /// The multihash type of CID. 
- pub mh_type: u64, - /// The multihash length of CID. - pub mh_len: u8, -} - -impl Prefix { - /// Convert the prefix to encoded bytes. - pub fn to_bytes(&self) -> Vec { - let mut res = Vec::with_capacity(4); - let mut buf = varint_encode::u64_buffer(); - let version = varint_encode::u64(self.version.into(), &mut buf); - res.extend_from_slice(version); - let mut buf = varint_encode::u64_buffer(); - let codec = varint_encode::u64(self.codec, &mut buf); - res.extend_from_slice(codec); - let mut buf = varint_encode::u64_buffer(); - let mh_type = varint_encode::u64(self.mh_type, &mut buf); - res.extend_from_slice(mh_type); - let mut buf = varint_encode::u64_buffer(); - let mh_len = varint_encode::u64(self.mh_len as u64, &mut buf); - res.extend_from_slice(mh_len); - res - } -} - -/// Bitswap trait -pub trait BitswapT { - /// Get single indexed transaction by content hash. - /// - /// Note that this will only fetch transactions - /// that are indexed by the runtime with `storage_index_transaction`. - fn indexed_transaction( - &self, - hash: ::Hash, - ) -> sp_blockchain::Result>>; - - /// Queue of blocks ready to be sent out on `poll()` - fn ready_blocks(&mut self) -> &mut VecDeque<(PeerId, BitswapMessage)>; -} - -/// Network behaviour that handles sending and receiving IPFS blocks. -struct BitswapInternal { - client: Arc, - ready_blocks: VecDeque<(PeerId, BitswapMessage)>, - _block: PhantomData, -} - -impl BitswapInternal { - /// Create a new instance of the bitswap protocol handler. - pub fn new(client: Arc) -> Self { - Self { client, ready_blocks: Default::default(), _block: PhantomData::default() } - } -} - -impl BitswapT for BitswapInternal -where - Block: BlockT, - Client: BlockBackend, -{ - fn indexed_transaction( - &self, - hash: ::Hash, - ) -> sp_blockchain::Result>> { - self.client.indexed_transaction(&hash) - } - - fn ready_blocks(&mut self) -> &mut VecDeque<(PeerId, BitswapMessage)> { - &mut self.ready_blocks - } -} - -/// Wrapper for bitswap trait object implement NetworkBehaviour -pub struct Bitswap { - inner: Box + Sync + Send>, -} - -impl Bitswap { - /// Create new Bitswap wrapper - pub fn from_client + Send + Sync + 'static>( - client: Arc, - ) -> Self { - let inner = Box::new(BitswapInternal::new(client)) as Box<_>; - Self { inner } - } -} - -impl BitswapT for Bitswap { - fn indexed_transaction( - &self, - hash: ::Hash, - ) -> sp_blockchain::Result>> { - self.inner.indexed_transaction(hash) - } - - fn ready_blocks(&mut self) -> &mut VecDeque<(PeerId, BitswapMessage)> { - self.inner.ready_blocks() - } -} - -impl BitswapT for Box -where - T: BitswapT, -{ - fn indexed_transaction( - &self, - hash: ::Hash, - ) -> sp_blockchain::Result>> { - T::indexed_transaction(self, hash) - } - - fn ready_blocks(&mut self) -> &mut VecDeque<(PeerId, BitswapMessage)> { - T::ready_blocks(self) - } -} - -impl NetworkBehaviour for Bitswap -where - B: BlockT, -{ - type ConnectionHandler = OneShotHandler; - type OutEvent = void::Void; - - fn new_handler(&mut self) -> Self::ConnectionHandler { - Default::default() - } - - fn addresses_of_peer(&mut self, _peer: &PeerId) -> Vec { - Vec::new() - } - - fn inject_event(&mut self, peer: PeerId, _connection: ConnectionId, message: HandlerEvent) { - let request = match message { - HandlerEvent::ResponseSent => return, - HandlerEvent::Request(msg) => msg, - }; - trace!(target: LOG_TARGET, "Received request: {:?} from {}", request, peer); - if self.ready_blocks().len() > MAX_RESPONSE_QUEUE { - debug!(target: LOG_TARGET, "Ignored request: queue is full"); - return 
- } - - let mut response = BitswapMessage { - wantlist: None, - blocks: Default::default(), - payload: Default::default(), - block_presences: Default::default(), - pending_bytes: 0, - }; - let wantlist = match request.wantlist { - Some(wantlist) => wantlist, - None => { - debug!(target: LOG_TARGET, "Unexpected bitswap message from {}", peer); - return - }, - }; - if wantlist.entries.len() > MAX_WANTED_BLOCKS { - trace!(target: LOG_TARGET, "Ignored request: too many entries"); - return - } - for entry in wantlist.entries { - let cid = match cid::Cid::read_bytes(entry.block.as_slice()) { - Ok(cid) => cid, - Err(e) => { - trace!(target: LOG_TARGET, "Bad CID {:?}: {:?}", entry.block, e); - continue - }, - }; - if cid.version() != cid::Version::V1 || - cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) || - cid.hash().size() != 32 - { - debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid); - continue - } - let mut hash = B::Hash::default(); - hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]); - let transaction = match self.indexed_transaction(hash) { - Ok(ex) => ex, - Err(e) => { - error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e); - None - }, - }; - match transaction { - Some(transaction) => { - trace!(target: LOG_TARGET, "Found CID {:?}, hash {:?}", cid, hash); - if entry.want_type == WantType::Block as i32 { - let prefix = Prefix { - version: cid.version(), - codec: cid.codec(), - mh_type: cid.hash().code(), - mh_len: cid.hash().size(), - }; - response - .payload - .push(MessageBlock { prefix: prefix.to_bytes(), data: transaction }); - } else { - response.block_presences.push(BlockPresence { - r#type: BlockPresenceType::Have as i32, - cid: cid.to_bytes(), - }); - } - }, - None => { - trace!(target: LOG_TARGET, "Missing CID {:?}, hash {:?}", cid, hash); - if entry.send_dont_have { - response.block_presences.push(BlockPresence { - r#type: BlockPresenceType::DontHave as i32, - cid: cid.to_bytes(), - }); - } - }, - } - } - trace!(target: LOG_TARGET, "Response: {:?}", response); - self.ready_blocks().push_back((peer, response)); - } - - fn poll( - &mut self, - _ctx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll> { - if let Some((peer_id, message)) = self.ready_blocks().pop_front() { - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: message, - }) - } - Poll::Pending - } -} - -/// Bitswap protocol error. -#[derive(Debug, thiserror::Error)] -pub enum BitswapError { - /// Protobuf decoding error. - #[error("Failed to decode request: {0}.")] - DecodeProto(#[from] prost::DecodeError), - - /// Protobuf encoding error. - #[error("Failed to encode response: {0}.")] - EncodeProto(#[from] prost::EncodeError), - - /// Client backend error. - #[error(transparent)] - Client(#[from] sp_blockchain::Error), - - /// Error parsing CID - #[error(transparent)] - BadCid(#[from] cid::Error), - - /// Packet read error. - #[error(transparent)] - Read(#[from] io::Error), - - /// Error sending response. 
- #[error("Failed to send response.")] - SendResponse, -} diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 3fe95deb0c1ed..d7ca8b48a7c88 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -31,7 +31,7 @@ pub use sc_network_common::{ pub use libp2p::{build_multiaddr, core::PublicKey, identity}; -use crate::{bitswap::Bitswap, ExHashT}; +use crate::ExHashT; use core::{fmt, iter}; use futures::future; @@ -79,9 +79,6 @@ where /// Client that contains the blockchain. pub chain: Arc, - /// Bitswap block request protocol implementation. - pub bitswap: Option>, - /// Pool of transactions. /// /// The network worker will fetch transactions from this object in order to propagate them on @@ -139,6 +136,9 @@ where /// Optional warp sync protocol config. pub warp_sync_protocol_config: Option, + + /// Request response protocol configurations + pub request_response_protocol_configs: Vec, } /// Role of the local node. diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 35a91e77da524..320104d0f9554 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -249,12 +249,10 @@ mod discovery; mod peer_info; mod protocol; mod request_responses; -mod schema; mod service; mod transport; mod utils; -pub mod bitswap; pub mod config; pub mod error; pub mod network_state; diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 4610b025dde0d..fb98db3251647 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -137,7 +137,7 @@ impl NetworkWorker where B: BlockT + 'static, H: ExHashT, - Client: sp_blockchain::HeaderBackend + 'static, + Client: HeaderBackend + 'static, { /// Creates the network service. /// @@ -376,7 +376,6 @@ where params.block_request_protocol_config, params.state_request_protocol_config, params.warp_sync_protocol_config, - params.bitswap, params.light_client_request_protocol_config, params.network_config.request_response_protocols, peerset_handle.clone(), @@ -1743,14 +1742,10 @@ where let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", Some(ConnectionError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::B(EitherError::A( - PingFailure::Timeout, - ))), + EitherError::B(EitherError::A(PingFailure::Timeout)), )))) => "ping-timeout", Some(ConnectionError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A( - NotifsHandlerError::SyncNotificationsClogged, - )), + EitherError::A(NotifsHandlerError::SyncNotificationsClogged), )))) => "sync-notifications-clogged", Some(ConnectionError::Handler(_)) => "protocol-error", Some(ConnectionError::KeepAliveTimeout) => "keep-alive-timeout", diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 8770e14bf5338..a9505c5341c3d 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -146,11 +146,11 @@ fn build_test_full_node( import_queue, chain_sync: Box::new(chain_sync), metrics_registry: None, - bitswap: None, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, warp_sync_protocol_config: None, + request_response_protocol_configs: Vec::new(), }) .unwrap(); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 837cdeed0f3d1..5a29e587ceff5 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -883,11 +883,11 @@ where import_queue, chain_sync: Box::new(chain_sync), metrics_registry: 
None, - bitswap: None, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, warp_sync_protocol_config: Some(warp_protocol_config), + request_response_protocol_configs: Vec::new(), }) .unwrap(); diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 0acdbb1b1b63e..e96421d031d9a 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -51,6 +51,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } sp-storage = { version = "6.0.0", path = "../../primitives/storage" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-bitswap = { version = "0.10.0-dev", path = "../network/bitswap" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-light = { version = "0.10.0-dev", path = "../network/light" } sc-network-sync = { version = "0.10.0-dev", path = "../network/sync" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index f03ba6de2866d..5a2f4cf978b41 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -37,7 +37,8 @@ use sc_client_db::{Backend, DatabaseSettings}; use sc_consensus::import_queue::ImportQueue; use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; -use sc_network::{bitswap::Bitswap, config::SyncMode, NetworkService}; +use sc_network::{config::SyncMode, NetworkService}; +use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{ service::{NetworkStateInfo, NetworkStatusProvider, NetworkTransaction}, sync::warp::WarpSyncProvider, @@ -746,6 +747,8 @@ where warp_sync, } = params; + let mut request_response_protocol_configs = Vec::new(); + if warp_sync.is_none() && config.network.sync_mode.is_warp() { return Err("Warp sync enabled, but no warp sync provider configured.".into()) } @@ -835,6 +838,13 @@ where config.network.max_parallel_downloads, warp_sync_provider, )?; + + request_response_protocol_configs.push(config.network.ipfs_server.then(|| { + let (handler, protocol_config) = BitswapRequestHandler::new(client.clone()); + spawn_handle.spawn("bitswap-request-handler", Some("networking"), handler.run()); + protocol_config + })); + let network_params = sc_network::config::Params { role: config.role.clone(), executor: { @@ -856,12 +866,15 @@ where fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), - bitswap: config.network.ipfs_server.then(|| Bitswap::from_client(client.clone())), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_request_protocol_config, state_request_protocol_config, warp_sync_protocol_config, light_client_request_protocol_config, + request_response_protocol_configs: request_response_protocol_configs + .into_iter() + .flatten() + .collect::>(), }; let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); From 9df944835d4fa5268cb30d281f8aed21cf7ecf64 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Tue, 13 Sep 2022 13:32:34 +0200 Subject: [PATCH 126/348] benches: disable caching per default (#12232) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Disable cache for storage benches Signed-off-by: Oliver Tale-Yazdi * Disable caching per default Signed-off-by: Oliver Tale-Yazdi * Update utils/frame/benchmarking-cli/src/storage/cmd.rs Co-authored-by: Bastian Köcher 
* Add --enable-trie-cache to 'storage' command Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher --- utils/frame/benchmarking-cli/src/block/cmd.rs | 14 ++++++++++++++ utils/frame/benchmarking-cli/src/extrinsic/cmd.rs | 14 ++++++++++++++ utils/frame/benchmarking-cli/src/overhead/cmd.rs | 14 ++++++++++++++ utils/frame/benchmarking-cli/src/storage/cmd.rs | 14 ++++++++++---- 4 files changed, 52 insertions(+), 4 deletions(-) diff --git a/utils/frame/benchmarking-cli/src/block/cmd.rs b/utils/frame/benchmarking-cli/src/block/cmd.rs index e4e1716b1c5ac..14cbb1b438686 100644 --- a/utils/frame/benchmarking-cli/src/block/cmd.rs +++ b/utils/frame/benchmarking-cli/src/block/cmd.rs @@ -67,6 +67,12 @@ pub struct BlockCmd { #[allow(missing_docs)] #[clap(flatten)] pub params: BenchmarkParams, + + /// Enable the Trie cache. + /// + /// This should only be used for performance analysis and not for final results. + #[clap(long)] + pub enable_trie_cache: bool, } impl BlockCmd { @@ -98,4 +104,12 @@ impl CliConfiguration for BlockCmd { fn import_params(&self) -> Option<&ImportParams> { Some(&self.import_params) } + + fn trie_cache_maximum_size(&self) -> Result> { + if self.enable_trie_cache { + Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) + } else { + Ok(None) + } + } } diff --git a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs index 2c94a6be50255..339d6126bcd09 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs @@ -72,6 +72,12 @@ pub struct ExtrinsicParams { /// Extrinsic to benchmark. #[clap(long, value_name = "EXTRINSIC", required_unless_present = "list")] pub extrinsic: Option, + + /// Enable the Trie cache. + /// + /// This should only be used for performance analysis and not for final results. + #[clap(long)] + pub enable_trie_cache: bool, } impl ExtrinsicCmd { @@ -132,4 +138,12 @@ impl CliConfiguration for ExtrinsicCmd { fn import_params(&self) -> Option<&ImportParams> { Some(&self.import_params) } + + fn trie_cache_maximum_size(&self) -> Result> { + if self.params.enable_trie_cache { + Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) + } else { + Ok(None) + } + } } diff --git a/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/utils/frame/benchmarking-cli/src/overhead/cmd.rs index 1c6753b4a20ea..93b0e7c472647 100644 --- a/utils/frame/benchmarking-cli/src/overhead/cmd.rs +++ b/utils/frame/benchmarking-cli/src/overhead/cmd.rs @@ -75,6 +75,12 @@ pub struct OverheadParams { /// Good for adding LICENSE headers. #[clap(long, value_name = "PATH")] pub header: Option, + + /// Enable the Trie cache. + /// + /// This should only be used for performance analysis and not for final results. + #[clap(long)] + pub enable_trie_cache: bool, } /// Type of a benchmark. 
@@ -156,4 +162,12 @@ impl CliConfiguration for OverheadCmd { fn import_params(&self) -> Option<&ImportParams> { Some(&self.import_params) } + + fn trie_cache_maximum_size(&self) -> Result> { + if self.params.enable_trie_cache { + Ok(self.import_params().map(|x| x.trie_cache_maximum_size()).unwrap_or_default()) + } else { + Ok(None) + } + } } diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index d28ac4102152f..1d91e8f0b0517 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -105,9 +105,15 @@ pub struct StorageParams { /// Trie cache size in bytes. /// /// Providing `0` will disable the cache. - #[clap(long, default_value = "1024")] + #[clap(long, value_name = "Bytes", default_value = "67108864")] pub trie_cache_size: usize, + /// Enable the Trie cache. + /// + /// This should only be used for performance analysis and not for final results. + #[clap(long)] + pub enable_trie_cache: bool, + /// Include child trees in benchmark. #[clap(long)] pub include_child_trees: bool, @@ -220,10 +226,10 @@ impl CliConfiguration for StorageCmd { } fn trie_cache_maximum_size(&self) -> Result> { - if self.params.trie_cache_size == 0 { - Ok(None) - } else { + if self.params.enable_trie_cache && self.params.trie_cache_size > 0 { Ok(Some(self.params.trie_cache_size)) + } else { + Ok(None) } } } From bb7fb64c0561853080c7f9708af22840030c9727 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 13 Sep 2022 12:48:46 +0100 Subject: [PATCH 127/348] Make `BasePath::new_temp_dir` return the same path for the program lifetime (#12246) * Make `BasePath::new_temp_dir` return the same path for the program lifetime Instead of returning always a different path, this now returns the same path for the entire lifetime of the program. We still ensure that the path is cleared at the end of the program. 
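In practice this makes repeated calls idempotent. A minimal sketch of the observable behaviour, not part of the patch itself (it assumes the public `sc_service::BasePath` re-export; the helper function and its name are illustrative only):

use sc_service::BasePath;

// Illustrative only: after this change, every call to `new_temp_dir` hands back
// a handle to the same lazily created "substrate*" directory, and that directory
// is removed when the process exits rather than when an individual `BasePath`
// value is dropped.
fn temp_base_path_is_stable() -> std::io::Result<()> {
    let first = BasePath::new_temp_dir()?;
    let second = BasePath::new_temp_dir()?;
    assert_eq!(first.path(), second.path());
    Ok(())
}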
* Update client/service/src/config.rs Co-authored-by: Koute * Update client/service/src/config.rs Co-authored-by: Nitwit <47109040+nitwit69@users.noreply.github.com> * FMT Co-authored-by: Koute Co-authored-by: Nitwit <47109040+nitwit69@users.noreply.github.com> --- Cargo.lock | 37 +++++++++++++++++++++++++++++++++- client/service/Cargo.toml | 1 + client/service/src/config.rs | 39 ++++++++++++++++++++++-------------- 3 files changed, 61 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08915196d5374..efc1aa2976cdd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -873,6 +873,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "chacha20" version = "0.8.1" @@ -8822,6 +8828,7 @@ dependencies = [ "sp-transaction-storage-proof", "sp-trie", "sp-version", + "static_init", "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", @@ -10341,6 +10348,34 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "static_init" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a2a1c578e98c1c16fc3b8ec1328f7659a500737d7a0c6d625e73e830ff9c1f6" +dependencies = [ + "bitflags", + "cfg_aliases", + "libc", + "parking_lot 0.11.2", + "parking_lot_core 0.8.5", + "static_init_macro", + "winapi", +] + +[[package]] +name = "static_init_macro" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" +dependencies = [ + "cfg_aliases", + "memchr", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "statrs" version = "0.15.0" @@ -11210,7 +11245,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if 1.0.0", "digest 0.10.3", - "rand 0.7.3", + "rand 0.8.5", "static_assertions", ] diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index e96421d031d9a..76c61fe17f0da 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -85,6 +85,7 @@ async-trait = "0.1.57" tokio = { version = "1.17.0", features = ["time", "rt-multi-thread", "parking_lot"] } tempfile = "3.1.0" directories = "4.0.1" +static_init = "1.0.3" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 2fb3f820ebf15..44153e3b914f3 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -262,31 +262,43 @@ impl Default for RpcMethods { } } -/// The base path that is used for everything that needs to be write on disk to run a node. +#[static_init::dynamic(drop, lazy)] +static mut BASE_PATH_TEMP: Option = None; + +/// The base path that is used for everything that needs to be written on disk to run a node. #[derive(Debug)] -pub enum BasePath { - /// A temporary directory is used as base path and will be deleted when dropped. - Temporary(TempDir), - /// A path on the disk. 
- Permanenent(PathBuf), +pub struct BasePath { + path: PathBuf, } impl BasePath { /// Create a `BasePath` instance using a temporary directory prefixed with "substrate" and use /// it as base path. /// - /// Note: the temporary directory will be created automatically and deleted when the `BasePath` - /// instance is dropped. + /// Note: The temporary directory will be created automatically and deleted when the program + /// exits. Every call to this function will return the same path for the lifetime of the + /// program. pub fn new_temp_dir() -> io::Result { - Ok(BasePath::Temporary(tempfile::Builder::new().prefix("substrate").tempdir()?)) + let mut temp = BASE_PATH_TEMP.write(); + + match &*temp { + Some(p) => Ok(Self::new(p.path())), + None => { + let temp_dir = tempfile::Builder::new().prefix("substrate").tempdir()?; + let path = PathBuf::from(temp_dir.path()); + + *temp = Some(temp_dir); + Ok(Self::new(path)) + }, + } } /// Create a `BasePath` instance based on an existing path on disk. /// /// Note: this function will not ensure that the directory exist nor create the directory. It /// will also not delete the directory when the instance is dropped. - pub fn new>(path: P) -> BasePath { - BasePath::Permanenent(path.as_ref().to_path_buf()) + pub fn new>(path: P) -> BasePath { + Self { path: path.into() } } /// Create a base path from values describing the project. @@ -300,10 +312,7 @@ impl BasePath { /// Retrieve the base path. pub fn path(&self) -> &Path { - match self { - BasePath::Temporary(temp_dir) => temp_dir.path(), - BasePath::Permanenent(path) => path.as_path(), - } + &self.path } /// Returns the configuration directory inside this base path. From 3449fb003534c0c361a92a2d1fe03abc80e4997d Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 13 Sep 2022 14:19:26 +0200 Subject: [PATCH 128/348] Remove CanAuthorWith trait (#12213) * Remove CanAuthorWith trait CanAuthotWith trait removed. Also all dependencies, parameters, type paramers were removed. This is related to removal of native runtime. * Remove commented code * Fix code formatting * trigger CI job * trigger CI job * trigger CI job * trigger CI job * trigger CI job * trigger CI job * trigger CI job --- bin/node-template/node/src/service.rs | 13 ++--- bin/node/cli/src/service.rs | 7 +-- client/consensus/aura/src/import_queue.rs | 53 ++++++-------------- client/consensus/aura/src/lib.rs | 24 +++------ client/consensus/babe/src/lib.rs | 41 ++++------------ client/consensus/babe/src/tests.rs | 5 +- client/consensus/pow/src/lib.rs | 45 +++-------------- client/consensus/slots/src/lib.rs | 25 ++-------- primitives/consensus/common/src/lib.rs | 59 ----------------------- 9 files changed, 45 insertions(+), 227 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index ffb2440caa0ed..caa01761636df 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -1,7 +1,7 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
use node_template_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::{BlockBackend, ExecutorProvider}; +use sc_client_api::BlockBackend; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; pub use sc_executor::NativeElseWasmExecutor; use sc_finality_grandpa::SharedVoterState; @@ -113,7 +113,7 @@ pub fn new_partial( let slot_duration = sc_consensus_aura::slot_duration(&*client)?; let import_queue = - sc_consensus_aura::import_queue::(ImportQueueParams { + sc_consensus_aura::import_queue::(ImportQueueParams { block_import: grandpa_block_import.clone(), justification_import: Some(Box::new(grandpa_block_import.clone())), client: client.clone(), @@ -129,9 +129,6 @@ pub fn new_partial( Ok((timestamp, slot)) }, spawner: &task_manager.spawn_essential_handle(), - can_author_with: sp_consensus::CanAuthorWithNativeVersion::new( - client.executor().clone(), - ), registry: config.prometheus_registry(), check_for_equivocation: Default::default(), telemetry: telemetry.as_ref().map(|x| x.handle()), @@ -254,12 +251,9 @@ pub fn new_full(mut config: Configuration) -> Result telemetry.as_ref().map(|x| x.handle()), ); - let can_author_with = - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let slot_duration = sc_consensus_aura::slot_duration(&*client)?; - let aura = sc_consensus_aura::start_aura::( + let aura = sc_consensus_aura::start_aura::( StartAuraParams { slot_duration, client, @@ -280,7 +274,6 @@ pub fn new_full(mut config: Configuration) -> Result force_authoring, backoff_authoring_blocks, keystore: keystore_container.sync_keystore(), - can_author_with, sync_oracle: network.clone(), justification_sync_link: network.clone(), block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 2152301ac2d42..c058e22fa7a97 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -26,7 +26,7 @@ use futures::prelude::*; use kitchensink_runtime::RuntimeApi; use node_executor::ExecutorDispatch; use node_primitives::Block; -use sc_client_api::{BlockBackend, ExecutorProvider}; +use sc_client_api::BlockBackend; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; use sc_network::NetworkService; @@ -227,7 +227,6 @@ pub fn new_partial( }, &task_manager.spawn_essential_handle(), config.prometheus_registry(), - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), telemetry.as_ref().map(|x| x.handle()), )?; @@ -422,9 +421,6 @@ pub fn new_full_base( telemetry.as_ref().map(|x| x.handle()), ); - let can_author_with = - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let client_clone = client.clone(); let slot_duration = babe_link.config().slot_duration(); let babe_config = sc_consensus_babe::BabeParams { @@ -463,7 +459,6 @@ pub fn new_full_base( force_authoring, backoff_authoring_blocks, babe_link, - can_author_with, block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 30554006732c0..31fe7bbaa1095 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -35,7 +35,7 @@ use sp_blockchain::{ well_known_cache_keys::{self, Id as CacheKeyId}, HeaderBackend, }; -use sp_consensus::{CanAuthorWith, Error as ConsensusError}; +use sp_consensus::Error 
as ConsensusError; use sp_consensus_aura::{ digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi, ConsensusLog, AURA_ENGINE_ID, @@ -109,27 +109,24 @@ where } /// A verifier for Aura blocks. -pub struct AuraVerifier { +pub struct AuraVerifier { client: Arc, phantom: PhantomData

, create_inherent_data_providers: CIDP, - can_author_with: CAW, check_for_equivocation: CheckForEquivocation, telemetry: Option, } -impl AuraVerifier { +impl AuraVerifier { pub(crate) fn new( client: Arc, create_inherent_data_providers: CIDP, - can_author_with: CAW, check_for_equivocation: CheckForEquivocation, telemetry: Option, ) -> Self { Self { client, create_inherent_data_providers, - can_author_with, check_for_equivocation, telemetry, phantom: PhantomData, @@ -137,10 +134,9 @@ impl AuraVerifier { } } -impl AuraVerifier +impl AuraVerifier where P: Send + Sync + 'static, - CAW: Send + Sync + 'static, CIDP: Send, { async fn check_inherents( @@ -154,19 +150,8 @@ where where C: ProvideRuntimeApi, C::Api: BlockBuilderApi, - CAW: CanAuthorWith, CIDP: CreateInherentDataProviders, { - if let Err(e) = self.can_author_with.can_author_with(&block_id) { - debug!( - target: "aura", - "Skipping `check_inherents` as authoring version is not compatible: {}", - e, - ); - - return Ok(()) - } - let inherent_res = self .client .runtime_api() @@ -187,14 +172,13 @@ where } #[async_trait::async_trait] -impl Verifier for AuraVerifier +impl Verifier for AuraVerifier where C: ProvideRuntimeApi + Send + Sync + sc_client_api::backend::AuxStore + BlockOf, C::Api: BlockBuilderApi + AuraApi> + ApiExt, P: Pair + Send + Sync + 'static, P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, P::Signature: Encode + Decode, - CAW: CanAuthorWith + Send + Sync + 'static, CIDP: CreateInherentDataProviders + Send + Sync, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { @@ -338,7 +322,7 @@ impl Default for CheckForEquivocation { } /// Parameters of [`import_queue`]. -pub struct ImportQueueParams<'a, Block, I, C, S, CAW, CIDP> { +pub struct ImportQueueParams<'a, Block, I, C, S, CIDP> { /// The block import to use. pub block_import: I, /// The justification import. @@ -351,8 +335,6 @@ pub struct ImportQueueParams<'a, Block, I, C, S, CAW, CIDP> { pub spawner: &'a S, /// The prometheus registry. pub registry: Option<&'a Registry>, - /// Can we author with the current node? - pub can_author_with: CAW, /// Should we check for equivocation? pub check_for_equivocation: CheckForEquivocation, /// Telemetry instance used to report telemetry metrics. @@ -360,7 +342,7 @@ pub struct ImportQueueParams<'a, Block, I, C, S, CAW, CIDP> { } /// Start an import queue for the Aura consensus algorithm. -pub fn import_queue( +pub fn import_queue( ImportQueueParams { block_import, justification_import, @@ -368,10 +350,9 @@ pub fn import_queue( create_inherent_data_providers, spawner, registry, - can_author_with, check_for_equivocation, telemetry, - }: ImportQueueParams, + }: ImportQueueParams, ) -> Result, sp_consensus::Error> where Block: BlockT, @@ -392,14 +373,12 @@ where P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, P::Signature: Encode + Decode, S: sp_core::traits::SpawnEssentialNamed, - CAW: CanAuthorWith + Send + Sync + 'static, CIDP: CreateInherentDataProviders + Sync + Send + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - let verifier = build_verifier::(BuildVerifierParams { + let verifier = build_verifier::(BuildVerifierParams { client, create_inherent_data_providers, - can_author_with, check_for_equivocation, telemetry, }); @@ -408,13 +387,11 @@ where } /// Parameters of [`build_verifier`]. -pub struct BuildVerifierParams { +pub struct BuildVerifierParams { /// The client to interact with the chain. 
pub client: Arc, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, - /// Can we author with the current node? - pub can_author_with: CAW, /// Should we check for equivocation? pub check_for_equivocation: CheckForEquivocation, /// Telemetry instance used to report telemetry metrics. @@ -422,19 +399,17 @@ pub struct BuildVerifierParams { } /// Build the [`AuraVerifier`] -pub fn build_verifier( +pub fn build_verifier( BuildVerifierParams { client, create_inherent_data_providers, - can_author_with, check_for_equivocation, telemetry, - }: BuildVerifierParams, -) -> AuraVerifier { - AuraVerifier::<_, P, _, _>::new( + }: BuildVerifierParams, +) -> AuraVerifier { + AuraVerifier::<_, P, _>::new( client, create_inherent_data_providers, - can_author_with, check_for_equivocation, telemetry, ) diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 92fe1fa3cf29d..0bdc663815051 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -47,9 +47,7 @@ use sc_telemetry::TelemetryHandle; use sp_api::ProvideRuntimeApi; use sp_application_crypto::{AppKey, AppPublic}; use sp_blockchain::{HeaderBackend, Result as CResult}; -use sp_consensus::{ - BlockOrigin, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, -}; +use sp_consensus::{BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain}; use sp_consensus_slots::Slot; use sp_core::crypto::{ByteArray, Pair, Public}; use sp_inherents::CreateInherentDataProviders; @@ -108,7 +106,7 @@ fn slot_author(slot: Slot, authorities: &[AuthorityId

]) -> Option<&A } /// Parameters of [`start_aura`]. -pub struct StartAuraParams { +pub struct StartAuraParams { /// The duration of a slot. pub slot_duration: SlotDuration, /// The client to interact with the chain. @@ -131,8 +129,6 @@ pub struct StartAuraParams { pub backoff_authoring_blocks: Option, /// The keystore used by the node. pub keystore: SyncCryptoStorePtr, - /// Can we author a block with this node? - pub can_author_with: CAW, /// The proportion of the slot dedicated to proposing. /// /// The block proposing will be limited to this proportion of the slot from the starting of the @@ -147,7 +143,7 @@ pub struct StartAuraParams { } /// Start the aura worker. The returned future should be run in a futures executor. -pub fn start_aura( +pub fn start_aura( StartAuraParams { slot_duration, client, @@ -160,11 +156,10 @@ pub fn start_aura( force_authoring, backoff_authoring_blocks, keystore, - can_author_with, block_proposal_slot_portion, max_block_proposal_slot_portion, telemetry, - }: StartAuraParams, + }: StartAuraParams, ) -> Result, sp_consensus::Error> where P: Pair + Send + Sync, @@ -182,7 +177,6 @@ where CIDP: CreateInherentDataProviders + Send, CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, - CAW: CanAuthorWith + Send, Error: std::error::Error + Send + From + 'static, { let worker = build_aura_worker::(BuildAuraWorkerParams { @@ -205,7 +199,6 @@ where SimpleSlotWorkerToSlotWorker(worker), sync_oracle, create_inherent_data_providers, - can_author_with, )) } @@ -568,9 +561,7 @@ mod tests { use sc_keystore::LocalKeystore; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::AURA; - use sp_consensus::{ - AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal, - }; + use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; use sp_consensus_aura::sr25519::AuthorityPair; use sp_inherents::InherentData; use sp_keyring::sr25519::Keyring; @@ -633,7 +624,6 @@ mod tests { type AuraVerifier = import_queue::AuraVerifier< PeersFullClient, AuthorityPair, - AlwaysCanAuthor, Box< dyn CreateInherentDataProviders< TestBlock, @@ -670,7 +660,6 @@ mod tests { Ok((timestamp, slot)) }), - AlwaysCanAuthor, CheckForEquivocation::Yes, None, ) @@ -738,7 +727,7 @@ mod tests { let slot_duration = slot_duration(&*client).expect("slot duration available"); aura_futures.push( - start_aura::(StartAuraParams { + start_aura::(StartAuraParams { slot_duration, block_import: client.clone(), select_chain, @@ -760,7 +749,6 @@ mod tests { BackoffAuthoringOnFinalizedHeadLagging::default(), ), keystore, - can_author_with: sp_consensus::AlwaysCanAuthor, block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, telemetry: None, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 1303915efee49..198de865bb290 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -115,8 +115,7 @@ use sp_blockchain::{ Backend as _, Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult, }; use sp_consensus::{ - BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, - SelectChain, + BlockOrigin, CacheKeyId, Environment, Error as ConsensusError, Proposer, SelectChain, }; use sp_consensus_babe::inherents::BabeInherentData; use sp_consensus_slots::Slot; @@ -370,7 +369,7 @@ where } /// Parameters for BABE. 
-pub struct BabeParams { +pub struct BabeParams { /// The keystore that manages the keys of the node. pub keystore: SyncCryptoStorePtr, @@ -406,9 +405,6 @@ pub struct BabeParams { /// The source of timestamps for relative slots pub babe_link: BabeLink, - /// Checks if the current native implementation can author with a runtime at a given block. - pub can_author_with: CAW, - /// The proportion of the slot dedicated to proposing. /// /// The block proposing will be limited to this proportion of the slot from the starting of the @@ -425,7 +421,7 @@ pub struct BabeParams { } /// Start the babe worker. -pub fn start_babe( +pub fn start_babe( BabeParams { keystore, client, @@ -438,11 +434,10 @@ pub fn start_babe( force_authoring, backoff_authoring_blocks, babe_link, - can_author_with, block_proposal_slot_portion, max_block_proposal_slot_portion, telemetry, - }: BabeParams, + }: BabeParams, ) -> Result, sp_consensus::Error> where B: BlockT, @@ -468,7 +463,6 @@ where CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + Sync + 'static, - CAW: CanAuthorWith + Send + Sync + 'static, Error: std::error::Error + Send + From + From + 'static, { const HANDLE_BUFFER_SIZE: usize = 1024; @@ -500,7 +494,6 @@ where sc_consensus_slots::SimpleSlotWorkerToSlotWorker(worker), sync_oracle, create_inherent_data_providers, - can_author_with, ); let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE); @@ -1009,23 +1002,21 @@ impl BabeLink { } /// A verifier for Babe blocks. -pub struct BabeVerifier { +pub struct BabeVerifier { client: Arc, select_chain: SelectChain, create_inherent_data_providers: CIDP, config: BabeConfiguration, epoch_changes: SharedEpochChanges, - can_author_with: CAW, telemetry: Option, } -impl BabeVerifier +impl BabeVerifier where Block: BlockT, Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, - CAW: CanAuthorWith, CIDP: CreateInherentDataProviders, { async fn check_inherents( @@ -1036,16 +1027,6 @@ where create_inherent_data_providers: CIDP::InherentDataProviders, execution_context: ExecutionContext, ) -> Result<(), Error> { - if let Err(e) = self.can_author_with.can_author_with(&block_id) { - debug!( - target: "babe", - "Skipping `check_inherents` as authoring version is not compatible: {}", - e, - ); - - return Ok(()) - } - let inherent_res = self .client .runtime_api() @@ -1150,8 +1131,8 @@ type BlockVerificationResult = Result<(BlockImportParams, Option)>>), String>; #[async_trait::async_trait] -impl Verifier - for BabeVerifier +impl Verifier + for BabeVerifier where Block: BlockT, Client: HeaderMetadata @@ -1162,7 +1143,6 @@ where + AuxStore, Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, - CAW: CanAuthorWith + Send + Sync, CIDP: CreateInherentDataProviders + Send + Sync, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { @@ -1773,7 +1753,7 @@ where /// /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. 
-pub fn import_queue( +pub fn import_queue( babe_link: BabeLink, block_import: Inner, justification_import: Option>, @@ -1782,7 +1762,6 @@ pub fn import_queue( create_inherent_data_providers: CIDP, spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, - can_author_with: CAW, telemetry: Option, ) -> ClientResult> where @@ -1802,7 +1781,6 @@ where + 'static, Client::Api: BlockBuilderApi + BabeApi + ApiExt, SelectChain: sp_consensus::SelectChain + 'static, - CAW: CanAuthorWith + Send + Sync + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { @@ -1811,7 +1789,6 @@ where create_inherent_data_providers, config: babe_link.config, epoch_changes: babe_link.epoch_changes, - can_author_with, telemetry, client, }; diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 7207b7a36c3d4..ab3805138482c 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -31,7 +31,7 @@ use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_keystore::LocalKeystore; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::BABE; -use sp_consensus::{AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal}; +use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; use sp_consensus_babe::{ inherents::InherentDataProvider, make_transcript, make_transcript_data, AllowedSlots, AuthorityPair, Slot, @@ -235,7 +235,6 @@ pub struct TestVerifier { TestBlock, PeersFullClient, TestSelectChain, - AlwaysCanAuthor, Box< dyn CreateInherentDataProviders< TestBlock, @@ -332,7 +331,6 @@ impl TestNetFactory for BabeTestNet { }), config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), - can_author_with: AlwaysCanAuthor, telemetry: None, }, mutator: MUTATOR.with(|m| m.borrow().clone()), @@ -447,7 +445,6 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), babe_link: data.link.clone(), keystore, - can_author_with: sp_consensus::AlwaysCanAuthor, justification_sync_link: (), block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index f63e453a48026..6c492931d445c 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -56,9 +56,7 @@ use sc_consensus::{ use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend}; -use sp_consensus::{ - CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, -}; +use sp_consensus::{Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle}; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; use sp_core::ExecutionContext; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; @@ -214,18 +212,17 @@ pub trait PowAlgorithm { } /// A block importer for PoW. 
-pub struct PowBlockImport { +pub struct PowBlockImport { algorithm: Algorithm, inner: I, select_chain: S, client: Arc, create_inherent_data_providers: Arc, check_inherents_after: <::Header as HeaderT>::Number, - can_author_with: CAW, } -impl Clone - for PowBlockImport +impl Clone + for PowBlockImport { fn clone(&self) -> Self { Self { @@ -235,12 +232,11 @@ impl Clone client: self.client.clone(), create_inherent_data_providers: self.create_inherent_data_providers.clone(), check_inherents_after: self.check_inherents_after, - can_author_with: self.can_author_with.clone(), } } } -impl PowBlockImport +impl PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, @@ -248,7 +244,6 @@ where C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + BlockOf, C::Api: BlockBuilderApi, Algorithm: PowAlgorithm, - CAW: CanAuthorWith, CIDP: CreateInherentDataProviders, { /// Create a new block import suitable to be used in PoW @@ -259,7 +254,6 @@ where check_inherents_after: <::Header as HeaderT>::Number, select_chain: S, create_inherent_data_providers: CIDP, - can_author_with: CAW, ) -> Self { Self { inner, @@ -268,7 +262,6 @@ where check_inherents_after, select_chain, create_inherent_data_providers: Arc::new(create_inherent_data_providers), - can_author_with, } } @@ -283,16 +276,6 @@ where return Ok(()) } - if let Err(e) = self.can_author_with.can_author_with(&block_id) { - debug!( - target: "pow", - "Skipping `check_inherents` as authoring version is not compatible: {}", - e, - ); - - return Ok(()) - } - let inherent_data = inherent_data_providers .create_inherent_data() .map_err(|e| Error::CreateInherents(e))?; @@ -317,8 +300,7 @@ where } #[async_trait::async_trait] -impl BlockImport - for PowBlockImport +impl BlockImport for PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, @@ -328,7 +310,6 @@ where C::Api: BlockBuilderApi, Algorithm: PowAlgorithm + Send + Sync, Algorithm::Difficulty: 'static + Send, - CAW: CanAuthorWith + Send + Sync, CIDP: CreateInherentDataProviders + Send + Sync, { type Error = ConsensusError; @@ -512,7 +493,7 @@ where /// /// `pre_runtime` is a parameter that allows a custom additional pre-runtime digest to be inserted /// for blocks being built. This can encode authorship information, or just be a graffiti. 
-pub fn start_mining_worker( +pub fn start_mining_worker( block_import: BoxBlockImport>, client: Arc, select_chain: S, @@ -524,7 +505,6 @@ pub fn start_mining_worker( create_inherent_data_providers: CIDP, timeout: Duration, build_time: Duration, - can_author_with: CAW, ) -> ( MiningHandle>::Proof>, impl Future, @@ -541,7 +521,6 @@ where SO: SyncOracle + Clone + Send + Sync + 'static, L: sc_consensus::JustificationSyncLink, CIDP: CreateInherentDataProviders, - CAW: CanAuthorWith + Clone + Send + 'static, { let mut timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); let worker = MiningHandle::new(algorithm.clone(), block_import, justification_sync_link); @@ -573,16 +552,6 @@ where }; let best_hash = best_header.hash(); - if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(best_hash)) { - warn!( - target: "pow", - "Skipping proposal `can_author_with` returned: {} \ - Probably a node update is required!", - err, - ); - continue - } - if worker.best_hash() == Some(best_hash) { continue } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 39b40a32f18ca..b9f8c03f2ac88 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -38,13 +38,10 @@ use log::{debug, info, warn}; use sc_consensus::{BlockImport, JustificationSyncLink}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN}; use sp_arithmetic::traits::BaseArithmetic; -use sp_consensus::{CanAuthorWith, Proposal, Proposer, SelectChain, SyncOracle}; +use sp_consensus::{Proposal, Proposer, SelectChain, SyncOracle}; use sp_consensus_slots::{Slot, SlotDuration}; use sp_inherents::CreateInherentDataProviders; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, HashFor, Header as HeaderT}, -}; +use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; use sp_timestamp::Timestamp; use std::{fmt::Debug, ops::Deref, time::Duration}; @@ -486,13 +483,12 @@ impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H, I, J); /// /// Every time a new slot is triggered, `worker.on_slot` is called and the future it returns is /// polled until completion, unless we are major syncing. -pub async fn start_slot_worker( +pub async fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, sync_oracle: SO, create_inherent_data_providers: CIDP, - can_author_with: CAW, ) where B: BlockT, C: SelectChain, @@ -500,7 +496,6 @@ pub async fn start_slot_worker( SO: SyncOracle + Send, CIDP: CreateInherentDataProviders + Send, CIDP::InherentDataProviders: InherentDataProviderExt + Send, - CAW: CanAuthorWith + Send, { let mut slots = Slots::new(slot_duration.as_duration(), create_inherent_data_providers, client); @@ -518,19 +513,7 @@ pub async fn start_slot_worker( continue } - if let Err(err) = - can_author_with.can_author_with(&BlockId::Hash(slot_info.chain_head.hash())) - { - warn!( - target: "slots", - "Unable to author block in slot {},. 
`can_author_with` returned: {} \ - Probably a node update is required!", - slot_info.slot, - err, - ); - } else { - let _ = worker.on_slot(slot_info).await; - } + let _ = worker.on_slot(slot_info).await; } } diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 043533cbf2258..458a5eee259a9 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -25,7 +25,6 @@ use std::{sync::Arc, time::Duration}; use futures::prelude::*; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HashFor}, Digest, }; @@ -268,61 +267,3 @@ where T::is_offline(self) } } - -/// Checks if the current active native block authoring implementation can author with the runtime -/// at the given block. -pub trait CanAuthorWith { - /// See trait docs for more information. - /// - /// # Return - /// - /// - Returns `Ok(())` when authoring is supported. - /// - Returns `Err(_)` when authoring is not supported. - fn can_author_with(&self, at: &BlockId) -> Result<(), String>; -} - -/// Checks if the node can author blocks by using -/// [`NativeVersion::can_author_with`](sp_version::NativeVersion::can_author_with). -#[derive(Clone)] -pub struct CanAuthorWithNativeVersion(T); - -impl CanAuthorWithNativeVersion { - /// Creates a new instance of `Self`. - pub fn new(inner: T) -> Self { - Self(inner) - } -} - -impl + sp_version::GetNativeVersion, Block: BlockT> - CanAuthorWith for CanAuthorWithNativeVersion -{ - fn can_author_with(&self, at: &BlockId) -> Result<(), String> { - match self.0.runtime_version(at) { - Ok(version) => self.0.native_version().can_author_with(&version), - Err(e) => Err(format!( - "Failed to get runtime version at `{}` and will disable authoring. Error: {}", - at, e, - )), - } - } -} - -/// Returns always `true` for `can_author_with`. This is useful for tests. -#[derive(Clone)] -pub struct AlwaysCanAuthor; - -impl CanAuthorWith for AlwaysCanAuthor { - fn can_author_with(&self, _: &BlockId) -> Result<(), String> { - Ok(()) - } -} - -/// Never can author. 
-#[derive(Clone)] -pub struct NeverCanAuthor; - -impl CanAuthorWith for NeverCanAuthor { - fn can_author_with(&self, _: &BlockId) -> Result<(), String> { - Err("Authoring is always disabled.".to_string()) - } -} From 23bb5a6255bbcd7ce2999044710428bc4a7a924f Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Tue, 13 Sep 2022 21:23:44 +0800 Subject: [PATCH 129/348] Create sp-weights crate to store weight primitives (#12219) * Create sp-weights crate to store weight primitives * Fix templates * Fix templates * Fixes * Fixes * cargo fmt * Fixes * Fixes * Use deprecated type alias instead of deprecated unit types * Use deprecated subtraits instead of deprecated hollow new traits * Fixes * Allow deprecation in macro expansion * Add missing where clause during call macro expansion * cargo fmt * Fixes * cargo fmt * Fixes * Fixes * Fixes * Fixes * Move FRAME-specific weight files back to frame_support * Fixes * Update frame/support/src/dispatch.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/support/src/dispatch.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/support/src/dispatch.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Add missing header * Rewrite module docs * Fixes * Fixes * Fixes * Fixes * cargo fmt Co-authored-by: Shawn Tabrizi Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- Cargo.lock | 18 + Cargo.toml | 1 + bin/node/executor/tests/basic.rs | 3 +- bin/node/executor/tests/fees.rs | 3 +- bin/node/runtime/src/impls.rs | 5 +- bin/node/runtime/src/lib.rs | 3 +- frame/babe/src/lib.rs | 4 +- frame/babe/src/tests.rs | 2 +- frame/balances/src/tests_composite.rs | 3 +- frame/balances/src/tests_local.rs | 3 +- frame/collective/src/lib.rs | 7 +- frame/collective/src/tests.rs | 5 +- frame/contracts/src/lib.rs | 4 +- frame/contracts/src/tests.rs | 4 +- frame/contracts/src/wasm/runtime.rs | 2 +- .../election-provider-multi-phase/src/lib.rs | 3 +- .../election-provider-support/src/onchain.rs | 2 +- frame/examples/basic/src/lib.rs | 4 +- frame/examples/basic/src/tests.rs | 5 +- frame/executive/src/lib.rs | 4 +- frame/grandpa/src/lib.rs | 4 +- frame/grandpa/src/tests.rs | 2 +- frame/multisig/src/lib.rs | 5 +- frame/preimage/src/lib.rs | 2 +- frame/proxy/src/lib.rs | 3 +- frame/ranked-collective/src/lib.rs | 3 +- frame/recovery/src/lib.rs | 3 +- frame/scheduler/src/lib.rs | 4 +- frame/staking/src/pallet/impls.rs | 3 +- frame/staking/src/tests.rs | 3 +- frame/sudo/src/lib.rs | 2 +- frame/support/Cargo.toml | 2 + .../src/construct_runtime/expand/call.rs | 7 +- .../procedural/src/pallet/expand/call.rs | 4 + frame/support/src/dispatch.rs | 754 +++++++++++++- frame/support/src/lib.rs | 7 +- frame/support/src/traits/misc.rs | 4 +- frame/support/src/weights.rs | 976 ++---------------- frame/support/src/weights/block_weights.rs | 8 +- .../support/src/weights/extrinsic_weights.rs | 8 +- frame/support/src/weights/paritydb_weights.rs | 9 +- frame/support/src/weights/rocksdb_weights.rs | 9 +- frame/support/test/tests/construct_runtime.rs | 6 +- frame/support/test/tests/origin.rs | 2 +- frame/support/test/tests/pallet.rs | 6 +- frame/support/test/tests/pallet_instance.rs | 3 +- frame/system/Cargo.toml | 2 + frame/system/benchmarking/src/lib.rs | 2 +- .../system/src/extensions/check_mortality.rs | 5 +- .../src/extensions/check_non_zero_sender.rs | 2 +- frame/system/src/extensions/check_nonce.rs | 2 +- frame/system/src/extensions/check_weight.rs | 8 +- 
frame/system/src/lib.rs | 10 +- frame/system/src/limits.rs | 5 +- frame/system/src/tests.rs | 3 +- .../asset-tx-payment/src/lib.rs | 3 +- .../asset-tx-payment/src/tests.rs | 3 +- frame/transaction-payment/src/lib.rs | 17 +- frame/transaction-payment/src/types.rs | 2 +- frame/utility/src/lib.rs | 3 +- frame/utility/src/tests.rs | 4 +- frame/whitelist/src/lib.rs | 3 +- primitives/runtime/Cargo.toml | 1 + primitives/runtime/src/traits.rs | 6 + primitives/weights/Cargo.toml | 40 + primitives/weights/src/lib.rs | 299 ++++++ .../weights/src}/weight_v2.rs | 204 +--- .../benchmarking-cli/src/overhead/weights.hbs | 8 +- .../benchmarking-cli/src/storage/weights.hbs | 9 +- 69 files changed, 1328 insertions(+), 1237 deletions(-) create mode 100644 primitives/weights/Cargo.toml create mode 100644 primitives/weights/src/lib.rs rename {frame/support/src/weights => primitives/weights/src}/weight_v2.rs (68%) diff --git a/Cargo.lock b/Cargo.lock index efc1aa2976cdd..34d2296e019ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2293,6 +2293,7 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-tracing", + "sp-weights", "tt-call", ] @@ -2390,6 +2391,7 @@ dependencies = [ "sp-runtime", "sp-std", "sp-version", + "sp-weights", "substrate-test-runtime-client", ] @@ -10010,6 +10012,7 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-tracing", + "sp-weights", "substrate-test-runtime-client", "zstd", ] @@ -10315,6 +10318,21 @@ dependencies = [ "wasmtime", ] +[[package]] +name = "sp-weights" +version = "4.0.0" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "scale-info", + "serde", + "smallvec", + "sp-arithmetic", + "sp-core", + "sp-debug-derive", + "sp-std", +] + [[package]] name = "spin" version = "0.5.2" diff --git a/Cargo.toml b/Cargo.toml index 9c71dbdf4b52c..4dbf65dc7e1fe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -212,6 +212,7 @@ members = [ "primitives/version", "primitives/version/proc-macro", "primitives/wasm-interface", + "primitives/weights", "test-utils/client", "test-utils/derive", "test-utils/runtime", diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 33f71568ec6c5..99a9b83596acf 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -17,8 +17,9 @@ use codec::{Decode, Encode, Joiner}; use frame_support::{ + dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo}, traits::Currency, - weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Weight}, + weights::Weight, }; use frame_system::{self, AccountInfo, EventRecord, Phase}; use sp_core::{storage::well_known_keys, traits::Externalities}; diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index ae1bfa371469c..6932cb2cea867 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -17,8 +17,9 @@ use codec::{Encode, Joiner}; use frame_support::{ + dispatch::GetDispatchInfo, traits::Currency, - weights::{constants::ExtrinsicBaseWeight, GetDispatchInfo, IdentityFee, WeightToFee}, + weights::{constants::ExtrinsicBaseWeight, IdentityFee, WeightToFee}, }; use kitchensink_runtime::{ constants::{currency::*, time::SLOT_DURATION}, diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 3509592ebf04a..fb2f3cec65290 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -129,7 +129,10 @@ mod multiplier_tests { AdjustmentVariable, MinimumMultiplier, Runtime, RuntimeBlockWeights as BlockWeights, System, TargetBlockFullness, TransactionPayment, }; - use 
frame_support::weights::{DispatchClass, Weight, WeightToFee}; + use frame_support::{ + dispatch::DispatchClass, + weights::{Weight, WeightToFee}, + }; fn max_normal() -> Weight { BlockWeights::get() diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 966a2e17ea475..2f0efcaa56740 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -28,6 +28,7 @@ use frame_election_provider_support::{ }; use frame_support::{ construct_runtime, + dispatch::DispatchClass, pallet_prelude::Get, parameter_types, traits::{ @@ -37,7 +38,7 @@ use frame_support::{ }, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, - ConstantMultiplier, DispatchClass, IdentityFee, Weight, + ConstantMultiplier, IdentityFee, Weight, }, PalletId, RuntimeDebug, }; diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 9a99d8ed995ab..eadaa036332fa 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -23,13 +23,13 @@ use codec::{Decode, Encode}; use frame_support::{ - dispatch::DispatchResultWithPostInfo, + dispatch::{DispatchResultWithPostInfo, Pays}, ensure, traits::{ ConstU32, DisabledValidators, FindAuthor, Get, KeyOwnerProofSystem, OnTimestampSet, OneSessionHandler, }, - weights::{Pays, Weight}, + weights::Weight, BoundedVec, WeakBoundedVec, }; use sp_application_crypto::ByteArray; diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 065105eda1c32..84e55ed5d050b 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -20,8 +20,8 @@ use super::{Call, *}; use frame_support::{ assert_err, assert_noop, assert_ok, + dispatch::{GetDispatchInfo, Pays}, traits::{Currency, EstimateNextSessionRotation, OnFinalize}, - weights::{GetDispatchInfo, Pays}, }; use mock::*; use pallet_session::ShouldEndSession; diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index fa1d2db8b1e49..1276a812c7861 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -21,9 +21,10 @@ use crate::{self as pallet_balances, decl_tests, Config, Pallet}; use frame_support::{ + dispatch::DispatchInfo, parameter_types, traits::{ConstU32, ConstU64, ConstU8}, - weights::{DispatchInfo, IdentityFee, Weight}, + weights::{IdentityFee, Weight}, }; use pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 3301d90027682..b391b03ed6b6a 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -21,9 +21,10 @@ use crate::{self as pallet_balances, decl_tests, Config, Pallet}; use frame_support::{ + dispatch::DispatchInfo, parameter_types, traits::{ConstU32, ConstU64, ConstU8, StorageMapShim}, - weights::{DispatchInfo, IdentityFee, Weight}, + weights::{IdentityFee, Weight}, }; use pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 7944f826255ec..49da641e3e204 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -49,12 +49,15 @@ use sp_std::{marker::PhantomData, prelude::*, result}; use frame_support::{ codec::{Decode, Encode, MaxEncodedLen}, - dispatch::{DispatchError, DispatchResultWithPostInfo, Dispatchable, PostDispatchInfo}, + dispatch::{ + DispatchError, DispatchResultWithPostInfo, Dispatchable, GetDispatchInfo, Pays, + PostDispatchInfo, + }, ensure, traits::{ Backing, ChangeMembers, EnsureOrigin, Get, 
GetBacking, InitializeMembers, StorageVersion, }, - weights::{GetDispatchInfo, Pays, Weight}, + weights::Weight, }; #[cfg(test)] diff --git a/frame/collective/src/tests.rs b/frame/collective/src/tests.rs index b063cc21bd426..13b9a24f7889d 100644 --- a/frame/collective/src/tests.rs +++ b/frame/collective/src/tests.rs @@ -18,9 +18,10 @@ use super::{Event as CollectiveEvent, *}; use crate as pallet_collective; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, + dispatch::Pays, + parameter_types, traits::{ConstU32, ConstU64, GenesisBuild, StorageVersion}, - weights::Pays, Hashable, }; use frame_system::{EventRecord, Phase}; diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 9890bd840cc3a..48f1668440387 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -107,10 +107,10 @@ use crate::{ }; use codec::{Encode, HasCompact}; use frame_support::{ - dispatch::Dispatchable, + dispatch::{DispatchClass, Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo}, ensure, traits::{ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time}, - weights::{DispatchClass, GetDispatchInfo, Pays, PostDispatchInfo, Weight}, + weights::Weight, BoundedVec, }; use frame_system::{limits::BlockWeights, Pallet as System}; diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 1193d17676e1d..c40968eb2ea57 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -32,14 +32,14 @@ use assert_matches::assert_matches; use codec::Encode; use frame_support::{ assert_err, assert_err_ignore_postinfo, assert_noop, assert_ok, - dispatch::DispatchErrorWithPostInfo, + dispatch::{DispatchClass, DispatchErrorWithPostInfo, PostDispatchInfo}, parameter_types, storage::child, traits::{ BalanceStatus, ConstU32, ConstU64, Contains, Currency, Get, OnIdle, OnInitialize, ReservableCurrency, }, - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, PostDispatchInfo, Weight}, + weights::{constants::WEIGHT_PER_SECOND, Weight}, }; use frame_system::{self as system, EventRecord, Phase}; use pretty_assertions::{assert_eq, assert_ne}; diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 5b94611843169..7caa7c5c6fa1f 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -2314,7 +2314,7 @@ pub mod env { call_ptr: u32, call_len: u32, ) -> Result { - use frame_support::{dispatch::GetDispatchInfo, weights::extract_actual_weight}; + use frame_support::dispatch::{extract_actual_weight, GetDispatchInfo}; ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?; let call: ::RuntimeCall = ctx.read_sandbox_memory_as_unbounded(call_ptr, call_len)?; diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index f0378ea7dc83f..1188e3353bc96 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -234,9 +234,10 @@ use frame_election_provider_support::{ ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolution, }; use frame_support::{ + dispatch::DispatchClass, ensure, traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, - weights::{DispatchClass, Weight}, + weights::Weight, }; use frame_system::{ensure_none, offchain::SendTransactionTypes}; use scale_info::TypeInfo; diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index c899e021f974a..05bbd11e1ae9d 
100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -22,7 +22,7 @@ use crate::{ Debug, ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolver, WeightInfo, }; -use frame_support::{traits::Get, weights::DispatchClass}; +use frame_support::{dispatch::DispatchClass, traits::Get}; use sp_npos_elections::*; use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData, prelude::*}; diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index 756333b197090..caa2d825262db 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -273,9 +273,9 @@ use codec::{Decode, Encode}; use frame_support::{ - dispatch::DispatchResult, + dispatch::{ClassifyDispatch, DispatchClass, DispatchResult, Pays, PaysFee, WeighData}, traits::IsSubType, - weights::{ClassifyDispatch, DispatchClass, Pays, PaysFee, WeighData, Weight}, + weights::Weight, }; use frame_system::ensure_signed; use log::info; diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index 78cdb3794f8d3..793cb4287f505 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -19,9 +19,10 @@ use crate::*; use frame_support::{ - assert_ok, parameter_types, + assert_ok, + dispatch::{DispatchInfo, GetDispatchInfo}, + parameter_types, traits::{ConstU64, OnInitialize}, - weights::{DispatchInfo, GetDispatchInfo}, }; use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 7b71224904378..96e71512e54bc 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -118,12 +118,12 @@ use codec::{Codec, Encode}; use frame_support::{ - dispatch::PostDispatchInfo, + dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, traits::{ EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, }, - weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Weight}, + weights::Weight, }; use sp_runtime::{ generic::Digest, diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 1fcf5c67150b5..fe5b9861853bf 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -40,11 +40,11 @@ use fg_primitives::{ GRANDPA_ENGINE_ID, }; use frame_support::{ - dispatch::DispatchResultWithPostInfo, + dispatch::{DispatchResultWithPostInfo, Pays}, pallet_prelude::Get, storage, traits::{KeyOwnerProofSystem, OneSessionHandler}, - weights::{Pays, Weight}, + weights::Weight, WeakBoundedVec, }; use scale_info::TypeInfo; diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 07c4fcad3078d..baf71dfff71dc 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -25,8 +25,8 @@ use codec::Encode; use fg_primitives::ScheduledChange; use frame_support::{ assert_err, assert_noop, assert_ok, + dispatch::{GetDispatchInfo, Pays}, traits::{Currency, OnFinalize, OneSessionHandler}, - weights::{GetDispatchInfo, Pays}, }; use frame_system::{EventRecord, Phase}; use sp_core::H256; diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 4593d6dade690..0c7e931e5ea2e 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -53,11 +53,12 @@ pub mod weights; use codec::{Decode, Encode}; use frame_support::{ dispatch::{ - DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, PostDispatchInfo, + 
DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, GetDispatchInfo, + PostDispatchInfo, }, ensure, traits::{Currency, Get, ReservableCurrency, WrapperKeepOpaque}, - weights::{GetDispatchInfo, Weight}, + weights::Weight, RuntimeDebug, }; use frame_system::{self as system, RawOrigin}; diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index e4616078b0761..749e5924cc885 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -41,10 +41,10 @@ use sp_std::prelude::*; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ + dispatch::Pays, ensure, pallet_prelude::Get, traits::{Currency, PreimageProvider, PreimageRecipient, ReservableCurrency}, - weights::Pays, BoundedVec, }; use scale_info::TypeInfo; diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index 7467fb0988dd6..dcc19e85085f8 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -35,10 +35,9 @@ pub mod weights; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - dispatch::DispatchError, + dispatch::{DispatchError, GetDispatchInfo}, ensure, traits::{Currency, Get, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency}, - weights::GetDispatchInfo, RuntimeDebug, }; use frame_system::{self as system}; diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 7f212b3859776..44b942d000012 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -52,10 +52,9 @@ use sp_std::{marker::PhantomData, prelude::*}; use frame_support::{ codec::{Decode, Encode, MaxEncodedLen}, - dispatch::{DispatchError, DispatchResultWithPostInfo}, + dispatch::{DispatchError, DispatchResultWithPostInfo, PostDispatchInfo}, ensure, traits::{EnsureOrigin, PollStatus, Polling, VoteTally}, - weights::PostDispatchInfo, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index dd9e7208320c4..7b85c119e68c5 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -160,9 +160,8 @@ use sp_runtime::traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversi use sp_std::prelude::*; use frame_support::{ - dispatch::PostDispatchInfo, + dispatch::{GetDispatchInfo, PostDispatchInfo}, traits::{BalanceStatus, Currency, ReservableCurrency}, - weights::GetDispatchInfo, BoundedVec, RuntimeDebug, }; diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 39dbac17cedd2..7e9b8e1b92fb7 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -60,12 +60,12 @@ pub mod weights; use codec::{Codec, Decode, Encode}; use frame_support::{ - dispatch::{DispatchError, DispatchResult, Dispatchable, Parameter}, + dispatch::{DispatchError, DispatchResult, Dispatchable, GetDispatchInfo, Parameter}, traits::{ schedule::{self, DispatchTime, MaybeHashed}, EnsureOrigin, Get, IsType, OriginTrait, PalletInfoAccess, PrivilegeCmp, StorageVersion, }, - weights::{GetDispatchInfo, Weight}, + weights::Weight, }; use frame_system::{self as system, ensure_signed}; pub use pallet::*; diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 386c413132f6c..13038527bc2f8 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -22,12 +22,13 @@ use frame_election_provider_support::{ Supports, VoteWeight, VoterOf, }; use frame_support::{ + dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ Currency, CurrencyToVote, Defensive, 
EstimateNextNewSession, Get, Imbalance, LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons, }, - weights::{Weight, WithPostDispatchInfo}, + weights::Weight, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::historical; diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 9807cffd43909..85badfac769af 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -21,10 +21,9 @@ use super::{ConfigOp, Event, MaxUnlockingChunks, *}; use frame_election_provider_support::{ElectionProvider, SortedListProvider, Support}; use frame_support::{ assert_noop, assert_ok, assert_storage_noop, bounded_vec, - dispatch::WithPostDispatchInfo, + dispatch::{extract_actual_weight, GetDispatchInfo, WithPostDispatchInfo}, pallet_prelude::*, traits::{Currency, Get, ReservableCurrency}, - weights::{extract_actual_weight, GetDispatchInfo}, }; use mock::*; use pallet_balances::Error as BalancesError; diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 96cd2d1615449..69eac2c1b93c4 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -95,7 +95,7 @@ use sp_runtime::{traits::StaticLookup, DispatchResult}; use sp_std::prelude::*; -use frame_support::{traits::UnfilteredDispatchable, weights::GetDispatchInfo}; +use frame_support::{dispatch::GetDispatchInfo, traits::UnfilteredDispatchable}; #[cfg(test)] mod mock; diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 6f0831b751b45..d0a40a09f2211 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -26,6 +26,7 @@ sp-core = { version = "6.0.0", default-features = false, path = "../../primitive sp-arithmetic = { version = "5.0.0", default-features = false, path = "../../primitives/arithmetic" } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } +sp-weights = { version = "4.0.0", default-features = false, path = "../../primitives/weights" } tt-call = "1.0.8" frame-support-procedural = { version = "4.0.0-dev", default-features = false, path = "./procedural" } paste = "1.0" @@ -62,6 +63,7 @@ std = [ "sp-inherents/std", "sp-staking/std", "sp-state-machine", + "sp-weights/std", "frame-support-procedural/std", "log/std", ] diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs b/frame/support/procedural/src/construct_runtime/expand/call.rs index b415132ab0426..101f0c371382f 100644 --- a/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -123,6 +123,9 @@ pub fn expand_outer_dispatch( } } } + // Deprecated, but will warn when used + #[allow(deprecated)] + impl #scrate::weights::GetDispatchInfo for RuntimeCall {} impl #scrate::dispatch::GetCallMetadata for RuntimeCall { fn get_call_metadata(&self) -> #scrate::dispatch::CallMetadata { use #scrate::dispatch::GetCallName; @@ -161,8 +164,8 @@ pub fn expand_outer_dispatch( impl #scrate::dispatch::Dispatchable for RuntimeCall { type Origin = Origin; type Config = RuntimeCall; - type Info = #scrate::weights::DispatchInfo; - type PostInfo = #scrate::weights::PostDispatchInfo; + type Info = #scrate::dispatch::DispatchInfo; + type PostInfo = #scrate::dispatch::PostDispatchInfo; fn dispatch(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { if !::filter_call(&origin, &self) { return #scrate::sp_std::result::Result::Err( diff --git 
a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 8b2922b5fa4f2..d5c84fa37510d 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -255,6 +255,10 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } } + // Deprecated, but will warn when used + #[allow(deprecated)] + impl<#type_impl_gen> #frame_support::weights::GetDispatchInfo for #call_ident<#type_use_gen> #where_clause {} + impl<#type_impl_gen> #frame_support::dispatch::GetCallName for #call_ident<#type_use_gen> #where_clause { diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 9a6654d5524d4..1d8930f96fbe5 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -31,18 +31,22 @@ pub use crate::{ traits::{ CallMetadata, GetCallMetadata, GetCallName, GetStorageVersion, UnfilteredDispatchable, }, - weights::{ - ClassifyDispatch, DispatchInfo, GetDispatchInfo, PaysFee, PostDispatchInfo, - TransactionPriority, WeighData, Weight, WithPostDispatchInfo, - }, }; -pub use sp_runtime::{traits::Dispatchable, DispatchError, RuntimeDebug}; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +use sp_runtime::{ + generic::{CheckedExtrinsic, UncheckedExtrinsic}, + traits::SignedExtension, +}; +pub use sp_runtime::{ + traits::Dispatchable, transaction_validity::TransactionPriority, DispatchError, RuntimeDebug, +}; +pub use sp_weights::Weight; /// The return type of a `Dispatchable` in frame. When returned explicitly from /// a dispatchable function it allows overriding the default `PostDispatchInfo` /// returned from a dispatch. -pub type DispatchResultWithPostInfo = - sp_runtime::DispatchResultWithInfo; +pub type DispatchResultWithPostInfo = sp_runtime::DispatchResultWithInfo; /// Unaugmented version of `DispatchResultWithPostInfo` that can be returned from /// dispatchable functions and is automatically converted to the augmented type. Should be @@ -51,8 +55,7 @@ pub type DispatchResultWithPostInfo = pub type DispatchResult = Result<(), sp_runtime::DispatchError>; /// The error type contained in a `DispatchResultWithPostInfo`. -pub type DispatchErrorWithPostInfo = - sp_runtime::DispatchErrorWithPostInfo; +pub type DispatchErrorWithPostInfo = sp_runtime::DispatchErrorWithPostInfo; /// Serializable version of pallet dispatchable. pub trait Callable { @@ -91,6 +94,552 @@ impl From> for RawOrigin { pub trait Parameter: Codec + EncodeLike + Clone + Eq + fmt::Debug + scale_info::TypeInfo {} impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug + scale_info::TypeInfo {} +/// Means of classifying a dispatchable function. +pub trait ClassifyDispatch { + /// Classify the dispatch function based on input data `target` of type `T`. When implementing + /// this for a dispatchable, `T` will be a tuple of all arguments given to the function (except + /// origin). + fn classify_dispatch(&self, target: T) -> DispatchClass; +} + +/// Indicates if dispatch function should pay fees or not. +/// +/// If set to `Pays::No`, the block resource limits are applied, yet no fee is deducted. +pub trait PaysFee { + fn pays_fee(&self, _target: T) -> Pays; +} + +/// Explicit enum to denote if a transaction pays fee or not. +#[derive(Clone, Copy, Eq, PartialEq, RuntimeDebug, Encode, Decode, TypeInfo)] +pub enum Pays { + /// Transactor will pay related fees. + Yes, + /// Transactor will NOT pay related fees. 
+ No, +} + +impl Default for Pays { + fn default() -> Self { + Self::Yes + } +} + +impl From for PostDispatchInfo { + fn from(pays_fee: Pays) -> Self { + Self { actual_weight: None, pays_fee } + } +} + +/// A generalized group of dispatch types. +/// +/// NOTE whenever upgrading the enum make sure to also update +/// [DispatchClass::all] and [DispatchClass::non_mandatory] helper functions. +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum DispatchClass { + /// A normal dispatch. + Normal, + /// An operational dispatch. + Operational, + /// A mandatory dispatch. These kinds of dispatch are always included regardless of their + /// weight, therefore it is critical that they are separately validated to ensure that a + /// malicious validator cannot craft a valid but impossibly heavy block. Usually this just + /// means ensuring that the extrinsic can only be included once and that it is always very + /// light. + /// + /// Do *NOT* use it for extrinsics that can be heavy. + /// + /// The only real use case for this is inherent extrinsics that are required to execute in a + /// block for the block to be valid, and it solves the issue in the case that the block + /// initialization is sufficiently heavy to mean that those inherents do not fit into the + /// block. Essentially, we assume that in these exceptional circumstances, it is better to + /// allow an overweight block to be created than to not allow any block at all to be created. + Mandatory, +} + +impl Default for DispatchClass { + fn default() -> Self { + Self::Normal + } +} + +impl DispatchClass { + /// Returns an array containing all dispatch classes. + pub fn all() -> &'static [DispatchClass] { + &[DispatchClass::Normal, DispatchClass::Operational, DispatchClass::Mandatory] + } + + /// Returns an array of all dispatch classes except `Mandatory`. + pub fn non_mandatory() -> &'static [DispatchClass] { + &[DispatchClass::Normal, DispatchClass::Operational] + } +} + +/// A trait that represents one or many values of given type. +/// +/// Useful to accept as parameter type to let the caller pass either a single value directly +/// or an iterator. +pub trait OneOrMany { + /// The iterator type. + type Iter: Iterator; + /// Convert this item into an iterator. + fn into_iter(self) -> Self::Iter; +} + +impl OneOrMany for DispatchClass { + type Iter = sp_std::iter::Once; + fn into_iter(self) -> Self::Iter { + sp_std::iter::once(self) + } +} + +impl<'a> OneOrMany for &'a [DispatchClass] { + type Iter = sp_std::iter::Cloned>; + fn into_iter(self) -> Self::Iter { + self.iter().cloned() + } +} + +/// A bundle of static information collected from the `#[pallet::weight]` attributes. +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct DispatchInfo { + /// Weight of this transaction. + pub weight: Weight, + /// Class of this transaction. + pub class: DispatchClass, + /// Does this transaction pay fees. + pub pays_fee: Pays, +} + +/// A `Dispatchable` function (aka transaction) that can carry some static information along with +/// it, using the `#[pallet::weight]` attribute. +pub trait GetDispatchInfo { + /// Return a `DispatchInfo`, containing relevant information of this dispatch. + /// + /// This is done independently of its encoded size. 
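For orientation, a minimal sketch of how the `DispatchInfo`/`GetDispatchInfo` pair being moved into `frame_support::dispatch` is consumed; `MockCall` is a hypothetical call type invented for this illustration and is not part of the patch:

    use frame_support::dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays};
    use frame_support::weights::Weight;

    // A hypothetical call type, only for illustration.
    enum MockCall {
        Transfer,
        SetCode,
    }

    impl GetDispatchInfo for MockCall {
        fn get_dispatch_info(&self) -> DispatchInfo {
            match self {
                // Ordinary user-facing call: normal class, pays a fee.
                MockCall::Transfer => DispatchInfo {
                    weight: Weight::from_ref_time(10_000),
                    class: DispatchClass::Normal,
                    pays_fee: Pays::Yes,
                },
                // Privileged call: operational class, fee waived.
                MockCall::SetCode => DispatchInfo {
                    weight: Weight::from_ref_time(1_000_000),
                    class: DispatchClass::Operational,
                    pays_fee: Pays::No,
                },
            }
        }
    }

    fn main() {
        let info = MockCall::Transfer.get_dispatch_info();
        assert_eq!(info.class, DispatchClass::Normal);
        assert_eq!(info.pays_fee, Pays::Yes);
    }

In runtime code this impl is normally generated by the pallet macros rather than written by hand; the sketch only shows what that generated code provides.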
+ fn get_dispatch_info(&self) -> DispatchInfo; +} + +impl GetDispatchInfo for () { + fn get_dispatch_info(&self) -> DispatchInfo { + DispatchInfo::default() + } +} + +/// Extract the actual weight from a dispatch result if any or fall back to the default weight. +pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Weight { + match result { + Ok(post_info) => post_info, + Err(err) => &err.post_info, + } + .calc_actual_weight(info) +} + +/// Extract the actual pays_fee from a dispatch result if any or fall back to the default weight. +pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Pays { + match result { + Ok(post_info) => post_info, + Err(err) => &err.post_info, + } + .pays_fee(info) +} + +/// Weight information that is only available post dispatch. +/// NOTE: This can only be used to reduce the weight or fee, not increase it. +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct PostDispatchInfo { + /// Actual weight consumed by a call or `None` which stands for the worst case static weight. + pub actual_weight: Option, + /// Whether this transaction should pay fees when all is said and done. + pub pays_fee: Pays, +} + +impl PostDispatchInfo { + /// Calculate how much (if any) weight was not used by the `Dispatchable`. + pub fn calc_unspent(&self, info: &DispatchInfo) -> Weight { + info.weight - self.calc_actual_weight(info) + } + + /// Calculate how much weight was actually spent by the `Dispatchable`. + pub fn calc_actual_weight(&self, info: &DispatchInfo) -> Weight { + if let Some(actual_weight) = self.actual_weight { + actual_weight.min(info.weight) + } else { + info.weight + } + } + + /// Determine if user should actually pay fees at the end of the dispatch. + pub fn pays_fee(&self, info: &DispatchInfo) -> Pays { + // If they originally were not paying fees, or the post dispatch info + // says they should not pay fees, then they don't pay fees. + // This is because the pre dispatch information must contain the + // worst case for weight and fees paid. + if info.pays_fee == Pays::No || self.pays_fee == Pays::No { + Pays::No + } else { + // Otherwise they pay. + Pays::Yes + } + } +} + +impl From<()> for PostDispatchInfo { + fn from(_: ()) -> Self { + Self { actual_weight: None, pays_fee: Default::default() } + } +} + +impl sp_runtime::traits::Printable for PostDispatchInfo { + fn print(&self) { + "actual_weight=".print(); + match self.actual_weight { + Some(weight) => weight.print(), + None => "max-weight".print(), + }; + "pays_fee=".print(); + match self.pays_fee { + Pays::Yes => "Yes".print(), + Pays::No => "No".print(), + } + } +} + +/// Allows easy conversion from `DispatchError` to `DispatchErrorWithPostInfo` for dispatchables +/// that want to return a custom a posterior weight on error. +pub trait WithPostDispatchInfo { + /// Call this on your modules custom errors type in order to return a custom weight on error. 
+ /// + /// # Example + /// + /// ```ignore + /// let who = ensure_signed(origin).map_err(|e| e.with_weight(Weight::from_ref_time(100)))?; + /// ensure!(who == me, Error::::NotMe.with_weight(200_000)); + /// ``` + fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; +} + +impl WithPostDispatchInfo for T +where + T: Into, +{ + fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo { + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some(actual_weight), + pays_fee: Default::default(), + }, + error: self.into(), + } + } +} + +/// Implementation for unchecked extrinsic. +impl GetDispatchInfo + for UncheckedExtrinsic +where + Call: GetDispatchInfo, + Extra: SignedExtension, +{ + fn get_dispatch_info(&self) -> DispatchInfo { + self.function.get_dispatch_info() + } +} + +/// Implementation for checked extrinsic. +impl GetDispatchInfo for CheckedExtrinsic +where + Call: GetDispatchInfo, +{ + fn get_dispatch_info(&self) -> DispatchInfo { + self.function.get_dispatch_info() + } +} + +/// Implementation for test extrinsic. +#[cfg(feature = "std")] +impl GetDispatchInfo for sp_runtime::testing::TestXt { + fn get_dispatch_info(&self) -> DispatchInfo { + // for testing: weight == size. + DispatchInfo { + weight: Weight::from_ref_time(self.encode().len() as _), + pays_fee: Pays::Yes, + ..Default::default() + } + } +} + +/// A struct holding value for each `DispatchClass`. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] +pub struct PerDispatchClass { + /// Value for `Normal` extrinsics. + normal: T, + /// Value for `Operational` extrinsics. + operational: T, + /// Value for `Mandatory` extrinsics. + mandatory: T, +} + +impl PerDispatchClass { + /// Create new `PerDispatchClass` with the same value for every class. + pub fn new(val: impl Fn(DispatchClass) -> T) -> Self { + Self { + normal: val(DispatchClass::Normal), + operational: val(DispatchClass::Operational), + mandatory: val(DispatchClass::Mandatory), + } + } + + /// Get a mutable reference to current value of given class. + pub fn get_mut(&mut self, class: DispatchClass) -> &mut T { + match class { + DispatchClass::Operational => &mut self.operational, + DispatchClass::Normal => &mut self.normal, + DispatchClass::Mandatory => &mut self.mandatory, + } + } + + /// Get current value for given class. + pub fn get(&self, class: DispatchClass) -> &T { + match class { + DispatchClass::Normal => &self.normal, + DispatchClass::Operational => &self.operational, + DispatchClass::Mandatory => &self.mandatory, + } + } +} + +impl PerDispatchClass { + /// Set the value of given class. + pub fn set(&mut self, new: T, class: impl OneOrMany) { + for class in class.into_iter() { + *self.get_mut(class) = new.clone(); + } + } +} + +impl PerDispatchClass { + /// Returns the total weight consumed by all extrinsics in the block. + pub fn total(&self) -> Weight { + let mut sum = Weight::zero(); + for class in DispatchClass::all() { + sum = sum.saturating_add(*self.get(*class)); + } + sum + } + + /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. + pub fn add(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_add(weight); + } + + /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would + /// occur. 
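As a small usage sketch (not part of the patch), the `PerDispatchClass<Weight>` helpers defined here are typically used to track per-class weight consumption inside a block:

    use frame_support::dispatch::{DispatchClass, PerDispatchClass};
    use frame_support::weights::Weight;

    fn main() {
        // Start every class at zero.
        let mut consumed: PerDispatchClass<Weight> = PerDispatchClass::new(|_| Weight::zero());

        // Record a couple of extrinsics.
        consumed.add(Weight::from_ref_time(5_000), DispatchClass::Normal);
        consumed.add(Weight::from_ref_time(1_000), DispatchClass::Operational);

        assert_eq!(*consumed.get(DispatchClass::Normal), Weight::from_ref_time(5_000));
        // `total` sums all three classes.
        assert_eq!(consumed.total(), Weight::from_ref_time(6_000));
    }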
+ pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { + let value = self.get_mut(class); + *value = value.checked_add(&weight).ok_or(())?; + Ok(()) + } + + /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of + /// `Weight`. + pub fn sub(&mut self, weight: Weight, class: DispatchClass) { + let value = self.get_mut(class); + *value = value.saturating_sub(weight); + } +} + +/// Means of weighing some particular kind of data (`T`). +pub trait WeighData { + /// Weigh the data `T` given by `target`. When implementing this for a dispatchable, `T` will be + /// a tuple of all arguments given to the function (except origin). + fn weigh_data(&self, target: T) -> Weight; +} + +impl WeighData for Weight { + fn weigh_data(&self, _: T) -> Weight { + return *self + } +} + +impl PaysFee for (Weight, DispatchClass, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.2 + } +} + +impl WeighData for (Weight, DispatchClass) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl WeighData for (Weight, DispatchClass, Pays) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (Weight, DispatchClass) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (Weight, DispatchClass) { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for (Weight, Pays) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (Weight, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for (Weight, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.1 + } +} + +impl From<(Option, Pays)> for PostDispatchInfo { + fn from(post_weight_info: (Option, Pays)) -> Self { + let (actual_weight, pays_fee) = post_weight_info; + Self { actual_weight, pays_fee } + } +} + +impl From> for PostDispatchInfo { + fn from(actual_weight: Option) -> Self { + Self { actual_weight, pays_fee: Default::default() } + } +} + +impl ClassifyDispatch for Weight { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for Weight { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl ClassifyDispatch for (Weight, DispatchClass, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +// TODO: Eventually remove these + +impl From> for PostDispatchInfo { + fn from(maybe_actual_computation: Option) -> Self { + let actual_weight = match maybe_actual_computation { + Some(actual_computation) => Some(Weight::zero().set_ref_time(actual_computation)), + None => None, + }; + Self { actual_weight, pays_fee: Default::default() } + } +} + +impl From<(Option, Pays)> for PostDispatchInfo { + fn from(post_weight_info: (Option, Pays)) -> Self { + let (maybe_actual_time, pays_fee) = post_weight_info; + let actual_weight = match maybe_actual_time { + Some(actual_time) => Some(Weight::zero().set_ref_time(actual_time)), + None => None, + }; + Self { actual_weight, pays_fee } + } +} + +impl ClassifyDispatch for u64 { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for u64 { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for u64 { + fn weigh_data(&self, _: T) -> Weight { + return Weight::zero().set_ref_time(*self) + } +} + +impl WeighData for (u64, DispatchClass, Pays) { + fn 
weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (u64, DispatchClass, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (u64, DispatchClass, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.2 + } +} + +impl WeighData for (u64, DispatchClass) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (u64, DispatchClass) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + self.1 + } +} + +impl PaysFee for (u64, DispatchClass) { + fn pays_fee(&self, _: T) -> Pays { + Pays::Yes + } +} + +impl WeighData for (u64, Pays) { + fn weigh_data(&self, args: T) -> Weight { + return self.0.weigh_data(args) + } +} + +impl ClassifyDispatch for (u64, Pays) { + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::Normal + } +} + +impl PaysFee for (u64, Pays) { + fn pays_fee(&self, _: T) -> Pays { + self.1 + } +} + +// END TODO + /// Declares a `Module` struct and a `Call` enum, which implements the dispatch logic. /// /// ## Declaration @@ -2629,13 +3178,14 @@ macro_rules! __check_reserved_fn_name { mod tests { use super::*; use crate::{ + dispatch::{DispatchClass, DispatchInfo, Pays}, metadata::*, traits::{ CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, PalletInfo, }, - weights::{DispatchClass, DispatchInfo, Pays, RuntimeDbWeight}, }; + use sp_weights::RuntimeDbWeight; pub trait Config: system::Config + Sized where @@ -2940,3 +3490,187 @@ mod tests { Call::::new_call_variant_aux_0(); } } + +#[cfg(test)] +// Do not complain about unused `dispatch` and `dispatch_aux`. +#[allow(dead_code)] +mod weight_tests { + use super::*; + use sp_core::{parameter_types, Get}; + use sp_weights::RuntimeDbWeight; + + pub trait Config: 'static { + type Origin; + type Balance; + type BlockNumber; + type DbWeight: Get; + type PalletInfo: crate::traits::PalletInfo; + } + + pub struct TraitImpl {} + + parameter_types! { + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 100, + write: 1000, + }; + } + + impl Config for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + type Balance = u32; + type DbWeight = DbWeight; + type PalletInfo = crate::tests::PanicPalletInfo; + } + + decl_module! 
{ + pub struct Module for enum Call where origin: T::Origin, system=self { + // no arguments, fixed weight + #[weight = 1000] + fn f00(_origin) { unimplemented!(); } + + #[weight = (1000, DispatchClass::Mandatory)] + fn f01(_origin) { unimplemented!(); } + + #[weight = (1000, Pays::No)] + fn f02(_origin) { unimplemented!(); } + + #[weight = (1000, DispatchClass::Operational, Pays::No)] + fn f03(_origin) { unimplemented!(); } + + // weight = a x 10 + b + #[weight = ((_a * 10 + _eb * 1) as u64, DispatchClass::Normal, Pays::Yes)] + fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); } + + #[weight = (0, DispatchClass::Operational, Pays::Yes)] + fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } + + #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + Weight::from_ref_time(10_000)] + fn f20(_origin) { unimplemented!(); } + + #[weight = T::DbWeight::get().reads_writes(6, 5) + Weight::from_ref_time(40_000)] + fn f21(_origin) { unimplemented!(); } + + } + } + + #[test] + fn weights_are_correct() { + // #[weight = 1000] + let info = Call::::f00 {}.get_dispatch_info(); + assert_eq!(info.weight, Weight::from_ref_time(1000)); + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = (1000, DispatchClass::Mandatory)] + let info = Call::::f01 {}.get_dispatch_info(); + assert_eq!(info.weight, Weight::from_ref_time(1000)); + assert_eq!(info.class, DispatchClass::Mandatory); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = (1000, Pays::No)] + let info = Call::::f02 {}.get_dispatch_info(); + assert_eq!(info.weight, Weight::from_ref_time(1000)); + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::No); + + // #[weight = (1000, DispatchClass::Operational, Pays::No)] + let info = Call::::f03 {}.get_dispatch_info(); + assert_eq!(info.weight, Weight::from_ref_time(1000)); + assert_eq!(info.class, DispatchClass::Operational); + assert_eq!(info.pays_fee, Pays::No); + + // #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] + let info = Call::::f11 { _a: 13, _eb: 20 }.get_dispatch_info(); + assert_eq!(info.weight, Weight::from_ref_time(150)); // 13*10 + 20 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = (0, DispatchClass::Operational, Pays::Yes)] + let info = Call::::f12 { _a: 10, _eb: 20 }.get_dispatch_info(); + assert_eq!(info.weight, Weight::from_ref_time(0)); + assert_eq!(info.class, DispatchClass::Operational); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] + let info = Call::::f20 {}.get_dispatch_info(); + assert_eq!(info.weight, Weight::from_ref_time(12300)); // 100*3 + 1000*2 + 10_1000 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + + // #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] + let info = Call::::f21 {}.get_dispatch_info(); + assert_eq!(info.weight, Weight::from_ref_time(45600)); // 100*6 + 1000*5 + 40_1000 + assert_eq!(info.class, DispatchClass::Normal); + assert_eq!(info.pays_fee, Pays::Yes); + } + + #[test] + fn extract_actual_weight_works() { + let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; + assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), Weight::from_ref_time(7)); + assert_eq!( + extract_actual_weight(&Ok(Some(1000).into()), &pre), + Weight::from_ref_time(1000) + ); + assert_eq!( + extract_actual_weight( + 
&Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(9))), + &pre + ), + Weight::from_ref_time(9) + ); + } + + #[test] + fn extract_actual_weight_caps_at_pre_weight() { + let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; + assert_eq!( + extract_actual_weight(&Ok(Some(1250).into()), &pre), + Weight::from_ref_time(1000) + ); + assert_eq!( + extract_actual_weight( + &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(1300))), + &pre + ), + Weight::from_ref_time(1000), + ); + } + + #[test] + fn extract_actual_pays_fee_works() { + let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; + assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::Yes); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::No).into()), &pre), Pays::No); + assert_eq!( + extract_actual_pays_fee( + &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(9))), + &pre + ), + Pays::Yes + ); + assert_eq!( + extract_actual_pays_fee( + &Err(DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }, + error: DispatchError::BadOrigin, + }), + &pre + ), + Pays::No + ); + + let pre = DispatchInfo { + weight: Weight::from_ref_time(1000), + pays_fee: Pays::No, + ..Default::default() + }; + assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::No); + assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::No); + assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::No); + } +} diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 315d856e5d0f8..8dcc51226923b 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1376,7 +1376,10 @@ pub mod pallet_prelude { #[cfg(feature = "std")] pub use crate::traits::GenesisBuild; pub use crate::{ - dispatch::{DispatchError, DispatchResult, DispatchResultWithPostInfo, Parameter}, + dispatch::{ + DispatchClass, DispatchError, DispatchResult, DispatchResultWithPostInfo, Parameter, + Pays, + }, ensure, inherent::{InherentData, InherentIdentifier, ProvideInherent}, storage, @@ -1391,7 +1394,6 @@ pub mod pallet_prelude { ConstU32, EnsureOrigin, Get, GetDefault, GetStorageVersion, Hooks, IsType, PalletInfoAccess, StorageInfoTrait, StorageVersion, TypedGet, }, - weights::{DispatchClass, Pays, Weight}, Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, }; @@ -1407,6 +1409,7 @@ pub mod pallet_prelude { MAX_MODULE_ERROR_ENCODED_SIZE, }; pub use sp_std::marker::PhantomData; + pub use sp_weights::Weight; } /// `pallet` attribute macro allows to define a pallet to be used in `construct_runtime!`. diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 59d5ec067baed..7fc4a6fb08a5a 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -711,13 +711,13 @@ pub trait EstimateCallFee { /// /// The dispatch info and the length is deduced from the call. The post info can optionally be /// provided. 
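A brief sketch of how the std-only `ConstU32` mock implementation of `EstimateCallFee` (touched just below) can be exercised in tests; `DummyCall` is a hypothetical placeholder type:

    use frame_support::dispatch::PostDispatchInfo;
    use frame_support::traits::{ConstU32, EstimateCallFee};

    struct DummyCall;

    fn main() {
        // The mock ignores the call and the post info and returns the constant,
        // which is convenient for tests of fee-dependent pallets.
        let fee: u64 = <ConstU32<100> as EstimateCallFee<DummyCall, u64>>::estimate_call_fee(
            &DummyCall,
            PostDispatchInfo::default(),
        );
        assert_eq!(fee, 100);
    }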
- fn estimate_call_fee(call: &Call, post_info: crate::weights::PostDispatchInfo) -> Balance; + fn estimate_call_fee(call: &Call, post_info: crate::dispatch::PostDispatchInfo) -> Balance; } // Useful for building mocks. #[cfg(feature = "std")] impl, const T: u32> EstimateCallFee for ConstU32 { - fn estimate_call_fee(_: &Call, _: crate::weights::PostDispatchInfo) -> Balance { + fn estimate_call_fee(_: &Call, _: crate::dispatch::PostDispatchInfo) -> Balance { T.into() } } diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index b4fd896e865dc..9ff49b97bf21f 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -15,102 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Primitives for transaction weighting. -//! -//! Every dispatchable function is responsible for providing `#[weight = $x]` attribute. In this -//! snipped, `$x` can be any user provided struct that implements the following traits: -//! -//! - [`WeighData`]: the weight amount. -//! - [`ClassifyDispatch`]: class of the dispatch. -//! - [`PaysFee`]: whether this weight should be translated to fee and deducted upon dispatch. -//! -//! Substrate then bundles the output information of the three traits into [`DispatchInfo`] struct -//! and provides it by implementing the [`GetDispatchInfo`] for all `Call` both inner and outer call -//! types. -//! -//! Substrate provides two pre-defined ways to annotate weight: -//! -//! ### 1. Fixed values -//! -//! This can only be used when all 3 traits can be resolved statically. You have 3 degrees of -//! configuration: -//! -//! 1. Define only weight, **in which case `ClassifyDispatch` will be `Normal` and `PaysFee` will be -//! `Yes`**. -//! -//! ``` -//! # use frame_system::Config; -//! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = 1000] -//! fn dispatching(origin) { unimplemented!() } -//! } -//! } -//! # fn main() {} -//! ``` -//! -//! 2.1 Define weight and class, **in which case `PaysFee` would be `Yes`**. -//! -//! ``` -//! # use frame_system::Config; -//! # use frame_support::weights::DispatchClass; -//! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = (1000, DispatchClass::Operational)] -//! fn dispatching(origin) { unimplemented!() } -//! } -//! } -//! # fn main() {} -//! ``` -//! -//! 2.2 Define weight and `PaysFee`, **in which case `ClassifyDispatch` would be `Normal`**. -//! -//! ``` -//! # use frame_system::Config; -//! # use frame_support::weights::Pays; -//! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = (1000, Pays::No)] -//! fn dispatching(origin) { unimplemented!() } -//! } -//! } -//! # fn main() {} -//! ``` -//! -//! 3. Define all 3 parameters. -//! -//! ``` -//! # use frame_system::Config; -//! # use frame_support::weights::{DispatchClass, Pays}; -//! frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = (1000, DispatchClass::Operational, Pays::No)] -//! fn dispatching(origin) { unimplemented!() } -//! } -//! } -//! # fn main() {} -//! ``` -//! -//! ### 2. Define weights as a function of input arguments. -//! -//! The arguments of the dispatch are available in the weight expressions as a borrowed value. -//! -//! ``` -//! # use frame_system::Config; -//! # use frame_support::weights::{DispatchClass, Pays}; -//! 
frame_support::decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = ( -//! *a as u64 + *b, -//! DispatchClass::Operational, -//! if *a > 1000 { Pays::Yes } else { Pays::No } -//! )] -//! fn dispatching(origin, a: u32, b: u64) { unimplemented!() } -//! } -//! } -//! # fn main() {} -//! ``` -//! FRAME assumes a weight of `1_000_000_000_000` equals 1 second of compute on a standard machine. +//! Re-exports `sp-weights` public API, and contains benchmarked weight constants specific to +//! FRAME. //! //! Latest machine specification used to benchmark are: //! - Digital Ocean: ubuntu-s-2vcpu-4gb-ams3-01 @@ -123,41 +29,14 @@ mod block_weights; mod extrinsic_weights; mod paritydb_weights; mod rocksdb_weights; -mod weight_v2; - -use crate::{ - dispatch::{DispatchError, DispatchErrorWithPostInfo, DispatchResultWithPostInfo}, - traits::Get, -}; -use codec::{Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -use smallvec::SmallVec; -use sp_arithmetic::{ - traits::{BaseArithmetic, Saturating, Unsigned}, - Perbill, -}; -use sp_runtime::{ - generic::{CheckedExtrinsic, UncheckedExtrinsic}, - traits::{SaturatedConversion, SignedExtension}, - RuntimeDebug, -}; - -/// Re-export priority as type -pub use sp_runtime::transaction_validity::TransactionPriority; -pub use weight_v2::*; +use crate::dispatch; +pub use sp_weights::*; /// These constants are specific to FRAME, and the current implementation of its various components. /// For example: FRAME System, FRAME Executive, our FRAME support libraries, etc... pub mod constants { - use super::Weight; - - pub const WEIGHT_PER_SECOND: Weight = Weight::from_ref_time(1_000_000_000_000); - pub const WEIGHT_PER_MILLIS: Weight = Weight::from_ref_time(1_000_000_000); - pub const WEIGHT_PER_MICROS: Weight = Weight::from_ref_time(1_000_000); - pub const WEIGHT_PER_NANOS: Weight = Weight::from_ref_time(1_000); + pub use sp_weights::constants::*; // Expose the Block and Extrinsic base weights. pub use super::{block_weights::BlockExecutionWeight, extrinsic_weights::ExtrinsicBaseWeight}; @@ -168,786 +47,69 @@ pub mod constants { }; } -/// Means of weighing some particular kind of data (`T`). -pub trait WeighData { - /// Weigh the data `T` given by `target`. When implementing this for a dispatchable, `T` will be - /// a tuple of all arguments given to the function (except origin). - fn weigh_data(&self, target: T) -> Weight; -} - -/// Means of classifying a dispatchable function. -pub trait ClassifyDispatch { - /// Classify the dispatch function based on input data `target` of type `T`. When implementing - /// this for a dispatchable, `T` will be a tuple of all arguments given to the function (except - /// origin). - fn classify_dispatch(&self, target: T) -> DispatchClass; -} - -/// Indicates if dispatch function should pay fees or not. -/// If set to `Pays::No`, the block resource limits are applied, yet no fee is deducted. -pub trait PaysFee { - fn pays_fee(&self, _target: T) -> Pays; -} - -/// Explicit enum to denote if a transaction pays fee or not. -#[derive(Clone, Copy, Eq, PartialEq, RuntimeDebug, Encode, Decode, TypeInfo)] -pub enum Pays { - /// Transactor will pay related fees. - Yes, - /// Transactor will NOT pay related fees. 
- No, -} - -impl Default for Pays { - fn default() -> Self { - Self::Yes - } -} - -impl From for PostDispatchInfo { - fn from(pays_fee: Pays) -> Self { - Self { actual_weight: None, pays_fee } - } -} - -/// A generalized group of dispatch types. -/// -/// NOTE whenever upgrading the enum make sure to also update -/// [DispatchClass::all] and [DispatchClass::non_mandatory] helper functions. -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] -#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, TypeInfo)] -pub enum DispatchClass { - /// A normal dispatch. - Normal, - /// An operational dispatch. - Operational, - /// A mandatory dispatch. These kinds of dispatch are always included regardless of their - /// weight, therefore it is critical that they are separately validated to ensure that a - /// malicious validator cannot craft a valid but impossibly heavy block. Usually this just - /// means ensuring that the extrinsic can only be included once and that it is always very - /// light. - /// - /// Do *NOT* use it for extrinsics that can be heavy. - /// - /// The only real use case for this is inherent extrinsics that are required to execute in a - /// block for the block to be valid, and it solves the issue in the case that the block - /// initialization is sufficiently heavy to mean that those inherents do not fit into the - /// block. Essentially, we assume that in these exceptional circumstances, it is better to - /// allow an overweight block to be created than to not allow any block at all to be created. - Mandatory, -} - -impl Default for DispatchClass { - fn default() -> Self { - Self::Normal - } -} - -impl DispatchClass { - /// Returns an array containing all dispatch classes. - pub fn all() -> &'static [DispatchClass] { - &[DispatchClass::Normal, DispatchClass::Operational, DispatchClass::Mandatory] - } - - /// Returns an array of all dispatch classes except `Mandatory`. - pub fn non_mandatory() -> &'static [DispatchClass] { - &[DispatchClass::Normal, DispatchClass::Operational] - } -} - -/// A trait that represents one or many values of given type. -/// -/// Useful to accept as parameter type to let the caller pass either a single value directly -/// or an iterator. -pub trait OneOrMany { - /// The iterator type. - type Iter: Iterator; - /// Convert this item into an iterator. - fn into_iter(self) -> Self::Iter; -} - -impl OneOrMany for DispatchClass { - type Iter = sp_std::iter::Once; - fn into_iter(self) -> Self::Iter { - sp_std::iter::once(self) - } -} - -impl<'a> OneOrMany for &'a [DispatchClass] { - type Iter = sp_std::iter::Cloned>; - fn into_iter(self) -> Self::Iter { - self.iter().cloned() - } -} - -/// A bundle of static information collected from the `#[weight = $x]` attributes. -#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] -pub struct DispatchInfo { - /// Weight of this transaction. - pub weight: Weight, - /// Class of this transaction. - pub class: DispatchClass, - /// Does this transaction pay fees. - pub pays_fee: Pays, -} - -/// A `Dispatchable` function (aka transaction) that can carry some static information along with -/// it, using the `#[weight]` attribute. -pub trait GetDispatchInfo { - /// Return a `DispatchInfo`, containing relevant information of this dispatch. - /// - /// This is done independently of its encoded size. 
- fn get_dispatch_info(&self) -> DispatchInfo; -} - -impl GetDispatchInfo for () { - fn get_dispatch_info(&self) -> DispatchInfo { - DispatchInfo::default() - } -} - -/// Weight information that is only available post dispatch. -/// NOTE: This can only be used to reduce the weight or fee, not increase it. -#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] -pub struct PostDispatchInfo { - /// Actual weight consumed by a call or `None` which stands for the worst case static weight. - pub actual_weight: Option, - /// Whether this transaction should pay fees when all is said and done. - pub pays_fee: Pays, -} - -impl PostDispatchInfo { - /// Calculate how much (if any) weight was not used by the `Dispatchable`. - pub fn calc_unspent(&self, info: &DispatchInfo) -> Weight { - info.weight - self.calc_actual_weight(info) - } - - /// Calculate how much weight was actually spent by the `Dispatchable`. - pub fn calc_actual_weight(&self, info: &DispatchInfo) -> Weight { - if let Some(actual_weight) = self.actual_weight { - actual_weight.min(info.weight) - } else { - info.weight - } - } - - /// Determine if user should actually pay fees at the end of the dispatch. - pub fn pays_fee(&self, info: &DispatchInfo) -> Pays { - // If they originally were not paying fees, or the post dispatch info - // says they should not pay fees, then they don't pay fees. - // This is because the pre dispatch information must contain the - // worst case for weight and fees paid. - if info.pays_fee == Pays::No || self.pays_fee == Pays::No { - Pays::No - } else { - // Otherwise they pay. - Pays::Yes - } - } -} - -/// Extract the actual weight from a dispatch result if any or fall back to the default weight. -pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Weight { - match result { - Ok(post_info) => post_info, - Err(err) => &err.post_info, - } - .calc_actual_weight(info) -} - -/// Extract the actual pays_fee from a dispatch result if any or fall back to the default weight. -pub fn extract_actual_pays_fee(result: &DispatchResultWithPostInfo, info: &DispatchInfo) -> Pays { - match result { - Ok(post_info) => post_info, - Err(err) => &err.post_info, - } - .pays_fee(info) -} - -impl From<()> for PostDispatchInfo { - fn from(_: ()) -> Self { - Self { actual_weight: None, pays_fee: Default::default() } - } -} - -impl sp_runtime::traits::Printable for PostDispatchInfo { - fn print(&self) { - "actual_weight=".print(); - match self.actual_weight { - Some(weight) => weight.print(), - None => "max-weight".print(), - }; - "pays_fee=".print(); - match self.pays_fee { - Pays::Yes => "Yes".print(), - Pays::No => "No".print(), - } - } -} - -/// Allows easy conversion from `DispatchError` to `DispatchErrorWithPostInfo` for dispatchables -/// that want to return a custom a posterior weight on error. -pub trait WithPostDispatchInfo { - /// Call this on your modules custom errors type in order to return a custom weight on error. 
- /// - /// # Example - /// - /// ```ignore - /// let who = ensure_signed(origin).map_err(|e| e.with_weight(Weight::from_ref_time(100)))?; - /// ensure!(who == me, Error::::NotMe.with_weight(200_000)); - /// ``` - fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; -} - -impl WithPostDispatchInfo for T -where - T: Into, -{ - fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo { - DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { - actual_weight: Some(actual_weight), - pays_fee: Default::default(), - }, - error: self.into(), - } - } -} - -/// Implementation for unchecked extrinsic. -impl GetDispatchInfo - for UncheckedExtrinsic -where - Call: GetDispatchInfo, - Extra: SignedExtension, -{ - fn get_dispatch_info(&self) -> DispatchInfo { - self.function.get_dispatch_info() - } -} - -/// Implementation for checked extrinsic. -impl GetDispatchInfo for CheckedExtrinsic -where - Call: GetDispatchInfo, -{ - fn get_dispatch_info(&self) -> DispatchInfo { - self.function.get_dispatch_info() - } -} - -/// Implementation for test extrinsic. -#[cfg(feature = "std")] -impl GetDispatchInfo for sp_runtime::testing::TestXt { - fn get_dispatch_info(&self) -> DispatchInfo { - // for testing: weight == size. - DispatchInfo { - weight: Weight::from_ref_time(self.encode().len() as _), - pays_fee: Pays::Yes, - ..Default::default() - } - } -} - -/// The weight of database operations that the runtime can invoke. -/// -/// NOTE: This is currently only measured in computational time, and will probably -/// be updated all together once proof size is accounted for. -#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] -pub struct RuntimeDbWeight { - pub read: u64, - pub write: u64, -} - -impl RuntimeDbWeight { - pub fn reads(self, r: u64) -> Weight { - Weight::from_ref_time(self.read.saturating_mul(r)) - } - - pub fn writes(self, w: u64) -> Weight { - Weight::from_ref_time(self.write.saturating_mul(w)) - } - - pub fn reads_writes(self, r: u64, w: u64) -> Weight { - let read_weight = self.read.saturating_mul(r); - let write_weight = self.write.saturating_mul(w); - Weight::from_ref_time(read_weight.saturating_add(write_weight)) - } -} - -/// One coefficient and its position in the `WeightToFee`. -/// -/// One term of polynomial is calculated as: -/// -/// ```ignore -/// coeff_integer * x^(degree) + coeff_frac * x^(degree) -/// ``` -/// -/// The `negative` value encodes whether the term is added or substracted from the -/// overall polynomial result. -#[derive(Clone, Encode, Decode, TypeInfo)] -pub struct WeightToFeeCoefficient { - /// The integral part of the coefficient. - pub coeff_integer: Balance, - /// The fractional part of the coefficient. - pub coeff_frac: Perbill, - /// True iff the coefficient should be interpreted as negative. - pub negative: bool, - /// Degree/exponent of the term. - pub degree: u8, -} - -/// A list of coefficients that represent one polynomial. -pub type WeightToFeeCoefficients = SmallVec<[WeightToFeeCoefficient; 4]>; - -/// A trait that describes the weight to fee calculation. -pub trait WeightToFee { - /// The type that is returned as result from calculation. - type Balance: BaseArithmetic + From + Copy + Unsigned; - - /// Calculates the fee from the passed `weight`. - fn weight_to_fee(weight: &Weight) -> Self::Balance; -} - -/// A trait that describes the weight to fee calculation as polynomial. -/// -/// An implementor should only implement the `polynomial` function. 
-pub trait WeightToFeePolynomial { - /// The type that is returned as result from polynomial evaluation. - type Balance: BaseArithmetic + From + Copy + Unsigned; - - /// Returns a polynomial that describes the weight to fee conversion. - /// - /// This is the only function that should be manually implemented. Please note - /// that all calculation is done in the probably unsigned `Balance` type. This means - /// that the order of coefficients is important as putting the negative coefficients - /// first will most likely saturate the result to zero mid evaluation. - fn polynomial() -> WeightToFeeCoefficients; -} - -impl WeightToFee for T -where - T: WeightToFeePolynomial, -{ - type Balance = ::Balance; - - /// Calculates the fee from the passed `weight` according to the `polynomial`. - /// - /// This should not be overridden in most circumstances. Calculation is done in the - /// `Balance` type and never overflows. All evaluation is saturating. - fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::polynomial() - .iter() - .fold(Self::Balance::saturated_from(0u32), |mut acc, args| { - let w = Self::Balance::saturated_from(weight.ref_time()) - .saturating_pow(args.degree.into()); - - // The sum could get negative. Therefore we only sum with the accumulator. - // The Perbill Mul implementation is non overflowing. - let frac = args.coeff_frac * w; - let integer = args.coeff_integer.saturating_mul(w); - - if args.negative { - acc = acc.saturating_sub(frac); - acc = acc.saturating_sub(integer); - } else { - acc = acc.saturating_add(frac); - acc = acc.saturating_add(integer); - } - - acc - }) - } -} - -/// Implementor of `WeightToFee` that maps one unit of weight to one unit of fee. -pub struct IdentityFee(sp_std::marker::PhantomData); - -impl WeightToFee for IdentityFee -where - T: BaseArithmetic + From + Copy + Unsigned, -{ - type Balance = T; - - fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(weight.ref_time()) - } -} - -/// Implementor of [`WeightToFee`] that uses a constant multiplier. -/// # Example -/// -/// ``` -/// # use frame_support::traits::ConstU128; -/// # use frame_support::weights::ConstantMultiplier; -/// // Results in a multiplier of 10 for each unit of weight (or length) -/// type LengthToFee = ConstantMultiplier::>; -/// ``` -pub struct ConstantMultiplier(sp_std::marker::PhantomData<(T, M)>); - -impl WeightToFee for ConstantMultiplier -where - T: BaseArithmetic + From + Copy + Unsigned, - M: Get, -{ - type Balance = T; - - fn weight_to_fee(weight: &Weight) -> Self::Balance { - Self::Balance::saturated_from(weight.ref_time()).saturating_mul(M::get()) - } -} - -/// A struct holding value for each `DispatchClass`. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] -pub struct PerDispatchClass { - /// Value for `Normal` extrinsics. - normal: T, - /// Value for `Operational` extrinsics. - operational: T, - /// Value for `Mandatory` extrinsics. - mandatory: T, -} - -impl PerDispatchClass { - /// Create new `PerDispatchClass` with the same value for every class. - pub fn new(val: impl Fn(DispatchClass) -> T) -> Self { - Self { - normal: val(DispatchClass::Normal), - operational: val(DispatchClass::Operational), - mandatory: val(DispatchClass::Mandatory), - } - } - - /// Get a mutable reference to current value of given class. 
- pub fn get_mut(&mut self, class: DispatchClass) -> &mut T { - match class { - DispatchClass::Operational => &mut self.operational, - DispatchClass::Normal => &mut self.normal, - DispatchClass::Mandatory => &mut self.mandatory, - } - } - - /// Get current value for given class. - pub fn get(&self, class: DispatchClass) -> &T { - match class { - DispatchClass::Normal => &self.normal, - DispatchClass::Operational => &self.operational, - DispatchClass::Mandatory => &self.mandatory, - } - } -} - -impl PerDispatchClass { - /// Set the value of given class. - pub fn set(&mut self, new: T, class: impl OneOrMany) { - for class in class.into_iter() { - *self.get_mut(class) = new.clone(); - } - } -} - -impl PerDispatchClass { - /// Returns the total weight consumed by all extrinsics in the block. - pub fn total(&self) -> Weight { - let mut sum = Weight::zero(); - for class in DispatchClass::all() { - sum = sum.saturating_add(*self.get(*class)); - } - sum - } - - /// Add some weight of a specific dispatch class, saturating at the numeric bounds of `Weight`. - pub fn add(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_add(weight); - } - - /// Try to add some weight of a specific dispatch class, returning Err(()) if overflow would - /// occur. - pub fn checked_add(&mut self, weight: Weight, class: DispatchClass) -> Result<(), ()> { - let value = self.get_mut(class); - *value = value.checked_add(&weight).ok_or(())?; - Ok(()) - } - - /// Subtract some weight of a specific dispatch class, saturating at the numeric bounds of - /// `Weight`. - pub fn sub(&mut self, weight: Weight, class: DispatchClass) { - let value = self.get_mut(class); - *value = value.saturating_sub(weight); - } -} - -#[cfg(test)] -#[allow(dead_code)] -mod tests { - use super::*; - use crate::{decl_module, parameter_types, traits::Get}; - use smallvec::smallvec; - - pub trait Config: 'static { - type Origin; - type Balance; - type BlockNumber; - type DbWeight: Get; - type PalletInfo: crate::traits::PalletInfo; - } - - pub struct TraitImpl {} - - parameter_types! { - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 100, - write: 1000, - }; - } - - impl Config for TraitImpl { - type Origin = u32; - type BlockNumber = u32; - type Balance = u32; - type DbWeight = DbWeight; - type PalletInfo = crate::tests::PanicPalletInfo; - } - - decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=self { - // no arguments, fixed weight - #[weight = 1000] - fn f00(_origin) { unimplemented!(); } - - #[weight = (1000, DispatchClass::Mandatory)] - fn f01(_origin) { unimplemented!(); } - - #[weight = (1000, Pays::No)] - fn f02(_origin) { unimplemented!(); } - - #[weight = (1000, DispatchClass::Operational, Pays::No)] - fn f03(_origin) { unimplemented!(); } - - // weight = a x 10 + b - #[weight = ((_a * 10 + _eb * 1) as u64, DispatchClass::Normal, Pays::Yes)] - fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); } - - #[weight = (0, DispatchClass::Operational, Pays::Yes)] - fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } - - #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + Weight::from_ref_time(10_000)] - fn f20(_origin) { unimplemented!(); } - - #[weight = T::DbWeight::get().reads_writes(6, 5) + Weight::from_ref_time(40_000)] - fn f21(_origin) { unimplemented!(); } - - } - } - - #[test] - fn weights_are_correct() { - // #[weight = 1000] - let info = Call::::f00 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(1000)); - assert_eq!(info.class, DispatchClass::Normal); - assert_eq!(info.pays_fee, Pays::Yes); - - // #[weight = (1000, DispatchClass::Mandatory)] - let info = Call::::f01 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(1000)); - assert_eq!(info.class, DispatchClass::Mandatory); - assert_eq!(info.pays_fee, Pays::Yes); - - // #[weight = (1000, Pays::No)] - let info = Call::::f02 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(1000)); - assert_eq!(info.class, DispatchClass::Normal); - assert_eq!(info.pays_fee, Pays::No); - - // #[weight = (1000, DispatchClass::Operational, Pays::No)] - let info = Call::::f03 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(1000)); - assert_eq!(info.class, DispatchClass::Operational); - assert_eq!(info.pays_fee, Pays::No); - - // #[weight = ((_a * 10 + _eb * 1) as Weight, DispatchClass::Normal, Pays::Yes)] - let info = Call::::f11 { _a: 13, _eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(150)); // 13*10 + 20 - assert_eq!(info.class, DispatchClass::Normal); - assert_eq!(info.pays_fee, Pays::Yes); - - // #[weight = (0, DispatchClass::Operational, Pays::Yes)] - let info = Call::::f12 { _a: 10, _eb: 20 }.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(0)); - assert_eq!(info.class, DispatchClass::Operational); - assert_eq!(info.pays_fee, Pays::Yes); - - // #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] - let info = Call::::f20 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(12300)); // 100*3 + 1000*2 + 10_1000 - assert_eq!(info.class, DispatchClass::Normal); - assert_eq!(info.pays_fee, Pays::Yes); - - // #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] - let info = Call::::f21 {}.get_dispatch_info(); - assert_eq!(info.weight, Weight::from_ref_time(45600)); // 100*6 + 1000*5 + 40_1000 - assert_eq!(info.class, DispatchClass::Normal); - assert_eq!(info.pays_fee, Pays::Yes); - } - - #[test] - fn extract_actual_weight_works() { - let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; - assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), Weight::from_ref_time(7)); - assert_eq!( - extract_actual_weight(&Ok(Some(1000).into()), &pre), - Weight::from_ref_time(1000) - ); - assert_eq!( - extract_actual_weight( - 
&Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(9))), - &pre - ), - Weight::from_ref_time(9) - ); - } - - #[test] - fn extract_actual_weight_caps_at_pre_weight() { - let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; - assert_eq!( - extract_actual_weight(&Ok(Some(1250).into()), &pre), - Weight::from_ref_time(1000) - ); - assert_eq!( - extract_actual_weight( - &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(1300))), - &pre - ), - Weight::from_ref_time(1000), - ); - } - - #[test] - fn extract_actual_pays_fee_works() { - let pre = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; - assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::Yes); - assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::Yes); - assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::Yes); - assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::No).into()), &pre), Pays::No); - assert_eq!( - extract_actual_pays_fee( - &Err(DispatchError::BadOrigin.with_weight(Weight::from_ref_time(9))), - &pre - ), - Pays::Yes - ); - assert_eq!( - extract_actual_pays_fee( - &Err(DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { actual_weight: None, pays_fee: Pays::No }, - error: DispatchError::BadOrigin, - }), - &pre - ), - Pays::No - ); - - let pre = DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::No, - ..Default::default() - }; - assert_eq!(extract_actual_pays_fee(&Ok(Some(7).into()), &pre), Pays::No); - assert_eq!(extract_actual_pays_fee(&Ok(Some(1000).into()), &pre), Pays::No); - assert_eq!(extract_actual_pays_fee(&Ok((Some(1000), Pays::Yes).into()), &pre), Pays::No); - } - - type Balance = u64; - - // 0.5x^3 + 2.333x^2 + 7x - 10_000 - struct Poly; - impl WeightToFeePolynomial for Poly { - type Balance = Balance; - - fn polynomial() -> WeightToFeeCoefficients { - smallvec![ - WeightToFeeCoefficient { - coeff_integer: 0, - coeff_frac: Perbill::from_float(0.5), - negative: false, - degree: 3 - }, - WeightToFeeCoefficient { - coeff_integer: 2, - coeff_frac: Perbill::from_rational(1u32, 3u32), - negative: false, - degree: 2 - }, - WeightToFeeCoefficient { - coeff_integer: 7, - coeff_frac: Perbill::zero(), - negative: false, - degree: 1 - }, - WeightToFeeCoefficient { - coeff_integer: 10_000, - coeff_frac: Perbill::zero(), - negative: true, - degree: 0 - }, - ] - } - } - - #[test] - fn polynomial_works() { - // 100^3/2=500000 100^2*(2+1/3)=23333 700 -10000 - assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(100)), 514033); - // 10123^3/2=518677865433 10123^2*(2+1/3)=239108634 70861 -10000 - assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(10_123)), 518917034928); - } - - #[test] - fn polynomial_does_not_underflow() { - assert_eq!(Poly::weight_to_fee(&Weight::zero()), 0); - assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(10)), 0); - } - - #[test] - fn polynomial_does_not_overflow() { - assert_eq!(Poly::weight_to_fee(&Weight::MAX), Balance::max_value() - 10_000); - } - - #[test] - fn identity_fee_works() { - assert_eq!(IdentityFee::::weight_to_fee(&Weight::zero()), 0); - assert_eq!(IdentityFee::::weight_to_fee(&Weight::from_ref_time(50)), 50); - assert_eq!(IdentityFee::::weight_to_fee(&Weight::MAX), Balance::max_value()); - } - - #[test] - fn constant_fee_works() { - use crate::traits::ConstU128; - assert_eq!( - ConstantMultiplier::>::weight_to_fee(&Weight::zero()), - 0 - ); - assert_eq!( - 
ConstantMultiplier::>::weight_to_fee(&Weight::from_ref_time( - 50 - )), - 500 - ); - assert_eq!( - ConstantMultiplier::>::weight_to_fee(&Weight::from_ref_time( - 16 - )), - 16384 - ); - assert_eq!( - ConstantMultiplier::>::weight_to_fee( - &Weight::from_ref_time(2) - ), - u128::MAX - ); +#[deprecated = "Function has moved to `frame_support::dispatch`"] +pub fn extract_actual_pays_fee( + res: &dispatch::DispatchResultWithPostInfo, + info: &dispatch::DispatchInfo, +) -> dispatch::Pays { + dispatch::extract_actual_pays_fee(res, info) +} +#[deprecated = "Function has moved to `frame_support::dispatch`"] +pub fn extract_actual_weight( + res: &dispatch::DispatchResultWithPostInfo, + info: &dispatch::DispatchInfo, +) -> Weight { + dispatch::extract_actual_weight(res, info) +} +#[deprecated = "Trait has moved to `frame_support::dispatch`"] +pub trait ClassifyDispatch: dispatch::ClassifyDispatch { + fn classify_dispatch(&self, target: T) -> dispatch::DispatchClass { + >::classify_dispatch(self, target) + } +} +#[deprecated = "Enum has moved to `frame_support::dispatch`"] +pub type DispatchClass = dispatch::DispatchClass; +#[deprecated = "Struct has moved to `frame_support::dispatch`"] +pub type DispatchInfo = dispatch::DispatchInfo; +#[deprecated = "Trait has moved to `frame_support::dispatch`"] +pub trait GetDispatchInfo: dispatch::GetDispatchInfo { + fn get_dispatch_info(&self) -> dispatch::DispatchInfo { + ::get_dispatch_info(self) + } +} +#[deprecated = "Trait has moved to `frame_support::dispatch`"] +pub trait OneOrMany: dispatch::OneOrMany { + fn into_iter(self) -> Self::Iter + where + Self: Sized, + { + >::into_iter(self) + } +} +#[deprecated = "Enum has moved to `frame_support::dispatch`"] +pub type Pays = dispatch::Pays; +#[deprecated = "Trait has moved to `frame_support::dispatch`"] +pub trait PaysFee: dispatch::PaysFee { + fn pays_fee(&self, target: T) -> dispatch::Pays { + >::pays_fee(self, target) + } +} +#[deprecated = "Struct has moved to `frame_support::dispatch`"] +pub type PerDispatchClass = dispatch::PerDispatchClass; +#[deprecated = "Struct has moved to `frame_support::dispatch`"] +pub type PostDispatchInfo = dispatch::PostDispatchInfo; +#[deprecated = "Trait has moved to `frame_support::dispatch`"] +pub trait WeighData: dispatch::WeighData { + fn weigh_data(&self, target: T) -> Weight { + >::weigh_data(self, target) + } +} +#[deprecated = "Trait has moved to `frame_support::dispatch`"] +pub trait WithPostDispatchInfo: dispatch::WithPostDispatchInfo { + fn with_weight(self, actual_weight: Weight) -> dispatch::DispatchErrorWithPostInfo + where + Self: Sized, + { + ::with_weight(self, actual_weight) } } diff --git a/frame/support/src/weights/block_weights.rs b/frame/support/src/weights/block_weights.rs index 57978f60a4ecb..240b80460aa09 100644 --- a/frame/support/src/weights/block_weights.rs +++ b/frame/support/src/weights/block_weights.rs @@ -34,10 +34,8 @@ // --warmup=10 // --repeat=100 -use frame_support::{ - parameter_types, - weights::{constants::WEIGHT_PER_NANOS, Weight}, -}; +use sp_core::parameter_types; +use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; parameter_types! { /// Time to execute an empty block. @@ -58,7 +56,7 @@ parameter_types! { #[cfg(test)] mod test_weights { - use frame_support::weights::constants; + use sp_weights::constants; /// Checks that the weight exists and is sane. 
// NOTE: If this test fails but you are sure that the generated values are fine, diff --git a/frame/support/src/weights/extrinsic_weights.rs b/frame/support/src/weights/extrinsic_weights.rs index 54f70a063cdec..913dbc4e91fc1 100644 --- a/frame/support/src/weights/extrinsic_weights.rs +++ b/frame/support/src/weights/extrinsic_weights.rs @@ -34,10 +34,8 @@ // --warmup=10 // --repeat=100 -use frame_support::{ - parameter_types, - weights::{constants::WEIGHT_PER_NANOS, Weight}, -}; +use sp_core::parameter_types; +use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; parameter_types! { /// Time to execute a NO-OP extrinsic, for example `System::remark`. @@ -58,7 +56,7 @@ parameter_types! { #[cfg(test)] mod test_weights { - use frame_support::weights::constants; + use sp_weights::constants; /// Checks that the weight exists and is sane. // NOTE: If this test fails but you are sure that the generated values are fine, diff --git a/frame/support/src/weights/paritydb_weights.rs b/frame/support/src/weights/paritydb_weights.rs index 9f6ba82dbbff6..344e6cf0ddb6e 100644 --- a/frame/support/src/weights/paritydb_weights.rs +++ b/frame/support/src/weights/paritydb_weights.rs @@ -16,10 +16,9 @@ // limitations under the License. pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; + use frame_support::weights::constants; + use sp_core::parameter_types; + use sp_weights::RuntimeDbWeight; parameter_types! { /// ParityDB can be enabled with a feature flag, but is still experimental. These weights @@ -33,7 +32,7 @@ pub mod constants { #[cfg(test)] mod test_db_weights { use super::constants::ParityDbWeight as W; - use frame_support::weights::constants; + use sp_weights::constants; /// Checks that all weights exist and have sane values. // NOTE: If this test fails but you are sure that the generated values are fine, diff --git a/frame/support/src/weights/rocksdb_weights.rs b/frame/support/src/weights/rocksdb_weights.rs index cab983de0ad0a..4dec2d8c877ea 100644 --- a/frame/support/src/weights/rocksdb_weights.rs +++ b/frame/support/src/weights/rocksdb_weights.rs @@ -16,10 +16,9 @@ // limitations under the License. pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; + use frame_support::weights::constants; + use sp_core::parameter_types; + use sp_weights::RuntimeDbWeight; parameter_types! { /// By default, Substrate uses RocksDB, so this will be the weight used throughout @@ -33,7 +32,7 @@ pub mod constants { #[cfg(test)] mod test_db_weights { use super::constants::RocksDbWeight as W; - use frame_support::weights::constants; + use sp_weights::constants; /// Checks that all weights exist and have sane values. 
// NOTE: If this test fails but you are sure that the generated values are fine, diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 4f3c783252e00..4306314b4c005 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -200,7 +200,7 @@ pub mod module3 { } #[weight = 3] fn aux_4(_origin) -> frame_support::dispatch::DispatchResult { unreachable!() } - #[weight = (5, frame_support::weights::DispatchClass::Operational)] + #[weight = (5, frame_support::dispatch::DispatchClass::Operational)] fn operational(_origin) { unreachable!() } } } @@ -504,8 +504,8 @@ fn call_encode_is_correct_and_decode_works() { #[test] fn call_weight_should_attach_to_call_enum() { use frame_support::{ - dispatch::{DispatchInfo, GetDispatchInfo}, - weights::{DispatchClass, Pays, Weight}, + dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, + weights::Weight, }; // operational. assert_eq!( diff --git a/frame/support/test/tests/origin.rs b/frame/support/test/tests/origin.rs index 53124059e6353..620f64ec3cfc2 100644 --- a/frame/support/test/tests/origin.rs +++ b/frame/support/test/tests/origin.rs @@ -100,7 +100,7 @@ pub mod module { } #[weight = 3] fn aux_4(_origin) -> frame_support::dispatch::DispatchResult { unreachable!() } - #[weight = (5, frame_support::weights::DispatchClass::Operational)] + #[weight = (5, frame_support::dispatch::DispatchClass::Operational)] fn operational(_origin) { unreachable!() } } } diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 420617a9e4f24..4304aa1533a8b 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -16,14 +16,16 @@ // limitations under the License. use frame_support::{ - dispatch::{Parameter, UnfilteredDispatchable}, + dispatch::{ + DispatchClass, DispatchInfo, GetDispatchInfo, Parameter, Pays, UnfilteredDispatchable, + }, pallet_prelude::ValueQuery, storage::unhashed, traits::{ ConstU32, GetCallName, GetStorageVersion, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade, PalletError, PalletInfoAccess, StorageVersion, }, - weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays, RuntimeDbWeight, Weight}, + weights::{RuntimeDbWeight, Weight}, }; use scale_info::{meta_type, TypeInfo}; use sp_io::{ diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index f01ce17d71bf9..3bbbc559ff9b8 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -16,11 +16,10 @@ // limitations under the License. 
use frame_support::{ - dispatch::UnfilteredDispatchable, + dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays, UnfilteredDispatchable}, pallet_prelude::ValueQuery, storage::unhashed, traits::{ConstU32, GetCallName, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade}, - weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, }; use sp_io::{ hashing::{blake2_128, twox_128, twox_64}, diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index 3429c6546c7fd..dd3a5d606bad5 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -23,6 +23,7 @@ sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } sp-version = { version = "5.0.0", default-features = false, path = "../../primitives/version" } +sp-weights = { version = "4.0.0", default-features = false, path = "../../primitives/weights" } [dev-dependencies] criterion = "0.3.3" @@ -42,6 +43,7 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-version/std", + "sp-weights/std", ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index 367e6c73c4134..dbe38da5775a7 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -21,7 +21,7 @@ use codec::Encode; use frame_benchmarking::{benchmarks, whitelisted_caller}; -use frame_support::{storage, traits::Get, weights::DispatchClass}; +use frame_support::{dispatch::DispatchClass, storage, traits::Get}; use frame_system::{Call, Pallet as System, RawOrigin}; use sp_core::storage::well_known_keys; use sp_runtime::traits::Hash; diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index f515f1ae0fa27..635ab4ef1d9a9 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -101,7 +101,10 @@ impl SignedExtension for CheckMortality { mod tests { use super::*; use crate::mock::{new_test_ext, System, Test, CALL}; - use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; + use frame_support::{ + dispatch::{DispatchClass, DispatchInfo, Pays}, + weights::Weight, + }; use sp_core::H256; #[test] diff --git a/frame/system/src/extensions/check_non_zero_sender.rs b/frame/system/src/extensions/check_non_zero_sender.rs index 093424999aab3..036f70c2fdd48 100644 --- a/frame/system/src/extensions/check_non_zero_sender.rs +++ b/frame/system/src/extensions/check_non_zero_sender.rs @@ -17,7 +17,7 @@ use crate::Config; use codec::{Decode, Encode}; -use frame_support::weights::DispatchInfo; +use frame_support::dispatch::DispatchInfo; use scale_info::TypeInfo; use sp_runtime::{ traits::{DispatchInfoOf, Dispatchable, SignedExtension}, diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index 7259b80013ae5..1616a2d8a119e 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -17,7 +17,7 @@ use crate::Config; use codec::{Decode, Encode}; -use frame_support::weights::DispatchInfo; +use frame_support::dispatch::DispatchInfo; use scale_info::TypeInfo; use sp_runtime::{ traits::{DispatchInfoOf, Dispatchable, One, SignedExtension}, diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs 
index 9af79e480b394..466231b8455ec 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -18,8 +18,8 @@ use crate::{limits::BlockWeights, Config, Pallet}; use codec::{Decode, Encode}; use frame_support::{ + dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, traits::Get, - weights::{DispatchClass, DispatchInfo, PostDispatchInfo, Weight}, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -27,6 +27,7 @@ use sp_runtime::{ transaction_validity::{InvalidTransaction, TransactionValidity, TransactionValidityError}, DispatchResult, }; +use sp_weights::Weight; /// Block resource (weight) limit check. /// @@ -269,10 +270,7 @@ mod tests { mock::{new_test_ext, System, Test, CALL}, AllExtrinsicsLen, BlockWeight, }; - use frame_support::{ - assert_err, assert_ok, - weights::{Pays, Weight}, - }; + use frame_support::{assert_err, assert_ok, dispatch::Pays, weights::Weight}; use sp_std::marker::PhantomData; fn block_weights() -> crate::limits::BlockWeights { diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 550e3939f995a..739dcadde9ac1 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -84,20 +84,20 @@ use sp_version::RuntimeVersion; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use frame_support::{ - dispatch::{DispatchResult, DispatchResultWithPostInfo}, + dispatch::{ + extract_actual_pays_fee, extract_actual_weight, DispatchClass, DispatchInfo, + DispatchResult, DispatchResultWithPostInfo, PerDispatchClass, + }, storage, traits::{ ConstU32, Contains, EnsureOrigin, Get, HandleLifetime, OnKilledAccount, OnNewAccount, OriginTrait, PalletInfo, SortedMembers, StoredMap, TypedGet, }, - weights::{ - extract_actual_pays_fee, extract_actual_weight, DispatchClass, DispatchInfo, - PerDispatchClass, RuntimeDbWeight, Weight, - }, Parameter, }; use scale_info::TypeInfo; use sp_core::storage::well_known_keys; +use sp_weights::{RuntimeDbWeight, Weight}; #[cfg(feature = "std")] use frame_support::traits::GenesisBuild; diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index ba73d29806bba..e182eb626424d 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -25,7 +25,10 @@ //! `DispatchClass`. This module contains configuration object for both resources, //! which should be passed to `frame_system` configuration when runtime is being set up. 
-use frame_support::weights::{constants, DispatchClass, OneOrMany, PerDispatchClass, Weight}; +use frame_support::{ + dispatch::{DispatchClass, OneOrMany, PerDispatchClass}, + weights::{constants, Weight}, +}; use scale_info::TypeInfo; use sp_runtime::{traits::Bounded, Perbill, RuntimeDebug}; diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 641cda13b07a4..b6fdda8d9cbe2 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -18,8 +18,7 @@ use crate::*; use frame_support::{ assert_noop, assert_ok, - dispatch::PostDispatchInfo, - weights::{Pays, WithPostDispatchInfo}, + dispatch::{Pays, PostDispatchInfo, WithPostDispatchInfo}, }; use mock::{Origin, *}; use sp_core::H256; diff --git a/frame/transaction-payment/asset-tx-payment/src/lib.rs b/frame/transaction-payment/asset-tx-payment/src/lib.rs index c14c7ba11b546..e136da8b0bb75 100644 --- a/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -39,7 +39,7 @@ use sp_std::prelude::*; use codec::{Decode, Encode}; use frame_support::{ - dispatch::DispatchResult, + dispatch::{DispatchInfo, DispatchResult, PostDispatchInfo}, traits::{ tokens::{ fungibles::{Balanced, CreditOf, Inspect}, @@ -47,7 +47,6 @@ use frame_support::{ }, IsType, }, - weights::{DispatchInfo, PostDispatchInfo}, DefaultNoBound, }; use pallet_transaction_payment::OnChargeTransaction; diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index dfce28fda43d2..1f6113aabdddd 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -18,10 +18,11 @@ use crate as pallet_asset_tx_payment; use frame_support::{ assert_ok, + dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, pallet_prelude::*, parameter_types, traits::{fungibles::Mutate, ConstU32, ConstU64, ConstU8, FindAuthor}, - weights::{DispatchClass, DispatchInfo, PostDispatchInfo, Weight, WeightToFee as WeightToFeeT}, + weights::{Weight, WeightToFee as WeightToFeeT}, ConsensusEngineId, }; use frame_system as system; diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 5f1f8321456be..0447ea6bfd99d 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -63,11 +63,11 @@ use sp_runtime::{ use sp_std::prelude::*; use frame_support::{ - dispatch::DispatchResult, - traits::{EstimateCallFee, Get}, - weights::{ - DispatchClass, DispatchInfo, GetDispatchInfo, Pays, PostDispatchInfo, Weight, WeightToFee, + dispatch::{ + DispatchClass, DispatchInfo, DispatchResult, GetDispatchInfo, Pays, PostDispatchInfo, }, + traits::{EstimateCallFee, Get}, + weights::{Weight, WeightToFee}, }; mod payment; @@ -849,12 +849,11 @@ mod tests { }; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, + dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo}, + parameter_types, traits::{ConstU32, ConstU64, Currency, GenesisBuild, Imbalance, OnUnbalanced}, - weights::{ - DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo, Weight, - WeightToFee as WeightToFeeT, - }, + weights::{Weight, WeightToFee as WeightToFeeT}, }; use frame_system as system; use pallet_balances::Call as BalancesCall; diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs index 5e915d62a26d4..1f41ba7b0b72e 100644 --- a/frame/transaction-payment/src/types.rs 
+++ b/frame/transaction-payment/src/types.rs @@ -24,7 +24,7 @@ use serde::{Deserialize, Serialize}; use sp_runtime::traits::{AtLeast32BitUnsigned, Zero}; use sp_std::prelude::*; -use frame_support::weights::{DispatchClass, Weight}; +use frame_support::{dispatch::DispatchClass, weights::Weight}; /// The base fee and adjusted weight and length fees constitute the _inclusion fee_. #[derive(Encode, Decode, Clone, Eq, PartialEq)] diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index b5bd686b59f00..08fb34ed9e3c1 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -58,9 +58,8 @@ pub mod weights; use codec::{Decode, Encode}; use frame_support::{ - dispatch::PostDispatchInfo, + dispatch::{extract_actual_weight, GetDispatchInfo, PostDispatchInfo}, traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, - weights::{extract_actual_weight, GetDispatchInfo}, }; use sp_core::TypeId; use sp_io::hashing::blake2_256; diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 1985cc3022114..93b80672f55e2 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -24,10 +24,10 @@ use super::*; use crate as utility; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, - dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable}, + dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable, Pays}, parameter_types, storage, traits::{ConstU32, ConstU64, Contains}, - weights::{Pays, Weight}, + weights::Weight, }; use sp_core::H256; use sp_runtime::{ diff --git a/frame/whitelist/src/lib.rs b/frame/whitelist/src/lib.rs index 19e53f2491614..c5621a239f3a6 100644 --- a/frame/whitelist/src/lib.rs +++ b/frame/whitelist/src/lib.rs @@ -42,9 +42,10 @@ pub use weights::WeightInfo; use codec::{DecodeLimit, Encode, FullCodec}; use frame_support::{ + dispatch::{GetDispatchInfo, PostDispatchInfo}, ensure, traits::{PreimageProvider, PreimageRecipient}, - weights::{GetDispatchInfo, PostDispatchInfo, Weight}, + weights::Weight, }; use scale_info::TypeInfo; use sp_runtime::traits::{Dispatchable, Hash}; diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 9e4b36c584e91..030c44c284593 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -29,6 +29,7 @@ sp-arithmetic = { version = "5.0.0", default-features = false, path = "../arithm sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-io = { version = "6.0.0", default-features = false, path = "../io" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } +sp-weights = { version = "4.0.0", default-features = false, path = "../weights" } [dev-dependencies] rand = "0.7.2" diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 2cf98b4e638d3..495dfa66c5462 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -1820,6 +1820,12 @@ impl Printable for bool { } } +impl Printable for sp_weights::Weight { + fn print(&self) { + self.ref_time().print() + } +} + impl Printable for () { fn print(&self) { "()".print() diff --git a/primitives/weights/Cargo.toml b/primitives/weights/Cargo.toml new file mode 100644 index 0000000000000..8c0302ff5d1b2 --- /dev/null +++ b/primitives/weights/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "sp-weights" +version = "4.0.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = 
"https://github.com/paritytech/substrate/" +description = "Types and traits for interfacing between the host and the wasm runtime." +documentation = "https://docs.rs/sp-wasm-interface" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +impl-trait-for-tuples = "0.2.2" +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +serde = { version = "1.0.136", optional = true, features = ["derive"] } +smallvec = "1.8.0" +sp-arithmetic = { version = "5.0.0", default-features = false, path = "../arithmetic" } +sp-core = { version = "6.0.0", default-features = false, path = "../core" } +sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } +sp-std = { version = "4.0.0", default-features = false, path = "../std" } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "scale-info/std", + "serde", + "sp-arithmetic/std", + "sp-core/std", + "sp-debug-derive/std", + "sp-std/std" +] +# By default some types have documentation, `full-metadata-docs` allows to add documentation to +# more types in the metadata. +full-metadata-docs = ["scale-info/docs"] diff --git a/primitives/weights/src/lib.rs b/primitives/weights/src/lib.rs new file mode 100644 index 0000000000000..d260f73d41268 --- /dev/null +++ b/primitives/weights/src/lib.rs @@ -0,0 +1,299 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Primitives for transaction weighting. +//! +//! Latest machine specification used to benchmark are: +//! - Digital Ocean: ubuntu-s-2vcpu-4gb-ams3-01 +//! - 2x Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz +//! - 4GB RAM +//! - Ubuntu 19.10 (GNU/Linux 5.3.0-18-generic x86_64) +//! - rustc 1.42.0 (b8cedc004 2020-03-09) + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate self as sp_weights; + +mod weight_v2; + +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +use smallvec::SmallVec; +use sp_arithmetic::{ + traits::{BaseArithmetic, SaturatedConversion, Saturating, Unsigned}, + Perbill, +}; +use sp_core::Get; +use sp_debug_derive::RuntimeDebug; + +pub use weight_v2::*; + +pub mod constants { + use super::Weight; + + pub const WEIGHT_PER_SECOND: Weight = Weight::from_ref_time(1_000_000_000_000); + pub const WEIGHT_PER_MILLIS: Weight = Weight::from_ref_time(1_000_000_000); + pub const WEIGHT_PER_MICROS: Weight = Weight::from_ref_time(1_000_000); + pub const WEIGHT_PER_NANOS: Weight = Weight::from_ref_time(1_000); +} + +/// The weight of database operations that the runtime can invoke. +/// +/// NOTE: This is currently only measured in computational time, and will probably +/// be updated all together once proof size is accounted for. 
+#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] +pub struct RuntimeDbWeight { + pub read: u64, + pub write: u64, +} + +impl RuntimeDbWeight { + pub fn reads(self, r: u64) -> Weight { + Weight::from_ref_time(self.read.saturating_mul(r)) + } + + pub fn writes(self, w: u64) -> Weight { + Weight::from_ref_time(self.write.saturating_mul(w)) + } + + pub fn reads_writes(self, r: u64, w: u64) -> Weight { + let read_weight = self.read.saturating_mul(r); + let write_weight = self.write.saturating_mul(w); + Weight::from_ref_time(read_weight.saturating_add(write_weight)) + } +} + +/// One coefficient and its position in the `WeightToFee`. +/// +/// One term of polynomial is calculated as: +/// +/// ```ignore +/// coeff_integer * x^(degree) + coeff_frac * x^(degree) +/// ``` +/// +/// The `negative` value encodes whether the term is added or substracted from the +/// overall polynomial result. +#[derive(Clone, Encode, Decode, TypeInfo)] +pub struct WeightToFeeCoefficient { + /// The integral part of the coefficient. + pub coeff_integer: Balance, + /// The fractional part of the coefficient. + pub coeff_frac: Perbill, + /// True iff the coefficient should be interpreted as negative. + pub negative: bool, + /// Degree/exponent of the term. + pub degree: u8, +} + +/// A list of coefficients that represent one polynomial. +pub type WeightToFeeCoefficients = SmallVec<[WeightToFeeCoefficient; 4]>; + +/// A trait that describes the weight to fee calculation. +pub trait WeightToFee { + /// The type that is returned as result from calculation. + type Balance: BaseArithmetic + From + Copy + Unsigned; + + /// Calculates the fee from the passed `weight`. + fn weight_to_fee(weight: &Weight) -> Self::Balance; +} + +/// A trait that describes the weight to fee calculation as polynomial. +/// +/// An implementor should only implement the `polynomial` function. +pub trait WeightToFeePolynomial { + /// The type that is returned as result from polynomial evaluation. + type Balance: BaseArithmetic + From + Copy + Unsigned; + + /// Returns a polynomial that describes the weight to fee conversion. + /// + /// This is the only function that should be manually implemented. Please note + /// that all calculation is done in the probably unsigned `Balance` type. This means + /// that the order of coefficients is important as putting the negative coefficients + /// first will most likely saturate the result to zero mid evaluation. + fn polynomial() -> WeightToFeeCoefficients; +} + +impl WeightToFee for T +where + T: WeightToFeePolynomial, +{ + type Balance = ::Balance; + + /// Calculates the fee from the passed `weight` according to the `polynomial`. + /// + /// This should not be overridden in most circumstances. Calculation is done in the + /// `Balance` type and never overflows. All evaluation is saturating. + fn weight_to_fee(weight: &Weight) -> Self::Balance { + Self::polynomial() + .iter() + .fold(Self::Balance::saturated_from(0u32), |mut acc, args| { + let w = Self::Balance::saturated_from(weight.ref_time()) + .saturating_pow(args.degree.into()); + + // The sum could get negative. Therefore we only sum with the accumulator. + // The Perbill Mul implementation is non overflowing. 
+ let frac = args.coeff_frac * w; + let integer = args.coeff_integer.saturating_mul(w); + + if args.negative { + acc = acc.saturating_sub(frac); + acc = acc.saturating_sub(integer); + } else { + acc = acc.saturating_add(frac); + acc = acc.saturating_add(integer); + } + + acc + }) + } +} + +/// Implementor of `WeightToFee` that maps one unit of weight to one unit of fee. +pub struct IdentityFee(sp_std::marker::PhantomData); + +impl WeightToFee for IdentityFee +where + T: BaseArithmetic + From + Copy + Unsigned, +{ + type Balance = T; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + Self::Balance::saturated_from(weight.ref_time()) + } +} + +/// Implementor of [`WeightToFee`] that uses a constant multiplier. +/// # Example +/// +/// ``` +/// # use sp_core::ConstU128; +/// # use sp_weights::ConstantMultiplier; +/// // Results in a multiplier of 10 for each unit of weight (or length) +/// type LengthToFee = ConstantMultiplier::>; +/// ``` +pub struct ConstantMultiplier(sp_std::marker::PhantomData<(T, M)>); + +impl WeightToFee for ConstantMultiplier +where + T: BaseArithmetic + From + Copy + Unsigned, + M: Get, +{ + type Balance = T; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + Self::Balance::saturated_from(weight.ref_time()).saturating_mul(M::get()) + } +} + +#[cfg(test)] +#[allow(dead_code)] +mod tests { + use super::*; + use smallvec::smallvec; + + type Balance = u64; + + // 0.5x^3 + 2.333x^2 + 7x - 10_000 + struct Poly; + impl WeightToFeePolynomial for Poly { + type Balance = Balance; + + fn polynomial() -> WeightToFeeCoefficients { + smallvec![ + WeightToFeeCoefficient { + coeff_integer: 0, + coeff_frac: Perbill::from_float(0.5), + negative: false, + degree: 3 + }, + WeightToFeeCoefficient { + coeff_integer: 2, + coeff_frac: Perbill::from_rational(1u32, 3u32), + negative: false, + degree: 2 + }, + WeightToFeeCoefficient { + coeff_integer: 7, + coeff_frac: Perbill::zero(), + negative: false, + degree: 1 + }, + WeightToFeeCoefficient { + coeff_integer: 10_000, + coeff_frac: Perbill::zero(), + negative: true, + degree: 0 + }, + ] + } + } + + #[test] + fn polynomial_works() { + // 100^3/2=500000 100^2*(2+1/3)=23333 700 -10000 + assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(100)), 514033); + // 10123^3/2=518677865433 10123^2*(2+1/3)=239108634 70861 -10000 + assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(10_123)), 518917034928); + } + + #[test] + fn polynomial_does_not_underflow() { + assert_eq!(Poly::weight_to_fee(&Weight::zero()), 0); + assert_eq!(Poly::weight_to_fee(&Weight::from_ref_time(10)), 0); + } + + #[test] + fn polynomial_does_not_overflow() { + assert_eq!(Poly::weight_to_fee(&Weight::MAX), Balance::max_value() - 10_000); + } + + #[test] + fn identity_fee_works() { + assert_eq!(IdentityFee::::weight_to_fee(&Weight::zero()), 0); + assert_eq!(IdentityFee::::weight_to_fee(&Weight::from_ref_time(50)), 50); + assert_eq!(IdentityFee::::weight_to_fee(&Weight::MAX), Balance::max_value()); + } + + #[test] + fn constant_fee_works() { + use sp_core::ConstU128; + assert_eq!( + ConstantMultiplier::>::weight_to_fee(&Weight::zero()), + 0 + ); + assert_eq!( + ConstantMultiplier::>::weight_to_fee(&Weight::from_ref_time( + 50 + )), + 500 + ); + assert_eq!( + ConstantMultiplier::>::weight_to_fee(&Weight::from_ref_time( + 16 + )), + 16384 + ); + assert_eq!( + ConstantMultiplier::>::weight_to_fee( + &Weight::from_ref_time(2) + ), + u128::MAX + ); + } +} diff --git a/frame/support/src/weights/weight_v2.rs b/primitives/weights/src/weight_v2.rs similarity index 68% 
rename from frame/support/src/weights/weight_v2.rs rename to primitives/weights/src/weight_v2.rs index 2d50f07add596..a4e4da4f8a7b3 100644 --- a/frame/support/src/weights/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -17,10 +17,8 @@ use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign}; -use sp_runtime::{ - traits::{Bounded, CheckedAdd, CheckedSub, Zero}, - RuntimeDebug, -}; +use sp_arithmetic::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; +use sp_debug_derive::RuntimeDebug; use super::*; @@ -266,11 +264,11 @@ macro_rules! weight_mul_per_impl { } } weight_mul_per_impl!( - sp_runtime::Percent, - sp_runtime::PerU16, - sp_runtime::Permill, - sp_runtime::Perbill, - sp_runtime::Perquintill, + sp_arithmetic::Percent, + sp_arithmetic::PerU16, + sp_arithmetic::Permill, + sp_arithmetic::Perbill, + sp_arithmetic::Perquintill, ); macro_rules! weight_mul_primitive_impl { @@ -310,91 +308,6 @@ impl CheckedSub for Weight { } } -impl PaysFee for (Weight, DispatchClass, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.2 - } -} - -impl WeighData for (Weight, DispatchClass) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl WeighData for (Weight, DispatchClass, Pays) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl ClassifyDispatch for (Weight, DispatchClass) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -impl PaysFee for (Weight, DispatchClass) { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl WeighData for (Weight, Pays) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl ClassifyDispatch for (Weight, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for (Weight, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.1 - } -} - -impl From<(Option, Pays)> for PostDispatchInfo { - fn from(post_weight_info: (Option, Pays)) -> Self { - let (actual_weight, pays_fee) = post_weight_info; - Self { actual_weight, pays_fee } - } -} - -impl From> for PostDispatchInfo { - fn from(actual_weight: Option) -> Self { - Self { actual_weight, pays_fee: Default::default() } - } -} - -impl WeighData for Weight { - fn weigh_data(&self, _: T) -> Weight { - return *self - } -} - -impl ClassifyDispatch for Weight { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for Weight { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl ClassifyDispatch for (Weight, DispatchClass, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - impl core::fmt::Display for Weight { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "Weight(ref_time: {})", self.ref_time) @@ -421,106 +334,3 @@ impl SubAssign for Weight { *self = Self { ref_time: self.ref_time - other.ref_time }; } } - -impl sp_runtime::traits::Printable for Weight { - fn print(&self) { - self.ref_time().print() - } -} - -// TODO: Eventually remove these - -impl From> for PostDispatchInfo { - fn from(maybe_actual_computation: Option) -> Self { - let actual_weight = match maybe_actual_computation { - Some(actual_computation) => Some(Weight::zero().set_ref_time(actual_computation)), - None => None, - }; - Self { actual_weight, pays_fee: Default::default() } - } -} - -impl From<(Option, Pays)> for PostDispatchInfo { - fn from(post_weight_info: (Option, Pays)) -> Self { 
- let (maybe_actual_time, pays_fee) = post_weight_info; - let actual_weight = match maybe_actual_time { - Some(actual_time) => Some(Weight::zero().set_ref_time(actual_time)), - None => None, - }; - Self { actual_weight, pays_fee } - } -} - -impl WeighData for u64 { - fn weigh_data(&self, _: T) -> Weight { - return Weight::zero().set_ref_time(*self) - } -} - -impl ClassifyDispatch for u64 { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for u64 { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl WeighData for (u64, DispatchClass, Pays) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl ClassifyDispatch for (u64, DispatchClass, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -impl PaysFee for (u64, DispatchClass, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.2 - } -} - -impl WeighData for (u64, DispatchClass) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl ClassifyDispatch for (u64, DispatchClass) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - self.1 - } -} - -impl PaysFee for (u64, DispatchClass) { - fn pays_fee(&self, _: T) -> Pays { - Pays::Yes - } -} - -impl WeighData for (u64, Pays) { - fn weigh_data(&self, args: T) -> Weight { - return self.0.weigh_data(args) - } -} - -impl ClassifyDispatch for (u64, Pays) { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::Normal - } -} - -impl PaysFee for (u64, Pays) { - fn pays_fee(&self, _: T) -> Pays { - self.1 - } -} - -// END TODO diff --git a/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/utils/frame/benchmarking-cli/src/overhead/weights.hbs index 0a4100953401f..86b2c0b0e0cdb 100644 --- a/utils/frame/benchmarking-cli/src/overhead/weights.hbs +++ b/utils/frame/benchmarking-cli/src/overhead/weights.hbs @@ -13,10 +13,8 @@ // {{arg}} {{/each}} -use frame_support::{ - parameter_types, - weights::{constants::WEIGHT_PER_NANOS, Weight}, -}; +use sp_core::parameter_types; +use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; parameter_types! { {{#if (eq short_name "block")}} @@ -42,7 +40,7 @@ parameter_types! { #[cfg(test)] mod test_weights { use super::*; - use frame_support::weights::constants; + use sp_weights::constants; /// Checks that the weight exists and is sane. // NOTE: If this test fails but you are sure that the generated values are fine, diff --git a/utils/frame/benchmarking-cli/src/storage/weights.hbs b/utils/frame/benchmarking-cli/src/storage/weights.hbs index 95f42c411d1fd..82e581cf990c8 100644 --- a/utils/frame/benchmarking-cli/src/storage/weights.hbs +++ b/utils/frame/benchmarking-cli/src/storage/weights.hbs @@ -17,10 +17,9 @@ /// Storage DB weights for the `{{runtime_name}}` runtime and `{{db_name}}`. pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; + use frame_support::weights::constants; + use sp_core::parameter_types; + use sp_weights::RuntimeDbWeight; parameter_types! { {{#if (eq db_name "ParityDb")}} @@ -66,7 +65,7 @@ pub mod constants { #[cfg(test)] mod test_db_weights { use super::constants::{{db_name}}Weight as W; - use frame_support::weights::constants; + use sp_weights::constants; /// Checks that all weights exist and have sane values. 
// NOTE: If this test fails but you are sure that the generated values are fine, From b5e25424f369fe9481e1fa68e4278e694238755a Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 14 Sep 2022 11:43:19 -0400 Subject: [PATCH 130/348] fix sp-weight std (#12265) --- primitives/runtime/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 030c44c284593..278962bcf57c4 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -58,4 +58,5 @@ std = [ "sp-core/std", "sp-io/std", "sp-std/std", + "sp-weights/std", ] From 7c2c0c10ec2ad5788894cef001fc724d1b8a6127 Mon Sep 17 00:00:00 2001 From: Koute Date: Thu, 15 Sep 2022 01:01:13 +0900 Subject: [PATCH 131/348] Improve base weights consistency and make sure they're never zero (#11806) * Improve base weights consistency and make sure they're never zero * Switch back to `linregress` crate; add back estimation errors * Move the test into `mod tests` * Use all of the samples instead of only the first one * Use full precision extrinsic base weights and slopes * Add an extra comment * ".git/.scripts/bench-bot.sh" pallet dev pallet_contracts Co-authored-by: parity-processbot <> Co-authored-by: Shawn Tabrizi --- frame/benchmarking/src/analysis.rs | 258 ++++++++++++++---- frame/benchmarking/src/lib.rs | 2 +- .../benchmarking-cli/src/pallet/writer.rs | 31 +-- 3 files changed, 219 insertions(+), 72 deletions(-) diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index c19af781234f8..9ba2ea657bab0 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -18,17 +18,15 @@ //! Tools for analyzing the benchmark results. use crate::BenchmarkResult; -use linregress::{FormulaRegressionBuilder, RegressionDataBuilder}; use std::collections::BTreeMap; -pub use linregress::RegressionModel; - pub struct Analysis { pub base: u128, pub slopes: Vec, pub names: Vec, pub value_dists: Option, u128, u128)>>, - pub model: Option, + pub errors: Option>, + selector: BenchmarkSelector, } #[derive(Clone, Copy)] @@ -40,6 +38,44 @@ pub enum BenchmarkSelector { ProofSize, } +/// Multiplies the value by 1000 and converts it into an u128. +fn mul_1000_into_u128(value: f64) -> u128 { + // This is slighly more precise than the alternative of `(value * 1000.0) as u128`. + (value as u128) + .saturating_mul(1000) + .saturating_add((value.fract() * 1000.0) as u128) +} + +impl BenchmarkSelector { + fn scale_and_cast_weight(self, value: f64, round_up: bool) -> u128 { + if let BenchmarkSelector::ExtrinsicTime = self { + mul_1000_into_u128(value) + } else { + if round_up { + (value + 0.5) as u128 + } else { + value as u128 + } + } + } + + fn scale_weight(self, value: u128) -> u128 { + if let BenchmarkSelector::ExtrinsicTime = self { + value.saturating_mul(1000) + } else { + value + } + } + + fn nanos_from_weight(self, value: u128) -> u128 { + if let BenchmarkSelector::ExtrinsicTime = self { + value / 1000 + } else { + value + } + } +} + #[derive(Debug)] pub enum AnalysisChoice { /// Use minimum squares regression for analyzing the benchmarking results. @@ -72,6 +108,60 @@ impl TryFrom> for AnalysisChoice { } } +fn raw_linear_regression( + xs: Vec, + ys: Vec, + x_vars: usize, + with_intercept: bool, +) -> Option<(f64, Vec, Vec)> { + let mut data: Vec = Vec::new(); + + // Here we build a raw matrix of linear equations for the `linregress` crate to solve for us + // and build a linear regression model around it. 
+ // + // Each row of the matrix contains as the first column the actual value which we want + // the model to predict for us (the `y`), and the rest of the columns contain the input + // parameters on which the model will base its predictions on (the `xs`). + // + // In machine learning terms this is essentially the training data for the model. + // + // As a special case the very first input parameter represents the constant factor + // of the linear equation: the so called "intercept value". Since it's supposed to + // be constant we can just put a dummy input parameter of either a `1` (in case we want it) + // or a `0` (in case we do not). + for (&y, xs) in ys.iter().zip(xs.chunks_exact(x_vars)) { + data.push(y); + if with_intercept { + data.push(1.0); + } else { + data.push(0.0); + } + data.extend(xs); + } + let model = linregress::fit_low_level_regression_model(&data, ys.len(), x_vars + 2).ok()?; + Some((model.parameters[0], model.parameters[1..].to_vec(), model.se)) +} + +fn linear_regression( + xs: Vec, + mut ys: Vec, + x_vars: usize, +) -> Option<(f64, Vec, Vec)> { + let mut min = ys[0]; + for &value in &ys { + if value < min { + min = value; + } + } + + for value in &mut ys { + *value -= min; + } + + let (intercept, params, errors) = raw_linear_regression(xs, ys, x_vars, false)?; + Some((intercept + min, params, errors[1..].to_vec())) +} + impl Analysis { // Useful for when there are no components, and we just need an median value of the benchmark // results. Note: We choose the median value because it is more robust to outliers. @@ -95,11 +185,12 @@ impl Analysis { let mid = values.len() / 2; Some(Self { - base: values[mid], + base: selector.scale_weight(values[mid]), slopes: Vec::new(), names: Vec::new(), value_dists: None, - model: None, + errors: None, + selector, }) } @@ -186,15 +277,19 @@ impl Analysis { }) .collect::>(); - let base = models[0].0.max(0f64) as u128; - let slopes = models.iter().map(|x| x.1.max(0f64) as u128).collect::>(); + let base = selector.scale_and_cast_weight(models[0].0.max(0f64), false); + let slopes = models + .iter() + .map(|x| selector.scale_and_cast_weight(x.1.max(0f64), false)) + .collect::>(); Some(Self { base, slopes, names: results.into_iter().map(|x| x.0).collect::>(), value_dists: None, - model: None, + errors: None, + selector, }) } @@ -221,36 +316,7 @@ impl Analysis { *rs = rs[ql..rs.len() - ql].to_vec(); } - let mut data = - vec![("Y", results.iter().flat_map(|x| x.1.iter().map(|v| *v as f64)).collect())]; - let names = r[0].components.iter().map(|x| format!("{:?}", x.0)).collect::>(); - data.extend(names.iter().enumerate().map(|(i, p)| { - ( - p.as_str(), - results - .iter() - .flat_map(|x| Some(x.0[i] as f64).into_iter().cycle().take(x.1.len())) - .collect::>(), - ) - })); - - let data = RegressionDataBuilder::new().build_from(data).ok()?; - - let model = FormulaRegressionBuilder::new() - .data(&data) - .formula(format!("Y ~ {}", names.join(" + "))) - .fit() - .ok()?; - - let slopes = model - .parameters - .regressor_values - .iter() - .enumerate() - .map(|(_, x)| (*x + 0.5) as u128) - .collect(); - let value_dists = results .iter() .map(|(p, vs)| { @@ -269,12 +335,33 @@ impl Analysis { }) .collect::>(); + let mut ys: Vec = Vec::new(); + let mut xs: Vec = Vec::new(); + for result in results { + let x: Vec = result.0.iter().map(|value| *value as f64).collect(); + for y in result.1 { + xs.extend(x.iter().copied()); + ys.push(y as f64); + } + } + + let (intercept, slopes, errors) = linear_regression(xs, ys, r[0].components.len())?; + 
Some(Self { - base: (model.parameters.intercept_value + 0.5) as u128, - slopes, + base: selector.scale_and_cast_weight(intercept, true), + slopes: slopes + .into_iter() + .map(|value| selector.scale_and_cast_weight(value, true)) + .collect(), names, value_dists: Some(value_dists), - model: Some(model), + errors: Some( + errors + .into_iter() + .map(|value| selector.scale_and_cast_weight(value, false)) + .collect(), + ), + selector, }) } @@ -304,9 +391,9 @@ impl Analysis { .for_each(|(a, b)| assert!(a == b, "benchmark results not in the same order")); let names = median_slopes.names; let value_dists = min_squares.value_dists; - let model = min_squares.model; + let errors = min_squares.errors; - Some(Self { base, slopes, names, value_dists, model }) + Some(Self { base, slopes, names, value_dists, errors, selector }) } } @@ -363,18 +450,19 @@ impl std::fmt::Display for Analysis { } } } - if let Some(ref model) = self.model { + + if let Some(ref errors) = self.errors { writeln!(f, "\nQuality and confidence:")?; writeln!(f, "param error")?; - for (p, se) in self.names.iter().zip(model.se.regressor_values.iter()) { - writeln!(f, "{} {:>8}", p, ms(*se as u128))?; + for (p, se) in self.names.iter().zip(errors.iter()) { + writeln!(f, "{} {:>8}", p, ms(self.selector.nanos_from_weight(*se)))?; } } writeln!(f, "\nModel:")?; - writeln!(f, "Time ~= {:>8}", ms(self.base))?; + writeln!(f, "Time ~= {:>8}", ms(self.selector.nanos_from_weight(self.base)))?; for (&t, n) in self.slopes.iter().zip(self.names.iter()) { - writeln!(f, " + {} {:>8}", n, ms(t))?; + writeln!(f, " + {} {:>8}", n, ms(self.selector.nanos_from_weight(t)))?; } writeln!(f, " µs") } @@ -415,6 +503,53 @@ mod tests { } } + #[test] + fn test_linear_regression() { + let ys = vec![ + 3797981.0, + 37857779.0, + 70569402.0, + 104004114.0, + 137233924.0, + 169826237.0, + 203521133.0, + 237552333.0, + 271082065.0, + 305554637.0, + 335218347.0, + 371759065.0, + 405086197.0, + 438353555.0, + 472891417.0, + 505339532.0, + 527784778.0, + 562590596.0, + 635291991.0, + 673027090.0, + 708119408.0, + ]; + let xs = vec![ + 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, + 16.0, 17.0, 18.0, 19.0, 20.0, + ]; + + let (intercept, params, errors) = + raw_linear_regression(xs.clone(), ys.clone(), 1, true).unwrap(); + assert_eq!(intercept as i64, -2712997); + assert_eq!(params.len(), 1); + assert_eq!(params[0] as i64, 34444926); + assert_eq!(errors.len(), 2); + assert_eq!(errors[0] as i64, 4805766); + assert_eq!(errors[1] as i64, 411084); + + let (intercept, params, errors) = linear_regression(xs, ys, 1).unwrap(); + assert_eq!(intercept as i64, 3797981); + assert_eq!(params.len(), 1); + assert_eq!(params[0] as i64, 33968513); + assert_eq!(errors.len(), 1); + assert_eq!(errors[0] as i64, 217331); + } + #[test] fn analysis_median_slopes_should_work() { let data = vec![ @@ -478,8 +613,8 @@ mod tests { let extrinsic_time = Analysis::median_slopes(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); - assert_eq!(extrinsic_time.base, 10_000_000); - assert_eq!(extrinsic_time.slopes, vec![1_000_000, 100_000]); + assert_eq!(extrinsic_time.base, 10_000_000_000); + assert_eq!(extrinsic_time.slopes, vec![1_000_000_000, 100_000_000]); let reads = Analysis::median_slopes(&data, BenchmarkSelector::Reads).unwrap(); assert_eq!(reads.base, 2); @@ -553,15 +688,30 @@ mod tests { let extrinsic_time = Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); - assert_eq!(extrinsic_time.base, 10_000_000); - 
assert_eq!(extrinsic_time.slopes, vec![1_000_000, 100_000]); + assert_eq!(extrinsic_time.base, 11_500_000_000); + assert_eq!(extrinsic_time.slopes, vec![630635838, 23699421]); let reads = Analysis::min_squares_iqr(&data, BenchmarkSelector::Reads).unwrap(); - assert_eq!(reads.base, 2); + assert_eq!(reads.base, 3); assert_eq!(reads.slopes, vec![1, 0]); let writes = Analysis::min_squares_iqr(&data, BenchmarkSelector::Writes).unwrap(); - assert_eq!(writes.base, 0); + assert_eq!(writes.base, 2); assert_eq!(writes.slopes, vec![0, 2]); } + + #[test] + fn analysis_min_squares_iqr_uses_multiple_samples_for_same_parameters() { + let data = vec![ + benchmark_result(vec![(BenchmarkParameter::n, 0)], 2_000_000, 0, 0, 0), + benchmark_result(vec![(BenchmarkParameter::n, 0)], 4_000_000, 0, 0, 0), + benchmark_result(vec![(BenchmarkParameter::n, 1)], 4_000_000, 0, 0, 0), + benchmark_result(vec![(BenchmarkParameter::n, 1)], 8_000_000, 0, 0, 0), + ]; + + let extrinsic_time = + Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); + assert_eq!(extrinsic_time.base, 2_000_000_000); + assert_eq!(extrinsic_time.slopes, vec![4_000_000_000]); + } } diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 614216a96940a..3e3888dbf111d 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -30,7 +30,7 @@ mod utils; pub mod baseline; #[cfg(feature = "std")] -pub use analysis::{Analysis, AnalysisChoice, BenchmarkSelector, RegressionModel}; +pub use analysis::{Analysis, AnalysisChoice, BenchmarkSelector}; #[doc(hidden)] pub use frame_support; #[doc(hidden)] diff --git a/utils/frame/benchmarking-cli/src/pallet/writer.rs b/utils/frame/benchmarking-cli/src/pallet/writer.rs index 42a237fcf3ce3..a601c09eb5033 100644 --- a/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -29,7 +29,6 @@ use serde::Serialize; use crate::{pallet::command::ComponentRange, shared::UnderscoreHelper, PalletCmd}; use frame_benchmarking::{ Analysis, AnalysisChoice, BenchmarkBatchSplitResults, BenchmarkResult, BenchmarkSelector, - RegressionModel, }; use frame_support::traits::StorageInfo; use sp_core::hexdisplay::HexDisplay; @@ -145,13 +144,15 @@ fn map_results( Ok(all_benchmarks) } -// Get an iterator of errors from a model. If the model is `None` all errors are zero. -fn extract_errors(model: &Option) -> impl Iterator + '_ { - let mut errors = model.as_ref().map(|m| m.se.regressor_values.iter()); - std::iter::from_fn(move || match &mut errors { - Some(model) => model.next().map(|val| *val as u128), - _ => Some(0), - }) +// Get an iterator of errors. +fn extract_errors(errors: &Option>) -> impl Iterator + '_ { + errors + .as_ref() + .map(|e| e.as_slice()) + .unwrap_or(&[]) + .iter() + .copied() + .chain(std::iter::repeat(0)) } // Analyze and return the relevant results for a given benchmark. 
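A minimal self-contained sketch (not part of the patch) of the padding behaviour the rewritten `extract_errors` helper provides: it yields the stored regression errors first and then an endless run of zeroes, so the callers in `get_benchmark_data` can simply `zip` it against each slope list without length checks. The `errors` and `slopes` values below are made up, and the real code zips rather than taking a fixed count:

fn main() {
    let errors: Option<Vec<u128>> = Some(vec![5, 7]);
    let slopes: Vec<u128> = vec![100, 200, 300];
    // Same chain as the new `extract_errors`: stored errors first, then zeroes forever.
    let padded: Vec<u128> = errors
        .as_ref()
        .map(|e| e.as_slice())
        .unwrap_or(&[])
        .iter()
        .copied()
        .chain(std::iter::repeat(0))
        .take(slopes.len())
        .collect();
    // The third component had no error estimate, so it falls back to zero.
    assert_eq!(padded, vec![5, 7, 0]);
}
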
@@ -190,24 +191,20 @@ fn get_benchmark_data( .slopes .into_iter() .zip(extrinsic_time.names.iter()) - .zip(extract_errors(&extrinsic_time.model)) + .zip(extract_errors(&extrinsic_time.errors)) .for_each(|((slope, name), error)| { if !slope.is_zero() { if !used_components.contains(&name) { used_components.push(name); } - used_extrinsic_time.push(ComponentSlope { - name: name.clone(), - slope: slope.saturating_mul(1000), - error: error.saturating_mul(1000), - }); + used_extrinsic_time.push(ComponentSlope { name: name.clone(), slope, error }); } }); reads .slopes .into_iter() .zip(reads.names.iter()) - .zip(extract_errors(&reads.model)) + .zip(extract_errors(&reads.errors)) .for_each(|((slope, name), error)| { if !slope.is_zero() { if !used_components.contains(&name) { @@ -220,7 +217,7 @@ fn get_benchmark_data( .slopes .into_iter() .zip(writes.names.iter()) - .zip(extract_errors(&writes.model)) + .zip(extract_errors(&writes.errors)) .for_each(|((slope, name), error)| { if !slope.is_zero() { if !used_components.contains(&name) { @@ -251,7 +248,7 @@ fn get_benchmark_data( BenchmarkData { name: String::from_utf8(batch.benchmark.clone()).unwrap(), components, - base_weight: extrinsic_time.base.saturating_mul(1000), + base_weight: extrinsic_time.base, base_reads: reads.base, base_writes: writes.base, component_weight: used_extrinsic_time, From dc22e48e1da70b84fb7b8a1ffc2076cf1b6e1d99 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 14 Sep 2022 18:31:58 +0200 Subject: [PATCH 132/348] Fix Rust features (#11976) * Add std feature Signed-off-by: Oliver Tale-Yazdi * Fix features Signed-off-by: Oliver Tale-Yazdi * WIP Signed-off-by: Oliver Tale-Yazdi * Fix features Signed-off-by: Oliver Tale-Yazdi * Fmt Signed-off-by: Oliver Tale-Yazdi * Fix features Signed-off-by: Oliver Tale-Yazdi * Cleanup Signed-off-by: Oliver Tale-Yazdi * Impl function also in tests Signed-off-by: Oliver Tale-Yazdi * Make compile Signed-off-by: Oliver Tale-Yazdi * Fix sp-trie feature Something makes the bench regression guard fail, maybe this? Signed-off-by: Oliver Tale-Yazdi * Add runtime-benchmarks feature to sc-service Signed-off-by: Oliver Tale-Yazdi * Revert "Fix sp-trie feature" This reverts commit f2cddfe41bc72e6f2f8133795ec9408ba0c3ec63. Was already fixed, only needed a CI retry. 
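A key part of this fix is that benchmarking-only code paths now compile only when the `runtime-benchmarks` feature is enabled; the `BenchmarkCmd::Storage` arms added to the node `command.rs` files in the diff below show the pattern. A hedged sketch of that cfg-gating, using an invented free function name since the real arms live inside a `match`:

#[cfg(not(feature = "runtime-benchmarks"))]
fn storage_benchmark() -> Result<(), String> {
    // Without the feature, the command is rejected with a hint on how to enable it.
    Err("Storage benchmarking can be enabled with `--features runtime-benchmarks`.".into())
}

#[cfg(feature = "runtime-benchmarks")]
fn storage_benchmark() -> Result<(), String> {
    // With the feature enabled, the real code builds the service components and
    // runs the storage benchmark command here.
    Ok(())
}
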
Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- Cargo.lock | 29 +++++++++++++++++++ bin/node-template/node/Cargo.toml | 7 ++++- bin/node-template/node/src/command.rs | 6 ++++ bin/node-template/pallets/template/Cargo.toml | 2 +- bin/node-template/runtime/Cargo.toml | 5 +++- bin/node/cli/Cargo.toml | 5 +++- bin/node/cli/src/command.rs | 6 ++++ bin/node/runtime/Cargo.toml | 21 ++++++++++---- client/service/Cargo.toml | 1 + frame/alliance/Cargo.toml | 4 ++- frame/assets/Cargo.toml | 2 +- frame/babe/Cargo.toml | 2 +- frame/babe/src/benchmarking.rs | 29 ++----------------- frame/babe/src/tests.rs | 21 ++++++++++++++ frame/bags-list/Cargo.toml | 9 +++++- frame/bags-list/fuzzer/Cargo.toml | 2 +- frame/bags-list/src/lib.rs | 2 +- frame/bags-list/src/list/mod.rs | 2 +- frame/bags-list/src/mock.rs | 3 +- frame/balances/Cargo.toml | 2 +- frame/benchmarking/Cargo.toml | 4 ++- frame/benchmarking/src/lib.rs | 2 ++ frame/bounties/Cargo.toml | 1 + frame/child-bounties/Cargo.toml | 1 + frame/collective/Cargo.toml | 2 +- frame/contracts/Cargo.toml | 2 +- frame/conviction-voting/Cargo.toml | 5 ++-- frame/democracy/Cargo.toml | 2 +- .../election-provider-multi-phase/Cargo.toml | 3 +- frame/election-provider-support/Cargo.toml | 1 + .../benchmarking/Cargo.toml | 2 +- frame/election-provider-support/src/lib.rs | 8 ++--- frame/elections-phragmen/Cargo.toml | 1 + frame/examples/basic/Cargo.toml | 2 +- frame/gilt/Cargo.toml | 2 +- frame/grandpa/Cargo.toml | 2 +- frame/identity/Cargo.toml | 2 +- frame/im-online/Cargo.toml | 1 + frame/indices/Cargo.toml | 1 + frame/lottery/Cargo.toml | 1 + frame/membership/Cargo.toml | 2 +- frame/merkle-mountain-range/Cargo.toml | 2 +- .../merkle-mountain-range/src/benchmarking.rs | 2 ++ frame/merkle-mountain-range/src/lib.rs | 2 +- frame/multisig/Cargo.toml | 1 + frame/nomination-pools/Cargo.toml | 10 +++++-- .../nomination-pools/benchmarking/Cargo.toml | 25 ++++++++++++---- .../nomination-pools/benchmarking/src/lib.rs | 1 + frame/offences/benchmarking/Cargo.toml | 12 ++++---- frame/offences/benchmarking/src/lib.rs | 1 + frame/preimage/Cargo.toml | 2 +- frame/proxy/Cargo.toml | 1 + frame/ranked-collective/Cargo.toml | 2 +- frame/recovery/Cargo.toml | 2 +- frame/referenda/Cargo.toml | 2 +- frame/remark/Cargo.toml | 1 + frame/scheduler/Cargo.toml | 2 +- frame/session/benchmarking/Cargo.toml | 6 +++- frame/session/benchmarking/src/lib.rs | 1 + frame/staking/Cargo.toml | 8 ++++- frame/staking/src/pallet/impls.rs | 7 ++++- frame/state-trie-migration/Cargo.toml | 2 +- frame/support/Cargo.toml | 6 ++-- frame/system/benchmarking/Cargo.toml | 6 ++++ frame/timestamp/Cargo.toml | 3 +- frame/tips/Cargo.toml | 1 + .../asset-tx-payment/Cargo.toml | 1 + frame/transaction-storage/Cargo.toml | 2 ++ frame/treasury/Cargo.toml | 1 + frame/uniques/Cargo.toml | 2 +- frame/utility/Cargo.toml | 1 + frame/vesting/Cargo.toml | 1 + frame/whitelist/Cargo.toml | 1 + primitives/api/Cargo.toml | 8 ++--- primitives/core/Cargo.toml | 2 ++ primitives/core/hashing/Cargo.toml | 1 + primitives/inherents/Cargo.toml | 4 +-- primitives/io/Cargo.toml | 9 +++--- primitives/runtime-interface/Cargo.toml | 2 ++ primitives/runtime/Cargo.toml | 1 - primitives/runtime/src/lib.rs | 4 --- test-utils/runtime/Cargo.toml | 1 + utils/frame/benchmarking-cli/Cargo.toml | 2 +- utils/frame/generate-bags/Cargo.toml | 2 +- 84 files changed, 250 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 34d2296e019ce..2aad399f3bde3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -427,6 +427,12 @@ version = 
"0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "base64ct" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bdca834647821e0b13d9539a8634eb62d3501b6b6c2cec1722786ee6671b851" + [[package]] name = "beef" version = "0.5.1" @@ -5968,6 +5974,7 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", + "sp-runtime-interface", "sp-staking", "sp-std", ] @@ -6842,6 +6849,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs8" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +dependencies = [ + "der", + "spki", + "zeroize", +] + [[package]] name = "pkg-config" version = "0.3.19" @@ -9127,6 +9145,7 @@ checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" dependencies = [ "der", "generic-array 0.14.4", + "pkcs8", "subtle", "zeroize", ] @@ -10339,6 +10358,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spki" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "ss58-registry" version = "1.29.0" diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index eeba198da8212..dc1d3caa8b75d 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -67,7 +67,12 @@ substrate-build-script-utils = { version = "3.0.0", path = "../../../utils/build [features] default = [] -runtime-benchmarks = ["node-template-runtime/runtime-benchmarks"] +# Dependencies that are only required if runtime benchmarking should be build. +runtime-benchmarks = [ + "node-template-runtime/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-benchmarking-cli/runtime-benchmarks", +] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. try-runtime = ["node-template-runtime/try-runtime", "try-runtime-cli"] diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index f8f02ed8fc2dd..6d293b7b85fcc 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -127,6 +127,12 @@ pub fn run() -> sc_cli::Result<()> { let PartialComponents { client, .. } = service::new_partial(&config)?; cmd.run(client) }, + #[cfg(not(feature = "runtime-benchmarks"))] + BenchmarkCmd::Storage(_) => Err( + "Storage benchmarking can be enabled with `--features runtime-benchmarks`." + .into(), + ), + #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => { let PartialComponents { client, backend, .. 
} = service::new_partial(&config)?; diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 6f7a4b1d25841..3cfcef9d902ce 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -30,7 +30,7 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../../../../ default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index bbe3e8eef7d3c..c6c4b2ea911ec 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -57,6 +57,9 @@ substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-bu [features] default = ["std"] std = [ + "frame-try-runtime?/std", + "frame-system-benchmarking?/std", + "frame-benchmarking?/std", "codec/std", "scale-info/std", "frame-executive/std", @@ -88,7 +91,7 @@ std = [ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", - "frame-system-benchmarking", + "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", "hex-literal", "pallet-balances/runtime-benchmarks", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 247a61d835dc3..a435945168e0d 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -159,7 +159,10 @@ cli = [ "substrate-build-script-utils", "try-runtime-cli", ] -runtime-benchmarks = ["kitchensink-runtime/runtime-benchmarks", "frame-benchmarking-cli"] +runtime-benchmarks = [ + "kitchensink-runtime/runtime-benchmarks", + "frame-benchmarking-cli/runtime-benchmarks" +] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. try-runtime = ["kitchensink-runtime/try-runtime", "try-runtime-cli"] diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 79abe8975f3b6..108d7743843b6 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -119,6 +119,12 @@ pub fn run() -> Result<()> { let partial = new_partial(&config)?; cmd.run(partial.client) }, + #[cfg(not(feature = "runtime-benchmarks"))] + BenchmarkCmd::Storage(_) => Err( + "Storage benchmarking can be enabled with `--features runtime-benchmarks`." 
+ .into(), + ), + #[cfg(feature = "runtime-benchmarks")] BenchmarkCmd::Storage(cmd) => { // ensure that we keep the task manager alive let partial = new_partial(&config)?; diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index eb9c91a3baa05..6a81c16780240 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -117,6 +117,13 @@ substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-bu default = ["std"] with-tracing = ["frame-executive/with-tracing"] std = [ + "sp-sandbox/std", + "pallet-whitelist/std", + "pallet-offences-benchmarking?/std", + "pallet-election-provider-support-benchmarking?/std", + "pallet-asset-tx-payment/std", + "frame-system-benchmarking?/std", + "frame-election-provider-support/std", "sp-authority-discovery/std", "pallet-assets/std", "pallet-authority-discovery/std", @@ -148,6 +155,7 @@ std = [ "pallet-multisig/std", "pallet-nomination-pools/std", "pallet-nomination-pools-runtime-api/std", + "pallet-nomination-pools-benchmarking?/std", "pallet-identity/std", "pallet-scheduler/std", "node-primitives/std", @@ -159,6 +167,7 @@ std = [ "pallet-randomness-collective-flip/std", "sp-std/std", "pallet-session/std", + "pallet-session-benchmarking?/std", "sp-api/std", "sp-runtime/std", "sp-staking/std", @@ -167,7 +176,7 @@ std = [ "sp-session/std", "pallet-sudo/std", "frame-support/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-system-rpc-runtime-api/std", "frame-system/std", "pallet-election-provider-multi-phase/std", @@ -188,7 +197,7 @@ std = [ "pallet-uniques/std", "pallet-vesting/std", "log/std", - "frame-try-runtime/std", + "frame-try-runtime?/std", "sp-io/std", "pallet-child-bounties/std", "pallet-alliance/std", @@ -221,8 +230,8 @@ runtime-benchmarks = [ "pallet-membership/runtime-benchmarks", "pallet-mmr/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", - "pallet-nomination-pools-benchmarking", - "pallet-offences-benchmarking", + "pallet-nomination-pools-benchmarking/runtime-benchmarks", + "pallet-offences-benchmarking/runtime-benchmarks", "pallet-preimage/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", @@ -230,7 +239,7 @@ runtime-benchmarks = [ "pallet-referenda/runtime-benchmarks", "pallet-recovery/runtime-benchmarks", "pallet-remark/runtime-benchmarks", - "pallet-session-benchmarking", + "pallet-session-benchmarking/runtime-benchmarks", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", @@ -242,7 +251,7 @@ runtime-benchmarks = [ "pallet-uniques/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", "pallet-whitelist/runtime-benchmarks", - "frame-system-benchmarking", + "frame-system-benchmarking/runtime-benchmarks", "hex-literal", ] try-runtime = [ diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 76c61fe17f0da..3c574ef13c8e6 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -20,6 +20,7 @@ rocksdb = ["sc-client-db/rocksdb"] wasmtime = ["sc-executor/wasmtime"] # exposes the client type test-helpers = [] +runtime-benchmarks = ["sc-client-db/runtime-benchmarks"] [dependencies] jsonrpsee = { version = "0.15.1", features = ["server"] } diff --git a/frame/alliance/Cargo.toml b/frame/alliance/Cargo.toml index 706827708ce88..24e047091009b 100644 --- a/frame/alliance/Cargo.toml +++ b/frame/alliance/Cargo.toml @@ -41,6 +41,8 @@ pallet-collective = { version = "4.0.0-dev", path = "../collective" } [features] default 
= ["std"] std = [ + "pallet-collective?/std", + "frame-benchmarking?/std", "log/std", "codec/std", "scale-info/std", @@ -55,7 +57,7 @@ std = [ runtime-benchmarks = [ "hex", "sha2", - "frame-benchmarking", + "frame-benchmarking/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 9e98d4e15aed4..7e750f7618437 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -39,7 +39,7 @@ std = [ "sp-runtime/std", "frame-support/std", "frame-system/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index dd76726df3017..9f79a404724e0 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -43,7 +43,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/babe/src/benchmarking.rs b/frame/babe/src/benchmarking.rs index ac7ab28b5164a..20002c97f8e56 100644 --- a/frame/babe/src/benchmarking.rs +++ b/frame/babe/src/benchmarking.rs @@ -17,6 +17,8 @@ //! Benchmarks for the BABE Pallet. +#![cfg(feature = "runtime-benchmarks")] + use super::*; use frame_benchmarking::benchmarks; @@ -70,30 +72,3 @@ benchmarks! { crate::mock::Test, ) } - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - - #[test] - fn test_generate_equivocation_report_blob() { - let (pairs, mut ext) = new_test_ext_with_pairs(3); - - let offending_authority_index = 0; - let offending_authority_pair = &pairs[0]; - - ext.execute_with(|| { - start_era(1); - - let equivocation_proof = generate_equivocation_proof( - offending_authority_index, - offending_authority_pair, - CurrentSlot::::get() + 1, - ); - - println!("equivocation_proof: {:?}", equivocation_proof); - println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode()); - }); - } -} diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 84e55ed5d050b..1015ada7f0185 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -926,3 +926,24 @@ fn add_epoch_configurations_migration_works() { assert_eq!(PendingEpochConfigChange::::get(), Some(next_config_descriptor)); }); } + +#[test] +fn generate_equivocation_report_blob() { + let (pairs, mut ext) = new_test_ext_with_pairs(3); + + let offending_authority_index = 0; + let offending_authority_pair = &pairs[0]; + + ext.execute_with(|| { + start_era(1); + + let equivocation_proof = generate_equivocation_proof( + offending_authority_index, + offending_authority_pair, + CurrentSlot::::get() + 1, + ); + + println!("equivocation_proof: {:?}", equivocation_proof); + println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode()); + }); +} diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml index 9590d3d3ec4a4..2f9fa6c624538 100644 --- a/frame/bags-list/Cargo.toml +++ b/frame/bags-list/Cargo.toml @@ -41,12 +41,18 @@ sp-core = { version = "6.0.0", path = "../../primitives/core"} sp-io = { version = "6.0.0", path = "../../primitives/io"} sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } -frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support", features = ["runtime-benchmarks"] } 
+frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } [features] default = ["std"] std = [ + "sp-tracing?/std", + "sp-io?/std", + "sp-core?/std", + "pallet-balances?/std", + "frame-benchmarking?/std", + "scale-info/std", "codec/std", "sp-runtime/std", "sp-std/std", @@ -68,5 +74,6 @@ fuzz = [ "sp-io", "pallet-balances", "sp-tracing", + "frame-election-provider-support/fuzz", ] try-runtime = [ "frame-support/try-runtime" ] diff --git a/frame/bags-list/fuzzer/Cargo.toml b/frame/bags-list/fuzzer/Cargo.toml index ec7d98255b019..0f10077dcbce8 100644 --- a/frame/bags-list/fuzzer/Cargo.toml +++ b/frame/bags-list/fuzzer/Cargo.toml @@ -13,7 +13,7 @@ publish = false [dependencies] honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } -frame-election-provider-support = { version = "4.0.0-dev", features = ["runtime-benchmarks"], path = "../../election-provider-support" } +frame-election-provider-support = { version = "4.0.0-dev", features = ["fuzz"], path = "../../election-provider-support" } pallet-bags-list = { version = "4.0.0-dev", features = ["fuzz"], path = ".." } [[bin]] diff --git a/frame/bags-list/src/lib.rs b/frame/bags-list/src/lib.rs index 02adc064790f2..2b48fbf0ca630 100644 --- a/frame/bags-list/src/lib.rs +++ b/frame/bags-list/src/lib.rs @@ -387,7 +387,7 @@ impl, I: 'static> ScoreProvider for Pallet { Node::::get(id).map(|node| node.score()).unwrap_or_default() } - #[cfg(any(feature = "runtime-benchmarks", test))] + #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] fn set_score_of(id: &T::AccountId, new_score: T::Score) { ListNodes::::mutate(id, |maybe_node| { if let Some(node) = maybe_node.as_mut() { diff --git a/frame/bags-list/src/list/mod.rs b/frame/bags-list/src/list/mod.rs index cfa7daf198937..272526ad1a636 100644 --- a/frame/bags-list/src/list/mod.rs +++ b/frame/bags-list/src/list/mod.rs @@ -882,7 +882,7 @@ impl, I: 'static> Node { &self.id } - #[cfg(any(feature = "runtime-benchmarks", test))] + #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] pub fn set_score(&mut self, s: T::Score) { self.score = s } diff --git a/frame/bags-list/src/mock.rs b/frame/bags-list/src/mock.rs index 1044e4c450a41..f6bde63b3867c 100644 --- a/frame/bags-list/src/mock.rs +++ b/frame/bags-list/src/mock.rs @@ -40,7 +40,7 @@ impl frame_election_provider_support::ScoreProvider for StakingMock { *NextVoteWeightMap::get().get(id).unwrap_or(&NextVoteWeight::get()) } - #[cfg(any(feature = "runtime-benchmarks", test))] + #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] fn set_score_of(id: &AccountId, weight: Self::Score) { NEXT_VOTE_WEIGHT_MAP.with(|m| m.borrow_mut().insert(*id, weight)); } @@ -108,6 +108,7 @@ pub struct ExtBuilder { skip_genesis_ids: bool, } +#[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] impl ExtBuilder { /// Skip adding the default genesis ids to the list. 
#[cfg(test)] diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index 10150f0895906..fd2312993b7e7 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -31,7 +31,7 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index c098eee528233..49603915ca6da 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -54,4 +54,6 @@ std = [ "sp-std/std", "sp-storage/std", ] -runtime-benchmarks = [] +runtime-benchmarks = [ + "frame-system/runtime-benchmarks", +] diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 3e3888dbf111d..d96ed4dd17b6e 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -975,6 +975,8 @@ macro_rules! impl_benchmark { ( $( $name_extra:ident ),* ) ( $( $name_skip_meta:ident ),* ) ) => { + // We only need to implement benchmarks for the runtime-benchmarks feature or testing. + #[cfg(any(feature = "runtime-benchmarks", test))] impl, $instance: $instance_bound )? > $crate::Benchmarking for Pallet where T: frame_system::Config, $( $where_clause )* diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml index 645772fb27669..4aaf088abb5b6 100644 --- a/frame/bounties/Cargo.toml +++ b/frame/bounties/Cargo.toml @@ -33,6 +33,7 @@ pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/child-bounties/Cargo.toml b/frame/child-bounties/Cargo.toml index 575f3e38c8183..ee9a838744d25 100644 --- a/frame/child-bounties/Cargo.toml +++ b/frame/child-bounties/Cargo.toml @@ -34,6 +34,7 @@ pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index 0cb2a8b136044..aca2434127f03 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -28,7 +28,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 019f111302a55..284b186a8dff2 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -69,7 +69,7 @@ std = [ "sp-io/std", "sp-std/std", "sp-sandbox/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "wasm-instrument/std", diff --git a/frame/conviction-voting/Cargo.toml b/frame/conviction-voting/Cargo.toml index ab6d04d199bc6..3c40017ece8e7 100644 --- a/frame/conviction-voting/Cargo.toml +++ b/frame/conviction-voting/Cargo.toml @@ -36,7 +36,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", @@ -46,7 +46,8 @@ std = [ "sp-std/std", ] runtime-benchmarks = [ - "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", 
"frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index e0b85ed7d18df..f0ab3162c892b 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -34,7 +34,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index aea3b5cd6d2b0..ca94fef6a4356 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -52,6 +52,7 @@ frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } [features] default = ["std"] std = [ + "pallet-election-provider-support-benchmarking?/std", "codec/std", "scale-info/std", "log/std", @@ -68,7 +69,7 @@ std = [ "frame-election-provider-support/std", "log/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "rand/std", "strum/std", ] diff --git a/frame/election-provider-support/Cargo.toml b/frame/election-provider-support/Cargo.toml index 67e1ea63cb655..5d064c770f8d9 100644 --- a/frame/election-provider-support/Cargo.toml +++ b/frame/election-provider-support/Cargo.toml @@ -31,6 +31,7 @@ sp-npos-elections = { version = "4.0.0-dev", path = "../../primitives/npos-elect [features] default = ["std"] +fuzz = ["default"] std = [ "codec/std", "frame-support/std", diff --git a/frame/election-provider-support/benchmarking/Cargo.toml b/frame/election-provider-support/benchmarking/Cargo.toml index 00037d460db17..0f296d9a70ee0 100644 --- a/frame/election-provider-support/benchmarking/Cargo.toml +++ b/frame/election-provider-support/benchmarking/Cargo.toml @@ -25,7 +25,7 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../../../pri default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-election-provider-support/std", "frame-system/std", "sp-npos-elections/std", diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 21a01ce1904ec..9f60411435da0 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -519,9 +519,7 @@ pub trait SortedListProvider { /// If `who` changes by the returned amount they are guaranteed to have a worst case change /// in their list position. #[cfg(feature = "runtime-benchmarks")] - fn score_update_worst_case(_who: &AccountId, _is_increase: bool) -> Self::Score { - Self::Score::max_value() - } + fn score_update_worst_case(_who: &AccountId, _is_increase: bool) -> Self::Score; } /// Something that can provide the `Score` of an account. Similar to [`ElectionProvider`] and @@ -533,8 +531,8 @@ pub trait ScoreProvider { /// Get the current `Score` of `who`. fn score(who: &AccountId) -> Self::Score; - /// For tests and benchmarks, set the `score`. - #[cfg(any(feature = "runtime-benchmarks", test))] + /// For tests, benchmarks and fuzzing, set the `score`. 
+ #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz", test))] fn set_score_of(_: &AccountId, _: Self::Score) {} } diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index d71a74f76a114..2d71a6bed39df 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -36,6 +36,7 @@ substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/examples/basic/Cargo.toml b/frame/examples/basic/Cargo.toml index a5f0c7c89321a..e06bfa374cd9b 100644 --- a/frame/examples/basic/Cargo.toml +++ b/frame/examples/basic/Cargo.toml @@ -31,7 +31,7 @@ sp-core = { version = "6.0.0", default-features = false, path = "../../../primit default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/gilt/Cargo.toml b/frame/gilt/Cargo.toml index d6f61c6d250ba..8c60c847027a3 100644 --- a/frame/gilt/Cargo.toml +++ b/frame/gilt/Cargo.toml @@ -31,7 +31,7 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 2090a4ea2e228..4bd17b914cefa 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -45,7 +45,7 @@ sp-keyring = { version = "6.0.0", path = "../../primitives/keyring" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 8e821537fd9b2..92e55c5c2b934 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -31,7 +31,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index a90b95b21cd88..8c08ad1a8a89a 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -33,6 +33,7 @@ pallet-session = { version = "4.0.0-dev", path = "../session" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index 90eb18a106000..adc3f2a6ea90f 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -30,6 +30,7 @@ pallet-balances = { version = "4.0.0-dev", path = "../balances" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/lottery/Cargo.toml b/frame/lottery/Cargo.toml index 8f7c8eefe800d..486bb356059f6 100644 --- a/frame/lottery/Cargo.toml +++ b/frame/lottery/Cargo.toml @@ -32,6 +32,7 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 0473fd46956af..8ec1087e5ac0e 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -28,7 +28,7 @@ sp-std = { version = "4.0.0", 
default-features = false, path = "../../primitives default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index 75301afed0094..43d7d6ba2eed7 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -33,7 +33,7 @@ itertools = "0.10.3" default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "mmr-lib/std", diff --git a/frame/merkle-mountain-range/src/benchmarking.rs b/frame/merkle-mountain-range/src/benchmarking.rs index b698e432534d8..d24364a55f9e6 100644 --- a/frame/merkle-mountain-range/src/benchmarking.rs +++ b/frame/merkle-mountain-range/src/benchmarking.rs @@ -17,6 +17,8 @@ //! Benchmarks for the MMR pallet. +#![cfg(feature = "runtime-benchmarks")] + use crate::*; use frame_benchmarking::benchmarks_instance_pallet; use frame_support::traits::OnInitialize; diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 4644ebcb7da1c..9f989847af0f9 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -63,7 +63,7 @@ use sp_runtime::{ SaturatedConversion, }; -#[cfg(any(feature = "runtime-benchmarks", test))] +#[cfg(feature = "runtime-benchmarks")] mod benchmarking; mod default_weights; mod mmr; diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index a370215032714..6c8b5fbaa7362 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -29,6 +29,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/nomination-pools/Cargo.toml b/frame/nomination-pools/Cargo.toml index ef8465e670bf6..459ee5f440a12 100644 --- a/frame/nomination-pools/Cargo.toml +++ b/frame/nomination-pools/Cargo.toml @@ -32,9 +32,6 @@ sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } rand = { version = "0.8.5", features = ["small_rng"] } [features] -runtime-benchmarks = [] -try-runtime = [ "frame-support/try-runtime" ] -fuzz-test = [] default = ["std"] std = [ "codec/std", @@ -48,3 +45,10 @@ std = [ "sp-core/std", "log/std", ] +runtime-benchmarks = [ + "sp-staking/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime" +] +fuzz-test = [] diff --git a/frame/nomination-pools/benchmarking/Cargo.toml b/frame/nomination-pools/benchmarking/Cargo.toml index 2e045c95ec9b3..69ba6585481d5 100644 --- a/frame/nomination-pools/benchmarking/Cargo.toml +++ b/frame/nomination-pools/benchmarking/Cargo.toml @@ -22,12 +22,13 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = " frame-election-provider-support = { version = "4.0.0-dev", default-features = false, path = "../../election-provider-support" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } -pallet-bags-list = { version = "4.0.0-dev", default-features = false, features = ["runtime-benchmarks"], path = "../../bags-list" } -pallet-staking = { version = "4.0.0-dev", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } -pallet-nomination-pools = { version = "1.0.0", default-features = false, 
path = "../", features = ["runtime-benchmarks"] } +pallet-bags-list = { version = "4.0.0-dev", default-features = false, path = "../../bags-list" } +pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../staking" } +pallet-nomination-pools = { version = "1.0.0", default-features = false, path = "../" } # Substrate Primitives sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime-interface" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } @@ -40,6 +41,7 @@ sp-io = { version = "6.0.0", path = "../../../primitives/io" } [features] default = ["std"] + std = [ "frame-benchmarking/std", "frame-election-provider-support/std", @@ -49,7 +51,20 @@ std = [ "pallet-staking/std", "pallet-nomination-pools/std", "sp-runtime/std", + "sp-runtime-interface/std", + "sp-io/std", "sp-staking/std", - "sp-std/std", - "pallet-balances/std", + "sp-std/std", +] + +runtime-benchmarks = [ + "frame-election-provider-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "sp-staking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + "pallet-nomination-pools/runtime-benchmarks", + "pallet-bags-list/runtime-benchmarks", ] diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index 07d5c63493ef8..581cfc12a3121 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs @@ -17,6 +17,7 @@ //! Benchmarks for the nomination pools coupled with the staking and bags list pallets. 
+#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(test)] diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index 2b8e461b84192..3c7a43068af82 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -23,13 +23,9 @@ pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../b pallet-balances = { version = "4.0.0-dev", default-features = false, path = "../../balances" } pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../grandpa" } pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../im-online" } -pallet-offences = { version = "4.0.0-dev", default-features = false, features = [ - "runtime-benchmarks", -], path = "../../offences" } +pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../offences" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../session" } -pallet-staking = { version = "4.0.0-dev", default-features = false, features = [ - "runtime-benchmarks", -], path = "../../staking" } +pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../staking" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } @@ -60,3 +56,7 @@ std = [ "sp-staking/std", "sp-std/std", ] + +runtime-benchmarks = [ + "pallet-staking/runtime-benchmarks", +] diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index f73dd9caac9c6..c9498214eade4 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -17,6 +17,7 @@ //! Offences pallet benchmarking. 
+#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] mod mock; diff --git a/frame/preimage/Cargo.toml b/frame/preimage/Cargo.toml index 325e906c61a3c..9a5cc186cca64 100644 --- a/frame/preimage/Cargo.toml +++ b/frame/preimage/Cargo.toml @@ -33,7 +33,7 @@ runtime-benchmarks = [ ] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index aaacaa23021e7..afec89ad40fb8 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -30,6 +30,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/ranked-collective/Cargo.toml b/frame/ranked-collective/Cargo.toml index cb43b9ea4c831..c8cf671a97467 100644 --- a/frame/ranked-collective/Cargo.toml +++ b/frame/ranked-collective/Cargo.toml @@ -29,7 +29,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 396555b3e2758..fb33b88d2dfab 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -36,7 +36,7 @@ runtime-benchmarks = [ ] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "scale-info/std", diff --git a/frame/referenda/Cargo.toml b/frame/referenda/Cargo.toml index 508f5a5ef8688..4e68d7528ad8a 100644 --- a/frame/referenda/Cargo.toml +++ b/frame/referenda/Cargo.toml @@ -38,7 +38,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "sp-runtime/std", "sp-arithmetic/std", diff --git a/frame/remark/Cargo.toml b/frame/remark/Cargo.toml index fe20365b7c904..f644ea723b59f 100644 --- a/frame/remark/Cargo.toml +++ b/frame/remark/Cargo.toml @@ -31,6 +31,7 @@ sp-core = { version = "6.0.0", default-features = false, path = "../../primitive default = ["std"] runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index d92d0df0c8037..e78d8cd5061c1 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -34,7 +34,7 @@ runtime-benchmarks = [ ] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index 930ddb0ce7057..5b2fc0c9e1ebf 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -18,7 +18,7 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = " frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../session" } -pallet-staking = { version = "4.0.0-dev", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } 
+pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../staking" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } @@ -46,3 +46,7 @@ std = [ "sp-session/std", "sp-std/std", ] + +runtime-benchmarks = [ + "pallet-staking/runtime-benchmarks", +] diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 265c35cbe4908..9e478ada53cf2 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -18,6 +18,7 @@ //! Benchmarks for the Session Pallet. // This is separated into its own crate due to cyclic dependency issues. +#![cfg(feature = "runtime-benchmarks")] #![cfg_attr(not(feature = "std"), no_std)] mod mock; diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 37d13a54ba5ed..54d21f7ca6bcc 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -43,7 +43,7 @@ sp-npos-elections = { version = "4.0.0-dev", path = "../../primitives/npos-elect pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } -pallet-bags-list = { version = "4.0.0-dev", features = ["runtime-benchmarks"], path = "../bags-list" } +pallet-bags-list = { version = "4.0.0-dev", path = "../bags-list" } substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } frame-benchmarking = { version = "4.0.0-dev", path = "../benchmarking" } frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } @@ -52,6 +52,7 @@ rand_chacha = { version = "0.2" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "serde", "codec/std", "scale-info/std", @@ -61,6 +62,7 @@ std = [ "sp-runtime/std", "sp-staking/std", "pallet-session/std", + "pallet-bags-list/std", "frame-system/std", "pallet-authorship/std", "sp-application-crypto/std", @@ -74,3 +76,7 @@ runtime-benchmarks = [ "sp-staking/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] +fuzz = [ + "pallet-bags-list/fuzz", + "frame-election-provider-support/fuzz", +] diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 13038527bc2f8..aa8b2c01c6ff2 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -1282,7 +1282,7 @@ impl ScoreProvider for Pallet { Self::weight_of(who) } - #[cfg(feature = "runtime-benchmarks")] + #[cfg(any(feature = "runtime-benchmarks", feature = "fuzz"))] fn set_score_of(who: &T::AccountId, weight: Self::Score) { // this will clearly results in an inconsistent state, but it should not matter for a // benchmark. 
@@ -1378,6 +1378,11 @@ impl SortedListProvider for UseNominatorsAndValidatorsM #[allow(deprecated)] Validators::::remove_all(); } + + #[cfg(feature = "runtime-benchmarks")] + fn score_update_worst_case(_who: &T::AccountId, _is_increase: bool) -> Self::Score { + unimplemented!() + } } impl StakingInterface for Pallet { diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index f8a3b1ba3aab4..b803aad69263f 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -39,7 +39,7 @@ sp-tracing = { path = "../../primitives/tracing" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index d0a40a09f2211..98db3aa6aa49d 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -31,7 +31,7 @@ tt-call = "1.0.8" frame-support-procedural = { version = "4.0.0-dev", default-features = false, path = "./procedural" } paste = "1.0" once_cell = { version = "1", default-features = false, optional = true } -sp-state-machine = { version = "0.12.0", optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.12.0", default-features = false, optional = true, path = "../../primitives/state-machine" } bitflags = "1.3" impl-trait-for-tuples = "0.2.2" smallvec = "1.8.0" @@ -49,6 +49,8 @@ parity-util-mem = { version = "0.11.0", default-features = false, features = ["p [features] default = ["std"] std = [ + "sp-core/std", + "k256/std", "once_cell", "serde", "sp-api/std", @@ -62,7 +64,7 @@ std = [ "frame-metadata/std", "sp-inherents/std", "sp-staking/std", - "sp-state-machine", + "sp-state-machine/std", "sp-weights/std", "frame-support-procedural/std", "log/std", diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index 437d06a17c781..9ec9ed2ae6d21 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -37,3 +37,9 @@ std = [ "sp-runtime/std", "sp-std/std", ] + +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index 8967733f7c5c8..ac495d84b2c1e 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -33,8 +33,9 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } [features] default = ["std"] std = [ + "sp-io?/std", "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/tips/Cargo.toml b/frame/tips/Cargo.toml index e2ca152148db6..b00a684c1c83b 100644 --- a/frame/tips/Cargo.toml +++ b/frame/tips/Cargo.toml @@ -33,6 +33,7 @@ sp-storage = { version = "6.0.0", path = "../../primitives/storage" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/transaction-payment/asset-tx-payment/Cargo.toml b/frame/transaction-payment/asset-tx-payment/Cargo.toml index de9772d885529..2c1247cfc557a 100644 --- a/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -41,6 +41,7 @@ pallet-balances = { version = "4.0.0-dev", path = "../../balances" } [features] default = ["std"] std = [ + "scale-info/std", "serde", "codec/std", "sp-std/std", diff --git 
a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml index 8ed34cb50a652..61b71b000a616 100644 --- a/frame/transaction-storage/Cargo.toml +++ b/frame/transaction-storage/Cargo.toml @@ -36,6 +36,8 @@ sp-transaction-storage-proof = { version = "4.0.0-dev", default-features = true, default = ["std"] runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks", "hex-literal"] std = [ + "log/std", + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index 56c2fcdab58f7..08b0acdba5deb 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -34,6 +34,7 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/uniques/Cargo.toml b/frame/uniques/Cargo.toml index 19b0790947f84..31aa608ff84b6 100644 --- a/frame/uniques/Cargo.toml +++ b/frame/uniques/Cargo.toml @@ -32,7 +32,7 @@ sp-std = { version = "4.0.0", path = "../../primitives/std" } default = ["std"] std = [ "codec/std", - "frame-benchmarking/std", + "frame-benchmarking?/std", "frame-support/std", "frame-system/std", "log/std", diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index 7b56d7974e4b5..cbe2892d72dc7 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -30,6 +30,7 @@ sp-core = { version = "6.0.0", path = "../../primitives/core" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index eb902c0633331..6a64b474d1485 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -32,6 +32,7 @@ sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/ [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/whitelist/Cargo.toml b/frame/whitelist/Cargo.toml index daee560904a08..895a6e753816d 100644 --- a/frame/whitelist/Cargo.toml +++ b/frame/whitelist/Cargo.toml @@ -31,6 +31,7 @@ sp-io = { version = "6.0.0", path = "../../primitives/io" } [features] default = ["std"] std = [ + "frame-benchmarking?/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 75c935bf844bd..a322799048a31 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -19,8 +19,8 @@ sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-version = { version = "5.0.0", default-features = false, path = "../version" } -sp-state-machine = { version = "0.12.0", optional = true, path = "../state-machine" } -sp-trie = { version = "6.0.0", optional = true, path = "../trie" } +sp-state-machine = { version = "0.12.0", default-features = false, optional = true, path = "../state-machine" } +sp-trie = { version = "6.0.0", default-features = false, optional = true, path = "../trie" } hash-db = { version = "0.15.2", optional = true } thiserror = { version = "1.0.30", optional = true } @@ -36,8 +36,8 @@ std = [ "sp-core/std", "sp-std/std", "sp-runtime/std", - "sp-state-machine", - "sp-trie", + "sp-state-machine/std", + "sp-trie/std", "sp-version/std", 
"hash-db", "thiserror", diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index bf54f8467d1ef..b611449b6730f 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -79,6 +79,8 @@ bench = false [features] default = ["std"] std = [ + "parity-util-mem/std", + "merlin?/std", "full_crypto", "log/std", "thiserror", diff --git a/primitives/core/hashing/Cargo.toml b/primitives/core/hashing/Cargo.toml index d85e28d1b2e56..d6fc6bed31fec 100644 --- a/primitives/core/hashing/Cargo.toml +++ b/primitives/core/hashing/Cargo.toml @@ -24,6 +24,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../std" } [features] default = ["std"] std = [ + "digest/std", "blake2/std", "byteorder/std", "sha2/std", diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index 628f938880e9f..b176147c053a6 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -19,7 +19,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = impl-trait-for-tuples = "0.2.2" thiserror = { version = "1.0.30", optional = true } sp-core = { version = "6.0.0", default-features = false, path = "../core" } -sp-runtime = { version = "6.0.0", optional = true, path = "../runtime" } +sp-runtime = { version = "6.0.0", optional = true, default-features = false, path = "../runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] @@ -31,7 +31,7 @@ std = [ "async-trait", "codec/std", "sp-core/std", - "sp-runtime", + "sp-runtime/std", "sp-std/std", "thiserror", ] diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 2e271d3949dee..26dec17e032dd 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -22,10 +22,10 @@ sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-keystore = { version = "0.12.0", default-features = false, optional = true, path = "../keystore" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } libsecp256k1 = { version = "0.7", optional = true } -sp-state-machine = { version = "0.12.0", optional = true, path = "../state-machine" } +sp-state-machine = { version = "0.12.0", default-features = false, optional = true, path = "../state-machine" } sp-wasm-interface = { version = "6.0.0", path = "../wasm-interface", default-features = false } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../runtime-interface" } -sp-trie = { version = "6.0.0", optional = true, path = "../trie" } +sp-trie = { version = "6.0.0", default-features = false, optional = true, path = "../trie" } sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" } sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing" } log = { version = "0.4.17", optional = true } @@ -38,14 +38,15 @@ tracing-core = { version = "0.1.28", default-features = false} [features] default = ["std"] std = [ + "bytes/std", "sp-externalities/std", "sp-core/std", "sp-keystore", "codec/std", "sp-std/std", "hash-db/std", - "sp-trie", - "sp-state-machine", + "sp-trie/std", + "sp-state-machine/std", "libsecp256k1", "secp256k1", "sp-runtime-interface/std", diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index a657c98381c9a..51367d40d0cd0 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -37,6 +37,8 @@ trybuild = "1.0.60" [features] default = [ "std" ] std = [ 
+ "sp-storage/std", + "bytes/std", "sp-wasm-interface/std", "sp-std/std", "sp-tracing/std", diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 278962bcf57c4..1d1a7a2c38b1d 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -41,7 +41,6 @@ sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } [features] -bench = [] runtime-benchmarks = [] default = ["std"] std = [ diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 9c2b691f64faa..8017a6ac529a2 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -19,10 +19,6 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -// to allow benchmarking -#![cfg_attr(feature = "bench", feature(test))] -#[cfg(feature = "bench")] -extern crate test; #[doc(hidden)] pub use codec; diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 6cea6282f5bd8..744cc527e6012 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -67,6 +67,7 @@ default = [ "std", ] std = [ + "parity-util-mem/std", "beefy-primitives/std", "beefy-merkle-tree/std", "sp-application-crypto/std", diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 4f9d703544096..9e5cb12070af9 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -59,6 +59,6 @@ sp-trie = { version = "6.0.0", path = "../../../primitives/trie" } gethostname = "0.2.3" [features] -default = ["rocksdb", "runtime-benchmarks"] +default = ["rocksdb"] runtime-benchmarks = ["sc-client-db/runtime-benchmarks"] rocksdb = ["sc-cli/rocksdb", "sc-client-db/rocksdb"] diff --git a/utils/frame/generate-bags/Cargo.toml b/utils/frame/generate-bags/Cargo.toml index 34d62ab0d8b5f..18f668485114b 100644 --- a/utils/frame/generate-bags/Cargo.toml +++ b/utils/frame/generate-bags/Cargo.toml @@ -12,7 +12,7 @@ readme = "README.md" [dependencies] # FRAME frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } -frame-election-provider-support = { version = "4.0.0-dev", path = "../../../frame/election-provider-support", features = ["runtime-benchmarks"] } +frame-election-provider-support = { version = "4.0.0-dev", path = "../../../frame/election-provider-support" } frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } pallet-staking = { version = "4.0.0-dev", path = "../../../frame/staking" } From 1dcb9ceea7e252eb779f044448bc693ec6161baf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 14 Sep 2022 22:08:02 +0200 Subject: [PATCH 133/348] Clean up the logging output (#12253) * Clean up the logging output Sadly `trust-dns` and `libp2p::iface` are printing stuff that isn't very informative and just confuses the user. So, we just disable logging output from both of these crates as we already have done this for other crates as well. 
* FMT --- client/tracing/src/logging/mod.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index 58941617bfb6a..978e24df68d78 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -133,7 +133,14 @@ where .add_directive( parse_default_directive("cranelift_wasm=warn").expect("provided directive is valid"), ) - .add_directive(parse_default_directive("hyper=warn").expect("provided directive is valid")); + .add_directive(parse_default_directive("hyper=warn").expect("provided directive is valid")) + .add_directive( + parse_default_directive("trust_dns_proto=off").expect("provided directive is valid"), + ) + .add_directive( + parse_default_directive("libp2p_mdns::behaviour::iface=off") + .expect("provided directive is valid"), + ); if let Ok(lvl) = std::env::var("RUST_LOG") { if lvl != "" { From fd549573b2fffd620d9f2fcea76edab1cdabc62f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 15 Sep 2022 08:00:11 +0200 Subject: [PATCH 134/348] Bump wasm-instrument (#12234) * Bump wasm-instrument * Fix benchmarks --- Cargo.lock | 4 ++-- client/executor/common/Cargo.toml | 2 +- frame/contracts/Cargo.toml | 2 +- frame/contracts/src/benchmarking/mod.rs | 4 ++-- frame/contracts/src/schedule.rs | 2 +- frame/contracts/src/wasm/runtime.rs | 8 ++++---- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2aad399f3bde3..367713b9361ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11578,9 +11578,9 @@ dependencies = [ [[package]] name = "wasm-instrument" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bca81f5279342b38b17d9acbf007a46ddeb73144e2bd5f0a21bfa9fc5d4ab3e" +checksum = "aa1dafb3e60065305741e83db35c6c2584bb3725b692b5b66148a38d72ace6cd" dependencies = [ "parity-wasm 0.45.0", ] diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 1af75c7e31610..71a6f2c324591 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } environmental = "1.1.3" thiserror = "1.0.30" -wasm-instrument = "0.2" +wasm-instrument = "0.3" wasmer = { version = "2.2", features = ["singlepass"], optional = true } wasmi = "0.13" sc-allocator = { version = "4.1.0-dev", path = "../../allocator" } diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 284b186a8dff2..e9a90504d7db7 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = ] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } log = { version = "0.4", default-features = false } -wasm-instrument = { version = "0.2", default-features = false } +wasm-instrument = { version = "0.3", default-features = false } serde = { version = "1", optional = true, features = ["derive"] } smallvec = { version = "1", default-features = false, features = [ "const_generics", diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 2f8d101504ffa..4a987fd8dde78 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -618,11 +618,11 @@ benchmarks! 
{ imported_functions: vec![ImportedFunction { module: "seal0", name: "gas", - params: vec![ValueType::I32], + params: vec![ValueType::I64], return_type: None, }], call_body: Some(body::repeated(r * API_BENCHMARK_BATCH_SIZE, &[ - Instruction::I32Const(42), + Instruction::I64Const(42), Instruction::Call(0), ])), .. Default::default() diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 63867a0cc7448..790b74106860a 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -525,7 +525,7 @@ impl Default for InstructionWeights { fn default() -> Self { let max_pages = Limits::default().memory_pages; Self { - version: 2, + version: 3, i64const: cost_instr!(instr_i64const, 1), i64load: cost_instr!(instr_i64load, 2), i64store: cost_instr!(instr_i64store, 2), diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 7caa7c5c6fa1f..edd413aa45bf0 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -164,7 +164,7 @@ impl> From for TrapReason { pub enum RuntimeCosts { /// Charge the gas meter with the cost of a metering block. The charged costs are /// the supplied cost of the block plus the overhead of the metering itself. - MeteringBlock(u32), + MeteringBlock(u64), /// Weight charged for copying data from the sandbox. CopyFromContract(u32), /// Weight charged for copying data to the sandbox. @@ -261,7 +261,7 @@ impl RuntimeCosts { { use self::RuntimeCosts::*; let weight = match *self { - MeteringBlock(amount) => s.gas.saturating_add(amount.into()), + MeteringBlock(amount) => s.gas.saturating_add(amount), CopyFromContract(len) => s.return_per_byte.saturating_mul(len.into()), CopyToContract(len) => s.input_per_byte.saturating_mul(len.into()), Caller => s.caller, @@ -957,7 +957,7 @@ pub mod env { /// This call is supposed to be called only by instrumentation injected code. /// /// - amount: How much gas is used. - fn gas(ctx: Runtime, amount: u32) -> Result<(), TrapReason> { + fn gas(ctx: Runtime, amount: u64) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::MeteringBlock(amount))?; Ok(()) } @@ -1737,7 +1737,7 @@ pub mod env { #[prefixed_alias] fn gas_left(ctx: Runtime, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::GasLeft)?; - let gas_left = &ctx.ext.gas_meter().gas_left().encode(); + let gas_left = &ctx.ext.gas_meter().gas_left().ref_time().encode(); Ok(ctx.write_sandbox_output(out_ptr, out_len_ptr, gas_left, false, already_charged)?) 
} From 3bef1baae8c428524152e6c5fbb1c35285a128b3 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Thu, 15 Sep 2022 10:27:56 +0300 Subject: [PATCH 135/348] Include Bitswap into request response protocols (#12262) --- client/network/src/service.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index fb98db3251647..dceb57d9e695c 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -150,6 +150,11 @@ where let local_public = local_identity.public(); let local_peer_id = local_public.to_peer_id(); + params + .network_config + .request_response_protocols + .extend(params.request_response_protocol_configs); + params.network_config.boot_nodes = params .network_config .boot_nodes From 12b5b5a523def11125e5302ba0c9b664be51f19a Mon Sep 17 00:00:00 2001 From: Vlad Date: Thu, 15 Sep 2022 14:21:17 +0300 Subject: [PATCH 136/348] Use `--locked` for Cargo in CI everywhere (#12270) --- scripts/ci/gitlab/pipeline/build.yml | 8 +++--- scripts/ci/gitlab/pipeline/test.yml | 38 ++++++++++++++-------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/scripts/ci/gitlab/pipeline/build.yml b/scripts/ci/gitlab/pipeline/build.yml index eedd2ee0bb409..482d54b50f73d 100644 --- a/scripts/ci/gitlab/pipeline/build.yml +++ b/scripts/ci/gitlab/pipeline/build.yml @@ -62,7 +62,7 @@ build-linux-substrate: - !reference [.rusty-cachier, before_script] script: - rusty-cachier snapshot create - - WASM_BUILD_NO_COLOR=1 time cargo build --release --verbose + - WASM_BUILD_NO_COLOR=1 time cargo build --locked --release --verbose - mv $CARGO_TARGET_DIR/release/substrate ./artifacts/substrate/. - echo -n "Substrate version = " - if [ "${CI_COMMIT_TAG}" ]; then @@ -95,7 +95,7 @@ build-linux-substrate: script: - rusty-cachier snapshot create - cd ./bin/utils/subkey - - SKIP_WASM_BUILD=1 time cargo build --release --verbose + - SKIP_WASM_BUILD=1 time cargo build --locked --release --verbose - cd - - mv $CARGO_TARGET_DIR/release/subkey ./artifacts/subkey/. - echo -n "Subkey version = " @@ -117,7 +117,7 @@ build-subkey-macos: - mkdir -p ./artifacts/subkey script: - cd ./bin/utils/subkey - - SKIP_WASM_BUILD=1 time cargo build --release --verbose + - SKIP_WASM_BUILD=1 time cargo build --locked --release --verbose - cd - - mv ./target/release/subkey ./artifacts/subkey/. 
- echo -n "Subkey version = " @@ -149,7 +149,7 @@ build-rustdoc: - ./crate-docs/ script: - rusty-cachier snapshot create - - time cargo +nightly doc --workspace --all-features --verbose --no-deps + - time cargo +nightly doc --locked --workspace --all-features --verbose --no-deps - rm -f $CARGO_TARGET_DIR/doc/.lock - mv $CARGO_TARGET_DIR/doc ./crate-docs # FIXME: remove me after CI image gets nonroot diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index 7be445b0bca98..bbe38cfb02293 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -83,14 +83,14 @@ cargo-check-benches: - echo "___Running benchmarks___"; - case ${CI_NODE_INDEX} in 1) - SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all; - cargo run --release -p node-bench -- ::trie::read::small --json + SKIP_WASM_BUILD=1 time cargo +nightly check --locked --benches --all; + cargo run --locked --release -p node-bench -- ::trie::read::small --json | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json; echo "___Uploading cache for rusty-cachier___"; rusty-cachier cache upload ;; 2) - cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json + cargo run --locked --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::native::sr25519::transfer_keep_alive::paritydb::small.json ;; esac @@ -140,7 +140,7 @@ cargo-check-subkey: script: - rusty-cachier snapshot create - cd ./bin/utils/subkey - - SKIP_WASM_BUILD=1 time cargo check --release + - SKIP_WASM_BUILD=1 time cargo check --locked --release - rusty-cachier cache upload cargo-check-try-runtime: @@ -154,7 +154,7 @@ cargo-check-try-runtime: - .test-refs script: - rusty-cachier snapshot create - - time cargo check --features try-runtime + - time cargo check --locked --features try-runtime - rusty-cachier cache upload cargo-check-wasmer-sandbox: @@ -168,7 +168,7 @@ cargo-check-wasmer-sandbox: - .test-refs script: - rusty-cachier snapshot create - - time cargo check --features wasmer-sandbox + - time cargo check --locked --features wasmer-sandbox - rusty-cachier cache upload test-deterministic-wasm: @@ -187,13 +187,13 @@ test-deterministic-wasm: script: - rusty-cachier snapshot create # build runtime - - cargo build --verbose --release -p kitchensink-runtime + - cargo build --locked --verbose --release -p kitchensink-runtime # make checksum - sha256sum $CARGO_TARGET_DIR/release/wbuild/kitchensink-runtime/target/wasm32-unknown-unknown/release/kitchensink_runtime.wasm > checksum.sha256 # clean up - rm -rf $CARGO_TARGET_DIR/release/wbuild # build again - - cargo build --verbose --release -p kitchensink-runtime + - cargo build --locked --verbose --release -p kitchensink-runtime # confirm checksum - sha256sum -c ./checksum.sha256 # clean up again, don't put release binaries into the cache @@ -251,7 +251,7 @@ test-frame-support: RUN_UI_TESTS: 1 script: - rusty-cachier snapshot create - - time cargo test -p frame-support-test --features=frame-feature-testing,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec + - time cargo test --locked -p frame-support-test --features=frame-feature-testing,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec - SUBSTRATE_TEST_TIMEOUT=1 time 
cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout - rusty-cachier cache upload @@ -295,7 +295,7 @@ quick-benchmarks: WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" script: - rusty-cachier snapshot create - - time cargo run --release --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 + - time cargo run --locked --release --features runtime-benchmarks -- benchmark pallet --execution wasm --wasm-execution compiled --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 - rusty-cachier cache upload test-frame-examples-compile-to-wasm: @@ -314,9 +314,9 @@ test-frame-examples-compile-to-wasm: script: - rusty-cachier snapshot create - cd ./frame/examples/offchain-worker/ - - cargo +nightly build --target=wasm32-unknown-unknown --no-default-features + - cargo +nightly build --locked --target=wasm32-unknown-unknown --no-default-features - cd ../basic - - cargo +nightly build --target=wasm32-unknown-unknown --no-default-features + - cargo +nightly build --locked --target=wasm32-unknown-unknown --no-default-features - rusty-cachier cache upload test-linux-stable-int: @@ -358,8 +358,8 @@ check-tracing: script: - rusty-cachier snapshot create # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases - - time cargo +nightly test --manifest-path ./primitives/tracing/Cargo.toml --no-default-features - - time cargo +nightly test --manifest-path ./primitives/tracing/Cargo.toml --no-default-features --features=with-tracing + - time cargo +nightly test --locked --manifest-path ./primitives/tracing/Cargo.toml --no-default-features + - time cargo +nightly test --locked --manifest-path ./primitives/tracing/Cargo.toml --no-default-features --features=with-tracing - rusty-cachier cache upload # more information about this job can be found here: @@ -383,9 +383,9 @@ test-full-crypto-feature: script: - rusty-cachier snapshot create - cd primitives/core/ - - time cargo +nightly build --verbose --no-default-features --features full_crypto + - time cargo +nightly build --locked --verbose --no-default-features --features full_crypto - cd ../application-crypto - - time cargo +nightly build --verbose --no-default-features --features full_crypto + - time cargo +nightly build --locked --verbose --no-default-features --features full_crypto - rusty-cachier cache upload test-wasmer-sandbox: @@ -399,7 +399,7 @@ test-wasmer-sandbox: script: - rusty-cachier snapshot create - echo "Node index - ${CI_NODE_INDEX}. 
Total amount - ${CI_NODE_TOTAL}" - - time cargo nextest run --release --features runtime-benchmarks,wasmer-sandbox,disable-ui-tests --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} + - time cargo nextest run --locked --release --features runtime-benchmarks,wasmer-sandbox,disable-ui-tests --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} - if [ ${CI_NODE_INDEX} == 1 ]; then rusty-cachier cache upload; fi cargo-check-macos: @@ -408,7 +408,7 @@ cargo-check-macos: before_script: - !reference [.rust-info-script, script] script: - - SKIP_WASM_BUILD=1 time cargo check --release + - SKIP_WASM_BUILD=1 time cargo check --locked --release tags: - osx @@ -424,5 +424,5 @@ check-rustdoc: RUSTDOCFLAGS: "-Dwarnings" script: - rusty-cachier snapshot create - - time cargo +nightly doc --workspace --all-features --verbose --no-deps + - time cargo +nightly doc --locked --workspace --all-features --verbose --no-deps - rusty-cachier cache upload From 8bd5510971a8fba46a7b1a6ead8254b9cbf0a73f Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Thu, 15 Sep 2022 17:12:55 +0200 Subject: [PATCH 137/348] Replace 'blake2-rfc' with rust-crypto 'blake2' crate (#12266) * Replace 'blake2-rfc with rust-crypto 'blake2' crate * Bump blake2 to 0.10.4 --- Cargo.lock | 6 ++--- primitives/api/proc-macro/Cargo.toml | 2 +- primitives/core/Cargo.toml | 6 ++--- primitives/core/hashing/Cargo.toml | 2 +- primitives/core/src/crypto.rs | 36 ++++++++++++++++++++-------- 5 files changed, 34 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 367713b9361ba..8216b8b92aef5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -593,9 +593,9 @@ dependencies = [ [[package]] name = "blake2" -version = "0.10.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94ba84325db59637ffc528bbe8c7f86c02c57cff5c0e2b9b00f9a851f42f309" +checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" dependencies = [ "digest 0.10.3", ] @@ -9751,7 +9751,7 @@ version = "6.0.0" dependencies = [ "base58", "bitflags", - "blake2-rfc", + "blake2", "byteorder", "criterion", "dyn-clonable", diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 0a57be8d7a300..8acc15d6a0591 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -19,7 +19,7 @@ proc-macro = true quote = "1.0.10" syn = { version = "1.0.98", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.37" -blake2 = { version = "0.10.2", default-features = false } +blake2 = { version = "0.10.4", default-features = false } proc-macro-crate = "1.1.3" # Required for the doc tests diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index b611449b6730f..2233a3443447c 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -48,7 +48,7 @@ bitflags = "1.3" # full crypto ed25519-zebra = { version = "3.0.0", default-features = false, optional = true} -blake2-rfc = { version = "0.2.18", default-features = false, optional = true } +blake2 = { version = "0.10.4", default-features = false, optional = true } schnorrkel = { version = "0.9.1", features = [ "preaudit_deprecated", "u64_backend", @@ -98,7 +98,7 @@ std = [ "hash-db/std", "sp-std/std", "serde", - "blake2-rfc/std", + "blake2/std", "ed25519-zebra", "hex/std", "base58", @@ -130,7 +130,7 @@ std = [ # For the regular wasm runtime builds this should not be used. 
full_crypto = [ "ed25519-zebra", - "blake2-rfc", + "blake2", "schnorrkel", "hex", "libsecp256k1", diff --git a/primitives/core/hashing/Cargo.toml b/primitives/core/hashing/Cargo.toml index d6fc6bed31fec..efe38af1602dd 100644 --- a/primitives/core/hashing/Cargo.toml +++ b/primitives/core/hashing/Cargo.toml @@ -13,7 +13,7 @@ documentation = "https://docs.rs/sp-core-hashing" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -blake2 = { version = "0.10.2", default-features = false } +blake2 = { version = "0.10.4", default-features = false } byteorder = { version = "1.3.2", default-features = false } digest = { version = "0.10.3", default-features = false } sha2 = { version = "0.10.2", default-features = false } diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 80b44449dbac1..377ae480edd17 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -132,9 +132,7 @@ impl DeriveJunction { let mut cc: [u8; JUNCTION_ID_LEN] = Default::default(); index.using_encoded(|data| { if data.len() > JUNCTION_ID_LEN { - let hash_result = blake2_rfc::blake2b::blake2b(JUNCTION_ID_LEN, &[], data); - let hash = hash_result.as_bytes(); - cc.copy_from_slice(hash); + cc.copy_from_slice(&sp_core_hashing::blake2_256(data)); } else { cc[0..data.len()].copy_from_slice(data); } @@ -292,7 +290,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + ByteArray { } let hash = ss58hash(&data[0..body_len + prefix_len]); - let checksum = &hash.as_bytes()[0..CHECKSUM_LEN]; + let checksum = &hash[0..CHECKSUM_LEN]; if data[body_len + prefix_len..body_len + prefix_len + CHECKSUM_LEN] != *checksum { // Invalid checksum. return Err(PublicError::InvalidChecksum) @@ -333,7 +331,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + ByteArray { }; v.extend(self.as_ref()); let r = ss58hash(&v); - v.extend(&r.as_bytes()[0..2]); + v.extend(&r[0..2]); v.to_base58() } @@ -366,11 +364,13 @@ pub trait Derive: Sized { const PREFIX: &[u8] = b"SS58PRE"; #[cfg(feature = "std")] -fn ss58hash(data: &[u8]) -> blake2_rfc::blake2b::Blake2bResult { - let mut context = blake2_rfc::blake2b::Blake2b::new(64); - context.update(PREFIX); - context.update(data); - context.finalize() +fn ss58hash(data: &[u8]) -> Vec { + use blake2::{Blake2b512, Digest}; + + let mut ctx = Blake2b512::new(); + ctx.update(PREFIX); + ctx.update(data); + ctx.finalize().to_vec() } /// Default prefix number @@ -1311,6 +1311,14 @@ mod tests { path: vec![DeriveJunction::soft("DOT")] }) ); + assert_eq!( + TestPair::from_string("hello world/0123456789012345678901234567890123456789", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::soft("0123456789012345678901234567890123456789")] + }) + ); assert_eq!( TestPair::from_string("hello world//1", None), Ok(TestPair::Standard { @@ -1327,6 +1335,14 @@ mod tests { path: vec![DeriveJunction::hard("DOT")] }) ); + assert_eq!( + TestPair::from_string("hello world//0123456789012345678901234567890123456789", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard("0123456789012345678901234567890123456789")] + }) + ); assert_eq!( TestPair::from_string("hello world//1/DOT", None), Ok(TestPair::Standard { From 21adbaf88fc3d1a216ab71d19acedd348c74ad15 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Thu, 15 Sep 2022 18:59:06 +0100 Subject: [PATCH 138/348] Expose tracks as a constant. (#12273) * Expose tracks as a constant. 
* Formatting --- bin/node/runtime/src/lib.rs | 1 + frame/referenda/src/lib.rs | 42 +++++++++++++++++++++++++++++++------ frame/referenda/src/mock.rs | 1 + 3 files changed, 38 insertions(+), 6 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2f0efcaa56740..1e476f358876c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -828,6 +828,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { } } } +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); impl pallet_referenda::Config for Runtime { type WeightInfo = pallet_referenda::weights::SubstrateWeight; diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index 92e5ecf5e539b..a2a5aae4e617b 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -72,7 +72,7 @@ use frame_support::{ v2::{Anon as ScheduleAnon, Named as ScheduleNamed}, DispatchTime, MaybeHashed, }, - Currency, Get, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, + Currency, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, ReservableCurrency, VoteTally, }, BoundedVec, @@ -108,6 +108,30 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; +pub use frame_support::traits::Get; +pub use sp_std::vec::Vec; + +#[macro_export] +macro_rules! impl_tracksinfo_get { + ($tracksinfo:ty, $balance:ty, $blocknumber:ty) => { + impl + $crate::Get< + $crate::Vec<( + <$tracksinfo as $crate::TracksInfo<$balance, $blocknumber>>::Id, + $crate::TrackInfo<$balance, $blocknumber>, + )>, + > for $tracksinfo + { + fn get() -> $crate::Vec<( + <$tracksinfo as $crate::TracksInfo<$balance, $blocknumber>>::Id, + $crate::TrackInfo<$balance, $blocknumber>, + )> { + <$tracksinfo as $crate::TracksInfo<$balance, $blocknumber>>::tracks().to_vec() + } + } + }; +} + const ASSEMBLY_ID: LockIdentifier = *b"assembly"; #[frame_support::pallet] @@ -184,11 +208,17 @@ pub mod pallet { // The other stuff. /// Information concerning the different referendum tracks. - type Tracks: TracksInfo< - BalanceOf, - Self::BlockNumber, - Origin = ::PalletsOrigin, - >; + #[pallet::constant] + type Tracks: Get< + Vec<( + , Self::BlockNumber>>::Id, + TrackInfo, Self::BlockNumber>, + )>, + > + TracksInfo< + BalanceOf, + Self::BlockNumber, + Origin = ::PalletsOrigin, + >; } /// The next free referendum index, aka the number of referenda started so far. diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs index e046b29898e7b..70ca304eed3cb 100644 --- a/frame/referenda/src/mock.rs +++ b/frame/referenda/src/mock.rs @@ -210,6 +210,7 @@ impl TracksInfo for TestTracksInfo { } } } +impl_tracksinfo_get!(TestTracksInfo, u64, u64); impl Config for Test { type WeightInfo = (); From d65f4bbdf1d34ae228fbad444951176278a03d05 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 16 Sep 2022 10:50:21 +0300 Subject: [PATCH 139/348] client/beefy: fix voter initialization (#12274) Fix corner case where voter gets a single burst of finality notifications just when it starts. The notification stream was consumed by "wait_for_pallet" logic, then main loop would subscribe to finality notifications, but by that time some notifications might've been lost. Fix this by subscribing the main loop to notifications before waiting for pallet to become available. Share the same stream with the main loop so that notifications for blocks before pallet available are ignored, while _all_ notifications after pallet available are processed. Add regression test for this. 
Signed-off-by: acatangiu --- client/beefy/src/tests.rs | 34 ++++++++++++++++++++++++++++++++++ client/beefy/src/worker.rs | 15 ++++++++------- 2 files changed, 42 insertions(+), 7 deletions(-) diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index e8d32fe3e8127..e0058e5238d6a 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -811,3 +811,37 @@ fn beefy_importing_blocks() { })); } } + +#[test] +fn voter_initialization() { + sp_tracing::try_init_simple(); + // Regression test for voter initialization where finality notifications were dropped + // after waiting for BEEFY pallet availability. + + let mut runtime = Runtime::new().unwrap(); + let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let session_len = 5; + // Should vote on all mandatory blocks no matter the `min_block_delta`. + let min_block_delta = 10; + + let mut net = BeefyTestNet::new(2, 0); + let api = Arc::new(two_validators::TestApi {}); + let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); + runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); + + // push 30 blocks + net.generate_blocks_and_sync(26, session_len, &validator_set, false); + let net = Arc::new(Mutex::new(net)); + + // Finalize multiple blocks at once to get a burst of finality notifications right from start. + // Need to finalize at least one block in each session, choose randomly. + // Expect voters to pick up all of them and BEEFY-finalize the mandatory blocks of each session. + finalize_block_and_wait_for_beefy( + &net, + peers, + &mut runtime, + &[1, 6, 10, 17, 24, 26], + &[1, 5, 10, 15, 20, 25], + ); +} diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 9f54a300472de..6e8c89d804984 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -24,10 +24,10 @@ use std::{ }; use codec::{Codec, Decode, Encode}; -use futures::StreamExt; +use futures::{stream::Fuse, StreamExt}; use log::{debug, error, info, log_enabled, trace, warn}; -use sc_client_api::{Backend, FinalityNotification, HeaderBackend}; +use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; use sc_network_gossip::GossipEngine; use sp_api::{BlockId, ProvideRuntimeApi}; @@ -723,12 +723,11 @@ where /// Wait for BEEFY runtime pallet to be available. /// Should be called only once during worker initialization. - async fn wait_for_runtime_pallet(&mut self) { + async fn wait_for_runtime_pallet(&mut self, finality: &mut Fuse>) { let mut gossip_engine = &mut self.gossip_engine; - let mut finality_stream = self.client.finality_notification_stream().fuse(); loop { futures::select! { - notif = finality_stream.next() => { + notif = finality.next() => { let notif = match notif { Some(notif) => notif, None => break @@ -762,11 +761,13 @@ where pub(crate) async fn run(mut self) { info!(target: "beefy", "🥩 run BEEFY worker, best grandpa: #{:?}.", self.best_grandpa_block_header.number()); let mut block_import_justif = self.links.from_block_import_justif_stream.subscribe().fuse(); + // Subscribe to finality notifications before waiting for runtime pallet and reuse stream, + // so we process notifications for all finalized blocks after pallet is available. 
+ let mut finality_notifications = self.client.finality_notification_stream().fuse(); - self.wait_for_runtime_pallet().await; + self.wait_for_runtime_pallet(&mut finality_notifications).await; trace!(target: "beefy", "🥩 BEEFY pallet available, starting voter."); - let mut finality_notifications = self.client.finality_notification_stream().fuse(); let mut votes = Box::pin( self.gossip_engine .messages_for(topic::()) From 7202ca616799a1d78e37ae8ec0093f16c49417c6 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Fri, 16 Sep 2022 16:16:37 +0200 Subject: [PATCH 140/348] Fix warp sync (#12281) --- client/consensus/aura/src/import_queue.rs | 19 ++++++------ client/consensus/babe/src/lib.rs | 34 +++++++++++---------- client/consensus/common/src/block_import.rs | 12 ++++++++ client/consensus/pow/src/lib.rs | 20 ++++++------ 4 files changed, 51 insertions(+), 34 deletions(-) diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 31fe7bbaa1095..90f15fef1b34b 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -225,15 +225,16 @@ where inherent_data.aura_replace_inherent_data(slot); - // skip the inherents verification if the runtime API is old. - if self - .client - .runtime_api() - .has_api_with::, _>( - &BlockId::Hash(parent_hash), - |v| v >= 2, - ) - .map_err(|e| e.to_string())? + // skip the inherents verification if the runtime API is old or not expected to + // exist. + if !block.state_action.skip_execution_checks() && + self.client + .runtime_api() + .has_api_with::, _>( + &BlockId::Hash(parent_hash), + |v| v >= 2, + ) + .map_err(|e| e.to_string())? { self.check_inherents( new_block.clone(), diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 198de865bb290..eca6654f6e4dc 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -1237,24 +1237,26 @@ where warn!(target: "babe", "Error checking/reporting BABE equivocation: {}", err); } - // if the body is passed through, we need to use the runtime - // to check that the internally-set timestamp in the inherents - // actually matches the slot set in the seal. if let Some(inner_body) = block.body { - let mut inherent_data = create_inherent_data_providers - .create_inherent_data() - .map_err(Error::::CreateInherents)?; - inherent_data.babe_replace_inherent_data(slot); let new_block = Block::new(pre_header.clone(), inner_body); - - self.check_inherents( - new_block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - create_inherent_data_providers, - block.origin.into(), - ) - .await?; + if !block.state_action.skip_execution_checks() { + // if the body is passed through and the block was executed, + // we need to use the runtime to check that the internally-set + // timestamp in the inherents actually matches the slot set in the seal. 
+ let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .map_err(Error::::CreateInherents)?; + inherent_data.babe_replace_inherent_data(slot); + + self.check_inherents( + new_block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + create_inherent_data_providers, + block.origin.into(), + ) + .await?; + } let (_, inner_body) = new_block.deconstruct(); block.body = Some(inner_body); diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index 10739f63ef779..b8fc815cc5c5c 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -153,6 +153,18 @@ pub enum StateAction { Skip, } +impl StateAction { + /// Check if execution checks that require runtime calls should be skipped. + pub fn skip_execution_checks(&self) -> bool { + match self { + StateAction::ApplyChanges(_) | + StateAction::Execute | + StateAction::ExecuteIfPossible => false, + StateAction::Skip => true, + } + } +} + /// Data required to import a Block. #[non_exhaustive] pub struct BlockImportParams { diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 6c492931d445c..9b3ef501d2396 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -341,15 +341,17 @@ where if let Some(inner_body) = block.body.take() { let check_block = B::new(block.header.clone(), inner_body); - self.check_inherents( - check_block.clone(), - BlockId::Hash(parent_hash), - self.create_inherent_data_providers - .create_inherent_data_providers(parent_hash, ()) - .await?, - block.origin.into(), - ) - .await?; + if !block.state_action.skip_execution_checks() { + self.check_inherents( + check_block.clone(), + BlockId::Hash(parent_hash), + self.create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await?, + block.origin.into(), + ) + .await?; + } block.body = Some(check_block.deconstruct().1); } From 4d04aba0f864b62532cac6d39ac373812eeea3dc Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Fri, 16 Sep 2022 17:45:44 -0400 Subject: [PATCH 141/348] Add special tag to exclude runtime storage items from benchmarking (#12205) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * initial setup * add WhitelistedStorageKeys trait * add (A, B) tuple implementation for whitelisted_storage_keys() * fix formatting * implement WhitelistedStorageKeys for all tuple combinations * impl_for_tuples up to 128 for WhitelistedStorageKeys * refactor to #[benchmarking(cached)] * tweak error message and mark BlockNumber as cached * add benchmarking(cached) to the other default types * add docs for benchmarking(cached) * properly parse storage type declaration * make storage_alias structs public so we can use them in this macro * use BTreeMap since TrackedStorageKey missing Ord outside of std * make WhitelistedStorageKeys accessible * basic detection of benchmarking(cached) :boom: * proper parsing of #[benchmarking(cached)] from pallet parse macro * store presence of #[benchmarking(cached)] macro on StorageDef * will be used for later expansion * compiling blank impl for WhitelistedStorageKeys * move impl to expand_pallet_struct * use frame_support::sp_std::vec::Vec properly * successfully compiling with storage info loaded into a variable :boom: * plausible implementation for whitelisted_storage_keys() * depends on the assumption that storage_info.encode() can be loaded into TrackedStorageKey::new(..) 
* use Pallet::whitelisted_storage_keys() instead of hard-coded list * AllPallets::whitelisted_storage_keys() properly working :boom: * collect storage names * whitelisted_storage_keys() impl working :boom: * clean up * fix compiler error * just one compiler error * fix doc compiler error * use better import path * fix comment * whoops * whoops again * fix macro import issue * cargo fmt * mark example as ignore * use keyword tokens instead of string parsing * fix keyword-based parsing of benchmarking(cached) * preliminary spec for check_whitelist() * add additional test for benchmarking whitelist * add TODO note * remove irrelevant line from example * use filter_map instead of filter and map * simplify syntax Co-authored-by: Keith Yeung * clean up * fix test * fix tests * use keyword parsing instead of string parsing * use collect() instead of a for loop Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * fix compiler error * clean up benchmarking(cached) marking code * use cloned() * refactor to not use panic! and remove need for pub types in storage_alias * remove unneeded use Co-authored-by: Bastian Köcher * remove unneeded visibility changes * don't manually hard code hash for treasury account as hex * proper Ord, PartialOrd, and Hash impls for TrackedStorageKey * now based just on key, and available in no-std * use BTreeSet instead of BTreeMap * fix comments * cargo fmt * switch to pallet::whitelist and re-do it basti's way :D * make PartialOrd for TrackedStorageKey consistent with Ord * more correct implementation of hash-related traits for TrackedStorageKey * fix integration test * update TODO * remove unused keyword * remove more unused keywords * use into_iter() Co-authored-by: Keith Yeung * Update frame/support/procedural/src/pallet/parse/mod.rs Co-authored-by: Bastian Köcher * add docs for whitelisted * fix comment Co-authored-by: Keith Yeung Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Bastian Köcher --- bin/node-template/runtime/src/lib.rs | 51 ++++++++++++---- bin/node/runtime/src/lib.rs | 60 ++++++++++++++----- frame/balances/src/lib.rs | 1 + .../src/pallet/expand/pallet_struct.rs | 19 ++++++ .../procedural/src/pallet/parse/storage.rs | 21 ++++++- frame/support/procedural/src/storage/mod.rs | 1 + frame/support/src/lib.rs | 15 +++++ frame/support/src/traits.rs | 1 + frame/support/src/traits/storage.rs | 28 +++++++++ .../storage_invalid_attribute.stderr | 2 +- frame/system/src/lib.rs | 5 ++ primitives/storage/src/lib.rs | 5 +- 12 files changed, 175 insertions(+), 34 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 099654a12a420..c309b27682080 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -521,18 +521,8 @@ impl_runtime_apis! 
{ impl frame_system_benchmarking::Config for Runtime {} impl baseline::Config for Runtime {} - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); let mut batches = Vec::::new(); let params = (&config, &whitelist); @@ -563,3 +553,40 @@ impl_runtime_apis! { } } } + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::traits::WhitelistedStorageKeys; + use sp_core::hexdisplay::HexDisplay; + use std::collections::HashSet; + + #[test] + fn check_whitelist() { + let whitelist: HashSet = AllPalletsWithSystem::whitelisted_storage_keys() + .iter() + .map(|e| HexDisplay::from(&e.key).to_string()) + .collect(); + + // Block Number + assert!( + whitelist.contains("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac") + ); + // Total Issuance + assert!( + whitelist.contains("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80") + ); + // Execution Phase + assert!( + whitelist.contains("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a") + ); + // Event Count + assert!( + whitelist.contains("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850") + ); + // System Events + assert!( + whitelist.contains("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7") + ); + } +} diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 1e476f358876c..1b0d12f8530a2 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2148,22 +2148,14 @@ impl_runtime_apis! 
{ impl baseline::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - // System BlockWeight - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96").to_vec().into(), - // Treasury Account - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec().into(), - ]; + use frame_support::traits::WhitelistedStorageKeys; + let mut whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + + // Treasury Account + // TODO: this is manual for now, someday we might be able to use a + // macro for this particular key + let treasury_key = frame_system::Account::::hashed_key_for(Treasury::account_id()); + whitelist.push(treasury_key.to_vec().into()); let mut batches = Vec::::new(); let params = (&config, &whitelist); @@ -2177,8 +2169,44 @@ impl_runtime_apis! { mod tests { use super::*; use frame_election_provider_support::NposSolution; + use frame_support::traits::WhitelistedStorageKeys; use frame_system::offchain::CreateSignedTransaction; + use sp_core::hexdisplay::HexDisplay; use sp_runtime::UpperOf; + use std::collections::HashSet; + + #[test] + fn check_whitelist() { + let whitelist: HashSet = AllPalletsWithSystem::whitelisted_storage_keys() + .iter() + .map(|e| HexDisplay::from(&e.key).to_string()) + .collect(); + + // Block Number + assert!( + whitelist.contains("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac") + ); + // Total Issuance + assert!( + whitelist.contains("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80") + ); + // Execution Phase + assert!( + whitelist.contains("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a") + ); + // Event Count + assert!( + whitelist.contains("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850") + ); + // System Events + assert!( + whitelist.contains("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7") + ); + // System BlockWeight + assert!( + whitelist.contains("26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96") + ); + } #[test] fn validate_transaction_submitter_bounds() { diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index e425381172dab..82d6f87dbde1a 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -493,6 +493,7 @@ pub mod pallet { /// The total units issued in the system. #[pallet::storage] #[pallet::getter(fn total_issuance)] + #[pallet::whitelist_storage] pub type TotalIssuance, I: 'static = ()> = StorageValue<_, T::Balance, ValueQuery>; /// The Balances pallet example of storing the balance of an account. 
diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index f0fb6bacedffb..e5941a33fee13 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -166,6 +166,24 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { quote::quote! { #frame_support::traits::StorageVersion::default() } }; + let whitelisted_storage_idents: Vec = def + .storages + .iter() + .filter_map(|s| s.whitelisted.then_some(s.ident.clone())) + .collect(); + + let whitelisted_storage_keys_impl = quote::quote![ + use #frame_support::traits::{StorageInfoTrait, TrackedStorageKey, WhitelistedStorageKeys}; + impl<#type_impl_gen> WhitelistedStorageKeys for #pallet_ident<#type_use_gen> #storages_where_clauses { + fn whitelisted_storage_keys() -> #frame_support::sp_std::vec::Vec { + use #frame_support::sp_std::vec; + vec![#( + TrackedStorageKey::new(#whitelisted_storage_idents::<#type_use_gen>::hashed_key().to_vec()) + ),*] + } + } + ]; + quote::quote_spanned!(def.pallet_struct.attr_span => #pallet_error_metadata @@ -253,5 +271,6 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { } #storage_info + #whitelisted_storage_keys_impl ) } diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index f0e1353774910..321c4dd5d4914 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -28,6 +28,7 @@ mod keyword { syn::custom_keyword!(getter); syn::custom_keyword!(storage_prefix); syn::custom_keyword!(unbounded); + syn::custom_keyword!(whitelist_storage); syn::custom_keyword!(OptionQuery); syn::custom_keyword!(ResultQuery); syn::custom_keyword!(ValueQuery); @@ -37,16 +38,21 @@ mod keyword { /// * `#[pallet::getter(fn dummy)]` /// * `#[pallet::storage_prefix = "CustomName"]` /// * `#[pallet::unbounded]` +/// * `#[pallet::whitelist_storage] pub enum PalletStorageAttr { Getter(syn::Ident, proc_macro2::Span), StorageName(syn::LitStr, proc_macro2::Span), Unbounded(proc_macro2::Span), + WhitelistStorage(proc_macro2::Span), } impl PalletStorageAttr { fn attr_span(&self) -> proc_macro2::Span { match self { - Self::Getter(_, span) | Self::StorageName(_, span) | Self::Unbounded(span) => *span, + Self::Getter(_, span) | + Self::StorageName(_, span) | + Self::Unbounded(span) | + Self::WhitelistStorage(span) => *span, } } } @@ -84,6 +90,9 @@ impl syn::parse::Parse for PalletStorageAttr { content.parse::()?; Ok(Self::Unbounded(attr_span)) + } else if lookahead.peek(keyword::whitelist_storage) { + content.parse::()?; + Ok(Self::WhitelistStorage(attr_span)) } else { Err(lookahead.error()) } @@ -94,6 +103,7 @@ struct PalletStorageAttrInfo { getter: Option, rename_as: Option, unbounded: bool, + whitelisted: bool, } impl PalletStorageAttrInfo { @@ -101,12 +111,14 @@ impl PalletStorageAttrInfo { let mut getter = None; let mut rename_as = None; let mut unbounded = false; + let mut whitelisted = false; for attr in attrs { match attr { PalletStorageAttr::Getter(ident, ..) if getter.is_none() => getter = Some(ident), PalletStorageAttr::StorageName(name, ..) if rename_as.is_none() => rename_as = Some(name), PalletStorageAttr::Unbounded(..) if !unbounded => unbounded = true, + PalletStorageAttr::WhitelistStorage(..) 
if !whitelisted => whitelisted = true, attr => return Err(syn::Error::new( attr.attr_span(), @@ -115,7 +127,7 @@ impl PalletStorageAttrInfo { } } - Ok(PalletStorageAttrInfo { getter, rename_as, unbounded }) + Ok(PalletStorageAttrInfo { getter, rename_as, unbounded, whitelisted }) } } @@ -171,6 +183,8 @@ pub struct StorageDef { pub named_generics: Option, /// If the value stored in this storage is unbounded. pub unbounded: bool, + /// Whether or not reads to this storage key will be ignored by benchmarking + pub whitelisted: bool, } /// The parsed generic from the @@ -672,7 +686,7 @@ impl StorageDef { }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; - let PalletStorageAttrInfo { getter, rename_as, unbounded } = + let PalletStorageAttrInfo { getter, rename_as, unbounded, whitelisted } = PalletStorageAttrInfo::from_attrs(attrs)?; let cfg_attrs = helper::get_item_cfg_attrs(&item.attrs); @@ -814,6 +828,7 @@ impl StorageDef { cfg_attrs, named_generics, unbounded, + whitelisted, }) } } diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index 756653f7ba85d..e8e2d7529cb3f 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -32,6 +32,7 @@ pub(crate) use instance_trait::INHERENT_INSTANCE_NAME; use frame_support_procedural_tools::{ generate_crate_access, generate_hidden_includes, syn_ext as ext, }; + use quote::quote; /// All information contained in input of decl_storage diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 8dcc51226923b..e9578cccc23bc 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1858,6 +1858,21 @@ pub mod pallet_prelude { /// pub(super) type MyStorage = StorageValue; /// ``` /// +/// The optional attribute `#[pallet::whitelist_storage]` will declare the +/// storage as whitelisted from benchmarking. Doing so will exclude reads of +/// that value's storage key from counting towards weight calculations during +/// benchmarking. +/// +/// This attribute should only be attached to storages that are known to be +/// read/used in every block. This will result in a more accurate benchmarking weight. +/// +/// ### Example +/// ```ignore +/// #[pallet::storage] +/// #[pallet::whitelist_storage] +/// pub(super) type Number = StorageValue<_, T::BlockNumber, ValueQuery>; +/// ``` +/// /// All the `cfg` attributes are automatically copied to the items generated for the storage, /// i.e. the getter, storage prefix, and the metadata element etc. /// diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index d4f0e73557c77..16ac7929831b4 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -89,6 +89,7 @@ pub mod schedule; mod storage; pub use storage::{ Instance, PartialStorageInfoTrait, StorageInfo, StorageInfoTrait, StorageInstance, + TrackedStorageKey, WhitelistedStorageKeys, }; mod dispatch; diff --git a/frame/support/src/traits/storage.rs b/frame/support/src/traits/storage.rs index d40d82c28e87e..24653d1899836 100644 --- a/frame/support/src/traits/storage.rs +++ b/frame/support/src/traits/storage.rs @@ -17,7 +17,9 @@ //! Traits for encoding data related to pallet's storage items. +use crate::sp_std::collections::btree_set::BTreeSet; use impl_trait_for_tuples::impl_for_tuples; +pub use sp_core::storage::TrackedStorageKey; use sp_std::prelude::*; /// An instance of a pallet in the storage. 
@@ -90,3 +92,29 @@ impl StorageInfoTrait for Tuple { pub trait PartialStorageInfoTrait { fn partial_storage_info() -> Vec; } + +/// Allows a pallet to specify storage keys to whitelist during benchmarking. +/// This means those keys will be excluded from the benchmarking performance +/// calculation. +pub trait WhitelistedStorageKeys { + /// Returns a [`Vec`] indicating the storage keys that + /// should be whitelisted during benchmarking. This means that those keys + /// will be excluded from the benchmarking performance calculation. + fn whitelisted_storage_keys() -> Vec; +} + +#[cfg_attr(all(not(feature = "tuples-96"), not(feature = "tuples-128")), impl_for_tuples(64))] +#[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] +#[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] +impl WhitelistedStorageKeys for Tuple { + fn whitelisted_storage_keys() -> Vec { + // de-duplicate the storage keys + let mut combined_keys: BTreeSet = BTreeSet::new(); + for_tuples!( #( + for storage_key in Tuple::whitelisted_storage_keys() { + combined_keys.insert(storage_key); + } + )* ); + combined_keys.into_iter().collect::>() + } +} diff --git a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr index 6313bd691f943..80c6526bbf888 100644 --- a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr +++ b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr @@ -1,4 +1,4 @@ -error: expected one of: `getter`, `storage_prefix`, `unbounded` +error: expected one of: `getter`, `storage_prefix`, `unbounded`, `whitelist_storage` --> $DIR/storage_invalid_attribute.rs:16:12 | 16 | #[pallet::generate_store(pub trait Store)] diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 739dcadde9ac1..f60e3e7349d6b 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -562,6 +562,7 @@ pub mod pallet { /// The current weight for the block. #[pallet::storage] + #[pallet::whitelist_storage] #[pallet::getter(fn block_weight)] pub(super) type BlockWeight = StorageValue<_, ConsumedWeight, ValueQuery>; @@ -584,6 +585,7 @@ pub mod pallet { /// The current block number being processed. Set by `execute_block`. #[pallet::storage] + #[pallet::whitelist_storage] #[pallet::getter(fn block_number)] pub(super) type Number = StorageValue<_, T::BlockNumber, ValueQuery>; @@ -606,12 +608,14 @@ pub mod pallet { /// Events have a large in-memory size. Box the events to not go out-of-memory /// just in case someone still reads them from within the runtime. #[pallet::storage] + #[pallet::whitelist_storage] #[pallet::unbounded] pub(super) type Events = StorageValue<_, Vec>>, ValueQuery>; /// The number of events in the `Events` list. #[pallet::storage] + #[pallet::whitelist_storage] #[pallet::getter(fn event_count)] pub(super) type EventCount = StorageValue<_, EventIndex, ValueQuery>; @@ -647,6 +651,7 @@ pub mod pallet { /// The execution phase of the block. #[pallet::storage] + #[pallet::whitelist_storage] pub(super) type ExecutionPhase = StorageValue<_, Phase>; #[cfg_attr(feature = "std", derive(Default))] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 0948cf431158d..9c046bf9d2dd1 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -47,8 +47,9 @@ impl AsRef<[u8]> for StorageKey { } /// Storage key with read/write tracking information. 
-#[derive(PartialEq, Eq, RuntimeDebug, Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Hash, PartialOrd, Ord))] +#[derive( + PartialEq, Eq, Ord, PartialOrd, sp_std::hash::Hash, RuntimeDebug, Clone, Encode, Decode, +)] pub struct TrackedStorageKey { pub key: Vec, pub reads: u32, From 03eb807e4e8efa9d0f94f36f528caf4fc92ccffb Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Sun, 18 Sep 2022 11:28:16 +0200 Subject: [PATCH 142/348] [Feature] Part 1: add TargetList for validator ranking (#12034) * [Feature] Part 1: add TargetList for validator ranking * remove redundant todo * remove typo * cleanup * implement score * more fixes * fix thresholds * fmt * Remove the stuff that has to come in the next PR, some fixes * extended balance import * Change all the references from VoteWeight to Self::Score * Add a migration for VoterBagsList * fix score * add targetList to nomination-pools tests * fix bench * address review comments * change get_npos_targets * address more comments * remove thresholds for the time being * fix instance reference * VoterBagsListInstance * reus * remove params that are not used yet * Introduced pre/post upgrade try-runtime checks * fix * fixes * fix migration * fix migration * fix post_upgrade * change * Fix * eloquent PhantomData * fix PD * more fixes * Update frame/staking/src/pallet/impls.rs Co-authored-by: Squirrel * is_nominator now works * fix test-staking * build fixes * fix remote-tests * Apply suggestions from code review Co-authored-by: parity-processbot <> Co-authored-by: kianenigma Co-authored-by: Squirrel Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- bin/node/runtime/src/lib.rs | 15 ++- bin/node/runtime/src/voter_bags.rs | 9 +- frame/babe/src/mock.rs | 1 + frame/bags-list/remote-tests/src/lib.rs | 33 +++++-- frame/bags-list/remote-tests/src/migration.rs | 5 +- frame/bags-list/remote-tests/src/snapshot.rs | 8 +- frame/bags-list/remote-tests/src/try_state.rs | 10 +- frame/grandpa/src/mock.rs | 1 + .../nomination-pools/benchmarking/src/lib.rs | 5 +- .../nomination-pools/benchmarking/src/mock.rs | 8 +- .../nomination-pools/test-staking/src/mock.rs | 8 +- frame/offences/benchmarking/src/mock.rs | 1 + frame/session/benchmarking/src/mock.rs | 1 + frame/staking/src/benchmarking.rs | 2 +- frame/staking/src/lib.rs | 3 +- frame/staking/src/migrations.rs | 93 +++++++++++++++++ frame/staking/src/mock.rs | 9 +- frame/staking/src/pallet/impls.rs | 99 ++++++++++++++++--- frame/staking/src/pallet/mod.rs | 28 ++++++ utils/frame/generate-bags/src/lib.rs | 15 +++ 20 files changed, 307 insertions(+), 47 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 1b0d12f8530a2..b5467aa6f8ea7 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -568,7 +568,9 @@ impl pallet_staking::Config for Runtime { type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::UnboundedExecution; - type VoterList = BagsList; + type VoterList = VoterBagsList; + // This a placeholder, to be introduced in the next PR as an instance of bags-list + type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; type OnStakerSlash = NominationPools; type WeightInfo = pallet_staking::weights::SubstrateWeight; @@ -724,12 +726,15 @@ parameter_types! 
{ pub const BagThresholds: &'static [u64] = &voter_bags::THRESHOLDS; } -impl pallet_bags_list::Config for Runtime { +type VoterBagsListInstance = pallet_bags_list::Instance1; +impl pallet_bags_list::Config for Runtime { type RuntimeEvent = RuntimeEvent; + /// The voter bags-list is loosely kept up to date, and the real source of truth for the score + /// of each node is the staking pallet. type ScoreProvider = Staking; - type WeightInfo = pallet_bags_list::weights::SubstrateWeight; type BagThresholds = BagThresholds; type Score = VoteWeight; + type WeightInfo = pallet_bags_list::weights::SubstrateWeight; } parameter_types! { @@ -1637,7 +1642,7 @@ construct_runtime!( Gilt: pallet_gilt, Uniques: pallet_uniques, TransactionStorage: pallet_transaction_storage, - BagsList: pallet_bags_list, + VoterBagsList: pallet_bags_list::, StateTrieMigration: pallet_state_trie_migration, ChildBounties: pallet_child_bounties, Referenda: pallet_referenda, @@ -1723,7 +1728,7 @@ mod benches { [pallet_alliance, Alliance] [pallet_assets, Assets] [pallet_babe, Babe] - [pallet_bags_list, BagsList] + [pallet_bags_list, VoterBagsList] [pallet_balances, Balances] [pallet_bounties, Bounties] [pallet_child_bounties, ChildBounties] diff --git a/bin/node/runtime/src/voter_bags.rs b/bin/node/runtime/src/voter_bags.rs index 93790f028f457..eb540c27abcc7 100644 --- a/bin/node/runtime/src/voter_bags.rs +++ b/bin/node/runtime/src/voter_bags.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated voter bag thresholds. +//! Autogenerated bag thresholds. //! -//! Generated on 2021-07-05T09:17:40.469754927+00:00 +//! Generated on 2022-08-15T19:26:59.939787+00:00 +//! Arguments +//! Total issuance: 100000000000000 +//! Minimum balance: 100000000000000 //! for the node runtime. /// Existential weight for this runtime. diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 0ac8214f45c23..fb251457db9ca 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -202,6 +202,7 @@ impl pallet_staking::Config for Test { type ElectionProvider = onchain::UnboundedExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; diff --git a/frame/bags-list/remote-tests/src/lib.rs b/frame/bags-list/remote-tests/src/lib.rs index 927c3dc91cb58..fc25e3b65ddb1 100644 --- a/frame/bags-list/remote-tests/src/lib.rs +++ b/frame/bags-list/remote-tests/src/lib.rs @@ -18,6 +18,7 @@ //! Utilities for remote-testing pallet-bags-list. use frame_election_provider_support::ScoreProvider; +use pallet_bags_list::Instance1; use sp_std::prelude::*; /// A common log target to use. @@ -30,18 +31,26 @@ pub mod try_state; /// A wrapper for a runtime that the functions of this crate expect. /// /// For example, this can be the `Runtime` type of the Polkadot runtime. 
-pub trait RuntimeT: - pallet_staking::Config + pallet_bags_list::Config + frame_system::Config +pub trait RuntimeT: + pallet_staking::Config + pallet_bags_list::Config + frame_system::Config +{ +} +impl< + I: 'static, + T: pallet_staking::Config + pallet_bags_list::Config + frame_system::Config, + > RuntimeT for T { } -impl RuntimeT for T {} fn percent(portion: u32, total: u32) -> f64 { (portion as f64 / total as f64) * 100f64 } /// Display the number of nodes in each bag, while identifying those that need a rebag. -pub fn display_and_check_bags(currency_unit: u64, currency_name: &'static str) { +pub fn display_and_check_bags>( + currency_unit: u64, + currency_name: &'static str, +) { use frame_election_provider_support::SortedListProvider; use frame_support::traits::Get; @@ -55,7 +64,8 @@ pub fn display_and_check_bags(currency_unit: u64, currency_na let mut seen_in_bags = 0; let mut rebaggable = 0; let mut active_bags = 0; - for vote_weight_thresh in ::BagThresholds::get() { + for vote_weight_thresh in >::BagThresholds::get() + { let vote_weight_thresh_u64: u64 = (*vote_weight_thresh) .try_into() .map_err(|_| "runtime must configure score to at most u64 to use this test") @@ -64,7 +74,9 @@ pub fn display_and_check_bags(currency_unit: u64, currency_na let vote_weight_thresh_as_unit = vote_weight_thresh_u64 as f64 / currency_unit as f64; let pretty_thresh = format!("Threshold: {}. {}", vote_weight_thresh_as_unit, currency_name); - let bag = match pallet_bags_list::Pallet::::list_bags_get(*vote_weight_thresh) { + let bag = match pallet_bags_list::Pallet::::list_bags_get( + *vote_weight_thresh, + ) { Some(bag) => bag, None => { log::info!(target: LOG_TARGET, "{} NO VOTERS.", pretty_thresh); @@ -75,7 +87,8 @@ pub fn display_and_check_bags(currency_unit: u64, currency_na active_bags += 1; for id in bag.std_iter().map(|node| node.std_id().clone()) { - let vote_weight = ::ScoreProvider::score(&id); + let vote_weight = + >::ScoreProvider::score(&id); let vote_weight_thresh_u64: u64 = (*vote_weight_thresh) .try_into() .map_err(|_| "runtime must configure score to at most u64 to use this test") @@ -92,8 +105,8 @@ pub fn display_and_check_bags(currency_unit: u64, currency_na ); } - let node = - pallet_bags_list::Node::::get(&id).expect("node in bag must exist."); + let node = pallet_bags_list::Node::::get(&id) + .expect("node in bag must exist."); if node.is_misplaced(vote_weight) { rebaggable += 1; let notional_bag = pallet_bags_list::notional_bag_for::(vote_weight); @@ -141,7 +154,7 @@ pub fn display_and_check_bags(currency_unit: u64, currency_na "a total of {} nodes are in {} active bags [{} total bags], {} of which can be rebagged.", voter_list_count, active_bags, - ::BagThresholds::get().len(), + >::BagThresholds::get().len(), rebaggable, ); } diff --git a/frame/bags-list/remote-tests/src/migration.rs b/frame/bags-list/remote-tests/src/migration.rs index c4cd73c45d377..675dfbe072670 100644 --- a/frame/bags-list/remote-tests/src/migration.rs +++ b/frame/bags-list/remote-tests/src/migration.rs @@ -24,7 +24,10 @@ use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; /// Test voter bags migration. `currency_unit` is the number of planks per the the runtimes `UNITS` /// (i.e. 
number of decimal places per DOT, KSM etc) -pub async fn execute( +pub async fn execute< + Runtime: RuntimeT, + Block: BlockT + DeserializeOwned, +>( currency_unit: u64, currency_name: &'static str, ws_url: String, diff --git a/frame/bags-list/remote-tests/src/snapshot.rs b/frame/bags-list/remote-tests/src/snapshot.rs index 408f5f2bd8aa2..655de10c4af9b 100644 --- a/frame/bags-list/remote-tests/src/snapshot.rs +++ b/frame/bags-list/remote-tests/src/snapshot.rs @@ -22,7 +22,10 @@ use remote_externalities::{Builder, Mode, OnlineConfig}; use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; /// Execute create a snapshot from pallet-staking. -pub async fn execute( +pub async fn execute< + Runtime: crate::RuntimeT, + Block: BlockT + DeserializeOwned, +>( voter_limit: Option, currency_unit: u64, ws_url: String, @@ -34,7 +37,8 @@ pub async fn execute transport: ws_url.to_string().into(), // NOTE: we don't scrape pallet-staking, this kinda ensures that the source of the data // is bags-list. - pallets: vec![pallet_bags_list::Pallet::::name().to_string()], + pallets: vec![pallet_bags_list::Pallet::::name() + .to_string()], at: None, ..Default::default() })) diff --git a/frame/bags-list/remote-tests/src/try_state.rs b/frame/bags-list/remote-tests/src/try_state.rs index 11278c20eb8ed..9817ef4ceb9e4 100644 --- a/frame/bags-list/remote-tests/src/try_state.rs +++ b/frame/bags-list/remote-tests/src/try_state.rs @@ -25,7 +25,10 @@ use remote_externalities::{Builder, Mode, OnlineConfig}; use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; /// Execute the sanity check of the bags-list. -pub async fn execute( +pub async fn execute< + Runtime: crate::RuntimeT, + Block: BlockT + DeserializeOwned, +>( currency_unit: u64, currency_name: &'static str, ws_url: String, @@ -33,7 +36,8 @@ pub async fn execute let mut ext = Builder::::new() .mode(Mode::Online(OnlineConfig { transport: ws_url.to_string().into(), - pallets: vec![pallet_bags_list::Pallet::::name().to_string()], + pallets: vec![pallet_bags_list::Pallet::::name() + .to_string()], ..Default::default() })) .inject_hashed_prefix(&>::prefix_hash()) @@ -44,7 +48,7 @@ pub async fn execute ext.execute_with(|| { sp_core::crypto::set_default_ss58_version(Runtime::SS58Prefix::get().try_into().unwrap()); - pallet_bags_list::Pallet::::try_state().unwrap(); + pallet_bags_list::Pallet::::try_state().unwrap(); log::info!(target: crate::LOG_TARGET, "executed bags-list sanity check with no errors."); crate::display_and_check_bags::(currency_unit, currency_name); diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 5aae751de9ea6..19caf0766a3e0 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -206,6 +206,7 @@ impl pallet_staking::Config for Test { type ElectionProvider = onchain::UnboundedExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index 581cfc12a3121..18b081367d135 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs @@ -42,8 +42,11 @@ type CurrencyOf = ::Currency; const USER_SEED: u32 = 0; const MAX_SPANS: u32 = 100; +type VoterBagsListInstance = pallet_bags_list::Instance1; pub 
trait Config: - pallet_nomination_pools::Config + pallet_staking::Config + pallet_bags_list::Config + pallet_nomination_pools::Config + + pallet_staking::Config + + pallet_bags_list::Config { } diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index aad38ee8475d2..2dcac34d39a11 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -15,6 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::VoterBagsListInstance; use frame_election_provider_support::VoteWeight; use frame_support::{pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; use sp_runtime::{ @@ -111,7 +112,8 @@ impl pallet_staking::Config for Runtime { type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking)>; type GenesisElectionProvider = Self::ElectionProvider; - type VoterList = pallet_bags_list::Pallet; + type VoterList = VoterList; + type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; type OnStakerSlash = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; @@ -122,7 +124,7 @@ parameter_types! { pub static BagThresholds: &'static [VoteWeight] = &[10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]; } -impl pallet_bags_list::Config for Runtime { +impl pallet_bags_list::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type BagThresholds = BagThresholds; @@ -180,7 +182,7 @@ frame_support::construct_runtime!( Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, - BagsList: pallet_bags_list::{Pallet, Call, Storage, Event}, + VoterList: pallet_bags_list::::{Pallet, Call, Storage, Event}, Pools: pallet_nomination_pools::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index d616ee1ffc180..cb63ad04d1a38 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -126,7 +126,8 @@ impl pallet_staking::Config for Runtime { type ElectionProvider = frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking)>; type GenesisElectionProvider = Self::ElectionProvider; - type VoterList = pallet_bags_list::Pallet; + type VoterList = VoterList; + type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; type OnStakerSlash = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; @@ -137,7 +138,8 @@ parameter_types! 
{ pub static BagThresholds: &'static [VoteWeight] = &[10, 20, 30, 40, 50, 60, 1_000, 2_000, 10_000]; } -impl pallet_bags_list::Config for Runtime { +type VoterBagsListInstance = pallet_bags_list::Instance1; +impl pallet_bags_list::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type BagThresholds = BagThresholds; @@ -193,7 +195,7 @@ frame_support::construct_runtime!( Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, - BagsList: pallet_bags_list::{Pallet, Call, Storage, Event}, + VoterList: pallet_bags_list::::{Pallet, Call, Storage, Event}, Pools: pallet_nomination_pools::{Pallet, Call, Storage, Event}, } ); diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 3483f5ec240a5..43c248a09a0b8 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -180,6 +180,7 @@ impl pallet_staking::Config for Test { type ElectionProvider = onchain::UnboundedExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 01be3605601f4..de2f4ae23f423 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -175,6 +175,7 @@ impl pallet_staking::Config for Test { type GenesisElectionProvider = Self::ElectionProvider; type MaxUnlockingChunks = ConstU32<32>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type TargetList = pallet_staking::UseValidatorsMap; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 2d5943b51758d..a79eac3c4dc46 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -844,7 +844,7 @@ benchmarks! { v, n, T::MaxNominations::get() as usize, false, None )?; }: { - let targets = >::get_npos_targets(); + let targets = >::get_npos_targets(None); assert_eq!(targets.len() as u32, v); } diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 4a1f0cb886ed0..4ff9e99306727 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -886,11 +886,12 @@ enum Releases { V8_0_0, // populate `VoterList`. V9_0_0, // inject validators into `VoterList` as well. V10_0_0, // remove `EarliestUnappliedSlash`. + V11_0_0, // Move pallet storage prefix, e.g. 
BagsList -> VoterBagsList } impl Default for Releases { fn default() -> Self { - Releases::V10_0_0 + Releases::V11_0_0 } } diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index ff01e1fe3b09b..2556f6d989e1b 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -20,6 +20,99 @@ use super::*; use frame_election_provider_support::SortedListProvider; use frame_support::traits::OnRuntimeUpgrade; +pub mod v11 { + use super::*; + use frame_support::{ + storage::migration::move_pallet, + traits::{GetStorageVersion, PalletInfoAccess}, + }; + #[cfg(feature = "try-runtime")] + use sp_io::hashing::twox_128; + + pub struct MigrateToV11(sp_std::marker::PhantomData<(T, P, N)>); + impl> OnRuntimeUpgrade + for MigrateToV11 + { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + frame_support::ensure!( + StorageVersion::::get() == crate::Releases::V10_0_0, + "must upgrade linearly" + ); + let old_pallet_prefix = twox_128(N::get().as_bytes()); + + frame_support::ensure!( + sp_io::storage::next_key(&old_pallet_prefix).is_some(), + "no data for the old pallet name has been detected" + ); + + Ok(()) + } + + /// Migrate the entire storage of this pallet to a new prefix. + /// + /// This new prefix must be the same as the one set in construct_runtime. For safety, use + /// `PalletInfo` to get it, as: + /// `::PalletInfo::name::`. + /// + /// The migration will look into the storage version in order to avoid triggering a + /// migration on an up to date storage. + fn on_runtime_upgrade() -> Weight { + let old_pallet_name = N::get(); + let new_pallet_name =
<P as PalletInfoAccess>
::name(); + + if StorageVersion::::get() == Releases::V10_0_0 { + // bump version anyway, even if we don't need to move the prefix + StorageVersion::::put(Releases::V11_0_0); + if new_pallet_name == old_pallet_name { + log!( + warn, + "new bags-list name is equal to the old one, only bumping the version" + ); + return T::DbWeight::get().reads(1).saturating_add(T::DbWeight::get().writes(1)) + } + + move_pallet(old_pallet_name.as_bytes(), new_pallet_name.as_bytes()); + ::BlockWeights::get().max_block + } else { + log!(warn, "v11::migrate should be removed."); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + frame_support::ensure!( + StorageVersion::::get() == crate::Releases::V11_0_0, + "wrong version after the upgrade" + ); + + let old_pallet_name = N::get(); + let new_pallet_name =
<P as PalletInfoAccess>
::name(); + + // skip storage prefix checks for the same pallet names + if new_pallet_name == old_pallet_name { + return Ok(()) + } + + let old_pallet_prefix = twox_128(N::get().as_bytes()); + frame_support::ensure!( + sp_io::storage::next_key(&old_pallet_prefix).is_none(), + "old pallet data hasn't been removed" + ); + + let new_pallet_name =
<P as PalletInfoAccess>
::name(); + let new_pallet_prefix = twox_128(new_pallet_name.as_bytes()); + frame_support::ensure!( + sp_io::storage::next_key(&new_pallet_prefix).is_some(), + "new pallet data hasn't been created" + ); + + Ok(()) + } + } +} + pub mod v10 { use super::*; use frame_support::storage_alias; diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index b8f6254d9a8e8..5980e6b40d15d 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -99,7 +99,7 @@ frame_support::construct_runtime!( Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, Historical: pallet_session::historical::{Pallet, Storage}, - BagsList: pallet_bags_list::{Pallet, Call, Storage, Event}, + VoterBagsList: pallet_bags_list::::{Pallet, Call, Storage, Event}, } ); @@ -240,9 +240,11 @@ parameter_types! { pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); } -impl pallet_bags_list::Config for Test { +type VoterBagsListInstance = pallet_bags_list::Instance1; +impl pallet_bags_list::Config for Test { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); + // Staking is the source of truth for voter bags list, since they are not kept up to date. type ScoreProvider = Staking; type BagThresholds = BagThresholds; type Score = VoteWeight; @@ -296,7 +298,8 @@ impl crate::pallet::pallet::Config for Test { type ElectionProvider = onchain::UnboundedExecution; type GenesisElectionProvider = Self::ElectionProvider; // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. - type VoterList = BagsList; + type VoterList = VoterBagsList; + type TargetList = UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; type OnStakerSlash = OnStakerSlashMock; type BenchmarkingConfig = TestBenchmarkingConfig; diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index aa8b2c01c6ff2..f2f9918cb8bc4 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -756,18 +756,32 @@ impl Pallet { /// Get the targets for an upcoming npos election. /// /// This function is self-weighing as [`DispatchClass::Mandatory`]. 
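The hunk below rewrites `get_npos_targets` to draw targets from `T::TargetList` while bounding how much work it may do. As a plain-Rust illustration of that bounded-iteration pattern (not pallet code; `COEFF` stands in for the pallet's `NPOS_MAX_ITERATIONS_COEFFICIENT` constant): collection stops once `max_allowed_len` matching items are found, once the candidate budget is spent, or once the iterator runs dry, whichever comes first, so a list dominated by non-matching entries cannot be walked indefinitely.

```rust
/// Standalone sketch of the bounded-iteration pattern used in the hunk below.
const COEFF: usize = 2;

fn collect_bounded<T>(
	candidates: impl IntoIterator<Item = T>,
	max_allowed_len: usize,
	keep: impl Fn(&T) -> bool,
) -> Vec<T> {
	let mut out = Vec::with_capacity(max_allowed_len);
	let mut seen = 0usize;
	let mut iter = candidates.into_iter();
	// Stop on whichever comes first: enough results, candidate budget spent,
	// or the underlying iterator being exhausted.
	while out.len() < max_allowed_len && seen < COEFF * max_allowed_len {
		match iter.next() {
			Some(c) => {
				seen += 1;
				if keep(&c) {
					out.push(c);
				}
			},
			None => break,
		}
	}
	out
}
```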
- pub fn get_npos_targets() -> Vec { - let mut validator_count = 0u32; - let targets = Validators::::iter() - .map(|(v, _)| { - validator_count.saturating_inc(); - v - }) - .collect::>(); + pub fn get_npos_targets(maybe_max_len: Option) -> Vec { + let max_allowed_len = maybe_max_len.unwrap_or_else(|| T::TargetList::count() as usize); + let mut all_targets = Vec::::with_capacity(max_allowed_len); + let mut targets_seen = 0; + + let mut targets_iter = T::TargetList::iter(); + while all_targets.len() < max_allowed_len && + targets_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * max_allowed_len as u32) + { + let target = match targets_iter.next() { + Some(target) => { + targets_seen.saturating_inc(); + target + }, + None => break, + }; + + if Validators::::contains_key(&target) { + all_targets.push(target); + } + } - Self::register_weight(T::WeightInfo::get_npos_targets(validator_count)); + Self::register_weight(T::WeightInfo::get_npos_targets(all_targets.len() as u32)); + log!(info, "generated {} npos targets", all_targets.len()); - targets + all_targets } /// This function will add a nominator to the `Nominators` storage map, @@ -899,7 +913,7 @@ impl ElectionDataProvider for Pallet { return Err("Target snapshot too big") } - Ok(Self::get_npos_targets()) + Ok(Self::get_npos_targets(None)) } fn next_election_prediction(now: T::BlockNumber) -> T::BlockNumber { @@ -1306,6 +1320,65 @@ impl ScoreProvider for Pallet { } } +/// A simple sorted list implementation that does not require any additional pallets. Note, this +/// does not provide validators in sorted order. If you desire nominators in a sorted order take +/// a look at [`pallet-bags-list`]. +pub struct UseValidatorsMap(sp_std::marker::PhantomData); +impl SortedListProvider for UseValidatorsMap { + type Score = BalanceOf; + type Error = (); + + /// Returns iterator over voter list, which can have `take` called on it. + fn iter() -> Box> { + Box::new(Validators::::iter().map(|(v, _)| v)) + } + fn iter_from( + start: &T::AccountId, + ) -> Result>, Self::Error> { + if Validators::::contains_key(start) { + let start_key = Validators::::hashed_key_for(start); + Ok(Box::new(Validators::::iter_from(start_key).map(|(n, _)| n))) + } else { + Err(()) + } + } + fn count() -> u32 { + Validators::::count() + } + fn contains(id: &T::AccountId) -> bool { + Validators::::contains_key(id) + } + fn on_insert(_: T::AccountId, _weight: Self::Score) -> Result<(), Self::Error> { + // nothing to do on insert. + Ok(()) + } + fn get_score(id: &T::AccountId) -> Result { + Ok(Pallet::::weight_of(id).into()) + } + fn on_update(_: &T::AccountId, _weight: Self::Score) -> Result<(), Self::Error> { + // nothing to do on update. + Ok(()) + } + fn on_remove(_: &T::AccountId) -> Result<(), Self::Error> { + // nothing to do on remove. + Ok(()) + } + fn unsafe_regenerate( + _: impl IntoIterator, + _: Box Self::Score>, + ) -> u32 { + // nothing to do upon regenerate. + 0 + } + fn try_state() -> Result<(), &'static str> { + Ok(()) + } + fn unsafe_clear() { + #[allow(deprecated)] + Validators::::remove_all(); + } +} + /// A simple voter list implementation that does not require any additional pallets. Note, this /// does not provided nominators in sorted ordered. If you desire nominators in a sorted order take /// a look at [`pallet-bags-list]. 
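Both `UseValidatorsMap` above and an instanced `pallet-bags-list` satisfy the same `SortedListProvider` interface, so code consuming the staking pallet can stay agnostic about which backing the runtime configured. A rough usage sketch follows; the helper name is invented for illustration and is not part of the pallet:

```rust
use frame_election_provider_support::SortedListProvider;
use sp_std::vec::Vec;

/// Illustrative helper only: fetch up to `n` electable targets from whichever
/// `TargetList` implementation the runtime wired in.
fn first_n_targets<T: pallet_staking::Config>(n: usize) -> Vec<T::AccountId> {
	<T as pallet_staking::Config>::TargetList::iter().take(n).collect()
}
```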
@@ -1458,6 +1531,10 @@ impl StakingInterface for Pallet { #[cfg(feature = "try-runtime")] impl Pallet { pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), &'static str> { + ensure!( + T::VoterList::iter().all(|x| >::contains_key(&x)), + "VoterList contains non-nominators" + ); T::VoterList::try_state()?; Self::check_nominators()?; Self::check_exposures()?; diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 1b38f236fc6f3..b8a457beb5eae 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -184,8 +184,36 @@ pub mod pallet { /// /// The changes to nominators are reported to this. Moreover, each validator's self-vote is /// also reported as one independent vote. + /// + /// To keep the load off the chain as much as possible, changes made to the staked amount + /// via rewards and slashes are not reported and thus need to be manually fixed by the + /// staker. In case of `bags-list`, this always means using `rebag` and `putInFrontOf`. + /// + /// Invariant: what comes out of this list will always be a nominator. type VoterList: SortedListProvider; + /// WIP: This is a noop as of now, the actual business logic that's described below is going + /// to be introduced in a follow-up PR. + /// + /// Something that provides a best-effort sorted list of targets aka electable validators, + /// used for NPoS election. + /// + /// The changes to the approval stake of each validator are reported to this. This means any + /// change to: + /// 1. The stake of any validator or nominator. + /// 2. The targets of any nominator + /// 3. The role of any staker (e.g. validator -> chilled, nominator -> validator, etc) + /// + /// Unlike `VoterList`, the values in this list are always kept up to date with reward and + /// slash as well, and thus represent the accurate approval stake of all account being + /// nominated by nominators. + /// + /// Note that while at the time of nomination, all targets are checked to be real + /// validators, they can chill at any point, and their approval stakes will still be + /// recorded. This implies that what comes out of iterating this list MIGHT NOT BE AN ACTIVE + /// VALIDATOR. + type TargetList: SortedListProvider>; + /// The maximum number of `unlocking` chunks a [`StakingLedger`] can have. Effectively /// determines how many unique eras a staker may be unbonding in. #[pallet::constant] diff --git a/utils/frame/generate-bags/src/lib.rs b/utils/frame/generate-bags/src/lib.rs index d4507c3be33ef..23da131a668d8 100644 --- a/utils/frame/generate-bags/src/lib.rs +++ b/utils/frame/generate-bags/src/lib.rs @@ -207,6 +207,10 @@ pub fn generate_thresholds( writeln!(buf, "//! Autogenerated bag thresholds.")?; writeln!(buf, "//!")?; writeln!(buf, "//! Generated on {}", now.to_rfc3339())?; + writeln!(buf, "//! Arguments")?; + writeln!(buf, "//! Total issuance: {}", &total_issuance)?; + writeln!(buf, "//! Minimum balance: {}", &minimum_balance)?; + writeln!( buf, "//! 
for the {} runtime.", @@ -234,6 +238,17 @@ pub fn generate_thresholds( writeln!(buf)?; writeln!(buf, "/// Upper thresholds delimiting the bag list.")?; writeln!(buf, "pub const THRESHOLDS: [u64; {}] = [", thresholds.len())?; + for threshold in &thresholds { + num_buf.write_formatted(threshold, &format); + // u64::MAX, with spacers every 3 digits, is 26 characters wide + writeln!(buf, " {:>26},", num_buf.as_str())?; + } + writeln!(buf, "];")?; + + // thresholds balance + writeln!(buf)?; + writeln!(buf, "/// Upper thresholds delimiting the bag list.")?; + writeln!(buf, "pub const THRESHOLDS_BALANCES: [u128; {}] = [", thresholds.len())?; for threshold in thresholds { num_buf.write_formatted(&threshold, &format); // u64::MAX, with spacers every 3 digits, is 26 characters wide From 91b97b016618fcc515f55a15b1f008b68e13f5a2 Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Sun, 18 Sep 2022 14:50:38 +0200 Subject: [PATCH 143/348] [Fix] Benchmark issue in staking (#12290) --- frame/staking/src/pallet/impls.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index f2f9918cb8bc4..62faaaf68ef62 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -1377,6 +1377,11 @@ impl SortedListProvider for UseValidatorsMap { #[allow(deprecated)] Validators::::remove_all(); } + + #[cfg(feature = "runtime-benchmarks")] + fn score_update_worst_case(_who: &T::AccountId, _is_increase: bool) -> Self::Score { + unimplemented!() + } } /// A simple voter list implementation that does not require any additional pallets. Note, this From ee3c159829398add73a7c26fd7cba6de1c51257b Mon Sep 17 00:00:00 2001 From: NingLin-P Date: Mon, 19 Sep 2022 18:52:55 +0800 Subject: [PATCH 144/348] support upgrade hooks to directly pass data (#12185) * update interfaces of OnRuntimeUpgrade & Hooks Signed-off-by: linning * remove try-runtime for PreStateDigest Signed-off-by: linning * remove the Default bound of PreStateDigest Signed-off-by: linning * remove try-runtime for PreStateDigest & pre_upgrade Signed-off-by: linning * remove tmp storage between upgrade hooks Signed-off-by: linning * ensure hooks are storage noop Signed-off-by: linning * remove OnRuntimeUpgradeHelpersExt Signed-off-by: linning * cargo check & fmt Signed-off-by: linning * rename PreStateDigest to PreUpgradeState Signed-off-by: linning * replace associate type with codec & vec Signed-off-by: linning * add helper strcut to help encode/decode tuple Signed-off-by: linning * update comment Signed-off-by: linning * fix Signed-off-by: linning * add test Signed-off-by: linning * address comment Signed-off-by: linning * fix doc Signed-off-by: linning * fix ci Signed-off-by: linning * address comment Signed-off-by: linning * add more test cases Signed-off-by: linning * make clippy happy Signed-off-by: linning * fmt Signed-off-by: linning * update comment Signed-off-by: linning * fmt Signed-off-by: linning Signed-off-by: linning --- frame/bags-list/src/migrations.rs | 22 ++- frame/executive/src/lib.rs | 11 +- frame/nomination-pools/src/migration.rs | 16 +- frame/staking/src/migrations.rs | 17 ++- .../procedural/src/pallet/expand/hooks.rs | 6 +- frame/support/src/dispatch.rs | 12 +- frame/support/src/lib.rs | 2 +- .../support/src/storage/storage_noop_guard.rs | 2 +- frame/support/src/traits.rs | 2 +- frame/support/src/traits/hooks.rs | 141 ++++++++++++++++-- frame/support/src/traits/try_runtime.rs | 38 +---- utils/frame/try-runtime/cli/src/lib.rs | 12 +- 12 files changed, 
182 insertions(+), 99 deletions(-) diff --git a/frame/bags-list/src/migrations.rs b/frame/bags-list/src/migrations.rs index 49b7d136125e2..e1dc9f777e537 100644 --- a/frame/bags-list/src/migrations.rs +++ b/frame/bags-list/src/migrations.rs @@ -24,6 +24,8 @@ use frame_support::traits::OnRuntimeUpgrade; #[cfg(feature = "try-runtime")] use frame_support::ensure; +#[cfg(feature = "try-runtime")] +use sp_std::vec::Vec; /// A struct that does not migration, but only checks that the counter prefix exists and is correct. pub struct CheckCounterPrefix, I: 'static>(sp_std::marker::PhantomData<(T, I)>); @@ -33,7 +35,7 @@ impl, I: 'static> OnRuntimeUpgrade for CheckCounterPrefix Result<(), &'static str> { + fn pre_upgrade() -> Result, &'static str> { // The old explicit storage item. #[frame_support::storage_alias] type CounterForListNodes, I: 'static> = @@ -51,7 +53,7 @@ impl, I: 'static> OnRuntimeUpgrade for CheckCounterPrefix::count() ); - Ok(()) + Ok(Vec::new()) } } @@ -80,17 +82,13 @@ mod old { #[frame_support::storage_alias] pub type CounterForListNodes, I: 'static> = StorageValue, u32, ValueQuery>; - - #[frame_support::storage_alias] - pub type TempStorage, I: 'static> = - StorageValue, u32, ValueQuery>; } /// A struct that migrates all bags lists to contain a score value. pub struct AddScore, I: 'static = ()>(sp_std::marker::PhantomData<(T, I)>); impl, I: 'static> OnRuntimeUpgrade for AddScore { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { + fn pre_upgrade() -> Result, &'static str> { // The list node data should be corrupt at this point, so this is zero. ensure!(crate::ListNodes::::iter().count() == 0, "list node data is not corrupt"); // We can use the helper `old::ListNode` to get the existing data. @@ -98,8 +96,7 @@ impl, I: 'static> OnRuntimeUpgrade for AddScore { let tracked_node_count: u32 = old::CounterForListNodes::::get(); crate::log!(info, "number of nodes before: {:?} {:?}", iter_node_count, tracked_node_count); ensure!(iter_node_count == tracked_node_count, "Node count is wrong."); - old::TempStorage::::put(iter_node_count); - Ok(()) + Ok(iter_node_count.encode()) } fn on_runtime_upgrade() -> frame_support::weights::Weight { @@ -122,9 +119,10 @@ impl, I: 'static> OnRuntimeUpgrade for AddScore { } #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - let node_count_before = old::TempStorage::::take(); - // Now, the list node data is not corrupt anymore. + fn post_upgrade(node_count_before: Vec) -> Result<(), &'static str> { + let node_count_before: u32 = Decode::decode(&mut node_count_before.as_slice()) + .expect("the state parameter should be something that was generated by pre_upgrade"); + // Now the list node data is not corrupt anymore. let iter_node_count_after: u32 = crate::ListNodes::::iter().count() as u32; let tracked_node_count_after: u32 = crate::ListNodes::::count(); crate::log!( diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 96e71512e54bc..88cefb17c2895 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -287,10 +287,15 @@ where /// /// This should only be used for testing. 
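The function that follows wraps both upgrade hooks in a `StorageNoopGuard`, which this patch also exposes under the `try-runtime` feature further down. A minimal sketch of the guard's behaviour, assuming a test-externalities environment such as `sp_io::TestExternalities`:

```rust
// While the guard is alive, any net change to the storage root makes its
// `Drop` implementation panic, which is what keeps the upgrade hooks below
// free of storage side effects.
sp_io::TestExternalities::default().execute_with(|| {
	let _guard = frame_support::StorageNoopGuard::default();
	// Reads are fine.
	let _ = sp_io::storage::get(b"key");
	// A write such as the following would trip the guard when it is dropped:
	// sp_io::storage::set(b"key", b"value");
});
```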
pub fn try_runtime_upgrade() -> Result { - <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::pre_upgrade().unwrap(); + // ensure both `pre_upgrade` and `post_upgrade` won't change the storage root + let state = { + let _guard = frame_support::StorageNoopGuard::default(); + <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::pre_upgrade().unwrap() + }; let weight = Self::execute_on_runtime_upgrade(); - <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::post_upgrade().unwrap(); - + let _guard = frame_support::StorageNoopGuard::default(); + <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::post_upgrade(state) + .unwrap(); Ok(weight) } } diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs index bcf23ee863a39..4d78f2fa76c96 100644 --- a/frame/nomination-pools/src/migration.rs +++ b/frame/nomination-pools/src/migration.rs @@ -18,7 +18,7 @@ use super::*; use crate::log; use frame_support::traits::OnRuntimeUpgrade; -use sp_std::collections::btree_map::BTreeMap; +use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; pub mod v1 { use super::*; @@ -97,7 +97,7 @@ pub mod v1 { } #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { + fn post_upgrade(_: Vec) -> Result<(), &'static str> { // new version must be set. assert_eq!(Pallet::::on_chain_storage_version(), 1); Pallet::::try_state(frame_system::Pallet::::block_number())?; @@ -347,7 +347,7 @@ pub mod v2 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { + fn pre_upgrade() -> Result, &'static str> { // all reward accounts must have more than ED. RewardPools::::iter().for_each(|(id, _)| { assert!( @@ -356,11 +356,11 @@ pub mod v2 { ) }); - Ok(()) + Ok(Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { + fn post_upgrade(_: Vec) -> Result<(), &'static str> { // new version must be set. assert_eq!(Pallet::::on_chain_storage_version(), 2); @@ -430,16 +430,16 @@ pub mod v3 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { + fn pre_upgrade() -> Result, &'static str> { ensure!( Pallet::::current_storage_version() > Pallet::::on_chain_storage_version(), "the on_chain version is equal or more than the current one" ); - Ok(()) + Ok(Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { + fn post_upgrade(_: Vec) -> Result<(), &'static str> { ensure!( Metadata::::iter_keys().all(|id| BondedPools::::contains_key(&id)), "not all of the stale metadata has been removed" diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 2556f6d989e1b..4f5dcf49d6ab4 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -154,6 +154,10 @@ pub mod v10 { pub mod v9 { use super::*; + #[cfg(feature = "try-runtime")] + use frame_support::codec::{Decode, Encode}; + #[cfg(feature = "try-runtime")] + use sp_std::vec::Vec; /// Migration implementation that injects all validators into sorted list. 
/// @@ -192,23 +196,22 @@ pub mod v9 { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - use frame_support::traits::OnRuntimeUpgradeHelpersExt; + fn pre_upgrade() -> Result, &'static str> { frame_support::ensure!( StorageVersion::::get() == crate::Releases::V8_0_0, "must upgrade linearly" ); let prev_count = T::VoterList::count(); - Self::set_temp_storage(prev_count, "prev"); - Ok(()) + Ok(prev_count.encode()) } #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - use frame_support::traits::OnRuntimeUpgradeHelpersExt; + fn post_upgrade(prev_count: Vec) -> Result<(), &'static str> { + let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect( + "the state parameter should be something that was generated by pre_upgrade", + ); let post_count = T::VoterList::count(); - let prev_count = Self::get_temp_storage::("prev").unwrap(); let validators = Validators::::count(); assert!(post_count == prev_count + validators); diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs index 03878f3f186df..48d4aec436d40 100644 --- a/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -160,7 +160,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { + fn pre_upgrade() -> Result<#frame_support::sp_std::vec::Vec, &'static str> { < Self as @@ -169,12 +169,12 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { } #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { + fn post_upgrade(state: #frame_support::sp_std::vec::Vec) -> Result<(), &'static str> { < Self as #frame_support::traits::Hooks<::BlockNumber> - >::post_upgrade() + >::post_upgrade(state) } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 1d8930f96fbe5..cacd599fa008c 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -2158,12 +2158,12 @@ macro_rules! decl_module { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - Ok(()) + fn pre_upgrade() -> Result<$crate::sp_std::vec::Vec, &'static str> { + Ok($crate::sp_std::vec::Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { + fn post_upgrade(_: $crate::sp_std::vec::Vec) -> Result<(), &'static str> { Ok(()) } } @@ -2196,12 +2196,12 @@ macro_rules! 
decl_module { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - Ok(()) + fn pre_upgrade() -> Result<$crate::sp_std::vec::Vec, &'static str> { + Ok($crate::sp_std::vec::Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { + fn post_upgrade(_: $crate::sp_std::vec::Vec) -> Result<(), &'static str> { Ok(()) } } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index e9578cccc23bc..31b95de5f413d 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -93,7 +93,7 @@ pub mod unsigned { }; } -#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] +#[cfg(any(feature = "std", feature = "runtime-benchmarks", feature = "try-runtime", test))] pub use self::storage::storage_noop_guard::StorageNoopGuard; pub use self::{ dispatch::{Callable, Parameter}, diff --git a/frame/support/src/storage/storage_noop_guard.rs b/frame/support/src/storage/storage_noop_guard.rs index afcc699d91313..7186c3eaf467a 100644 --- a/frame/support/src/storage/storage_noop_guard.rs +++ b/frame/support/src/storage/storage_noop_guard.rs @@ -16,7 +16,7 @@ // limitations under the License. // Feature gated since it can panic. -#![cfg(any(feature = "std", feature = "runtime-benchmarks", test))] +#![cfg(any(feature = "std", feature = "runtime-benchmarks", feature = "try-runtime", test))] //! Contains the [`crate::StorageNoopGuard`] for conveniently asserting //! that no storage mutation has been made by a whole code block. diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 16ac7929831b4..d51c32649a797 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -109,4 +109,4 @@ pub use voting::{ #[cfg(feature = "try-runtime")] mod try_runtime; #[cfg(feature = "try-runtime")] -pub use try_runtime::{OnRuntimeUpgradeHelpersExt, Select as TryStateSelect, TryState}; +pub use try_runtime::{Select as TryStateSelect, TryState}; diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index 25ec333a7dbe0..eb1106e4c383f 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -22,6 +22,9 @@ use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::traits::AtLeast32BitUnsigned; use sp_std::prelude::*; +#[cfg(feature = "try-runtime")] +use codec::{Decode, Encode}; + /// The block initialization trait. /// /// Implementing this lets you express what should happen for your pallet when the block is @@ -135,17 +138,25 @@ pub trait OnRuntimeUpgrade { /// Execute some pre-checks prior to a runtime upgrade. /// + /// Return a `Vec` that can contain arbitrary encoded data (usually some pre-upgrade state), + /// which will be passed to `post_upgrade` after upgrading for post-check. An empty vector + /// should be returned if there is no such need. + /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - Ok(()) + fn pre_upgrade() -> Result, &'static str> { + Ok(Vec::new()) } /// Execute some post-checks after a runtime upgrade. /// + /// The `state` parameter is the `Vec` returned by `pre_upgrade` before upgrading, which + /// can be used for post-check. NOTE: if `pre_upgrade` is not implemented an empty vector will + /// be passed in, in such case `post_upgrade` should ignore it. + /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. 
#[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { + fn post_upgrade(_state: Vec) -> Result<(), &'static str> { Ok(()) } } @@ -161,17 +172,21 @@ impl OnRuntimeUpgrade for Tuple { } #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - let mut result = Ok(()); - for_tuples!( #( result = result.and(Tuple::pre_upgrade()); )* ); - result + fn pre_upgrade() -> Result, &'static str> { + let mut state: Vec> = Vec::default(); + for_tuples!( #( state.push(Tuple::pre_upgrade()?); )* ); + Ok(state.encode()) } #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - let mut result = Ok(()); - for_tuples!( #( result = result.and(Tuple::post_upgrade()); )* ); - result + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + let state: Vec> = Decode::decode(&mut state.as_slice()) + .expect("the state parameter should be the same as pre_upgrade generated"); + let mut state_iter = state.into_iter(); + for_tuples!( #( Tuple::post_upgrade( + state_iter.next().expect("the state parameter should be the same as pre_upgrade generated") + )?; )* ); + Ok(()) } } @@ -243,17 +258,25 @@ pub trait Hooks { /// Execute some pre-checks prior to a runtime upgrade. /// + /// Return a `Vec` that can contain arbitrary encoded data (usually some pre-upgrade state), + /// which will be passed to `post_upgrade` after upgrading for post-check. An empty vector + /// should be returned if there is no such need. + /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - Ok(()) + fn pre_upgrade() -> Result, &'static str> { + Ok(Vec::new()) } /// Execute some post-checks after a runtime upgrade. /// + /// The `state` parameter is the `Vec` returned by `pre_upgrade` before upgrading, which + /// can be used for post-check. NOTE: if `pre_upgrade` is not implemented an empty vector will + /// be passed in, in such case `post_upgrade` should ignore it. + /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. 
#[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { + fn post_upgrade(_state: Vec) -> Result<(), &'static str> { Ok(()) } @@ -390,4 +413,94 @@ mod tests { ON_IDLE_INVOCATION_ORDER.clear(); } } + + #[cfg(feature = "try-runtime")] + #[test] + fn on_runtime_upgrade_tuple() { + struct Test1; + struct Test2; + struct Test3; + + impl OnRuntimeUpgrade for Test1 { + fn pre_upgrade() -> Result, &'static str> { + Ok("Test1".encode()) + } + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + let s: String = Decode::decode(&mut state.as_slice()).unwrap(); + assert_eq!(s, "Test1"); + Ok(()) + } + } + impl OnRuntimeUpgrade for Test2 { + fn pre_upgrade() -> Result, &'static str> { + Ok(100u32.encode()) + } + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + let s: u32 = Decode::decode(&mut state.as_slice()).unwrap(); + assert_eq!(s, 100); + Ok(()) + } + } + impl OnRuntimeUpgrade for Test3 { + fn pre_upgrade() -> Result, &'static str> { + Ok(true.encode()) + } + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + let s: bool = Decode::decode(&mut state.as_slice()).unwrap(); + assert_eq!(s, true); + Ok(()) + } + } + + type TestEmpty = (); + let origin_state = ::pre_upgrade().unwrap(); + let states: Vec> = Decode::decode(&mut origin_state.as_slice()).unwrap(); + assert!(states.is_empty()); + ::post_upgrade(origin_state).unwrap(); + + type Test1Tuple = (Test1,); + let origin_state = ::pre_upgrade().unwrap(); + let states: Vec> = Decode::decode(&mut origin_state.as_slice()).unwrap(); + assert_eq!(states.len(), 1); + assert_eq!( + ::decode(&mut states[0].as_slice()).unwrap(), + "Test1".to_owned() + ); + ::post_upgrade(origin_state).unwrap(); + + type Test123 = (Test1, Test2, Test3); + let origin_state = ::pre_upgrade().unwrap(); + let states: Vec> = Decode::decode(&mut origin_state.as_slice()).unwrap(); + assert_eq!( + ::decode(&mut states[0].as_slice()).unwrap(), + "Test1".to_owned() + ); + assert_eq!(::decode(&mut states[1].as_slice()).unwrap(), 100u32); + assert_eq!(::decode(&mut states[2].as_slice()).unwrap(), true); + ::post_upgrade(origin_state).unwrap(); + + type Test321 = (Test3, Test2, Test1); + let origin_state = ::pre_upgrade().unwrap(); + let states: Vec> = Decode::decode(&mut origin_state.as_slice()).unwrap(); + assert_eq!(::decode(&mut states[0].as_slice()).unwrap(), true); + assert_eq!(::decode(&mut states[1].as_slice()).unwrap(), 100u32); + assert_eq!( + ::decode(&mut states[2].as_slice()).unwrap(), + "Test1".to_owned() + ); + ::post_upgrade(origin_state).unwrap(); + + type TestNested123 = (Test1, (Test2, Test3)); + let origin_state = ::pre_upgrade().unwrap(); + let states: Vec> = Decode::decode(&mut origin_state.as_slice()).unwrap(); + assert_eq!( + ::decode(&mut states[0].as_slice()).unwrap(), + "Test1".to_owned() + ); + // nested state for (Test2, Test3) + let nested_states: Vec> = Decode::decode(&mut states[1].as_slice()).unwrap(); + assert_eq!(::decode(&mut nested_states[0].as_slice()).unwrap(), 100u32); + assert_eq!(::decode(&mut nested_states[1].as_slice()).unwrap(), true); + ::post_upgrade(origin_state).unwrap(); + } } diff --git a/frame/support/src/traits/try_runtime.rs b/frame/support/src/traits/try_runtime.rs index c5ddd1cae42be..640bb566a65af 100644 --- a/frame/support/src/traits/try_runtime.rs +++ b/frame/support/src/traits/try_runtime.rs @@ -17,46 +17,10 @@ //! Try-runtime specific traits and types. 
-use super::*; use impl_trait_for_tuples::impl_for_tuples; use sp_arithmetic::traits::AtLeast32BitUnsigned; use sp_std::prelude::*; -/// Prefix to be used (optionally) for implementing [`OnRuntimeUpgradeHelpersExt::storage_key`]. -const ON_RUNTIME_UPGRADE_PREFIX: &[u8] = b"__ON_RUNTIME_UPGRADE__"; - -/// Some helper functions for [`OnRuntimeUpgrade`] during `try-runtime` testing. -pub trait OnRuntimeUpgradeHelpersExt { - /// Generate a storage key unique to this runtime upgrade. - /// - /// This can be used to communicate data from pre-upgrade to post-upgrade state and check - /// them. See [`Self::set_temp_storage`] and [`Self::get_temp_storage`]. - fn storage_key(ident: &str) -> [u8; 32] { - crate::storage::storage_prefix(ON_RUNTIME_UPGRADE_PREFIX, ident.as_bytes()) - } - - /// Get temporary storage data written by [`Self::set_temp_storage`]. - /// - /// Returns `None` if either the data is unavailable or un-decodable. - /// - /// A `at` storage identifier must be provided to indicate where the storage is being read from. - fn get_temp_storage(at: &str) -> Option { - sp_io::storage::get(&Self::storage_key(at)) - .and_then(|bytes| codec::Decode::decode(&mut &*bytes).ok()) - } - - /// Write some temporary data to a specific storage that can be read (potentially in - /// post-upgrade hook) via [`Self::get_temp_storage`]. - /// - /// A `at` storage identifier must be provided to indicate where the storage is being written - /// to. - fn set_temp_storage(data: T, at: &str) { - sp_io::storage::set(&Self::storage_key(at), &data.encode()); - } -} - -impl OnRuntimeUpgradeHelpersExt for U {} - // Which state tests to execute. #[derive(codec::Encode, codec::Decode, Clone)] pub enum Select { @@ -68,7 +32,7 @@ pub enum Select { RoundRobin(u32), /// Run only pallets who's name matches the given list. /// - /// Pallet names are obtained from [`PalletInfoAccess`]. + /// Pallet names are obtained from [`super::PalletInfoAccess`]. Only(Vec>), } diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index c71496e0b850c..c3382045add16 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -132,20 +132,20 @@ //! added, given the right flag: //! //! ```ignore +//! //! #[cfg(feature = try-runtime)] -//! fn pre_upgrade() -> Result<(), &'static str> {} +//! fn pre_upgrade() -> Result, &'static str> {} //! //! #[cfg(feature = try-runtime)] -//! fn post_upgrade() -> Result<(), &'static str> {} +//! fn post_upgrade(state: Vec) -> Result<(), &'static str> {} //! ``` //! //! (The pallet macro syntax will support this simply as a part of `#[pallet::hooks]`). //! //! These hooks allow you to execute some code, only within the `on-runtime-upgrade` command, before -//! and after the migration. If any data needs to be temporarily stored between the pre/post -//! migration hooks, `OnRuntimeUpgradeHelpersExt` can help with that. Note that you should be -//! mindful with any mutable storage ops in the pre/post migration checks, as you almost certainly -//! will not want to mutate any of the storage that is to be migrated. +//! and after the migration. Moreover, `pre_upgrade` can return a `Vec` that contains arbitrary +//! encoded data (usually some pre-upgrade state) which will be passed to `post_upgrade` after +//! upgrading and used for post checking. //! //! #### Logging //! 
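Taken together, a migration written against the new interface returns its encoded pre-state from `pre_upgrade` and receives it back in `post_upgrade`, instead of stashing it via the removed temp-storage helpers. The following is a hedged skeleton, not taken from any real pallet; the checked value and error strings are illustrative only:

```rust
use codec::{Decode, Encode};
use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};
use sp_std::vec::Vec;

pub struct ExampleMigration;
impl OnRuntimeUpgrade for ExampleMigration {
	fn on_runtime_upgrade() -> Weight {
		// ... perform the storage changes and return the consumed weight ...
		Weight::zero()
	}

	#[cfg(feature = "try-runtime")]
	fn pre_upgrade() -> Result<Vec<u8>, &'static str> {
		// Capture whatever pre-state the post-check needs and hand it back encoded.
		let item_count: u32 = 42; // e.g. a storage map's item count
		Ok(item_count.encode())
	}

	#[cfg(feature = "try-runtime")]
	fn post_upgrade(state: Vec<u8>) -> Result<(), &'static str> {
		let before: u32 = Decode::decode(&mut state.as_slice())
			.map_err(|_| "pre_upgrade state is corrupt")?;
		frame_support::ensure!(before == 42, "unexpected item count after upgrade");
		Ok(())
	}
}
```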
From b541b2d2c9259c8617cd1ce2cf9a98ac06602679 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Mon, 19 Sep 2022 12:56:46 +0200 Subject: [PATCH 145/348] contracts: Use `WeakBoundedVec` for instrumented code (#12186) * Use WeakBoundedVec for instrumented code * Remove `RelaxedMaxCodeLen` from kitchensink --- bin/node/runtime/src/lib.rs | 1 - frame/contracts/src/lib.rs | 13 ++----------- frame/contracts/src/tests.rs | 1 - frame/contracts/src/wasm/code_cache.rs | 14 ++++++++++---- frame/contracts/src/wasm/mod.rs | 12 +----------- 5 files changed, 13 insertions(+), 28 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index b5467aa6f8ea7..8b150dca849f0 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1167,7 +1167,6 @@ impl pallet_contracts::Config for Runtime { type AddressGenerator = pallet_contracts::DefaultAddressGenerator; type ContractAccessWeight = pallet_contracts::DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; - type RelaxedMaxCodeLen = ConstU32<{ 256 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 48f1668440387..842fa02f34529 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -111,7 +111,7 @@ use frame_support::{ ensure, traits::{ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time}, weights::Weight, - BoundedVec, + BoundedVec, WeakBoundedVec, }; use frame_system::{limits::BlockWeights, Pallet as System}; use pallet_contracts_primitives::{ @@ -135,7 +135,7 @@ type TrieId = BoundedVec>; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type CodeVec = BoundedVec::MaxCodeLen>; -type RelaxedCodeVec = BoundedVec::RelaxedMaxCodeLen>; +type RelaxedCodeVec = WeakBoundedVec::MaxCodeLen>; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; /// Used as a sentinel value when reading and writing contract memory. @@ -366,15 +366,6 @@ pub mod pallet { /// a wasm binary below this maximum size. type MaxCodeLen: Get; - /// The maximum length of a contract code after reinstrumentation. - /// - /// When uploading a new contract the size defined by [`Self::MaxCodeLen`] is used for both - /// the pristine **and** the instrumented version. When a existing contract needs to be - /// reinstrumented after a runtime upgrade we apply this bound. The reason is that if the - /// new instrumentation increases the size beyond the limit it would make that contract - /// inaccessible until rectified by another runtime upgrade. - type RelaxedMaxCodeLen: Get; - /// The maximum allowable length in bytes for storage keys. 
type MaxStorageKeyLen: Get; } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index c40968eb2ea57..d5f4df161b723 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -395,7 +395,6 @@ impl Config for Test { type AddressGenerator = DefaultAddressGenerator; type ContractAccessWeight = DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; - type RelaxedMaxCodeLen = ConstU32<{ 256 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; } diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index eb5551e342bca..137eccf3db686 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -39,6 +39,7 @@ use frame_support::{ dispatch::{DispatchError, DispatchResult}, ensure, traits::{Get, ReservableCurrency}, + WeakBoundedVec, }; use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::BadOrigin; @@ -195,10 +196,15 @@ pub fn reinstrument( let original_code = >::get(&prefab_module.code_hash).ok_or(Error::::CodeNotFound)?; let original_code_len = original_code.len(); - prefab_module.code = prepare::reinstrument_contract::(&original_code, schedule) - .map_err(|_| >::CodeRejected)? - .try_into() - .map_err(|_| >::CodeTooLarge)?; + // We need to allow contracts growing too big after re-instrumentation. Otherwise + // the contract can become inaccessible. The user has no influence over this size + // as the contract is already deployed and every change in size would be the result + // of changes in the instrumentation algorithm controlled by the chain authors. + prefab_module.code = WeakBoundedVec::force_from( + prepare::reinstrument_contract::(&original_code, schedule) + .map_err(|_| >::CodeRejected)?, + Some("Contract exceeds limit after re-instrumentation."), + ); prefab_module.instruction_weights_version = schedule.instruction_weights.version; >::insert(&prefab_module.code_hash, &*prefab_module); Ok(original_code_len as u32) diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index e850f458bb745..085bc5a8c6423 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -35,11 +35,7 @@ use crate::{ Schedule, }; use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::{ - dispatch::{DispatchError, DispatchResult}, - ensure, - traits::Get, -}; +use frame_support::dispatch::{DispatchError, DispatchResult}; use sp_core::crypto::UncheckedFrom; use sp_sandbox::{SandboxEnvironmentBuilder, SandboxInstance, SandboxMemory}; use sp_std::prelude::*; @@ -134,12 +130,6 @@ where schedule, owner, )?; - // When instrumenting a new code we apply a stricter limit than enforced by the - // `RelaxedCodeVec` in order to leave some headroom for reinstrumentation. 
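The reason `WeakBoundedVec` can replace the relaxed limit (and the `ensure!` removed just below) is its conversion semantics: `force_from` never fails, it only logs the supplied label when the bound is exceeded, so re-instrumented code that grows past `MaxCodeLen` stays reachable. A hedged, standalone sketch contrasting it with `BoundedVec`:

```rust
use frame_support::{traits::ConstU32, BoundedVec, WeakBoundedVec};

fn demo() {
	let code: Vec<u8> = vec![0u8; 8];

	// Hard bound: conversion fails once the input exceeds the limit.
	assert!(BoundedVec::<u8, ConstU32<4>>::try_from(code.clone()).is_err());

	// Weak bound: conversion always succeeds; the overflow is merely logged,
	// which is why a re-instrumented contract growing past the limit stays
	// accessible instead of becoming unreachable.
	let relaxed: WeakBoundedVec<u8, ConstU32<4>> =
		WeakBoundedVec::force_from(code, Some("re-instrumented code over limit"));
	assert_eq!(relaxed.len(), 8);
}
```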
- ensure!( - module.code.len() as u32 <= T::MaxCodeLen::get(), - (>::CodeTooLarge.into(), ""), - ); Ok(module) } From c24431eb6a95197550b30715a4c124be1aea79de Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 19 Sep 2022 15:12:29 +0200 Subject: [PATCH 146/348] Fix CI (#12297) * Put type constraint at the end Signed-off-by: Oliver Tale-Yazdi * Fix rust features Signed-off-by: Oliver Tale-Yazdi * Use new OnRuntimeUpgrade hooks Signed-off-by: Oliver Tale-Yazdi * Use new OnRuntimeUpgrade hooks Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- frame/bags-list/Cargo.toml | 2 +- frame/staking/Cargo.toml | 1 + frame/staking/src/migrations.rs | 6 +++--- frame/uniques/src/lib.rs | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml index 2f9fa6c624538..19eb66ae624af 100644 --- a/frame/bags-list/Cargo.toml +++ b/frame/bags-list/Cargo.toml @@ -65,7 +65,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "sp-core", "sp-io", - "pallet-balances", + "pallet-balances/runtime-benchmarks", "sp-tracing", "frame-election-provider-support/runtime-benchmarks", ] diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 54d21f7ca6bcc..cf9e12dcd82b4 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -74,6 +74,7 @@ runtime-benchmarks = [ "frame-election-provider-support/runtime-benchmarks", "rand_chacha", "sp-staking/runtime-benchmarks", + "pallet-bags-list/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] fuzz = [ diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 4f5dcf49d6ab4..427570e5cf14f 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -34,7 +34,7 @@ pub mod v11 { for MigrateToV11 { #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { + fn pre_upgrade() -> Result, &'static str> { frame_support::ensure!( StorageVersion::::get() == crate::Releases::V10_0_0, "must upgrade linearly" @@ -46,7 +46,7 @@ pub mod v11 { "no data for the old pallet name has been detected" ); - Ok(()) + Ok(Default::default()) } /// Migrate the entire storage of this pallet to a new prefix. @@ -81,7 +81,7 @@ pub mod v11 { } #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { + fn post_upgrade(_state: Vec) -> Result<(), &'static str> { frame_support::ensure!( StorageVersion::::get() == crate::Releases::V11_0_0, "wrong version after the upgrade" diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index e25607e67a224..b02a10270dd7d 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -110,9 +110,9 @@ pub mod pallet { /// Standard collection creation is only allowed if the origin attempting it and the /// collection are in this set. type CreateOrigin: EnsureOriginWithArg< - Success = Self::AccountId, Self::Origin, Self::CollectionId, + Success = Self::AccountId, >; /// Locker trait to enable Locking mechanism downstream. 
From df3722c8b42dd9ac8e3705d0d06bdba03237d94f Mon Sep 17 00:00:00 2001 From: Vlad Date: Tue, 20 Sep 2022 12:15:48 +0300 Subject: [PATCH 147/348] Fix `cargo-check-benches` job (#12301) --- .gitlab-ci.yml | 18 +++++++++--------- scripts/ci/gitlab/pipeline/test.yml | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a639505b3bb03..9df4c0b618661 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -131,31 +131,31 @@ default: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 -.test-refs-no-trigger: +# handle the specific case where benches could store incorrect bench data because of the downstream staging runs +# exclude cargo-check-benches from such runs +.test-refs-check-benches: rules: - - if: $CI_PIPELINE_SOURCE == "pipeline" + - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "parent_pipeline" && $CI_IMAGE =~ /staging$/ when: never - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ -.test-refs-no-trigger-prs-only: +.test-refs-no-trigger: rules: - if: $CI_PIPELINE_SOURCE == "pipeline" when: never - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE == "schedule" + - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ -# handle the specific case where benches could store incorrect bench data because of the downstream staging runs -# exclude cargo-check-benches from such runs -.test-refs-no-trigger-prs-only-check-benches: +.test-refs-no-trigger-prs-only: rules: - - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "parent_pipeline" && $CI_IMAGE =~ /staging$/ - when: never - if: $CI_PIPELINE_SOURCE == "pipeline" when: never - if: $CI_PIPELINE_SOURCE == "web" diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index bbe38cfb02293..e85ab85afe6ed 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -59,7 +59,7 @@ cargo-check-benches: CI_JOB_NAME: "cargo-check-benches" extends: - .docker-env - - .test-refs-no-trigger-prs-only-check-benches + - .test-refs-check-benches - .collect-artifacts - .pipeline-stopper-artifacts before_script: From 625de4f9cb50cda954df07552222be4ee0de774d Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Tue, 20 Sep 2022 12:31:58 +0200 Subject: [PATCH 148/348] [ci] Revert cancel-pipeline job (#12309) * [WIP][ci] Revert cancel-pipeline job * fail test-linux-stable * fix test-linux-stable --- .gitlab-ci.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9df4c0b618661..e6f26cbfd5cf7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -41,7 +41,7 @@ workflow: - if: $CI_COMMIT_TAG - if: $CI_COMMIT_BRANCH -variables: &default-vars +variables: GIT_STRATEGY: fetch GIT_DEPTH: 100 CARGO_INCREMENTAL: 0 @@ -278,8 +278,6 @@ rusty-cachier-notify: PR_NUM: "${PR_NUM}" trigger: project: "parity/infrastructure/ci_cd/pipeline-stopper" - # remove branch, when pipeline-stopper for polakdot is updated to the same branch - branch: "as-improve" # need to copy jobs this way because 
otherwise gitlab will wait # for all 3 jobs to finish instead of cancelling if one fails From e7c515dad67ee8d9c197cc9c0def8ecb3cd72e97 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Tue, 20 Sep 2022 17:17:34 +0300 Subject: [PATCH 149/348] Proper naming wrt expectations (#12311) Signed-off-by: Andrei Sandu Signed-off-by: Andrei Sandu --- utils/frame/try-runtime/cli/src/commands/execute_block.rs | 2 +- utils/frame/try-runtime/cli/src/commands/follow_chain.rs | 2 +- utils/frame/try-runtime/cli/src/commands/offchain_worker.rs | 2 +- .../frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs | 2 +- utils/frame/try-runtime/cli/src/lib.rs | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 70ba3615f874a..0c8b968d1b4b1 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -195,7 +195,7 @@ where block_ws_uri.clone(), expected_spec_name, expected_spec_version, - shared.no_spec_name_check, + shared.no_spec_check_panic, ) .await; diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index f493d5c10cd29..88866b538169b 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -275,7 +275,7 @@ where command.uri.clone(), expected_spec_name, expected_spec_version, - shared.no_spec_name_check, + shared.no_spec_check_panic, ) .await; diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index a579692abd9e2..b0404fff18135 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -152,7 +152,7 @@ where header_ws_uri, expected_spec_name, expected_spec_version, - shared.no_spec_name_check, + shared.no_spec_check_panic, ) .await; diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 9bc6d32d4762a..5055e4fb34581 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -64,7 +64,7 @@ where uri, expected_spec_name, expected_spec_version, - shared.no_spec_name_check, + shared.no_spec_check_panic, ) .await; } diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index c3382045add16..b8a2779d57e19 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -421,9 +421,9 @@ pub struct SharedParams { #[clap(long)] pub heap_pages: Option, - /// When enabled, the spec name check will not panic, and instead only show a warning. + /// When enabled, the spec check will not panic, and instead only show a warning. #[clap(long)] - pub no_spec_name_check: bool, + pub no_spec_check_panic: bool, /// State version that is used by the chain. 
#[clap(long, default_value = "1", parse(try_from_str = parse::state_version))] From 97ad64bd507eacf1dc8c597cf8538c98facb3220 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Tue, 20 Sep 2022 18:05:44 +0300 Subject: [PATCH 150/348] Import target block body during warp sync (#12300) * Receive and import target block body * Request target block * minor: wording * Check for block body in the test * Import target block justifications * Fix: do not fail block validation if no justifications received * Fix: import target blocks without justifications Co-authored-by: arkpar --- client/network/common/src/sync.rs | 2 + client/network/common/src/sync/warp.rs | 3 + client/network/src/protocol.rs | 2 + client/network/sync/src/lib.rs | 90 ++++++++++++++++++--- client/network/sync/src/state.rs | 27 +++++-- client/network/sync/src/warp.rs | 105 ++++++++++++++++++++++--- client/network/test/src/lib.rs | 9 ++- client/network/test/src/sync.rs | 4 +- 8 files changed, 211 insertions(+), 31 deletions(-) diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 2ee8f8c51814b..020b2c9efa4c7 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -96,6 +96,8 @@ pub enum OnBlockData { Import(BlockOrigin, Vec>), /// A new block request needs to be made to the given peer. Request(PeerId, BlockRequest), + /// Continue processing events. + Continue, } /// Result of [`ChainSync::on_block_justification`]. diff --git a/client/network/common/src/sync/warp.rs b/client/network/common/src/sync/warp.rs index 339a4c33a7eeb..c9b9037542388 100644 --- a/client/network/common/src/sync/warp.rs +++ b/client/network/common/src/sync/warp.rs @@ -64,6 +64,8 @@ pub enum WarpSyncPhase { AwaitingPeers, /// Downloading and verifying grandpa warp proofs. DownloadingWarpProofs, + /// Downloading target block. + DownloadingTargetBlock, /// Downloading state data. DownloadingState, /// Importing state. 
@@ -77,6 +79,7 @@ impl fmt::Display for WarpSyncPhase { match self { Self::AwaitingPeers => write!(f, "Waiting for peers"), Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), + Self::DownloadingTargetBlock => write!(f, "Downloading target block"), Self::DownloadingState => write!(f, "Downloading state"), Self::ImportingState => write!(f, "Importing state"), Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n), diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 9bcf363cd0692..a7af43d53d3b7 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -627,6 +627,7 @@ where CustomMessageOutcome::BlockImport(origin, blocks), Ok(OnBlockData::Request(peer, req)) => prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req), + Ok(OnBlockData::Continue) => CustomMessageOutcome::None, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -974,6 +975,7 @@ where CustomMessageOutcome::BlockImport(origin, blocks), Ok(OnBlockData::Request(peer, req)) => prepare_block_request(self.chain_sync.as_ref(), &mut self.peers, peer, req), + Ok(OnBlockData::Continue) => CustomMessageOutcome::None, Err(BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index aae5f4de353fe..6ad4a8fbbdcc5 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -80,6 +80,7 @@ use std::{ pin::Pin, sync::Arc, }; +use warp::TargetBlockImportResult; mod extra_requests; @@ -315,6 +316,8 @@ pub enum PeerSyncState { DownloadingState, /// Downloading warp proof. DownloadingWarpProof, + /// Downloading warp sync target block. + DownloadingWarpTargetBlock, /// Actively downloading block history after warp sync. DownloadingGap(NumberFor), } @@ -659,10 +662,11 @@ where } fn block_requests(&mut self) -> Box)> + '_> { - if self.allowed_requests.is_empty() || - self.state_sync.is_some() || - self.mode == SyncMode::Warp - { + if self.mode == SyncMode::Warp { + return Box::new(std::iter::once(self.warp_target_block_request()).flatten()) + } + + if self.allowed_requests.is_empty() || self.state_sync.is_some() { return Box::new(std::iter::empty()) } @@ -824,7 +828,7 @@ where // Only one pending state request is allowed. 
return None } - if let Some(request) = sync.next_warp_poof_request() { + if let Some(request) = sync.next_warp_proof_request() { let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect(); if !targets.is_empty() { targets.sort(); @@ -1031,6 +1035,40 @@ where Vec::new() } }, + PeerSyncState::DownloadingWarpTargetBlock => { + peer.state = PeerSyncState::Available; + if let Some(warp_sync) = &mut self.warp_sync { + if blocks.len() == 1 { + validate_blocks::(&blocks, who, Some(request))?; + match warp_sync.import_target_block( + blocks.pop().expect("`blocks` len checked above."), + ) { + TargetBlockImportResult::Success => + return Ok(OnBlockData::Continue), + TargetBlockImportResult::BadResponse => + return Err(BadPeer(*who, rep::VERIFICATION_FAIL)), + } + } else if blocks.is_empty() { + debug!(target: "sync", "Empty block response from {}", who); + return Err(BadPeer(*who, rep::NO_BLOCK)) + } else { + debug!( + target: "sync", + "Too many blocks ({}) in warp target block response from {}", + blocks.len(), + who, + ); + return Err(BadPeer(*who, rep::NOT_REQUESTED)) + } + } else { + debug!( + target: "sync", + "Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.", + who, + ); + return Ok(OnBlockData::Continue) + } + }, PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) | PeerSyncState::DownloadingState | @@ -1112,14 +1150,14 @@ where }; match import_result { - state::ImportResult::Import(hash, header, state) => { + state::ImportResult::Import(hash, header, state, body, justifications) => { let origin = BlockOrigin::NetworkInitialSync; let block = IncomingBlock { hash, header: Some(header), - body: None, + body, indexed_body: None, - justifications: None, + justifications, origin: None, allow_missing_state: true, import_existing: true, @@ -1399,8 +1437,13 @@ where number, hash, ); - self.state_sync = - Some(StateSync::new(self.client.clone(), header, *skip_proofs)); + self.state_sync = Some(StateSync::new( + self.client.clone(), + header, + None, + None, + *skip_proofs, + )); self.allowed_requests.set_all(); } } @@ -2163,6 +2206,33 @@ where }) .collect() } + + /// Generate block request for downloading of the target block body during warp sync. + fn warp_target_block_request(&mut self) -> Option<(&PeerId, BlockRequest)> { + if let Some(sync) = &self.warp_sync { + if self.allowed_requests.is_empty() || + sync.is_complete() || + self.peers + .iter() + .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpTargetBlock) + { + // Only one pending warp target block request is allowed. + return None + } + if let Some((target_number, request)) = sync.next_target_block_request() { + // Find a random peer that has a block with the target number. 
+ for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.best_number >= target_number { + trace!(target: "sync", "New warp target block request for {}", id); + peer.state = PeerSyncState::DownloadingWarpTargetBlock; + self.allowed_requests.clear(); + return Some((id, request)) + } + } + } + } + None + } } // This is purely during a backwards compatible transitionary period and should be removed diff --git a/client/network/sync/src/state.rs b/client/network/sync/src/state.rs index e70d3b6b33a28..9f64b52334c8a 100644 --- a/client/network/sync/src/state.rs +++ b/client/network/sync/src/state.rs @@ -26,7 +26,10 @@ use sc_consensus::ImportedState; use sc_network_common::sync::StateDownloadProgress; use smallvec::SmallVec; use sp_core::storage::well_known_keys; -use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; +use sp_runtime::{ + traits::{Block as BlockT, Header, NumberFor}, + Justifications, +}; use std::{collections::HashMap, sync::Arc}; /// State sync state machine. Accumulates partial state data until it @@ -35,6 +38,8 @@ pub struct StateSync { target_block: B::Hash, target_header: B::Header, target_root: B::Hash, + target_body: Option>, + target_justifications: Option, last_key: SmallVec<[Vec; 2]>, state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, complete: bool, @@ -46,7 +51,7 @@ pub struct StateSync { /// Import state chunk result. pub enum ImportResult { /// State is complete and ready for import. - Import(B::Hash, B::Header, ImportedState), + Import(B::Hash, B::Header, ImportedState, Option>, Option), /// Continue downloading. Continue, /// Bad state chunk. @@ -59,12 +64,20 @@ where Client: ProofProvider + Send + Sync + 'static, { /// Create a new instance. - pub fn new(client: Arc, target: B::Header, skip_proof: bool) -> Self { + pub fn new( + client: Arc, + target_header: B::Header, + target_body: Option>, + target_justifications: Option, + skip_proof: bool, + ) -> Self { Self { client, - target_block: target.hash(), - target_root: *target.state_root(), - target_header: target, + target_block: target_header.hash(), + target_root: *target_header.state_root(), + target_header, + target_body, + target_justifications, last_key: SmallVec::default(), state: HashMap::default(), complete: false, @@ -213,6 +226,8 @@ where block: self.target_block, state: std::mem::take(&mut self.state).into(), }, + self.target_body.clone(), + self.target_justifications.clone(), ) } else { ImportResult::Continue diff --git a/client/network/sync/src/warp.rs b/client/network/sync/src/warp.rs index f3fad6c1b7fdb..4f2a71d98613e 100644 --- a/client/network/sync/src/warp.rs +++ b/client/network/sync/src/warp.rs @@ -23,17 +23,21 @@ use crate::{ state::{ImportResult, StateSync}, }; use sc_client_api::ProofProvider; -use sc_network_common::sync::warp::{ - EncodedProof, VerificationResult, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, - WarpSyncProvider, +use sc_network_common::sync::{ + message::{BlockAttributes, BlockData, BlockRequest, Direction, FromBlock}, + warp::{ + EncodedProof, VerificationResult, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, + WarpSyncProvider, + }, }; use sp_blockchain::HeaderBackend; use sp_finality_grandpa::{AuthorityList, SetId}; -use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor, Zero}; use std::sync::Arc; enum Phase { WarpProof { set_id: SetId, authorities: AuthorityList, last_hash: B::Hash }, + TargetBlock(B::Header), State(StateSync), } @@ -45,6 +49,14 @@ pub enum 
WarpProofImportResult { BadResponse, } +/// Import target block result. +pub enum TargetBlockImportResult { + /// Import was successful. + Success, + /// Invalid block. + BadResponse, +} + /// Warp sync state machine. Accumulates warp proofs and state. pub struct WarpSync { phase: Phase, @@ -72,7 +84,7 @@ where /// Validate and import a state response. pub fn import_state(&mut self, response: StateResponse) -> ImportResult { match &mut self.phase { - Phase::WarpProof { .. } => { + Phase::WarpProof { .. } | Phase::TargetBlock(_) => { log::debug!(target: "sync", "Unexpected state response"); ImportResult::BadResponse }, @@ -83,7 +95,7 @@ where /// Validate and import a warp proof response. pub fn import_warp_proof(&mut self, response: EncodedProof) -> WarpProofImportResult { match &mut self.phase { - Phase::State(_) => { + Phase::State(_) | Phase::TargetBlock(_) => { log::debug!(target: "sync", "Unexpected warp proof response"); WarpProofImportResult::BadResponse }, @@ -104,8 +116,7 @@ where Ok(VerificationResult::Complete(new_set_id, _, header)) => { log::debug!(target: "sync", "Verified complete proof, set_id={:?}", new_set_id); self.total_proof_bytes += response.0.len() as u64; - let state_sync = StateSync::new(self.client.clone(), header, false); - self.phase = Phase::State(state_sync); + self.phase = Phase::TargetBlock(header); WarpProofImportResult::Success }, } @@ -113,35 +124,100 @@ where } } + /// Import the target block body. + pub fn import_target_block(&mut self, block: BlockData) -> TargetBlockImportResult { + match &mut self.phase { + Phase::WarpProof { .. } | Phase::State(_) => { + log::debug!(target: "sync", "Unexpected target block response"); + TargetBlockImportResult::BadResponse + }, + Phase::TargetBlock(header) => + if let Some(block_header) = &block.header { + if block_header == header { + if block.body.is_some() { + let state_sync = StateSync::new( + self.client.clone(), + header.clone(), + block.body, + block.justifications, + false, + ); + self.phase = Phase::State(state_sync); + TargetBlockImportResult::Success + } else { + log::debug!( + target: "sync", + "Importing target block failed: missing body.", + ); + TargetBlockImportResult::BadResponse + } + } else { + log::debug!( + target: "sync", + "Importing target block failed: different header.", + ); + TargetBlockImportResult::BadResponse + } + } else { + log::debug!(target: "sync", "Importing target block failed: missing header."); + TargetBlockImportResult::BadResponse + }, + } + } + /// Produce next state request. pub fn next_state_request(&self) -> Option { match &self.phase { Phase::WarpProof { .. } => None, + Phase::TargetBlock(_) => None, Phase::State(sync) => Some(sync.next_request()), } } /// Produce next warp proof request. - pub fn next_warp_poof_request(&self) -> Option> { + pub fn next_warp_proof_request(&self) -> Option> { match &self.phase { - Phase::State(_) => None, Phase::WarpProof { last_hash, .. } => Some(WarpProofRequest { begin: *last_hash }), + Phase::TargetBlock(_) => None, + Phase::State(_) => None, + } + } + + /// Produce next target block request. + pub fn next_target_block_request(&self) -> Option<(NumberFor, BlockRequest)> { + match &self.phase { + Phase::WarpProof { .. 
} => None, + Phase::TargetBlock(header) => { + let request = BlockRequest:: { + id: 0, + fields: BlockAttributes::HEADER | + BlockAttributes::BODY | BlockAttributes::JUSTIFICATION, + from: FromBlock::Hash(header.hash()), + to: Some(header.hash()), + direction: Direction::Ascending, + max: Some(1), + }; + Some((*header.number(), request)) + }, + Phase::State(_) => None, } } /// Return target block hash if it is known. pub fn target_block_hash(&self) -> Option { match &self.phase { - Phase::State(s) => Some(s.target()), Phase::WarpProof { .. } => None, + Phase::TargetBlock(_) => None, + Phase::State(s) => Some(s.target()), } } /// Return target block number if it is known. pub fn target_block_number(&self) -> Option> { match &self.phase { - Phase::State(s) => Some(s.target_block_num()), Phase::WarpProof { .. } => None, + Phase::TargetBlock(header) => Some(*header.number()), + Phase::State(s) => Some(s.target_block_num()), } } @@ -149,6 +225,7 @@ where pub fn is_complete(&self) -> bool { match &self.phase { Phase::WarpProof { .. } => false, + Phase::TargetBlock(_) => false, Phase::State(sync) => sync.is_complete(), } } @@ -160,6 +237,10 @@ where phase: WarpSyncPhase::DownloadingWarpProofs, total_bytes: self.total_proof_bytes, }, + Phase::TargetBlock(_) => WarpSyncProgress { + phase: WarpSyncPhase::DownloadingTargetBlock, + total_bytes: self.total_proof_bytes, + }, Phase::State(sync) => WarpSyncProgress { phase: if self.is_complete() { WarpSyncPhase::ImportingState diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 5a29e587ceff5..e78b91a4e04ee 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -69,7 +69,7 @@ use sc_network_sync::{ use sc_service::client::Client; use sp_blockchain::{ well_known_cache_keys::{self, Id as CacheKeyId}, - HeaderBackend, Info as BlockchainInfo, Result as ClientResult, + Backend as BlockchainBackend, HeaderBackend, Info as BlockchainInfo, Result as ClientResult, }; use sp_consensus::{ block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator}, @@ -540,6 +540,13 @@ where .map(|backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some()) .unwrap_or(false) } + + pub fn has_body(&self, hash: &H256) -> bool { + self.backend + .as_ref() + .map(|backend| backend.blockchain().body(BlockId::hash(*hash)).unwrap().is_some()) + .unwrap_or(false) + } } pub trait BlockImportAdapterFull: diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index c0778767b75af..6115e0e3b2c86 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1192,7 +1192,7 @@ fn warp_sync() { ..Default::default() }); let gap_end = net.peer(0).push_blocks(63, false); - net.peer(0).push_blocks(1, false); + let target = net.peer(0).push_blocks(1, false); net.peer(1).push_blocks(64, false); net.peer(2).push_blocks(64, false); // Wait for peer 1 to sync state. 
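
Aside: taken together, the hunks above give warp sync three request-producing phases instead of two. The following simplified sketch shows the phase-to-request mapping they implement; the enums are stand-ins for the real `Phase`, `WarpProofRequest`, `BlockRequest` and state request types, and are illustrative only.

    // Simplified stand-ins for the real warp-sync types; illustrative only.
    enum Phase {
        WarpProof,   // accumulate and verify finality proofs
        TargetBlock, // fetch header/body/justifications of the proven target block
        State,       // stream and import the state of that block
    }

    enum Request {
        WarpProof,
        TargetBlock,
        State,
    }

    // Each phase drives exactly one kind of outgoing request; once the target
    // block body arrives, state sync is seeded with that body and its
    // justifications so the final import has everything it needs.
    fn next_request(phase: &Phase) -> Request {
        match phase {
            Phase::WarpProof => Request::WarpProof,
            Phase::TargetBlock => Request::TargetBlock,
            Phase::State => Request::State,
        }
    }

The test hunk below follows from this: the syncing peer now waits for the block bodies of both the gap end and the warp-sync target, since the target body is downloaded as part of warp sync.
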
@@ -1203,7 +1203,7 @@ fn warp_sync() { // Wait for peer 1 download block history block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(3).has_block(&gap_end) { + if net.peer(3).has_body(&gap_end) && net.peer(3).has_body(&target) { Poll::Ready(()) } else { Poll::Pending From de5a7f60a5144d79e3641ac7fe9d2019aef35931 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 20 Sep 2022 18:18:03 +0300 Subject: [PATCH 151/348] rpc: Implement `chainSpec` RPC API (#12261) * rpc/chain_spec: Add traits for `chainSpec` API Signed-off-by: Alexandru Vasile * rpc/chain_spec: Implement `chainSpec` RPC methods Signed-off-by: Alexandru Vasile * rpc/chain_spec: Add tests Signed-off-by: Alexandru Vasile * bin/node: Enable `chainSpec` API Signed-off-by: Alexandru Vasile * rpc/chain_spec: Assume `genesis_hash` as non-empty Signed-off-by: Alexandru Vasile * Update client/rpc-spec/Cargo.toml Co-authored-by: Niklas Adolfsson * Update client/rpc-spec/src/lib.rs Co-authored-by: Niklas Adolfsson * client/rpc_spec: Rename crate to `rpc_spec_v2` Signed-off-by: Alexandru Vasile * rpc-servers: Remove the `version` field from `rpc_methods` Signed-off-by: Alexandru Vasile * rpc/chain_spec: Fix copyright years Signed-off-by: Alexandru Vasile Signed-off-by: Alexandru Vasile Co-authored-by: Niklas Adolfsson --- Cargo.lock | 12 ++++ Cargo.toml | 1 + bin/node/rpc/Cargo.toml | 1 + bin/node/rpc/src/lib.rs | 6 ++ client/rpc-servers/src/lib.rs | 1 - client/rpc-spec-v2/Cargo.toml | 23 +++++++ client/rpc-spec-v2/README.md | 5 ++ client/rpc-spec-v2/src/chain_spec/api.rs | 53 ++++++++++++++++ .../rpc-spec-v2/src/chain_spec/chain_spec.rs | 60 ++++++++++++++++++ client/rpc-spec-v2/src/chain_spec/mod.rs | 38 ++++++++++++ client/rpc-spec-v2/src/chain_spec/tests.rs | 61 +++++++++++++++++++ client/rpc-spec-v2/src/lib.rs | 26 ++++++++ 12 files changed, 286 insertions(+), 1 deletion(-) create mode 100644 client/rpc-spec-v2/Cargo.toml create mode 100644 client/rpc-spec-v2/README.md create mode 100644 client/rpc-spec-v2/src/chain_spec/api.rs create mode 100644 client/rpc-spec-v2/src/chain_spec/chain_spec.rs create mode 100644 client/rpc-spec-v2/src/chain_spec/mod.rs create mode 100644 client/rpc-spec-v2/src/chain_spec/tests.rs create mode 100644 client/rpc-spec-v2/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 8216b8b92aef5..d04f82f4bbecc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4836,6 +4836,7 @@ dependencies = [ "sc-finality-grandpa-rpc", "sc-rpc", "sc-rpc-api", + "sc-rpc-spec-v2", "sc-sync-state-rpc", "sc-transaction-pool-api", "sp-api", @@ -8774,6 +8775,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "sc-rpc-spec-v2" +version = "0.10.0-dev" +dependencies = [ + "hex", + "jsonrpsee", + "sc-chain-spec", + "serde_json", + "tokio", +] + [[package]] name = "sc-runtime-test" version = "2.0.0" diff --git a/Cargo.toml b/Cargo.toml index 4dbf65dc7e1fe..48a31940fd3bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,7 @@ members = [ "client/rpc", "client/rpc-api", "client/rpc-servers", + "client/rpc-spec-v2", "client/service", "client/service/test", "client/state-db", diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 851eb2cfc5104..0b69ae27010fa 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -26,6 +26,7 @@ sc-finality-grandpa = { version = "0.10.0-dev", path = "../../../client/finality sc-finality-grandpa-rpc = { version = "0.10.0-dev", path = "../../../client/finality-grandpa/rpc" } sc-rpc = { version = "4.0.0-dev", path 
= "../../../client/rpc" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } +sc-rpc-spec-v2 = { version = "0.10.0-dev", path = "../../../client/rpc-spec-v2" } sc-sync-state-rpc = { version = "0.10.0-dev", path = "../../../client/sync-state-rpc" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 1c8b9cce1a744..0e6b04087fa63 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -124,6 +124,7 @@ where use sc_consensus_babe_rpc::{Babe, BabeApiServer}; use sc_finality_grandpa_rpc::{Grandpa, GrandpaApiServer}; use sc_rpc::dev::{Dev, DevApiServer}; + use sc_rpc_spec_v2::chain_spec::{ChainSpec, ChainSpecApiServer}; use sc_sync_state_rpc::{SyncState, SyncStateApiServer}; use substrate_frame_rpc_system::{System, SystemApiServer}; use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; @@ -140,6 +141,11 @@ where finality_provider, } = grandpa; + let chain_name = chain_spec.name().to_string(); + let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); + let properties = chain_spec.properties(); + io.merge(ChainSpec::new(chain_name, genesis_hash, properties).into_rpc())?; + io.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; // Making synchronous calls in light client freezes the browser currently, // more context: https://github.com/paritytech/substrate/pull/3480 diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 68b4aa6767348..0e2aca0dcc829 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -203,7 +203,6 @@ fn build_rpc_api(mut rpc_api: RpcModule) -> RpcModu rpc_api .register_method("rpc_methods", move |_, _| { Ok(serde_json::json!({ - "version": 1, "methods": available_methods, })) }) diff --git a/client/rpc-spec-v2/Cargo.toml b/client/rpc-spec-v2/Cargo.toml new file mode 100644 index 0000000000000..12dec7464e6d0 --- /dev/null +++ b/client/rpc-spec-v2/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "sc-rpc-spec-v2" +version = "0.10.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate RPC interface v2." +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } +# Internal chain structures for "chain_spec". +sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } +hex = "0.4" + +[dev-dependencies] +serde_json = "1.0" +tokio = { version = "1.17.0", features = ["macros"] } diff --git a/client/rpc-spec-v2/README.md b/client/rpc-spec-v2/README.md new file mode 100644 index 0000000000000..e860e0c2334da --- /dev/null +++ b/client/rpc-spec-v2/README.md @@ -0,0 +1,5 @@ +Substrate RPC interfaces. + +A collection of RPC methods and subscriptions supported by all substrate clients. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/rpc-spec-v2/src/chain_spec/api.rs b/client/rpc-spec-v2/src/chain_spec/api.rs new file mode 100644 index 0000000000000..dfe2d76de6501 --- /dev/null +++ b/client/rpc-spec-v2/src/chain_spec/api.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! API trait of the chain spec. + +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use sc_chain_spec::Properties; + +#[rpc(client, server)] +pub trait ChainSpecApi { + /// Get the chain name, as present in the chain specification. + /// + /// # Unstable + /// + /// This method is unstable and subject to change in the future. + #[method(name = "chainSpec_unstable_chainName")] + fn chain_spec_unstable_chain_name(&self) -> RpcResult; + + /// Get the chain's genesis hash. + /// + /// # Unstable + /// + /// This method is unstable and subject to change in the future. + #[method(name = "chainSpec_unstable_genesisHash")] + fn chain_spec_unstable_genesis_hash(&self) -> RpcResult; + + /// Get the properties of the chain, as present in the chain specification. + /// + /// # Note + /// + /// The json whitespaces are not guaranteed to persist. + /// + /// # Unstable + /// + /// This method is unstable and subject to change in the future. + #[method(name = "chainSpec_unstable_properties")] + fn chain_spec_unstable_properties(&self) -> RpcResult; +} diff --git a/client/rpc-spec-v2/src/chain_spec/chain_spec.rs b/client/rpc-spec-v2/src/chain_spec/chain_spec.rs new file mode 100644 index 0000000000000..90d05f1d9d41d --- /dev/null +++ b/client/rpc-spec-v2/src/chain_spec/chain_spec.rs @@ -0,0 +1,60 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! API implementation for the specification of a chain. + +use crate::chain_spec::api::ChainSpecApiServer; +use jsonrpsee::core::RpcResult; +use sc_chain_spec::Properties; + +/// An API for chain spec RPC calls. +pub struct ChainSpec { + /// The name of the chain. + name: String, + /// The hexadecimal encoded hash of the genesis block. 
+ genesis_hash: String, + /// Chain properties. + properties: Properties, +} + +impl ChainSpec { + /// Creates a new [`ChainSpec`]. + pub fn new>( + name: String, + genesis_hash: Hash, + properties: Properties, + ) -> Self { + let genesis_hash = format!("0x{}", hex::encode(genesis_hash)); + + Self { name, properties, genesis_hash } + } +} + +impl ChainSpecApiServer for ChainSpec { + fn chain_spec_unstable_chain_name(&self) -> RpcResult { + Ok(self.name.clone()) + } + + fn chain_spec_unstable_genesis_hash(&self) -> RpcResult { + Ok(self.genesis_hash.clone()) + } + + fn chain_spec_unstable_properties(&self) -> RpcResult { + Ok(self.properties.clone()) + } +} diff --git a/client/rpc-spec-v2/src/chain_spec/mod.rs b/client/rpc-spec-v2/src/chain_spec/mod.rs new file mode 100644 index 0000000000000..cd4fcf246f603 --- /dev/null +++ b/client/rpc-spec-v2/src/chain_spec/mod.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate chain specification API. +//! +//! The *chain spec* (short for *chain specification*) allows inspecting the content of +//! the specification of the chain that a JSON-RPC server is targeting. +//! +//! The values returned by the API are guaranteed to never change during the lifetime of the +//! JSON-RPC server. +//! +//! # Note +//! +//! Methods are prefixed by `chainSpec`. + +#[cfg(test)] +mod tests; + +pub mod api; +pub mod chain_spec; + +pub use api::ChainSpecApiServer; +pub use chain_spec::ChainSpec; diff --git a/client/rpc-spec-v2/src/chain_spec/tests.rs b/client/rpc-spec-v2/src/chain_spec/tests.rs new file mode 100644 index 0000000000000..6c078b2974e98 --- /dev/null +++ b/client/rpc-spec-v2/src/chain_spec/tests.rs @@ -0,0 +1,61 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use super::*; +use jsonrpsee::{types::EmptyParams, RpcModule}; +use sc_chain_spec::Properties; + +const CHAIN_NAME: &'static str = "TEST_CHAIN_NAME"; +const CHAIN_GENESIS: [u8; 32] = [0; 32]; +const CHAIN_PROPERTIES: &'static str = r#"{"three": "123", "one": 1, "two": 12}"#; + +fn api() -> RpcModule { + ChainSpec::new( + CHAIN_NAME.to_string(), + CHAIN_GENESIS, + serde_json::from_str(CHAIN_PROPERTIES).unwrap(), + ) + .into_rpc() +} + +#[tokio::test] +async fn chain_spec_chain_name_works() { + let name = api() + .call::<_, String>("chainSpec_unstable_chainName", EmptyParams::new()) + .await + .unwrap(); + assert_eq!(name, CHAIN_NAME); +} + +#[tokio::test] +async fn chain_spec_genesis_hash_works() { + let genesis = api() + .call::<_, String>("chainSpec_unstable_genesisHash", EmptyParams::new()) + .await + .unwrap(); + assert_eq!(genesis, format!("0x{}", hex::encode(CHAIN_GENESIS))); +} + +#[tokio::test] +async fn chain_spec_properties_works() { + let properties = api() + .call::<_, Properties>("chainSpec_unstable_properties", EmptyParams::new()) + .await + .unwrap(); + assert_eq!(properties, serde_json::from_str(CHAIN_PROPERTIES).unwrap()); +} diff --git a/client/rpc-spec-v2/src/lib.rs b/client/rpc-spec-v2/src/lib.rs new file mode 100644 index 0000000000000..297fda13172d6 --- /dev/null +++ b/client/rpc-spec-v2/src/lib.rs @@ -0,0 +1,26 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate JSON-RPC interface v2. +//! +//! Specification [document](https://paritytech.github.io/json-rpc-interface-spec/). + +#![warn(missing_docs)] +#![deny(unused_crate_dependencies)] + +pub mod chain_spec; From 006dbc01304b9d3644f3fc4c1bf0e610acd3ad90 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 20 Sep 2022 23:19:38 +0200 Subject: [PATCH 152/348] Use temporary db for benchmarking (#12254) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use temporary db for benchmarking If no db option was given benchmarks shall use temporary database. Otherwise the test can use locally stored database which maybe out-of-date causing test to fail. 
* nicer syntax * explanatory comment added * Update utils/frame/benchmarking-cli/src/lib.rs Co-authored-by: Bastian Köcher --- utils/frame/benchmarking-cli/src/lib.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index 6e4f092084ef5..a44a208b16ae9 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -30,6 +30,7 @@ pub use extrinsic::{ExtrinsicBuilder, ExtrinsicCmd, ExtrinsicFactory}; pub use machine::{MachineCmd, Requirements, SUBSTRATE_REFERENCE_HARDWARE}; pub use overhead::OverheadCmd; pub use pallet::PalletCmd; +pub use sc_service::BasePath; pub use storage::StorageCmd; use sc_cli::{CliConfiguration, DatabaseParams, ImportParams, PruningParams, Result, SharedParams}; @@ -87,6 +88,19 @@ impl CliConfiguration for BenchmarkCmd { } } + fn base_path(&self) -> Result> { + let inner = unwrap_cmd! { + self, cmd, cmd.base_path() + }; + + // If the base path was not provided, benchmark command shall use temporary path. Otherwise + // we may end up using shared path, which may be inappropriate for benchmarking. + match inner { + Ok(None) => Some(BasePath::new_temp_dir()).transpose().map_err(|e| e.into()), + e => e, + } + } + fn pruning_params(&self) -> Option<&PruningParams> { unwrap_cmd! { self, cmd, cmd.pruning_params() From a47f200eebeb88a5bde6f1ed2be9728b82536dde Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Wed, 21 Sep 2022 00:13:09 +0200 Subject: [PATCH 153/348] BREAKING: Rename Origin (#12258) * BREAKING: Rename Origin * more renaming * a bit more renaming * fix * more fixing * fix in frame_support * even more fixes * fix * small fix * ... * update .stderr * docs * update docs * update docs * docs --- .../pallets/template/src/mock.rs | 2 +- .../pallets/template/src/tests.rs | 7 +- bin/node-template/runtime/src/lib.rs | 2 +- bin/node/runtime/src/lib.rs | 14 +- client/consensus/common/src/import_queue.rs | 20 +- .../common/src/import_queue/basic_queue.rs | 10 +- .../common/src/import_queue/buffered_link.rs | 6 +- client/network/src/behaviour.rs | 4 +- client/network/src/protocol.rs | 6 +- docs/Upgrading-2.0-to-3.0.md | 2 +- frame/alliance/src/lib.rs | 8 +- frame/alliance/src/mock.rs | 40 +- frame/alliance/src/tests.rs | 177 ++-- frame/assets/src/lib.rs | 2 +- frame/assets/src/mock.rs | 2 +- frame/assets/src/tests.rs | 614 ++++++++------ frame/atomic-swap/src/tests.rs | 22 +- frame/aura/src/mock.rs | 2 +- frame/authority-discovery/src/lib.rs | 2 +- frame/authorship/src/lib.rs | 4 +- frame/babe/src/mock.rs | 2 +- frame/babe/src/tests.rs | 24 +- frame/bags-list/src/mock.rs | 2 +- frame/bags-list/src/tests.rs | 59 +- frame/balances/src/tests.rs | 62 +- frame/balances/src/tests_composite.rs | 2 +- frame/balances/src/tests_local.rs | 2 +- frame/balances/src/tests_reentrancy.rs | 2 +- frame/beefy-mmr/src/mock.rs | 2 +- frame/beefy/src/mock.rs | 2 +- frame/benchmarking/src/baseline.rs | 2 +- frame/benchmarking/src/lib.rs | 12 +- frame/benchmarking/src/tests.rs | 4 +- frame/benchmarking/src/tests_instance.rs | 4 +- frame/bounties/src/benchmarking.rs | 8 +- frame/bounties/src/tests.rs | 275 ++++--- frame/child-bounties/src/tests.rs | 336 ++++---- frame/collective/src/lib.rs | 8 +- frame/collective/src/tests.rs | 293 ++++--- frame/contracts/src/exec.rs | 2 +- frame/contracts/src/lib.rs | 2 +- frame/contracts/src/tests.rs | 259 +++--- frame/conviction-voting/src/tests.rs | 343 ++++---- 
frame/democracy/src/benchmarking.rs | 16 +- frame/democracy/src/lib.rs | 24 +- frame/democracy/src/tests.rs | 10 +- frame/democracy/src/tests/cancellation.rs | 19 +- frame/democracy/src/tests/delegation.rs | 76 +- .../democracy/src/tests/external_proposing.rs | 60 +- frame/democracy/src/tests/fast_tracking.rs | 30 +- frame/democracy/src/tests/lock_voting.rs | 154 ++-- frame/democracy/src/tests/preimage.rs | 64 +- frame/democracy/src/tests/public_proposals.rs | 39 +- frame/democracy/src/tests/scheduling.rs | 26 +- frame/democracy/src/tests/voting.rs | 39 +- .../election-provider-multi-phase/src/lib.rs | 20 +- .../election-provider-multi-phase/src/mock.rs | 2 +- .../src/signed.rs | 62 +- .../src/unsigned.rs | 16 +- .../election-provider-support/src/onchain.rs | 2 +- frame/elections-phragmen/src/lib.rs | 769 +++++++++--------- frame/examples/basic/src/lib.rs | 2 +- frame/examples/basic/src/tests.rs | 10 +- frame/examples/offchain-worker/src/tests.rs | 6 +- frame/examples/parallel/src/tests.rs | 8 +- frame/executive/src/lib.rs | 4 +- frame/gilt/src/benchmarking.rs | 2 +- frame/gilt/src/lib.rs | 2 +- frame/gilt/src/mock.rs | 2 +- frame/gilt/src/tests.rs | 158 ++-- frame/grandpa/src/mock.rs | 2 +- frame/grandpa/src/tests.rs | 18 +- frame/identity/src/benchmarking.rs | 20 +- frame/identity/src/lib.rs | 4 +- frame/identity/src/tests.rs | 181 +++-- frame/im-online/src/mock.rs | 4 +- frame/im-online/src/tests.rs | 2 +- frame/indices/src/mock.rs | 2 +- frame/indices/src/tests.rs | 4 +- frame/lottery/src/benchmarking.rs | 6 +- frame/lottery/src/lib.rs | 4 +- frame/lottery/src/mock.rs | 2 +- frame/lottery/src/tests.rs | 110 +-- frame/membership/src/lib.rs | 71 +- frame/merkle-mountain-range/src/mock.rs | 2 +- frame/multisig/src/lib.rs | 2 +- frame/multisig/src/tests.rs | 202 ++--- frame/nicks/src/lib.rs | 43 +- frame/node-authorization/src/lib.rs | 8 +- frame/node-authorization/src/mock.rs | 2 +- frame/node-authorization/src/tests.rs | 100 ++- .../nomination-pools/benchmarking/src/lib.rs | 48 +- .../nomination-pools/benchmarking/src/mock.rs | 2 +- frame/nomination-pools/src/migration.rs | 2 +- frame/nomination-pools/src/mock.rs | 6 +- frame/nomination-pools/src/tests.rs | 631 +++++++------- .../nomination-pools/test-staking/src/lib.rs | 88 +- .../nomination-pools/test-staking/src/mock.rs | 4 +- frame/offences/benchmarking/src/mock.rs | 2 +- frame/offences/src/mock.rs | 2 +- frame/preimage/src/benchmarking.rs | 18 +- frame/preimage/src/lib.rs | 6 +- frame/preimage/src/mock.rs | 2 +- frame/preimage/src/tests.rs | 82 +- frame/proxy/src/lib.rs | 4 +- frame/proxy/src/tests.rs | 174 ++-- frame/randomness-collective-flip/src/lib.rs | 2 +- frame/ranked-collective/src/lib.rs | 28 +- frame/ranked-collective/src/tests.rs | 241 +++--- frame/recovery/src/lib.rs | 2 +- frame/recovery/src/mock.rs | 2 +- frame/recovery/src/tests.rs | 220 +++-- frame/referenda/src/benchmarking.rs | 24 +- frame/referenda/src/lib.rs | 12 +- frame/referenda/src/mock.rs | 14 +- frame/referenda/src/tests.rs | 86 +- frame/referenda/src/types.rs | 28 +- frame/remark/src/mock.rs | 2 +- frame/scheduler/src/benchmarking.rs | 2 +- frame/scheduler/src/lib.rs | 31 +- frame/scheduler/src/mock.rs | 6 +- frame/scheduler/src/tests.rs | 25 +- frame/scored-pool/README.md | 2 +- frame/scored-pool/src/lib.rs | 6 +- frame/scored-pool/src/mock.rs | 2 +- frame/scored-pool/src/tests.rs | 63 +- frame/session/benchmarking/src/mock.rs | 2 +- frame/session/src/mock.rs | 2 +- frame/session/src/tests.rs | 30 +- frame/society/src/lib.rs | 14 +- 
frame/society/src/mock.rs | 2 +- frame/society/src/tests.rs | 274 ++++--- frame/staking/src/benchmarking.rs | 8 +- frame/staking/src/mock.rs | 21 +- frame/staking/src/pallet/mod.rs | 12 +- frame/staking/src/tests.rs | 581 +++++++------ frame/state-trie-migration/src/lib.rs | 28 +- frame/sudo/src/lib.rs | 2 +- frame/sudo/src/mock.rs | 2 +- frame/sudo/src/tests.rs | 34 +- .../src/construct_runtime/expand/call.rs | 10 +- .../src/construct_runtime/expand/origin.rs | 34 +- .../procedural/src/pallet/expand/call.rs | 4 +- .../procedural/src/pallet/parse/mod.rs | 8 +- frame/support/src/dispatch.rs | 38 +- frame/support/src/error.rs | 2 +- frame/support/src/lib.rs | 6 +- .../src/storage/generator/double_map.rs | 4 +- frame/support/src/storage/generator/map.rs | 4 +- frame/support/src/storage/generator/mod.rs | 6 +- frame/support/src/storage/generator/nmap.rs | 4 +- frame/support/src/traits/dispatch.rs | 8 +- frame/support/src/traits/schedule.rs | 28 +- frame/support/test/compile_pass/src/lib.rs | 2 +- frame/support/test/src/lib.rs | 4 +- frame/support/test/tests/construct_runtime.rs | 10 +- .../no_std_genesis_config.rs | 2 +- .../old_unsupported_pallet_decl.rs | 2 +- .../pallet_error_too_large.rs | 2 +- .../undefined_call_part.rs | 2 +- .../undefined_event_part.rs | 2 +- .../undefined_genesis_config_part.rs | 2 +- .../undefined_inherent_part.rs | 2 +- .../undefined_origin_part.rs | 2 +- .../undefined_origin_part.stderr | 33 - .../undefined_validate_unsigned_part.rs | 2 +- ...served_keyword_two_times_integrity_test.rs | 2 +- ...ed_keyword_two_times_integrity_test.stderr | 2 +- ...eserved_keyword_two_times_on_initialize.rs | 2 +- ...ved_keyword_two_times_on_initialize.stderr | 2 +- frame/support/test/tests/decl_storage.rs | 16 +- .../tests/decl_storage_ui/config_duplicate.rs | 2 +- .../decl_storage_ui/config_get_duplicate.rs | 2 +- .../tests/decl_storage_ui/get_duplicate.rs | 2 +- frame/support/test/tests/final_keys.rs | 4 +- frame/support/test/tests/genesisconfig.rs | 4 +- frame/support/test/tests/instance.rs | 24 +- frame/support/test/tests/issue2219.rs | 4 +- frame/support/test/tests/origin.rs | 44 +- frame/support/test/tests/pallet.rs | 10 +- .../test/tests/pallet_compatibility.rs | 4 +- .../tests/pallet_compatibility_instance.rs | 4 +- frame/support/test/tests/pallet_instance.rs | 2 +- .../tests/pallet_with_name_trait_is_valid.rs | 4 +- frame/support/test/tests/storage_layers.rs | 17 +- .../support/test/tests/storage_transaction.rs | 4 +- frame/support/test/tests/system.rs | 4 +- frame/system/benches/bench.rs | 2 +- frame/system/benchmarking/src/mock.rs | 2 +- frame/system/src/lib.rs | 6 +- frame/system/src/mock.rs | 2 +- frame/system/src/tests.rs | 12 +- frame/timestamp/src/mock.rs | 2 +- frame/timestamp/src/tests.rs | 6 +- frame/tips/src/benchmarking.rs | 2 +- frame/tips/src/tests.rs | 129 +-- .../asset-tx-payment/src/tests.rs | 18 +- frame/transaction-payment/src/lib.rs | 2 +- frame/transaction-storage/src/lib.rs | 2 +- frame/transaction-storage/src/mock.rs | 4 +- frame/transaction-storage/src/tests.rs | 6 +- frame/treasury/src/benchmarking.rs | 6 +- frame/treasury/src/lib.rs | 6 +- frame/treasury/src/tests.rs | 120 +-- frame/uniques/src/lib.rs | 4 +- frame/uniques/src/mock.rs | 2 +- frame/uniques/src/tests.rs | 564 ++++++++----- frame/utility/src/benchmarking.rs | 6 +- frame/utility/src/lib.rs | 8 +- frame/utility/src/tests.rs | 74 +- frame/vesting/src/mock.rs | 2 +- frame/whitelist/src/benchmarking.rs | 8 +- frame/whitelist/src/lib.rs | 6 +- frame/whitelist/src/mock.rs | 2 +- 
frame/whitelist/src/tests.rs | 56 +- .../runtime/src/generic/checked_extrinsic.rs | 9 +- primitives/runtime/src/testing.rs | 11 +- primitives/runtime/src/traits.rs | 12 +- test-utils/runtime/src/lib.rs | 21 +- test-utils/runtime/src/system.rs | 2 +- utils/frame/rpc/support/src/lib.rs | 2 +- 221 files changed, 5238 insertions(+), 4205 deletions(-) diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index 3289ec2da4952..989681fa59a00 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -27,7 +27,7 @@ impl system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/bin/node-template/pallets/template/src/tests.rs b/bin/node-template/pallets/template/src/tests.rs index 2205658601721..527aec8ed00c0 100644 --- a/bin/node-template/pallets/template/src/tests.rs +++ b/bin/node-template/pallets/template/src/tests.rs @@ -5,7 +5,7 @@ use frame_support::{assert_noop, assert_ok}; fn it_works_for_default_value() { new_test_ext().execute_with(|| { // Dispatch a signed extrinsic. - assert_ok!(TemplateModule::do_something(Origin::signed(1), 42)); + assert_ok!(TemplateModule::do_something(RuntimeOrigin::signed(1), 42)); // Read pallet storage and assert an expected result. assert_eq!(TemplateModule::something(), Some(42)); }); @@ -15,6 +15,9 @@ fn it_works_for_default_value() { fn correct_error_for_none_value() { new_test_ext().execute_with(|| { // Ensure the expected error is thrown when no value is present. - assert_noop!(TemplateModule::cause_error(Origin::signed(1)), Error::::NoneValue); + assert_noop!( + TemplateModule::cause_error(RuntimeOrigin::signed(1)), + Error::::NoneValue + ); }); } diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index c309b27682080..f801068b10fda 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -174,7 +174,7 @@ impl frame_system::Config for Runtime { /// The ubiquitous event type. type RuntimeEvent = RuntimeEvent; /// The ubiquitous origin type. - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// The weight of database operations that the runtime can invoke. diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 8b150dca849f0..21ed23d6244a5 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -205,7 +205,7 @@ impl frame_system::Config for Runtime { type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type DbWeight = RocksDbWeight; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = Index; type BlockNumber = BlockNumber; @@ -346,7 +346,7 @@ parameter_types! { impl pallet_scheduler::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type PalletsOrigin = OriginCaller; type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; @@ -796,7 +796,7 @@ parameter_types! 
{ pub struct TracksInfo; impl pallet_referenda::TracksInfo for TracksInfo { type Id = u16; - type Origin = ::PalletsOrigin; + type RuntimeOrigin = ::PalletsOrigin; fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { static DATA: [(u16, pallet_referenda::TrackInfo); 1] = [( 0u16, @@ -822,7 +822,7 @@ impl pallet_referenda::TracksInfo for TracksInfo { )]; &DATA[..] } - fn track_for(id: &Self::Origin) -> Result { + fn track_for(id: &Self::RuntimeOrigin) -> Result { if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) { match system_origin { frame_system::RawOrigin::Root => Ok(0), @@ -957,7 +957,7 @@ parameter_types! { type CouncilCollective = pallet_collective::Instance1; impl pallet_collective::Config for Runtime { - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type MotionDuration = CouncilMotionDuration; @@ -1014,7 +1014,7 @@ parameter_types! { type TechnicalCollective = pallet_collective::Instance2; impl pallet_collective::Config for Runtime { - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type MotionDuration = TechnicalMotionDuration; @@ -1536,7 +1536,7 @@ parameter_types! { type AllianceCollective = pallet_collective::Instance3; impl pallet_collective::Config for Runtime { - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type MotionDuration = AllianceMotionDuration; diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index c71e21ccd4b00..3741fa99663cd 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -62,8 +62,8 @@ pub type BoxBlockImport = pub type BoxJustificationImport = Box + Send + Sync>; -/// Maps to the Origin used by the network. -pub type Origin = libp2p::PeerId; +/// Maps to the RuntimeOrigin used by the network. +pub type RuntimeOrigin = libp2p::PeerId; /// Block data used by the queue. #[derive(Debug, PartialEq, Eq, Clone)] @@ -79,7 +79,7 @@ pub struct IncomingBlock { /// Justification(s) if requested. pub justifications: Option, /// The peer, we received this from - pub origin: Option, + pub origin: Option, /// Allow importing the block skipping state verification if parent state is missing. pub allow_missing_state: bool, /// Skip block execution and state verification. @@ -112,7 +112,7 @@ pub trait ImportQueue: Send { /// Import block justifications. fn import_justifications( &mut self, - who: Origin, + who: RuntimeOrigin, hash: B::Hash, number: NumberFor, justifications: Justifications, @@ -140,7 +140,7 @@ pub trait Link: Send { /// Justification import result. fn justification_imported( &mut self, - _who: Origin, + _who: RuntimeOrigin, _hash: &B::Hash, _number: NumberFor, _success: bool, @@ -155,9 +155,9 @@ pub trait Link: Send { #[derive(Debug, PartialEq)] pub enum BlockImportStatus { /// Imported known block. - ImportedKnown(N, Option), + ImportedKnown(N, Option), /// Imported unknown block. 
- ImportedUnknown(N, ImportedAux, Option), + ImportedUnknown(N, ImportedAux, Option), } impl BlockImportStatus { @@ -175,15 +175,15 @@ impl BlockImportStatus { pub enum BlockImportError { /// Block missed header, can't be imported #[error("block is missing a header (origin = {0:?})")] - IncompleteHeader(Option), + IncompleteHeader(Option), /// Block verification failed, can't be imported #[error("block verification failed (origin = {0:?}): {1}")] - VerificationFailed(Option, String), + VerificationFailed(Option, String), /// Block is known to be Bad #[error("bad block (origin = {0:?})")] - BadBlock(Option), + BadBlock(Option), /// Parent state is missing. #[error("block is missing parent state")] diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index 84ccba990e599..0e607159b75c3 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -34,7 +34,7 @@ use crate::{ import_queue::{ buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender}, import_single_block_metered, BlockImportError, BlockImportStatus, BoxBlockImport, - BoxJustificationImport, ImportQueue, IncomingBlock, Link, Origin, Verifier, + BoxJustificationImport, ImportQueue, IncomingBlock, Link, RuntimeOrigin, Verifier, }, metrics::Metrics, }; @@ -120,7 +120,7 @@ impl ImportQueue for BasicQueue fn import_justifications( &mut self, - who: Origin, + who: RuntimeOrigin, hash: B::Hash, number: NumberFor, justifications: Justifications, @@ -152,7 +152,7 @@ mod worker_messages { pub struct ImportBlocks(pub BlockOrigin, pub Vec>); pub struct ImportJustification( - pub Origin, + pub RuntimeOrigin, pub B::Hash, pub NumberFor, pub Justification, @@ -289,7 +289,7 @@ impl BlockImportWorker { async fn import_justification( &mut self, - who: Origin, + who: RuntimeOrigin, hash: B::Hash, number: NumberFor, justification: Justification, @@ -530,7 +530,7 @@ mod tests { fn justification_imported( &mut self, - _who: Origin, + _who: RuntimeOrigin, hash: &Hash, _number: BlockNumber, _success: bool, diff --git a/client/consensus/common/src/import_queue/buffered_link.rs b/client/consensus/common/src/import_queue/buffered_link.rs index d3d91f5bd31c5..5d418dddf0853 100644 --- a/client/consensus/common/src/import_queue/buffered_link.rs +++ b/client/consensus/common/src/import_queue/buffered_link.rs @@ -38,7 +38,7 @@ //! }); //! ``` -use crate::import_queue::{Link, Origin}; +use crate::import_queue::{Link, RuntimeOrigin}; use futures::prelude::*; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -82,7 +82,7 @@ impl Clone for BufferedLinkSender { /// Internal buffered message. 
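// Illustrative sketch only (not part of the patch): on the client side the rename is
// purely an alias change. `RuntimeOrigin` in `sc_consensus::import_queue` is still just
// `libp2p::PeerId`, so code that previously held an `Origin` now holds a `RuntimeOrigin`
// with no behavioural difference. The helper below is hypothetical.

use libp2p::PeerId;

/// Maps to the RuntimeOrigin used by the network (as renamed in this patch).
pub type RuntimeOrigin = PeerId;

// Hypothetical helper: describe which peer a block came from, if any.
fn describe_origin(origin: Option<RuntimeOrigin>) -> String {
    match origin {
        Some(peer) => format!("received from peer {peer}"),
        None => "locally produced".to_string(),
    }
}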
enum BlockImportWorkerMsg { BlocksProcessed(usize, usize, Vec<(BlockImportResult, B::Hash)>), - JustificationImported(Origin, B::Hash, NumberFor, bool), + JustificationImported(RuntimeOrigin, B::Hash, NumberFor, bool), RequestJustification(B::Hash, NumberFor), } @@ -100,7 +100,7 @@ impl Link for BufferedLinkSender { fn justification_imported( &mut self, - who: Origin, + who: RuntimeOrigin, hash: &B::Hash, number: NumberFor, success: bool, diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 88571647fb0e3..14962c837aa10 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -37,7 +37,7 @@ use libp2p::{ }; use log::debug; -use sc_consensus::import_queue::{IncomingBlock, Origin}; +use sc_consensus::import_queue::{IncomingBlock, RuntimeOrigin}; use sc_network_common::{ config::ProtocolId, protocol::{ @@ -103,7 +103,7 @@ where /// Event generated by `Behaviour`. pub enum BehaviourOut { BlockImport(BlockOrigin, Vec>), - JustificationImport(Origin, B::Hash, NumberFor, Justifications), + JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), /// Started a random iterative Kademlia discovery query. RandomKademliaStarted(ProtocolId), diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index a7af43d53d3b7..17187f2d63c1c 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -41,7 +41,9 @@ use message::{ use notifications::{Notifications, NotificationsOut}; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::HeaderBackend; -use sc_consensus::import_queue::{BlockImportError, BlockImportStatus, IncomingBlock, Origin}; +use sc_consensus::import_queue::{ + BlockImportError, BlockImportStatus, IncomingBlock, RuntimeOrigin, +}; use sc_network_common::{ config::ProtocolId, protocol::ProtocolName, @@ -1257,7 +1259,7 @@ fn prepare_warp_sync_request( #[must_use] pub enum CustomMessageOutcome { BlockImport(BlockOrigin, Vec>), - JustificationImport(Origin, B::Hash, NumberFor, Justifications), + JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, diff --git a/docs/Upgrading-2.0-to-3.0.md b/docs/Upgrading-2.0-to-3.0.md index 210023c3aec5a..7540e0d5b5b8c 100644 --- a/docs/Upgrading-2.0-to-3.0.md +++ b/docs/Upgrading-2.0-to-3.0.md @@ -147,7 +147,7 @@ And update the overall definition for weights on frame and a few related types a + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type DbWeight = RocksDbWeight; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = Index; @@ -171,25 +198,19 @@ impl frame_system::Trait for Runtime { diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 9d320c4215a70..82c7e21dba3af 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -245,7 +245,7 @@ pub mod pallet { /// The outer call dispatch type. type Proposal: Parameter - + Dispatchable + + Dispatchable + From> + From> + GetDispatchInfo @@ -253,13 +253,13 @@ pub mod pallet { + IsType<::RuntimeCall>; /// Origin for admin-level operations, like setting the Alliance's rules. - type AdminOrigin: EnsureOrigin; + type AdminOrigin: EnsureOrigin; /// Origin that manages entry and forcible discharge from the Alliance. 
- type MembershipManager: EnsureOrigin; + type MembershipManager: EnsureOrigin; /// Origin for making announcements and adding/removing unscrupulous items. - type AnnouncementOrigin: EnsureOrigin; + type AnnouncementOrigin: EnsureOrigin; /// The currency used for deposits. type Currency: ReservableCurrency; diff --git a/frame/alliance/src/mock.rs b/frame/alliance/src/mock.rs index 42c97876d40b0..196e0003b537f 100644 --- a/frame/alliance/src/mock.rs +++ b/frame/alliance/src/mock.rs @@ -47,7 +47,7 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = BlockNumber; @@ -95,7 +95,7 @@ parameter_types! { } type AllianceCollective = pallet_collective::Instance1; impl pallet_collective::Config for Test { - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type MotionDuration = MotionDuration; @@ -283,7 +283,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 1)); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 1)); let info = IdentityInfo { additional: BoundedVec::default(), @@ -296,58 +296,58 @@ pub fn new_test_ext() -> sp_io::TestExternalities { image: Data::default(), twitter: Data::default(), }; - assert_ok!(Identity::set_identity(Origin::signed(1), Box::new(info.clone()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(1), Box::new(info.clone()))); assert_ok!(Identity::provide_judgement( - Origin::signed(1), + RuntimeOrigin::signed(1), 0, 1, Judgement::KnownGood, BlakeTwo256::hash_of(&info) )); - assert_ok!(Identity::set_identity(Origin::signed(2), Box::new(info.clone()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(2), Box::new(info.clone()))); assert_ok!(Identity::provide_judgement( - Origin::signed(1), + RuntimeOrigin::signed(1), 0, 2, Judgement::KnownGood, BlakeTwo256::hash_of(&info) )); - assert_ok!(Identity::set_identity(Origin::signed(3), Box::new(info.clone()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(3), Box::new(info.clone()))); assert_ok!(Identity::provide_judgement( - Origin::signed(1), + RuntimeOrigin::signed(1), 0, 3, Judgement::KnownGood, BlakeTwo256::hash_of(&info) )); - assert_ok!(Identity::set_identity(Origin::signed(4), Box::new(info.clone()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(4), Box::new(info.clone()))); assert_ok!(Identity::provide_judgement( - Origin::signed(1), + RuntimeOrigin::signed(1), 0, 4, Judgement::KnownGood, BlakeTwo256::hash_of(&info) )); - assert_ok!(Identity::set_identity(Origin::signed(5), Box::new(info.clone()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(5), Box::new(info.clone()))); assert_ok!(Identity::provide_judgement( - Origin::signed(1), + RuntimeOrigin::signed(1), 0, 5, Judgement::KnownGood, BlakeTwo256::hash_of(&info) )); - assert_ok!(Identity::set_identity(Origin::signed(6), Box::new(info.clone()))); - assert_ok!(Identity::set_identity(Origin::signed(8), Box::new(info.clone()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(6), Box::new(info.clone()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(8), Box::new(info.clone()))); assert_ok!(Identity::provide_judgement( - Origin::signed(1), + 
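// Illustrative sketch only (not part of the patch): in pallet `Config` traits every
// origin-checking bound now refers to `Self::RuntimeOrigin` rather than the old
// `Self::Origin`; the `EnsureOrigin` machinery itself is unchanged. Hypothetical pallet:

#[pallet::config]
pub trait Config: frame_system::Config {
    /// Origin allowed to perform privileged calls of this (hypothetical) pallet.
    type AdminOrigin: EnsureOrigin<Self::RuntimeOrigin>;
}

// Inside a dispatchable the check is written exactly as before:
//     T::AdminOrigin::ensure_origin(origin)?;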
RuntimeOrigin::signed(1), 0, 8, Judgement::KnownGood, BlakeTwo256::hash_of(&info) )); - assert_ok!(Identity::set_identity(Origin::signed(9), Box::new(info.clone()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(9), Box::new(info.clone()))); assert_ok!(Identity::provide_judgement( - Origin::signed(1), + RuntimeOrigin::signed(1), 0, 9, Judgement::KnownGood, @@ -356,11 +356,11 @@ pub fn new_test_ext() -> sp_io::TestExternalities { // Joining before init should fail. assert_noop!( - Alliance::join_alliance(Origin::signed(1)), + Alliance::join_alliance(RuntimeOrigin::signed(1)), Error::::AllianceNotYetInitialized ); - assert_ok!(Alliance::init_members(Origin::root(), vec![1, 2], vec![3], vec![])); + assert_ok!(Alliance::init_members(RuntimeOrigin::root(), vec![1, 2], vec![3], vec![])); System::set_block_number(1); }); diff --git a/frame/alliance/src/tests.rs b/frame/alliance/src/tests.rs index 79fd33965edb5..c55826768c695 100644 --- a/frame/alliance/src/tests.rs +++ b/frame/alliance/src/tests.rs @@ -30,28 +30,31 @@ fn init_members_works() { new_test_ext().execute_with(|| { // alliance must be reset first, no witness data assert_noop!( - Alliance::init_members(Origin::root(), vec![8], vec![], vec![],), + Alliance::init_members(RuntimeOrigin::root(), vec![8], vec![], vec![],), Error::::AllianceAlreadyInitialized, ); // give a retirement notice to check later a retiring member not removed - assert_ok!(Alliance::give_retirement_notice(Origin::signed(2))); + assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(2))); assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); // disband the Alliance to init new - assert_ok!(Alliance::disband(Origin::root(), DisbandWitness::new(2, 0))); + assert_ok!(Alliance::disband(RuntimeOrigin::root(), DisbandWitness::new(2, 0))); // fails without root - assert_noop!(Alliance::init_members(Origin::signed(1), vec![], vec![], vec![]), BadOrigin); + assert_noop!( + Alliance::init_members(RuntimeOrigin::signed(1), vec![], vec![], vec![]), + BadOrigin + ); // founders missing, other members given assert_noop!( - Alliance::init_members(Origin::root(), vec![], vec![4], vec![2],), + Alliance::init_members(RuntimeOrigin::root(), vec![], vec![4], vec![2],), Error::::FoundersMissing, ); // success call - assert_ok!(Alliance::init_members(Origin::root(), vec![8, 5], vec![4], vec![2],)); + assert_ok!(Alliance::init_members(RuntimeOrigin::root(), vec![8, 5], vec![4], vec![2],)); // assert new set of voting members assert_eq!(Alliance::voting_members_sorted(), vec![4, 5, 8]); @@ -78,36 +81,36 @@ fn disband_works() { assert_eq!(Alliance::voting_members_sorted(), vec![1, 2, 3]); // give a retirement notice to check later a retiring member not removed - assert_ok!(Alliance::give_retirement_notice(Origin::signed(2))); + assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(2))); assert!(Alliance::is_member_of(&2, MemberRole::Retiring)); // join alliance and reserve funds assert_eq!(Balances::free_balance(9), 40); - assert_ok!(Alliance::join_alliance(Origin::signed(9))); + assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(9))); assert_eq!(Alliance::deposit_of(9), Some(25)); assert_eq!(Balances::free_balance(9), 15); assert!(Alliance::is_member_of(&9, MemberRole::Ally)); // fails without root - assert_noop!(Alliance::disband(Origin::signed(1), Default::default()), BadOrigin); + assert_noop!(Alliance::disband(RuntimeOrigin::signed(1), Default::default()), BadOrigin); // bad witness data checks assert_noop!( - 
Alliance::disband(Origin::root(), Default::default(),), + Alliance::disband(RuntimeOrigin::root(), Default::default(),), Error::::BadWitness ); assert_noop!( - Alliance::disband(Origin::root(), DisbandWitness::new(1, 1)), + Alliance::disband(RuntimeOrigin::root(), DisbandWitness::new(1, 1)), Error::::BadWitness, ); assert_noop!( - Alliance::disband(Origin::root(), DisbandWitness::new(2, 0)), + Alliance::disband(RuntimeOrigin::root(), DisbandWitness::new(2, 0)), Error::::BadWitness, ); // success call - assert_ok!(Alliance::disband(Origin::root(), DisbandWitness::new(2, 1))); + assert_ok!(Alliance::disband(RuntimeOrigin::root(), DisbandWitness::new(2, 1))); // assert members disband assert!(!Alliance::is_member(&1)); @@ -125,7 +128,7 @@ fn disband_works() { // the Alliance must be set first assert_noop!( - Alliance::disband(Origin::root(), DisbandWitness::new(100, 100)), + Alliance::disband(RuntimeOrigin::root(), DisbandWitness::new(100, 100)), Error::::AllianceNotYetInitialized, ); }) @@ -138,12 +141,17 @@ fn propose_works() { // only voting member can propose proposal, 4 is ally not have vote rights assert_noop!( - Alliance::propose(Origin::signed(4), 3, Box::new(proposal.clone()), proposal_len), + Alliance::propose( + RuntimeOrigin::signed(4), + 3, + Box::new(proposal.clone()), + proposal_len + ), Error::::NoVotingRights ); assert_ok!(Alliance::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len @@ -171,12 +179,12 @@ fn vote_works() { new_test_ext().execute_with(|| { let (proposal, proposal_len, hash) = make_remark_proposal(42); assert_ok!(Alliance::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Alliance::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Alliance::vote(RuntimeOrigin::signed(2), hash, 0, true)); let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!( @@ -205,21 +213,21 @@ fn veto_works() { new_test_ext().execute_with(|| { let (proposal, proposal_len, hash) = make_remark_proposal(42); assert_ok!(Alliance::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); // only set_rule/elevate_ally can be veto assert_noop!( - Alliance::veto(Origin::signed(1), hash), + Alliance::veto(RuntimeOrigin::signed(1), hash), Error::::NotVetoableProposal ); let cid = test_cid(); let (vetoable_proposal, vetoable_proposal_len, vetoable_hash) = make_set_rule_proposal(cid); assert_ok!(Alliance::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(vetoable_proposal.clone()), vetoable_proposal_len @@ -227,11 +235,11 @@ fn veto_works() { // only founder have veto rights, 3 is fellow assert_noop!( - Alliance::veto(Origin::signed(3), vetoable_hash), + Alliance::veto(RuntimeOrigin::signed(3), vetoable_hash), Error::::NotFounder ); - assert_ok!(Alliance::veto(Origin::signed(2), vetoable_hash)); + assert_ok!(Alliance::veto(RuntimeOrigin::signed(2), vetoable_hash)); let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!( System::events(), @@ -262,15 +270,21 @@ fn close_works() { let (proposal, proposal_len, hash) = make_remark_proposal(42); let proposal_weight = proposal.get_dispatch_info().weight; assert_ok!(Alliance::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Alliance::vote(Origin::signed(1), hash, 0, true)); - 
assert_ok!(Alliance::vote(Origin::signed(2), hash, 0, true)); - assert_ok!(Alliance::vote(Origin::signed(3), hash, 0, true)); - assert_ok!(Alliance::close(Origin::signed(1), hash, 0, proposal_weight, proposal_len)); + assert_ok!(Alliance::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Alliance::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Alliance::vote(RuntimeOrigin::signed(3), hash, 0, true)); + assert_ok!(Alliance::close( + RuntimeOrigin::signed(1), + hash, + 0, + proposal_weight, + proposal_len + )); let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!( @@ -324,7 +338,7 @@ fn close_works() { fn set_rule_works() { new_test_ext().execute_with(|| { let cid = test_cid(); - assert_ok!(Alliance::set_rule(Origin::signed(1), cid.clone())); + assert_ok!(Alliance::set_rule(RuntimeOrigin::signed(1), cid.clone())); assert_eq!(Alliance::rule(), Some(cid.clone())); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::NewRuleSet { @@ -338,9 +352,9 @@ fn announce_works() { new_test_ext().execute_with(|| { let cid = test_cid(); - assert_noop!(Alliance::announce(Origin::signed(2), cid.clone()), BadOrigin); + assert_noop!(Alliance::announce(RuntimeOrigin::signed(2), cid.clone()), BadOrigin); - assert_ok!(Alliance::announce(Origin::signed(3), cid.clone())); + assert_ok!(Alliance::announce(RuntimeOrigin::signed(3), cid.clone())); assert_eq!(Alliance::announcements(), vec![cid.clone()]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { @@ -353,7 +367,7 @@ fn announce_works() { fn remove_announcement_works() { new_test_ext().execute_with(|| { let cid = test_cid(); - assert_ok!(Alliance::announce(Origin::signed(3), cid.clone())); + assert_ok!(Alliance::announce(RuntimeOrigin::signed(3), cid.clone())); assert_eq!(Alliance::announcements(), vec![cid.clone()]); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::Announced { announcement: cid.clone(), @@ -361,7 +375,7 @@ fn remove_announcement_works() { System::set_block_number(2); - assert_ok!(Alliance::remove_announcement(Origin::signed(3), cid.clone())); + assert_ok!(Alliance::remove_announcement(RuntimeOrigin::signed(3), cid.clone())); assert_eq!(Alliance::announcements(), vec![]); System::assert_last_event(mock::RuntimeEvent::Alliance( crate::Event::AnnouncementRemoved { announcement: cid }, @@ -373,46 +387,52 @@ fn remove_announcement_works() { fn join_alliance_works() { new_test_ext().execute_with(|| { // check already member - assert_noop!(Alliance::join_alliance(Origin::signed(1)), Error::::AlreadyMember); + assert_noop!( + Alliance::join_alliance(RuntimeOrigin::signed(1)), + Error::::AlreadyMember + ); // check already listed as unscrupulous assert_ok!(Alliance::add_unscrupulous_items( - Origin::signed(3), + RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(4)] )); assert_noop!( - Alliance::join_alliance(Origin::signed(4)), + Alliance::join_alliance(RuntimeOrigin::signed(4)), Error::::AccountNonGrata ); assert_ok!(Alliance::remove_unscrupulous_items( - Origin::signed(3), + RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(4)] )); // check deposit funds assert_noop!( - Alliance::join_alliance(Origin::signed(5)), + Alliance::join_alliance(RuntimeOrigin::signed(5)), Error::::InsufficientFunds ); // success to submit - assert_ok!(Alliance::join_alliance(Origin::signed(4))); + assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); assert_eq!(Alliance::deposit_of(4), Some(25)); 
assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); // check already member - assert_noop!(Alliance::join_alliance(Origin::signed(4)), Error::::AlreadyMember); + assert_noop!( + Alliance::join_alliance(RuntimeOrigin::signed(4)), + Error::::AlreadyMember + ); // check missing identity judgement #[cfg(not(feature = "runtime-benchmarks"))] assert_noop!( - Alliance::join_alliance(Origin::signed(6)), + Alliance::join_alliance(RuntimeOrigin::signed(6)), Error::::WithoutGoodIdentityJudgement ); // check missing identity info #[cfg(not(feature = "runtime-benchmarks"))] assert_noop!( - Alliance::join_alliance(Origin::signed(7)), + Alliance::join_alliance(RuntimeOrigin::signed(7)), Error::::WithoutIdentityDisplayAndWebsite ); }); @@ -423,51 +443,51 @@ fn nominate_ally_works() { new_test_ext().execute_with(|| { // check already member assert_noop!( - Alliance::nominate_ally(Origin::signed(1), 2), + Alliance::nominate_ally(RuntimeOrigin::signed(1), 2), Error::::AlreadyMember ); // only voting member(founder/fellow) have nominate right assert_noop!( - Alliance::nominate_ally(Origin::signed(5), 4), + Alliance::nominate_ally(RuntimeOrigin::signed(5), 4), Error::::NoVotingRights ); // check already listed as unscrupulous assert_ok!(Alliance::add_unscrupulous_items( - Origin::signed(3), + RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(4)] )); assert_noop!( - Alliance::nominate_ally(Origin::signed(1), 4), + Alliance::nominate_ally(RuntimeOrigin::signed(1), 4), Error::::AccountNonGrata ); assert_ok!(Alliance::remove_unscrupulous_items( - Origin::signed(3), + RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(4)] )); // success to nominate - assert_ok!(Alliance::nominate_ally(Origin::signed(1), 4)); + assert_ok!(Alliance::nominate_ally(RuntimeOrigin::signed(1), 4)); assert_eq!(Alliance::deposit_of(4), None); assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); // check already member assert_noop!( - Alliance::nominate_ally(Origin::signed(1), 4), + Alliance::nominate_ally(RuntimeOrigin::signed(1), 4), Error::::AlreadyMember ); // check missing identity judgement #[cfg(not(feature = "runtime-benchmarks"))] assert_noop!( - Alliance::join_alliance(Origin::signed(6)), + Alliance::join_alliance(RuntimeOrigin::signed(6)), Error::::WithoutGoodIdentityJudgement ); // check missing identity info #[cfg(not(feature = "runtime-benchmarks"))] assert_noop!( - Alliance::join_alliance(Origin::signed(7)), + Alliance::join_alliance(RuntimeOrigin::signed(7)), Error::::WithoutIdentityDisplayAndWebsite ); }); @@ -476,13 +496,16 @@ fn nominate_ally_works() { #[test] fn elevate_ally_works() { new_test_ext().execute_with(|| { - assert_noop!(Alliance::elevate_ally(Origin::signed(2), 4), Error::::NotAlly); + assert_noop!( + Alliance::elevate_ally(RuntimeOrigin::signed(2), 4), + Error::::NotAlly + ); - assert_ok!(Alliance::join_alliance(Origin::signed(4))); + assert_ok!(Alliance::join_alliance(RuntimeOrigin::signed(4))); assert_eq!(Alliance::members(MemberRole::Ally), vec![4]); assert_eq!(Alliance::members(MemberRole::Fellow), vec![3]); - assert_ok!(Alliance::elevate_ally(Origin::signed(2), 4)); + assert_ok!(Alliance::elevate_ally(RuntimeOrigin::signed(2), 4)); assert_eq!(Alliance::members(MemberRole::Ally), Vec::::new()); assert_eq!(Alliance::members(MemberRole::Fellow), vec![3, 4]); }); @@ -492,12 +515,12 @@ fn elevate_ally_works() { fn give_retirement_notice_work() { new_test_ext().execute_with(|| { assert_noop!( - Alliance::give_retirement_notice(Origin::signed(4)), + 
Alliance::give_retirement_notice(RuntimeOrigin::signed(4)), Error::::NotMember ); assert_eq!(Alliance::members(MemberRole::Fellow), vec![3]); - assert_ok!(Alliance::give_retirement_notice(Origin::signed(3))); + assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(3))); assert_eq!(Alliance::members(MemberRole::Fellow), Vec::::new()); assert_eq!(Alliance::members(MemberRole::Retiring), vec![3]); System::assert_last_event(mock::RuntimeEvent::Alliance( @@ -505,7 +528,7 @@ fn give_retirement_notice_work() { )); assert_noop!( - Alliance::give_retirement_notice(Origin::signed(3)), + Alliance::give_retirement_notice(RuntimeOrigin::signed(3)), Error::::AlreadyRetiring ); }); @@ -515,23 +538,23 @@ fn give_retirement_notice_work() { fn retire_works() { new_test_ext().execute_with(|| { assert_noop!( - Alliance::retire(Origin::signed(2)), + Alliance::retire(RuntimeOrigin::signed(2)), Error::::RetirementNoticeNotGiven ); assert_noop!( - Alliance::retire(Origin::signed(4)), + Alliance::retire(RuntimeOrigin::signed(4)), Error::::RetirementNoticeNotGiven ); assert_eq!(Alliance::members(MemberRole::Fellow), vec![3]); - assert_ok!(Alliance::give_retirement_notice(Origin::signed(3))); + assert_ok!(Alliance::give_retirement_notice(RuntimeOrigin::signed(3))); assert_noop!( - Alliance::retire(Origin::signed(3)), + Alliance::retire(RuntimeOrigin::signed(3)), Error::::RetirementPeriodNotPassed ); System::set_block_number(System::block_number() + RetirementPeriod::get()); - assert_ok!(Alliance::retire(Origin::signed(3))); + assert_ok!(Alliance::retire(RuntimeOrigin::signed(3))); assert_eq!(Alliance::members(MemberRole::Fellow), Vec::::new()); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberRetired { member: (3), @@ -541,11 +564,11 @@ fn retire_works() { // Move time on: System::set_block_number(System::block_number() + RetirementPeriod::get()); - assert_powerless(Origin::signed(3)); + assert_powerless(RuntimeOrigin::signed(3)); }); } -fn assert_powerless(user: Origin) { +fn assert_powerless(user: RuntimeOrigin) { //vote / veto with a valid propsal let cid = test_cid(); let (proposal, _, _) = make_kick_member_proposal(42); @@ -578,13 +601,16 @@ fn assert_powerless(user: Origin) { #[test] fn kick_member_works() { new_test_ext().execute_with(|| { - assert_noop!(Alliance::kick_member(Origin::signed(4), 4), BadOrigin); + assert_noop!(Alliance::kick_member(RuntimeOrigin::signed(4), 4), BadOrigin); - assert_noop!(Alliance::kick_member(Origin::signed(2), 4), Error::::NotMember); + assert_noop!( + Alliance::kick_member(RuntimeOrigin::signed(2), 4), + Error::::NotMember + ); >::insert(2, 25); assert_eq!(Alliance::members(MemberRole::Founder), vec![1, 2]); - assert_ok!(Alliance::kick_member(Origin::signed(2), 2)); + assert_ok!(Alliance::kick_member(RuntimeOrigin::signed(2), 2)); assert_eq!(Alliance::members(MemberRole::Founder), vec![1]); assert_eq!(>::get(2), None); System::assert_last_event(mock::RuntimeEvent::Alliance(crate::Event::MemberKicked { @@ -597,10 +623,10 @@ fn kick_member_works() { #[test] fn add_unscrupulous_items_works() { new_test_ext().execute_with(|| { - assert_noop!(Alliance::add_unscrupulous_items(Origin::signed(2), vec![]), BadOrigin); + assert_noop!(Alliance::add_unscrupulous_items(RuntimeOrigin::signed(2), vec![]), BadOrigin); assert_ok!(Alliance::add_unscrupulous_items( - Origin::signed(3), + RuntimeOrigin::signed(3), vec![ UnscrupulousItem::AccountId(3), UnscrupulousItem::Website("abc".as_bytes().to_vec().try_into().unwrap()) @@ -611,7 +637,7 @@ fn 
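// Illustrative sketch only (not part of the patch): as `assert_powerless` above shows,
// test helpers now take the renamed `RuntimeOrigin` type by value. Hypothetical helper:
fn join_as(user: RuntimeOrigin) {
    assert_ok!(Alliance::join_alliance(user));
}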
add_unscrupulous_items_works() { assert_noop!( Alliance::add_unscrupulous_items( - Origin::signed(3), + RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(3)] ), Error::::AlreadyUnscrupulous @@ -622,23 +648,26 @@ fn add_unscrupulous_items_works() { #[test] fn remove_unscrupulous_items_works() { new_test_ext().execute_with(|| { - assert_noop!(Alliance::remove_unscrupulous_items(Origin::signed(2), vec![]), BadOrigin); + assert_noop!( + Alliance::remove_unscrupulous_items(RuntimeOrigin::signed(2), vec![]), + BadOrigin + ); assert_noop!( Alliance::remove_unscrupulous_items( - Origin::signed(3), + RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(3)] ), Error::::NotListedAsUnscrupulous ); assert_ok!(Alliance::add_unscrupulous_items( - Origin::signed(3), + RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(3)] )); assert_eq!(Alliance::unscrupulous_accounts(), vec![3]); assert_ok!(Alliance::remove_unscrupulous_items( - Origin::signed(3), + RuntimeOrigin::signed(3), vec![UnscrupulousItem::AccountId(3)] )); assert_eq!(Alliance::unscrupulous_accounts(), Vec::::new()); diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index fbb4a3ea1568b..28743f917d323 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -208,7 +208,7 @@ pub mod pallet { /// The origin which may forcibly create or destroy an asset or otherwise alter privileged /// attributes. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// The basic amount of funds that must be reserved for an asset. #[pallet::constant] diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 9344e5920d205..1f105eb6b4fbc 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -49,7 +49,7 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index 598b6049b3d57..48cfad45a49fc 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -26,10 +26,10 @@ use sp_runtime::{traits::ConvertInto, TokenError}; #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 100)); assert_eq!(Assets::balance(0, 2), 100); }); } @@ -37,46 +37,46 @@ fn basic_minting_should_work() { #[test] fn minting_too_many_insufficient_assets_fails() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); - assert_ok!(Assets::force_create(Origin::root(), 1, 1, false, 1)); - assert_ok!(Assets::force_create(Origin::root(), 2, 1, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 1, 1, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 2, 1, false, 1)); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); - assert_ok!(Assets::mint(Origin::signed(1), 1, 1, 100)); - 
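// Illustrative sketch only (not part of the patch): pallet-assets follows the same
// pattern. `ForceOrigin` is now bounded on `Self::RuntimeOrigin`, and the tests drive
// it with `RuntimeOrigin::root()` / `RuntimeOrigin::signed(..)`. Assuming the mock's
// `ForceOrigin` is root-only, as the root-only calls above suggest
// (needs `use sp_runtime::traits::BadOrigin;`):

#[test]
fn force_create_is_root_only() {
    new_test_ext().execute_with(|| {
        assert_noop!(Assets::force_create(RuntimeOrigin::signed(1), 0, 1, true, 1), BadOrigin);
        assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1));
    });
}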
assert_noop!(Assets::mint(Origin::signed(1), 2, 1, 100), TokenError::CannotCreate); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 1, 1, 100)); + assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 2, 1, 100), TokenError::CannotCreate); Balances::make_free_balance_be(&2, 1); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 100)); - assert_ok!(Assets::mint(Origin::signed(1), 2, 1, 100)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 2, 1, 100)); }); } #[test] fn minting_insufficient_asset_with_deposit_should_work_when_consumers_exhausted() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); - assert_ok!(Assets::force_create(Origin::root(), 1, 1, false, 1)); - assert_ok!(Assets::force_create(Origin::root(), 2, 1, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 1, 1, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 2, 1, false, 1)); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); - assert_ok!(Assets::mint(Origin::signed(1), 1, 1, 100)); - assert_noop!(Assets::mint(Origin::signed(1), 2, 1, 100), TokenError::CannotCreate); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 1, 1, 100)); + assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 2, 1, 100), TokenError::CannotCreate); - assert_ok!(Assets::touch(Origin::signed(1), 2)); + assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 2)); assert_eq!(Balances::reserved_balance(&1), 10); - assert_ok!(Assets::mint(Origin::signed(1), 2, 1, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 2, 1, 100)); }); } #[test] fn minting_insufficient_assets_with_deposit_without_consumer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); - assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), TokenError::CannotCreate); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); + assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100), TokenError::CannotCreate); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::touch(Origin::signed(1), 0)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Balances::reserved_balance(&1), 10); assert_eq!(System::consumers(&1), 0); }); @@ -85,11 +85,11 @@ fn minting_insufficient_assets_with_deposit_without_consumer_should_work() { #[test] fn refunding_asset_deposit_with_burn_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::touch(Origin::signed(1), 0)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); - assert_ok!(Assets::refund(Origin::signed(1), 0, true)); + assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::refund(RuntimeOrigin::signed(1), 0, true)); assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Assets::balance(1, 0), 0); }); @@ -98,28 +98,28 @@ 
fn refunding_asset_deposit_with_burn_should_work() { #[test] fn refunding_asset_deposit_with_burn_disallowed_should_fail() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::touch(Origin::signed(1), 0)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); - assert_noop!(Assets::refund(Origin::signed(1), 0, false), Error::::WouldBurn); + assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_noop!(Assets::refund(RuntimeOrigin::signed(1), 0, false), Error::::WouldBurn); }); } #[test] fn refunding_asset_deposit_without_burn_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); - assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), TokenError::CannotCreate); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); + assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100), TokenError::CannotCreate); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::touch(Origin::signed(1), 0)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&2, 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 100)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 100)); assert_eq!(Assets::balance(0, 2), 100); assert_eq!(Assets::balance(0, 1), 0); assert_eq!(Balances::reserved_balance(&1), 10); - assert_ok!(Assets::refund(Origin::signed(1), 0, false)); + assert_ok!(Assets::refund(RuntimeOrigin::signed(1), 0, false)); assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Assets::balance(1, 0), 0); }); @@ -129,11 +129,11 @@ fn refunding_asset_deposit_without_burn_should_work() { #[test] fn refunding_calls_died_hook() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::touch(Origin::signed(1), 0)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); - assert_ok!(Assets::refund(Origin::signed(1), 0, true)); + assert_ok!(Assets::touch(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::refund(RuntimeOrigin::signed(1), 0, true)); assert_eq!(Asset::::get(0).unwrap().accounts, 0); assert_eq!(hooks(), vec![Hook::Died(0, 1)]); @@ -144,17 +144,20 @@ fn refunding_calls_died_hook() { fn approval_lifecycle_works() { new_test_ext().execute_with(|| { // can't approve non-existent token - assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Unknown); + assert_noop!( + Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50), + Error::::Unknown + ); // so we create it :) - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 
50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_eq!(Balances::reserved_balance(&1), 1); - assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 40)); + assert_ok!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 40)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); - assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); + assert_ok!(Assets::cancel_approval(RuntimeOrigin::signed(1), 0, 2)); assert_eq!(Asset::::get(0).unwrap().approvals, 0); assert_eq!(Assets::balance(0, 1), 60); assert_eq!(Assets::balance(0, 3), 40); @@ -166,17 +169,20 @@ fn approval_lifecycle_works() { fn transfer_approved_all_funds() { new_test_ext().execute_with(|| { // can't approve non-existent token - assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Unknown); + assert_noop!( + Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50), + Error::::Unknown + ); // so we create it :) - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); assert_eq!(Balances::reserved_balance(&1), 1); // transfer the full amount, which should trigger auto-cleanup - assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 50)); + assert_ok!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 0); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 3), 50); @@ -187,20 +193,20 @@ fn transfer_approved_all_funds() { #[test] fn approval_deposits_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); let e = BalancesError::::InsufficientBalance; - assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), e); + assert_noop!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50), e); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Balances::reserved_balance(&1), 1); - assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 50)); + assert_ok!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 50)); assert_eq!(Balances::reserved_balance(&1), 0); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); - assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::cancel_approval(RuntimeOrigin::signed(1), 0, 2)); assert_eq!(Balances::reserved_balance(&1), 0); }); } @@ -208,72 +214,84 @@ fn approval_deposits_work() { #[test] fn cannot_transfer_more_than_approved() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + 
assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); let e = Error::::Unapproved; - assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 51), e); + assert_noop!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 51), e); }); } #[test] fn cannot_transfer_more_than_exists() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 101)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 101)); let e = Error::::BalanceLow; - assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 101), e); + assert_noop!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 101), e); }); } #[test] fn cancel_approval_works() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); - assert_noop!(Assets::cancel_approval(Origin::signed(1), 1, 2), Error::::Unknown); - assert_noop!(Assets::cancel_approval(Origin::signed(2), 0, 2), Error::::Unknown); - assert_noop!(Assets::cancel_approval(Origin::signed(1), 0, 3), Error::::Unknown); + assert_noop!( + Assets::cancel_approval(RuntimeOrigin::signed(1), 1, 2), + Error::::Unknown + ); + assert_noop!( + Assets::cancel_approval(RuntimeOrigin::signed(2), 0, 2), + Error::::Unknown + ); + assert_noop!( + Assets::cancel_approval(RuntimeOrigin::signed(1), 0, 3), + Error::::Unknown + ); assert_eq!(Asset::::get(0).unwrap().approvals, 1); - assert_ok!(Assets::cancel_approval(Origin::signed(1), 0, 2)); + assert_ok!(Assets::cancel_approval(RuntimeOrigin::signed(1), 0, 2)); assert_eq!(Asset::::get(0).unwrap().approvals, 0); - assert_noop!(Assets::cancel_approval(Origin::signed(1), 0, 2), Error::::Unknown); + assert_noop!( + Assets::cancel_approval(RuntimeOrigin::signed(1), 0, 2), + Error::::Unknown + ); }); } #[test] fn force_cancel_approval_works() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Asset::::get(0).unwrap().approvals, 1); let e = Error::::NoPermission; - assert_noop!(Assets::force_cancel_approval(Origin::signed(2), 0, 1, 2), e); + assert_noop!(Assets::force_cancel_approval(RuntimeOrigin::signed(2), 0, 1, 2), e); assert_noop!( - Assets::force_cancel_approval(Origin::signed(1), 
1, 1, 2), + Assets::force_cancel_approval(RuntimeOrigin::signed(1), 1, 1, 2), Error::::Unknown ); assert_noop!( - Assets::force_cancel_approval(Origin::signed(1), 0, 2, 2), + Assets::force_cancel_approval(RuntimeOrigin::signed(1), 0, 2, 2), Error::::Unknown ); assert_noop!( - Assets::force_cancel_approval(Origin::signed(1), 0, 1, 3), + Assets::force_cancel_approval(RuntimeOrigin::signed(1), 0, 1, 3), Error::::Unknown ); assert_eq!(Asset::::get(0).unwrap().approvals, 1); - assert_ok!(Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2)); + assert_ok!(Assets::force_cancel_approval(RuntimeOrigin::signed(1), 0, 1, 2)); assert_eq!(Asset::::get(0).unwrap().approvals, 0); assert_noop!( - Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2), + Assets::force_cancel_approval(RuntimeOrigin::signed(1), 0, 1, 2), Error::::Unknown ); }); @@ -283,42 +301,42 @@ fn force_cancel_approval_works() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::create(Origin::signed(1), 0, 1, 1)); + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); assert_eq!(Balances::reserved_balance(&1), 1); assert!(Asset::::contains_key(0)); - assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0], vec![0], 12)); + assert_ok!(Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0], vec![0], 12)); assert_eq!(Balances::reserved_balance(&1), 4); assert!(Metadata::::contains_key(0)); Balances::make_free_balance_be(&10, 100); - assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 10, 100)); Balances::make_free_balance_be(&20, 100); - assert_ok!(Assets::mint(Origin::signed(1), 0, 20, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 20, 100)); assert_eq!(Account::::iter_prefix(0).count(), 2); let w = Asset::::get(0).unwrap().destroy_witness(); - assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); + assert_ok!(Assets::destroy(RuntimeOrigin::signed(1), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); assert!(!Asset::::contains_key(0)); assert!(!Metadata::::contains_key(0)); assert_eq!(Account::::iter_prefix(0).count(), 0); - assert_ok!(Assets::create(Origin::signed(1), 0, 1, 1)); + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); assert_eq!(Balances::reserved_balance(&1), 1); assert!(Asset::::contains_key(0)); - assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0], vec![0], 12)); + assert_ok!(Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0], vec![0], 12)); assert_eq!(Balances::reserved_balance(&1), 4); assert!(Metadata::::contains_key(0)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 20, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 10, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 20, 100)); assert_eq!(Account::::iter_prefix(0).count(), 2); let w = Asset::::get(0).unwrap().destroy_witness(); - assert_ok!(Assets::destroy(Origin::root(), 0, w)); + assert_ok!(Assets::destroy(RuntimeOrigin::root(), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); assert!(!Asset::::contains_key(0)); @@ -331,15 +349,15 @@ fn lifecycle_should_work() { fn destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); let mut w = 
Asset::::get(0).unwrap().destroy_witness(); - assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 10, 100)); // witness too low - assert_noop!(Assets::destroy(Origin::signed(1), 0, w), Error::::BadWitness); + assert_noop!(Assets::destroy(RuntimeOrigin::signed(1), 0, w), Error::::BadWitness); // witness too high is okay though w.accounts += 2; w.sufficients += 2; - assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); + assert_ok!(Assets::destroy(RuntimeOrigin::signed(1), 0, w)); }); } @@ -347,15 +365,15 @@ fn destroy_with_bad_witness_should_not_work() { fn destroy_should_refund_approvals() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 10, 100)); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 3, 50)); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 4, 50)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 10, 100)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 3, 50)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 4, 50)); assert_eq!(Balances::reserved_balance(&1), 3); let w = Asset::::get(0).unwrap().destroy_witness(); - assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); + assert_ok!(Assets::destroy(RuntimeOrigin::signed(1), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); // all approvals are removed @@ -366,69 +384,72 @@ fn destroy_should_refund_approvals() { #[test] fn non_providing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, false, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, false, 1)); Balances::make_free_balance_be(&0, 100); - assert_ok!(Assets::mint(Origin::signed(1), 0, 0, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 0, 100)); // Cannot mint into account 2 since it doesn't (yet) exist... - assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), TokenError::CannotCreate); + assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100), TokenError::CannotCreate); // ...or transfer... 
- assert_noop!(Assets::transfer(Origin::signed(0), 0, 1, 50), TokenError::CannotCreate); + assert_noop!( + Assets::transfer(RuntimeOrigin::signed(0), 0, 1, 50), + TokenError::CannotCreate + ); // ...or force-transfer assert_noop!( - Assets::force_transfer(Origin::signed(1), 0, 0, 1, 50), + Assets::force_transfer(RuntimeOrigin::signed(1), 0, 0, 1, 50), TokenError::CannotCreate ); Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); - assert_ok!(Assets::transfer(Origin::signed(0), 0, 1, 25)); - assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 0, 2, 25)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(0), 0, 1, 25)); + assert_ok!(Assets::force_transfer(RuntimeOrigin::signed(1), 0, 0, 2, 25)); }); } #[test] fn min_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 10)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Asset::::get(0).unwrap().accounts, 1); // Cannot create a new account with a balance that is below minimum... - assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum); + assert_noop!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 9), TokenError::BelowMinimum); + assert_noop!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 9), TokenError::BelowMinimum); assert_noop!( - Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9), + Assets::force_transfer(RuntimeOrigin::signed(1), 0, 1, 2, 9), TokenError::BelowMinimum ); // When deducting from an account to below minimum, it should be reaped. // Death by `transfer`. - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 91)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 91)); assert!(Assets::maybe_balance(0, 1).is_none()); assert_eq!(Assets::balance(0, 2), 100); assert_eq!(Asset::::get(0).unwrap().accounts, 1); assert_eq!(take_hooks(), vec![Hook::Died(0, 1)]); // Death by `force_transfer`. - assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 2, 1, 91)); + assert_ok!(Assets::force_transfer(RuntimeOrigin::signed(1), 0, 2, 1, 91)); assert!(Assets::maybe_balance(0, 2).is_none()); assert_eq!(Assets::balance(0, 1), 100); assert_eq!(Asset::::get(0).unwrap().accounts, 1); assert_eq!(take_hooks(), vec![Hook::Died(0, 2)]); // Death by `burn`. - assert_ok!(Assets::burn(Origin::signed(1), 0, 1, 91)); + assert_ok!(Assets::burn(RuntimeOrigin::signed(1), 0, 1, 91)); assert!(Assets::maybe_balance(0, 1).is_none()); assert_eq!(Asset::::get(0).unwrap().accounts, 0); assert_eq!(take_hooks(), vec![Hook::Died(0, 1)]); // Death by `transfer_approved`. 
- assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 100)); - assert_ok!(Assets::transfer_approved(Origin::signed(2), 0, 1, 3, 91)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 100)); + assert_ok!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 3, 91)); assert_eq!(take_hooks(), vec![Hook::Died(0, 1)]); }); } @@ -436,17 +457,17 @@ fn min_balance_should_work() { #[test] fn querying_total_supply_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::transfer(Origin::signed(2), 0, 3, 31)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), 0, 3, 31)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 19); assert_eq!(Assets::balance(0, 3), 31); - assert_ok!(Assets::burn(Origin::signed(1), 0, 3, u64::MAX)); + assert_ok!(Assets::burn(RuntimeOrigin::signed(1), 0, 3, u64::MAX)); assert_eq!(Assets::total_supply(0), 69); }); } @@ -454,10 +475,10 @@ fn querying_total_supply_should_work() { #[test] fn transferring_amount_below_available_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 50); }); @@ -466,14 +487,14 @@ fn transferring_amount_below_available_balance_should_work() { #[test] fn transferring_enough_to_kill_source_when_keep_alive_should_fail() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 10)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_noop!( - Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 91), + Assets::transfer_keep_alive(RuntimeOrigin::signed(1), 0, 2, 91), Error::::BalanceLow ); - assert_ok!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 90)); + assert_ok!(Assets::transfer_keep_alive(RuntimeOrigin::signed(1), 0, 2, 90)); assert_eq!(Assets::balance(0, 1), 10); assert_eq!(Assets::balance(0, 2), 90); assert!(hooks().is_empty()); @@ -483,26 +504,26 @@ fn transferring_enough_to_kill_source_when_keep_alive_should_fail() { #[test] fn transferring_frozen_user_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + 
assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::freeze(Origin::signed(1), 0, 1)); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); - assert_ok!(Assets::thaw(Origin::signed(1), 0, 1)); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::freeze(RuntimeOrigin::signed(1), 0, 1)); + assert_noop!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); }); } #[test] fn transferring_frozen_asset_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::freeze_asset(Origin::signed(1), 0)); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); - assert_ok!(Assets::thaw_asset(Origin::signed(1), 0)); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::freeze_asset(RuntimeOrigin::signed(1), 0)); + assert_noop!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50), Error::::Frozen); + assert_ok!(Assets::thaw_asset(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); }); } @@ -510,36 +531,48 @@ fn transferring_frozen_asset_should_not_work() { fn approve_transfer_frozen_asset_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::freeze_asset(Origin::signed(1), 0)); - assert_noop!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50), Error::::Frozen); - assert_ok!(Assets::thaw_asset(Origin::signed(1), 0)); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::freeze_asset(RuntimeOrigin::signed(1), 0)); + assert_noop!( + Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50), + Error::::Frozen + ); + assert_ok!(Assets::thaw_asset(RuntimeOrigin::signed(1), 0)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); }); } #[test] fn origin_guards_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_noop!( + Assets::transfer_ownership(RuntimeOrigin::signed(2), 0, 2), + Error::::NoPermission + ); + assert_noop!( + Assets::set_team(RuntimeOrigin::signed(2), 0, 2, 2, 2), + Error::::NoPermission + ); + assert_noop!(Assets::freeze(RuntimeOrigin::signed(2), 0, 1), Error::::NoPermission); + assert_noop!(Assets::thaw(RuntimeOrigin::signed(2), 0, 2), Error::::NoPermission); + assert_noop!( + Assets::mint(RuntimeOrigin::signed(2), 0, 2, 100), + Error::::NoPermission + ); 
assert_noop!( - Assets::transfer_ownership(Origin::signed(2), 0, 2), + Assets::burn(RuntimeOrigin::signed(2), 0, 1, 100), Error::::NoPermission ); - assert_noop!(Assets::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); - assert_noop!(Assets::freeze(Origin::signed(2), 0, 1), Error::::NoPermission); - assert_noop!(Assets::thaw(Origin::signed(2), 0, 2), Error::::NoPermission); - assert_noop!(Assets::mint(Origin::signed(2), 0, 2, 100), Error::::NoPermission); - assert_noop!(Assets::burn(Origin::signed(2), 0, 1, 100), Error::::NoPermission); assert_noop!( - Assets::force_transfer(Origin::signed(2), 0, 1, 2, 100), + Assets::force_transfer(RuntimeOrigin::signed(2), 0, 1, 2, 100), Error::::NoPermission ); let w = Asset::::get(0).unwrap().destroy_witness(); - assert_noop!(Assets::destroy(Origin::signed(2), 0, w), Error::::NoPermission); + assert_noop!(Assets::destroy(RuntimeOrigin::signed(2), 0, w), Error::::NoPermission); }); } @@ -548,22 +581,28 @@ fn transfer_owner_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); - assert_ok!(Assets::create(Origin::signed(1), 0, 1, 1)); + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); assert_eq!(Balances::reserved_balance(&1), 1); - assert_ok!(Assets::transfer_ownership(Origin::signed(1), 0, 2)); + assert_ok!(Assets::transfer_ownership(RuntimeOrigin::signed(1), 0, 2)); assert_eq!(Balances::reserved_balance(&2), 1); assert_eq!(Balances::reserved_balance(&1), 0); assert_noop!( - Assets::transfer_ownership(Origin::signed(1), 0, 1), + Assets::transfer_ownership(RuntimeOrigin::signed(1), 0, 1), Error::::NoPermission ); // Set metadata now and make sure that deposit gets transferred back. - assert_ok!(Assets::set_metadata(Origin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12)); - assert_ok!(Assets::transfer_ownership(Origin::signed(2), 0, 1)); + assert_ok!(Assets::set_metadata( + RuntimeOrigin::signed(2), + 0, + vec![0u8; 10], + vec![0u8; 10], + 12 + )); + assert_ok!(Assets::transfer_ownership(RuntimeOrigin::signed(2), 0, 1)); assert_eq!(Balances::reserved_balance(&1), 22); assert_eq!(Balances::reserved_balance(&2), 0); }); @@ -572,27 +611,27 @@ fn transfer_owner_should_work() { #[test] fn set_team_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::set_team(Origin::signed(1), 0, 2, 3, 4)); - - assert_ok!(Assets::mint(Origin::signed(2), 0, 2, 100)); - assert_ok!(Assets::freeze(Origin::signed(4), 0, 2)); - assert_ok!(Assets::thaw(Origin::signed(3), 0, 2)); - assert_ok!(Assets::force_transfer(Origin::signed(3), 0, 2, 3, 100)); - assert_ok!(Assets::burn(Origin::signed(3), 0, 3, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Assets::mint(RuntimeOrigin::signed(2), 0, 2, 100)); + assert_ok!(Assets::freeze(RuntimeOrigin::signed(4), 0, 2)); + assert_ok!(Assets::thaw(RuntimeOrigin::signed(3), 0, 2)); + assert_ok!(Assets::force_transfer(RuntimeOrigin::signed(3), 0, 2, 3, 100)); + assert_ok!(Assets::burn(RuntimeOrigin::signed(3), 0, 3, 100)); }); } #[test] fn transferring_to_frozen_account_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + 
assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 100)); assert_eq!(Assets::balance(0, 1), 100); assert_eq!(Assets::balance(0, 2), 100); - assert_ok!(Assets::freeze(Origin::signed(1), 0, 2)); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::freeze(RuntimeOrigin::signed(1), 0, 2)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 2), 150); }); } @@ -600,26 +639,32 @@ fn transferring_to_frozen_account_should_work() { #[test] fn transferring_amount_more_than_available_balance_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(Assets::balance(0, 1), 50); assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::MAX)); + assert_ok!(Assets::burn(RuntimeOrigin::signed(1), 0, 1, u64::MAX)); assert_eq!(Assets::balance(0, 1), 0); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 1, 50), Error::::NoAccount); - assert_noop!(Assets::transfer(Origin::signed(2), 0, 1, 51), Error::::BalanceLow); + assert_noop!( + Assets::transfer(RuntimeOrigin::signed(1), 0, 1, 50), + Error::::NoAccount + ); + assert_noop!( + Assets::transfer(RuntimeOrigin::signed(2), 0, 1, 51), + Error::::BalanceLow + ); }); } #[test] fn transferring_less_than_one_unit_is_fine() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 0)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 0)); // `ForceCreated` and `Issued` but no `Transferred` event. 
assert_eq!(System::events().len(), 2); }); @@ -628,20 +673,23 @@ fn transferring_less_than_one_unit_is_fine() { #[test] fn transferring_more_units_than_total_supply_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 101), Error::::BalanceLow); + assert_noop!( + Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 101), + Error::::BalanceLow + ); }); } #[test] fn burning_asset_balance_with_positive_balance_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::burn(Origin::signed(1), 0, 1, u64::MAX)); + assert_ok!(Assets::burn(RuntimeOrigin::signed(1), 0, 1, u64::MAX)); assert_eq!(Assets::balance(0, 1), 0); }); } @@ -649,10 +697,13 @@ fn burning_asset_balance_with_positive_balance_should_work() { #[test] fn burning_asset_balance_with_zero_balance_does_nothing() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 2), 0); - assert_noop!(Assets::burn(Origin::signed(1), 0, 2, u64::MAX), Error::::NoAccount); + assert_noop!( + Assets::burn(RuntimeOrigin::signed(1), 0, 2, u64::MAX), + Error::::NoAccount + ); assert_eq!(Assets::balance(0, 2), 0); assert_eq!(Assets::total_supply(0), 100); }); @@ -663,48 +714,69 @@ fn set_metadata_should_work() { new_test_ext().execute_with(|| { // Cannot add metadata to unknown asset assert_noop!( - Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12), + Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12), Error::::Unknown, ); - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); // Cannot add metadata to unowned asset assert_noop!( - Assets::set_metadata(Origin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12), + Assets::set_metadata(RuntimeOrigin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12), Error::::NoPermission, ); // Cannot add oversized metadata assert_noop!( - Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 100], vec![0u8; 10], 12), + Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0u8; 100], vec![0u8; 10], 12), Error::::BadMetadata, ); assert_noop!( - Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 100], 12), + Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0u8; 10], vec![0u8; 100], 12), Error::::BadMetadata, ); // Successfully add metadata and take deposit Balances::make_free_balance_be(&1, 30); - assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12)); + assert_ok!(Assets::set_metadata( + RuntimeOrigin::signed(1), + 0, + vec![0u8; 10], + vec![0u8; 10], + 12 + )); assert_eq!(Balances::free_balance(&1), 9); 
// Update deposit - assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 5], 12)); + assert_ok!(Assets::set_metadata( + RuntimeOrigin::signed(1), + 0, + vec![0u8; 10], + vec![0u8; 5], + 12 + )); assert_eq!(Balances::free_balance(&1), 14); - assert_ok!(Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 15], 12)); + assert_ok!(Assets::set_metadata( + RuntimeOrigin::signed(1), + 0, + vec![0u8; 10], + vec![0u8; 15], + 12 + )); assert_eq!(Balances::free_balance(&1), 4); // Cannot over-reserve assert_noop!( - Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 20], vec![0u8; 20], 12), + Assets::set_metadata(RuntimeOrigin::signed(1), 0, vec![0u8; 20], vec![0u8; 20], 12), BalancesError::::InsufficientBalance, ); // Clear Metadata assert!(Metadata::::contains_key(0)); - assert_noop!(Assets::clear_metadata(Origin::signed(2), 0), Error::::NoPermission); - assert_noop!(Assets::clear_metadata(Origin::signed(1), 1), Error::::Unknown); - assert_ok!(Assets::clear_metadata(Origin::signed(1), 0)); + assert_noop!( + Assets::clear_metadata(RuntimeOrigin::signed(2), 0), + Error::::NoPermission + ); + assert_noop!(Assets::clear_metadata(RuntimeOrigin::signed(1), 1), Error::::Unknown); + assert_ok!(Assets::clear_metadata(RuntimeOrigin::signed(1), 0)); assert!(!Metadata::::contains_key(0)); }); } @@ -713,13 +785,13 @@ fn set_metadata_should_work() { #[test] fn destroy_calls_died_hooks() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 50)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 50)); // Create account 1 and 2. - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 100)); // Destroy the asset. let w = Asset::::get(0).unwrap().destroy_witness(); - assert_ok!(Assets::destroy(Origin::signed(1), 0, w)); + assert_ok!(Assets::destroy(RuntimeOrigin::signed(1), 0, w)); // Asset is gone and accounts 1 and 2 died. assert!(Asset::::get(0).is_none()); @@ -730,36 +802,39 @@ fn destroy_calls_died_hooks() { #[test] fn freezer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 10)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); // freeze 50 of it. set_frozen_balance(0, 1, 50); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 20)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 20)); // cannot transfer another 21 away as this would take the non-frozen balance (30) to below // the minimum balance (10). - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 21), Error::::BalanceLow); + assert_noop!( + Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 21), + Error::::BalanceLow + ); // create an approved transfer... Balances::make_free_balance_be(&1, 100); - assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 2, 50)); let e = Error::::BalanceLow; // ...but that wont work either: - assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 2, 21), e); + assert_noop!(Assets::transfer_approved(RuntimeOrigin::signed(2), 0, 1, 2, 21), e); // a force transfer won't work also. 
let e = Error::::BalanceLow; - assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 21), e); + assert_noop!(Assets::force_transfer(RuntimeOrigin::signed(1), 0, 1, 2, 21), e); // reduce it to only 49 frozen... set_frozen_balance(0, 1, 49); // ...and it's all good: - assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 21)); + assert_ok!(Assets::force_transfer(RuntimeOrigin::signed(1), 0, 1, 2, 21)); // and if we clear it, we can remove the account completely. clear_frozen_balance(0, 1); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 50)); assert_eq!(hooks(), vec![Hook::Died(0, 1)]); }); } @@ -769,7 +844,7 @@ fn imbalances_should_work() { use frame_support::traits::tokens::fungibles::Balanced; new_test_ext().execute_with(|| { - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); let imb = Assets::issue(0, 100); assert_eq!(Assets::total_supply(0), 100); @@ -792,9 +867,9 @@ fn imbalances_should_work() { fn force_metadata_should_work() { new_test_ext().execute_with(|| { // force set metadata works - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::force_set_metadata( - Origin::root(), + RuntimeOrigin::root(), 0, vec![0u8; 10], vec![0u8; 10], @@ -806,7 +881,7 @@ fn force_metadata_should_work() { // overwrites existing metadata let asset_original_metadata = Metadata::::get(0); assert_ok!(Assets::force_set_metadata( - Origin::root(), + RuntimeOrigin::root(), 0, vec![1u8; 10], vec![1u8; 10], @@ -817,7 +892,14 @@ fn force_metadata_should_work() { // attempt to set metadata for non-existent asset class assert_noop!( - Assets::force_set_metadata(Origin::root(), 1, vec![0u8; 10], vec![0u8; 10], 8, false), + Assets::force_set_metadata( + RuntimeOrigin::root(), + 1, + vec![0u8; 10], + vec![0u8; 10], + 8, + false + ), Error::::Unknown ); @@ -825,7 +907,7 @@ fn force_metadata_should_work() { let limit = 50usize; assert_noop!( Assets::force_set_metadata( - Origin::root(), + RuntimeOrigin::root(), 0, vec![0u8; limit + 1], vec![0u8; 10], @@ -836,7 +918,7 @@ fn force_metadata_should_work() { ); assert_noop!( Assets::force_set_metadata( - Origin::root(), + RuntimeOrigin::root(), 0, vec![0u8; 10], vec![0u8; limit + 1], @@ -848,11 +930,14 @@ fn force_metadata_should_work() { // force clear metadata works assert!(Metadata::::contains_key(0)); - assert_ok!(Assets::force_clear_metadata(Origin::root(), 0)); + assert_ok!(Assets::force_clear_metadata(RuntimeOrigin::root(), 0)); assert!(!Metadata::::contains_key(0)); // Error handles clearing non-existent asset class - assert_noop!(Assets::force_clear_metadata(Origin::root(), 1), Error::::Unknown); + assert_noop!( + Assets::force_clear_metadata(RuntimeOrigin::root(), 1), + Error::::Unknown + ); }); } @@ -861,34 +946,57 @@ fn force_asset_status_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 10); Balances::make_free_balance_be(&2, 10); - assert_ok!(Assets::create(Origin::signed(1), 0, 1, 30)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 50)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 150)); + assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 30)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 50)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 150)); // force asset status to change min_balance > 
balance - assert_ok!(Assets::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, 100, true, false)); + assert_ok!(Assets::force_asset_status( + RuntimeOrigin::root(), + 0, + 1, + 1, + 1, + 1, + 100, + true, + false + )); assert_eq!(Assets::balance(0, 1), 50); // account can recieve assets for balance < min_balance - assert_ok!(Assets::transfer(Origin::signed(2), 0, 1, 1)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), 0, 1, 1)); assert_eq!(Assets::balance(0, 1), 51); // account on outbound transfer will cleanup for balance < min_balance - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 1)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 1)); assert_eq!(Assets::balance(0, 1), 0); // won't create new account with balance below min_balance - assert_noop!(Assets::transfer(Origin::signed(2), 0, 3, 50), TokenError::BelowMinimum); + assert_noop!( + Assets::transfer(RuntimeOrigin::signed(2), 0, 3, 50), + TokenError::BelowMinimum + ); // force asset status will not execute for non-existent class assert_noop!( - Assets::force_asset_status(Origin::root(), 1, 1, 1, 1, 1, 90, true, false), + Assets::force_asset_status(RuntimeOrigin::root(), 1, 1, 1, 1, 1, 90, true, false), Error::::Unknown ); // account drains to completion when funds dip below min_balance - assert_ok!(Assets::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, 110, true, false)); - assert_ok!(Assets::transfer(Origin::signed(2), 0, 1, 110)); + assert_ok!(Assets::force_asset_status( + RuntimeOrigin::root(), + 0, + 1, + 1, + 1, + 1, + 110, + true, + false + )); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(2), 0, 1, 110)); assert_eq!(Assets::balance(0, 1), 200); assert_eq!(Assets::balance(0, 2), 0); assert_eq!(Assets::total_supply(0), 200); @@ -901,9 +1009,9 @@ fn balance_conversion_should_work() { use frame_support::traits::tokens::BalanceConversion; let id = 42; - assert_ok!(Assets::force_create(Origin::root(), id, 1, true, 10)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), id, 1, true, 10)); let not_sufficient = 23; - assert_ok!(Assets::force_create(Origin::root(), not_sufficient, 1, false, 10)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), not_sufficient, 1, false, 10)); assert_eq!( BalanceToAssetBalance::::to_asset_balance(100, 1234), @@ -938,9 +1046,9 @@ fn assets_from_genesis_should_exist() { fn querying_name_symbol_and_decimals_should_work() { new_test_ext().execute_with(|| { use frame_support::traits::tokens::fungibles::metadata::Inspect; - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::force_set_metadata( - Origin::root(), + RuntimeOrigin::root(), 0, vec![0u8; 10], vec![1u8; 10], @@ -957,8 +1065,8 @@ fn querying_name_symbol_and_decimals_should_work() { fn querying_allowance_should_work() { new_test_ext().execute_with(|| { use frame_support::traits::tokens::fungibles::approvals::{Inspect, Mutate}; - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, 100)); Balances::make_free_balance_be(&1, 1); assert_ok!(Assets::approve(0, &1, &2, 50)); assert_eq!(Assets::allowance(0, &1, &2), 50); @@ -972,9 +1080,9 @@ fn querying_allowance_should_work() { fn transfer_large_asset() { new_test_ext().execute_with(|| { let amount = u64::pow(2, 63) + 2; - 
assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); - assert_ok!(Assets::mint(Origin::signed(1), 0, 1, amount)); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, amount - 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 1, amount)); + assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, amount - 1)); }) } @@ -982,9 +1090,9 @@ fn transfer_large_asset() { fn querying_roles_should_work() { new_test_ext().execute_with(|| { use frame_support::traits::tokens::fungibles::roles::Inspect; - assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); assert_ok!(Assets::set_team( - Origin::signed(1), + RuntimeOrigin::signed(1), 0, // Issuer 2, diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index d3d8b37d82794..5ad63dfce55b4 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -37,7 +37,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -100,7 +100,7 @@ fn two_party_successful_swap() { // A creates the swap on chain1. chain1.execute_with(|| { AtomicSwap::create_swap( - Origin::signed(A), + RuntimeOrigin::signed(A), B, hashed_proof, BalanceSwapAction::new(50), @@ -115,7 +115,7 @@ fn two_party_successful_swap() { // B creates the swap on chain2. chain2.execute_with(|| { AtomicSwap::create_swap( - Origin::signed(B), + RuntimeOrigin::signed(B), A, hashed_proof, BalanceSwapAction::new(75), @@ -129,8 +129,12 @@ fn two_party_successful_swap() { // A reveals the proof and claims the swap on chain2. chain2.execute_with(|| { - AtomicSwap::claim_swap(Origin::signed(A), proof.to_vec(), BalanceSwapAction::new(75)) - .unwrap(); + AtomicSwap::claim_swap( + RuntimeOrigin::signed(A), + proof.to_vec(), + BalanceSwapAction::new(75), + ) + .unwrap(); assert_eq!(Balances::free_balance(A), 100 + 75); assert_eq!(Balances::free_balance(B), 200 - 75); @@ -138,8 +142,12 @@ fn two_party_successful_swap() { // B use the revealed proof to claim the swap on chain1. 
chain1.execute_with(|| { - AtomicSwap::claim_swap(Origin::signed(B), proof.to_vec(), BalanceSwapAction::new(50)) - .unwrap(); + AtomicSwap::claim_swap( + RuntimeOrigin::signed(B), + proof.to_vec(), + BalanceSwapAction::new(50), + ) + .unwrap(); assert_eq!(Balances::free_balance(A), 100 - 50); assert_eq!(Balances::free_balance(B), 200 + 50); diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 673190c95f2e1..07fa9aa680e80 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -56,7 +56,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index 3505f20a05929..b642a9ac283f2 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -235,7 +235,7 @@ mod tests { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 3f484ef24eec0..c08e773abe3a7 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -468,7 +468,7 @@ mod tests { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -738,7 +738,7 @@ mod tests { System::reset_events(); System::initialize(¤t_depth, &parent_hash, &Default::default()); Authorship::on_initialize(current_depth); - Authorship::set_uncles(Origin::none(), uncles).unwrap(); + Authorship::set_uncles(RuntimeOrigin::none(), uncles).unwrap(); Authorship::on_finalize(current_depth); max_item_count = std::cmp::max(max_item_count, ::Uncles::get().len()); diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index fb251457db9ca..8264cd11ddc88 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -74,7 +74,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 1015ada7f0185..8d2a9b326cd0f 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -289,7 +289,7 @@ fn can_enact_next_config() { assert_eq!(NextEpochConfig::::get(), Some(next_config.clone())); Babe::plan_config_change( - Origin::root(), + RuntimeOrigin::root(), NextConfigDescriptor::V1 { c: next_next_config.c, allowed_slots: next_next_config.allowed_slots, @@ -323,15 +323,15 @@ fn only_root_can_enact_config_change() { let next_config = NextConfigDescriptor::V1 { c: (1, 4), allowed_slots: AllowedSlots::PrimarySlots }; - let res = Babe::plan_config_change(Origin::none(), next_config.clone()); + let res = Babe::plan_config_change(RuntimeOrigin::none(), next_config.clone()); assert_noop!(res, DispatchError::BadOrigin); - let res = Babe::plan_config_change(Origin::signed(1), next_config.clone()); + let res = Babe::plan_config_change(RuntimeOrigin::signed(1), next_config.clone()); assert_noop!(res, DispatchError::BadOrigin); - let res = Babe::plan_config_change(Origin::root(), next_config); + let res = 
Babe::plan_config_change(RuntimeOrigin::root(), next_config); assert!(res.is_ok()); }); @@ -464,7 +464,7 @@ fn report_equivocation_current_session_works() { // report the equivocation Babe::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ) @@ -536,7 +536,7 @@ fn report_equivocation_old_session_works() { // report the equivocation Babe::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ) @@ -588,7 +588,7 @@ fn report_equivocation_invalid_key_owner_proof() { key_owner_proof.session = 0; assert_err!( Babe::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof.clone()), key_owner_proof ), @@ -608,7 +608,7 @@ fn report_equivocation_invalid_key_owner_proof() { assert_err!( Babe::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ), @@ -642,7 +642,7 @@ fn report_equivocation_invalid_equivocation_proof() { let assert_invalid_equivocation = |equivocation_proof| { assert_err!( Babe::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof.clone(), ), @@ -784,7 +784,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { // we submit the report Babe::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ) @@ -857,7 +857,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // report the equivocation. let post_info = Babe::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof.clone()), key_owner_proof.clone(), ) @@ -871,7 +871,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // report the equivocation again which is invalid now since it is // duplicate. 
let post_info = Babe::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ) diff --git a/frame/bags-list/src/mock.rs b/frame/bags-list/src/mock.rs index f6bde63b3867c..8cc96a988e72a 100644 --- a/frame/bags-list/src/mock.rs +++ b/frame/bags-list/src/mock.rs @@ -49,7 +49,7 @@ impl frame_election_provider_support::ScoreProvider for StakingMock { impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/bags-list/src/tests.rs b/frame/bags-list/src/tests.rs index 01c1642f882c1..63a395ed0d65a 100644 --- a/frame/bags-list/src/tests.rs +++ b/frame/bags-list/src/tests.rs @@ -37,7 +37,7 @@ mod pallet { // when increasing score to the level of non-existent bag assert_eq!(List::::get_score(&42).unwrap(), 20); StakingMock::set_score_of(&42, 2_000); - assert_ok!(BagsList::rebag(Origin::signed(0), 42)); + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 42)); assert_eq!(List::::get_score(&42).unwrap(), 2_000); // then a new bag is created and the id moves into it @@ -48,7 +48,7 @@ mod pallet { // when decreasing score within the range of the current bag StakingMock::set_score_of(&42, 1_001); - assert_ok!(BagsList::rebag(Origin::signed(0), 42)); + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 42)); // then the id does not move assert_eq!( @@ -60,7 +60,7 @@ mod pallet { // when reducing score to the level of a non-existent bag StakingMock::set_score_of(&42, 30); - assert_ok!(BagsList::rebag(Origin::signed(0), 42)); + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 42)); // then a new bag is created and the id moves into it assert_eq!( @@ -71,7 +71,7 @@ mod pallet { // when increasing score to the level of a pre-existing bag StakingMock::set_score_of(&42, 500); - assert_ok!(BagsList::rebag(Origin::signed(0), 42)); + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 42)); // then the id moves into that bag assert_eq!( @@ -92,7 +92,7 @@ mod pallet { // when StakingMock::set_score_of(&4, 10); - assert_ok!(BagsList::rebag(Origin::signed(0), 4)); + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 4)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 4]), (1_000, vec![2, 3])]); @@ -100,7 +100,7 @@ mod pallet { // when StakingMock::set_score_of(&3, 10); - assert_ok!(BagsList::rebag(Origin::signed(0), 3)); + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 3)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 4, 3]), (1_000, vec![2])]); @@ -111,7 +111,7 @@ mod pallet { // when StakingMock::set_score_of(&2, 10); - assert_ok!(BagsList::rebag(Origin::signed(0), 2)); + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 2)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 4, 3, 2])]); @@ -126,7 +126,7 @@ mod pallet { ExtBuilder::default().build_and_execute(|| { // when StakingMock::set_score_of(&2, 10); - assert_ok!(BagsList::rebag(Origin::signed(0), 2)); + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 2)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 2]), (1_000, vec![3, 4])]); @@ -134,7 +134,7 @@ mod pallet { // when StakingMock::set_score_of(&3, 10); - assert_ok!(BagsList::rebag(Origin::signed(0), 3)); + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 3)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 2, 3]), (1_000, vec![4])]); @@ -142,7 +142,7 @@ 
mod pallet { // when StakingMock::set_score_of(&4, 10); - assert_ok!(BagsList::rebag(Origin::signed(0), 4)); + assert_ok!(BagsList::rebag(RuntimeOrigin::signed(0), 4)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1, 2, 3, 4])]); @@ -159,12 +159,15 @@ mod pallet { assert!(!node_3.is_misplaced(500)); // then calling rebag on account 3 with score 500 is a noop - assert_storage_noop!(assert_eq!(BagsList::rebag(Origin::signed(0), 3), Ok(()))); + assert_storage_noop!(assert_eq!(BagsList::rebag(RuntimeOrigin::signed(0), 3), Ok(()))); // when account 42 is not in the list assert!(!BagsList::contains(&42)); // then rebag-ing account 42 is an error - assert_storage_noop!(assert!(matches!(BagsList::rebag(Origin::signed(0), 42), Err(_)))); + assert_storage_noop!(assert!(matches!( + BagsList::rebag(RuntimeOrigin::signed(0), 42), + Err(_) + ))); }); } @@ -200,7 +203,7 @@ mod pallet { ); // any rebag is noop. - assert_storage_noop!(assert_eq!(BagsList::rebag(Origin::signed(0), 1), Ok(()))); + assert_storage_noop!(assert_eq!(BagsList::rebag(RuntimeOrigin::signed(0), 1), Ok(()))); }) } @@ -214,7 +217,7 @@ mod pallet { assert_eq!(List::::get_bags(), vec![(20, vec![10, 11])]); // when - assert_ok!(BagsList::put_in_front_of(Origin::signed(11), 10)); + assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(11), 10)); // then assert_eq!(List::::get_bags(), vec![(20, vec![11, 10])]); @@ -231,7 +234,7 @@ mod pallet { assert_eq!(List::::get_bags(), vec![(20, vec![11, 10])]); // when - assert_ok!(BagsList::put_in_front_of(Origin::signed(11), 10)); + assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(11), 10)); // then assert_eq!(List::::get_bags(), vec![(20, vec![11, 10])]); @@ -247,7 +250,7 @@ mod pallet { StakingMock::set_score_of(&3, 999); // when - assert_ok!(BagsList::put_in_front_of(Origin::signed(4), 3)); + assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(4), 3)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 4, 3, 5])]); @@ -268,7 +271,7 @@ mod pallet { StakingMock::set_score_of(&5, 999); // when - assert_ok!(BagsList::put_in_front_of(Origin::signed(3), 5)); + assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(3), 5)); // then assert_eq!( @@ -287,7 +290,7 @@ mod pallet { StakingMock::set_score_of(&2, 999); // when - assert_ok!(BagsList::put_in_front_of(Origin::signed(3), 2)); + assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(3), 2)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![3, 2, 4])]); @@ -303,7 +306,7 @@ mod pallet { StakingMock::set_score_of(&3, 999); // when - assert_ok!(BagsList::put_in_front_of(Origin::signed(4), 3)); + assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(4), 3)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 4, 3])]); @@ -319,7 +322,7 @@ mod pallet { StakingMock::set_score_of(&2, 999); // when - assert_ok!(BagsList::put_in_front_of(Origin::signed(5), 2)); + assert_ok!(BagsList::put_in_front_of(RuntimeOrigin::signed(5), 2)); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![5, 2, 3, 4])]); @@ -335,7 +338,7 @@ mod pallet { StakingMock::set_score_of(&4, 999); // when - BagsList::put_in_front_of(Origin::signed(2), 4).unwrap(); + BagsList::put_in_front_of(RuntimeOrigin::signed(2), 4).unwrap(); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![3, 2, 4, 5])]); @@ -349,7 +352,7 @@ mod pallet { assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 3, 4, 5])]); // when - 
BagsList::put_in_front_of(Origin::signed(3), 5).unwrap(); + BagsList::put_in_front_of(RuntimeOrigin::signed(3), 5).unwrap(); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![2, 4, 3, 5])]); @@ -365,7 +368,7 @@ mod pallet { StakingMock::set_score_of(&4, 999); // when - BagsList::put_in_front_of(Origin::signed(2), 4).unwrap(); + BagsList::put_in_front_of(RuntimeOrigin::signed(2), 4).unwrap(); // then assert_eq!(List::::get_bags(), vec![(10, vec![1]), (1_000, vec![3, 2, 4])]); @@ -382,7 +385,7 @@ mod pallet { // then assert_noop!( - BagsList::put_in_front_of(Origin::signed(3), 2), + BagsList::put_in_front_of(RuntimeOrigin::signed(3), 2), crate::pallet::Error::::List(ListError::NotHeavier) ); }); @@ -396,7 +399,7 @@ mod pallet { // then assert_noop!( - BagsList::put_in_front_of(Origin::signed(3), 4), + BagsList::put_in_front_of(RuntimeOrigin::signed(3), 4), crate::pallet::Error::::List(ListError::NotHeavier) ); }); @@ -413,7 +416,7 @@ mod pallet { // then assert_noop!( - BagsList::put_in_front_of(Origin::signed(5), 4), + BagsList::put_in_front_of(RuntimeOrigin::signed(5), 4), crate::pallet::Error::::List(ListError::NodeNotFound) ); }); @@ -427,7 +430,7 @@ mod pallet { // then assert_noop!( - BagsList::put_in_front_of(Origin::signed(4), 5), + BagsList::put_in_front_of(RuntimeOrigin::signed(4), 5), crate::pallet::Error::::List(ListError::NodeNotFound) ); }); @@ -441,7 +444,7 @@ mod pallet { // then assert_noop!( - BagsList::put_in_front_of(Origin::signed(4), 1), + BagsList::put_in_front_of(RuntimeOrigin::signed(4), 1), crate::pallet::Error::::List(ListError::NotInSameBag) ); }); diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 9c641f6b38a22..faf53e16cb028 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -825,7 +825,7 @@ macro_rules! decl_tests { /* User has no reference counter, so they can die in these scenarios */ // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); // Slashed completed in full assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); // Account is still alive @@ -833,35 +833,35 @@ macro_rules! decl_tests { System::assert_last_event(RuntimeEvent::Balances(crate::Event::Slashed { who: 1, amount: 900 })); // SCENARIO: Slash will kill account because not enough balance left. - assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); // Slashed completed in full assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(950), 0)); // Account is killed assert!(!System::account_exists(&1)); // SCENARIO: Over-slash will kill account, and report missing slash amount. - assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); // Slashed full free_balance, and reports 300 not slashed assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1000), 300)); // Account is dead assert!(!System::account_exists(&1)); // SCENARIO: Over-slash can take from reserved, but keep alive. 
- assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 400)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 400)); // Slashed full free_balance and 300 of reserved balance assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash can take from reserved, and kill. - assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 350)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 350)); // Slashed full free_balance and 300 of reserved balance assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); // Account is dead because 50 reserved balance is not enough to keep alive assert!(!System::account_exists(&1)); // SCENARIO: Over-slash can take as much as possible from reserved, kill, and report missing amount. - assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 250)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 250)); // Slashed full free_balance and 300 of reserved balance assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1250), 50)); // Account is super dead @@ -870,7 +870,7 @@ macro_rules! decl_tests { /* User will now have a reference counter on them, keeping them alive in these scenarios */ // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests // Slashed completed in full assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); @@ -878,35 +878,35 @@ macro_rules! decl_tests { assert!(System::account_exists(&1)); // SCENARIO: Slash will take as much as possible without killing account. - assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); // Slashed completed in full assert_eq!(Balances::slash(&1, 950), (NegativeImbalance::new(900), 50)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash will not kill account, and report missing slash amount. - assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 0)); // Slashed full free_balance minus ED, and reports 400 not slashed assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(900), 400)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash can take from reserved, but keep alive. - assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 400)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 400)); // Slashed full free_balance and 300 of reserved balance assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1300), 0)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash can take from reserved, but keep alive. - assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 350)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 350)); // Slashed full free_balance and 250 of reserved balance to leave ED assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1250), 50)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash can take as much as possible from reserved and report missing amount. 
- assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 250)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 1_000, 250)); // Slashed full free_balance and 300 of reserved balance assert_eq!(Balances::slash(&1, 1_300), (NegativeImbalance::new(1150), 150)); // Account is still alive @@ -926,28 +926,28 @@ macro_rules! decl_tests { /* User has no reference counter, so they can die in these scenarios */ // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); // Slashed completed in full assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Slash would kill account. - assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); // Slashed completed in full assert_eq!(Balances::slash_reserved(&1, 1_000), (NegativeImbalance::new(1_000), 0)); // Account is dead assert!(!System::account_exists(&1)); // SCENARIO: Over-slash would kill account, and reports left over slash. - assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); // Slashed completed in full assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); // Account is dead assert!(!System::account_exists(&1)); // SCENARIO: Over-slash does not take from free balance. - assert_ok!(Balances::set_balance(Origin::root(), 1, 300, 1_000)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 300, 1_000)); // Slashed completed in full assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); // Account is alive because of free balance @@ -956,7 +956,7 @@ macro_rules! decl_tests { /* User has a reference counter, so they cannot die */ // SCENARIO: Slash would not kill account. - assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); assert_ok!(System::inc_consumers(&1)); // <-- Reference counter added here is enough for all tests // Slashed completed in full assert_eq!(Balances::slash_reserved(&1, 900), (NegativeImbalance::new(900), 0)); @@ -964,21 +964,21 @@ macro_rules! decl_tests { assert!(System::account_exists(&1)); // SCENARIO: Slash as much as possible without killing. - assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); // Slashed as much as possible assert_eq!(Balances::slash_reserved(&1, 1_000), (NegativeImbalance::new(950), 50)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash reports correctly, where reserved is needed to keep alive. - assert_ok!(Balances::set_balance(Origin::root(), 1, 50, 1_000)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 50, 1_000)); // Slashed as much as possible assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(950), 350)); // Account is still alive assert!(System::account_exists(&1)); // SCENARIO: Over-slash reports correctly, where full reserved is removed. 
- assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 1_000)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 1_000)); // Slashed as much as possible assert_eq!(Balances::slash_reserved(&1, 1_300), (NegativeImbalance::new(1_000), 300)); // Account is still alive @@ -1018,7 +1018,7 @@ macro_rules! decl_tests { .existential_deposit(100) .build() .execute_with(|| { - assert_ok!(Balances::set_balance(Origin::root(), 1, 100, 100)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 100, 100)); assert_ok!(Balances::transfer_keep_alive(Some(1).into(), 2, 100)); assert_eq!(Balances::total_balance(&1), 100); assert_eq!(Balances::total_balance(&2), 100); @@ -1032,32 +1032,32 @@ macro_rules! decl_tests { .build() .execute_with(|| { // setup - assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 0)); - assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); // transfer all and allow death assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); assert_eq!(Balances::total_balance(&1), 0); assert_eq!(Balances::total_balance(&2), 200); // setup - assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 0)); - assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); // transfer all and keep alive assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); assert_eq!(Balances::total_balance(&1), 100); assert_eq!(Balances::total_balance(&2), 100); // setup - assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 10)); - assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 10)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); // transfer all and allow death w/ reserved assert_ok!(Balances::transfer_all(Some(1).into(), 2, false)); assert_eq!(Balances::total_balance(&1), 0); assert_eq!(Balances::total_balance(&2), 200); // setup - assert_ok!(Balances::set_balance(Origin::root(), 1, 200, 10)); - assert_ok!(Balances::set_balance(Origin::root(), 2, 0, 0)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1, 200, 10)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 0, 0)); // transfer all and keep alive w/ reserved assert_ok!(Balances::transfer_all(Some(1).into(), 2, true)); assert_eq!(Balances::total_balance(&1), 100); @@ -1281,7 +1281,7 @@ macro_rules! decl_tests { let _ = Balances::deposit_creating(&1, 111); assert_ok!(frame_system::Pallet::::inc_consumers(&1)); assert_noop!( - Balances::set_balance(Origin::root(), 1, 0, 0), + Balances::set_balance(RuntimeOrigin::root(), 1, 0, 0), DispatchError::ConsumerRemaining, ); }); @@ -1291,7 +1291,7 @@ macro_rules! 
decl_tests { fn set_balance_handles_total_issuance() { <$ext_builder>::default().build().execute_with(|| { let old_total_issuance = Balances::total_issuance(); - assert_ok!(Balances::set_balance(Origin::root(), 1337, 69, 42)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 1337, 69, 42)); assert_eq!(Balances::total_issuance(), old_total_issuance + 69 + 42); assert_eq!(Balances::total_balance(&1337), 69 + 42); assert_eq!(Balances::free_balance(&1337), 69); diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 1276a812c7861..1e38d611773d4 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -55,7 +55,7 @@ impl frame_system::Config for Test { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index b391b03ed6b6a..e080eafb66067 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -56,7 +56,7 @@ impl frame_system::Config for Test { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index 6fda1697dcfeb..fa2eb0e488e7d 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -59,7 +59,7 @@ impl frame_system::Config for Test { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/beefy-mmr/src/mock.rs b/frame/beefy-mmr/src/mock.rs index 792b775b77998..602d0aa5fe1a6 100644 --- a/frame/beefy-mmr/src/mock.rs +++ b/frame/beefy-mmr/src/mock.rs @@ -67,7 +67,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; diff --git a/frame/beefy/src/mock.rs b/frame/beefy/src/mock.rs index 7c4cd1af01533..ad3a672333dd5 100644 --- a/frame/beefy/src/mock.rs +++ b/frame/beefy/src/mock.rs @@ -62,7 +62,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; diff --git a/frame/benchmarking/src/baseline.rs b/frame/benchmarking/src/baseline.rs index 38aa7b261cb01..6a310330cafb1 100644 --- a/frame/benchmarking/src/baseline.rs +++ b/frame/benchmarking/src/baseline.rs @@ -176,7 +176,7 @@ pub mod mock { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = AccountIndex; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index d96ed4dd17b6e..4401fb059f90b 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -112,14 +112,14 @@ macro_rules! whitelist { /// foo { /// let caller = account::(b"caller", 0, benchmarks_seed); /// let l in 1 .. 
MAX_LENGTH => initialize_l(l); -/// }: _(Origin::Signed(caller), vec![0u8; l]) +/// }: _(RuntimeOrigin::Signed(caller), vec![0u8; l]) /// /// // second dispatchable: bar; this is a root dispatchable and accepts a `u8` vector of size /// // `l`. /// // In this case, we explicitly name the call using `bar` instead of `_`. /// bar { /// let l in 1 .. MAX_LENGTH => initialize_l(l); -/// }: bar(Origin::Root, vec![0u8; l]) +/// }: bar(RuntimeOrigin::Root, vec![0u8; l]) /// /// // third dispatchable: baz; this is a user dispatchable. It isn't dependent on length like the /// // other two but has its own complexity `c` that needs setting up. It uses `caller` (in the @@ -128,20 +128,20 @@ macro_rules! whitelist { /// baz1 { /// let caller = account::(b"caller", 0, benchmarks_seed); /// let c = 0 .. 10 => setup_c(&caller, c); -/// }: baz(Origin::Signed(caller)) +/// }: baz(RuntimeOrigin::Signed(caller)) /// /// // this is a second benchmark of the baz dispatchable with a different setup. /// baz2 { /// let caller = account::(b"caller", 0, benchmarks_seed); /// let c = 0 .. 10 => setup_c_in_some_other_way(&caller, c); -/// }: baz(Origin::Signed(caller)) +/// }: baz(RuntimeOrigin::Signed(caller)) /// /// // You may optionally specify the origin type if it can't be determined automatically like /// // this. /// baz3 { /// let caller = account::(b"caller", 0, benchmarks_seed); /// let l in 1 .. MAX_LENGTH => initialize_l(l); -/// }: baz(Origin::Signed(caller), vec![0u8; l]) +/// }: baz(RuntimeOrigin::Signed(caller), vec![0u8; l]) /// /// // this is benchmarking some code that is not a dispatchable. /// populate_a_set { @@ -617,7 +617,7 @@ macro_rules! to_origin { $origin.into() }; ($origin:expr, $origin_type:ty) => { - <::Origin as From<$origin_type>>::from($origin) + <::RuntimeOrigin as From<$origin_type>>::from($origin) }; } diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index fd7e0cce03c22..3bbd06ea64f84 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -90,7 +90,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -148,7 +148,7 @@ mod benchmarks { crate::benchmarks! { where_clause { where - crate::tests::Origin: From::AccountId>>, + crate::tests::RuntimeOrigin: From::AccountId>>, } set_value { diff --git a/frame/benchmarking/src/tests_instance.rs b/frame/benchmarking/src/tests_instance.rs index 7617696a949cf..1fb122f031a76 100644 --- a/frame/benchmarking/src/tests_instance.rs +++ b/frame/benchmarking/src/tests_instance.rs @@ -41,7 +41,7 @@ mod pallet_test { frame_support::decl_module! { pub struct Module, I: Instance = DefaultInstance> for enum Call where - origin: T::Origin, ::OtherEvent: Into<>::RuntimeEvent> + origin: T::RuntimeOrigin, ::OtherEvent: Into<>::RuntimeEvent> { #[weight = 0] fn set_value(origin, n: u32) -> frame_support::dispatch::DispatchResult { @@ -91,7 +91,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 9dc920f6cca28..6ccd587cebc10 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -98,7 +98,7 @@ benchmarks_instance_pallet! 
{ Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; let approve_origin = T::ApproveOrigin::successful_origin(); - }: _(approve_origin, bounty_id) + }: _(approve_origin, bounty_id) propose_curator { setup_pot_account::(); @@ -110,7 +110,7 @@ benchmarks_instance_pallet! { Bounties::::approve_bounty(approve_origin, bounty_id)?; Treasury::::on_initialize(T::BlockNumber::zero()); let approve_origin = T::ApproveOrigin::successful_origin(); - }: _(approve_origin, bounty_id, curator_lookup, fee) + }: _(approve_origin, bounty_id, curator_lookup, fee) // Worst case when curator is inactive and any sender unassigns the curator. unassign_curator { @@ -171,7 +171,7 @@ benchmarks_instance_pallet! { Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::::get() - 1; let approve_origin = T::ApproveOrigin::successful_origin(); - }: close_bounty(approve_origin, bounty_id) + }: close_bounty(approve_origin, bounty_id) close_bounty_active { setup_pot_account::(); @@ -179,7 +179,7 @@ benchmarks_instance_pallet! { Treasury::::on_initialize(T::BlockNumber::zero()); let bounty_id = BountyCount::::get() - 1; let approve_origin = T::ApproveOrigin::successful_origin(); - }: close_bounty(approve_origin, bounty_id) + }: close_bounty(approve_origin, bounty_id) verify { assert_last_event::(Event::BountyCanceled { index: bounty_id }.into()) } diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index f1d794eb89dbe..68aa56ccdde7f 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -68,7 +68,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -229,7 +229,7 @@ fn minting_works() { #[test] fn spend_proposal_takes_min_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); assert_eq!(Balances::free_balance(0), 99); assert_eq!(Balances::reserved_balance(0), 1); }); @@ -238,7 +238,7 @@ fn spend_proposal_takes_min_deposit() { #[test] fn spend_proposal_takes_proportional_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); assert_eq!(Balances::free_balance(0), 95); assert_eq!(Balances::reserved_balance(0), 5); }); @@ -248,7 +248,7 @@ fn spend_proposal_takes_proportional_deposit() { fn spend_proposal_fails_when_proposer_poor() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::propose_spend(Origin::signed(2), 100, 3), + Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3), TreasuryError::InsufficientProposersBalance, ); }); @@ -259,8 +259,8 @@ fn accepted_spend_proposal_ignored_outside_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -286,8 +286,8 @@ fn rejected_spend_proposal_ignored_on_spend_period() { 
new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 0); @@ -300,9 +300,12 @@ fn reject_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); - assert_noop!(Treasury::reject_proposal(Origin::root(), 0), TreasuryError::InvalidIndex); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_noop!( + Treasury::reject_proposal(RuntimeOrigin::root(), 0), + TreasuryError::InvalidIndex + ); }); } @@ -310,7 +313,7 @@ fn reject_already_rejected_spend_proposal_fails() { fn reject_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::reject_proposal(Origin::root(), 0), + Treasury::reject_proposal(RuntimeOrigin::root(), 0), pallet_treasury::Error::::InvalidIndex ); }); @@ -319,7 +322,10 @@ fn reject_non_existent_spend_proposal_fails() { #[test] fn accept_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { - assert_noop!(Treasury::approve_proposal(Origin::root(), 0), TreasuryError::InvalidIndex); + assert_noop!( + Treasury::approve_proposal(RuntimeOrigin::root(), 0), + TreasuryError::InvalidIndex + ); }); } @@ -328,9 +334,12 @@ fn accept_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); - assert_noop!(Treasury::approve_proposal(Origin::root(), 0), TreasuryError::InvalidIndex); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_noop!( + Treasury::approve_proposal(RuntimeOrigin::root(), 0), + TreasuryError::InvalidIndex + ); }); } @@ -340,8 +349,8 @@ fn accepted_spend_proposal_enacted_on_spend_period() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -355,8 +364,8 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -377,14 +386,14 @@ fn treasury_account_doesnt_get_deleted() { assert_eq!(Treasury::pot(), 100); let treasury_balance = 
Balances::free_balance(&Treasury::account_id()); - assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -407,10 +416,10 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -434,7 +443,7 @@ fn propose_bounty_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"1234567890".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 10, b"1234567890".to_vec())); assert_eq!(last_event(), BountiesEvent::BountyProposed { index: 0 }); @@ -469,17 +478,21 @@ fn propose_bounty_validation_works() { assert_eq!(Treasury::pot(), 100); assert_noop!( - Bounties::propose_bounty(Origin::signed(1), 0, [0; 17_000].to_vec()), + Bounties::propose_bounty(RuntimeOrigin::signed(1), 0, [0; 17_000].to_vec()), Error::::ReasonTooBig ); assert_noop!( - Bounties::propose_bounty(Origin::signed(1), 10, b"12345678901234567890".to_vec()), + Bounties::propose_bounty( + RuntimeOrigin::signed(1), + 10, + b"12345678901234567890".to_vec() + ), Error::::InsufficientProposersBalance ); assert_noop!( - Bounties::propose_bounty(Origin::signed(1), 0, b"12345678901234567890".to_vec()), + Bounties::propose_bounty(RuntimeOrigin::signed(1), 0, b"12345678901234567890".to_vec()), Error::::InvalidValue ); }); @@ -490,11 +503,11 @@ fn close_bounty_works() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::InvalidIndex); + assert_noop!(Bounties::close_bounty(RuntimeOrigin::root(), 0), Error::::InvalidIndex); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 10, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 10, b"12345".to_vec())); - assert_ok!(Bounties::close_bounty(Origin::root(), 0)); + assert_ok!(Bounties::close_bounty(RuntimeOrigin::root(), 0)); let deposit: u64 = 80 + 5; @@ -515,11 +528,14 @@ fn approve_bounty_works() { new_test_ext().execute_with(|| { 
System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!(Bounties::approve_bounty(Origin::root(), 0), Error::::InvalidIndex); + assert_noop!( + Bounties::approve_bounty(RuntimeOrigin::root(), 0), + Error::::InvalidIndex + ); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); let deposit: u64 = 80 + 5; @@ -536,7 +552,10 @@ fn approve_bounty_works() { ); assert_eq!(Bounties::bounty_approvals(), vec![0]); - assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::UnexpectedStatus); + assert_noop!( + Bounties::close_bounty(RuntimeOrigin::root(), 0), + Error::::UnexpectedStatus + ); // deposit not returned yet assert_eq!(Balances::reserved_balance(0), deposit); @@ -572,24 +591,24 @@ fn assign_curator_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_noop!( - Bounties::propose_curator(Origin::root(), 0, 4, 4), + Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 4), Error::::InvalidIndex ); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); assert_noop!( - Bounties::propose_curator(Origin::root(), 0, 4, 50), + Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 50), Error::::InvalidFee ); let fee = 4; - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -603,15 +622,18 @@ fn assign_curator_works() { } ); - assert_noop!(Bounties::accept_curator(Origin::signed(1), 0), Error::::RequireCurator); assert_noop!( - Bounties::accept_curator(Origin::signed(4), 0), + Bounties::accept_curator(RuntimeOrigin::signed(1), 0), + Error::::RequireCurator + ); + assert_noop!( + Bounties::accept_curator(RuntimeOrigin::signed(4), 0), pallet_balances::Error::::InsufficientBalance ); Balances::make_free_balance_be(&4, 10); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); let expected_deposit = Bounties::calculate_curator_deposit(&fee); @@ -637,18 +659,18 @@ fn unassign_curator_works() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); let fee = 4; - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); - assert_noop!(Bounties::unassign_curator(Origin::signed(1), 0), BadOrigin); - assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); + assert_noop!(Bounties::unassign_curator(RuntimeOrigin::signed(1), 0), BadOrigin); + 
assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(4), 0)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -662,11 +684,11 @@ fn unassign_curator_works() { } ); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); Balances::make_free_balance_be(&4, 10); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); let expected_deposit = Bounties::calculate_curator_deposit(&fee); - assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); + assert_ok!(Bounties::unassign_curator(RuntimeOrigin::root(), 0)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -691,26 +713,26 @@ fn award_and_claim_bounty_works() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 10); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); let fee = 4; - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); let expected_deposit = Bounties::calculate_curator_deposit(&fee); assert_eq!(Balances::free_balance(4), 10 - expected_deposit); assert_noop!( - Bounties::award_bounty(Origin::signed(1), 0, 3), + Bounties::award_bounty(RuntimeOrigin::signed(1), 0, 3), Error::::RequireCurator ); - assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3)); + assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 3)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -724,14 +746,18 @@ fn award_and_claim_bounty_works() { } ); - assert_noop!(Bounties::claim_bounty(Origin::signed(1), 0), Error::::Premature); + assert_noop!(Bounties::claim_bounty(RuntimeOrigin::signed(1), 0), Error::::Premature); System::set_block_number(5); >::on_initialize(5); - assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10)); + assert_ok!(Balances::transfer( + RuntimeOrigin::signed(0), + Bounties::bounty_account_id(0), + 10 + )); - assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); + assert_ok!(Bounties::claim_bounty(RuntimeOrigin::signed(1), 0)); assert_eq!( last_event(), @@ -754,17 +780,17 @@ fn claim_handles_high_fee() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 30); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 49)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 49)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); - assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3)); + 
assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 3)); System::set_block_number(5); >::on_initialize(5); @@ -773,7 +799,7 @@ fn claim_handles_high_fee() { let res = Balances::slash(&Bounties::bounty_account_id(0), 10); assert_eq!(res.0.peek(), 10); - assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); + assert_ok!(Bounties::claim_bounty(RuntimeOrigin::signed(1), 0)); assert_eq!( last_event(), @@ -796,14 +822,18 @@ fn cancel_and_refund() { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10)); + assert_ok!(Balances::transfer( + RuntimeOrigin::signed(0), + Bounties::bounty_account_id(0), + 10 + )); assert_eq!( Bounties::bounties(0).unwrap(), @@ -819,9 +849,9 @@ fn cancel_and_refund() { assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 60); - assert_noop!(Bounties::close_bounty(Origin::signed(0), 0), BadOrigin); + assert_noop!(Bounties::close_bounty(RuntimeOrigin::signed(0), 0), BadOrigin); - assert_ok!(Bounties::close_bounty(Origin::root(), 0)); + assert_ok!(Bounties::close_bounty(RuntimeOrigin::root(), 0)); // `- 25 + 10` assert_eq!(Treasury::pot(), 85); @@ -833,27 +863,30 @@ fn award_and_cancel() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 0, 10)); - assert_ok!(Bounties::accept_curator(Origin::signed(0), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 0, 10)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(0), 0)); assert_eq!(Balances::free_balance(0), 95); assert_eq!(Balances::reserved_balance(0), 5); - assert_ok!(Bounties::award_bounty(Origin::signed(0), 0, 3)); + assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(0), 0, 3)); // Cannot close bounty directly when payout is happening... - assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::PendingPayout); + assert_noop!( + Bounties::close_bounty(RuntimeOrigin::root(), 0), + Error::::PendingPayout + ); // Instead unassign the curator to slash them and then close. 
- assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); - assert_ok!(Bounties::close_bounty(Origin::root(), 0)); + assert_ok!(Bounties::unassign_curator(RuntimeOrigin::root(), 0)); + assert_ok!(Bounties::close_bounty(RuntimeOrigin::root(), 0)); assert_eq!(last_event(), BountiesEvent::BountyCanceled { index: 0 }); @@ -873,15 +906,15 @@ fn expire_and_unassign() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 1, 10)); - assert_ok!(Bounties::accept_curator(Origin::signed(1), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 1, 10)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 93); assert_eq!(Balances::reserved_balance(1), 5); @@ -889,12 +922,15 @@ fn expire_and_unassign() { System::set_block_number(22); >::on_initialize(22); - assert_noop!(Bounties::unassign_curator(Origin::signed(0), 0), Error::::Premature); + assert_noop!( + Bounties::unassign_curator(RuntimeOrigin::signed(0), 0), + Error::::Premature + ); System::set_block_number(23); >::on_initialize(23); - assert_ok!(Bounties::unassign_curator(Origin::signed(0), 0)); + assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(0), 0)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -919,20 +955,20 @@ fn extend_expiry() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&4, 10); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); assert_noop!( - Bounties::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()), + Bounties::extend_bounty_expiry(RuntimeOrigin::signed(1), 0, Vec::new()), Error::::UnexpectedStatus ); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 10)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 10)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); assert_eq!(Balances::free_balance(4), 5); assert_eq!(Balances::reserved_balance(4), 5); @@ -941,10 +977,10 @@ fn extend_expiry() { >::on_initialize(10); assert_noop!( - Bounties::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()), + Bounties::extend_bounty_expiry(RuntimeOrigin::signed(0), 0, Vec::new()), Error::::RequireCurator ); - assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); + assert_ok!(Bounties::extend_bounty_expiry(RuntimeOrigin::signed(4), 0, Vec::new())); assert_eq!( Bounties::bounties(0).unwrap(), @@ -958,7 +994,7 @@ fn extend_expiry() { } ); - assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); + assert_ok!(Bounties::extend_bounty_expiry(RuntimeOrigin::signed(4), 0, Vec::new())); assert_eq!( Bounties::bounties(0).unwrap(), @@ -975,8 +1011,11 @@ fn extend_expiry() { 
System::set_block_number(25); >::on_initialize(25); - assert_noop!(Bounties::unassign_curator(Origin::signed(0), 0), Error::::Premature); - assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0)); + assert_noop!( + Bounties::unassign_curator(RuntimeOrigin::signed(0), 0), + Error::::Premature + ); + assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(4), 0)); assert_eq!(Balances::free_balance(4), 10); // not slashed assert_eq!(Balances::reserved_balance(4), 0); @@ -1049,14 +1088,14 @@ fn unassign_curator_self() { new_test_ext().execute_with(|| { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 1, 10)); - assert_ok!(Bounties::accept_curator(Origin::signed(1), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 1, 10)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 93); assert_eq!(Balances::reserved_balance(1), 5); @@ -1064,7 +1103,7 @@ fn unassign_curator_self() { System::set_block_number(8); >::on_initialize(8); - assert_ok!(Bounties::unassign_curator(Origin::signed(1), 0)); + assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(1), 0)); assert_eq!( Bounties::bounties(0).unwrap(), @@ -1097,14 +1136,14 @@ fn accept_curator_handles_different_deposit_calculations() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&user, 100); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), value, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_index)); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), value, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), bounty_index)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), bounty_index, user, fee)); - assert_ok!(Bounties::accept_curator(Origin::signed(user), bounty_index)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), bounty_index, user, fee)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(user), bounty_index)); let expected_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!(Balances::free_balance(&user), 100 - expected_deposit); @@ -1119,14 +1158,14 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&Treasury::account_id(), 101); Balances::make_free_balance_be(&user, 100); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), value, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_index)); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), value, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), bounty_index)); System::set_block_number(4); >::on_initialize(4); - assert_ok!(Bounties::propose_curator(Origin::root(), bounty_index, user, fee)); - assert_ok!(Bounties::accept_curator(Origin::signed(user), bounty_index)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), bounty_index, user, fee)); + 
assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(user), bounty_index)); let expected_deposit = CuratorDepositMin::get(); assert_eq!(Balances::free_balance(&user), 100 - expected_deposit); @@ -1143,14 +1182,14 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&user, starting_balance); Balances::make_free_balance_be(&0, starting_balance); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), value, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), bounty_index)); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), value, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), bounty_index)); System::set_block_number(6); >::on_initialize(6); - assert_ok!(Bounties::propose_curator(Origin::root(), bounty_index, user, fee)); - assert_ok!(Bounties::accept_curator(Origin::signed(user), bounty_index)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), bounty_index, user, fee)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(user), bounty_index)); let expected_deposit = CuratorDepositMax::get(); assert_eq!(Balances::free_balance(&user), starting_balance - expected_deposit); @@ -1170,8 +1209,8 @@ fn approve_bounty_works_second_instance() { assert_eq!(Balances::free_balance(&Treasury::account_id()), 101); assert_eq!(Balances::free_balance(&Treasury1::account_id()), 201); - assert_ok!(Bounties1::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties1::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties1::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties1::approve_bounty(RuntimeOrigin::root(), 0)); >::on_initialize(2); >::on_initialize(2); diff --git a/frame/child-bounties/src/tests.rs b/frame/child-bounties/src/tests.rs index c9dbc8742c56a..67983b15f4579 100644 --- a/frame/child-bounties/src/tests.rs +++ b/frame/child-bounties/src/tests.rs @@ -71,7 +71,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -211,19 +211,19 @@ fn add_child_bounty() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); let fee = 8; - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); Balances::make_free_balance_be(&4, 10); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // This verifies that the accept curator logic took a deposit. let expected_deposit = CuratorDepositMultiplier::get() * fee; @@ -234,7 +234,7 @@ fn add_child_bounty() { // Acc-4 is the parent curator. // Call from invalid origin & check for error "RequireCurator". 
assert_noop!( - ChildBounties::add_child_bounty(Origin::signed(0), 0, 10, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty(RuntimeOrigin::signed(0), 0, 10, b"12345-p1".to_vec()), BountiesError::RequireCurator, ); @@ -246,17 +246,22 @@ fn add_child_bounty() { assert_eq!(Balances::reserved_balance(Bounties::bounty_account_id(0)), 0); assert_noop!( - ChildBounties::add_child_bounty(Origin::signed(4), 0, 50, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty(RuntimeOrigin::signed(4), 0, 50, b"12345-p1".to_vec()), pallet_balances::Error::::KeepAlive, ); assert_noop!( - ChildBounties::add_child_bounty(Origin::signed(4), 0, 100, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty(RuntimeOrigin::signed(4), 0, 100, b"12345-p1".to_vec()), Error::::InsufficientBountyBalance, ); // Add child-bounty with valid value, which can be funded by parent bounty. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); // Check for the event child-bounty added. assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); @@ -299,16 +304,16 @@ fn child_bounty_assign_curator() { Balances::make_free_balance_be(&4, 101); Balances::make_free_balance_be(&8, 101); - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); let fee = 4; - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, fee)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, fee)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Bounty account status before adding child-bounty. assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50); @@ -322,7 +327,12 @@ fn child_bounty_assign_curator() { // Add child-bounty. // Acc-4 is the parent curator & make sure enough deposit. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); @@ -335,7 +345,7 @@ fn child_bounty_assign_curator() { assert_eq!(Balances::reserved_balance(ChildBounties::child_bounty_account_id(0)), 0); let fee = 6u64; - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); assert_eq!( ChildBounties::child_bounties(0, 0).unwrap(), @@ -353,11 +363,11 @@ fn child_bounty_assign_curator() { assert_eq!(Balances::reserved_balance(4), expected_deposit); assert_noop!( - ChildBounties::accept_curator(Origin::signed(3), 0, 0), + ChildBounties::accept_curator(RuntimeOrigin::signed(3), 0, 0), BountiesError::RequireCurator, ); - assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; @@ -403,34 +413,39 @@ fn award_claim_child_bounty() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. 
Balances::make_free_balance_be(&8, 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Propose and accept curator for child-bounty. let fee = 8; - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, fee)); - assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); // Award child-bounty. // Test for non child-bounty curator. assert_noop!( - ChildBounties::award_child_bounty(Origin::signed(3), 0, 0, 7), + ChildBounties::award_child_bounty(RuntimeOrigin::signed(3), 0, 0, 7), BountiesError::RequireCurator, ); - assert_ok!(ChildBounties::award_child_bounty(Origin::signed(8), 0, 0, 7)); + assert_ok!(ChildBounties::award_child_bounty(RuntimeOrigin::signed(8), 0, 0, 7)); let expected_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( @@ -451,13 +466,13 @@ fn award_claim_child_bounty() { // Claim child-bounty. // Test for Premature condition. assert_noop!( - ChildBounties::claim_child_bounty(Origin::signed(7), 0, 0), + ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0), BountiesError::Premature ); System::set_block_number(9); - assert_ok!(ChildBounties::claim_child_bounty(Origin::signed(7), 0, 0)); + assert_ok!(ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0)); // Ensure child-bounty curator is paid with curator fee & deposit refund. assert_eq!(Balances::free_balance(8), 101 + fee); @@ -489,19 +504,24 @@ fn close_child_bounty_added() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Child-bounty. 
- assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); @@ -509,11 +529,11 @@ fn close_child_bounty_added() { // Close child-bounty. // Wrong origin. - assert_noop!(ChildBounties::close_child_bounty(Origin::signed(7), 0, 0), BadOrigin); - assert_noop!(ChildBounties::close_child_bounty(Origin::signed(8), 0, 0), BadOrigin); + assert_noop!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(7), 0, 0), BadOrigin); + assert_noop!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(8), 0, 0), BadOrigin); // Correct origin - parent curator. - assert_ok!(ChildBounties::close_child_bounty(Origin::signed(4), 0, 0)); + assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0)); // Check the child-bounty count. assert_eq!(ChildBounties::parent_child_bounties(0), 0); @@ -541,28 +561,33 @@ fn close_child_bounty_active() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Propose and accept curator for child-bounty. - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, 2)); - assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, 2)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); // Close child-bounty in active state. - assert_ok!(ChildBounties::close_child_bounty(Origin::signed(4), 0, 0)); + assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0)); // Check the child-bounty count. assert_eq!(ChildBounties::parent_child_bounties(0), 0); @@ -594,33 +619,38 @@ fn close_child_bounty_pending() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. 
- assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); let parent_fee = 6; - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, parent_fee)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, parent_fee)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Propose and accept curator for child-bounty. let child_fee = 4; - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, child_fee)); - assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, child_fee)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); let expected_child_deposit = CuratorDepositMin::get(); - assert_ok!(ChildBounties::award_child_bounty(Origin::signed(8), 0, 0, 7)); + assert_ok!(ChildBounties::award_child_bounty(RuntimeOrigin::signed(8), 0, 0, 7)); // Close child-bounty in pending_payout state. assert_noop!( - ChildBounties::close_child_bounty(Origin::signed(4), 0, 0), + ChildBounties::close_child_bounty(RuntimeOrigin::signed(4), 0, 0), BountiesError::PendingPayout ); @@ -650,25 +680,30 @@ fn child_bounty_added_unassign_curator() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Unassign curator in added state. assert_noop!( - ChildBounties::unassign_curator(Origin::signed(4), 0, 0), + ChildBounties::unassign_curator(RuntimeOrigin::signed(4), 0, 0), BountiesError::UnexpectedStatus ); }); @@ -687,24 +722,29 @@ fn child_bounty_curator_proposed_unassign_curator() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. 
- assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); // Propose curator for child-bounty. - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, 2)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, 2)); assert_eq!( ChildBounties::child_bounties(0, 0).unwrap(), @@ -718,10 +758,10 @@ fn child_bounty_curator_proposed_unassign_curator() { ); // Random account cannot unassign the curator when in proposed state. - assert_noop!(ChildBounties::unassign_curator(Origin::signed(99), 0, 0), BadOrigin); + assert_noop!(ChildBounties::unassign_curator(RuntimeOrigin::signed(99), 0, 0), BadOrigin); // Unassign curator. - assert_ok!(ChildBounties::unassign_curator(Origin::signed(4), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(4), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -759,18 +799,23 @@ fn child_bounty_active_unassign_curator() { Balances::make_free_balance_be(&7, 101); // Child-bounty curator 2. Balances::make_free_balance_be(&8, 101); // Child-bounty curator 3. - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Create Child-bounty. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); System::set_block_number(3); @@ -778,8 +823,8 @@ fn child_bounty_active_unassign_curator() { // Propose and accept curator for child-bounty. let fee = 6; - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, fee)); - assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( @@ -797,7 +842,7 @@ fn child_bounty_active_unassign_curator() { >::on_initialize(4); // Unassign curator - from reject origin. 
- assert_ok!(ChildBounties::unassign_curator(Origin::root(), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::root(), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -817,8 +862,8 @@ fn child_bounty_active_unassign_curator() { // Propose and accept curator for child-bounty again. let fee = 2; - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 7, fee)); - assert_ok!(ChildBounties::accept_curator(Origin::signed(7), 0, 0)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 7, fee)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(7), 0, 0)); let expected_child_deposit = CuratorDepositMin::get(); assert_eq!( @@ -836,7 +881,7 @@ fn child_bounty_active_unassign_curator() { >::on_initialize(5); // Unassign curator again - from parent curator. - assert_ok!(ChildBounties::unassign_curator(Origin::signed(4), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(4), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -855,8 +900,8 @@ fn child_bounty_active_unassign_curator() { assert_eq!(Balances::reserved_balance(7), 0); // slashed // Propose and accept curator for child-bounty again. - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 6, 2)); - assert_ok!(ChildBounties::accept_curator(Origin::signed(6), 0, 0)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 6, 2)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(6), 0, 0)); assert_eq!( ChildBounties::child_bounties(0, 0).unwrap(), @@ -873,7 +918,7 @@ fn child_bounty_active_unassign_curator() { >::on_initialize(6); // Unassign curator again - from child-bounty curator. - assert_ok!(ChildBounties::unassign_curator(Origin::signed(6), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(6), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -893,8 +938,8 @@ fn child_bounty_active_unassign_curator() { // Propose and accept curator for child-bounty one last time. let fee = 2; - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 6, fee)); - assert_ok!(ChildBounties::accept_curator(Origin::signed(6), 0, 0)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 6, fee)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(6), 0, 0)); let expected_child_deposit = CuratorDepositMin::get(); assert_eq!( @@ -914,7 +959,7 @@ fn child_bounty_active_unassign_curator() { // Unassign curator again - from non curator; non reject origin; some random guy. // Bounty update period is not yet complete. assert_noop!( - ChildBounties::unassign_curator(Origin::signed(3), 0, 0), + ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0), BountiesError::Premature ); @@ -922,7 +967,7 @@ fn child_bounty_active_unassign_curator() { >::on_initialize(20); // Unassign child curator from random account after inactivity. - assert_ok!(ChildBounties::unassign_curator(Origin::signed(3), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -960,17 +1005,22 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { Balances::make_free_balance_be(&7, 101); // Child-bounty curator 2. Balances::make_free_balance_be(&8, 101); // Child-bounty curator 3. 
- assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Create Child-bounty. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); System::set_block_number(3); @@ -978,8 +1028,8 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { // Propose and accept curator for child-bounty. let fee = 8; - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, fee)); - assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; assert_eq!( @@ -997,7 +1047,7 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { >::on_initialize(4); // Unassign parent bounty curator. - assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); + assert_ok!(Bounties::unassign_curator(RuntimeOrigin::root(), 0)); System::set_block_number(5); >::on_initialize(5); @@ -1005,12 +1055,12 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { // Try unassign child-bounty curator - from non curator; non reject // origin; some random guy. Bounty update period is not yet complete. assert_noop!( - ChildBounties::unassign_curator(Origin::signed(3), 0, 0), + ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0), Error::::ParentBountyNotActive ); // Unassign curator - from reject origin. - assert_ok!(ChildBounties::unassign_curator(Origin::root(), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::root(), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -1032,16 +1082,16 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { >::on_initialize(6); // Propose and accept curator for parent-bounty again. - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 5, 6)); - assert_ok!(Bounties::accept_curator(Origin::signed(5), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 5, 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(5), 0)); System::set_block_number(7); >::on_initialize(7); // Propose and accept curator for child-bounty again. 
let fee = 2; - assert_ok!(ChildBounties::propose_curator(Origin::signed(5), 0, 0, 7, fee)); - assert_ok!(ChildBounties::accept_curator(Origin::signed(7), 0, 0)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(5), 0, 0, 7, fee)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(7), 0, 0)); let expected_deposit = CuratorDepositMin::get(); assert_eq!( @@ -1059,18 +1109,18 @@ fn parent_bounty_inactive_unassign_curator_child_bounty() { >::on_initialize(8); assert_noop!( - ChildBounties::unassign_curator(Origin::signed(3), 0, 0), + ChildBounties::unassign_curator(RuntimeOrigin::signed(3), 0, 0), BountiesError::Premature ); // Unassign parent bounty curator again. - assert_ok!(Bounties::unassign_curator(Origin::signed(5), 0)); + assert_ok!(Bounties::unassign_curator(RuntimeOrigin::signed(5), 0)); System::set_block_number(9); >::on_initialize(9); // Unassign curator again - from parent curator. - assert_ok!(ChildBounties::unassign_curator(Origin::signed(7), 0, 0)); + assert_ok!(ChildBounties::unassign_curator(RuntimeOrigin::signed(7), 0, 0)); // Verify updated child-bounty status. assert_eq!( @@ -1103,24 +1153,29 @@ fn close_parent_with_child_bounty() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. - assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); // Try add child-bounty. // Should fail, parent bounty not active yet. assert_noop!( - ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec()), + ChildBounties::add_child_bounty(RuntimeOrigin::signed(4), 0, 10, b"12345-p1".to_vec()), Error::::ParentBountyNotActive ); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); System::set_block_number(4); @@ -1129,21 +1184,21 @@ fn close_parent_with_child_bounty() { // Try close parent-bounty. // Child bounty active, can't close parent. assert_noop!( - Bounties::close_bounty(Origin::root(), 0), + Bounties::close_bounty(RuntimeOrigin::root(), 0), BountiesError::HasActiveChildBounty ); System::set_block_number(2); // Close child-bounty. - assert_ok!(ChildBounties::close_child_bounty(Origin::root(), 0, 0)); + assert_ok!(ChildBounties::close_child_bounty(RuntimeOrigin::root(), 0, 0)); // Check the child-bounty count. assert_eq!(ChildBounties::parent_child_bounties(0), 0); // Try close parent-bounty again. // Should pass this time. - assert_ok!(Bounties::close_bounty(Origin::root(), 0)); + assert_ok!(Bounties::close_bounty(RuntimeOrigin::root(), 0)); }); } @@ -1162,17 +1217,22 @@ fn children_curator_fee_calculation_test() { Balances::make_free_balance_be(&4, 101); // Parent-bounty curator. Balances::make_free_balance_be(&8, 101); // Child-bounty curator. 
- assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); - assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); + assert_ok!(Bounties::propose_bounty(RuntimeOrigin::signed(0), 50, b"12345".to_vec())); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), 0)); System::set_block_number(2); >::on_initialize(2); - assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 6)); - assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); + assert_ok!(Bounties::propose_curator(RuntimeOrigin::root(), 0, 4, 6)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(4), 0)); // Child-bounty. - assert_ok!(ChildBounties::add_child_bounty(Origin::signed(4), 0, 10, b"12345-p1".to_vec())); + assert_ok!(ChildBounties::add_child_bounty( + RuntimeOrigin::signed(4), + 0, + 10, + b"12345-p1".to_vec() + )); assert_eq!(last_event(), ChildBountiesEvent::Added { index: 0, child_index: 0 }); System::set_block_number(4); @@ -1181,13 +1241,13 @@ fn children_curator_fee_calculation_test() { let fee = 6; // Propose curator for child-bounty. - assert_ok!(ChildBounties::propose_curator(Origin::signed(4), 0, 0, 8, fee)); + assert_ok!(ChildBounties::propose_curator(RuntimeOrigin::signed(4), 0, 0, 8, fee)); // Check curator fee added to the sum. assert_eq!(ChildBounties::children_curator_fees(0), fee); // Accept curator for child-bounty. - assert_ok!(ChildBounties::accept_curator(Origin::signed(8), 0, 0)); + assert_ok!(ChildBounties::accept_curator(RuntimeOrigin::signed(8), 0, 0)); // Award child-bounty. - assert_ok!(ChildBounties::award_child_bounty(Origin::signed(8), 0, 0, 7)); + assert_ok!(ChildBounties::award_child_bounty(RuntimeOrigin::signed(8), 0, 0, 7)); let expected_child_deposit = CuratorDepositMultiplier::get() * fee; @@ -1209,18 +1269,18 @@ fn children_curator_fee_calculation_test() { System::set_block_number(9); // Claim child-bounty. - assert_ok!(ChildBounties::claim_child_bounty(Origin::signed(7), 0, 0)); + assert_ok!(ChildBounties::claim_child_bounty(RuntimeOrigin::signed(7), 0, 0)); // Check the child-bounty count. assert_eq!(ChildBounties::parent_child_bounties(0), 0); // Award the parent bounty. - assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 9)); + assert_ok!(Bounties::award_bounty(RuntimeOrigin::signed(4), 0, 9)); System::set_block_number(15); // Claim the parent bounty. - assert_ok!(Bounties::claim_bounty(Origin::signed(9), 0)); + assert_ok!(Bounties::claim_bounty(RuntimeOrigin::signed(9), 0)); // Ensure parent-bounty curator received correctly reduced fee. assert_eq!(Balances::free_balance(4), 101 + 6 - fee); // 101 + 6 - 2 @@ -1247,22 +1307,22 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&Treasury::account_id(), parent_value * 3); Balances::make_free_balance_be(&parent_curator, parent_fee * 100); assert_ok!(Bounties::propose_bounty( - Origin::signed(parent_curator), + RuntimeOrigin::signed(parent_curator), parent_value, b"12345".to_vec() )); - assert_ok!(Bounties::approve_bounty(Origin::root(), parent_index)); + assert_ok!(Bounties::approve_bounty(RuntimeOrigin::root(), parent_index)); System::set_block_number(2); >::on_initialize(2); assert_ok!(Bounties::propose_curator( - Origin::root(), + RuntimeOrigin::root(), parent_index, parent_curator, parent_fee )); - assert_ok!(Bounties::accept_curator(Origin::signed(parent_curator), parent_index)); + assert_ok!(Bounties::accept_curator(RuntimeOrigin::signed(parent_curator), parent_index)); // Now we can start creating some child bounties. 
// Case 1: Parent and child curator are not the same. @@ -1275,7 +1335,7 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&child_curator, starting_balance); assert_ok!(ChildBounties::add_child_bounty( - Origin::signed(parent_curator), + RuntimeOrigin::signed(parent_curator), parent_index, child_value, b"12345-p1".to_vec() @@ -1283,14 +1343,14 @@ fn accept_curator_handles_different_deposit_calculations() { System::set_block_number(3); >::on_initialize(3); assert_ok!(ChildBounties::propose_curator( - Origin::signed(parent_curator), + RuntimeOrigin::signed(parent_curator), parent_index, child_index, child_curator, child_fee )); assert_ok!(ChildBounties::accept_curator( - Origin::signed(child_curator), + RuntimeOrigin::signed(child_curator), parent_index, child_index )); @@ -1310,7 +1370,7 @@ fn accept_curator_handles_different_deposit_calculations() { let reserved_before = Balances::reserved_balance(&parent_curator); assert_ok!(ChildBounties::add_child_bounty( - Origin::signed(parent_curator), + RuntimeOrigin::signed(parent_curator), parent_index, child_value, b"12345-p1".to_vec() @@ -1318,14 +1378,14 @@ fn accept_curator_handles_different_deposit_calculations() { System::set_block_number(4); >::on_initialize(4); assert_ok!(ChildBounties::propose_curator( - Origin::signed(parent_curator), + RuntimeOrigin::signed(parent_curator), parent_index, child_index, child_curator, child_fee )); assert_ok!(ChildBounties::accept_curator( - Origin::signed(child_curator), + RuntimeOrigin::signed(child_curator), parent_index, child_index )); @@ -1343,7 +1403,7 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&child_curator, starting_balance); assert_ok!(ChildBounties::add_child_bounty( - Origin::signed(parent_curator), + RuntimeOrigin::signed(parent_curator), parent_index, child_value, b"12345-p1".to_vec() @@ -1351,14 +1411,14 @@ fn accept_curator_handles_different_deposit_calculations() { System::set_block_number(5); >::on_initialize(5); assert_ok!(ChildBounties::propose_curator( - Origin::signed(parent_curator), + RuntimeOrigin::signed(parent_curator), parent_index, child_index, child_curator, child_fee )); assert_ok!(ChildBounties::accept_curator( - Origin::signed(child_curator), + RuntimeOrigin::signed(child_curator), parent_index, child_index )); @@ -1379,7 +1439,7 @@ fn accept_curator_handles_different_deposit_calculations() { Balances::make_free_balance_be(&child_curator, starting_balance); assert_ok!(ChildBounties::add_child_bounty( - Origin::signed(parent_curator), + RuntimeOrigin::signed(parent_curator), parent_index, child_value, b"12345-p1".to_vec() @@ -1387,14 +1447,14 @@ fn accept_curator_handles_different_deposit_calculations() { System::set_block_number(5); >::on_initialize(5); assert_ok!(ChildBounties::propose_curator( - Origin::signed(parent_curator), + RuntimeOrigin::signed(parent_curator), parent_index, child_index, child_curator, child_fee )); assert_ok!(ChildBounties::accept_curator( - Origin::signed(child_curator), + RuntimeOrigin::signed(child_curator), parent_index, child_index )); diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 49da641e3e204..e934924033552 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -181,12 +181,14 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// The outer origin type. - type Origin: From>; + type RuntimeOrigin: From>; /// The outer call dispatch type. 
type Proposal: Parameter - + Dispatchable>::Origin, PostInfo = PostDispatchInfo> - + From> + + Dispatchable< + RuntimeOrigin = >::RuntimeOrigin, + PostInfo = PostDispatchInfo, + > + From> + GetDispatchInfo; /// The outer event type. diff --git a/frame/collective/src/tests.rs b/frame/collective/src/tests.rs index 13b9a24f7889d..19c54e0493c7b 100644 --- a/frame/collective/src/tests.rs +++ b/frame/collective/src/tests.rs @@ -64,7 +64,7 @@ mod mock_democracy { pub trait Config: frame_system::Config + Sized { type RuntimeEvent: From> + IsType<::RuntimeEvent>; - type ExternalMajorityOrigin: EnsureOrigin; + type ExternalMajorityOrigin: EnsureOrigin; } #[pallet::call] @@ -98,7 +98,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -120,7 +120,7 @@ impl frame_system::Config for Test { type MaxConsumers = ConstU32<16>; } impl Config for Test { - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type MotionDuration = ConstU64<3>; @@ -130,7 +130,7 @@ impl Config for Test { type WeightInfo = (); } impl Config for Test { - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type MotionDuration = ConstU64<3>; @@ -144,7 +144,7 @@ impl mock_democracy::Config for Test { type ExternalMajorityOrigin = EnsureProportionAtLeast; } impl Config for Test { - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type MotionDuration = ConstU64<3>; @@ -200,22 +200,28 @@ fn close_works() { let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); System::set_block_number(3); assert_noop!( - Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len), + Collective::close(RuntimeOrigin::signed(4), hash, 0, proposal_weight, proposal_len), Error::::TooEarly ); System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); + assert_ok!(Collective::close( + RuntimeOrigin::signed(4), + hash, + 0, + proposal_weight, + proposal_len + )); assert_eq!( System::events(), @@ -267,17 +273,17 @@ fn proposal_weight_limit_works_on_approve() { // Set 1 as prime voter Prime::::set(Some(1)); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); // With 1's prime vote, this should pass System::set_block_number(4); assert_noop!( Collective::close( - Origin::signed(4), + RuntimeOrigin::signed(4), hash, 0, proposal_weight - Weight::from_ref_time(100), @@ -285,7 +291,13 @@ fn proposal_weight_limit_works_on_approve() { ), Error::::WrongProposalWeight ); - assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); + assert_ok!(Collective::close( 
+ RuntimeOrigin::signed(4), + hash, + 0, + proposal_weight, + proposal_len + )); }) } @@ -302,7 +314,7 @@ fn proposal_weight_limit_ignored_on_disapprove() { let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len @@ -310,7 +322,7 @@ fn proposal_weight_limit_ignored_on_disapprove() { // No votes, this proposal wont pass System::set_block_number(4); assert_ok!(Collective::close( - Origin::signed(4), + RuntimeOrigin::signed(4), hash, 0, proposal_weight - Weight::from_ref_time(100), @@ -327,23 +339,29 @@ fn close_with_prime_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::set_members( - Origin::root(), + RuntimeOrigin::root(), vec![1, 2, 3], Some(3), MaxMembers::get() )); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); + assert_ok!(Collective::close( + RuntimeOrigin::signed(4), + hash, + 0, + proposal_weight, + proposal_len + )); assert_eq!( System::events(), @@ -389,23 +407,29 @@ fn close_with_voting_prime_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::set_members( - Origin::root(), + RuntimeOrigin::root(), vec![1, 2, 3], Some(1), MaxMembers::get() )); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash, 0, proposal_weight, proposal_len)); + assert_ok!(Collective::close( + RuntimeOrigin::signed(4), + hash, + 0, + proposal_weight, + proposal_len + )); assert_eq!( System::events(), @@ -453,25 +477,25 @@ fn close_with_no_prime_but_majority_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(CollectiveMajority::set_members( - Origin::root(), + RuntimeOrigin::root(), vec![1, 2, 3, 4, 5], Some(5), MaxMembers::get() )); assert_ok!(CollectiveMajority::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 5, Box::new(proposal.clone()), proposal_len )); - assert_ok!(CollectiveMajority::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(CollectiveMajority::vote(Origin::signed(2), hash, 0, true)); - assert_ok!(CollectiveMajority::vote(Origin::signed(3), hash, 0, true)); + assert_ok!(CollectiveMajority::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(CollectiveMajority::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(CollectiveMajority::vote(RuntimeOrigin::signed(3), hash, 0, true)); System::set_block_number(4); assert_ok!(CollectiveMajority::close( - Origin::signed(4), + RuntimeOrigin::signed(4), hash, 0, 
proposal_weight, @@ -533,13 +557,13 @@ fn removal_of_old_voters_votes_works() { let hash = BlakeTwo256::hash_of(&proposal); let end = 4; assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) @@ -554,13 +578,13 @@ fn removal_of_old_voters_votes_works() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(2), hash, 1, true)); - assert_ok!(Collective::vote(Origin::signed(3), hash, 1, false)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) @@ -581,18 +605,23 @@ fn removal_of_old_voters_votes_works_with_set_members() { let hash = BlakeTwo256::hash_of(&proposal); let end = 4; assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); - assert_ok!(Collective::set_members(Origin::root(), vec![2, 3, 4], None, MaxMembers::get())); + assert_ok!(Collective::set_members( + RuntimeOrigin::root(), + vec![2, 3, 4], + None, + MaxMembers::get() + )); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) @@ -602,18 +631,23 @@ fn removal_of_old_voters_votes_works_with_set_members() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::propose( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(2), hash, 1, true)); - assert_ok!(Collective::vote(Origin::signed(3), hash, 1, false)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, false)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); - assert_ok!(Collective::set_members(Origin::root(), vec![2, 4], None, MaxMembers::get())); + assert_ok!(Collective::set_members( + RuntimeOrigin::root(), + vec![2, 4], + None, + MaxMembers::get() + )); assert_eq!( Collective::voting(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) @@ -629,7 +663,7 @@ fn propose_works() { let hash = proposal.blake2_256().into(); let end = 4; assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, 
Box::new(proposal.clone()), proposal_len @@ -660,7 +694,7 @@ fn limit_active_proposals() { let proposal = make_proposal(i as u64); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len @@ -669,7 +703,12 @@ fn limit_active_proposals() { let proposal = make_proposal(MaxProposals::get() as u64 + 1); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); assert_noop!( - Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len), + Collective::propose( + RuntimeOrigin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + ), Error::::TooManyProposals ); }) @@ -684,7 +723,12 @@ fn correct_validate_and_get_proposal() { old_count: MaxMembers::get(), }); let length = proposal.encode().len() as u32; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), length)); + assert_ok!(Collective::propose( + RuntimeOrigin::signed(1), + 3, + Box::new(proposal.clone()), + length + )); let hash = BlakeTwo256::hash_of(&proposal); let weight = proposal.get_dispatch_info().weight; @@ -722,7 +766,12 @@ fn motions_ignoring_non_collective_proposals_works() { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); assert_noop!( - Collective::propose(Origin::signed(42), 3, Box::new(proposal.clone()), proposal_len), + Collective::propose( + RuntimeOrigin::signed(42), + 3, + Box::new(proposal.clone()), + proposal_len + ), Error::::NotMember ); }); @@ -735,13 +784,13 @@ fn motions_ignoring_non_collective_votes_works() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); assert_noop!( - Collective::vote(Origin::signed(42), hash, 0, true), + Collective::vote(RuntimeOrigin::signed(42), hash, 0, true), Error::::NotMember, ); }); @@ -755,13 +804,13 @@ fn motions_ignoring_bad_index_collective_vote_works() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); assert_noop!( - Collective::vote(Origin::signed(2), hash, 1, true), + Collective::vote(RuntimeOrigin::signed(2), hash, 1, true), Error::::WrongIndex, ); }); @@ -775,7 +824,7 @@ fn motions_vote_after_works() { let hash: H256 = proposal.blake2_256().into(); let end = 4; assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len @@ -786,25 +835,25 @@ fn motions_vote_after_works() { Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end }) ); // Cast first aye vote. - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end }) ); // Try to cast a duplicate aye vote. assert_noop!( - Collective::vote(Origin::signed(1), hash, 0, true), + Collective::vote(RuntimeOrigin::signed(1), hash, 0, true), Error::::DuplicateVote, ); // Cast a nay vote. 
- assert_ok!(Collective::vote(Origin::signed(1), hash, 0, false)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![1], end }) ); // Try to cast a duplicate nay vote. assert_noop!( - Collective::vote(Origin::signed(1), hash, 0, false), + Collective::vote(RuntimeOrigin::signed(1), hash, 0, false), Error::::DuplicateVote, ); @@ -844,7 +893,7 @@ fn motions_all_first_vote_free_works() { let hash: H256 = proposal.blake2_256().into(); let end = 4; assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len, @@ -856,40 +905,40 @@ fn motions_all_first_vote_free_works() { // For the motion, acc 2's first vote, expecting Ok with Pays::No. let vote_rval: DispatchResultWithPostInfo = - Collective::vote(Origin::signed(2), hash, 0, true); + Collective::vote(RuntimeOrigin::signed(2), hash, 0, true); assert_eq!(vote_rval.unwrap().pays_fee, Pays::No); // Duplicate vote, expecting error with Pays::Yes. let vote_rval: DispatchResultWithPostInfo = - Collective::vote(Origin::signed(2), hash, 0, true); + Collective::vote(RuntimeOrigin::signed(2), hash, 0, true); assert_eq!(vote_rval.unwrap_err().post_info.pays_fee, Pays::Yes); // Modifying vote, expecting ok with Pays::Yes. let vote_rval: DispatchResultWithPostInfo = - Collective::vote(Origin::signed(2), hash, 0, false); + Collective::vote(RuntimeOrigin::signed(2), hash, 0, false); assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); // For the motion, acc 3's first vote, expecting Ok with Pays::No. let vote_rval: DispatchResultWithPostInfo = - Collective::vote(Origin::signed(3), hash, 0, true); + Collective::vote(RuntimeOrigin::signed(3), hash, 0, true); assert_eq!(vote_rval.unwrap().pays_fee, Pays::No); // acc 3 modify the vote, expecting Ok with Pays::Yes. let vote_rval: DispatchResultWithPostInfo = - Collective::vote(Origin::signed(3), hash, 0, false); + Collective::vote(RuntimeOrigin::signed(3), hash, 0, false); assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); // Test close() Extrincis | Check DispatchResultWithPostInfo with Pay Info let proposal_weight = proposal.get_dispatch_info().weight; let close_rval: DispatchResultWithPostInfo = - Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len); + Collective::close(RuntimeOrigin::signed(2), hash, 0, proposal_weight, proposal_len); assert_eq!(close_rval.unwrap().pays_fee, Pays::No); // trying to close the proposal, which is already closed. 
// Expecting error "ProposalMissing" with Pays::Yes let close_rval: DispatchResultWithPostInfo = - Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len); + Collective::close(RuntimeOrigin::signed(2), hash, 0, proposal_weight, proposal_len); assert_eq!(close_rval.unwrap_err().post_info.pays_fee, Pays::Yes); }); } @@ -902,16 +951,22 @@ fn motions_reproposing_disapproved_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false)); - assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, false)); + assert_ok!(Collective::close( + RuntimeOrigin::signed(2), + hash, + 0, + proposal_weight, + proposal_len + )); assert_eq!(*Collective::proposals(), vec![]); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len @@ -933,14 +988,20 @@ fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { // // Failed to execute with only 2 yes votes. assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); - assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Collective::close( + RuntimeOrigin::signed(2), + hash, + 0, + proposal_weight, + proposal_len + )); assert_eq!( System::events(), vec![ @@ -981,15 +1042,21 @@ fn motions_approval_with_enough_votes_and_lower_voting_threshold_works() { // Executed with 3 yes votes. 
assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(1), hash, 1, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 1, true)); - assert_ok!(Collective::vote(Origin::signed(3), hash, 1, true)); - assert_ok!(Collective::close(Origin::signed(2), hash, 1, proposal_weight, proposal_len)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 1, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 1, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(3), hash, 1, true)); + assert_ok!(Collective::close( + RuntimeOrigin::signed(2), + hash, + 1, + proposal_weight, + proposal_len + )); assert_eq!( System::events(), vec![ @@ -1046,14 +1113,20 @@ fn motions_disapproval_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false)); - assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, false)); + assert_ok!(Collective::close( + RuntimeOrigin::signed(2), + hash, + 0, + proposal_weight, + proposal_len + )); assert_eq!( System::events(), @@ -1099,14 +1172,20 @@ fn motions_approval_works() { let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); - assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Collective::close( + RuntimeOrigin::signed(2), + hash, + 0, + proposal_weight, + proposal_len + )); assert_eq!( System::events(), @@ -1154,7 +1233,7 @@ fn motion_with_no_votes_closes_with_disapproval() { let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, Box::new(proposal.clone()), proposal_len @@ -1172,7 +1251,7 @@ fn motion_with_no_votes_closes_with_disapproval() { // Closing the motion too early is not possible because it has neither // an approving or disapproving simple majority due to the lack of votes. assert_noop!( - Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len), + Collective::close(RuntimeOrigin::signed(2), hash, 0, proposal_weight, proposal_len), Error::::TooEarly ); @@ -1180,7 +1259,13 @@ fn motion_with_no_votes_closes_with_disapproval() { let closing_block = System::block_number() + MotionDuration::get(); System::set_block_number(closing_block); // we can successfully close the motion. 
- assert_ok!(Collective::close(Origin::signed(2), hash, 0, proposal_weight, proposal_len)); + assert_ok!(Collective::close( + RuntimeOrigin::signed(2), + hash, + 0, + proposal_weight, + proposal_len + )); // Events show that the close ended in a disapproval. assert_eq!( @@ -1208,28 +1293,28 @@ fn close_disapprove_does_not_care_about_weight_or_len() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); // First we make the proposal succeed - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); // It will not close with bad weight/len information assert_noop!( - Collective::close(Origin::signed(2), hash, 0, Weight::zero(), 0), + Collective::close(RuntimeOrigin::signed(2), hash, 0, Weight::zero(), 0), Error::::WrongProposalLength, ); assert_noop!( - Collective::close(Origin::signed(2), hash, 0, Weight::zero(), proposal_len), + Collective::close(RuntimeOrigin::signed(2), hash, 0, Weight::zero(), proposal_len), Error::::WrongProposalWeight, ); // Now we make the proposal fail - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, false)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, false)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, false)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, false)); // It can close even if the weight/len information is bad - assert_ok!(Collective::close(Origin::signed(2), hash, 0, Weight::zero(), 0)); + assert_ok!(Collective::close(RuntimeOrigin::signed(2), hash, 0, Weight::zero(), 0)); }) } @@ -1240,16 +1325,16 @@ fn disapprove_proposal_works() { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, Box::new(proposal.clone()), proposal_len )); // Proposal would normally succeed - assert_ok!(Collective::vote(Origin::signed(1), hash, 0, true)); - assert_ok!(Collective::vote(Origin::signed(2), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Collective::vote(RuntimeOrigin::signed(2), hash, 0, true)); // But Root can disapprove and remove it anyway - assert_ok!(Collective::disapprove_proposal(Origin::root(), hash)); + assert_ok!(Collective::disapprove_proposal(RuntimeOrigin::root(), hash)); assert_eq!( System::events(), vec![ diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 0fbf821b2e284..2b5cf6353256c 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1306,7 +1306,7 @@ where } fn call_runtime(&self, call: ::RuntimeCall) -> DispatchResultWithPostInfo { - let mut origin: T::Origin = RawOrigin::Signed(self.address().clone()).into(); + let mut origin: T::RuntimeOrigin = RawOrigin::Signed(self.address().clone()).into(); origin.add_filter(T::CallFilter::contains); call.dispatch(origin) } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 842fa02f34529..abe91a9046ce2 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -246,7 +246,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The 
overarching call type. - type RuntimeCall: Dispatchable + type RuntimeCall: Dispatchable + GetDispatchInfo + codec::Decode + IsType<::RuntimeCall>; diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index d5f4df161b723..12029abd59e46 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -277,7 +277,7 @@ impl frame_system::Config for Test { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -489,7 +489,7 @@ fn calling_plain_account_fails() { let base_cost = <::WeightInfo as WeightInfo>::call(); assert_eq!( - Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, None, Vec::new()), + Contracts::call(RuntimeOrigin::signed(ALICE), BOB, 0, GAS_LIMIT, None, Vec::new()), Err(DispatchErrorWithPostInfo { error: Error::::ContractNotFound.into(), post_info: PostDispatchInfo { @@ -512,14 +512,14 @@ fn instantiate_and_call_and_deposit_event() { // We determine the storage deposit limit after uploading because it depends on ALICEs free // balance which is changed by uploading a module. - assert_ok!(Contracts::upload_code(Origin::signed(ALICE), wasm, None)); + assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, None)); // Drop previous events initialize_block(2); // Check at the end to get hash on error easily assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), value, GAS_LIMIT, None, @@ -603,7 +603,7 @@ fn deposit_event_max_value_limit() { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 30_000, GAS_LIMIT, None, @@ -615,7 +615,7 @@ fn deposit_event_max_value_limit() { // Call contract with allowed storage value. assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT * 2, // we are copying a huge buffer, @@ -626,7 +626,7 @@ fn deposit_event_max_value_limit() { // Call contract with too large a storage value. assert_err_ignore_postinfo!( Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr, 0, GAS_LIMIT, @@ -646,7 +646,7 @@ fn run_out_of_gas() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 100 * min_balance, GAS_LIMIT, None, @@ -660,7 +660,7 @@ fn run_out_of_gas() { // loops forever. assert_err_ignore_postinfo!( Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr, // newly created account 0, Weight::from_ref_time(1_000_000_000_000), @@ -680,12 +680,12 @@ fn instantiate_unique_trie_id() { ExtBuilder::default().existential_deposit(500).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - Contracts::upload_code(Origin::signed(ALICE), wasm, None).unwrap(); + Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, None).unwrap(); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Instantiate the contract and store its trie id for later comparison. assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -698,7 +698,7 @@ fn instantiate_unique_trie_id() { // Try to instantiate it again without termination should yield an error. 
assert_err_ignore_postinfo!( Contracts::instantiate( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -711,7 +711,7 @@ fn instantiate_unique_trie_id() { // Terminate the contract. assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -721,7 +721,7 @@ fn instantiate_unique_trie_id() { // Re-Instantiate after termination. assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -743,7 +743,7 @@ fn storage_max_value_limit() { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 30_000, GAS_LIMIT, None, @@ -756,7 +756,7 @@ fn storage_max_value_limit() { // Call contract with allowed storage value. assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT * 2, // we are copying a huge buffer @@ -767,7 +767,7 @@ fn storage_max_value_limit() { // Call contract with too large a storage value. assert_err_ignore_postinfo!( Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr, 0, GAS_LIMIT, @@ -792,7 +792,7 @@ fn deploy_and_call_other_contract() { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -801,7 +801,7 @@ fn deploy_and_call_other_contract() { vec![], )); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -816,7 +816,7 @@ fn deploy_and_call_other_contract() { // Call BOB contract, which attempts to instantiate and call the callee contract and // makes various assertions on the results from those calls. assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), caller_addr.clone(), 0, GAS_LIMIT, @@ -917,7 +917,7 @@ fn delegate_call() { // Instantiate the 'caller' assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 300_000, GAS_LIMIT, None, @@ -927,13 +927,13 @@ fn delegate_call() { )); // Only upload 'callee' code assert_ok!(Contracts::upload_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), callee_wasm, Some(codec::Compact(100_000)), )); assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), caller_addr.clone(), 1337, GAS_LIMIT, @@ -951,7 +951,7 @@ fn cannot_self_destruct_through_draning() { // Instantiate the BOB contract. assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 1_000, GAS_LIMIT, None, @@ -967,7 +967,7 @@ fn cannot_self_destruct_through_draning() { // Call BOB which makes it send all funds to the zero address // The contract code asserts that the transfer was successful assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -992,7 +992,7 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { // Instantiate the BOB contract. 
assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -1009,7 +1009,7 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { // Create 100 bytes of storage with a price of per byte assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1021,7 +1021,7 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { // because the refund removes the reserved existential deposit. This should not happen. DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 500); assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1045,7 +1045,7 @@ fn cannot_self_destruct_by_refund_after_slash() { let min_balance = ::Currency::minimum_balance(); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -1057,7 +1057,7 @@ fn cannot_self_destruct_by_refund_after_slash() { // create 100 more reserved balance assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1075,7 +1075,7 @@ fn cannot_self_destruct_by_refund_after_slash() { // trigger a refund of 50 which would bring the contract below min when actually refunded assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1131,7 +1131,7 @@ fn cannot_self_destruct_while_live() { // Instantiate the BOB contract. assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -1147,7 +1147,14 @@ fn cannot_self_destruct_while_live() { // Call BOB with input data, forcing it make a recursive call to itself to // self-destruct, resulting in a trap. assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![0],), + Contracts::call( + RuntimeOrigin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + None, + vec![0], + ), Error::::ContractTrapped, ); @@ -1165,7 +1172,7 @@ fn self_destruct_works() { // Instantiate the BOB contract. assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -1183,7 +1190,7 @@ fn self_destruct_works() { // Call BOB without input data which triggers termination. assert_matches!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), + Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), Ok(_) ); @@ -1258,7 +1265,7 @@ fn destroy_contract_and_transfer_funds() { // Create let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 200_000, GAS_LIMIT, None, @@ -1270,7 +1277,7 @@ fn destroy_contract_and_transfer_funds() { // This deploys the BOB contract, which in turn deploys the CHARLIE contract during // construction. assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 200_000, GAS_LIMIT, None, @@ -1286,7 +1293,7 @@ fn destroy_contract_and_transfer_funds() { // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. 
assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr_bob, 0, GAS_LIMIT, @@ -1308,7 +1315,7 @@ fn cannot_self_destruct_in_constructor() { // Fail to instantiate the BOB because the contructor calls seal_terminate. assert_err_ignore_postinfo!( Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -1330,7 +1337,7 @@ fn crypto_hashes() { // Instantiate the CRYPTO_HASHES contract. assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -1379,7 +1386,7 @@ fn transfer_return_code() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1418,7 +1425,7 @@ fn call_return_code() { let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1444,7 +1451,7 @@ fn call_return_code() { assert_return_code!(result, RuntimeReturnCode::NotCallable); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(CHARLIE), + RuntimeOrigin::signed(CHARLIE), min_balance * 100, GAS_LIMIT, None, @@ -1545,7 +1552,7 @@ fn instantiate_return_code() { let callee_hash = callee_hash.as_ref().to_vec(); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1555,7 +1562,7 @@ fn instantiate_return_code() { ),); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1645,7 +1652,7 @@ fn disabled_chain_extension_wont_deploy() { TestExtension::disable(); assert_err_ignore_postinfo!( Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 3 * min_balance, GAS_LIMIT, None, @@ -1665,7 +1672,7 @@ fn disabled_chain_extension_errors_on_call() { let min_balance = ::Currency::minimum_balance(); let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1676,7 +1683,7 @@ fn disabled_chain_extension_errors_on_call() { let addr = Contracts::contract_address(&ALICE, &hash, &[]); TestExtension::disable(); assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), + Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), Error::::NoChainExtension, ); }); @@ -1689,7 +1696,7 @@ fn chain_extension_works() { let min_balance = ::Currency::minimum_balance(); let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1792,7 +1799,7 @@ fn chain_extension_works() { // We set the MSB part to 2 (instead of 0) which routes the request into the third extension assert_err_ignore_postinfo!( Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1811,7 +1818,7 @@ fn chain_extension_temp_storage_works() { let min_balance = ::Currency::minimum_balance(); let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); 
assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1845,7 +1852,7 @@ fn lazy_removal_works() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1863,7 +1870,7 @@ fn lazy_removal_works() { // Terminate the contract assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1914,7 +1921,7 @@ fn lazy_batch_removal_works() { for i in 0..3u8 { assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -1933,7 +1940,7 @@ fn lazy_batch_removal_works() { // Terminate the contract. Contract info should be gone, but value should be still there // as the lazy removal did not run, yet. assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -1976,7 +1983,7 @@ fn lazy_removal_partial_remove_works() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2003,7 +2010,7 @@ fn lazy_removal_partial_remove_works() { // Terminate the contract assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -2088,7 +2095,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2106,7 +2113,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { // Terminate the contract assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -2156,7 +2163,7 @@ fn lazy_removal_does_not_use_all_weight() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2189,7 +2196,7 @@ fn lazy_removal_does_not_use_all_weight() { // Terminate the contract assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -2236,7 +2243,7 @@ fn deletion_queue_full() { let _ = Balances::deposit_creating(&ALICE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2252,7 +2259,7 @@ fn deletion_queue_full() { // Terminate the contract should fail assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), + Contracts::call(RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, vec![],), Error::::DeletionQueueFull, ); @@ -2270,7 +2277,7 @@ fn refcounter() { // Create two contracts with the same code and check that they do in fact share it. 
assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2279,7 +2286,7 @@ fn refcounter() { vec![0], )); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2291,7 +2298,7 @@ fn refcounter() { // Sharing should also work with the usual instantiate call assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2307,18 +2314,39 @@ fn refcounter() { let addr2 = Contracts::contract_address(&ALICE, &code_hash, &[2]); // Terminating one contract should decrement the refcount - assert_ok!(Contracts::call(Origin::signed(ALICE), addr0, 0, GAS_LIMIT, None, vec![])); + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr0, + 0, + GAS_LIMIT, + None, + vec![] + )); assert_refcount!(code_hash, 2); // remove another one - assert_ok!(Contracts::call(Origin::signed(ALICE), addr1, 0, GAS_LIMIT, None, vec![])); + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr1, + 0, + GAS_LIMIT, + None, + vec![] + )); assert_refcount!(code_hash, 1); // Pristine code should still be there crate::PristineCode::::get(code_hash).unwrap(); // remove the last contract - assert_ok!(Contracts::call(Origin::signed(ALICE), addr2, 0, GAS_LIMIT, None, vec![])); + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr2, + 0, + GAS_LIMIT, + None, + vec![] + )); assert_refcount!(code_hash, 0); // refcount is `0` but code should still exists because it needs to be removed manually @@ -2337,7 +2365,7 @@ fn reinstrument_does_charge() { let code_len = wasm.len() as u32; assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2386,7 +2414,7 @@ fn debug_message_works() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 30_000, GAS_LIMIT, None, @@ -2409,7 +2437,7 @@ fn debug_message_logging_disabled() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 30_000, GAS_LIMIT, None, @@ -2422,7 +2450,7 @@ fn debug_message_logging_disabled() { let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![], false); assert_matches!(result.result, Ok(_)); // the dispatchables always run without debugging - assert_ok!(Contracts::call(Origin::signed(ALICE), addr, 0, GAS_LIMIT, None, vec![])); + assert_ok!(Contracts::call(RuntimeOrigin::signed(ALICE), addr, 0, GAS_LIMIT, None, vec![])); assert!(result.debug_message.is_empty()); }); } @@ -2434,7 +2462,7 @@ fn debug_message_invalid_utf8() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 30_000, GAS_LIMIT, None, @@ -2458,7 +2486,7 @@ fn gas_estimation_nested_call_fixed_limit() { let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2469,7 
+2497,7 @@ fn gas_estimation_nested_call_fixed_limit() { let addr_caller = Contracts::contract_address(&ALICE, &caller_hash, &[0]); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2527,7 +2555,7 @@ fn gas_estimation_call_runtime() { let _ = Balances::deposit_creating(&CHARLIE, 1000 * min_balance); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2538,7 +2566,7 @@ fn gas_estimation_call_runtime() { let addr_caller = Contracts::contract_address(&ALICE, &caller_hash, &[0]); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), min_balance * 100, GAS_LIMIT, None, @@ -2595,7 +2623,7 @@ fn ecdsa_recover() { // Instantiate the ecdsa_recover contract. assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 100_000, GAS_LIMIT, None, @@ -2649,7 +2677,7 @@ fn upload_code_works() { assert!(!>::contains_key(code_hash)); assert_ok!(Contracts::upload_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(1_000)) )); @@ -2687,7 +2715,7 @@ fn upload_code_limit_too_low() { initialize_block(2); assert_noop!( - Contracts::upload_code(Origin::signed(ALICE), wasm, Some(codec::Compact(100))), + Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(100))), >::StorageDepositLimitExhausted, ); @@ -2706,7 +2734,7 @@ fn upload_code_not_enough_balance() { initialize_block(2); assert_noop!( - Contracts::upload_code(Origin::signed(ALICE), wasm, Some(codec::Compact(1_000))), + Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(1_000))), >::StorageDepositNotEnoughFunds, ); @@ -2725,13 +2753,13 @@ fn remove_code_works() { initialize_block(2); assert_ok!(Contracts::upload_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(1_000)) )); assert!(>::contains_key(code_hash)); - assert_ok!(Contracts::remove_code(Origin::signed(ALICE), code_hash)); + assert_ok!(Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash)); assert!(!>::contains_key(code_hash)); assert_eq!( @@ -2779,13 +2807,13 @@ fn remove_code_wrong_origin() { initialize_block(2); assert_ok!(Contracts::upload_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(1_000)) )); assert_noop!( - Contracts::remove_code(Origin::signed(BOB), code_hash), + Contracts::remove_code(RuntimeOrigin::signed(BOB), code_hash), sp_runtime::traits::BadOrigin, ); @@ -2818,7 +2846,7 @@ fn remove_code_in_use() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -2831,7 +2859,7 @@ fn remove_code_in_use() { initialize_block(2); assert_noop!( - Contracts::remove_code(Origin::signed(ALICE), code_hash), + Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), >::CodeInUse, ); @@ -2850,7 +2878,7 @@ fn remove_code_not_found() { initialize_block(2); assert_noop!( - Contracts::remove_code(Origin::signed(ALICE), code_hash), + Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash), >::CodeNotFound, ); @@ -2870,7 +2898,7 @@ fn instantiate_with_zero_balance_works() { // Instantiate the BOB contract. 
assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -2963,7 +2991,7 @@ fn instantiate_with_below_existential_deposit_works() { // Instantiate the BOB contract. assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 50, GAS_LIMIT, None, @@ -3061,7 +3089,7 @@ fn storage_deposit_works() { let mut deposit = ::Currency::minimum_balance(); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -3076,7 +3104,7 @@ fn storage_deposit_works() { // Create storage assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 42, GAS_LIMIT, @@ -3090,7 +3118,7 @@ fn storage_deposit_works() { // Add more storage (but also remove some) assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -3103,7 +3131,7 @@ fn storage_deposit_works() { // Remove more storage (but also add some) assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -3210,7 +3238,7 @@ fn set_code_extrinsic() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -3220,7 +3248,7 @@ fn set_code_extrinsic() { )); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - assert_ok!(Contracts::upload_code(Origin::signed(ALICE), new_wasm, None,)); + assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), new_wasm, None,)); // Drop previous events initialize_block(2); @@ -3231,7 +3259,7 @@ fn set_code_extrinsic() { // only root can execute this extrinsic assert_noop!( - Contracts::set_code(Origin::signed(ALICE), addr.clone(), new_code_hash), + Contracts::set_code(RuntimeOrigin::signed(ALICE), addr.clone(), new_code_hash), sp_runtime::traits::BadOrigin, ); assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); @@ -3241,7 +3269,7 @@ fn set_code_extrinsic() { // contract must exist assert_noop!( - Contracts::set_code(Origin::root(), BOB, new_code_hash), + Contracts::set_code(RuntimeOrigin::root(), BOB, new_code_hash), >::ContractNotFound, ); assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); @@ -3251,7 +3279,7 @@ fn set_code_extrinsic() { // new code hash must exist assert_noop!( - Contracts::set_code(Origin::root(), addr.clone(), Default::default()), + Contracts::set_code(RuntimeOrigin::root(), addr.clone(), Default::default()), >::CodeNotFound, ); assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); @@ -3260,7 +3288,7 @@ fn set_code_extrinsic() { assert_eq!(System::events(), vec![],); // successful call - assert_ok!(Contracts::set_code(Origin::root(), addr.clone(), new_code_hash)); + assert_ok!(Contracts::set_code(RuntimeOrigin::root(), addr.clone(), new_code_hash)); assert_eq!(>::get(&addr).unwrap().code_hash, new_code_hash); assert_refcount!(&code_hash, 0); assert_refcount!(&new_code_hash, 1); @@ -3287,7 +3315,7 @@ fn call_after_killed_account_needs_funding() { let min_balance = ::Currency::minimum_balance(); assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 700, GAS_LIMIT, None, @@ -3312,7 +3340,7 @@ fn call_after_killed_account_needs_funding() { // account in order to send balance there. 
assert_err_ignore_postinfo!( Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), min_balance - 1, GAS_LIMIT, @@ -3324,7 +3352,7 @@ fn call_after_killed_account_needs_funding() { // Sending zero should work as it does not do a transfer assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, @@ -3334,7 +3362,7 @@ fn call_after_killed_account_needs_funding() { // Sending minimum balance should work assert_ok!(Contracts::call( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), addr.clone(), min_balance, GAS_LIMIT, @@ -3416,12 +3444,12 @@ fn contract_reverted() { let input = (flags.bits(), buffer).encode(); // We just upload the code for later use - assert_ok!(Contracts::upload_code(Origin::signed(ALICE), wasm.clone(), None)); + assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm.clone(), None)); // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( Contracts::instantiate( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -3435,7 +3463,7 @@ fn contract_reverted() { // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 0, GAS_LIMIT, None, @@ -3483,7 +3511,14 @@ fn contract_reverted() { // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, None, input.clone()), + Contracts::call( + RuntimeOrigin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + None, + input.clone() + ), >::ContractReverted, ); @@ -3503,7 +3538,7 @@ fn code_rejected_error_works() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_noop!( - Contracts::upload_code(Origin::signed(ALICE), wasm.clone(), None), + Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm.clone(), None), >::CodeRejected, ); @@ -3537,7 +3572,7 @@ fn set_code_hash() { // Instantiate the 'caller' assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), + RuntimeOrigin::signed(ALICE), 300_000, GAS_LIMIT, None, @@ -3546,7 +3581,7 @@ fn set_code_hash() { vec![], )); // upload new code - assert_ok!(Contracts::upload_code(Origin::signed(ALICE), new_wasm.clone(), None)); + assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), new_wasm.clone(), None)); System::reset_events(); diff --git a/frame/conviction-voting/src/tests.rs b/frame/conviction-voting/src/tests.rs index 7a97a46086398..7a3f80442014a 100644 --- a/frame/conviction-voting/src/tests.rs +++ b/frame/conviction-voting/src/tests.rs @@ -65,7 +65,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -269,28 +269,28 @@ fn basic_stuff() { #[test] fn basic_voting_works() { new_test_ext().execute_with(|| { - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(2, 5))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(2, 5))); assert_eq!(tally(3), Tally::from_parts(10, 0, 2)); - assert_ok!(Voting::vote(Origin::signed(1), 3, nay(2, 5))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(2, 5))); assert_eq!(tally(3), Tally::from_parts(0, 10, 0)); assert_eq!(Balances::usable_balance(1), 8); - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(5, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, 
aye(5, 1))); assert_eq!(tally(3), Tally::from_parts(5, 0, 5)); - assert_ok!(Voting::vote(Origin::signed(1), 3, nay(5, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(5, 1))); assert_eq!(tally(3), Tally::from_parts(0, 5, 0)); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0))); assert_eq!(tally(3), Tally::from_parts(1, 0, 10)); - assert_ok!(Voting::vote(Origin::signed(1), 3, nay(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(10, 0))); assert_eq!(tally(3), Tally::from_parts(0, 1, 0)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::remove_vote(Origin::signed(1), None, 3)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); - assert_ok!(Voting::unlock(Origin::signed(1), class(3), 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -298,28 +298,28 @@ fn basic_voting_works() { #[test] fn voting_balance_gets_locked() { new_test_ext().execute_with(|| { - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(2, 5))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(2, 5))); assert_eq!(tally(3), Tally::from_parts(10, 0, 2)); - assert_ok!(Voting::vote(Origin::signed(1), 3, nay(2, 5))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(2, 5))); assert_eq!(tally(3), Tally::from_parts(0, 10, 0)); assert_eq!(Balances::usable_balance(1), 8); - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(5, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(5, 1))); assert_eq!(tally(3), Tally::from_parts(5, 0, 5)); - assert_ok!(Voting::vote(Origin::signed(1), 3, nay(5, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(5, 1))); assert_eq!(tally(3), Tally::from_parts(0, 5, 0)); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0))); assert_eq!(tally(3), Tally::from_parts(1, 0, 10)); - assert_ok!(Voting::vote(Origin::signed(1), 3, nay(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, nay(10, 0))); assert_eq!(tally(3), Tally::from_parts(0, 1, 0)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::remove_vote(Origin::signed(1), None, 3)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), None, 3)); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); - assert_ok!(Voting::unlock(Origin::signed(1), class(3), 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), class(3), 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -327,12 +327,12 @@ fn voting_balance_gets_locked() { #[test] fn successful_but_zero_conviction_vote_balance_can_be_unlocked() { new_test_ext().execute_with(|| { - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(1, 1))); - assert_ok!(Voting::vote(Origin::signed(2), 3, nay(20, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(1, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, nay(20, 0))); let c = class(3); Polls::set(vec![(3, Completed(3, false))].into_iter().collect()); - assert_ok!(Voting::remove_vote(Origin::signed(2), Some(c), 3)); - assert_ok!(Voting::unlock(Origin::signed(2), c, 2)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(2), Some(c), 3)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(2), c, 2)); 
assert_eq!(Balances::usable_balance(2), 20); }); } @@ -340,12 +340,12 @@ fn successful_but_zero_conviction_vote_balance_can_be_unlocked() { #[test] fn unsuccessful_conviction_vote_balance_can_be_unlocked() { new_test_ext().execute_with(|| { - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(1, 1))); - assert_ok!(Voting::vote(Origin::signed(2), 3, nay(20, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(1, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 3, nay(20, 0))); let c = class(3); Polls::set(vec![(3, Completed(3, false))].into_iter().collect()); - assert_ok!(Voting::remove_vote(Origin::signed(1), Some(c), 3)); - assert_ok!(Voting::unlock(Origin::signed(1), c, 1)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(c), 3)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), c, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -354,17 +354,17 @@ fn unsuccessful_conviction_vote_balance_can_be_unlocked() { fn successful_conviction_vote_balance_stays_locked_for_correct_time() { new_test_ext().execute_with(|| { for i in 1..=5 { - assert_ok!(Voting::vote(Origin::signed(i), 3, aye(10, i as u8))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(i), 3, aye(10, i as u8))); } let c = class(3); Polls::set(vec![(3, Completed(3, true))].into_iter().collect()); for i in 1..=5 { - assert_ok!(Voting::remove_vote(Origin::signed(i), Some(c), 3)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(i), Some(c), 3)); } for block in 1..=(3 + 5 * 3) { run_to(block); for i in 1..=5 { - assert_ok!(Voting::unlock(Origin::signed(i), c, i)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(i), c, i)); let expired = block >= (3 << (i - 1)) + 3; assert_eq!(Balances::usable_balance(i), i * 10 - if expired { 0 } else { 10 }); } @@ -385,20 +385,20 @@ fn classwise_delegation_works() { .into_iter() .collect(), ); - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 5)); - assert_ok!(Voting::delegate(Origin::signed(1), 1, 3, Conviction::Locked1x, 5)); - assert_ok!(Voting::delegate(Origin::signed(1), 2, 4, Conviction::Locked1x, 5)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 5)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 1, 3, Conviction::Locked1x, 5)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 2, 4, Conviction::Locked1x, 5)); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::vote(Origin::signed(2), 0, aye(10, 0))); - assert_ok!(Voting::vote(Origin::signed(2), 1, nay(10, 0))); - assert_ok!(Voting::vote(Origin::signed(2), 2, nay(10, 0))); - assert_ok!(Voting::vote(Origin::signed(3), 0, nay(10, 0))); - assert_ok!(Voting::vote(Origin::signed(3), 1, aye(10, 0))); - assert_ok!(Voting::vote(Origin::signed(3), 2, nay(10, 0))); - assert_ok!(Voting::vote(Origin::signed(4), 0, nay(10, 0))); - assert_ok!(Voting::vote(Origin::signed(4), 1, nay(10, 0))); - assert_ok!(Voting::vote(Origin::signed(4), 2, aye(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 0, aye(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 1, nay(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 2, nay(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(3), 0, nay(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(3), 1, aye(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(3), 2, nay(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(4), 0, nay(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(4), 1, nay(10, 0))); + 
assert_ok!(Voting::vote(RuntimeOrigin::signed(4), 2, aye(10, 0))); // 4 hasn't voted yet assert_eq!( @@ -414,7 +414,7 @@ fn classwise_delegation_works() { ); // 4 votes nay to 3. - assert_ok!(Voting::vote(Origin::signed(4), 3, nay(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(4), 3, nay(10, 0))); assert_eq!( Polls::get(), vec![ @@ -428,8 +428,8 @@ fn classwise_delegation_works() { ); // Redelegate for class 2 to account 3. - assert_ok!(Voting::undelegate(Origin::signed(1), 2)); - assert_ok!(Voting::delegate(Origin::signed(1), 2, 3, Conviction::Locked1x, 5)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 2)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 2, 3, Conviction::Locked1x, 5)); assert_eq!( Polls::get(), vec![ @@ -443,12 +443,12 @@ fn classwise_delegation_works() { ); // Redelegating with a lower lock does not forget previous lock and updates correctly. - assert_ok!(Voting::undelegate(Origin::signed(1), 0)); - assert_ok!(Voting::undelegate(Origin::signed(1), 1)); - assert_ok!(Voting::undelegate(Origin::signed(1), 2)); - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 3)); - assert_ok!(Voting::delegate(Origin::signed(1), 1, 3, Conviction::Locked1x, 3)); - assert_ok!(Voting::delegate(Origin::signed(1), 2, 4, Conviction::Locked1x, 3)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 1)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 2)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 3)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 1, 3, Conviction::Locked1x, 3)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 2, 4, Conviction::Locked1x, 3)); assert_eq!( Polls::get(), vec![ @@ -462,24 +462,24 @@ fn classwise_delegation_works() { ); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); // unlock does nothing since the delegation already took place. assert_eq!(Balances::usable_balance(1), 5); // Redelegating with higher amount extends previous lock. 
- assert_ok!(Voting::undelegate(Origin::signed(1), 0)); - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 6)); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 6)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 4); - assert_ok!(Voting::undelegate(Origin::signed(1), 1)); - assert_ok!(Voting::delegate(Origin::signed(1), 1, 3, Conviction::Locked1x, 7)); - assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 1)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 1, 3, Conviction::Locked1x, 7)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); assert_eq!(Balances::usable_balance(1), 3); - assert_ok!(Voting::undelegate(Origin::signed(1), 2)); - assert_ok!(Voting::delegate(Origin::signed(1), 2, 4, Conviction::Locked1x, 8)); - assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 2)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 2, 4, Conviction::Locked1x, 8)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 2); assert_eq!( Polls::get(), @@ -499,14 +499,14 @@ fn classwise_delegation_works() { fn redelegation_after_vote_ending_should_keep_lock() { new_test_ext().execute_with(|| { Polls::set(vec![(0, Ongoing(Tally::new(0), 0))].into_iter().collect()); - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 5)); - assert_ok!(Voting::vote(Origin::signed(2), 0, aye(10, 1))); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 5)); + assert_ok!(Voting::vote(RuntimeOrigin::signed(2), 0, aye(10, 1))); Polls::set(vec![(0, Completed(1, true))].into_iter().collect()); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::undelegate(Origin::signed(1), 0)); - assert_ok!(Voting::delegate(Origin::signed(1), 0, 3, Conviction::Locked1x, 3)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 3, Conviction::Locked1x, 3)); assert_eq!(Balances::usable_balance(1), 5); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 5); }); } @@ -523,9 +523,9 @@ fn lock_amalgamation_valid_with_multiple_removed_votes() { .into_iter() .collect(), ); - assert_ok!(Voting::vote(Origin::signed(1), 0, aye(5, 1))); - assert_ok!(Voting::vote(Origin::signed(1), 1, aye(10, 1))); - assert_ok!(Voting::vote(Origin::signed(1), 2, aye(5, 2))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 0, aye(5, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 1, aye(10, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 2, aye(5, 2))); assert_eq!(Balances::usable_balance(1), 0); Polls::set( @@ -533,28 +533,28 @@ fn lock_amalgamation_valid_with_multiple_removed_votes() { .into_iter() .collect(), ); - assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 0)); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 0)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 1)); - 
assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 2)); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 2)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(3); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert!(Balances::usable_balance(1) <= 5); run_to(7); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -562,25 +562,25 @@ fn lock_amalgamation_valid_with_multiple_removed_votes() { #[test] fn lock_amalgamation_valid_with_multiple_delegations() { new_test_ext().execute_with(|| { - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 5)); - assert_ok!(Voting::undelegate(Origin::signed(1), 0)); - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 10)); - assert_ok!(Voting::undelegate(Origin::signed(1), 0)); - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked2x, 5)); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 5)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 10)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked2x, 5)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); run_to(3); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert!(Balances::usable_balance(1) <= 5); run_to(7); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -589,32 +589,32 @@ fn lock_amalgamation_valid_with_multiple_delegations() { fn lock_amalgamation_valid_with_move_roundtrip_to_delegation() { new_test_ext().execute_with(|| { Polls::set(vec![(0, Ongoing(Tally::new(0), 0))].into_iter().collect()); - assert_ok!(Voting::vote(Origin::signed(1), 0, aye(5, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 0, aye(5, 1))); Polls::set(vec![(0, Completed(1, true))].into_iter().collect()); - assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 0)); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 0)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 5); - 
assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 10)); - assert_ok!(Voting::undelegate(Origin::signed(1), 0)); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 10)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); Polls::set(vec![(1, Ongoing(Tally::new(0), 0))].into_iter().collect()); - assert_ok!(Voting::vote(Origin::signed(1), 1, aye(5, 2))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 1, aye(5, 2))); Polls::set(vec![(1, Completed(1, true))].into_iter().collect()); - assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 1)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 1)); run_to(3); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert!(Balances::usable_balance(1) <= 5); run_to(7); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -622,33 +622,33 @@ fn lock_amalgamation_valid_with_move_roundtrip_to_delegation() { #[test] fn lock_amalgamation_valid_with_move_roundtrip_to_casting() { new_test_ext().execute_with(|| { - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 5)); - assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 5)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 5); Polls::set(vec![(0, Ongoing(Tally::new(0), 0))].into_iter().collect()); - assert_ok!(Voting::vote(Origin::signed(1), 0, aye(10, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 0, aye(10, 1))); Polls::set(vec![(0, Completed(1, true))].into_iter().collect()); - assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 0)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 0)); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked2x, 10)); - assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked2x, 10)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); run_to(3); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert!(Balances::usable_balance(1) <= 5); run_to(7); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -656,30 +656,30 @@ fn lock_amalgamation_valid_with_move_roundtrip_to_casting() { #[test] fn 
lock_aggregation_over_different_classes_with_delegation_works() { new_test_ext().execute_with(|| { - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::Locked1x, 5)); - assert_ok!(Voting::delegate(Origin::signed(1), 1, 2, Conviction::Locked2x, 5)); - assert_ok!(Voting::delegate(Origin::signed(1), 2, 2, Conviction::Locked1x, 10)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::Locked1x, 5)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 1, 2, Conviction::Locked2x, 5)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 2, 2, Conviction::Locked1x, 10)); - assert_ok!(Voting::undelegate(Origin::signed(1), 0)); - assert_ok!(Voting::undelegate(Origin::signed(1), 1)); - assert_ok!(Voting::undelegate(Origin::signed(1), 2)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 1)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 2)); run_to(3); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 5); run_to(7); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -696,34 +696,34 @@ fn lock_aggregation_over_different_classes_with_casting_works() { .into_iter() .collect(), ); - assert_ok!(Voting::vote(Origin::signed(1), 0, aye(5, 1))); - assert_ok!(Voting::vote(Origin::signed(1), 1, aye(10, 1))); - assert_ok!(Voting::vote(Origin::signed(1), 2, aye(5, 2))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 0, aye(5, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 1, aye(10, 1))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 2, aye(5, 2))); Polls::set( vec![(0, Completed(1, true)), (1, Completed(1, true)), (2, Completed(1, true))] .into_iter() .collect(), ); - assert_ok!(Voting::remove_vote(Origin::signed(1), Some(0), 0)); - assert_ok!(Voting::remove_vote(Origin::signed(1), Some(1), 1)); - assert_ok!(Voting::remove_vote(Origin::signed(1), Some(2), 2)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 0)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(1), 1)); + assert_ok!(Voting::remove_vote(RuntimeOrigin::signed(1), Some(2), 2)); run_to(3); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); + 
assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 0); run_to(6); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 5); run_to(7); - assert_ok!(Voting::unlock(Origin::signed(1), 0, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 1, 1)); - assert_ok!(Voting::unlock(Origin::signed(1), 2, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 0, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 1, 1)); + assert_ok!(Voting::unlock(RuntimeOrigin::signed(1), 2, 1)); assert_eq!(Balances::usable_balance(1), 10); }); } @@ -731,21 +731,30 @@ fn lock_aggregation_over_different_classes_with_casting_works() { #[test] fn errors_with_vote_work() { new_test_ext().execute_with(|| { - assert_noop!(Voting::vote(Origin::signed(1), 0, aye(10, 0)), Error::::NotOngoing); - assert_noop!(Voting::vote(Origin::signed(1), 1, aye(10, 0)), Error::::NotOngoing); - assert_noop!(Voting::vote(Origin::signed(1), 2, aye(10, 0)), Error::::NotOngoing); assert_noop!( - Voting::vote(Origin::signed(1), 3, aye(11, 0)), + Voting::vote(RuntimeOrigin::signed(1), 0, aye(10, 0)), + Error::::NotOngoing + ); + assert_noop!( + Voting::vote(RuntimeOrigin::signed(1), 1, aye(10, 0)), + Error::::NotOngoing + ); + assert_noop!( + Voting::vote(RuntimeOrigin::signed(1), 2, aye(10, 0)), + Error::::NotOngoing + ); + assert_noop!( + Voting::vote(RuntimeOrigin::signed(1), 3, aye(11, 0)), Error::::InsufficientFunds ); - assert_ok!(Voting::delegate(Origin::signed(1), 0, 2, Conviction::None, 10)); + assert_ok!(Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::None, 10)); assert_noop!( - Voting::vote(Origin::signed(1), 3, aye(10, 0)), + Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0)), Error::::AlreadyDelegating ); - assert_ok!(Voting::undelegate(Origin::signed(1), 0)); + assert_ok!(Voting::undelegate(RuntimeOrigin::signed(1), 0)); Polls::set( vec![ (0, Ongoing(Tally::new(0), 0)), @@ -756,11 +765,11 @@ fn errors_with_vote_work() { .into_iter() .collect(), ); - assert_ok!(Voting::vote(Origin::signed(1), 0, aye(10, 0))); - assert_ok!(Voting::vote(Origin::signed(1), 1, aye(10, 0))); - assert_ok!(Voting::vote(Origin::signed(1), 2, aye(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 0, aye(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 1, aye(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 2, aye(10, 0))); assert_noop!( - Voting::vote(Origin::signed(1), 3, aye(10, 0)), + Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0)), Error::::MaxVotesReached ); }); @@ -770,21 +779,21 @@ fn errors_with_vote_work() { fn errors_with_delegating_work() { new_test_ext().execute_with(|| { assert_noop!( - Voting::delegate(Origin::signed(1), 0, 2, Conviction::None, 11), + Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::None, 11), Error::::InsufficientFunds ); assert_noop!( - Voting::delegate(Origin::signed(1), 3, 2, Conviction::None, 10), + Voting::delegate(RuntimeOrigin::signed(1), 3, 2, Conviction::None, 10), Error::::BadClass ); - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(10, 0))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 0))); assert_noop!( - 
Voting::delegate(Origin::signed(1), 0, 2, Conviction::None, 10), + Voting::delegate(RuntimeOrigin::signed(1), 0, 2, Conviction::None, 10), Error::::AlreadyVoting ); - assert_noop!(Voting::undelegate(Origin::signed(1), 0), Error::::NotDelegating); + assert_noop!(Voting::undelegate(RuntimeOrigin::signed(1), 0), Error::::NotDelegating); }); } @@ -792,31 +801,37 @@ fn errors_with_delegating_work() { fn remove_other_vote_works() { new_test_ext().execute_with(|| { assert_noop!( - Voting::remove_other_vote(Origin::signed(2), 1, 0, 3), + Voting::remove_other_vote(RuntimeOrigin::signed(2), 1, 0, 3), Error::::NotVoter ); - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(10, 2))); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 2))); assert_noop!( - Voting::remove_other_vote(Origin::signed(2), 1, 0, 3), + Voting::remove_other_vote(RuntimeOrigin::signed(2), 1, 0, 3), Error::::NoPermission ); Polls::set(vec![(3, Completed(1, true))].into_iter().collect()); run_to(6); assert_noop!( - Voting::remove_other_vote(Origin::signed(2), 1, 0, 3), + Voting::remove_other_vote(RuntimeOrigin::signed(2), 1, 0, 3), Error::::NoPermissionYet ); run_to(7); - assert_ok!(Voting::remove_other_vote(Origin::signed(2), 1, 0, 3)); + assert_ok!(Voting::remove_other_vote(RuntimeOrigin::signed(2), 1, 0, 3)); }); } #[test] fn errors_with_remove_vote_work() { new_test_ext().execute_with(|| { - assert_noop!(Voting::remove_vote(Origin::signed(1), Some(0), 3), Error::::NotVoter); - assert_ok!(Voting::vote(Origin::signed(1), 3, aye(10, 2))); + assert_noop!( + Voting::remove_vote(RuntimeOrigin::signed(1), Some(0), 3), + Error::::NotVoter + ); + assert_ok!(Voting::vote(RuntimeOrigin::signed(1), 3, aye(10, 2))); Polls::set(vec![(3, Completed(1, true))].into_iter().collect()); - assert_noop!(Voting::remove_vote(Origin::signed(1), None, 3), Error::::ClassNeeded); + assert_noop!( + Voting::remove_vote(RuntimeOrigin::signed(1), None, 3), + Error::::ClassNeeded + ); }); } diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index ab4075bc82fc5..ab7ee3331e319 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -198,7 +198,7 @@ benchmarks! { let origin = T::CancellationOrigin::successful_origin(); let referendum_index = add_referendum::(0)?; assert_ok!(Democracy::::referendum_status(referendum_index)); - }: _(origin, referendum_index) + }: _(origin, referendum_index) verify { // Referendum has been canceled assert_noop!( @@ -225,7 +225,7 @@ benchmarks! { // Add a referendum of our proposal. let referendum_index = add_referendum::(0)?; assert_ok!(Democracy::::referendum_status(referendum_index)); - }: _(origin, hash, Some(referendum_index)) + }: _(origin, hash, Some(referendum_index)) verify { // Referendum has been canceled assert_noop!( @@ -250,7 +250,7 @@ benchmarks! { proposal_hash, (T::BlockNumber::zero(), addresses), ); - }: _(origin, proposal_hash) + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -259,7 +259,7 @@ benchmarks! { external_propose_majority { let origin = T::ExternalMajorityOrigin::successful_origin(); let proposal_hash = T::Hashing::hash_of(&0); - }: _(origin, proposal_hash) + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -268,7 +268,7 @@ benchmarks! 
{ external_propose_default { let origin = T::ExternalDefaultOrigin::successful_origin(); let proposal_hash = T::Hashing::hash_of(&0); - }: _(origin, proposal_hash) + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -283,7 +283,7 @@ benchmarks! { let origin_fast_track = T::FastTrackOrigin::successful_origin(); let voting_period = T::FastTrackVotingPeriod::get(); let delay = 0u32; - }: _(origin_fast_track, proposal_hash, voting_period, delay.into()) + }: _(origin_fast_track, proposal_hash, voting_period, delay.into()) verify { assert_eq!(Democracy::::referendum_count(), 1, "referendum not created") } @@ -306,7 +306,7 @@ benchmarks! { let origin = T::VetoOrigin::successful_origin(); ensure!(NextExternal::::get().is_some(), "no external proposal"); - }: _(origin, proposal_hash) + }: _(origin, proposal_hash) verify { assert!(NextExternal::::get().is_none()); let (_, new_vetoers) = >::get(&proposal_hash).ok_or("no blacklist")?; @@ -322,7 +322,7 @@ benchmarks! { } let cancel_origin = T::CancelProposalOrigin::successful_origin(); - }: _(cancel_origin, 0) + }: _(cancel_origin, 0) cancel_referendum { let referendum_index = add_referendum::(0)?; diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index f6696065bfc87..3c1be19103998 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -253,7 +253,9 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + Sized { - type Proposal: Parameter + Dispatchable + From>; + type Proposal: Parameter + + Dispatchable + + From>; type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Currency type for this pallet. @@ -289,25 +291,25 @@ pub mod pallet { /// Origin from which the next tabled referendum may be forced. This is a normal /// "super-majority-required" referendum. - type ExternalOrigin: EnsureOrigin; + type ExternalOrigin: EnsureOrigin; /// Origin from which the next tabled referendum may be forced; this allows for the tabling /// of a majority-carries referendum. - type ExternalMajorityOrigin: EnsureOrigin; + type ExternalMajorityOrigin: EnsureOrigin; /// Origin from which the next tabled referendum may be forced; this allows for the tabling /// of a negative-turnout-bias (default-carries) referendum. - type ExternalDefaultOrigin: EnsureOrigin; + type ExternalDefaultOrigin: EnsureOrigin; /// Origin from which the next majority-carries (or more permissive) referendum may be /// tabled to vote according to the `FastTrackVotingPeriod` asynchronously in a similar /// manner to the emergency origin. It retains its threshold method. - type FastTrackOrigin: EnsureOrigin; + type FastTrackOrigin: EnsureOrigin; /// Origin from which the next majority-carries (or more permissive) referendum may be /// tabled to vote immediately and asynchronously in a similar manner to the emergency /// origin. It retains its threshold method. - type InstantOrigin: EnsureOrigin; + type InstantOrigin: EnsureOrigin; /// Indicator for whether an emergency origin is even allowed to happen. Some chains may /// want to set this permanently to `false`, others may want to condition it on things such @@ -320,13 +322,13 @@ pub mod pallet { type FastTrackVotingPeriod: Get; /// Origin from which any referendum may be cancelled in an emergency. - type CancellationOrigin: EnsureOrigin; + type CancellationOrigin: EnsureOrigin; /// Origin from which proposals may be blacklisted. 
- type BlacklistOrigin: EnsureOrigin; + type BlacklistOrigin: EnsureOrigin; /// Origin from which a proposal may be cancelled and its backers slashed. - type CancelProposalOrigin: EnsureOrigin; + type CancelProposalOrigin: EnsureOrigin; /// Origin for anyone able to veto proposals. /// @@ -334,7 +336,7 @@ pub mod pallet { /// /// The number of Vetoers for a proposal must be small, extrinsics are weighted according to /// [MAX_VETOERS](./const.MAX_VETOERS.html) - type VetoOrigin: EnsureOrigin; + type VetoOrigin: EnsureOrigin; /// Period in blocks where an external proposal may not be re-submitted after being vetoed. #[pallet::constant] @@ -345,7 +347,7 @@ pub mod pallet { type PreimageByteDeposit: Get>; /// An origin that can provide a preimage using operational extrinsics. - type OperationalPreimageOrigin: EnsureOrigin; + type OperationalPreimageOrigin: EnsureOrigin; /// Handler for the unbalanced reduction when slashing a preimage deposit. type Slash: OnUnbalanced>; diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index e3ed046813e23..17b35ee3c38cd 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -85,7 +85,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -111,7 +111,7 @@ parameter_types! { } impl pallet_scheduler::Config for Test { type RuntimeEvent = RuntimeEvent; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type PalletsOrigin = OriginCaller; type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; @@ -240,7 +240,7 @@ fn set_balance_proposal_hash(value: u64) -> H256 { fn set_balance_proposal_hash_and_note(value: u64) -> H256 { let p = set_balance_proposal(value); let h = BlakeTwo256::hash(&p[..]); - match Democracy::note_preimage(Origin::signed(6), p) { + match Democracy::note_preimage(RuntimeOrigin::signed(6), p) { Ok(_) => (), Err(x) if x == Error::::DuplicatePreimage.into() => (), Err(x) => panic!("{:?}", x), @@ -249,11 +249,11 @@ fn set_balance_proposal_hash_and_note(value: u64) -> H256 { } fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose(Origin::signed(who), set_balance_proposal_hash(value), delay) + Democracy::propose(RuntimeOrigin::signed(who), set_balance_proposal_hash(value), delay) } fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose(Origin::signed(who), set_balance_proposal_hash_and_note(value), delay) + Democracy::propose(RuntimeOrigin::signed(who), set_balance_proposal_hash_and_note(value), delay) } fn next_block() { diff --git a/frame/democracy/src/tests/cancellation.rs b/frame/democracy/src/tests/cancellation.rs index 9035e17c5c80b..b98e51aa3d4d1 100644 --- a/frame/democracy/src/tests/cancellation.rs +++ b/frame/democracy/src/tests/cancellation.rs @@ -28,8 +28,8 @@ fn cancel_referendum_should_work() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_ok!(Democracy::cancel_referendum(Origin::root(), r.into())); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::cancel_referendum(RuntimeOrigin::root(), r.into())); assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); @@ -51,14 +51,17 @@ fn cancel_queued_should_work() { // start of 2 => next referendum scheduled. 
fast_forward_to(2); - assert_ok!(Democracy::vote(Origin::signed(1), 0, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 0, aye(1))); fast_forward_to(4); assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); - assert_noop!(Democracy::cancel_queued(Origin::root(), 1), Error::::ProposalMissing); - assert_ok!(Democracy::cancel_queued(Origin::root(), 0)); + assert_noop!( + Democracy::cancel_queued(RuntimeOrigin::root(), 1), + Error::::ProposalMissing + ); + assert_ok!(Democracy::cancel_queued(RuntimeOrigin::root(), 0)); assert!(pallet_scheduler::Agenda::::get(6)[0].is_none()); }); } @@ -75,8 +78,8 @@ fn emergency_cancel_should_work() { ); assert!(Democracy::referendum_status(r).is_ok()); - assert_noop!(Democracy::emergency_cancel(Origin::signed(3), r), BadOrigin); - assert_ok!(Democracy::emergency_cancel(Origin::signed(4), r)); + assert_noop!(Democracy::emergency_cancel(RuntimeOrigin::signed(3), r), BadOrigin); + assert_ok!(Democracy::emergency_cancel(RuntimeOrigin::signed(4), r)); assert!(Democracy::referendum_info(r).is_none()); // some time later... @@ -89,7 +92,7 @@ fn emergency_cancel_should_work() { ); assert!(Democracy::referendum_status(r).is_ok()); assert_noop!( - Democracy::emergency_cancel(Origin::signed(4), r), + Democracy::emergency_cancel(RuntimeOrigin::signed(4), r), Error::::AlreadyCanceled, ); }); diff --git a/frame/democracy/src/tests/delegation.rs b/frame/democracy/src/tests/delegation.rs index 3551ca8f91123..4c5ee79286055 100644 --- a/frame/democracy/src/tests/delegation.rs +++ b/frame/democracy/src/tests/delegation.rs @@ -29,33 +29,33 @@ fn single_proposal_should_work_with_delegation() { fast_forward_to(2); // Delegate first vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); // Delegate a second vote. - assert_ok!(Democracy::delegate(Origin::signed(3), 1, Conviction::None, 30)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(3), 1, Conviction::None, 30)); assert_eq!(tally(r), Tally { ayes: 6, nays: 0, turnout: 60 }); // Reduce first vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 10)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 10)); assert_eq!(tally(r), Tally { ayes: 5, nays: 0, turnout: 50 }); // Second vote delegates to first; we don't do tiered delegation, so it doesn't get used. - assert_ok!(Democracy::delegate(Origin::signed(3), 2, Conviction::None, 30)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(3), 2, Conviction::None, 30)); assert_eq!(tally(r), Tally { ayes: 2, nays: 0, turnout: 20 }); // Main voter cancels their vote - assert_ok!(Democracy::remove_vote(Origin::signed(1), r)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(1), r)); assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); // First delegator delegates half funds with conviction; nothing changes yet. 
- assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked1x, 10)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::Locked1x, 10)); assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); // Main voter reinstates their vote - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 11, nays: 0, turnout: 20 }); }); } @@ -64,7 +64,7 @@ fn single_proposal_should_work_with_delegation() { fn self_delegation_not_allowed() { new_test_ext().execute_with(|| { assert_noop!( - Democracy::delegate(Origin::signed(1), 1, Conviction::None, 10), + Democracy::delegate(RuntimeOrigin::signed(1), 1, Conviction::None, 10), Error::::Nonsense, ); }); @@ -80,14 +80,14 @@ fn cyclic_delegation_should_unwind() { fast_forward_to(2); // Check behavior with cycle. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); - assert_ok!(Democracy::delegate(Origin::signed(3), 2, Conviction::None, 30)); - assert_ok!(Democracy::delegate(Origin::signed(1), 3, Conviction::None, 10)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(3), 2, Conviction::None, 30)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(1), 3, Conviction::None, 10)); let r = 0; - assert_ok!(Democracy::undelegate(Origin::signed(3))); - assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3))); - assert_ok!(Democracy::undelegate(Origin::signed(1))); - assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); + assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(3))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(3), r, aye(3))); + assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, nay(1))); // Delegated vote is counted. assert_eq!(tally(r), Tally { ayes: 3, nays: 3, turnout: 60 }); @@ -105,13 +105,13 @@ fn single_proposal_should_work_with_vote_and_delegation() { fast_forward_to(2); let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_ok!(Democracy::vote(Origin::signed(2), r, nay(2))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, nay(2))); assert_eq!(tally(r), Tally { ayes: 1, nays: 2, turnout: 30 }); // Delegate vote. - assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(2), r)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); // Delegated vote replaces the explicit vote. assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); }); @@ -125,12 +125,12 @@ fn single_proposal_should_work_with_undelegation() { assert_ok!(propose_set_balance_and_note(1, 2, 1)); // Delegate and undelegate vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); - assert_ok!(Democracy::undelegate(Origin::signed(2))); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(2))); fast_forward_to(2); let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); // Delegated vote is not counted. 
assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); @@ -143,11 +143,11 @@ fn single_proposal_should_work_with_delegation_and_vote() { new_test_ext().execute_with(|| { let r = begin_referendum(); // Delegate, undelegate and vote. - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); - assert_ok!(Democracy::undelegate(Origin::signed(2))); - assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); + assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(2))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, aye(2))); // Delegated vote is not counted. assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); }); @@ -159,8 +159,8 @@ fn conviction_should_be_honored_in_delegation() { new_test_ext().execute_with(|| { let r = begin_referendum(); // Delegate and vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked6x, 20)); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::Locked6x, 20)); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); // Delegated vote is huge. assert_eq!(tally(r), Tally { ayes: 121, nays: 0, turnout: 30 }); }); @@ -171,8 +171,12 @@ fn split_vote_delegation_should_be_ignored() { // If transactor voted, delegated vote is overwritten. new_test_ext().execute_with(|| { let r = begin_referendum(); - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked6x, 20)); - assert_ok!(Democracy::vote(Origin::signed(1), r, AccountVote::Split { aye: 10, nay: 0 })); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::Locked6x, 20)); + assert_ok!(Democracy::vote( + RuntimeOrigin::signed(1), + r, + AccountVote::Split { aye: 10, nay: 0 } + )); // Delegated vote is huge. assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); }); @@ -184,8 +188,8 @@ fn redelegation_keeps_lock() { new_test_ext().execute_with(|| { let r = begin_referendum(); // Delegate and vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked6x, 20)); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::Locked6x, 20)); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); // Delegated vote is huge. assert_eq!(tally(r), Tally { ayes: 121, nays: 0, turnout: 30 }); @@ -196,14 +200,14 @@ fn redelegation_keeps_lock() { assert_eq!(VotingOf::::get(2).prior(), &prior_lock); // Delegate someone else at a lower conviction and amount - assert_ok!(Democracy::delegate(Origin::signed(2), 3, Conviction::None, 10)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 3, Conviction::None, 10)); // 6x prior should appear w/ locked balance. 
prior_lock.accumulate(98, 20); assert_eq!(VotingOf::::get(2).prior(), &prior_lock); assert_eq!(VotingOf::::get(2).locked_balance(), 20); // Unlock shouldn't work - assert_ok!(Democracy::unlock(Origin::signed(2), 2)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(2), 2)); assert_eq!(VotingOf::::get(2).prior(), &prior_lock); assert_eq!(VotingOf::::get(2).locked_balance(), 20); @@ -211,7 +215,7 @@ fn redelegation_keeps_lock() { // Now unlock can remove the prior lock and reduce the locked amount. assert_eq!(VotingOf::::get(2).prior(), &prior_lock); - assert_ok!(Democracy::unlock(Origin::signed(2), 2)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(2), 2)); assert_eq!(VotingOf::::get(2).prior(), &vote::PriorLock::default()); assert_eq!(VotingOf::::get(2).locked_balance(), 10); }); diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs index 6ac34a38723ce..fda555b9c3459 100644 --- a/frame/democracy/src/tests/external_proposing.rs +++ b/frame/democracy/src/tests/external_proposing.rs @@ -24,53 +24,56 @@ fn veto_external_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_ok!(Democracy::external_propose( - Origin::signed(2), + RuntimeOrigin::signed(2), set_balance_proposal_hash_and_note(2), )); assert!(>::exists()); let h = set_balance_proposal_hash_and_note(2); - assert_ok!(Democracy::veto_external(Origin::signed(3), h)); + assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(3), h)); // cancelled. assert!(!>::exists()); // fails - same proposal can't be resubmitted. assert_noop!( - Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(2),), Error::::ProposalBlacklisted ); fast_forward_to(1); // fails as we're still in cooloff period. assert_noop!( - Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(2),), Error::::ProposalBlacklisted ); fast_forward_to(2); // works; as we're out of the cooloff period. assert_ok!(Democracy::external_propose( - Origin::signed(2), + RuntimeOrigin::signed(2), set_balance_proposal_hash_and_note(2), )); assert!(>::exists()); // 3 can't veto the same thing twice. - assert_noop!(Democracy::veto_external(Origin::signed(3), h), Error::::AlreadyVetoed); + assert_noop!( + Democracy::veto_external(RuntimeOrigin::signed(3), h), + Error::::AlreadyVetoed + ); // 4 vetoes. - assert_ok!(Democracy::veto_external(Origin::signed(4), h)); + assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(4), h)); // cancelled again. assert!(!>::exists()); fast_forward_to(3); // same proposal fails as we're still in cooloff assert_noop!( - Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(2),), Error::::ProposalBlacklisted ); // different proposal works fine. 
assert_ok!(Democracy::external_propose( - Origin::signed(2), + RuntimeOrigin::signed(2), set_balance_proposal_hash_and_note(3), )); }); @@ -82,18 +85,21 @@ fn external_blacklisting_should_work() { System::set_block_number(0); assert_ok!(Democracy::external_propose( - Origin::signed(2), + RuntimeOrigin::signed(2), set_balance_proposal_hash_and_note(2), )); let hash = set_balance_proposal_hash(2); - assert_ok!(Democracy::blacklist(Origin::root(), hash, None)); + assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, None)); fast_forward_to(2); assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); assert_noop!( - Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash_and_note(2),), + Democracy::external_propose( + RuntimeOrigin::signed(2), + set_balance_proposal_hash_and_note(2), + ), Error::::ProposalBlacklisted, ); }); @@ -104,15 +110,15 @@ fn external_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose(Origin::signed(1), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(1), set_balance_proposal_hash(2),), BadOrigin, ); assert_ok!(Democracy::external_propose( - Origin::signed(2), + RuntimeOrigin::signed(2), set_balance_proposal_hash_and_note(2), )); assert_noop!( - Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(1),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(1),), Error::::DuplicateProposal ); fast_forward_to(2); @@ -134,11 +140,14 @@ fn external_majority_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_majority(Origin::signed(1), set_balance_proposal_hash(2)), + Democracy::external_propose_majority( + RuntimeOrigin::signed(1), + set_balance_proposal_hash(2) + ), BadOrigin, ); assert_ok!(Democracy::external_propose_majority( - Origin::signed(3), + RuntimeOrigin::signed(3), set_balance_proposal_hash_and_note(2) )); fast_forward_to(2); @@ -160,11 +169,14 @@ fn external_default_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_default(Origin::signed(3), set_balance_proposal_hash(2)), + Democracy::external_propose_default( + RuntimeOrigin::signed(3), + set_balance_proposal_hash(2) + ), BadOrigin, ); assert_ok!(Democracy::external_propose_default( - Origin::signed(1), + RuntimeOrigin::signed(1), set_balance_proposal_hash_and_note(2) )); fast_forward_to(2); @@ -186,7 +198,7 @@ fn external_and_public_interleaving_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_ok!(Democracy::external_propose( - Origin::signed(2), + RuntimeOrigin::signed(2), set_balance_proposal_hash_and_note(1), )); assert_ok!(propose_set_balance_and_note(6, 2, 2)); @@ -206,7 +218,7 @@ fn external_and_public_interleaving_works() { ); // replenish external assert_ok!(Democracy::external_propose( - Origin::signed(2), + RuntimeOrigin::signed(2), set_balance_proposal_hash_and_note(3), )); @@ -240,7 +252,7 @@ fn external_and_public_interleaving_works() { ); // replenish external assert_ok!(Democracy::external_propose( - Origin::signed(2), + RuntimeOrigin::signed(2), set_balance_proposal_hash_and_note(5), )); @@ -259,7 +271,7 @@ fn external_and_public_interleaving_works() { ); // replenish both assert_ok!(Democracy::external_propose( - Origin::signed(2), + RuntimeOrigin::signed(2), set_balance_proposal_hash_and_note(7), )); 
assert_ok!(propose_set_balance_and_note(6, 4, 2)); @@ -281,7 +293,7 @@ fn external_and_public_interleaving_works() { assert_ok!(propose_set_balance_and_note(6, 6, 2)); // cancel external let h = set_balance_proposal_hash_and_note(7); - assert_ok!(Democracy::veto_external(Origin::signed(3), h)); + assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(3), h)); fast_forward_to(12); diff --git a/frame/democracy/src/tests/fast_tracking.rs b/frame/democracy/src/tests/fast_tracking.rs index caf83c6d46120..8fef985c8561c 100644 --- a/frame/democracy/src/tests/fast_tracking.rs +++ b/frame/democracy/src/tests/fast_tracking.rs @@ -25,15 +25,15 @@ fn fast_track_referendum_works() { System::set_block_number(0); let h = set_balance_proposal_hash_and_note(2); assert_noop!( - Democracy::fast_track(Origin::signed(5), h, 3, 2), + Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), Error::<Test>::ProposalMissing ); assert_ok!(Democracy::external_propose_majority( - Origin::signed(3), + RuntimeOrigin::signed(3), set_balance_proposal_hash_and_note(2) )); - assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); - assert_ok!(Democracy::fast_track(Origin::signed(5), h, 2, 0)); + assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(1), h, 3, 2), BadOrigin); + assert_ok!(Democracy::fast_track(RuntimeOrigin::signed(5), h, 2, 0)); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { @@ -53,25 +53,25 @@ fn instant_referendum_works() { System::set_block_number(0); let h = set_balance_proposal_hash_and_note(2); assert_noop!( - Democracy::fast_track(Origin::signed(5), h, 3, 2), + Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), Error::<Test>::ProposalMissing ); assert_ok!(Democracy::external_propose_majority( - Origin::signed(3), + RuntimeOrigin::signed(3), set_balance_proposal_hash_and_note(2) )); - assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); - assert_noop!(Democracy::fast_track(Origin::signed(5), h, 1, 0), BadOrigin); + assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(1), h, 3, 2), BadOrigin); + assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(5), h, 1, 0), BadOrigin); assert_noop!( - Democracy::fast_track(Origin::signed(6), h, 1, 0), + Democracy::fast_track(RuntimeOrigin::signed(6), h, 1, 0), Error::<Test>::InstantNotAllowed ); INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true); assert_noop!( - Democracy::fast_track(Origin::signed(6), h, 0, 0), + Democracy::fast_track(RuntimeOrigin::signed(6), h, 0, 0), Error::<Test>::VotingPeriodLow ); - assert_ok!(Democracy::fast_track(Origin::signed(6), h, 1, 0)); + assert_ok!(Democracy::fast_track(RuntimeOrigin::signed(6), h, 1, 0)); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { @@ -102,13 +102,13 @@ fn instant_next_block_referendum_backed() { // propose with majority origin assert_ok!(Democracy::external_propose_majority( - Origin::signed(majority_origin_id), + RuntimeOrigin::signed(majority_origin_id), proposal_hash )); // fast track with instant origin and voting period pointing to the next block assert_ok!(Democracy::fast_track( - Origin::signed(instant_origin_id), + RuntimeOrigin::signed(instant_origin_id), proposal_hash, voting_period, delay @@ -145,11 +145,11 @@ fn fast_track_referendum_fails_when_no_simple_majority() { System::set_block_number(0); let h = set_balance_proposal_hash_and_note(2); assert_ok!(Democracy::external_propose( - Origin::signed(2), + RuntimeOrigin::signed(2), set_balance_proposal_hash_and_note(2) )); assert_noop!( -
Democracy::fast_track(Origin::signed(5), h, 3, 2), + Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), Error::::NotSimpleMajority ); }); diff --git a/frame/democracy/src/tests/lock_voting.rs b/frame/democracy/src/tests/lock_voting.rs index 0718734367314..de1137f03fd38 100644 --- a/frame/democracy/src/tests/lock_voting.rs +++ b/frame/democracy/src/tests/lock_voting.rs @@ -47,11 +47,11 @@ fn lock_voting_should_work() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); - assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); - assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3, 30))); - assert_ok!(Democracy::vote(Origin::signed(4), r, aye(2, 40))); - assert_ok!(Democracy::vote(Origin::signed(5), r, nay(1, 50))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, nay(5, 10))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, aye(4, 20))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(3), r, aye(3, 30))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(4), r, aye(2, 40))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, nay(1, 50))); assert_eq!(tally(r), Tally { ayes: 250, nays: 100, turnout: 150 }); // All balances are currently locked. @@ -62,20 +62,20 @@ fn lock_voting_should_work() { fast_forward_to(2); // Referendum passed; 1 and 5 didn't get their way and can now reap and unlock. - assert_ok!(Democracy::remove_vote(Origin::signed(1), r)); - assert_ok!(Democracy::unlock(Origin::signed(1), 1)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(1), r)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 1)); // Anyone can reap and unlock anyone else's in this context. - assert_ok!(Democracy::remove_other_vote(Origin::signed(2), 5, r)); - assert_ok!(Democracy::unlock(Origin::signed(2), 5)); + assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(2), 5, r)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(2), 5)); // 2, 3, 4 got their way with the vote, so they cannot be reaped by others. assert_noop!( - Democracy::remove_other_vote(Origin::signed(1), 2, r), + Democracy::remove_other_vote(RuntimeOrigin::signed(1), 2, r), Error::::NoPermission ); // However, they can be unvoted by the owner, though it will make no difference to the lock. - assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); - assert_ok!(Democracy::unlock(Origin::signed(2), 2)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(2), r)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(2), 2)); assert_eq!(Balances::locks(1), vec![]); assert_eq!(Balances::locks(2), vec![the_lock(20)]); @@ -87,35 +87,35 @@ fn lock_voting_should_work() { fast_forward_to(7); // No change yet... 
assert_noop!( - Democracy::remove_other_vote(Origin::signed(1), 4, r), + Democracy::remove_other_vote(RuntimeOrigin::signed(1), 4, r), Error::::NoPermission ); - assert_ok!(Democracy::unlock(Origin::signed(1), 4)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 4)); assert_eq!(Balances::locks(4), vec![the_lock(40)]); fast_forward_to(8); // 4 should now be able to reap and unlock - assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 4, r)); - assert_ok!(Democracy::unlock(Origin::signed(1), 4)); + assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(1), 4, r)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 4)); assert_eq!(Balances::locks(4), vec![]); fast_forward_to(13); assert_noop!( - Democracy::remove_other_vote(Origin::signed(1), 3, r), + Democracy::remove_other_vote(RuntimeOrigin::signed(1), 3, r), Error::::NoPermission ); - assert_ok!(Democracy::unlock(Origin::signed(1), 3)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 3)); assert_eq!(Balances::locks(3), vec![the_lock(30)]); fast_forward_to(14); - assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 3, r)); - assert_ok!(Democracy::unlock(Origin::signed(1), 3)); + assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(1), 3, r)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 3)); assert_eq!(Balances::locks(3), vec![]); // 2 doesn't need to reap_vote here because it was already done before. fast_forward_to(25); - assert_ok!(Democracy::unlock(Origin::signed(1), 2)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 2)); assert_eq!(Balances::locks(2), vec![the_lock(20)]); fast_forward_to(26); - assert_ok!(Democracy::unlock(Origin::signed(1), 2)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(1), 2)); assert_eq!(Balances::locks(2), vec![]); }); } @@ -130,13 +130,13 @@ fn no_locks_without_conviction_should_work() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(0, 10))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(0, 10))); fast_forward_to(2); assert_eq!(Balances::free_balance(42), 2); - assert_ok!(Democracy::remove_other_vote(Origin::signed(2), 1, r)); - assert_ok!(Democracy::unlock(Origin::signed(2), 1)); + assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(2), 1, r)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(2), 1)); assert_eq!(Balances::locks(1), vec![]); }); } @@ -150,11 +150,11 @@ fn lock_voting_should_work_with_delegation() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); - assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); - assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3, 30))); - assert_ok!(Democracy::delegate(Origin::signed(4), 2, Conviction::Locked2x, 40)); - assert_ok!(Democracy::vote(Origin::signed(5), r, nay(1, 50))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, nay(5, 10))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, aye(4, 20))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(3), r, aye(3, 30))); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(4), 2, Conviction::Locked2x, 40)); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, nay(1, 50))); assert_eq!(tally(r), Tally { ayes: 250, nays: 100, turnout: 150 }); @@ -173,7 +173,7 @@ fn setup_three_referenda() -> (u32, u32, u32) { VoteThreshold::SimpleMajority, 0, ); - assert_ok!(Democracy::vote(Origin::signed(5), r1, aye(4, 10))); + 
assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r1, aye(4, 10))); let r2 = Democracy::inject_referendum( 2, @@ -181,7 +181,7 @@ fn setup_three_referenda() -> (u32, u32, u32) { VoteThreshold::SimpleMajority, 0, ); - assert_ok!(Democracy::vote(Origin::signed(5), r2, aye(3, 20))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r2, aye(3, 20))); let r3 = Democracy::inject_referendum( 2, @@ -189,7 +189,7 @@ fn setup_three_referenda() -> (u32, u32, u32) { VoteThreshold::SimpleMajority, 0, ); - assert_ok!(Democracy::vote(Origin::signed(5), r3, aye(2, 50))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r3, aye(2, 50))); fast_forward_to(2); @@ -206,36 +206,36 @@ fn prior_lockvotes_should_be_enforced() { fast_forward_to(7); assert_noop!( - Democracy::remove_other_vote(Origin::signed(1), 5, r.2), + Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.2), Error::::NoPermission ); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(50)]); fast_forward_to(8); - assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.2)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.2)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); fast_forward_to(13); assert_noop!( - Democracy::remove_other_vote(Origin::signed(1), 5, r.1), + Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.1), Error::::NoPermission ); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); fast_forward_to(14); - assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.1)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.1)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); fast_forward_to(25); assert_noop!( - Democracy::remove_other_vote(Origin::signed(1), 5, r.0), + Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.0), Error::::NoPermission ); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); fast_forward_to(26); - assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.0)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(1), 5, r.0)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } @@ -249,27 +249,27 @@ fn single_consolidation_of_lockvotes_should_work_as_before() { // r.2 locked 50 until 2 + 2 * 3 = #8 fast_forward_to(7); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.2)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(50)]); fast_forward_to(8); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); fast_forward_to(13); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); - 
assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.1)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); fast_forward_to(14); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); fast_forward_to(25); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.0)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); fast_forward_to(26); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } @@ -282,20 +282,20 @@ fn multi_consolidation_of_lockvotes_should_be_conservative() { // r.1 locked 20 until 2 + 4 * 3 = #14 // r.2 locked 50 until 2 + 2 * 3 = #8 - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.2)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.1)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.0)); fast_forward_to(8); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 20); fast_forward_to(14); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); fast_forward_to(26); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } @@ -310,32 +310,32 @@ fn locks_should_persist_from_voting_to_delegation() { VoteThreshold::SimpleMajority, 0, ); - assert_ok!(Democracy::vote(Origin::signed(5), r, aye(4, 10))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, aye(4, 10))); fast_forward_to(2); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r)); // locked 10 until #26. - assert_ok!(Democracy::delegate(Origin::signed(5), 1, Conviction::Locked3x, 20)); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(5), 1, Conviction::Locked3x, 20)); // locked 20. 
assert!(Balances::locks(5)[0].amount == 20); - assert_ok!(Democracy::undelegate(Origin::signed(5))); + assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(5))); // locked 20 until #14 fast_forward_to(13); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert!(Balances::locks(5)[0].amount == 20); fast_forward_to(14); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); fast_forward_to(25); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); fast_forward_to(26); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } @@ -344,8 +344,8 @@ fn locks_should_persist_from_voting_to_delegation() { fn locks_should_persist_from_delegation_to_voting() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::delegate(Origin::signed(5), 1, Conviction::Locked5x, 5)); - assert_ok!(Democracy::undelegate(Origin::signed(5))); + assert_ok!(Democracy::delegate(RuntimeOrigin::signed(5), 1, Conviction::Locked5x, 5)); + assert_ok!(Democracy::undelegate(RuntimeOrigin::signed(5))); // locked 5 until 16 * 3 = #48 let r = setup_three_referenda(); @@ -353,24 +353,24 @@ fn locks_should_persist_from_delegation_to_voting() { // r.1 locked 20 until 2 + 4 * 3 = #14 // r.2 locked 50 until 2 + 2 * 3 = #8 - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.2)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.1)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r.0)); fast_forward_to(8); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 20); fast_forward_to(14); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 10); fast_forward_to(26); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert!(Balances::locks(5)[0].amount >= 5); fast_forward_to(48); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } diff --git a/frame/democracy/src/tests/preimage.rs b/frame/democracy/src/tests/preimage.rs index 21303c8eddae3..39536eab8009b 100644 --- a/frame/democracy/src/tests/preimage.rs +++ b/frame/democracy/src/tests/preimage.rs @@ -28,7 +28,7 @@ fn missing_preimage_should_fail() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); next_block(); next_block(); @@ -44,9 +44,9 @@ fn preimage_deposit_should_be_required_and_returned() { PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 100); assert_noop!( if operational { - Democracy::note_preimage_operational(Origin::signed(6), vec![0; 500]) + Democracy::note_preimage_operational(RuntimeOrigin::signed(6), vec![0; 500]) } else { - 
Democracy::note_preimage(Origin::signed(6), vec![0; 500]) + Democracy::note_preimage(RuntimeOrigin::signed(6), vec![0; 500]) }, BalancesError::::InsufficientBalance, ); @@ -58,7 +58,7 @@ fn preimage_deposit_should_be_required_and_returned() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); assert_eq!(Balances::reserved_balance(6), 12); @@ -76,21 +76,25 @@ fn preimage_deposit_should_be_reapable_earlier_by_owner() { new_test_ext_execute_with_cond(|operational| { PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); assert_ok!(if operational { - Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) + Democracy::note_preimage_operational(RuntimeOrigin::signed(6), set_balance_proposal(2)) } else { - Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) + Democracy::note_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)) }); assert_eq!(Balances::reserved_balance(6), 12); next_block(); assert_noop!( - Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2), u32::MAX), + Democracy::reap_preimage( + RuntimeOrigin::signed(6), + set_balance_proposal_hash(2), + u32::MAX + ), Error::::TooEarly ); next_block(); assert_ok!(Democracy::reap_preimage( - Origin::signed(6), + RuntimeOrigin::signed(6), set_balance_proposal_hash(2), u32::MAX )); @@ -104,15 +108,19 @@ fn preimage_deposit_should_be_reapable_earlier_by_owner() { fn preimage_deposit_should_be_reapable() { new_test_ext_execute_with_cond(|operational| { assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), + Democracy::reap_preimage( + RuntimeOrigin::signed(5), + set_balance_proposal_hash(2), + u32::MAX + ), Error::::PreimageMissing ); PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); assert_ok!(if operational { - Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) + Democracy::note_preimage_operational(RuntimeOrigin::signed(6), set_balance_proposal(2)) } else { - Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) + Democracy::note_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)) }); assert_eq!(Balances::reserved_balance(6), 12); @@ -120,13 +128,17 @@ fn preimage_deposit_should_be_reapable() { next_block(); next_block(); assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), + Democracy::reap_preimage( + RuntimeOrigin::signed(5), + set_balance_proposal_hash(2), + u32::MAX + ), Error::::TooEarly ); next_block(); assert_ok!(Democracy::reap_preimage( - Origin::signed(5), + RuntimeOrigin::signed(5), set_balance_proposal_hash(2), u32::MAX )); @@ -147,16 +159,16 @@ fn noting_imminent_preimage_for_free_should_work() { VoteThreshold::SuperMajorityApprove, 1, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); assert_noop!( if operational { Democracy::note_imminent_preimage_operational( - Origin::signed(6), + RuntimeOrigin::signed(6), set_balance_proposal(2), ) } else { - Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2)) + Democracy::note_imminent_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)) }, Error::::NotImminent ); @@ -164,7 +176,10 @@ fn noting_imminent_preimage_for_free_should_work() { next_block(); // Now we're in the dispatch queue it's all good. 
- assert_ok!(Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2))); + assert_ok!(Democracy::note_imminent_preimage( + RuntimeOrigin::signed(6), + set_balance_proposal(2) + )); next_block(); @@ -177,11 +192,11 @@ fn reaping_imminent_preimage_should_fail() { new_test_ext().execute_with(|| { let h = set_balance_proposal_hash_and_note(2); let r = Democracy::inject_referendum(3, h, VoteThreshold::SuperMajorityApprove, 1); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); next_block(); next_block(); assert_noop!( - Democracy::reap_preimage(Origin::signed(6), h, u32::MAX), + Democracy::reap_preimage(RuntimeOrigin::signed(6), h, u32::MAX), Error::::Imminent ); }); @@ -198,21 +213,24 @@ fn note_imminent_preimage_can_only_be_successful_once() { VoteThreshold::SuperMajorityApprove, 1, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); next_block(); // First time works - assert_ok!(Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2))); + assert_ok!(Democracy::note_imminent_preimage( + RuntimeOrigin::signed(6), + set_balance_proposal(2) + )); // Second time fails assert_noop!( - Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2)), + Democracy::note_imminent_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)), Error::::DuplicatePreimage ); // Fails from any user assert_noop!( - Democracy::note_imminent_preimage(Origin::signed(5), set_balance_proposal(2)), + Democracy::note_imminent_preimage(RuntimeOrigin::signed(5), set_balance_proposal(2)), Error::::DuplicatePreimage ); }); diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs index 65e5466857dc1..c52533e46ccc5 100644 --- a/frame/democracy/src/tests/public_proposals.rs +++ b/frame/democracy/src/tests/public_proposals.rs @@ -35,10 +35,10 @@ fn backing_for_should_work() { fn deposit_for_proposals_should_be_taken() { new_test_ext().execute_with(|| { assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_ok!(Democracy::second(Origin::signed(2), 0, u32::MAX)); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0, u32::MAX)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::free_balance(2), 15); assert_eq!(Balances::free_balance(5), 35); @@ -49,10 +49,10 @@ fn deposit_for_proposals_should_be_taken() { fn deposit_for_proposals_should_be_returned() { new_test_ext().execute_with(|| { assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_ok!(Democracy::second(Origin::signed(2), 0, u32::MAX)); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(Origin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0, u32::MAX)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); + 
assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); fast_forward_to(3); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 20); @@ -79,7 +79,7 @@ fn poor_seconder_should_not_work() { new_test_ext().execute_with(|| { assert_ok!(propose_set_balance_and_note(2, 2, 11)); assert_noop!( - Democracy::second(Origin::signed(1), 0, u32::MAX), + Democracy::second(RuntimeOrigin::signed(1), 0, u32::MAX), BalancesError::::InsufficientBalance ); }); @@ -89,7 +89,10 @@ fn poor_seconder_should_not_work() { fn invalid_seconds_upper_bound_should_not_work() { new_test_ext().execute_with(|| { assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_noop!(Democracy::second(Origin::signed(2), 0, 0), Error::::WrongUpperBound); + assert_noop!( + Democracy::second(RuntimeOrigin::signed(2), 0, 0), + Error::::WrongUpperBound + ); }); } @@ -98,8 +101,8 @@ fn cancel_proposal_should_work() { new_test_ext().execute_with(|| { assert_ok!(propose_set_balance_and_note(1, 2, 2)); assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_noop!(Democracy::cancel_proposal(Origin::signed(1), 0), BadOrigin); - assert_ok!(Democracy::cancel_proposal(Origin::root(), 0)); + assert_noop!(Democracy::cancel_proposal(RuntimeOrigin::signed(1), 0), BadOrigin); + assert_ok!(Democracy::cancel_proposal(RuntimeOrigin::root(), 0)); System::assert_last_event(crate::Event::ProposalCanceled { prop_index: 0 }.into()); assert_eq!(Democracy::backing_for(0), None); assert_eq!(Democracy::backing_for(1), Some(4)); @@ -115,8 +118,8 @@ fn blacklisting_should_work() { assert_ok!(propose_set_balance_and_note(1, 2, 2)); assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_noop!(Democracy::blacklist(Origin::signed(1), hash, None), BadOrigin); - assert_ok!(Democracy::blacklist(Origin::root(), hash, None)); + assert_noop!(Democracy::blacklist(RuntimeOrigin::signed(1), hash, None), BadOrigin); + assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, None)); assert_eq!(Democracy::backing_for(0), None); assert_eq!(Democracy::backing_for(1), Some(4)); @@ -127,7 +130,7 @@ fn blacklisting_should_work() { let hash = set_balance_proposal_hash(4); assert_ok!(Democracy::referendum_status(0)); - assert_ok!(Democracy::blacklist(Origin::root(), hash, Some(0))); + assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, Some(0))); assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); }); } @@ -140,10 +143,10 @@ fn runners_up_should_come_after() { assert_ok!(propose_set_balance_and_note(1, 4, 4)); assert_ok!(propose_set_balance_and_note(1, 3, 3)); fast_forward_to(2); - assert_ok!(Democracy::vote(Origin::signed(1), 0, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 0, aye(1))); fast_forward_to(4); - assert_ok!(Democracy::vote(Origin::signed(1), 1, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 1, aye(1))); fast_forward_to(6); - assert_ok!(Democracy::vote(Origin::signed(1), 2, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 2, aye(1))); }); } diff --git a/frame/democracy/src/tests/scheduling.rs b/frame/democracy/src/tests/scheduling.rs index d28f24d76bb5b..4b5fe8dd9c1c3 100644 --- a/frame/democracy/src/tests/scheduling.rs +++ b/frame/democracy/src/tests/scheduling.rs @@ -28,7 +28,7 @@ fn simple_passing_should_work() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 1, nays: 0, 
turnout: 10 }); assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); @@ -47,7 +47,7 @@ fn simple_failing_should_work() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, nay(1))); assert_eq!(tally(r), Tally { ayes: 0, nays: 1, turnout: 10 }); next_block(); @@ -73,13 +73,13 @@ fn ooo_inject_referendums_should_work() { 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r2, aye(1))); assert_eq!(tally(r2), Tally { ayes: 1, nays: 0, turnout: 10 }); next_block(); assert_eq!(Balances::free_balance(42), 2); - assert_ok!(Democracy::vote(Origin::signed(1), r1, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r1, aye(1))); assert_eq!(tally(r1), Tally { ayes: 1, nays: 0, turnout: 10 }); next_block(); @@ -96,12 +96,12 @@ fn delayed_enactment_should_work() { VoteThreshold::SuperMajorityApprove, 1, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); - assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3))); - assert_ok!(Democracy::vote(Origin::signed(4), r, aye(4))); - assert_ok!(Democracy::vote(Origin::signed(5), r, aye(5))); - assert_ok!(Democracy::vote(Origin::signed(6), r, aye(6))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, aye(2))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(3), r, aye(3))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(4), r, aye(4))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, aye(5))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(6), r, aye(6))); assert_eq!(tally(r), Tally { ayes: 21, nays: 0, turnout: 210 }); @@ -134,10 +134,10 @@ fn lowest_unbaked_should_be_sensible() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r1, aye(1))); - assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r1, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r2, aye(1))); // r3 is canceled - assert_ok!(Democracy::cancel_referendum(Origin::root(), r3.into())); + assert_ok!(Democracy::cancel_referendum(RuntimeOrigin::root(), r3.into())); assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); diff --git a/frame/democracy/src/tests/voting.rs b/frame/democracy/src/tests/voting.rs index d4fceaf0ee489..59d0cd6bc50ef 100644 --- a/frame/democracy/src/tests/voting.rs +++ b/frame/democracy/src/tests/voting.rs @@ -24,7 +24,7 @@ fn overvoting_should_fail() { new_test_ext().execute_with(|| { let r = begin_referendum(); assert_noop!( - Democracy::vote(Origin::signed(1), r, aye(2)), + Democracy::vote(RuntimeOrigin::signed(1), r, aye(2)), Error::::InsufficientFunds ); }); @@ -35,9 +35,12 @@ fn split_voting_should_work() { new_test_ext().execute_with(|| { let r = begin_referendum(); let v = AccountVote::Split { aye: 40, nay: 20 }; - assert_noop!(Democracy::vote(Origin::signed(5), r, v), Error::::InsufficientFunds); + assert_noop!( + Democracy::vote(RuntimeOrigin::signed(5), r, v), + Error::::InsufficientFunds + ); let v = AccountVote::Split { aye: 30, nay: 20 }; - assert_ok!(Democracy::vote(Origin::signed(5), r, v)); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, v)); assert_eq!(tally(r), Tally { ayes: 3, nays: 2, turnout: 50 }); }); @@ -48,10 +51,10 @@ fn split_vote_cancellation_should_work() 
{ new_test_ext().execute_with(|| { let r = begin_referendum(); let v = AccountVote::Split { aye: 30, nay: 20 }; - assert_ok!(Democracy::vote(Origin::signed(5), r, v)); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, v)); + assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(5), r)); assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_ok!(Democracy::unlock(RuntimeOrigin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![]); }); } @@ -66,7 +69,7 @@ fn single_proposal_should_work() { // start of 2 => next referendum scheduled. fast_forward_to(2); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); assert_eq!(Democracy::referendum_count(), 1); assert_eq!( @@ -108,12 +111,12 @@ fn controversial_voting_should_work() { 0, ); - assert_ok!(Democracy::vote(Origin::signed(1), r, big_aye(1))); - assert_ok!(Democracy::vote(Origin::signed(2), r, big_nay(2))); - assert_ok!(Democracy::vote(Origin::signed(3), r, big_nay(3))); - assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); - assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); - assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, big_aye(1))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(2), r, big_nay(2))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(3), r, big_nay(3))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(4), r, big_aye(4))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(6), r, big_aye(6))); assert_eq!(tally(r), Tally { ayes: 110, nays: 100, turnout: 210 }); @@ -133,8 +136,8 @@ fn controversial_low_turnout_voting_should_work() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); - assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(6), r, big_aye(6))); assert_eq!(tally(r), Tally { ayes: 60, nays: 50, turnout: 110 }); @@ -157,9 +160,9 @@ fn passing_low_turnout_voting_should_work() { VoteThreshold::SuperMajorityApprove, 0, ); - assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); - assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); - assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(4), r, big_aye(4))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(RuntimeOrigin::signed(6), r, big_aye(6))); assert_eq!(tally(r), Tally { ayes: 100, nays: 50, turnout: 150 }); next_block(); diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 1188e3353bc96..3c9b15dabb053 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -700,7 +700,7 @@ pub mod pallet { /// Origin that can control this pallet. Note that any action taken by this origin (such) /// as providing an emergency solution is not checked. Thus, it must be a trusted origin. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// The configuration of benchmarking. 
type BenchmarkingConfig: BenchmarkingConfig; @@ -1828,7 +1828,7 @@ mod tests { use crate::{ mock::{ multi_phase_events, raw_solution, roll_to, AccountId, ExtBuilder, MockWeightInfo, - MockedWeightInfo, MultiPhase, Origin, Runtime, SignedMaxSubmissions, System, + MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, SignedMaxSubmissions, System, TargetIndex, Targets, }, Phase, @@ -2030,7 +2030,10 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(crate::mock::Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit( + crate::mock::RuntimeOrigin::signed(99), + Box::new(solution) + )); } // an unexpected call to elect. @@ -2057,7 +2060,10 @@ mod tests { assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); - assert_ok!(MultiPhase::submit(crate::mock::Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit( + crate::mock::RuntimeOrigin::signed(99), + Box::new(solution) + )); roll_to(30); assert_ok!(MultiPhase::elect()); @@ -2098,7 +2104,7 @@ mod tests { // ensure this solution is valid. assert!(MultiPhase::queued_solution().is_none()); assert_ok!(MultiPhase::submit_unsigned( - crate::mock::Origin::none(), + crate::mock::RuntimeOrigin::none(), Box::new(solution), witness )); @@ -2176,12 +2182,12 @@ mod tests { // no single account can trigger this assert_noop!( - MultiPhase::governance_fallback(Origin::signed(99), None, None), + MultiPhase::governance_fallback(RuntimeOrigin::signed(99), None, None), DispatchError::BadOrigin ); // only root can - assert_ok!(MultiPhase::governance_fallback(Origin::root(), None, None)); + assert_ok!(MultiPhase::governance_fallback(RuntimeOrigin::root(), None, None)); // something is queued now assert!(MultiPhase::queued_solution().is_some()); // next election call with fix everything.; diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index efe9d8ea587cb..d7affc14564f5 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -199,7 +199,7 @@ pub fn witness() -> SolutionOrSnapshotSize { impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index b9abfdfba14fb..1cf071e6796f1 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -529,7 +529,7 @@ mod tests { use crate::{ mock::{ balances, raw_solution, roll_to, Balances, ExtBuilder, MockedWeightInfo, MultiPhase, - Origin, Runtime, SignedMaxRefunds, SignedMaxSubmissions, SignedMaxWeight, + Runtime, RuntimeOrigin, SignedMaxRefunds, SignedMaxSubmissions, SignedMaxWeight, }, Error, Perbill, Phase, }; @@ -546,7 +546,7 @@ mod tests { let solution = raw_solution(); assert_noop!( - MultiPhase::submit(Origin::signed(10), Box::new(solution)), + MultiPhase::submit(RuntimeOrigin::signed(10), Box::new(solution)), Error::::PreDispatchEarlySubmission, ); }) @@ -561,7 +561,7 @@ mod tests { let solution = raw_solution(); assert_eq!(balances(&99), (100, 0)); - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + 
assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); assert_eq!(balances(&99), (95, 5)); assert_eq!(MultiPhase::signed_submissions().iter().next().unwrap().deposit, 5); @@ -577,7 +577,7 @@ mod tests { let solution = raw_solution(); assert_eq!(balances(&99), (100, 0)); - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); assert_eq!(balances(&99), (95, 5)); assert!(MultiPhase::finalize_signed_phase()); @@ -597,7 +597,7 @@ mod tests { // make the solution invalid. solution.score.minimal_stake += 1; - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); assert_eq!(balances(&99), (95, 5)); // no good solution was stored. @@ -618,11 +618,11 @@ mod tests { assert_eq!(balances(&999), (100, 0)); // submit as correct. - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution.clone()))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution.clone()))); // make the solution invalid and weaker. solution.score.minimal_stake -= 1; - assert_ok!(MultiPhase::submit(Origin::signed(999), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(999), Box::new(solution))); assert_eq!(balances(&99), (95, 5)); assert_eq!(balances(&999), (95, 5)); @@ -648,7 +648,7 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); } // weaker. @@ -658,7 +658,7 @@ mod tests { }; assert_noop!( - MultiPhase::submit(Origin::signed(99), Box::new(solution)), + MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution)), Error::::SignedQueueFull, ); }) @@ -679,7 +679,7 @@ mod tests { let mut solution = raw_solution(); solution.score.minimal_stake -= s as u128; - assert_ok!(MultiPhase::submit(Origin::signed(account), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(account), Box::new(solution))); assert_eq!(balances(&account), (95, 5)); } @@ -719,7 +719,7 @@ mod tests { }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); // This is 10% better, so does not meet the 20% threshold and is therefore rejected. 
solution = RawSolution { @@ -732,7 +732,7 @@ mod tests { }; assert_noop!( - MultiPhase::submit(Origin::signed(99), Box::new(solution)), + MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution)), Error::::SignedQueueFull, ); @@ -746,7 +746,7 @@ mod tests { ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); }) } @@ -764,7 +764,7 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(account), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(account), Box::new(solution))); assert_eq!(balances(&account), (95, 5)); } @@ -781,7 +781,7 @@ mod tests { score: ElectionScore { minimal_stake: 20, ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(999), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(999), Box::new(solution))); // the one with score 5 was rejected, the new one inserted. assert_eq!( @@ -809,14 +809,14 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); } let solution = RawSolution { score: ElectionScore { minimal_stake: 4, ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); assert_eq!( MultiPhase::signed_submissions() @@ -831,7 +831,7 @@ mod tests { score: ElectionScore { minimal_stake: 5, ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); // the one with score 5 was rejected, the new one inserted. assert_eq!( @@ -856,7 +856,7 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); } assert_eq!(balances(&99).1, 2 * 5); @@ -867,7 +867,7 @@ mod tests { score: ElectionScore { minimal_stake: 20, ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(999), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(999), Box::new(solution))); // got one bond back. assert_eq!(balances(&99).1, 2 * 4); @@ -886,7 +886,7 @@ mod tests { score: ElectionScore { minimal_stake: (5 + i).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); } assert_eq!( MultiPhase::signed_submissions() @@ -902,7 +902,7 @@ mod tests { ..Default::default() }; assert_noop!( - MultiPhase::submit(Origin::signed(99), Box::new(solution)), + MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution)), Error::::SignedQueueFull, ); }) @@ -924,18 +924,18 @@ mod tests { let solution = raw_solution(); // submit a correct one. 
- assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution.clone()))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution.clone()))); // make the solution invalidly better and submit. This ought to be slashed. let mut solution_999 = solution.clone(); solution_999.score.minimal_stake += 1; - assert_ok!(MultiPhase::submit(Origin::signed(999), Box::new(solution_999))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(999), Box::new(solution_999))); // make the solution invalidly worse and submit. This ought to be suppressed and // returned. let mut solution_9999 = solution.clone(); solution_9999.score.minimal_stake -= 1; - assert_ok!(MultiPhase::submit(Origin::signed(9999), Box::new(solution_9999))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(9999), Box::new(solution_9999))); assert_eq!( MultiPhase::signed_submissions().iter().map(|x| x.who).collect::>(), @@ -975,14 +975,14 @@ mod tests { assert_eq!(raw.solution.voter_count(), 5); assert_eq!(::SignedMaxWeight::get(), Weight::from_ref_time(40)); - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(raw.clone()))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(raw.clone()))); ::set(Weight::from_ref_time(30)); // note: resubmitting the same solution is technically okay as long as the queue has // space. assert_noop!( - MultiPhase::submit(Origin::signed(99), Box::new(raw)), + MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(raw)), Error::::SignedTooMuchWeight, ); }) @@ -998,7 +998,7 @@ mod tests { assert_eq!(balances(&123), (0, 0)); assert_noop!( - MultiPhase::submit(Origin::signed(123), Box::new(solution)), + MultiPhase::submit(RuntimeOrigin::signed(123), Box::new(solution)), Error::::SignedCannotPayDeposit, ); @@ -1020,7 +1020,7 @@ mod tests { score: ElectionScore { minimal_stake: (5 + s).into(), ..Default::default() }, ..Default::default() }; - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); } // this solution has a higher score than any in the queue @@ -1034,7 +1034,7 @@ mod tests { assert_eq!(balances(&123), (0, 0)); assert_noop!( - MultiPhase::submit(Origin::signed(123), Box::new(solution)), + MultiPhase::submit(RuntimeOrigin::signed(123), Box::new(solution)), Error::::SignedCannotPayDeposit, ); @@ -1063,7 +1063,7 @@ mod tests { let solution = raw_solution(); // submit a correct one. - assert_ok!(MultiPhase::submit(Origin::signed(99), Box::new(solution))); + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); // _some_ good solution was stored. 
assert!(MultiPhase::finalize_signed_phase()); diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 7dadd320abdf8..cf8df237bafb0 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -856,7 +856,7 @@ mod tests { use crate::{ mock::{ roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, - MinerMaxWeight, MultiPhase, Origin, Runtime, RuntimeCall as OuterCall, System, + MinerMaxWeight, MultiPhase, Runtime, RuntimeCall as OuterCall, RuntimeOrigin, System, TestNposSolution, TrimHelpers, UnsignedPhase, }, CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, @@ -1071,7 +1071,7 @@ mod tests { witness: witness(), }; let outer_call: OuterCall = call.into(); - let _ = outer_call.dispatch(Origin::none()); + let _ = outer_call.dispatch(RuntimeOrigin::none()); }) } @@ -1097,7 +1097,7 @@ mod tests { witness: correct_witness, }; let outer_call: OuterCall = call.into(); - let _ = outer_call.dispatch(Origin::none()); + let _ = outer_call.dispatch(RuntimeOrigin::none()); }) } @@ -1116,7 +1116,11 @@ mod tests { // ensure this solution is valid. assert!(MultiPhase::queued_solution().is_none()); - assert_ok!(MultiPhase::submit_unsigned(Origin::none(), Box::new(solution), witness)); + assert_ok!(MultiPhase::submit_unsigned( + RuntimeOrigin::none(), + Box::new(solution), + witness + )); assert!(MultiPhase::queued_solution().is_some()); }) } @@ -1215,7 +1219,7 @@ mod tests { let solution = RawSolution { solution: raw, score, round: MultiPhase::round() }; assert_ok!(MultiPhase::unsigned_pre_dispatch_checks(&solution)); assert_ok!(MultiPhase::submit_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(solution), witness )); @@ -1276,7 +1280,7 @@ mod tests { // and it is fine assert_ok!(MultiPhase::unsigned_pre_dispatch_checks(&solution)); assert_ok!(MultiPhase::submit_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(solution), witness )); diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index 05bbd11e1ae9d..e477499f3c927 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -211,7 +211,7 @@ mod tests { impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = AccountId; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 18210513f8ab6..0616087d975e8 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1182,7 +1182,7 @@ mod tests { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -1486,11 +1486,11 @@ mod tests { ensure_members_has_approval_stake(); } - fn submit_candidacy(origin: Origin) -> sp_runtime::DispatchResult { + fn submit_candidacy(origin: RuntimeOrigin) -> sp_runtime::DispatchResult { Elections::submit_candidacy(origin, Elections::candidates().len() as u32) } - fn vote(origin: Origin, votes: Vec, stake: u64) -> DispatchResultWithPostInfo { + fn vote(origin: RuntimeOrigin, votes: Vec, stake: u64) -> DispatchResultWithPostInfo { // 
historical note: helper function was created in a period of time in which the API of vote // call was changing. Currently it is a wrapper for the original call and does not do much. // Nonetheless, totally harmless. @@ -1572,8 +1572,8 @@ mod tests { Voter { stake: 20u64, votes: vec![2], deposit: 0 } ); - assert_ok!(Elections::remove_voter(Origin::signed(1))); - assert_ok!(Elections::remove_voter(Origin::signed(2))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(1))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); assert_eq!(Elections::voting(1), Default::default()); assert_eq!(Elections::voting(2), Default::default()); @@ -1666,7 +1666,7 @@ mod tests { assert!(Elections::is_candidate(&2).is_err()); assert_eq!(balances(&1), (10, 0)); - assert_ok!(submit_candidacy(Origin::signed(1))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(1))); assert_eq!(balances(&1), (7, 3)); assert_eq!(candidate_ids(), vec![1]); @@ -1675,7 +1675,7 @@ mod tests { assert!(Elections::is_candidate(&2).is_err()); assert_eq!(balances(&2), (20, 0)); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); assert_eq!(balances(&2), (17, 3)); assert_eq!(candidate_ids(), vec![1, 2]); @@ -1692,15 +1692,15 @@ mod tests { #[test] fn updating_candidacy_bond_works() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); assert_eq!(Elections::candidates(), vec![(5, 3)]); // a runtime upgrade changes the bond. CANDIDACY_BOND.with(|v| *v.borrow_mut() = 4); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); assert_eq!(Elections::candidates(), vec![(4, 4), (5, 3)]); // once elected, they each hold their candidacy bond, no more. @@ -1724,13 +1724,13 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_eq!(candidate_ids(), Vec::::new()); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); assert_eq!(candidate_ids(), vec![3]); - assert_ok!(submit_candidacy(Origin::signed(1))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(1))); assert_eq!(candidate_ids(), vec![1, 3]); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); assert_eq!(candidate_ids(), vec![1, 2, 3]); - assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); assert_eq!(candidate_ids(), vec![1, 2, 3, 4]); }); } @@ -1739,9 +1739,12 @@ mod tests { fn dupe_candidate_submission_should_not_work() { ExtBuilder::default().build_and_execute(|| { assert_eq!(candidate_ids(), Vec::::new()); - assert_ok!(submit_candidacy(Origin::signed(1))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(1))); assert_eq!(candidate_ids(), vec![1]); - assert_noop!(submit_candidacy(Origin::signed(1)), Error::::DuplicatedCandidate); + assert_noop!( + submit_candidacy(RuntimeOrigin::signed(1)), + Error::::DuplicatedCandidate + ); }); } @@ -1749,8 +1752,8 @@ mod tests { fn member_candidacy_submission_should_not_work() { // critically important to make sure that outgoing candidates and losers are not mixed up. 
ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(Origin::signed(2), vec![5], 20)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -1759,19 +1762,19 @@ mod tests { assert!(Elections::runners_up().is_empty()); assert!(candidate_ids().is_empty()); - assert_noop!(submit_candidacy(Origin::signed(5)), Error::<Test>::MemberSubmit); + assert_noop!(submit_candidacy(RuntimeOrigin::signed(5)), Error::<Test>::MemberSubmit); }); } #[test] fn runner_candidate_submission_should_not_work() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(Origin::signed(2), vec![5, 4], 20)); - assert_ok!(vote(Origin::signed(1), vec![3], 10)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 4], 20)); + assert_ok!(vote(RuntimeOrigin::signed(1), vec![3], 10)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -1779,7 +1782,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert_eq!(runners_up_ids(), vec![3]); - assert_noop!(submit_candidacy(Origin::signed(3)), Error::<Test>::RunnerUpSubmit); + assert_noop!(submit_candidacy(RuntimeOrigin::signed(3)), Error::<Test>::RunnerUpSubmit); }); } @@ -1788,7 +1791,7 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_eq!(candidate_ids(), Vec::<u64>::new()); assert_noop!( - submit_candidacy(Origin::signed(7)), + submit_candidacy(RuntimeOrigin::signed(7)), Error::<Test>::InsufficientCandidateFunds, ); }); @@ -1800,8 +1803,8 @@ mod tests { assert_eq!(candidate_ids(), Vec::<u64>::new()); assert_eq!(balances(&2), (20, 0)); - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(Origin::signed(2), vec![5], 20)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); assert_eq!(balances(&2), (18, 2)); assert_eq!(has_lock(&2), 18); @@ -1814,8 +1817,8 @@ mod tests { assert_eq!(candidate_ids(), Vec::<u64>::new()); assert_eq!(balances(&2), (20, 0)); - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(Origin::signed(2), vec![5], 12)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 12)); assert_eq!(balances(&2), (18, 2)); assert_eq!(has_lock(&2), 12); @@ -1827,9 +1830,9 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_eq!(balances(&2), (20, 0)); - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(vote(Origin::signed(2), vec![5], 20)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); // User only locks up to their free balance. assert_eq!(balances(&2), (18, 2)); @@ -1837,7 +1840,7 @@ mod tests { assert_eq!(locked_stake_of(&2), 18); // can update; different stake; different lock and reserve.
- assert_ok!(vote(Origin::signed(2), vec![5, 4], 15)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 4], 15)); assert_eq!(balances(&2), (18, 2)); assert_eq!(has_lock(&2), 15); assert_eq!(locked_stake_of(&2), 15); @@ -1847,10 +1850,10 @@ mod tests { #[test] fn updated_voting_bond_works() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); assert_eq!(balances(&2), (20, 0)); - assert_ok!(vote(Origin::signed(2), vec![5], 5)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 5)); assert_eq!(balances(&2), (18, 2)); assert_eq!(voter_deposit(&2), 2); @@ -1860,11 +1863,11 @@ mod tests { // proof that bond changed. assert_eq!(balances(&1), (10, 0)); - assert_ok!(vote(Origin::signed(1), vec![5], 5)); + assert_ok!(vote(RuntimeOrigin::signed(1), vec![5], 5)); assert_eq!(balances(&1), (9, 1)); assert_eq!(voter_deposit(&1), 1); - assert_ok!(Elections::remove_voter(Origin::signed(2))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); assert_eq!(balances(&2), (20, 0)); }) } @@ -1874,11 +1877,11 @@ mod tests { ExtBuilder::default().voter_bond_factor(1).build_and_execute(|| { assert_eq!(balances(&2), (20, 0)); - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); // initial vote. - assert_ok!(vote(Origin::signed(2), vec![5], 10)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 10)); // 2 + 1 assert_eq!(balances(&2), (17, 3)); @@ -1887,7 +1890,7 @@ mod tests { assert_eq!(locked_stake_of(&2), 10); // can update; different stake; different lock and reserve. - assert_ok!(vote(Origin::signed(2), vec![5, 4], 15)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 4], 15)); // 2 + 2 assert_eq!(balances(&2), (16, 4)); assert_eq!(Elections::voting(&2).deposit, 4); @@ -1895,7 +1898,7 @@ mod tests { assert_eq!(locked_stake_of(&2), 15); // stay at two votes with different stake. - assert_ok!(vote(Origin::signed(2), vec![5, 3], 18)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5, 3], 18)); // 2 + 2 assert_eq!(balances(&2), (16, 4)); assert_eq!(Elections::voting(&2).deposit, 4); @@ -1903,7 +1906,7 @@ mod tests { assert_eq!(locked_stake_of(&2), 16); // back to 1 vote. 
- assert_ok!(vote(Origin::signed(2), vec![4], 12)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![4], 12)); // 2 + 1 assert_eq!(balances(&2), (17, 3)); assert_eq!(Elections::voting(&2).deposit, 3); @@ -1915,17 +1918,17 @@ mod tests { #[test] fn cannot_vote_for_no_candidate() { ExtBuilder::default().build_and_execute(|| { - assert_noop!(vote(Origin::signed(2), vec![], 20), Error::::NoVotes); + assert_noop!(vote(RuntimeOrigin::signed(2), vec![], 20), Error::::NoVotes); }); } #[test] fn can_vote_for_old_members_even_when_no_new_candidates() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(vote(Origin::signed(2), vec![4, 5], 20)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![4, 5], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -1933,22 +1936,22 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert!(candidate_ids().is_empty()); - assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![4, 5], 10)); }); } #[test] fn prime_works() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(Origin::signed(1), vec![4, 3], 10)); - assert_ok!(vote(Origin::signed(2), vec![4], 20)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(1), vec![4, 3], 10)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![4], 20)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -1956,7 +1959,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert!(candidate_ids().is_empty()); - assert_ok!(vote(Origin::signed(3), vec![4, 5], 10)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![4, 5], 10)); assert_eq!(PRIME.with(|p| *p.borrow()), Some(4)); }); } @@ -1964,17 +1967,20 @@ mod tests { #[test] fn prime_votes_for_exiting_members_are_removed() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(Origin::signed(1), vec![4, 3], 10)); - assert_ok!(vote(Origin::signed(2), vec![4], 20)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(1), vec![4, 3], 10)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![4], 20)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + 
assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); - assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3))); + assert_ok!(Elections::renounce_candidacy( + RuntimeOrigin::signed(4), + Renouncing::Candidate(3) + )); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -1989,18 +1995,18 @@ mod tests { #[test] fn prime_is_kept_if_other_members_leave() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![4, 5]); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); - assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); + assert_ok!(Elections::renounce_candidacy(RuntimeOrigin::signed(4), Renouncing::Member)); assert_eq!(members_ids(), vec![5]); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); @@ -2010,18 +2016,18 @@ mod tests { #[test] fn prime_is_gone_if_renouncing() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![4, 5]); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); - assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Member)); + assert_ok!(Elections::renounce_candidacy(RuntimeOrigin::signed(5), Renouncing::Member)); assert_eq!(members_ids(), vec![4]); assert_eq!(PRIME.with(|p| *p.borrow()), None); @@ -2035,29 +2041,29 @@ mod tests { .balance_factor(10) .build_and_execute(|| { // when we have only candidates - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); assert_noop!( // content of the vote is irrelevant. 
- vote(Origin::signed(1), vec![9, 99, 999, 9999], 5), + vote(RuntimeOrigin::signed(1), vec![9, 99, 999, 9999], 5), Error::::TooManyVotes, ); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); // now we have 2 members, 1 runner-up, and 1 new candidate - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(1), vec![9, 99, 999, 9999], 5)); + assert_ok!(vote(RuntimeOrigin::signed(1), vec![9, 99, 999, 9999], 5)); assert_noop!( - vote(Origin::signed(1), vec![9, 99, 999, 9_999, 99_999], 5), + vote(RuntimeOrigin::signed(1), vec![9, 99, 999, 9_999, 99_999], 5), Error::::TooManyVotes, ); }); @@ -2066,23 +2072,23 @@ mod tests { #[test] fn cannot_vote_for_less_than_ed() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_noop!(vote(Origin::signed(2), vec![4], 1), Error::::LowBalance); + assert_noop!(vote(RuntimeOrigin::signed(2), vec![4], 1), Error::::LowBalance); }) } #[test] fn can_vote_for_more_than_free_balance_but_moot() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); // User has 100 free and 50 reserved. - assert_ok!(Balances::set_balance(Origin::root(), 2, 100, 50)); + assert_ok!(Balances::set_balance(RuntimeOrigin::root(), 2, 100, 50)); // User tries to vote with 150 tokens. - assert_ok!(vote(Origin::signed(2), vec![4, 5], 150)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![4, 5], 150)); // We truncate to only their free balance, after reserving additional for voting. 
assert_eq!(locked_stake_of(&2), 98); assert_eq!(has_lock(&2), 98); @@ -2092,10 +2098,10 @@ mod tests { #[test] fn remove_voter_should_work() { ExtBuilder::default().voter_bond(8).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(Origin::signed(2), vec![5], 20)); - assert_ok!(vote(Origin::signed(3), vec![5], 30)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![5], 30)); assert_eq_uvec!(all_voters(), vec![2, 3]); assert_eq!(balances(&2), (12, 8)); @@ -2105,7 +2111,7 @@ mod tests { assert_eq!(votes_of(&2), vec![5]); assert_eq!(votes_of(&3), vec![5]); - assert_ok!(Elections::remove_voter(Origin::signed(2))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); assert_eq_uvec!(all_voters(), vec![3]); assert!(votes_of(&2).is_empty()); @@ -2119,35 +2125,41 @@ mod tests { #[test] fn non_voter_remove_should_not_work() { ExtBuilder::default().build_and_execute(|| { - assert_noop!(Elections::remove_voter(Origin::signed(3)), Error::::MustBeVoter); + assert_noop!( + Elections::remove_voter(RuntimeOrigin::signed(3)), + Error::::MustBeVoter + ); }); } #[test] fn dupe_remove_should_fail() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(Origin::signed(2), vec![5], 20)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); - assert_ok!(Elections::remove_voter(Origin::signed(2))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); assert!(all_voters().is_empty()); - assert_noop!(Elections::remove_voter(Origin::signed(2)), Error::::MustBeVoter); + assert_noop!( + Elections::remove_voter(RuntimeOrigin::signed(2)), + Error::::MustBeVoter + ); }); } #[test] fn removed_voter_should_not_be_counted() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(Elections::remove_voter(Origin::signed(4))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(4))); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2159,13 +2171,13 @@ mod tests { #[test] fn simple_voting_rounds_should_work() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(Origin::signed(2), vec![5], 20)); - assert_ok!(vote(Origin::signed(4), vec![4], 15)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 
15)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); assert_eq_uvec!(all_voters(), vec![2, 3, 4]); @@ -2209,11 +2221,11 @@ mod tests { #[test] fn all_outgoing() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2225,8 +2237,8 @@ mod tests { assert_eq!(members_and_stake(), vec![(4, 35), (5, 45)]); assert_eq!(runners_up_and_stake(), vec![]); - assert_ok!(Elections::remove_voter(Origin::signed(5))); - assert_ok!(Elections::remove_voter(Origin::signed(4))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(5))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(4))); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2244,11 +2256,11 @@ mod tests { #[test] fn defunct_voter_will_be_counted() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); // This guy's vote is pointless for this round. - assert_ok!(vote(Origin::signed(3), vec![4], 30)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![4], 30)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2257,7 +2269,7 @@ mod tests { assert_eq!(Elections::election_rounds(), 1); // but now it has a valid target. 
- assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2272,15 +2284,15 @@ mod tests { #[test] fn only_desired_seats_are_chosen() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2293,8 +2305,8 @@ mod tests { #[test] fn phragmen_should_not_self_vote() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2312,15 +2324,15 @@ mod tests { #[test] fn runners_up_should_be_kept() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(2), vec![3], 20)); - assert_ok!(vote(Origin::signed(3), vec![2], 30)); - assert_ok!(vote(Origin::signed(4), vec![5], 40)); - assert_ok!(vote(Origin::signed(5), vec![4], 50)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![3], 20)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![2], 30)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![5], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![4], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2339,22 +2351,22 @@ mod tests { #[test] fn runners_up_should_be_next_candidates() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - 
assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_and_stake(), vec![(4, 35), (5, 45)]); assert_eq!(runners_up_and_stake(), vec![(2, 15), (3, 25)]); - assert_ok!(vote(Origin::signed(5), vec![5], 10)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 10)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2367,13 +2379,13 @@ mod tests { #[test] fn runners_up_lose_bond_once_outgoing() { ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2381,8 +2393,8 @@ mod tests { assert_eq!(runners_up_ids(), vec![2]); assert_eq!(balances(&2), (15, 5)); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2397,17 +2409,17 @@ mod tests { ExtBuilder::default().build_and_execute(|| { assert_eq!(balances(&5), (50, 0)); - assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); assert_eq!(balances(&5), (47, 3)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); assert_eq!(balances(&5), (45, 5)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![5]); - assert_ok!(Elections::remove_voter(Origin::signed(5))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(5))); assert_eq!(balances(&5), (47, 3)); System::set_block_number(10); @@ -2421,10 +2433,10 @@ mod tests { #[test] fn candidates_lose_the_bond_when_outgoing() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(Origin::signed(4), vec![5], 40)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![5], 40)); assert_eq!(balances(&5), (47, 3)); assert_eq!(balances(&3), (27, 3)); @@ -2444,11 +2456,11 @@ mod tests { #[test] fn current_members_are_always_next_candidate() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + 
assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2456,13 +2468,13 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert_eq!(Elections::election_rounds(), 1); - assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(Elections::remove_voter(Origin::signed(4))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(4))); // 5 will persist as candidates despite not being in the list. assert_eq!(candidate_ids(), vec![2, 3]); @@ -2480,15 +2492,15 @@ mod tests { // what I mean by uninterrupted: // given no input or stimulants the same members are re-elected. ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); let check_at_block = |b: u32| { System::set_block_number(b.into()); @@ -2513,11 +2525,11 @@ mod tests { #[test] fn remove_members_triggers_election() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2525,10 +2537,10 @@ mod tests { assert_eq!(Elections::election_rounds(), 1); // a new candidate - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); - assert_ok!(Elections::remove_member(Origin::root(), 4, true, true)); + assert_ok!(Elections::remove_member(RuntimeOrigin::root(), 4, true, true)); assert_eq!(balances(&4), (35, 2)); // slashed assert_eq!(Elections::election_rounds(), 2); // new election round @@ -2539,14 +2551,14 @@ mod 
tests { #[test] fn seats_should_be_released_when_no_vote() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(Origin::signed(2), vec![3], 20)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![3], 20)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); assert_eq!(>::decode_len().unwrap(), 3); @@ -2557,10 +2569,10 @@ mod tests { assert_eq!(members_ids(), vec![3, 5]); assert_eq!(Elections::election_rounds(), 1); - assert_ok!(Elections::remove_voter(Origin::signed(2))); - assert_ok!(Elections::remove_voter(Origin::signed(3))); - assert_ok!(Elections::remove_voter(Origin::signed(4))); - assert_ok!(Elections::remove_voter(Origin::signed(5))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(2))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(3))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(4))); + assert_ok!(Elections::remove_voter(RuntimeOrigin::signed(5))); // meanwhile, no one cares to become a candidate again. System::set_block_number(10); @@ -2573,29 +2585,29 @@ mod tests { #[test] fn incoming_outgoing_are_reported() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![4, 5]); - assert_ok!(submit_candidacy(Origin::signed(1))); - assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(1))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); // 5 will change their vote and becomes an `outgoing` - assert_ok!(vote(Origin::signed(5), vec![4], 8)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![4], 8)); // 4 will stay in the set - assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); // 3 will become a winner - assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); // these two are losers. 
- assert_ok!(vote(Origin::signed(2), vec![2], 20)); - assert_ok!(vote(Origin::signed(1), vec![1], 10)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(1), vec![1], 10)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2620,12 +2632,12 @@ mod tests { #[test] fn invalid_votes_are_moot() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![10], 50)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![10], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2638,15 +2650,15 @@ mod tests { #[test] fn members_are_sorted_based_on_id_runners_on_merit() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(2), vec![3], 20)); - assert_ok!(vote(Origin::signed(3), vec![2], 30)); - assert_ok!(vote(Origin::signed(4), vec![5], 40)); - assert_ok!(vote(Origin::signed(5), vec![4], 50)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![3], 20)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![2], 30)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![5], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![4], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2660,19 +2672,19 @@ mod tests { #[test] fn runner_up_replacement_maintains_members_order() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(2), vec![5], 20)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![2], 50)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![2], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![2, 4]); - assert_ok!(Elections::remove_member(Origin::root(), 2, true, false)); + assert_ok!(Elections::remove_member(RuntimeOrigin::root(), 2, true, false)); assert_eq!(members_ids(), vec![4, 5]); }); } @@ -2680,15 +2692,15 @@ mod tests { #[test] fn can_renounce_candidacy_member_with_runners_bond_is_refunded() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - 
assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2696,7 +2708,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert_eq!(runners_up_ids(), vec![2, 3]); - assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); + assert_ok!(Elections::renounce_candidacy(RuntimeOrigin::signed(4), Renouncing::Member)); assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. assert_eq!(members_ids(), vec![3, 5]); @@ -2707,11 +2719,11 @@ mod tests { #[test] fn can_renounce_candidacy_member_without_runners_bond_is_refunded() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2719,7 +2731,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert!(runners_up_ids().is_empty()); - assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); + assert_ok!(Elections::renounce_candidacy(RuntimeOrigin::signed(4), Renouncing::Member)); assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. 
// no replacement @@ -2731,15 +2743,15 @@ mod tests { #[test] fn can_renounce_candidacy_runner_up() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(5), vec![4], 50)); - assert_ok!(vote(Origin::signed(4), vec![5], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![4], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![5], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2747,7 +2759,10 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert_eq!(runners_up_ids(), vec![2, 3]); - assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); + assert_ok!(Elections::renounce_candidacy( + RuntimeOrigin::signed(3), + Renouncing::RunnerUp + )); assert_eq!(balances(&3), (28, 2)); // 2 is voting bond. assert_eq!(members_ids(), vec![4, 5]); @@ -2758,22 +2773,25 @@ mod tests { #[test] fn runner_up_replacement_works_when_out_of_order() { ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(2), vec![5], 20)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![2], 50)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![5], 20)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![2], 50)); System::set_block_number(5); Elections::on_initialize(System::block_number()); assert_eq!(members_ids(), vec![2, 4]); assert_eq!(runners_up_ids(), vec![5, 3]); - assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); + assert_ok!(Elections::renounce_candidacy( + RuntimeOrigin::signed(3), + Renouncing::RunnerUp + )); assert_eq!(members_ids(), vec![2, 4]); assert_eq!(runners_up_ids(), vec![5]); }); @@ -2782,11 +2800,14 @@ mod tests { #[test] fn can_renounce_candidacy_candidate() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); assert_eq!(balances(&5), (47, 3)); assert_eq!(candidate_ids(), vec![5]); - assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(1))); + assert_ok!(Elections::renounce_candidacy( + RuntimeOrigin::signed(5), + Renouncing::Candidate(1) + )); assert_eq!(balances(&5), (50, 0)); 
assert!(candidate_ids().is_empty()); }) @@ -2796,15 +2817,15 @@ mod tests { fn wrong_renounce_candidacy_should_fail() { ExtBuilder::default().build_and_execute(|| { assert_noop!( - Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(0)), + Elections::renounce_candidacy(RuntimeOrigin::signed(5), Renouncing::Candidate(0)), Error::::InvalidRenouncing, ); assert_noop!( - Elections::renounce_candidacy(Origin::signed(5), Renouncing::Member), + Elections::renounce_candidacy(RuntimeOrigin::signed(5), Renouncing::Member), Error::::InvalidRenouncing, ); assert_noop!( - Elections::renounce_candidacy(Origin::signed(5), Renouncing::RunnerUp), + Elections::renounce_candidacy(RuntimeOrigin::signed(5), Renouncing::RunnerUp), Error::::InvalidRenouncing, ); }) @@ -2813,13 +2834,13 @@ mod tests { #[test] fn non_member_renounce_member_should_fail() { ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2828,7 +2849,7 @@ mod tests { assert_eq!(runners_up_ids(), vec![3]); assert_noop!( - Elections::renounce_candidacy(Origin::signed(3), Renouncing::Member), + Elections::renounce_candidacy(RuntimeOrigin::signed(3), Renouncing::Member), Error::::InvalidRenouncing, ); }) @@ -2837,13 +2858,13 @@ mod tests { #[test] fn non_runner_up_renounce_runner_up_should_fail() { ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2852,7 +2873,7 @@ mod tests { assert_eq!(runners_up_ids(), vec![3]); assert_noop!( - Elections::renounce_candidacy(Origin::signed(4), Renouncing::RunnerUp), + Elections::renounce_candidacy(RuntimeOrigin::signed(4), Renouncing::RunnerUp), Error::::InvalidRenouncing, ); }) @@ -2861,27 +2882,33 @@ mod tests { #[test] fn wrong_candidate_count_renounce_should_fail() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + 
assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); assert_noop!( - Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(2)), + Elections::renounce_candidacy(RuntimeOrigin::signed(4), Renouncing::Candidate(2)), Error::::InvalidWitnessData, ); - assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(3))); + assert_ok!(Elections::renounce_candidacy( + RuntimeOrigin::signed(4), + Renouncing::Candidate(3) + )); }) } #[test] fn renounce_candidacy_count_can_overestimate() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); // while we have only 3 candidates. - assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(4))); + assert_ok!(Elections::renounce_candidacy( + RuntimeOrigin::signed(4), + Renouncing::Candidate(4) + )); }) } @@ -2891,13 +2918,13 @@ mod tests { .desired_runners_up(2) .desired_members(1) .build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 5)); - assert_ok!(vote(Origin::signed(3), vec![3], 15)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 5)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 15)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2905,8 +2932,8 @@ mod tests { assert_eq!(members_ids(), vec![5]); assert_eq!(runners_up_ids(), vec![4, 3]); - assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(Origin::signed(2), vec![2], 10)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 10)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2927,13 +2954,13 @@ mod tests { .desired_runners_up(2) .desired_members(1) .build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2946,8 +2973,8 @@ mod tests { assert_eq!(balances(&2), (15, 5)); // this guy will shift everyone down. 
- assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -2970,13 +2997,13 @@ mod tests { .desired_runners_up(2) .desired_members(1) .build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -2989,8 +3016,8 @@ mod tests { assert_eq!(balances(&2), (15, 5)); // swap some votes. - assert_ok!(vote(Origin::signed(4), vec![2], 40)); - assert_ok!(vote(Origin::signed(2), vec![4], 20)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![2], 40)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![4], 20)); System::set_block_number(10); Elections::on_initialize(System::block_number()); @@ -3010,13 +3037,13 @@ mod tests { #[test] fn remove_and_replace_member_works() { let setup = || { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5], 50)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3037,7 +3064,10 @@ mod tests { // member removed, no replacement found. 
ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { setup(); - assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); + assert_ok!(Elections::renounce_candidacy( + RuntimeOrigin::signed(3), + Renouncing::RunnerUp + )); assert_eq!(Elections::remove_and_replace_member(&4, false), Ok(false)); assert_eq!(members_ids(), vec![5]); @@ -3060,15 +3090,15 @@ mod tests { .build_and_execute(|| { assert_eq!(Elections::candidates().len(), 0); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); assert_eq!(Elections::candidates().len(), 3); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3086,15 +3116,15 @@ mod tests { .build_and_execute(|| { assert_eq!(Elections::candidates().len(), 0); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); assert_eq!(Elections::candidates().len(), 3); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3112,15 +3142,15 @@ mod tests { .build_and_execute(|| { assert_eq!(Elections::candidates().len(), 0); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); assert_eq!(Elections::candidates().len(), 3); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 40)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2], 20)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3135,17 +3165,17 @@ mod tests { #[test] fn dupe_vote_is_moot() { ExtBuilder::default().desired_members(1).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(submit_candidacy(Origin::signed(1))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + 
assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(2))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(1))); // all these duplicate votes will not cause 2 to win. - assert_ok!(vote(Origin::signed(1), vec![2, 2, 2, 2], 5)); - assert_ok!(vote(Origin::signed(2), vec![2, 2, 2, 2], 20)); + assert_ok!(vote(RuntimeOrigin::signed(1), vec![2, 2, 2, 2], 5)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![2, 2, 2, 2], 20)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 30)); System::set_block_number(5); Elections::on_initialize(System::block_number()); @@ -3157,24 +3187,33 @@ mod tests { #[test] fn remove_defunct_voter_works() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(5))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(4))); + assert_ok!(submit_candidacy(RuntimeOrigin::signed(3))); // defunct - assert_ok!(vote(Origin::signed(5), vec![5, 4], 5)); + assert_ok!(vote(RuntimeOrigin::signed(5), vec![5, 4], 5)); // defunct - assert_ok!(vote(Origin::signed(4), vec![4], 5)); + assert_ok!(vote(RuntimeOrigin::signed(4), vec![4], 5)); // ok - assert_ok!(vote(Origin::signed(3), vec![3], 5)); + assert_ok!(vote(RuntimeOrigin::signed(3), vec![3], 5)); // ok - assert_ok!(vote(Origin::signed(2), vec![3, 4], 5)); - - assert_ok!(Elections::renounce_candidacy(Origin::signed(5), Renouncing::Candidate(3))); - assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Candidate(2))); - assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::Candidate(1))); - - assert_ok!(Elections::clean_defunct_voters(Origin::root(), 4, 2)); + assert_ok!(vote(RuntimeOrigin::signed(2), vec![3, 4], 5)); + + assert_ok!(Elections::renounce_candidacy( + RuntimeOrigin::signed(5), + Renouncing::Candidate(3) + )); + assert_ok!(Elections::renounce_candidacy( + RuntimeOrigin::signed(4), + Renouncing::Candidate(2) + )); + assert_ok!(Elections::renounce_candidacy( + RuntimeOrigin::signed(3), + Renouncing::Candidate(1) + )); + + assert_ok!(Elections::clean_defunct_voters(RuntimeOrigin::root(), 4, 2)); }) } } diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index caa2d825262db..f754348782cec 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -655,7 +655,7 @@ pub mod pallet { impl Pallet { // Add public immutables and private mutables. #[allow(dead_code)] - fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> DispatchResult { + fn accumulate_foo(origin: T::RuntimeOrigin, increase_by: T::Balance) -> DispatchResult { let _sender = ensure_signed(origin)?; let prev = >::get(); diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index 793cb4287f505..db4787eaa0faa 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -60,7 +60,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -128,12 +128,12 @@ fn it_works_for_optional_value() { assert_eq!(Example::dummy(), Some(val1)); // Check that accumulate works when we have Some value in Dummy already. 
- assert_ok!(Example::accumulate_dummy(Origin::signed(1), val2)); + assert_ok!(Example::accumulate_dummy(RuntimeOrigin::signed(1), val2)); assert_eq!(Example::dummy(), Some(val1 + val2)); // Check that accumulate works when we Dummy has None in it. >::on_initialize(2); - assert_ok!(Example::accumulate_dummy(Origin::signed(1), val1)); + assert_ok!(Example::accumulate_dummy(RuntimeOrigin::signed(1), val1)); assert_eq!(Example::dummy(), Some(val1 + val2 + val1)); }); } @@ -142,7 +142,7 @@ fn it_works_for_optional_value() { fn it_works_for_default_value() { new_test_ext().execute_with(|| { assert_eq!(Example::foo(), 24); - assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); + assert_ok!(Example::accumulate_foo(RuntimeOrigin::signed(1), 1)); assert_eq!(Example::foo(), 25); }); } @@ -151,7 +151,7 @@ fn it_works_for_default_value() { fn set_dummy_works() { new_test_ext().execute_with(|| { let test_val = 133; - assert_ok!(Example::set_dummy(Origin::root(), test_val.into())); + assert_ok!(Example::set_dummy(RuntimeOrigin::root(), test_val.into())); assert_eq!(Example::dummy(), Some(test_val)); }); } diff --git a/frame/examples/offchain-worker/src/tests.rs b/frame/examples/offchain-worker/src/tests.rs index 2c189935ac19d..72c001fd6e6cc 100644 --- a/frame/examples/offchain-worker/src/tests.rs +++ b/frame/examples/offchain-worker/src/tests.rs @@ -60,7 +60,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; @@ -134,10 +134,10 @@ fn it_aggregates_the_price() { sp_io::TestExternalities::default().execute_with(|| { assert_eq!(Example::average_price(), None); - assert_ok!(Example::submit_price(Origin::signed(test_pub()), 27)); + assert_ok!(Example::submit_price(RuntimeOrigin::signed(test_pub()), 27)); assert_eq!(Example::average_price(), Some(27)); - assert_ok!(Example::submit_price(Origin::signed(test_pub()), 43)); + assert_ok!(Example::submit_price(RuntimeOrigin::signed(test_pub()), 43)); assert_eq!(Example::average_price(), Some(35)); }); } diff --git a/frame/examples/parallel/src/tests.rs b/frame/examples/parallel/src/tests.rs index 80c8213fdfb1c..fdef24a39ae36 100644 --- a/frame/examples/parallel/src/tests.rs +++ b/frame/examples/parallel/src/tests.rs @@ -45,7 +45,7 @@ parameter_types! 
{ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type PalletInfo = PalletInfo; type Index = u64; @@ -76,8 +76,8 @@ fn test_pub(n: u8) -> sp_core::sr25519::Public { sp_core::sr25519::Public::from_raw([n; 32]) } -fn test_origin(n: u8) -> Origin { - Origin::signed(test_pub(n)) +fn test_origin(n: u8) -> RuntimeOrigin { + RuntimeOrigin::signed(test_pub(n)) } #[test] @@ -103,7 +103,7 @@ fn it_can_enlist() { }, ]; - Example::enlist_participants(Origin::signed(test_pub(1)), participants) + Example::enlist_participants(RuntimeOrigin::signed(test_pub(1)), participants) .expect("Failed to enlist"); assert_eq!(Example::participants().len(), 2); diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 88cefb17c2895..4926384e42018 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -138,7 +138,7 @@ use sp_std::{marker::PhantomData, prelude::*}; pub type CheckedOf = >::Checked; pub type CallOf = as Applyable>::Call; -pub type OriginOf = as Dispatchable>::Origin; +pub type OriginOf = as Dispatchable>::RuntimeOrigin; /// Main entry point for certain runtime actions as e.g. `execute_block`. /// @@ -789,7 +789,7 @@ mod tests { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type RuntimeCall = RuntimeCall; type BlockNumber = u64; diff --git a/frame/gilt/src/benchmarking.rs b/frame/gilt/src/benchmarking.rs index 3df08372f499b..1767a8da4def0 100644 --- a/frame/gilt/src/benchmarking.rs +++ b/frame/gilt/src/benchmarking.rs @@ -77,7 +77,7 @@ benchmarks! { set_target { let origin = T::AdminOrigin::successful_origin(); - }: _(origin, Default::default()) + }: _(origin, Default::default()) verify {} thaw { diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index 157aa8284f2f1..28a0f5fd56e67 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -116,7 +116,7 @@ pub mod pallet { + MaxEncodedLen; /// Origin required for setting the target proportion to be under gilt. - type AdminOrigin: EnsureOrigin; + type AdminOrigin: EnsureOrigin; /// Unbalanced handler to account for funds created (in case of a higher total issuance over /// freezing period). 
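For illustration, a minimal sketch of how generic code refers to the renamed `Dispatchable` associated type after this change, mirroring the `OriginOf` alias update in frame/executive above. The helper name below is hypothetical and not taken from this series; it only demonstrates that the call-site is unchanged while the associated type it names is now `RuntimeOrigin`:

    use sp_runtime::{traits::Dispatchable, DispatchError};

    /// Hypothetical helper: dispatch a call with an explicitly supplied origin.
    /// Only the associated type's name changed (`Origin` -> `RuntimeOrigin`);
    /// the `dispatch` call itself is untouched.
    fn dispatch_with_origin<Call: Dispatchable>(
        call: Call,
        origin: Call::RuntimeOrigin,
    ) -> Result<(), DispatchError> {
        // Discard the post-dispatch info and surface only the `DispatchError`.
        call.dispatch(origin).map(|_post_info| ()).map_err(|e| e.error)
    }
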
diff --git a/frame/gilt/src/mock.rs b/frame/gilt/src/mock.rs index a9cca0ff22780..e1cdf6507ef58 100644 --- a/frame/gilt/src/mock.rs +++ b/frame/gilt/src/mock.rs @@ -49,7 +49,7 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/gilt/src/tests.rs b/frame/gilt/src/tests.rs index 486601b5b2f21..2ac369dd3b8b3 100644 --- a/frame/gilt/src/tests.rs +++ b/frame/gilt/src/tests.rs @@ -49,8 +49,8 @@ fn set_target_works() { new_test_ext().execute_with(|| { run_to_block(1); let e = DispatchError::BadOrigin; - assert_noop!(Gilt::set_target(Origin::signed(2), Perquintill::from_percent(50)), e); - assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(50))); + assert_noop!(Gilt::set_target(RuntimeOrigin::signed(2), Perquintill::from_percent(50)), e); + assert_ok!(Gilt::set_target(RuntimeOrigin::signed(1), Perquintill::from_percent(50))); assert_eq!( ActiveTotal::::get(), @@ -68,13 +68,19 @@ fn set_target_works() { fn place_bid_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_noop!(Gilt::place_bid(Origin::signed(1), 1, 2), Error::::AmountTooSmall); assert_noop!( - Gilt::place_bid(Origin::signed(1), 101, 2), + Gilt::place_bid(RuntimeOrigin::signed(1), 1, 2), + Error::::AmountTooSmall + ); + assert_noop!( + Gilt::place_bid(RuntimeOrigin::signed(1), 101, 2), BalancesError::::InsufficientBalance ); - assert_noop!(Gilt::place_bid(Origin::signed(1), 10, 4), Error::::DurationTooBig); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_noop!( + Gilt::place_bid(RuntimeOrigin::signed(1), 10, 4), + Error::::DurationTooBig + ); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); assert_eq!(Balances::reserved_balance(1), 10); assert_eq!(Queues::::get(2), vec![GiltBid { amount: 10, who: 1 }]); assert_eq!(QueueTotals::::get(), vec![(0, 0), (1, 10), (0, 0)]); @@ -85,16 +91,16 @@ fn place_bid_works() { fn place_bid_queuing_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 20, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 5, 2)); - assert_noop!(Gilt::place_bid(Origin::signed(1), 5, 2), Error::::BidTooLow); - assert_ok!(Gilt::place_bid(Origin::signed(1), 15, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 20, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 5, 2)); + assert_noop!(Gilt::place_bid(RuntimeOrigin::signed(1), 5, 2), Error::::BidTooLow); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 15, 2)); assert_eq!(Balances::reserved_balance(1), 45); - assert_ok!(Gilt::place_bid(Origin::signed(1), 25, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 25, 2)); assert_eq!(Balances::reserved_balance(1), 60); - assert_noop!(Gilt::place_bid(Origin::signed(1), 10, 2), Error::::BidTooLow); + assert_noop!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2), Error::::BidTooLow); assert_eq!( Queues::::get(2), vec![ @@ -111,11 +117,11 @@ fn place_bid_queuing_works() { fn place_bid_fails_when_queue_full() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); - 
assert_ok!(Gilt::place_bid(Origin::signed(3), 10, 2)); - assert_noop!(Gilt::place_bid(Origin::signed(4), 10, 2), Error::::BidTooLow); - assert_ok!(Gilt::place_bid(Origin::signed(4), 10, 3)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 10, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(3), 10, 2)); + assert_noop!(Gilt::place_bid(RuntimeOrigin::signed(4), 10, 2), Error::::BidTooLow); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(4), 10, 3)); }); } @@ -123,11 +129,11 @@ fn place_bid_fails_when_queue_full() { fn multiple_place_bids_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 3)); - assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 3)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 10, 2)); assert_eq!(Balances::reserved_balance(1), 40); assert_eq!(Balances::reserved_balance(2), 10); @@ -149,9 +155,9 @@ fn multiple_place_bids_works() { fn retract_single_item_queue_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); - assert_ok!(Gilt::retract_bid(Origin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); + assert_ok!(Gilt::retract_bid(RuntimeOrigin::signed(1), 10, 1)); assert_eq!(Balances::reserved_balance(1), 10); assert_eq!(Queues::::get(1), vec![]); @@ -164,12 +170,12 @@ fn retract_single_item_queue_works() { fn retract_with_other_and_duplicate_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 10, 2)); - assert_ok!(Gilt::retract_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::retract_bid(RuntimeOrigin::signed(1), 10, 2)); assert_eq!(Balances::reserved_balance(1), 20); assert_eq!(Balances::reserved_balance(2), 10); assert_eq!(Queues::::get(1), vec![GiltBid { amount: 10, who: 1 },]); @@ -185,11 +191,11 @@ fn retract_with_other_and_duplicate_works() { fn retract_non_existent_item_fails() { new_test_ext().execute_with(|| { run_to_block(1); - assert_noop!(Gilt::retract_bid(Origin::signed(1), 10, 1), Error::::NotFound); - assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); - assert_noop!(Gilt::retract_bid(Origin::signed(1), 20, 1), Error::::NotFound); - assert_noop!(Gilt::retract_bid(Origin::signed(1), 10, 2), Error::::NotFound); - assert_noop!(Gilt::retract_bid(Origin::signed(2), 10, 1), Error::::NotFound); + assert_noop!(Gilt::retract_bid(RuntimeOrigin::signed(1), 10, 1), Error::::NotFound); + 
assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 10, 1)); + assert_noop!(Gilt::retract_bid(RuntimeOrigin::signed(1), 20, 1), Error::::NotFound); + assert_noop!(Gilt::retract_bid(RuntimeOrigin::signed(1), 10, 2), Error::::NotFound); + assert_noop!(Gilt::retract_bid(RuntimeOrigin::signed(2), 10, 1), Error::::NotFound); }); } @@ -197,8 +203,8 @@ fn retract_non_existent_item_fails() { fn basic_enlarge_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); - assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 40, 2)); Gilt::enlarge(40, 2); // Takes 2/2, then stopped because it reaches its max amount @@ -228,10 +234,10 @@ fn basic_enlarge_works() { fn enlarge_respects_bids_limit() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); - assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(3), 40, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(4), 40, 3)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 40, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(3), 40, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(4), 40, 3)); Gilt::enlarge(100, 2); // Should have taken 4/3 and 2/2, then stopped because it's only allowed 2. @@ -269,7 +275,7 @@ fn enlarge_respects_bids_limit() { fn enlarge_respects_amount_limit_and_will_split() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 80, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 80, 1)); Gilt::enlarge(40, 2); // Takes 2/2, then stopped because it reaches its max amount @@ -296,14 +302,14 @@ fn enlarge_respects_amount_limit_and_will_split() { fn basic_thaw_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); Gilt::enlarge(40, 1); run_to_block(3); - assert_noop!(Gilt::thaw(Origin::signed(1), 0), Error::::NotExpired); + assert_noop!(Gilt::thaw(RuntimeOrigin::signed(1), 0), Error::::NotExpired); run_to_block(4); - assert_noop!(Gilt::thaw(Origin::signed(1), 1), Error::::Unknown); - assert_noop!(Gilt::thaw(Origin::signed(2), 0), Error::::NotOwner); - assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + assert_noop!(Gilt::thaw(RuntimeOrigin::signed(1), 1), Error::::Unknown); + assert_noop!(Gilt::thaw(RuntimeOrigin::signed(2), 0), Error::::NotOwner); + assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); assert_eq!( ActiveTotal::::get(), @@ -324,7 +330,7 @@ fn basic_thaw_works() { fn thaw_when_issuance_higher_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 100, 1)); Gilt::enlarge(100, 1); // Everybody else's balances goes up by 50% @@ -333,7 +339,7 @@ fn thaw_when_issuance_higher_works() { Balances::make_free_balance_be(&4, 150); run_to_block(4); - assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 150); assert_eq!(Balances::reserved_balance(1), 0); @@ -347,16 +353,16 @@ fn thaw_with_ignored_issuance_works() { // Give account zero some balance. 
Balances::make_free_balance_be(&0, 200); - assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 100, 1)); Gilt::enlarge(100, 1); // Account zero transfers 50 into everyone else's accounts. - assert_ok!(Balances::transfer(Origin::signed(0), 2, 50)); - assert_ok!(Balances::transfer(Origin::signed(0), 3, 50)); - assert_ok!(Balances::transfer(Origin::signed(0), 4, 50)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(0), 2, 50)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(0), 3, 50)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(0), 4, 50)); run_to_block(4); - assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); // Account zero changes have been ignored. assert_eq!(Balances::free_balance(1), 150); @@ -368,7 +374,7 @@ fn thaw_with_ignored_issuance_works() { fn thaw_when_issuance_lower_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 100, 1)); Gilt::enlarge(100, 1); // Everybody else's balances goes down by 25% @@ -377,7 +383,7 @@ fn thaw_when_issuance_lower_works() { Balances::make_free_balance_be(&4, 75); run_to_block(4); - assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 75); assert_eq!(Balances::reserved_balance(1), 0); @@ -388,9 +394,9 @@ fn thaw_when_issuance_lower_works() { fn multiple_thaws_works() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 60, 1)); - assert_ok!(Gilt::place_bid(Origin::signed(2), 50, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 60, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 50, 1)); Gilt::enlarge(200, 3); // Double everyone's free balances. @@ -399,9 +405,9 @@ fn multiple_thaws_works() { Balances::make_free_balance_be(&4, 200); run_to_block(4); - assert_ok!(Gilt::thaw(Origin::signed(1), 0)); - assert_ok!(Gilt::thaw(Origin::signed(1), 1)); - assert_ok!(Gilt::thaw(Origin::signed(2), 2)); + assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); + assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 1)); + assert_ok!(Gilt::thaw(RuntimeOrigin::signed(2), 2)); assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(2), 200); @@ -412,9 +418,9 @@ fn multiple_thaws_works() { fn multiple_thaws_works_in_alternative_thaw_order() { new_test_ext().execute_with(|| { run_to_block(1); - assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 60, 1)); - assert_ok!(Gilt::place_bid(Origin::signed(2), 50, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 60, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 50, 1)); Gilt::enlarge(200, 3); // Double everyone's free balances. 
@@ -423,9 +429,9 @@ fn multiple_thaws_works_in_alternative_thaw_order() { Balances::make_free_balance_be(&4, 200); run_to_block(4); - assert_ok!(Gilt::thaw(Origin::signed(2), 2)); - assert_ok!(Gilt::thaw(Origin::signed(1), 1)); - assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + assert_ok!(Gilt::thaw(RuntimeOrigin::signed(2), 2)); + assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 1)); + assert_ok!(Gilt::thaw(RuntimeOrigin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(2), 200); @@ -436,12 +442,12 @@ fn multiple_thaws_works_in_alternative_thaw_order() { fn enlargement_to_target_works() { new_test_ext().execute_with(|| { run_to_block(2); - assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); - assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); - assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 3)); - assert_ok!(Gilt::place_bid(Origin::signed(3), 40, 3)); - assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(40))); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(1), 40, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 40, 2)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(2), 40, 3)); + assert_ok!(Gilt::place_bid(RuntimeOrigin::signed(3), 40, 3)); + assert_ok!(Gilt::set_target(RuntimeOrigin::signed(1), Perquintill::from_percent(40))); run_to_block(3); assert_eq!(Queues::::get(1), vec![GiltBid { amount: 40, who: 1 },]); @@ -540,7 +546,7 @@ fn enlargement_to_target_works() { ); // Set target a bit higher to use up the remaining bid. - assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(60))); + assert_ok!(Gilt::set_target(RuntimeOrigin::signed(1), Perquintill::from_percent(60))); run_to_block(10); // Two new gilts should have been issued to 1 & 2 for 40 each & duration of 2. diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 19caf0766a3e0..a26000c0f7f2c 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -79,7 +79,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index baf71dfff71dc..775eda58c03e0 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -359,7 +359,7 @@ fn report_equivocation_current_set_works() { // report the equivocation and the tx should be dispatched successfully assert_ok!(Grandpa::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ),); @@ -437,7 +437,7 @@ fn report_equivocation_old_set_works() { // report the equivocation using the key ownership proof generated on // the old set, the tx should be dispatched successfully assert_ok!(Grandpa::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ),); @@ -500,7 +500,7 @@ fn report_equivocation_invalid_set_id() { // the call for reporting the equivocation should error assert_err!( Grandpa::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ), @@ -541,7 +541,7 @@ fn report_equivocation_invalid_session() { // proof from the previous set, the session should be invalid. 
assert_err!( Grandpa::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ), @@ -586,7 +586,7 @@ fn report_equivocation_invalid_key_owner_proof() { // proof for a different key than the one in the equivocation proof. assert_err!( Grandpa::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), invalid_key_owner_proof, ), @@ -617,7 +617,7 @@ fn report_equivocation_invalid_equivocation_proof() { let assert_invalid_equivocation_proof = |equivocation_proof| { assert_err!( Grandpa::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof.clone(), ), @@ -723,7 +723,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { // we submit the report Grandpa::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ) @@ -861,7 +861,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // report the equivocation. let post_info = Grandpa::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof.clone()), key_owner_proof.clone(), ) @@ -875,7 +875,7 @@ fn valid_equivocation_reports_dont_pay_fees() { // report the equivocation again which is invalid now since it is // duplicate. let post_info = Grandpa::report_equivocation_unsigned( - Origin::none(), + RuntimeOrigin::none(), Box::new(equivocation_proof), key_owner_proof, ) diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 7ab5d416eb533..c66658b92b0d2 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -123,7 +123,7 @@ benchmarks! { ensure!(Registrars::::get().len() as u32 == r, "Registrars not set up correctly."); let origin = T::RegistrarOrigin::successful_origin(); let account = T::Lookup::unlookup(account("registrar", r + 1, SEED)); - }: _(origin, account) + }: _(origin, account) verify { ensure!(Registrars::::get().len() as u32 == r + 1, "Registrars not added."); } @@ -135,7 +135,7 @@ benchmarks! { // The target user let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); + let caller_origin: ::RuntimeOrigin = RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Add an initial identity @@ -193,7 +193,7 @@ benchmarks! { clear_identity { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); let caller_lookup = ::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -208,7 +208,7 @@ benchmarks! { // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); Identity::::set_identity(caller_origin.clone(), Box::new(info.clone()))?; // User requests judgement from all the registrars, and they approve @@ -237,7 +237,7 @@ benchmarks! 
{ // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller)); Identity::::set_identity(caller_origin, Box::new(info))?; }; }: _(RawOrigin::Signed(caller.clone()), r - 1, 10u32.into()) @@ -247,7 +247,7 @@ benchmarks! { cancel_request { let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller.clone())); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; @@ -255,7 +255,7 @@ benchmarks! { // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + let caller_origin = ::RuntimeOrigin::from(RawOrigin::Signed(caller)); Identity::::set_identity(caller_origin, Box::new(info))?; }; @@ -323,7 +323,7 @@ benchmarks! { provide_judgement { // The user let user: T::AccountId = account("user", r, SEED); - let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); + let user_origin = ::RuntimeOrigin::from(RawOrigin::Signed(user.clone())); let user_lookup = ::unlookup(user.clone()); let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); @@ -352,7 +352,7 @@ benchmarks! { let x in 1 .. T::MaxAdditionalFields::get(); let target: T::AccountId = account("target", 0, SEED); - let target_origin: ::Origin = RawOrigin::Signed(target.clone()).into(); + let target_origin: ::RuntimeOrigin = RawOrigin::Signed(target.clone()).into(); let target_lookup = T::Lookup::unlookup(target.clone()); let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); @@ -373,7 +373,7 @@ benchmarks! { } ensure!(IdentityOf::::contains_key(&target), "Identity not set"); let origin = T::ForceOrigin::successful_origin(); - }: _(origin, target_lookup) + }: _(origin, target_lookup) verify { ensure!(!IdentityOf::::contains_key(&target), "Identity not removed"); } diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index c4f35a06e830a..8b22ecedaec19 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -142,10 +142,10 @@ pub mod pallet { type Slashed: OnUnbalanced>; /// The origin which may forcibly set or remove a name. Root can always do this. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// The origin which may add or remove registrars. Root can always do this. - type RegistrarOrigin: EnsureOrigin; + type RegistrarOrigin: EnsureOrigin; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 0fb5d4587af16..747d1e9f27106 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -56,7 +56,7 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -148,41 +148,44 @@ fn editing_subaccounts_should_work() { new_test_ext().execute_with(|| { let data = |x| Data::Raw(vec![x; 1].try_into().unwrap()); - assert_noop!(Identity::add_sub(Origin::signed(10), 20, data(1)), Error::::NoIdentity); + assert_noop!( + Identity::add_sub(RuntimeOrigin::signed(10), 20, data(1)), + Error::::NoIdentity + ); - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); // first sub account - assert_ok!(Identity::add_sub(Origin::signed(10), 1, data(1))); + assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 1, data(1))); assert_eq!(SuperOf::::get(1), Some((10, data(1)))); assert_eq!(Balances::free_balance(10), 80); // second sub account - assert_ok!(Identity::add_sub(Origin::signed(10), 2, data(2))); + assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 2, data(2))); assert_eq!(SuperOf::::get(1), Some((10, data(1)))); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); assert_eq!(Balances::free_balance(10), 70); // third sub account is too many assert_noop!( - Identity::add_sub(Origin::signed(10), 3, data(3)), + Identity::add_sub(RuntimeOrigin::signed(10), 3, data(3)), Error::::TooManySubAccounts ); // rename first sub account - assert_ok!(Identity::rename_sub(Origin::signed(10), 1, data(11))); + assert_ok!(Identity::rename_sub(RuntimeOrigin::signed(10), 1, data(11))); assert_eq!(SuperOf::::get(1), Some((10, data(11)))); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); assert_eq!(Balances::free_balance(10), 70); // remove first sub account - assert_ok!(Identity::remove_sub(Origin::signed(10), 1)); + assert_ok!(Identity::remove_sub(RuntimeOrigin::signed(10), 1)); assert_eq!(SuperOf::::get(1), None); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); assert_eq!(Balances::free_balance(10), 80); // add third sub account - assert_ok!(Identity::add_sub(Origin::signed(10), 3, data(3))); + assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 3, data(3))); assert_eq!(SuperOf::::get(1), None); assert_eq!(SuperOf::::get(2), Some((10, data(2)))); assert_eq!(SuperOf::::get(3), Some((10, data(3)))); @@ -195,27 +198,27 @@ fn resolving_subaccount_ownership_works() { new_test_ext().execute_with(|| { let data = |x| Data::Raw(vec![x; 1].try_into().unwrap()); - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); - assert_ok!(Identity::set_identity(Origin::signed(20), Box::new(twenty()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(20), Box::new(twenty()))); // 10 claims 1 as a subaccount - assert_ok!(Identity::add_sub(Origin::signed(10), 1, data(1))); + assert_ok!(Identity::add_sub(RuntimeOrigin::signed(10), 1, data(1))); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(10), 80); assert_eq!(Balances::reserved_balance(10), 20); // 20 cannot claim 1 now assert_noop!( - Identity::add_sub(Origin::signed(20), 1, data(1)), + 
Identity::add_sub(RuntimeOrigin::signed(20), 1, data(1)), Error::::AlreadyClaimed ); // 1 wants to be with 20 so it quits from 10 - assert_ok!(Identity::quit_sub(Origin::signed(1))); + assert_ok!(Identity::quit_sub(RuntimeOrigin::signed(1))); // 1 gets the 10 that 10 paid. assert_eq!(Balances::free_balance(1), 20); assert_eq!(Balances::free_balance(10), 80); assert_eq!(Balances::reserved_balance(10), 10); // 20 can claim 1 now - assert_ok!(Identity::add_sub(Origin::signed(20), 1, data(1))); + assert_ok!(Identity::add_sub(RuntimeOrigin::signed(20), 1, data(1))); }); } @@ -232,10 +235,10 @@ fn trailing_zeros_decodes_into_default_data() { #[test] fn adding_registrar_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); + assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); let fields = IdentityFields(IdentityField::Display | IdentityField::Legal); - assert_ok!(Identity::set_fields(Origin::signed(3), 0, fields)); + assert_ok!(Identity::set_fields(RuntimeOrigin::signed(3), 0, fields)); assert_eq!( Identity::registrars(), vec![Some(RegistrarInfo { account: 3, fee: 10, fields })] @@ -247,11 +250,11 @@ fn adding_registrar_should_work() { fn amount_of_registrars_is_limited() { new_test_ext().execute_with(|| { for i in 1..MaxRegistrars::get() + 1 { - assert_ok!(Identity::add_registrar(Origin::signed(1), i as u64)); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), i as u64)); } let last_registrar = MaxRegistrars::get() as u64 + 1; assert_noop!( - Identity::add_registrar(Origin::signed(1), last_registrar), + Identity::add_registrar(RuntimeOrigin::signed(1), last_registrar), Error::::TooManyRegistrars ); }); @@ -260,18 +263,18 @@ fn amount_of_registrars_is_limited() { #[test] fn registration_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); + assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); let mut three_fields = ten(); three_fields.additional.try_push(Default::default()).unwrap(); three_fields.additional.try_push(Default::default()).unwrap(); assert_eq!(three_fields.additional.try_push(Default::default()), Err(())); - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); assert_eq!(Identity::identity(10).unwrap().info, ten()); assert_eq!(Balances::free_balance(10), 90); - assert_ok!(Identity::clear_identity(Origin::signed(10))); + assert_ok!(Identity::clear_identity(RuntimeOrigin::signed(10))); assert_eq!(Balances::free_balance(10), 100); - assert_noop!(Identity::clear_identity(Origin::signed(10)), Error::::NotNamed); + assert_noop!(Identity::clear_identity(RuntimeOrigin::signed(10)), Error::::NotNamed); }); } @@ -280,7 +283,7 @@ fn uninvited_judgement_should_work() { new_test_ext().execute_with(|| { assert_noop!( Identity::provide_judgement( - Origin::signed(3), + RuntimeOrigin::signed(3), 0, 10, Judgement::Reasonable, @@ -289,10 +292,10 @@ fn uninvited_judgement_should_work() { Error::::InvalidIndex ); - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); assert_noop!( Identity::provide_judgement( - Origin::signed(3), + 
RuntimeOrigin::signed(3), 0, 10, Judgement::Reasonable, @@ -301,10 +304,10 @@ fn uninvited_judgement_should_work() { Error::::InvalidTarget ); - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); assert_noop!( Identity::provide_judgement( - Origin::signed(3), + RuntimeOrigin::signed(3), 0, 10, Judgement::Reasonable, @@ -317,7 +320,7 @@ fn uninvited_judgement_should_work() { assert_noop!( Identity::provide_judgement( - Origin::signed(10), + RuntimeOrigin::signed(10), 0, 10, Judgement::Reasonable, @@ -327,7 +330,7 @@ fn uninvited_judgement_should_work() { ); assert_noop!( Identity::provide_judgement( - Origin::signed(3), + RuntimeOrigin::signed(3), 0, 10, Judgement::FeePaid(1), @@ -337,7 +340,7 @@ fn uninvited_judgement_should_work() { ); assert_ok!(Identity::provide_judgement( - Origin::signed(3), + RuntimeOrigin::signed(3), 0, 10, Judgement::Reasonable, @@ -350,16 +353,16 @@ fn uninvited_judgement_should_work() { #[test] fn clearing_judgement_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); assert_ok!(Identity::provide_judgement( - Origin::signed(3), + RuntimeOrigin::signed(3), 0, 10, Judgement::Reasonable, BlakeTwo256::hash_of(&ten()) )); - assert_ok!(Identity::clear_identity(Origin::signed(10))); + assert_ok!(Identity::clear_identity(RuntimeOrigin::signed(10))); assert_eq!(Identity::identity(10), None); }); } @@ -367,12 +370,15 @@ fn clearing_judgement_should_work() { #[test] fn killing_slashing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); - assert_noop!(Identity::kill_identity(Origin::signed(1), 10), BadOrigin); - assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_noop!(Identity::kill_identity(RuntimeOrigin::signed(1), 10), BadOrigin); + assert_ok!(Identity::kill_identity(RuntimeOrigin::signed(2), 10)); assert_eq!(Identity::identity(10), None); assert_eq!(Balances::free_balance(10), 90); - assert_noop!(Identity::kill_identity(Origin::signed(2), 10), Error::::NotNamed); + assert_noop!( + Identity::kill_identity(RuntimeOrigin::signed(2), 10), + Error::::NotNamed + ); }); } @@ -380,17 +386,20 @@ fn killing_slashing_should_work() { fn setting_subaccounts_should_work() { new_test_ext().execute_with(|| { let mut subs = vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))]; - assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::::NotFound); + assert_noop!( + Identity::set_subs(RuntimeOrigin::signed(10), subs.clone()), + Error::::NotFound + ); - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); - assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 80); assert_eq!(Identity::subs_of(10), (10, vec![20].try_into().unwrap())); assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1].try_into().unwrap())))); // push another item and re-set it. 
subs.push((30, Data::Raw(vec![50; 1].try_into().unwrap()))); - assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); + assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 70); assert_eq!(Identity::subs_of(10), (20, vec![20, 30].try_into().unwrap())); assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1].try_into().unwrap())))); @@ -398,7 +407,7 @@ fn setting_subaccounts_should_work() { // switch out one of the items and re-set. subs[0] = (40, Data::Raw(vec![60; 1].try_into().unwrap())); - assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); + assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 70); // no change in the balance assert_eq!(Identity::subs_of(10), (20, vec![40, 30].try_into().unwrap())); assert_eq!(Identity::super_of(20), None); @@ -406,7 +415,7 @@ fn setting_subaccounts_should_work() { assert_eq!(Identity::super_of(40), Some((10, Data::Raw(vec![60; 1].try_into().unwrap())))); // clear - assert_ok!(Identity::set_subs(Origin::signed(10), vec![])); + assert_ok!(Identity::set_subs(RuntimeOrigin::signed(10), vec![])); assert_eq!(Balances::free_balance(10), 90); assert_eq!(Identity::subs_of(10), (0, BoundedVec::default())); assert_eq!(Identity::super_of(30), None); @@ -414,7 +423,7 @@ fn setting_subaccounts_should_work() { subs.push((20, Data::Raw(vec![40; 1].try_into().unwrap()))); assert_noop!( - Identity::set_subs(Origin::signed(10), subs.clone()), + Identity::set_subs(RuntimeOrigin::signed(10), subs.clone()), Error::::TooManySubAccounts ); }); @@ -423,12 +432,12 @@ fn setting_subaccounts_should_work() { #[test] fn clearing_account_should_remove_subaccounts_and_refund() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); assert_ok!(Identity::set_subs( - Origin::signed(10), + RuntimeOrigin::signed(10), vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] )); - assert_ok!(Identity::clear_identity(Origin::signed(10))); + assert_ok!(Identity::clear_identity(RuntimeOrigin::signed(10))); assert_eq!(Balances::free_balance(10), 100); assert!(Identity::super_of(20).is_none()); }); @@ -437,12 +446,12 @@ fn clearing_account_should_remove_subaccounts_and_refund() { #[test] fn killing_account_should_remove_subaccounts_and_not_refund() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); assert_ok!(Identity::set_subs( - Origin::signed(10), + RuntimeOrigin::signed(10), vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] )); - assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); + assert_ok!(Identity::kill_identity(RuntimeOrigin::signed(2), 10)); assert_eq!(Balances::free_balance(10), 80); assert!(Identity::super_of(20).is_none()); }); @@ -451,24 +460,30 @@ fn killing_account_should_remove_subaccounts_and_not_refund() { #[test] fn cancelling_requested_judgement_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NoIdentity); - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); - assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); - 
assert_ok!(Identity::cancel_request(Origin::signed(10), 0)); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); + assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); + assert_noop!( + Identity::cancel_request(RuntimeOrigin::signed(10), 0), + Error::::NoIdentity + ); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); + assert_ok!(Identity::cancel_request(RuntimeOrigin::signed(10), 0)); assert_eq!(Balances::free_balance(10), 90); - assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NotFound); + assert_noop!( + Identity::cancel_request(RuntimeOrigin::signed(10), 0), + Error::::NotFound + ); assert_ok!(Identity::provide_judgement( - Origin::signed(3), + RuntimeOrigin::signed(3), 0, 10, Judgement::Reasonable, BlakeTwo256::hash_of(&ten()) )); assert_noop!( - Identity::cancel_request(Origin::signed(10), 0), + Identity::cancel_request(RuntimeOrigin::signed(10), 0), Error::::JudgementGiven ); }); @@ -477,24 +492,24 @@ fn cancelling_requested_judgement_should_work() { #[test] fn requesting_judgement_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); + assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); assert_noop!( - Identity::request_judgement(Origin::signed(10), 0, 9), + Identity::request_judgement(RuntimeOrigin::signed(10), 0, 9), Error::::FeeChanged ); - assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); + assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); // 10 for the judgement request, 10 for the identity. assert_eq!(Balances::free_balance(10), 80); // Re-requesting won't work as we already paid. assert_noop!( - Identity::request_judgement(Origin::signed(10), 0, 10), + Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10), Error::::StickyJudgement ); assert_ok!(Identity::provide_judgement( - Origin::signed(3), + RuntimeOrigin::signed(3), 0, 10, Judgement::Erroneous, @@ -505,33 +520,33 @@ fn requesting_judgement_should_work() { // Re-requesting still won't work as it's erroneous. assert_noop!( - Identity::request_judgement(Origin::signed(10), 0, 10), + Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10), Error::::StickyJudgement ); // Requesting from a second registrar still works. - assert_ok!(Identity::add_registrar(Origin::signed(1), 4)); - assert_ok!(Identity::request_judgement(Origin::signed(10), 1, 10)); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 4)); + assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 1, 10)); // Re-requesting after the judgement has been reduced works. 
assert_ok!(Identity::provide_judgement( - Origin::signed(3), + RuntimeOrigin::signed(3), 0, 10, Judgement::OutOfDate, BlakeTwo256::hash_of(&ten()) )); - assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); + assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); }); } #[test] fn field_deposit_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); + assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); assert_ok!(Identity::set_identity( - Origin::signed(10), + RuntimeOrigin::signed(10), Box::new(IdentityInfo { additional: vec![ ( @@ -555,23 +570,23 @@ fn field_deposit_should_work() { #[test] fn setting_account_id_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); // account 4 cannot change the first registrar's identity since it's owned by 3. assert_noop!( - Identity::set_account_id(Origin::signed(4), 0, 3), + Identity::set_account_id(RuntimeOrigin::signed(4), 0, 3), Error::::InvalidIndex ); // account 3 can, because that's the registrar's current account. - assert_ok!(Identity::set_account_id(Origin::signed(3), 0, 4)); + assert_ok!(Identity::set_account_id(RuntimeOrigin::signed(3), 0, 4)); // account 4 can now, because that's their new ID. - assert_ok!(Identity::set_account_id(Origin::signed(4), 0, 3)); + assert_ok!(Identity::set_account_id(RuntimeOrigin::signed(4), 0, 3)); }); } #[test] fn test_has_identity() { new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), Box::new(ten()))); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); assert!(Identity::has_identity(&10, IdentityField::Display as u64)); assert!(Identity::has_identity(&10, IdentityField::Legal as u64)); assert!(Identity::has_identity( diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index a193843a3d661..5782e1a615b8e 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -111,7 +111,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { result.execute_with(|| { for i in 1..=6 { System::inc_providers(&i); - assert_eq!(Session::set_keys(Origin::signed(i), (i - 1).into(), vec![]), Ok(())); + assert_eq!(Session::set_keys(RuntimeOrigin::signed(i), (i - 1).into(), vec![]), Ok(())); } }); result @@ -127,7 +127,7 @@ impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index e6154a634f4a4..366119278d836 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -141,7 +141,7 @@ fn heartbeat( "invalid validators len", e @ _ => <&'static str>::from(e), })?; - ImOnline::heartbeat(Origin::none(), heartbeat, signature) + ImOnline::heartbeat(RuntimeOrigin::none(), heartbeat, signature) } #[test] diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index 255080f345eee..fd2e9fff16885 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -52,7 +52,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = 
Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/indices/src/tests.rs b/frame/indices/src/tests.rs index 4e6c59703ca36..bed6cfffaa825 100644 --- a/frame/indices/src/tests.rs +++ b/frame/indices/src/tests.rs @@ -104,7 +104,7 @@ fn transfer_index_on_accounts_should_work() { fn force_transfer_index_on_preowned_should_work() { new_test_ext().execute_with(|| { assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_ok!(Indices::force_transfer(Origin::root(), Id(3), 0, false)); + assert_ok!(Indices::force_transfer(RuntimeOrigin::root(), Id(3), 0, false)); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::reserved_balance(3), 0); assert_eq!(Indices::lookup_index(0), Some(3)); @@ -114,7 +114,7 @@ fn force_transfer_index_on_preowned_should_work() { #[test] fn force_transfer_index_on_free_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Indices::force_transfer(Origin::root(), Id(3), 0, false)); + assert_ok!(Indices::force_transfer(RuntimeOrigin::root(), Id(3), 0, false)); assert_eq!(Balances::reserved_balance(3), 0); assert_eq!(Indices::lookup_index(0), Some(3)); }); diff --git a/frame/lottery/src/benchmarking.rs b/frame/lottery/src/benchmarking.rs index 1c850e66f9c6e..fba722a07fabd 100644 --- a/frame/lottery/src/benchmarking.rs +++ b/frame/lottery/src/benchmarking.rs @@ -78,7 +78,7 @@ benchmarks! { let calls = vec![frame_system::Call::::remark { remark: vec![] }.into(); n as usize]; let origin = T::ManagerOrigin::successful_origin(); assert!(CallIndices::::get().is_empty()); - }: _(origin, calls) + }: _(origin, calls) verify { if !n.is_zero() { assert!(!CallIndices::::get().is_empty()); @@ -90,7 +90,7 @@ benchmarks! { let end = 10u32.into(); let payout = 5u32.into(); let origin = T::ManagerOrigin::successful_origin(); - }: _(origin, price, end, payout, true) + }: _(origin, price, end, payout, true) verify { assert!(crate::Lottery::::get().is_some()); } @@ -99,7 +99,7 @@ benchmarks! { setup_lottery::(true)?; assert_eq!(crate::Lottery::::get().unwrap().repeat, true); let origin = T::ManagerOrigin::successful_origin(); - }: _(origin) + }: _(origin) verify { assert_eq!(crate::Lottery::::get().unwrap().repeat, false); } diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index 505dd1bb98caa..c501a30ef5f4a 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -135,7 +135,7 @@ pub mod pallet { /// A dispatchable call. type RuntimeCall: Parameter - + Dispatchable + + Dispatchable + GetDispatchInfo + From>; @@ -149,7 +149,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The manager origin. - type ManagerOrigin: EnsureOrigin; + type ManagerOrigin: EnsureOrigin; /// The max number of calls available in a single lottery. 
#[pallet::constant] diff --git a/frame/lottery/src/mock.rs b/frame/lottery/src/mock.rs index b521b83e7ce2b..1977da5959d39 100644 --- a/frame/lottery/src/mock.rs +++ b/frame/lottery/src/mock.rs @@ -57,7 +57,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type RuntimeCall = RuntimeCall; type BlockNumber = u64; diff --git a/frame/lottery/src/tests.rs b/frame/lottery/src/tests.rs index ff3111b52db77..0eaf080564008 100644 --- a/frame/lottery/src/tests.rs +++ b/frame/lottery/src/tests.rs @@ -20,8 +20,8 @@ use super::*; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; use mock::{ - new_test_ext, run_to_block, Balances, BalancesCall, Lottery, Origin, RuntimeCall, SystemCall, - Test, + new_test_ext, run_to_block, Balances, BalancesCall, Lottery, RuntimeCall, RuntimeOrigin, + SystemCall, Test, }; use pallet_balances::Error as BalancesError; use sp_runtime::traits::BadOrigin; @@ -49,15 +49,15 @@ fn basic_end_to_end_works() { ]; // Set calls for the lottery - assert_ok!(Lottery::set_calls(Origin::root(), calls)); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); // Start lottery, it repeats - assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, true)); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), price, length, delay, true)); assert!(crate::Lottery::::get().is_some()); assert_eq!(Balances::free_balance(&1), 100); let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 20 })); - assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); // 20 from the transfer, 10 from buying a ticket assert_eq!(Balances::free_balance(&1), 100 - 20 - 10); assert_eq!(Participants::::get(&1).1.len(), 1); @@ -66,14 +66,14 @@ fn basic_end_to_end_works() { assert_eq!(Tickets::::get(0), Some(1)); // More ticket purchases - assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); - assert_ok!(Lottery::buy_ticket(Origin::signed(3), call.clone())); - assert_ok!(Lottery::buy_ticket(Origin::signed(4), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(3), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(4), call.clone())); assert_eq!(TicketsCount::::get(), 4); // Go to end run_to_block(20); - assert_ok!(Lottery::buy_ticket(Origin::signed(5), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(5), call.clone())); // Ticket isn't bought assert_eq!(TicketsCount::::get(), 4); @@ -100,15 +100,15 @@ fn stop_repeat_works() { let delay = 5; // Set no calls for the lottery. - assert_ok!(Lottery::set_calls(Origin::root(), vec![])); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), vec![])); // Start lottery, it repeats. - assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, true)); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), price, length, delay, true)); // Non-manager fails to `stop_repeat`. - assert_noop!(Lottery::stop_repeat(Origin::signed(1)), DispatchError::BadOrigin); + assert_noop!(Lottery::stop_repeat(RuntimeOrigin::signed(1)), DispatchError::BadOrigin); // Manager can `stop_repeat`, even twice. 
- assert_ok!(Lottery::stop_repeat(Origin::root())); - assert_ok!(Lottery::stop_repeat(Origin::root())); + assert_ok!(Lottery::stop_repeat(RuntimeOrigin::root())); + assert_ok!(Lottery::stop_repeat(RuntimeOrigin::root())); // Lottery still exists. assert!(crate::Lottery::::get().is_some()); @@ -132,7 +132,7 @@ fn set_calls_works() { RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; - assert_ok!(Lottery::set_calls(Origin::root(), calls)); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); assert!(CallIndices::::exists()); let too_many_calls = vec![ @@ -142,12 +142,12 @@ fn set_calls_works() { ]; assert_noop!( - Lottery::set_calls(Origin::root(), too_many_calls), + Lottery::set_calls(RuntimeOrigin::root(), too_many_calls), Error::::TooManyCalls, ); // Clear calls - assert_ok!(Lottery::set_calls(Origin::root(), vec![])); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), vec![])); assert!(CallIndices::::get().is_empty()); }); } @@ -182,16 +182,16 @@ fn start_lottery_works() { // Setup ignores bad origin assert_noop!( - Lottery::start_lottery(Origin::signed(1), price, length, delay, false), + Lottery::start_lottery(RuntimeOrigin::signed(1), price, length, delay, false), BadOrigin, ); // All good - assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, false)); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), price, length, delay, false)); // Can't open another one if lottery is already present assert_noop!( - Lottery::start_lottery(Origin::root(), price, length, delay, false), + Lottery::start_lottery(RuntimeOrigin::root(), price, length, delay, false), Error::::InProgress, ); }); @@ -205,7 +205,7 @@ fn buy_ticket_works_as_simple_passthrough() { // No lottery set up let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 20 })); // This is just a basic transfer then - assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); assert_eq!(Balances::free_balance(&1), 100 - 20); assert_eq!(TicketsCount::::get(), 0); @@ -214,11 +214,11 @@ fn buy_ticket_works_as_simple_passthrough() { RuntimeCall::Balances(BalancesCall::force_transfer { source: 0, dest: 0, value: 0 }), RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; - assert_ok!(Lottery::set_calls(Origin::root(), calls)); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); // Ticket price of 60 would kill the user's account - assert_ok!(Lottery::start_lottery(Origin::root(), 60, 10, 5, false)); - assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 60, 10, 5, false)); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); assert_eq!(Balances::free_balance(&1), 100 - 20 - 20); assert_eq!(TicketsCount::::get(), 0); @@ -226,7 +226,7 @@ fn buy_ticket_works_as_simple_passthrough() { let fail_call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1000 })); assert_noop!( - Lottery::buy_ticket(Origin::signed(1), fail_call), + Lottery::buy_ticket(RuntimeOrigin::signed(1), fail_call), BalancesError::::InsufficientBalance, ); @@ -235,17 +235,17 @@ fn buy_ticket_works_as_simple_passthrough() { dest: 0, value: 0, })); - assert_noop!(Lottery::buy_ticket(Origin::signed(1), bad_origin_call), BadOrigin,); + assert_noop!(Lottery::buy_ticket(RuntimeOrigin::signed(1), bad_origin_call), BadOrigin,); // User can call other txs, but doesn't get a 
ticket let remark_call = Box::new(RuntimeCall::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); - assert_ok!(Lottery::buy_ticket(Origin::signed(2), remark_call)); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), remark_call)); assert_eq!(TicketsCount::::get(), 0); let successful_call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1 })); - assert_ok!(Lottery::buy_ticket(Origin::signed(2), successful_call)); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), successful_call)); assert_eq!(TicketsCount::::get(), 1); }); } @@ -258,40 +258,40 @@ fn buy_ticket_works() { RuntimeCall::System(SystemCall::remark { remark: vec![] }), RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 }), ]; - assert_ok!(Lottery::set_calls(Origin::root(), calls)); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls)); // Can't buy ticket before start let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 2, value: 1 })); - assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); assert_eq!(TicketsCount::::get(), 0); // Start lottery - assert_ok!(Lottery::start_lottery(Origin::root(), 1, 20, 5, false)); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 1, 20, 5, false)); // Go to start, buy ticket for transfer run_to_block(5); - assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call)); assert_eq!(TicketsCount::::get(), 1); // Can't buy another of the same ticket (even if call is slightly changed) let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 3, value: 30 })); - assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call)); assert_eq!(TicketsCount::::get(), 1); // Buy ticket for remark let call = Box::new(RuntimeCall::System(SystemCall::remark { remark: b"hello, world!".to_vec() })); - assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); assert_eq!(TicketsCount::::get(), 2); // Go to end, can't buy tickets anymore run_to_block(20); - assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), call.clone())); assert_eq!(TicketsCount::::get(), 2); // Go to payout, can't buy tickets when there is no lottery open run_to_block(25); - assert_ok!(Lottery::buy_ticket(Origin::signed(2), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(2), call.clone())); assert_eq!(TicketsCount::::get(), 0); assert_eq!(LotteryIndex::::get(), 1); }); @@ -303,8 +303,8 @@ fn buy_ticket_works() { fn do_buy_ticket_already_participating() { new_test_ext().execute_with(|| { let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); - assert_ok!(Lottery::start_lottery(Origin::root(), 1, 10, 10, false)); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 1, 10, 10, false)); // Buying once works. 
assert_ok!(Lottery::do_buy_ticket(&1, &calls[0])); @@ -318,15 +318,15 @@ fn do_buy_ticket_already_participating() { fn buy_ticket_already_participating() { new_test_ext().execute_with(|| { let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); - assert_ok!(Lottery::start_lottery(Origin::root(), 1, 10, 10, false)); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 1, 10, 10, false)); // Buying once works. let call = Box::new(calls[0].clone()); - assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call.clone())); // Buying the same ticket again returns Ok, but changes nothing. - assert_storage_noop!(Lottery::buy_ticket(Origin::signed(1), call).unwrap()); + assert_storage_noop!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call).unwrap()); // Exactly one ticket exists. assert_eq!(TicketsCount::::get(), 1); @@ -338,13 +338,13 @@ fn buy_ticket_already_participating() { fn buy_ticket_insufficient_balance() { new_test_ext().execute_with(|| { let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); // Price set to 100. - assert_ok!(Lottery::start_lottery(Origin::root(), 100, 10, 10, false)); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 100, 10, 10, false)); let call = Box::new(calls[0].clone()); // Buying a ticket returns Ok, but changes nothing. - assert_storage_noop!(Lottery::buy_ticket(Origin::signed(1), call).unwrap()); + assert_storage_noop!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call).unwrap()); assert!(TicketsCount::::get().is_zero()); }); } @@ -353,9 +353,9 @@ fn buy_ticket_insufficient_balance() { fn do_buy_ticket_insufficient_balance() { new_test_ext().execute_with(|| { let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); // Price set to 101. - assert_ok!(Lottery::start_lottery(Origin::root(), 101, 10, 10, false)); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 101, 10, 10, false)); // Buying fails with InsufficientBalance. assert_noop!( @@ -370,9 +370,9 @@ fn do_buy_ticket_insufficient_balance() { fn do_buy_ticket_keep_alive() { new_test_ext().execute_with(|| { let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); // Price set to 100. - assert_ok!(Lottery::start_lottery(Origin::root(), 100, 10, 10, false)); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 100, 10, 10, false)); // Buying fails with KeepAlive. assert_noop!(Lottery::do_buy_ticket(&1, &calls[0]), BalancesError::::KeepAlive); @@ -388,9 +388,9 @@ fn no_participants_works() { let delay = 5; // Set no calls for the lottery. - assert_ok!(Lottery::set_calls(Origin::root(), vec![])); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), vec![])); // Start lottery. 
- assert_ok!(Lottery::start_lottery(Origin::root(), 10, length, delay, false)); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 10, length, delay, false)); // End the lottery, no one wins. run_to_block(length + delay); @@ -405,7 +405,7 @@ fn start_lottery_will_create_account() { let delay = 5; assert_eq!(Balances::total_balance(&Lottery::account_id()), 0); - assert_ok!(Lottery::start_lottery(Origin::root(), price, length, delay, false)); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), price, length, delay, false)); assert_eq!(Balances::total_balance(&Lottery::account_id()), 1); }); } @@ -422,12 +422,12 @@ fn choose_ticket_trivial_cases() { fn choose_account_one_participant() { new_test_ext().execute_with(|| { let calls = vec![RuntimeCall::Balances(BalancesCall::transfer { dest: 0, value: 0 })]; - assert_ok!(Lottery::set_calls(Origin::root(), calls.clone())); - assert_ok!(Lottery::start_lottery(Origin::root(), 10, 10, 10, false)); + assert_ok!(Lottery::set_calls(RuntimeOrigin::root(), calls.clone())); + assert_ok!(Lottery::start_lottery(RuntimeOrigin::root(), 10, 10, 10, false)); let call = Box::new(calls[0].clone()); // Buy one ticket with account 1. - assert_ok!(Lottery::buy_ticket(Origin::signed(1), call)); + assert_ok!(Lottery::buy_ticket(RuntimeOrigin::signed(1), call)); // Account 1 is always the winner. assert_eq!(Lottery::choose_account().unwrap(), 1); }); diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index a3f79ab2052e9..af2f2ddcf42f0 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -59,19 +59,19 @@ pub mod pallet { + IsType<::RuntimeEvent>; /// Required origin for adding a member (though can always be Root). - type AddOrigin: EnsureOrigin; + type AddOrigin: EnsureOrigin; /// Required origin for removing a member (though can always be Root). - type RemoveOrigin: EnsureOrigin; + type RemoveOrigin: EnsureOrigin; /// Required origin for adding and removing a member in a single action. - type SwapOrigin: EnsureOrigin; + type SwapOrigin: EnsureOrigin; /// Required origin for resetting membership. - type ResetOrigin: EnsureOrigin; + type ResetOrigin: EnsureOrigin; /// Required origin for setting or resetting the prime member. - type PrimeOrigin: EnsureOrigin; + type PrimeOrigin: EnsureOrigin; /// The receiver of the signal for when the membership has been initialized. This happens /// pre-genesis and will usually be the same as `MembershipChanged`. 
If you need to do @@ -543,7 +543,7 @@ mod tests { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -647,13 +647,16 @@ mod tests { #[test] fn prime_member_works() { new_test_ext().execute_with(|| { - assert_noop!(Membership::set_prime(Origin::signed(4), 20), BadOrigin); - assert_noop!(Membership::set_prime(Origin::signed(5), 15), Error::::NotMember); - assert_ok!(Membership::set_prime(Origin::signed(5), 20)); + assert_noop!(Membership::set_prime(RuntimeOrigin::signed(4), 20), BadOrigin); + assert_noop!( + Membership::set_prime(RuntimeOrigin::signed(5), 15), + Error::::NotMember + ); + assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); assert_eq!(Membership::prime(), Some(20)); assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - assert_ok!(Membership::clear_prime(Origin::signed(5))); + assert_ok!(Membership::clear_prime(RuntimeOrigin::signed(5))); assert_eq!(Membership::prime(), None); assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); }); @@ -662,12 +665,12 @@ mod tests { #[test] fn add_member_works() { new_test_ext().execute_with(|| { - assert_noop!(Membership::add_member(Origin::signed(5), 15), BadOrigin); + assert_noop!(Membership::add_member(RuntimeOrigin::signed(5), 15), BadOrigin); assert_noop!( - Membership::add_member(Origin::signed(1), 10), + Membership::add_member(RuntimeOrigin::signed(1), 10), Error::::AlreadyMember ); - assert_ok!(Membership::add_member(Origin::signed(1), 15)); + assert_ok!(Membership::add_member(RuntimeOrigin::signed(1), 15)); assert_eq!(Membership::members(), vec![10, 15, 20, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); }); @@ -676,13 +679,13 @@ mod tests { #[test] fn remove_member_works() { new_test_ext().execute_with(|| { - assert_noop!(Membership::remove_member(Origin::signed(5), 20), BadOrigin); + assert_noop!(Membership::remove_member(RuntimeOrigin::signed(5), 20), BadOrigin); assert_noop!( - Membership::remove_member(Origin::signed(2), 15), + Membership::remove_member(RuntimeOrigin::signed(2), 15), Error::::NotMember ); - assert_ok!(Membership::set_prime(Origin::signed(5), 20)); - assert_ok!(Membership::remove_member(Origin::signed(2), 20)); + assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); + assert_ok!(Membership::remove_member(RuntimeOrigin::signed(2), 20)); assert_eq!(Membership::members(), vec![10, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); assert_eq!(Membership::prime(), None); @@ -693,24 +696,24 @@ mod tests { #[test] fn swap_member_works() { new_test_ext().execute_with(|| { - assert_noop!(Membership::swap_member(Origin::signed(5), 10, 25), BadOrigin); + assert_noop!(Membership::swap_member(RuntimeOrigin::signed(5), 10, 25), BadOrigin); assert_noop!( - Membership::swap_member(Origin::signed(3), 15, 25), + Membership::swap_member(RuntimeOrigin::signed(3), 15, 25), Error::::NotMember ); assert_noop!( - Membership::swap_member(Origin::signed(3), 10, 30), + Membership::swap_member(RuntimeOrigin::signed(3), 10, 30), Error::::AlreadyMember ); - assert_ok!(Membership::set_prime(Origin::signed(5), 20)); - assert_ok!(Membership::swap_member(Origin::signed(3), 20, 20)); + assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); + assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 20, 20)); assert_eq!(Membership::members(), vec![10, 20, 30]); 
assert_eq!(Membership::prime(), Some(20)); assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - assert_ok!(Membership::set_prime(Origin::signed(5), 10)); - assert_ok!(Membership::swap_member(Origin::signed(3), 10, 25)); + assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 10)); + assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 10, 25)); assert_eq!(Membership::members(), vec![20, 25, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); assert_eq!(Membership::prime(), None); @@ -721,7 +724,7 @@ mod tests { #[test] fn swap_member_works_that_does_not_change_order() { new_test_ext().execute_with(|| { - assert_ok!(Membership::swap_member(Origin::signed(3), 10, 5)); + assert_ok!(Membership::swap_member(RuntimeOrigin::signed(3), 10, 5)); assert_eq!(Membership::members(), vec![5, 20, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); }); @@ -730,16 +733,16 @@ mod tests { #[test] fn change_key_works() { new_test_ext().execute_with(|| { - assert_ok!(Membership::set_prime(Origin::signed(5), 10)); + assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 10)); assert_noop!( - Membership::change_key(Origin::signed(3), 25), + Membership::change_key(RuntimeOrigin::signed(3), 25), Error::::NotMember ); assert_noop!( - Membership::change_key(Origin::signed(10), 20), + Membership::change_key(RuntimeOrigin::signed(10), 20), Error::::AlreadyMember ); - assert_ok!(Membership::change_key(Origin::signed(10), 40)); + assert_ok!(Membership::change_key(RuntimeOrigin::signed(10), 40)); assert_eq!(Membership::members(), vec![20, 30, 40]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); assert_eq!(Membership::prime(), Some(40)); @@ -750,7 +753,7 @@ mod tests { #[test] fn change_key_works_that_does_not_change_order() { new_test_ext().execute_with(|| { - assert_ok!(Membership::change_key(Origin::signed(10), 5)); + assert_ok!(Membership::change_key(RuntimeOrigin::signed(10), 5)); assert_eq!(Membership::members(), vec![5, 20, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); }); @@ -759,19 +762,19 @@ mod tests { #[test] fn reset_members_works() { new_test_ext().execute_with(|| { - assert_ok!(Membership::set_prime(Origin::signed(5), 20)); + assert_ok!(Membership::set_prime(RuntimeOrigin::signed(5), 20)); assert_noop!( - Membership::reset_members(Origin::signed(1), bounded_vec![20, 40, 30]), + Membership::reset_members(RuntimeOrigin::signed(1), bounded_vec![20, 40, 30]), BadOrigin ); - assert_ok!(Membership::reset_members(Origin::signed(4), vec![20, 40, 30])); + assert_ok!(Membership::reset_members(RuntimeOrigin::signed(4), vec![20, 40, 30])); assert_eq!(Membership::members(), vec![20, 30, 40]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); assert_eq!(Membership::prime(), Some(20)); assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - assert_ok!(Membership::reset_members(Origin::signed(4), vec![10, 40, 30])); + assert_ok!(Membership::reset_members(RuntimeOrigin::signed(4), vec![10, 40, 30])); assert_eq!(Membership::members(), vec![10, 30, 40]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members().to_vec()); assert_eq!(Membership::prime(), None); diff --git a/frame/merkle-mountain-range/src/mock.rs b/frame/merkle-mountain-range/src/mock.rs index abaa7b106d539..16f0922633088 100644 --- a/frame/merkle-mountain-range/src/mock.rs +++ b/frame/merkle-mountain-range/src/mock.rs @@ -47,7 +47,7 
@@ frame_support::construct_runtime!( impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 0c7e931e5ea2e..3bdb47ffc4568 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -122,7 +122,7 @@ pub mod pallet { /// The overarching call type. type RuntimeCall: Parameter - + Dispatchable + + Dispatchable + GetDispatchInfo + From>; diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index 7eb9a9c3aae32..b24a06f454368 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -57,7 +57,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -138,15 +138,15 @@ fn call_transfer(dest: u64, value: u64) -> RuntimeCall { fn multisig_deposit_is_taken_and_returned() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); assert_ok!(Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -158,7 +158,7 @@ fn multisig_deposit_is_taken_and_returned() { assert_eq!(Balances::reserved_balance(1), 3); assert_ok!(Multisig::as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], Some(now()), @@ -175,16 +175,16 @@ fn multisig_deposit_is_taken_and_returned() { fn multisig_deposit_is_taken_and_returned_with_call_storage() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); assert_ok!(Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -196,7 +196,7 @@ fn multisig_deposit_is_taken_and_returned_with_call_storage() { assert_eq!(Balances::reserved_balance(1), 5); assert_ok!(Multisig::approve_as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], Some(now()), @@ -212,9 +212,9 @@ fn multisig_deposit_is_taken_and_returned_with_call_storage() { fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - 
assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; @@ -222,7 +222,7 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { let hash = blake2_256(&data); assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, vec![2, 3], None, @@ -233,7 +233,7 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { assert_eq!(Balances::reserved_balance(1), 4); assert_ok!(Multisig::as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 3, vec![1, 3], Some(now()), @@ -247,7 +247,7 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { assert_eq!(Balances::reserved_balance(1), 4); assert_ok!(Multisig::approve_as_multi( - Origin::signed(3), + RuntimeOrigin::signed(3), 3, vec![1, 2], Some(now()), @@ -267,7 +267,7 @@ fn cancel_multisig_returns_deposit() { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, vec![2, 3], None, @@ -275,7 +275,7 @@ fn cancel_multisig_returns_deposit() { Weight::zero() )); assert_ok!(Multisig::approve_as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 3, vec![1, 3], Some(now()), @@ -284,7 +284,7 @@ fn cancel_multisig_returns_deposit() { )); assert_eq!(Balances::free_balance(1), 6); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash)); + assert_ok!(Multisig::cancel_as_multi(RuntimeOrigin::signed(1), 3, vec![2, 3], now(), hash)); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -294,16 +294,16 @@ fn cancel_multisig_returns_deposit() { fn timepoint_checking_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_noop!( Multisig::approve_as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], Some(now()), @@ -314,7 +314,7 @@ fn timepoint_checking_works() { ); assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -324,7 +324,7 @@ fn timepoint_checking_works() { assert_noop!( Multisig::as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], None, @@ -337,7 +337,7 @@ fn timepoint_checking_works() { let later = Timepoint { index: 1, ..now() }; assert_noop!( Multisig::as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], Some(later), @@ -354,16 +354,16 @@ fn timepoint_checking_works() { fn multisig_2_of_3_works_with_call_storing() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 
5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); assert_ok!(Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -374,7 +374,7 @@ fn multisig_2_of_3_works_with_call_storing() { assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::approve_as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], Some(now()), @@ -389,16 +389,16 @@ fn multisig_2_of_3_works_with_call_storing() { fn multisig_2_of_3_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -408,7 +408,7 @@ fn multisig_2_of_3_works() { assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], Some(now()), @@ -424,16 +424,16 @@ fn multisig_2_of_3_works() { fn multisig_3_of_3_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, vec![2, 3], None, @@ -441,7 +441,7 @@ fn multisig_3_of_3_works() { Weight::zero() )); assert_ok!(Multisig::approve_as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 3, vec![1, 3], Some(now()), @@ -451,7 +451,7 @@ fn multisig_3_of_3_works() { assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::as_multi( - Origin::signed(3), + RuntimeOrigin::signed(3), 3, vec![1, 2], Some(now()), @@ -469,7 +469,7 @@ fn cancel_multisig_works() { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, vec![2, 3], None, @@ -477,7 +477,7 @@ fn cancel_multisig_works() { Weight::zero() )); assert_ok!(Multisig::approve_as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 3, vec![1, 3], Some(now()), @@ -485,10 +485,10 @@ fn cancel_multisig_works() { Weight::zero() )); assert_noop!( - Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), 
hash), + Multisig::cancel_as_multi(RuntimeOrigin::signed(2), 3, vec![1, 3], now(), hash), Error::::NotOwner, ); - assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash),); + assert_ok!(Multisig::cancel_as_multi(RuntimeOrigin::signed(1), 3, vec![2, 3], now(), hash),); }); } @@ -498,7 +498,7 @@ fn cancel_multisig_with_call_storage_works() { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_ok!(Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, vec![2, 3], None, @@ -508,7 +508,7 @@ fn cancel_multisig_with_call_storage_works() { )); assert_eq!(Balances::free_balance(1), 4); assert_ok!(Multisig::approve_as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 3, vec![1, 3], Some(now()), @@ -516,10 +516,10 @@ fn cancel_multisig_with_call_storage_works() { Weight::zero() )); assert_noop!( - Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash), + Multisig::cancel_as_multi(RuntimeOrigin::signed(2), 3, vec![1, 3], now(), hash), Error::::NotOwner, ); - assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash),); + assert_ok!(Multisig::cancel_as_multi(RuntimeOrigin::signed(1), 3, vec![2, 3], now(), hash),); assert_eq!(Balances::free_balance(1), 10); }); } @@ -530,7 +530,7 @@ fn cancel_multisig_with_alt_call_storage_works() { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, vec![2, 3], None, @@ -539,7 +539,7 @@ fn cancel_multisig_with_alt_call_storage_works() { )); assert_eq!(Balances::free_balance(1), 6); assert_ok!(Multisig::as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 3, vec![1, 3], Some(now()), @@ -548,7 +548,7 @@ fn cancel_multisig_with_alt_call_storage_works() { Weight::zero() )); assert_eq!(Balances::free_balance(2), 8); - assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash)); + assert_ok!(Multisig::cancel_as_multi(RuntimeOrigin::signed(1), 3, vec![2, 3], now(), hash)); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); }); @@ -558,15 +558,15 @@ fn cancel_multisig_with_alt_call_storage_works() { fn multisig_2_of_3_as_multi_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); assert_ok!(Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -577,7 +577,7 @@ fn multisig_2_of_3_as_multi_works() { assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], Some(now()), @@ -593,9 +593,9 @@ fn multisig_2_of_3_as_multi_works() { fn multisig_2_of_3_as_multi_with_many_calls_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - 
assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call1 = call_transfer(6, 10); let call1_weight = call1.get_dispatch_info().weight; @@ -605,7 +605,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { let data2 = call2.encode(); assert_ok!(Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -614,7 +614,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { Weight::zero() )); assert_ok!(Multisig::as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], None, @@ -623,7 +623,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { Weight::zero() )); assert_ok!(Multisig::as_multi( - Origin::signed(3), + RuntimeOrigin::signed(3), 2, vec![1, 2], Some(now()), @@ -632,7 +632,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { call1_weight )); assert_ok!(Multisig::as_multi( - Origin::signed(3), + RuntimeOrigin::signed(3), 2, vec![1, 2], Some(now()), @@ -650,16 +650,16 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { fn multisig_2_of_3_cannot_reissue_same_call() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 10); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); assert_ok!(Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -668,7 +668,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { Weight::zero() )); assert_ok!(Multisig::as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], Some(now()), @@ -679,7 +679,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { assert_eq!(Balances::free_balance(multi), 5); assert_ok!(Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -688,7 +688,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { Weight::zero() )); assert_ok!(Multisig::as_multi( - Origin::signed(3), + RuntimeOrigin::signed(3), 2, vec![1, 2], Some(now()), @@ -717,7 +717,7 @@ fn minimum_threshold_check_works() { let call = call_transfer(6, 15).encode(); assert_noop!( Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 0, vec![2], None, @@ -729,7 +729,7 @@ fn minimum_threshold_check_works() { ); assert_noop!( Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 1, vec![2], None, @@ -748,7 +748,7 @@ fn too_many_signatories_fails() { let call = call_transfer(6, 15).encode(); assert_noop!( Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3, 4], None, @@ -767,7 +767,7 @@ fn duplicate_approvals_are_ignored() { let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -776,7 +776,7 @@ fn duplicate_approvals_are_ignored() { )); assert_noop!( Multisig::approve_as_multi( - Origin::signed(1), + 
RuntimeOrigin::signed(1), 2, vec![2, 3], Some(now()), @@ -786,7 +786,7 @@ fn duplicate_approvals_are_ignored() { Error::::AlreadyApproved, ); assert_ok!(Multisig::approve_as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], Some(now()), @@ -795,7 +795,7 @@ fn duplicate_approvals_are_ignored() { )); assert_noop!( Multisig::approve_as_multi( - Origin::signed(3), + RuntimeOrigin::signed(3), 2, vec![1, 2], Some(now()), @@ -811,15 +811,15 @@ fn duplicate_approvals_are_ignored() { fn multisig_1_of_3_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 1); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15).encode(); let hash = blake2_256(&call); assert_noop!( Multisig::approve_as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 1, vec![2, 3], None, @@ -830,7 +830,7 @@ fn multisig_1_of_3_works() { ); assert_noop!( Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 1, vec![2, 3], None, @@ -841,7 +841,11 @@ fn multisig_1_of_3_works() { Error::::MinimumThreshold, ); let boxed_call = Box::new(call_transfer(6, 15)); - assert_ok!(Multisig::as_multi_threshold_1(Origin::signed(1), vec![2, 3], boxed_call)); + assert_ok!(Multisig::as_multi_threshold_1( + RuntimeOrigin::signed(1), + vec![2, 3], + boxed_call + )); assert_eq!(Balances::free_balance(6), 15); }); @@ -852,7 +856,7 @@ fn multisig_filters() { new_test_ext().execute_with(|| { let call = Box::new(RuntimeCall::System(frame_system::Call::set_code { code: vec![] })); assert_noop!( - Multisig::as_multi_threshold_1(Origin::signed(1), vec![2], call.clone()), + Multisig::as_multi_threshold_1(RuntimeOrigin::signed(1), vec![2], call.clone()), DispatchError::from(frame_system::Error::::CallFiltered), ); }); @@ -862,14 +866,14 @@ fn multisig_filters() { fn weight_check_works() { new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let data = call.encode(); assert_ok!(Multisig::as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, vec![2, 3], None, @@ -881,7 +885,7 @@ fn weight_check_works() { assert_noop!( Multisig::as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 2, vec![1, 3], Some(now()), @@ -901,16 +905,16 @@ fn multisig_handles_no_preimage_after_all_approve() { // the call will go through. 
new_test_ext().execute_with(|| { let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); assert_ok!(Multisig::approve_as_multi( - Origin::signed(1), + RuntimeOrigin::signed(1), 3, vec![2, 3], None, @@ -918,7 +922,7 @@ fn multisig_handles_no_preimage_after_all_approve() { Weight::zero() )); assert_ok!(Multisig::approve_as_multi( - Origin::signed(2), + RuntimeOrigin::signed(2), 3, vec![1, 3], Some(now()), @@ -926,7 +930,7 @@ fn multisig_handles_no_preimage_after_all_approve() { Weight::zero() )); assert_ok!(Multisig::approve_as_multi( - Origin::signed(3), + RuntimeOrigin::signed(3), 3, vec![1, 2], Some(now()), @@ -936,7 +940,7 @@ fn multisig_handles_no_preimage_after_all_approve() { assert_eq!(Balances::free_balance(6), 0); assert_ok!(Multisig::as_multi( - Origin::signed(3), + RuntimeOrigin::signed(3), 3, vec![1, 2], Some(now()), diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 0ab9a509a568f..953cf39cd12db 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -71,7 +71,7 @@ pub mod pallet { type Slashed: OnUnbalanced>; /// The origin which may forcibly set or remove a name. Root can always do this. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// The minimum length a name may be. #[pallet::constant] @@ -280,7 +280,7 @@ mod tests { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -338,9 +338,9 @@ mod tests { #[test] fn kill_name_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nicks::set_name(Origin::signed(2), b"Dave".to_vec())); + assert_ok!(Nicks::set_name(RuntimeOrigin::signed(2), b"Dave".to_vec())); assert_eq!(Balances::total_balance(&2), 10); - assert_ok!(Nicks::kill_name(Origin::signed(1), 2)); + assert_ok!(Nicks::kill_name(RuntimeOrigin::signed(1), 2)); assert_eq!(Balances::total_balance(&2), 8); assert_eq!(>::get(2), None); }); @@ -350,17 +350,21 @@ mod tests { fn force_name_should_work() { new_test_ext().execute_with(|| { assert_noop!( - Nicks::set_name(Origin::signed(2), b"Dr. David Brubeck, III".to_vec()), + Nicks::set_name(RuntimeOrigin::signed(2), b"Dr. David Brubeck, III".to_vec()), Error::::TooLong, ); - assert_ok!(Nicks::set_name(Origin::signed(2), b"Dave".to_vec())); + assert_ok!(Nicks::set_name(RuntimeOrigin::signed(2), b"Dave".to_vec())); assert_eq!(Balances::reserved_balance(2), 2); assert_noop!( - Nicks::force_name(Origin::signed(1), 2, b"Dr. David Brubeck, III".to_vec()), + Nicks::force_name(RuntimeOrigin::signed(1), 2, b"Dr. David Brubeck, III".to_vec()), Error::::TooLong, ); - assert_ok!(Nicks::force_name(Origin::signed(1), 2, b"Dr. Brubeck, III".to_vec())); + assert_ok!(Nicks::force_name( + RuntimeOrigin::signed(1), + 2, + b"Dr. Brubeck, III".to_vec() + )); assert_eq!(Balances::reserved_balance(2), 2); let (name, amount) = >::get(2).unwrap(); assert_eq!(name, b"Dr. 
Brubeck, III".to_vec()); @@ -371,17 +375,17 @@ mod tests { #[test] fn normal_operation_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Nicks::set_name(Origin::signed(1), b"Gav".to_vec())); + assert_ok!(Nicks::set_name(RuntimeOrigin::signed(1), b"Gav".to_vec())); assert_eq!(Balances::reserved_balance(1), 2); assert_eq!(Balances::free_balance(1), 8); assert_eq!(>::get(1).unwrap().0, b"Gav".to_vec()); - assert_ok!(Nicks::set_name(Origin::signed(1), b"Gavin".to_vec())); + assert_ok!(Nicks::set_name(RuntimeOrigin::signed(1), b"Gavin".to_vec())); assert_eq!(Balances::reserved_balance(1), 2); assert_eq!(Balances::free_balance(1), 8); assert_eq!(>::get(1).unwrap().0, b"Gavin".to_vec()); - assert_ok!(Nicks::clear_name(Origin::signed(1))); + assert_ok!(Nicks::clear_name(RuntimeOrigin::signed(1))); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::free_balance(1), 10); }); @@ -390,24 +394,27 @@ mod tests { #[test] fn error_catching_should_work() { new_test_ext().execute_with(|| { - assert_noop!(Nicks::clear_name(Origin::signed(1)), Error::::Unnamed); + assert_noop!(Nicks::clear_name(RuntimeOrigin::signed(1)), Error::::Unnamed); assert_noop!( - Nicks::set_name(Origin::signed(3), b"Dave".to_vec()), + Nicks::set_name(RuntimeOrigin::signed(3), b"Dave".to_vec()), pallet_balances::Error::::InsufficientBalance ); assert_noop!( - Nicks::set_name(Origin::signed(1), b"Ga".to_vec()), + Nicks::set_name(RuntimeOrigin::signed(1), b"Ga".to_vec()), Error::::TooShort ); assert_noop!( - Nicks::set_name(Origin::signed(1), b"Gavin James Wood, Esquire".to_vec()), + Nicks::set_name(RuntimeOrigin::signed(1), b"Gavin James Wood, Esquire".to_vec()), Error::::TooLong ); - assert_ok!(Nicks::set_name(Origin::signed(1), b"Dave".to_vec())); - assert_noop!(Nicks::kill_name(Origin::signed(2), 1), BadOrigin); - assert_noop!(Nicks::force_name(Origin::signed(2), 1, b"Whatever".to_vec()), BadOrigin); + assert_ok!(Nicks::set_name(RuntimeOrigin::signed(1), b"Dave".to_vec())); + assert_noop!(Nicks::kill_name(RuntimeOrigin::signed(2), 1), BadOrigin); + assert_noop!( + Nicks::force_name(RuntimeOrigin::signed(2), 1, b"Whatever".to_vec()), + BadOrigin + ); }); } } diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index 42f1fcf7b55e6..638a96eb3321a 100644 --- a/frame/node-authorization/src/lib.rs +++ b/frame/node-authorization/src/lib.rs @@ -78,16 +78,16 @@ pub mod pallet { type MaxPeerIdLength: Get; /// The origin which can add a well known node. - type AddOrigin: EnsureOrigin; + type AddOrigin: EnsureOrigin; /// The origin which can remove a well known node. - type RemoveOrigin: EnsureOrigin; + type RemoveOrigin: EnsureOrigin; /// The origin which can swap the well known nodes. - type SwapOrigin: EnsureOrigin; + type SwapOrigin: EnsureOrigin; /// The origin which can reset the well known nodes. - type ResetOrigin: EnsureOrigin; + type ResetOrigin: EnsureOrigin; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; diff --git a/frame/node-authorization/src/mock.rs b/frame/node-authorization/src/mock.rs index 448f8cef80e28..fcf7ff0189332 100644 --- a/frame/node-authorization/src/mock.rs +++ b/frame/node-authorization/src/mock.rs @@ -52,7 +52,7 @@ impl frame_system::Config for Test { type DbWeight = (); type BlockWeights = (); type BlockLength = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; diff --git a/frame/node-authorization/src/tests.rs b/frame/node-authorization/src/tests.rs index ba78d14912133..d9db09db4f46f 100644 --- a/frame/node-authorization/src/tests.rs +++ b/frame/node-authorization/src/tests.rs @@ -26,19 +26,27 @@ use sp_runtime::traits::BadOrigin; fn add_well_known_node_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(2), test_node(15), 15), + NodeAuthorization::add_well_known_node(RuntimeOrigin::signed(2), test_node(15), 15), BadOrigin ); assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(1), PeerId(vec![1, 2, 3]), 15), + NodeAuthorization::add_well_known_node( + RuntimeOrigin::signed(1), + PeerId(vec![1, 2, 3]), + 15 + ), Error::::PeerIdTooLong ); assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(20), 20), + NodeAuthorization::add_well_known_node(RuntimeOrigin::signed(1), test_node(20), 20), Error::::AlreadyJoined ); - assert_ok!(NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(15), 15)); + assert_ok!(NodeAuthorization::add_well_known_node( + RuntimeOrigin::signed(1), + test_node(15), + 15 + )); assert_eq!( WellKnownNodes::::get(), BTreeSet::from_iter(vec![test_node(10), test_node(15), test_node(20), test_node(30)]) @@ -49,7 +57,7 @@ fn add_well_known_node_works() { assert_eq!(Owners::::get(test_node(15)), Some(15)); assert_noop!( - NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(25), 25), + NodeAuthorization::add_well_known_node(RuntimeOrigin::signed(1), test_node(25), 25), Error::::TooManyNodes ); }); @@ -59,15 +67,18 @@ fn add_well_known_node_works() { fn remove_well_known_node_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::remove_well_known_node(Origin::signed(3), test_node(20)), + NodeAuthorization::remove_well_known_node(RuntimeOrigin::signed(3), test_node(20)), BadOrigin ); assert_noop!( - NodeAuthorization::remove_well_known_node(Origin::signed(2), PeerId(vec![1, 2, 3])), + NodeAuthorization::remove_well_known_node( + RuntimeOrigin::signed(2), + PeerId(vec![1, 2, 3]) + ), Error::::PeerIdTooLong ); assert_noop!( - NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(40)), + NodeAuthorization::remove_well_known_node(RuntimeOrigin::signed(2), test_node(40)), Error::::NotExist ); @@ -77,7 +88,10 @@ fn remove_well_known_node_works() { ); assert!(AdditionalConnections::::contains_key(test_node(20))); - assert_ok!(NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(20))); + assert_ok!(NodeAuthorization::remove_well_known_node( + RuntimeOrigin::signed(2), + test_node(20) + )); assert_eq!( WellKnownNodes::::get(), BTreeSet::from_iter(vec![test_node(10), test_node(30)]) @@ -91,12 +105,16 @@ fn remove_well_known_node_works() { fn swap_well_known_node_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::swap_well_known_node(Origin::signed(4), test_node(20), test_node(5)), + NodeAuthorization::swap_well_known_node( + 
RuntimeOrigin::signed(4), + test_node(20), + test_node(5) + ), BadOrigin ); assert_noop!( NodeAuthorization::swap_well_known_node( - Origin::signed(3), + RuntimeOrigin::signed(3), PeerId(vec![1, 2, 3]), test_node(20) ), @@ -104,7 +122,7 @@ fn swap_well_known_node_works() { ); assert_noop!( NodeAuthorization::swap_well_known_node( - Origin::signed(3), + RuntimeOrigin::signed(3), test_node(20), PeerId(vec![1, 2, 3]) ), @@ -112,7 +130,7 @@ fn swap_well_known_node_works() { ); assert_ok!(NodeAuthorization::swap_well_known_node( - Origin::signed(3), + RuntimeOrigin::signed(3), test_node(20), test_node(20) )); @@ -122,12 +140,16 @@ fn swap_well_known_node_works() { ); assert_noop!( - NodeAuthorization::swap_well_known_node(Origin::signed(3), test_node(15), test_node(5)), + NodeAuthorization::swap_well_known_node( + RuntimeOrigin::signed(3), + test_node(15), + test_node(5) + ), Error::::NotExist ); assert_noop!( NodeAuthorization::swap_well_known_node( - Origin::signed(3), + RuntimeOrigin::signed(3), test_node(20), test_node(30) ), @@ -139,7 +161,7 @@ fn swap_well_known_node_works() { BTreeSet::from_iter(vec![test_node(15)]), ); assert_ok!(NodeAuthorization::swap_well_known_node( - Origin::signed(3), + RuntimeOrigin::signed(3), test_node(20), test_node(5) )); @@ -162,14 +184,14 @@ fn reset_well_known_nodes_works() { new_test_ext().execute_with(|| { assert_noop!( NodeAuthorization::reset_well_known_nodes( - Origin::signed(3), + RuntimeOrigin::signed(3), vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] ), BadOrigin ); assert_noop!( NodeAuthorization::reset_well_known_nodes( - Origin::signed(4), + RuntimeOrigin::signed(4), vec![ (test_node(15), 15), (test_node(5), 5), @@ -181,7 +203,7 @@ fn reset_well_known_nodes_works() { ); assert_ok!(NodeAuthorization::reset_well_known_nodes( - Origin::signed(4), + RuntimeOrigin::signed(4), vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] )); assert_eq!( @@ -198,15 +220,15 @@ fn reset_well_known_nodes_works() { fn claim_node_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::claim_node(Origin::signed(1), PeerId(vec![1, 2, 3])), + NodeAuthorization::claim_node(RuntimeOrigin::signed(1), PeerId(vec![1, 2, 3])), Error::::PeerIdTooLong ); assert_noop!( - NodeAuthorization::claim_node(Origin::signed(1), test_node(20)), + NodeAuthorization::claim_node(RuntimeOrigin::signed(1), test_node(20)), Error::::AlreadyClaimed ); - assert_ok!(NodeAuthorization::claim_node(Origin::signed(15), test_node(15))); + assert_ok!(NodeAuthorization::claim_node(RuntimeOrigin::signed(15), test_node(15))); assert_eq!(Owners::::get(test_node(15)), Some(15)); }); } @@ -215,21 +237,21 @@ fn claim_node_works() { fn remove_claim_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(15), PeerId(vec![1, 2, 3])), + NodeAuthorization::remove_claim(RuntimeOrigin::signed(15), PeerId(vec![1, 2, 3])), Error::::PeerIdTooLong ); assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(15), test_node(15)), + NodeAuthorization::remove_claim(RuntimeOrigin::signed(15), test_node(15)), Error::::NotClaimed ); assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(15), test_node(20)), + NodeAuthorization::remove_claim(RuntimeOrigin::signed(15), test_node(20)), Error::::NotOwner ); assert_noop!( - NodeAuthorization::remove_claim(Origin::signed(20), test_node(20)), + NodeAuthorization::remove_claim(RuntimeOrigin::signed(20), test_node(20)), Error::::PermissionDenied ); @@ -238,7 +260,7 @@ fn 
remove_claim_works() { test_node(15), BTreeSet::from_iter(vec![test_node(20)]), ); - assert_ok!(NodeAuthorization::remove_claim(Origin::signed(15), test_node(15))); + assert_ok!(NodeAuthorization::remove_claim(RuntimeOrigin::signed(15), test_node(15))); assert!(!Owners::::contains_key(test_node(15))); assert!(!AdditionalConnections::::contains_key(test_node(15))); }); @@ -248,20 +270,20 @@ fn remove_claim_works() { fn transfer_node_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::transfer_node(Origin::signed(15), PeerId(vec![1, 2, 3]), 10), + NodeAuthorization::transfer_node(RuntimeOrigin::signed(15), PeerId(vec![1, 2, 3]), 10), Error::::PeerIdTooLong ); assert_noop!( - NodeAuthorization::transfer_node(Origin::signed(15), test_node(15), 10), + NodeAuthorization::transfer_node(RuntimeOrigin::signed(15), test_node(15), 10), Error::::NotClaimed ); assert_noop!( - NodeAuthorization::transfer_node(Origin::signed(15), test_node(20), 10), + NodeAuthorization::transfer_node(RuntimeOrigin::signed(15), test_node(20), 10), Error::::NotOwner ); - assert_ok!(NodeAuthorization::transfer_node(Origin::signed(20), test_node(20), 15)); + assert_ok!(NodeAuthorization::transfer_node(RuntimeOrigin::signed(20), test_node(20), 15)); assert_eq!(Owners::::get(test_node(20)), Some(15)); }); } @@ -271,7 +293,7 @@ fn add_connections_works() { new_test_ext().execute_with(|| { assert_noop!( NodeAuthorization::add_connections( - Origin::signed(15), + RuntimeOrigin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] ), @@ -279,7 +301,7 @@ fn add_connections_works() { ); assert_noop!( NodeAuthorization::add_connections( - Origin::signed(15), + RuntimeOrigin::signed(15), test_node(15), vec![test_node(5)] ), @@ -288,7 +310,7 @@ fn add_connections_works() { assert_noop!( NodeAuthorization::add_connections( - Origin::signed(15), + RuntimeOrigin::signed(15), test_node(20), vec![test_node(5)] ), @@ -296,7 +318,7 @@ fn add_connections_works() { ); assert_ok!(NodeAuthorization::add_connections( - Origin::signed(20), + RuntimeOrigin::signed(20), test_node(20), vec![test_node(15), test_node(5), test_node(25), test_node(20)] )); @@ -312,7 +334,7 @@ fn remove_connections_works() { new_test_ext().execute_with(|| { assert_noop!( NodeAuthorization::remove_connections( - Origin::signed(15), + RuntimeOrigin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] ), @@ -320,7 +342,7 @@ fn remove_connections_works() { ); assert_noop!( NodeAuthorization::remove_connections( - Origin::signed(15), + RuntimeOrigin::signed(15), test_node(15), vec![test_node(5)] ), @@ -329,7 +351,7 @@ fn remove_connections_works() { assert_noop!( NodeAuthorization::remove_connections( - Origin::signed(15), + RuntimeOrigin::signed(15), test_node(20), vec![test_node(5)] ), @@ -341,7 +363,7 @@ fn remove_connections_works() { BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]), ); assert_ok!(NodeAuthorization::remove_connections( - Origin::signed(20), + RuntimeOrigin::signed(20), test_node(20), vec![test_node(15), test_node(5)] )); diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index 18b081367d135..040a9fa8e828e 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs @@ -26,7 +26,7 @@ mod mock; use frame_benchmarking::{account, frame_support::traits::Currency, vec, whitelist_account, Vec}; use frame_election_provider_support::SortedListProvider; use frame_support::{assert_ok, ensure, traits::Get}; -use 
frame_system::RawOrigin as Origin; +use frame_system::RawOrigin as RuntimeOrigin; use pallet_nomination_pools::{ BalanceOf, BondExtra, BondedPoolInner, BondedPools, ConfigOp, MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, Pallet as Pools, @@ -80,7 +80,7 @@ fn create_pool_account( let pool_creator_lookup = T::Lookup::unlookup(pool_creator.clone()); Pools::::create( - Origin::Signed(pool_creator.clone()).into(), + RuntimeOrigin::Signed(pool_creator.clone()).into(), balance, pool_creator_lookup.clone(), pool_creator_lookup.clone(), @@ -203,7 +203,7 @@ impl ListScenario { maybe_pool.as_mut().map(|pool| pool.points -= amount) }); - Pools::::join(Origin::Signed(joiner.clone()).into(), amount, 1).unwrap(); + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), amount, 1).unwrap(); // check that the vote weight is still the same as the original bonded let weight_of = pallet_staking::Pallet::::weight_of_fn(); @@ -236,7 +236,7 @@ frame_benchmarking::benchmarks! { = create_funded_user_with_balance::("joiner", 0, joiner_free); whitelist_account!(joiner); - }: _(Origin::Signed(joiner.clone()), max_additional, 1) + }: _(RuntimeOrigin::Signed(joiner.clone()), max_additional, 1) verify { assert_eq!(CurrencyOf::::free_balance(&joiner), joiner_free - max_additional); assert_eq!( @@ -252,7 +252,7 @@ frame_benchmarking::benchmarks! { // creator of the src pool will bond-extra, bumping itself to dest bag. - }: bond_extra(Origin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) + }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) verify { assert!( T::StakingInterface::active_stake(&scenario.origin1).unwrap() >= @@ -270,7 +270,7 @@ frame_benchmarking::benchmarks! { assert!(extra >= CurrencyOf::::minimum_balance()); CurrencyOf::::deposit_creating(&reward_account1, extra); - }: bond_extra(Origin::Signed(scenario.creator1.clone()), BondExtra::Rewards) + }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::Rewards) verify { assert!( T::StakingInterface::active_stake(&scenario.origin1).unwrap() >= @@ -294,7 +294,7 @@ frame_benchmarking::benchmarks! { ); whitelist_account!(depositor); - }:_(Origin::Signed(depositor.clone())) + }:_(RuntimeOrigin::Signed(depositor.clone())) verify { assert_eq!( CurrencyOf::::free_balance(&depositor), @@ -318,7 +318,7 @@ frame_benchmarking::benchmarks! { let member_id_lookup = T::Lookup::unlookup(member_id.clone()); let all_points = PoolMembers::::get(&member_id).unwrap().points; whitelist_account!(member_id); - }: _(Origin::Signed(member_id.clone()), member_id_lookup, all_points) + }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) verify { let bonded_after = T::StakingInterface::active_stake(&scenario.origin1).unwrap(); // We at least went down to the destination bag @@ -346,7 +346,7 @@ frame_benchmarking::benchmarks! { // Add a new member let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); - Pools::::join(Origin::Signed(joiner.clone()).into(), min_join_bond, 1) + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) .unwrap(); // Sanity check join worked @@ -357,7 +357,7 @@ frame_benchmarking::benchmarks! 
{ assert_eq!(CurrencyOf::::free_balance(&joiner), min_join_bond); // Unbond the new member - Pools::::fully_unbond(Origin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); + Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); // Sanity check that unbond worked assert_eq!( @@ -371,7 +371,7 @@ frame_benchmarking::benchmarks! { // Add `s` count of slashing spans to storage. pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); whitelist_account!(pool_account); - }: _(Origin::Signed(pool_account.clone()), 1, s) + }: _(RuntimeOrigin::Signed(pool_account.clone()), 1, s) verify { // The joiners funds didn't change assert_eq!(CurrencyOf::::free_balance(&joiner), min_join_bond); @@ -389,7 +389,7 @@ frame_benchmarking::benchmarks! { let min_join_bond = MinJoinBond::::get().max(CurrencyOf::::minimum_balance()); let joiner = create_funded_user_with_balance::("joiner", 0, min_join_bond * 2u32.into()); let joiner_lookup = T::Lookup::unlookup(joiner.clone()); - Pools::::join(Origin::Signed(joiner.clone()).into(), min_join_bond, 1) + Pools::::join(RuntimeOrigin::Signed(joiner.clone()).into(), min_join_bond, 1) .unwrap(); // Sanity check join worked @@ -401,7 +401,7 @@ frame_benchmarking::benchmarks! { // Unbond the new member pallet_staking::CurrentEra::::put(0); - Pools::::fully_unbond(Origin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); + Pools::::fully_unbond(RuntimeOrigin::Signed(joiner.clone()).into(), joiner.clone()).unwrap(); // Sanity check that unbond worked assert_eq!( @@ -415,7 +415,7 @@ frame_benchmarking::benchmarks! { pallet_staking::benchmarking::add_slashing_spans::(&pool_account, s); whitelist_account!(joiner); - }: withdraw_unbonded(Origin::Signed(joiner.clone()), joiner_lookup, s) + }: withdraw_unbonded(RuntimeOrigin::Signed(joiner.clone()), joiner_lookup, s) verify { assert_eq!( CurrencyOf::::free_balance(&joiner), @@ -448,7 +448,7 @@ frame_benchmarking::benchmarks! { // up when unbonding. let reward_account = Pools::::create_reward_account(1); assert!(frame_system::Account::::contains_key(&reward_account)); - Pools::::fully_unbond(Origin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); + Pools::::fully_unbond(RuntimeOrigin::Signed(depositor.clone()).into(), depositor.clone()).unwrap(); // Sanity check that unbond worked assert_eq!( @@ -473,7 +473,7 @@ frame_benchmarking::benchmarks! { assert!(frame_system::Account::::contains_key(&reward_account)); whitelist_account!(depositor); - }: withdraw_unbonded(Origin::Signed(depositor.clone()), depositor_lookup, s) + }: withdraw_unbonded(RuntimeOrigin::Signed(depositor.clone()), depositor_lookup, s) verify { // Pool removal worked assert!(!pallet_staking::Ledger::::contains_key(&pool_account)); @@ -506,7 +506,7 @@ frame_benchmarking::benchmarks! { whitelist_account!(depositor); }: _( - Origin::Signed(depositor.clone()), + RuntimeOrigin::Signed(depositor.clone()), min_create_bond, depositor_lookup.clone(), depositor_lookup.clone(), @@ -550,7 +550,7 @@ frame_benchmarking::benchmarks! { .collect(); whitelist_account!(depositor); - }:_(Origin::Signed(depositor.clone()), 1, validators) + }:_(RuntimeOrigin::Signed(depositor.clone()), 1, validators) verify { assert_eq!(RewardPools::::count(), 1); assert_eq!(BondedPools::::count(), 1); @@ -586,7 +586,7 @@ frame_benchmarking::benchmarks! 
{ let caller = account("caller", 0, USER_SEED); whitelist_account!(caller); - }:_(Origin::Signed(caller), 1, PoolState::Destroying) + }:_(RuntimeOrigin::Signed(caller), 1, PoolState::Destroying) verify { assert_eq!(BondedPools::::get(1).unwrap().state, PoolState::Destroying); } @@ -601,14 +601,14 @@ frame_benchmarking::benchmarks! { let metadata: Vec = (0..n).map(|_| 42).collect(); whitelist_account!(depositor); - }:_(Origin::Signed(depositor), 1, metadata.clone()) + }:_(RuntimeOrigin::Signed(depositor), 1, metadata.clone()) verify { assert_eq!(Metadata::::get(&1), metadata); } set_configs { }:_( - Origin::Root, + RuntimeOrigin::Root, ConfigOp::Set(BalanceOf::::max_value()), ConfigOp::Set(BalanceOf::::max_value()), ConfigOp::Set(u32::MAX), @@ -627,7 +627,7 @@ frame_benchmarking::benchmarks! { let (root, _) = create_pool_account::(0, min_create_bond::() * 2u32.into()); let random: T::AccountId = account("but is anything really random in computers..?", 0, USER_SEED); }:_( - Origin::Signed(root.clone()), + RuntimeOrigin::Signed(root.clone()), first_id, ConfigOp::Set(random.clone()), ConfigOp::Set(random.clone()), @@ -653,11 +653,11 @@ frame_benchmarking::benchmarks! { .map(|i| account("stash", USER_SEED, i)) .collect(); - assert_ok!(Pools::::nominate(Origin::Signed(depositor.clone()).into(), 1, validators)); + assert_ok!(Pools::::nominate(RuntimeOrigin::Signed(depositor.clone()).into(), 1, validators)); assert!(T::StakingInterface::nominations(Pools::::create_bonded_account(1)).is_some()); whitelist_account!(depositor); - }:_(Origin::Signed(depositor.clone()), 1) + }:_(RuntimeOrigin::Signed(depositor.clone()), 1) verify { assert!(T::StakingInterface::nominations(Pools::::create_bonded_account(1)).is_none()); } diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index 2dcac34d39a11..7841686114dfd 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -33,7 +33,7 @@ impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = AccountIndex; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; diff --git a/frame/nomination-pools/src/migration.rs b/frame/nomination-pools/src/migration.rs index 4d78f2fa76c96..b73141c95f72c 100644 --- a/frame/nomination-pools/src/migration.rs +++ b/frame/nomination-pools/src/migration.rs @@ -119,7 +119,7 @@ pub mod v2 { ExtBuilder::default().build_and_execute(|| { let join = |x| { Balances::make_free_balance_be(&x, Balances::minimum_balance() + 10); - frame_support::assert_ok!(Pools::join(Origin::signed(x), 10, 1)); + frame_support::assert_ok!(Pools::join(RuntimeOrigin::signed(x), 10, 1)); }; assert_eq!(BondedPool::::get(1).unwrap().points, 10); diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index 13df09c47fa92..1b3372dae56ee 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -128,7 +128,7 @@ impl sp_staking::StakingInterface for StakingMock { impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; @@ -291,7 +291,7 @@ impl ExtBuilder { let amount_to_bond = Pools::depositor_min_bond(); Balances::make_free_balance_be(&10, amount_to_bond * 
5); assert_ok!(Pools::create(RawOrigin::Signed(10).into(), amount_to_bond, 900, 901, 902)); - assert_ok!(Pools::set_metadata(Origin::signed(900), 1, vec![1, 1])); + assert_ok!(Pools::set_metadata(RuntimeOrigin::signed(900), 1, vec![1, 1])); let last_pool = LastPoolId::::get(); for (account_id, bonded) in self.members { Balances::make_free_balance_be(&account_id, bonded * 2); @@ -353,7 +353,7 @@ pub fn fully_unbond_permissioned(member: AccountId) -> DispatchResult { let points = PoolMembers::::get(&member) .map(|d| d.active_points()) .unwrap_or_default(); - Pools::unbond(Origin::signed(member), member, points) + Pools::unbond(RuntimeOrigin::signed(member), member, points) } #[cfg(test)] diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index bcc735a9cef99..7b8d14164d63f 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -443,7 +443,7 @@ mod join { assert!(!PoolMembers::::contains_key(&11)); // When - assert_ok!(Pools::join(Origin::signed(11), 2, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(11), 2, 1)); // Then @@ -471,7 +471,7 @@ mod join { assert!(!PoolMembers::::contains_key(&12)); // When - assert_ok!(Pools::join(Origin::signed(12), 12, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(12), 12, 1)); // Then assert_eq!( @@ -494,15 +494,21 @@ mod join { assert_eq!(PoolMembers::::get(&10).unwrap().pool_id, 1); assert_noop!( - Pools::join(Origin::signed(10), 420, 123), + Pools::join(RuntimeOrigin::signed(10), 420, 123), Error::::AccountBelongsToOtherPool ); - assert_noop!(Pools::join(Origin::signed(11), 420, 123), Error::::PoolNotFound); + assert_noop!( + Pools::join(RuntimeOrigin::signed(11), 420, 123), + Error::::PoolNotFound + ); // Force the pools bonded balance to 0, simulating a 100% slash StakingMock::set_bonded_balance(Pools::create_bonded_account(1), 0); - assert_noop!(Pools::join(Origin::signed(11), 420, 1), Error::::OverflowRisk); + assert_noop!( + Pools::join(RuntimeOrigin::signed(11), 420, 1), + Error::::OverflowRisk + ); // Given a mocked bonded pool BondedPool:: { @@ -527,27 +533,33 @@ mod join { Pools::create_bonded_account(123), max_points_to_balance, ); - assert_noop!(Pools::join(Origin::signed(11), 420, 123), Error::::OverflowRisk); + assert_noop!( + Pools::join(RuntimeOrigin::signed(11), 420, 123), + Error::::OverflowRisk + ); StakingMock::set_bonded_balance( Pools::create_bonded_account(123), Balance::MAX / max_points_to_balance, ); // Balance needs to be gt Balance::MAX / `MaxPointsToBalance` - assert_noop!(Pools::join(Origin::signed(11), 5, 123), Error::::OverflowRisk); + assert_noop!( + Pools::join(RuntimeOrigin::signed(11), 5, 123), + Error::::OverflowRisk + ); StakingMock::set_bonded_balance(Pools::create_bonded_account(1), max_points_to_balance); // Cannot join a pool that isn't open unsafe_set_state(123, PoolState::Blocked); assert_noop!( - Pools::join(Origin::signed(11), max_points_to_balance, 123), + Pools::join(RuntimeOrigin::signed(11), max_points_to_balance, 123), Error::::NotOpen ); unsafe_set_state(123, PoolState::Destroying); assert_noop!( - Pools::join(Origin::signed(11), max_points_to_balance, 123), + Pools::join(RuntimeOrigin::signed(11), max_points_to_balance, 123), Error::::NotOpen ); @@ -556,7 +568,7 @@ mod join { // Then assert_noop!( - Pools::join(Origin::signed(11), 99, 123), + Pools::join(RuntimeOrigin::signed(11), 99, 123), Error::::MinimumBondNotMet ); }); @@ -578,7 +590,7 @@ mod join { }, } .put(); - let _ = Pools::join(Origin::signed(11), 420, 123); + let 
_ = Pools::join(RuntimeOrigin::signed(11), 420, 123); }); } @@ -591,7 +603,7 @@ mod join { let account = i + 100; Balances::make_free_balance_be(&account, 100 + Balances::minimum_balance()); - assert_ok!(Pools::join(Origin::signed(account), 100, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(account), 100, 1)); } Balances::make_free_balance_be(&103, 100 + Balances::minimum_balance()); @@ -608,7 +620,7 @@ mod join { ); assert_noop!( - Pools::join(Origin::signed(103), 100, 1), + Pools::join(RuntimeOrigin::signed(103), 100, 1), Error::::MaxPoolMembers ); @@ -617,7 +629,7 @@ mod join { assert_eq!(MaxPoolMembers::::get(), Some(4)); Balances::make_free_balance_be(&104, 100 + Balances::minimum_balance()); - assert_ok!(Pools::create(Origin::signed(104), 100, 104, 104, 104)); + assert_ok!(Pools::create(RuntimeOrigin::signed(104), 100, 104, 104, 104)); let pool_account = BondedPools::::iter() .find(|(_, bonded_pool)| bonded_pool.roles.depositor == 104) @@ -634,7 +646,7 @@ mod join { ); assert_noop!( - Pools::join(Origin::signed(103), 100, pool_account), + Pools::join(RuntimeOrigin::signed(103), 100, pool_account), Error::::MaxPoolMembers ); }); @@ -692,7 +704,7 @@ mod claim_payout { let _ = pool_events_since_last_call(); // When - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); // Then assert_eq!( @@ -708,7 +720,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 90); // When - assert_ok!(Pools::claim_payout(Origin::signed(40))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(40))); // Then assert_eq!( @@ -721,7 +733,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 50); // When - assert_ok!(Pools::claim_payout(Origin::signed(50))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(50))); // Then assert_eq!( @@ -737,7 +749,7 @@ mod claim_payout { assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += 50)); // When - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); // Then assert_eq!( @@ -750,7 +762,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 45); // When - assert_ok!(Pools::claim_payout(Origin::signed(40))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(40))); // Then assert_eq!( @@ -767,7 +779,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 75); // When - assert_ok!(Pools::claim_payout(Origin::signed(50))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(50))); // Then assert_eq!( @@ -780,7 +792,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 25); // When - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); // Then assert_eq!( @@ -797,7 +809,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 420); // When - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); // Then assert_eq!( @@ -816,7 +828,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 400); // When - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); // Then assert_eq!( @@ -829,7 +841,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 398); 
// When - assert_ok!(Pools::claim_payout(Origin::signed(40))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(40))); // Then assert_eq!( @@ -842,7 +854,7 @@ mod claim_payout { assert_eq!(Balances::free_balance(&default_reward_account()), ed + 210); // When - assert_ok!(Pools::claim_payout(Origin::signed(50))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(50))); // Then assert_eq!( @@ -862,7 +874,10 @@ mod claim_payout { // fully unbond the member. assert_ok!(fully_unbond_permissioned(11)); - assert_noop!(Pools::claim_payout(Origin::signed(11)), Error::::FullyUnbonding); + assert_noop!( + Pools::claim_payout(RuntimeOrigin::signed(11)), + Error::::FullyUnbonding + ); assert_eq!( pool_events_since_last_call(), @@ -1141,14 +1156,14 @@ mod claim_payout { // 20 joins afterwards. Balances::make_free_balance_be(&20, Balances::minimum_balance() + 10); - assert_ok!(Pools::join(Origin::signed(20), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); // reward by another 20 Balances::mutate_account(&default_reward_account(), |f| f.free += 20).unwrap(); // 10 should claim 10 + 10, 20 should claim 20 / 2. - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), vec![ @@ -1163,8 +1178,8 @@ mod claim_payout { // any upcoming rewards are shared equally. Balances::mutate_account(&default_reward_account(), |f| f.free += 20).unwrap(); - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1184,13 +1199,13 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 3).unwrap(); Balances::make_free_balance_be(&20, Balances::minimum_balance() + 10); - assert_ok!(Pools::join(Origin::signed(20), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); Balances::mutate_account(&default_reward_account(), |f| f.free += 6).unwrap(); // 10 should claim 3, 20 should claim 3 + 3. - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1206,8 +1221,8 @@ mod claim_payout { // any upcoming rewards are shared equally. Balances::mutate_account(&default_reward_account(), |f| f.free += 8).unwrap(); - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1220,8 +1235,8 @@ mod claim_payout { // uneven upcoming rewards are shared equally, rounded down. 
Balances::mutate_account(&default_reward_account(), |f| f.free += 7).unwrap(); - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1241,19 +1256,19 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); Balances::make_free_balance_be(&20, ed + 10); - assert_ok!(Pools::join(Origin::signed(20), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); Balances::make_free_balance_be(&30, ed + 10); - assert_ok!(Pools::join(Origin::signed(30), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); // 10 should claim 10, 20 should claim nothing. - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); - assert_ok!(Pools::claim_payout(Origin::signed(30))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1271,9 +1286,9 @@ mod claim_payout { // any upcoming rewards are shared equally. Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); - assert_ok!(Pools::claim_payout(Origin::signed(30))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1297,7 +1312,7 @@ mod claim_payout { assert_eq!(Pools::pending_rewards(20), None); Balances::make_free_balance_be(&20, ed + 10); - assert_ok!(Pools::join(Origin::signed(20), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); assert_eq!(Pools::pending_rewards(10), Some(30)); assert_eq!(Pools::pending_rewards(20), Some(0)); @@ -1309,7 +1324,7 @@ mod claim_payout { assert_eq!(Pools::pending_rewards(30), None); Balances::make_free_balance_be(&30, ed + 10); - assert_ok!(Pools::join(Origin::signed(30), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); assert_eq!(Pools::pending_rewards(10), Some(30 + 50)); assert_eq!(Pools::pending_rewards(20), Some(50)); @@ -1322,17 +1337,17 @@ mod claim_payout { assert_eq!(Pools::pending_rewards(30), Some(20)); // 10 should claim 10, 20 should claim nothing. 
- assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_eq!(Pools::pending_rewards(10), Some(0)); assert_eq!(Pools::pending_rewards(20), Some(50 + 20)); assert_eq!(Pools::pending_rewards(30), Some(20)); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!(Pools::pending_rewards(10), Some(0)); assert_eq!(Pools::pending_rewards(20), Some(0)); assert_eq!(Pools::pending_rewards(30), Some(20)); - assert_ok!(Pools::claim_payout(Origin::signed(30))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); assert_eq!(Pools::pending_rewards(10), Some(0)); assert_eq!(Pools::pending_rewards(20), Some(0)); assert_eq!(Pools::pending_rewards(30), Some(0)); @@ -1345,16 +1360,16 @@ mod claim_payout { let ed = Balances::minimum_balance(); Balances::make_free_balance_be(&20, ed + 20); - assert_ok!(Pools::join(Origin::signed(20), 20, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); Balances::make_free_balance_be(&30, ed + 20); - assert_ok!(Pools::join(Origin::signed(30), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); // everyone claims. - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); - assert_ok!(Pools::claim_payout(Origin::signed(30))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1370,14 +1385,14 @@ mod claim_payout { ); // 30 now bumps itself to be like 20. - assert_ok!(Pools::bond_extra(Origin::signed(30), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(30), BondExtra::FreeBalance(10))); // more rewards come in. Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); - assert_ok!(Pools::claim_payout(Origin::signed(30))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1397,13 +1412,13 @@ mod claim_payout { let ed = Balances::minimum_balance(); Balances::make_free_balance_be(&20, ed + 20); - assert_ok!(Pools::join(Origin::signed(20), 20, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); // everyone claims. - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1417,13 +1432,13 @@ mod claim_payout { ); // 20 unbonds to be equal to 10 (10 points each). - assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); // more rewards come in. 
Balances::mutate_account(&default_reward_account(), |f| f.free += 100).unwrap(); - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1442,16 +1457,16 @@ mod claim_payout { let ed = Balances::minimum_balance(); Balances::make_free_balance_be(&20, ed + 20); - assert_ok!(Pools::join(Origin::signed(20), 20, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); Balances::make_free_balance_be(&30, ed + 20); - assert_ok!(Pools::join(Origin::signed(30), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10, 1)); // 10 gets 10, 20 gets 20, 30 gets 10 Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); // some claim. - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1469,8 +1484,8 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 80).unwrap(); // some claim. - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1484,8 +1499,8 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 80).unwrap(); // some claim. - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1496,7 +1511,7 @@ mod claim_payout { ); // now 30 claims all at once - assert_ok!(Pools::claim_payout(Origin::signed(30))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1511,13 +1526,13 @@ mod claim_payout { let ed = Balances::minimum_balance(); Balances::make_free_balance_be(&20, ed + 200); - assert_ok!(Pools::join(Origin::signed(20), 20, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); // 10 gets 10, 20 gets 20, 30 gets 10 Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); // some claim. - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_eq!( pool_events_since_last_call(), @@ -1533,11 +1548,11 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); // and 20 bonds more -- they should not have more share of this reward. - assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(20), BondExtra::FreeBalance(10))); // everyone claim. 
- assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1553,8 +1568,8 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 60).unwrap(); // everyone claim. - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -1584,7 +1599,7 @@ mod claim_payout { // create pool 2 Balances::make_free_balance_be(&20, 100); - assert_ok!(Pools::create(Origin::signed(20), 10, 20, 20, 20)); + assert_ok!(Pools::create(RuntimeOrigin::signed(20), 10, 20, 20, 20)); // has no impact -- initial let (member_20, _, reward_pool_20) = Pools::get_member_with_pools(&20).unwrap(); @@ -1600,7 +1615,7 @@ mod claim_payout { // create pool 3 Balances::make_free_balance_be(&30, 100); - assert_ok!(Pools::create(Origin::signed(30), 10, 30, 30, 30)); + assert_ok!(Pools::create(RuntimeOrigin::signed(30), 10, 30, 30, 30)); // reward counter is still the same. let (member_30, _, reward_pool_30) = Pools::get_member_with_pools(&30).unwrap(); @@ -1616,7 +1631,7 @@ mod claim_payout { assert_eq!(member_30.last_recorded_reward_counter, 0.into()); // and 30 can claim the reward now. - assert_ok!(Pools::claim_payout(Origin::signed(30))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); assert_eq!( pool_events_since_last_call(), @@ -1640,7 +1655,7 @@ mod claim_payout { MaxPoolMembersPerPool::::set(None); let join = |x, y| { Balances::make_free_balance_be(&x, y + Balances::minimum_balance()); - assert_ok!(Pools::join(Origin::signed(x), y, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(x), y, 1)); }; { @@ -1722,7 +1737,10 @@ mod claim_payout { // 10 bonds extra without any rewards. { - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra( + RuntimeOrigin::signed(10), + BondExtra::FreeBalance(10) + )); let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap(); assert_eq!(member.last_recorded_reward_counter, 0.into()); assert_eq!(reward_pool.last_recorded_total_payouts, 0); @@ -1734,7 +1752,10 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 30).unwrap(); { - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra( + RuntimeOrigin::signed(10), + BondExtra::FreeBalance(10) + )); let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap(); // explanation: before bond_extra takes place, there is 40 points and 30 balance in // the system, RewardCounter is therefore 7.5 @@ -1748,7 +1769,10 @@ mod claim_payout { // 20 bonds extra again, without further rewards. { - assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra( + RuntimeOrigin::signed(20), + BondExtra::FreeBalance(10) + )); let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap(); assert_eq!(member.last_recorded_reward_counter, RewardCounter::from_float(0.75)); assert_eq!( @@ -1786,7 +1810,7 @@ mod claim_payout { // 10 cashes it out, and bonds it. 
{ - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap(); // there is 30 points and 30 reward points in the system RC is 1. assert_eq!(member.last_recorded_reward_counter, 1.into()); @@ -1803,7 +1827,7 @@ mod claim_payout { // 20 re-bonds it. { - assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::Rewards)); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(20), BondExtra::Rewards)); let (member, _, reward_pool) = Pools::get_member_with_pools(&10).unwrap(); assert_eq!(member.last_recorded_reward_counter, 1.into()); assert_eq!(reward_pool.total_rewards_claimed, 30); @@ -1843,7 +1867,7 @@ mod claim_payout { // 20 unbonds without any rewards. { - assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap(); assert_eq!(member.last_recorded_reward_counter, 0.into()); assert_eq!(reward_pool.last_recorded_total_payouts, 0); @@ -1855,7 +1879,7 @@ mod claim_payout { // and 30 also unbonds half. { - assert_ok!(Pools::unbond(Origin::signed(30), 30, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(30), 30, 10)); let (member, _, reward_pool) = Pools::get_member_with_pools(&30).unwrap(); // 30 reward in the system, and 40 points before this unbond to collect it, // RewardCounter is 3/4. @@ -1872,7 +1896,7 @@ mod claim_payout { // 30 unbonds again, not change this time. { - assert_ok!(Pools::unbond(Origin::signed(30), 30, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(30), 30, 5)); let (member, _, reward_pool) = Pools::get_member_with_pools(&30).unwrap(); assert_eq!( member.last_recorded_reward_counter, @@ -1887,7 +1911,7 @@ mod claim_payout { // 20 unbonds again, not change this time, just collecting their reward. { - assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); let (member, _, reward_pool) = Pools::get_member_with_pools(&20).unwrap(); assert_eq!( member.last_recorded_reward_counter, @@ -1901,7 +1925,7 @@ mod claim_payout { } // trigger 10's reward as well to see all of the payouts. - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_eq!( pool_events_since_last_call(), @@ -1937,8 +1961,8 @@ mod claim_payout { Balances::mutate_account(&default_reward_account(), |f| f.free += 40).unwrap(); // everyone claims - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); // some dust (1) remains in the reward account. assert_eq!( @@ -1953,15 +1977,15 @@ mod claim_payout { ); // start dismantling the pool. 
- assert_ok!(Pools::set_state(Origin::signed(902), 1, PoolState::Destroying)); + assert_ok!(Pools::set_state(RuntimeOrigin::signed(902), 1, PoolState::Destroying)); assert_ok!(fully_unbond_permissioned(20)); CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(20), 20, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0)); assert_ok!(fully_unbond_permissioned(10)); CurrentEra::set(6); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); assert_eq!( pool_events_since_last_call(), @@ -2000,10 +2024,10 @@ mod claim_payout { .unwrap(); // everyone claims - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); - assert_ok!(Pools::claim_payout(Origin::signed(21))); - assert_ok!(Pools::claim_payout(Origin::signed(22))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(21))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(22))); assert_eq!( pool_events_since_last_call(), @@ -2057,18 +2081,18 @@ mod unbond { .add_members(vec![(20, 20)]) .build_and_execute(|| { // can unbond to above limit - assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 15); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 5); // cannot go to below 10: assert_noop!( - Pools::unbond(Origin::signed(20), 20, 10), + Pools::unbond(RuntimeOrigin::signed(20), 20, 10), Error::::MinimumBondNotMet ); // but can go to 0 - assert_ok!(Pools::unbond(Origin::signed(20), 20, 15)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 15)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20); }) @@ -2089,23 +2113,23 @@ mod unbond { // cannot be kicked to above the limit. assert_noop!( - Pools::unbond(Origin::signed(kicker), 20, 5), + Pools::unbond(RuntimeOrigin::signed(kicker), 20, 5), Error::::PartialUnbondNotAllowedPermissionlessly ); // cannot go to below 10: assert_noop!( - Pools::unbond(Origin::signed(kicker), 20, 15), + Pools::unbond(RuntimeOrigin::signed(kicker), 20, 15), Error::::PartialUnbondNotAllowedPermissionlessly ); // but they themselves can do an unbond - assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 2)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 18); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 2); // can be kicked to 0. - assert_ok!(Pools::unbond(Origin::signed(kicker), 20, 18)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(kicker), 20, 18)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20); }) @@ -2126,23 +2150,23 @@ mod unbond { // cannot be kicked to above the limit. 
assert_noop!( - Pools::unbond(Origin::signed(random), 20, 5), + Pools::unbond(RuntimeOrigin::signed(random), 20, 5), Error::::PartialUnbondNotAllowedPermissionlessly ); // cannot go to below 10: assert_noop!( - Pools::unbond(Origin::signed(random), 20, 15), + Pools::unbond(RuntimeOrigin::signed(random), 20, 15), Error::::PartialUnbondNotAllowedPermissionlessly ); // but they themselves can do an unbond - assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 2)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 18); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 2); // but can go to 0 - assert_ok!(Pools::unbond(Origin::signed(random), 20, 18)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(random), 20, 18)); assert_eq!(PoolMembers::::get(20).unwrap().active_points(), 0); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_points(), 20); }) @@ -2155,19 +2179,25 @@ mod unbond { // - depositor cannot unbond to below limit or 0 ExtBuilder::default().min_join_bond(10).build_and_execute(|| { // give the depositor some extra funds. - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); assert_eq!(PoolMembers::::get(10).unwrap().points, 20); // can unbond to above the limit. - assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 5)); assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5); // cannot go to below 10: - assert_noop!(Pools::unbond(Origin::signed(10), 10, 10), Error::::MinimumBondNotMet); + assert_noop!( + Pools::unbond(RuntimeOrigin::signed(10), 10, 10), + Error::::MinimumBondNotMet + ); // cannot go to 0 either. - assert_noop!(Pools::unbond(Origin::signed(10), 10, 15), Error::::MinimumBondNotMet); + assert_noop!( + Pools::unbond(RuntimeOrigin::signed(10), 10, 15), + Error::::MinimumBondNotMet + ); }) } @@ -2177,7 +2207,7 @@ mod unbond { // - depositor can never be kicked. ExtBuilder::default().min_join_bond(10).build_and_execute(|| { // give the depositor some extra funds. - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); assert_eq!(PoolMembers::::get(10).unwrap().points, 20); // set the stage @@ -2186,24 +2216,27 @@ mod unbond { // cannot be kicked to above limit. assert_noop!( - Pools::unbond(Origin::signed(kicker), 10, 5), + Pools::unbond(RuntimeOrigin::signed(kicker), 10, 5), Error::::PartialUnbondNotAllowedPermissionlessly ); // or below the limit assert_noop!( - Pools::unbond(Origin::signed(kicker), 10, 15), + Pools::unbond(RuntimeOrigin::signed(kicker), 10, 15), Error::::PartialUnbondNotAllowedPermissionlessly ); // or 0. assert_noop!( - Pools::unbond(Origin::signed(kicker), 10, 20), + Pools::unbond(RuntimeOrigin::signed(kicker), 10, 20), Error::::DoesNotHavePermission ); // they themselves cannot do it either - assert_noop!(Pools::unbond(Origin::signed(10), 10, 20), Error::::MinimumBondNotMet); + assert_noop!( + Pools::unbond(RuntimeOrigin::signed(10), 10, 20), + Error::::MinimumBondNotMet + ); }) } @@ -2212,7 +2245,7 @@ mod unbond { // depositor can never be permissionlessly unbonded. ExtBuilder::default().min_join_bond(10).build_and_execute(|| { // give the depositor some extra funds. 
- assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); assert_eq!(PoolMembers::::get(10).unwrap().points, 20); // set the stage @@ -2221,24 +2254,24 @@ mod unbond { // cannot be kicked to above limit. assert_noop!( - Pools::unbond(Origin::signed(random), 10, 5), + Pools::unbond(RuntimeOrigin::signed(random), 10, 5), Error::::PartialUnbondNotAllowedPermissionlessly ); // or below the limit assert_noop!( - Pools::unbond(Origin::signed(random), 10, 15), + Pools::unbond(RuntimeOrigin::signed(random), 10, 15), Error::::PartialUnbondNotAllowedPermissionlessly ); // or 0. assert_noop!( - Pools::unbond(Origin::signed(random), 10, 20), + Pools::unbond(RuntimeOrigin::signed(random), 10, 20), Error::::DoesNotHavePermission ); // they themselves can do it in this case though. - assert_ok!(Pools::unbond(Origin::signed(10), 10, 20)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 20)); }) } @@ -2251,26 +2284,29 @@ mod unbond { .add_members(vec![(20, 20)]) .build_and_execute(|| { // give the depositor some extra funds. - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra( + RuntimeOrigin::signed(10), + BondExtra::FreeBalance(10) + )); assert_eq!(PoolMembers::::get(10).unwrap().points, 20); // set the stage unsafe_set_state(1, PoolState::Destroying); // can go above the limit - assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 5)); assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5); // but not below the limit assert_noop!( - Pools::unbond(Origin::signed(10), 10, 10), + Pools::unbond(RuntimeOrigin::signed(10), 10, 10), Error::::MinimumBondNotMet ); // and certainly not zero assert_noop!( - Pools::unbond(Origin::signed(10), 10, 15), + Pools::unbond(RuntimeOrigin::signed(10), 10, 15), Error::::MinimumBondNotMet ); }) @@ -2284,22 +2320,25 @@ mod unbond { // - depositor can unbond to 0 if last and destroying. ExtBuilder::default().min_join_bond(10).build_and_execute(|| { // give the depositor some extra funds. - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); assert_eq!(PoolMembers::::get(10).unwrap().points, 20); // set the stage unsafe_set_state(1, PoolState::Destroying); // can unbond to above the limit. - assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 5)); assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 15); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 5); // still cannot go to below limit - assert_noop!(Pools::unbond(Origin::signed(10), 10, 10), Error::::MinimumBondNotMet); + assert_noop!( + Pools::unbond(RuntimeOrigin::signed(10), 10, 10), + Error::::MinimumBondNotMet + ); // can go to 0 too. 
- assert_ok!(Pools::unbond(Origin::signed(10), 10, 15)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 15)); assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 0); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 20); }) @@ -2426,8 +2465,8 @@ mod unbond { // When CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 40, 0)); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 550, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 40, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 550, 0)); assert_ok!(fully_unbond_permissioned(10)); // Then @@ -2525,12 +2564,12 @@ mod unbond { // When the nominator tries to kick, then its a noop assert_noop!( - Pools::fully_unbond(Origin::signed(901), 100), + Pools::fully_unbond(RuntimeOrigin::signed(901), 100), Error::::NotKickerOrDestroying ); // When the root kicks then its ok - assert_ok!(Pools::fully_unbond(Origin::signed(900), 100)); + assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(900), 100)); assert_eq!( pool_events_since_last_call(), @@ -2550,7 +2589,7 @@ mod unbond { ); // When the state toggler kicks then its ok - assert_ok!(Pools::fully_unbond(Origin::signed(902), 200)); + assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(902), 200)); assert_eq!( pool_events_since_last_call(), @@ -2601,13 +2640,13 @@ mod unbond { // A permissionless unbond attempt errors assert_noop!( - Pools::fully_unbond(Origin::signed(420), 100), + Pools::fully_unbond(RuntimeOrigin::signed(420), 100), Error::::NotKickerOrDestroying ); // permissionless unbond must be full assert_noop!( - Pools::unbond(Origin::signed(420), 100, 80), + Pools::unbond(RuntimeOrigin::signed(420), 100, 80), Error::::PartialUnbondNotAllowedPermissionlessly, ); @@ -2616,12 +2655,12 @@ mod unbond { // The depositor cannot be fully unbonded until they are the last member assert_noop!( - Pools::fully_unbond(Origin::signed(10), 10), + Pools::fully_unbond(RuntimeOrigin::signed(10), 10), Error::::MinimumBondNotMet, ); // Any account can unbond a member that is not the depositor - assert_ok!(Pools::fully_unbond(Origin::signed(420), 100)); + assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(420), 100)); assert_eq!( pool_events_since_last_call(), @@ -2635,7 +2674,7 @@ mod unbond { // still permissionless unbond must be full assert_noop!( - Pools::unbond(Origin::signed(420), 100, 80), + Pools::unbond(RuntimeOrigin::signed(420), 100, 80), Error::::PartialUnbondNotAllowedPermissionlessly, ); @@ -2644,7 +2683,7 @@ mod unbond { // The depositor cannot be unbonded assert_noop!( - Pools::fully_unbond(Origin::signed(420), 10), + Pools::fully_unbond(RuntimeOrigin::signed(420), 10), Error::::DoesNotHavePermission ); @@ -2653,27 +2692,27 @@ mod unbond { // The depositor cannot be unbonded yet. assert_noop!( - Pools::fully_unbond(Origin::signed(420), 10), + Pools::fully_unbond(RuntimeOrigin::signed(420), 10), Error::::DoesNotHavePermission, ); // but when everyone is unbonded it can.. CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 100, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 100, 0)); // still permissionless unbond must be full. assert_noop!( - Pools::unbond(Origin::signed(420), 10, 5), + Pools::unbond(RuntimeOrigin::signed(420), 10, 5), Error::::PartialUnbondNotAllowedPermissionlessly, ); // depositor can never be unbonded permissionlessly . 
assert_noop!( - Pools::fully_unbond(Origin::signed(420), 10), + Pools::fully_unbond(RuntimeOrigin::signed(420), 10), Error::::DoesNotHavePermission ); // but depositor itself can do it. - assert_ok!(Pools::fully_unbond(Origin::signed(10), 10)); + assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(10), 10)); assert_eq!(BondedPools::::get(1).unwrap().points, 0); assert_eq!( @@ -2696,7 +2735,7 @@ mod unbond { fn unbond_errors_correctly() { ExtBuilder::default().build_and_execute(|| { assert_noop!( - Pools::fully_unbond(Origin::signed(11), 11), + Pools::fully_unbond(RuntimeOrigin::signed(11), 11), Error::::PoolMemberNotFound ); @@ -2704,7 +2743,7 @@ mod unbond { let member = PoolMember { pool_id: 2, points: 10, ..Default::default() }; PoolMembers::::insert(11, member); - let _ = Pools::fully_unbond(Origin::signed(11), 11); + let _ = Pools::fully_unbond(RuntimeOrigin::signed(11), 11); }); } @@ -2726,7 +2765,7 @@ mod unbond { } .put(); - let _ = Pools::fully_unbond(Origin::signed(11), 11); + let _ = Pools::fully_unbond(RuntimeOrigin::signed(11), 11); }); } @@ -2756,7 +2795,7 @@ mod unbond { unsafe_set_state(1, PoolState::Destroying); // when: casual unbond - assert_ok!(Pools::unbond(Origin::signed(10), 10, 1)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 1)); // then assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 9); @@ -2785,7 +2824,7 @@ mod unbond { ); // when: casual further unbond, same era. - assert_ok!(Pools::unbond(Origin::signed(10), 10, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 5)); // then assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 4); @@ -2811,7 +2850,7 @@ mod unbond { // when: casual further unbond, next era. CurrentEra::set(1); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 1)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 1)); // then assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 3); @@ -2839,14 +2878,14 @@ mod unbond { // when: unbonding more than our active: error assert_noop!( frame_support::storage::with_storage_layer(|| Pools::unbond( - Origin::signed(10), + RuntimeOrigin::signed(10), 10, 5 )), Error::::MinimumBondNotMet ); // instead: - assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 3)); // then assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 0); @@ -2879,9 +2918,9 @@ mod unbond { MaxUnbonding::set(2); // given - assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 2)); CurrentEra::set(1); - assert_ok!(Pools::unbond(Origin::signed(20), 20, 3)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 3)); assert_eq!( PoolMembers::::get(20).unwrap().unbonding_eras, member_unbonding_eras!(3 => 2, 4 => 3) @@ -2891,7 +2930,7 @@ mod unbond { CurrentEra::set(2); assert_noop!( frame_support::storage::with_storage_layer(|| Pools::unbond( - Origin::signed(20), + RuntimeOrigin::signed(20), 20, 4 )), @@ -2900,7 +2939,7 @@ mod unbond { // when MaxUnbonding::set(3); - assert_ok!(Pools::unbond(Origin::signed(20), 20, 1)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 1)); assert_eq!( PoolMembers::::get(20).unwrap().unbonding_eras, @@ -2933,13 +2972,13 @@ mod unbond { assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 0); // can unbond a bit.. 
- assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 3)); assert_eq!(PoolMembers::::get(10).unwrap().active_points(), 7); assert_eq!(PoolMembers::::get(10).unwrap().unbonding_points(), 3); // but not less than 2 assert_noop!( - Pools::unbond(Origin::signed(10), 10, 6), + Pools::unbond(RuntimeOrigin::signed(10), 10, 6), Error::::MinimumBondNotMet ); @@ -2967,7 +3006,7 @@ mod unbond { // cannot unbond even 7, because the value of shares is now less. assert_noop!( - Pools::unbond(Origin::signed(10), 10, 7), + Pools::unbond(RuntimeOrigin::signed(10), 10, 7), Error::::MinimumBondNotMet ); }); @@ -2985,7 +3024,7 @@ mod unbond { 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(Origin::signed(20), 20, 2)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 2)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3004,7 +3043,7 @@ mod unbond { 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(Origin::signed(20), 20, 3)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 3)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3020,7 +3059,7 @@ mod unbond { 4 * Balances::minimum_balance(), ); - assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3051,7 +3090,7 @@ mod pool_withdraw_unbonded { assert_eq!(Balances::free_balance(&default_bonded_account()), 10); // When - assert_ok!(Pools::pool_withdraw_unbonded(Origin::signed(10), 1, 0)); + assert_ok!(Pools::pool_withdraw_unbonded(RuntimeOrigin::signed(10), 1, 0)); // Then there unbonding balance is no longer locked assert_eq!(StakingMock::active_stake(&default_bonded_account()), Some(5)); @@ -3075,8 +3114,8 @@ mod withdraw_unbonded { // Given assert_eq!(StakingMock::bonding_duration(), 3); - assert_ok!(Pools::fully_unbond(Origin::signed(550), 550)); - assert_ok!(Pools::fully_unbond(Origin::signed(40), 40)); + assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(550), 550)); + assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(40), 40)); assert_eq!(Balances::free_balance(&default_bonded_account()), 600); let mut current_era = 1; @@ -3151,7 +3190,7 @@ mod withdraw_unbonded { ); // When - assert_ok!(Pools::withdraw_unbonded(Origin::signed(550), 550, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(550), 550, 0)); // Then assert_eq!( @@ -3171,7 +3210,7 @@ mod withdraw_unbonded { ); // When - assert_ok!(Pools::withdraw_unbonded(Origin::signed(40), 40, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(40), 40, 0)); // Then assert_eq!( @@ -3199,7 +3238,7 @@ mod withdraw_unbonded { CurrentEra::set(current_era); // when - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3271,7 +3310,7 @@ mod withdraw_unbonded { CurrentEra::set(StakingMock::bonding_duration()); // When - assert_ok!(Pools::withdraw_unbonded(Origin::signed(40), 40, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(40), 40, 0)); // Then assert_eq!( @@ -3292,7 +3331,7 @@ mod withdraw_unbonded { ); // When - assert_ok!(Pools::withdraw_unbonded(Origin::signed(550), 550, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(550), 550, 0)); // Then assert_eq!( @@ -3321,11 +3360,11 @@ mod withdraw_unbonded { CurrentEra::set(CurrentEra::get() + 3); // set metadata to check that it's being 
removed on dissolve - assert_ok!(Pools::set_metadata(Origin::signed(900), 1, vec![1, 1])); + assert_ok!(Pools::set_metadata(RuntimeOrigin::signed(900), 1, vec![1, 1])); assert!(Metadata::::contains_key(1)); // when - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); // then assert_eq!(Balances::free_balance(&10), 10 + 35); @@ -3361,7 +3400,7 @@ mod withdraw_unbonded { assert_eq!(Balances::free_balance(&10), 35); assert_eq!(Balances::free_balance(&default_bonded_account()), 10); unsafe_set_state(1, PoolState::Destroying); - assert_ok!(Pools::fully_unbond(Origin::signed(10), 10)); + assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(10), 10)); // Simulate a slash that is not accounted for in the sub pools. Balances::make_free_balance_be(&default_bonded_account(), 5); @@ -3374,7 +3413,7 @@ mod withdraw_unbonded { CurrentEra::set(0 + 3); // When - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); // Then assert_eq!(Balances::free_balance(10), 10 + 35); @@ -3393,7 +3432,7 @@ mod withdraw_unbonded { SubPoolsStorage::::insert(1, sub_pools.clone()); assert_noop!( - Pools::withdraw_unbonded(Origin::signed(11), 11, 0), + Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0), Error::::PoolMemberNotFound ); @@ -3406,7 +3445,7 @@ mod withdraw_unbonded { // We are still in the bonding duration assert_noop!( - Pools::withdraw_unbonded(Origin::signed(11), 11, 0), + Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0), Error::::CannotWithdrawAny ); @@ -3423,8 +3462,8 @@ mod withdraw_unbonded { .add_members(vec![(100, 100), (200, 200)]) .build_and_execute(|| { // Given - assert_ok!(Pools::fully_unbond(Origin::signed(100), 100)); - assert_ok!(Pools::fully_unbond(Origin::signed(200), 200)); + assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(100), 100)); + assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(200), 200)); assert_eq!( BondedPool::::get(1).unwrap(), BondedPool { @@ -3441,7 +3480,7 @@ mod withdraw_unbonded { // Cannot kick when pool is open assert_noop!( - Pools::withdraw_unbonded(Origin::signed(902), 100, 0), + Pools::withdraw_unbonded(RuntimeOrigin::signed(902), 100, 0), Error::::NotKickerOrDestroying ); assert_eq!( @@ -3473,15 +3512,15 @@ mod withdraw_unbonded { // Cannot kick as a nominator assert_noop!( - Pools::withdraw_unbonded(Origin::signed(901), 100, 0), + Pools::withdraw_unbonded(RuntimeOrigin::signed(901), 100, 0), Error::::NotKickerOrDestroying ); // Can kick as root - assert_ok!(Pools::withdraw_unbonded(Origin::signed(900), 100, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(900), 100, 0)); // Can kick as state toggler - assert_ok!(Pools::withdraw_unbonded(Origin::signed(900), 200, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(900), 200, 0)); assert_eq!(Balances::free_balance(100), 100 + 100); assert_eq!(Balances::free_balance(200), 200 + 200); @@ -3504,7 +3543,7 @@ mod withdraw_unbonded { fn withdraw_unbonded_destroying_permissionless() { ExtBuilder::default().add_members(vec![(100, 100)]).build_and_execute(|| { // Given - assert_ok!(Pools::fully_unbond(Origin::signed(100), 100)); + assert_ok!(Pools::fully_unbond(RuntimeOrigin::signed(100), 100)); assert_eq!( BondedPool::::get(1).unwrap(), BondedPool { @@ -3522,7 +3561,7 @@ mod withdraw_unbonded { // Cannot permissionlessly withdraw assert_noop!( - Pools::fully_unbond(Origin::signed(420), 100), + 
Pools::fully_unbond(RuntimeOrigin::signed(420), 100), Error::::NotKickerOrDestroying ); @@ -3530,7 +3569,7 @@ mod withdraw_unbonded { unsafe_set_state(1, PoolState::Destroying); // Can permissionlesly withdraw a member that is not the depositor - assert_ok!(Pools::withdraw_unbonded(Origin::signed(420), 100, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(420), 100, 0)); assert_eq!(SubPoolsStorage::::get(1).unwrap(), Default::default(),); assert_eq!(Balances::free_balance(100), 100 + 100); @@ -3552,13 +3591,13 @@ mod withdraw_unbonded { #[test] fn partial_withdraw_unbonded_depositor() { ExtBuilder::default().ed(1).build_and_execute(|| { - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); unsafe_set_state(1, PoolState::Destroying); // given - assert_ok!(Pools::unbond(Origin::signed(10), 10, 6)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 6)); CurrentEra::set(1); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 1)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 1)); assert_eq!( PoolMembers::::get(10).unwrap().unbonding_eras, member_unbonding_eras!(3 => 6, 4 => 1) @@ -3589,13 +3628,13 @@ mod withdraw_unbonded { // when CurrentEra::set(2); assert_noop!( - Pools::withdraw_unbonded(Origin::signed(10), 10, 0), + Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0), Error::::CannotWithdrawAny ); // when CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); // then assert_eq!( @@ -3618,7 +3657,7 @@ mod withdraw_unbonded { // when CurrentEra::set(4); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); // then assert_eq!( @@ -3633,7 +3672,7 @@ mod withdraw_unbonded { // when repeating: assert_noop!( - Pools::withdraw_unbonded(Origin::signed(10), 10, 0), + Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0), Error::::CannotWithdrawAny ); }); @@ -3643,9 +3682,9 @@ mod withdraw_unbonded { fn partial_withdraw_unbonded_non_depositor() { ExtBuilder::default().add_members(vec![(11, 10)]).build_and_execute(|| { // given - assert_ok!(Pools::unbond(Origin::signed(11), 11, 6)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(11), 11, 6)); CurrentEra::set(1); - assert_ok!(Pools::unbond(Origin::signed(11), 11, 1)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(11), 11, 1)); assert_eq!( PoolMembers::::get(11).unwrap().unbonding_eras, member_unbonding_eras!(3 => 6, 4 => 1) @@ -3676,13 +3715,13 @@ mod withdraw_unbonded { // when CurrentEra::set(2); assert_noop!( - Pools::withdraw_unbonded(Origin::signed(11), 11, 0), + Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0), Error::::CannotWithdrawAny ); // when CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(11), 11, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0)); // then assert_eq!( @@ -3705,7 +3744,7 @@ mod withdraw_unbonded { // when CurrentEra::set(4); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(11), 11, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0)); // then assert_eq!( @@ -3720,7 +3759,7 @@ mod withdraw_unbonded { // when repeating: assert_noop!( - Pools::withdraw_unbonded(Origin::signed(11), 11, 0), + Pools::withdraw_unbonded(RuntimeOrigin::signed(11), 11, 0), Error::::CannotWithdrawAny ); }); 
@@ -3730,7 +3769,7 @@ mod withdraw_unbonded { fn full_multi_step_withdrawing_non_depositor() { ExtBuilder::default().add_members(vec![(100, 100)]).build_and_execute(|| { // given - assert_ok!(Pools::unbond(Origin::signed(100), 100, 75)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(100), 100, 75)); assert_eq!( PoolMembers::::get(100).unwrap().unbonding_eras, member_unbonding_eras!(3 => 75) @@ -3738,20 +3777,20 @@ mod withdraw_unbonded { // progress one era and unbond the leftover. CurrentEra::set(1); - assert_ok!(Pools::unbond(Origin::signed(100), 100, 25)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(100), 100, 25)); assert_eq!( PoolMembers::::get(100).unwrap().unbonding_eras, member_unbonding_eras!(3 => 75, 4 => 25) ); assert_noop!( - Pools::withdraw_unbonded(Origin::signed(100), 100, 0), + Pools::withdraw_unbonded(RuntimeOrigin::signed(100), 100, 0), Error::::CannotWithdrawAny ); // now the 75 should be free. CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(100), 100, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(100), 100, 0)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3770,7 +3809,7 @@ mod withdraw_unbonded { // the 25 should be free now, and the member removed. CurrentEra::set(4); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(100), 100, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(100), 100, 0)); assert_eq!( pool_events_since_last_call(), vec![ @@ -3793,8 +3832,8 @@ mod withdraw_unbonded { System::reset_events(); // when - assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); - assert_ok!(Pools::unbond(Origin::signed(30), 30, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(30), 30, 5)); // then member-local unbonding is pretty much in sync with the global pools. assert_eq!( @@ -3824,7 +3863,7 @@ mod withdraw_unbonded { // when CurrentEra::set(1); - assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); // then still member-local unbonding is pretty much in sync with the global pools. assert_eq!( @@ -3848,7 +3887,7 @@ mod withdraw_unbonded { // when CurrentEra::set(2); - assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); // then still member-local unbonding is pretty much in sync with the global pools. assert_eq!( @@ -3873,7 +3912,7 @@ mod withdraw_unbonded { // when CurrentEra::set(5); - assert_ok!(Pools::unbond(Origin::signed(20), 20, 5)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 5)); // then assert_eq!( @@ -3900,7 +3939,7 @@ mod withdraw_unbonded { // now we start withdrawing unlocked bonds. // when - assert_ok!(Pools::withdraw_unbonded(Origin::signed(20), 20, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0)); // then assert_eq!( PoolMembers::::get(20).unwrap().unbonding_eras, @@ -3922,7 +3961,7 @@ mod withdraw_unbonded { ); // when - assert_ok!(Pools::withdraw_unbonded(Origin::signed(30), 30, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(30), 30, 0)); // then assert_eq!( PoolMembers::::get(30).unwrap().unbonding_eras, @@ -3950,14 +3989,14 @@ mod withdraw_unbonded { ExtBuilder::default().ed(1).build_and_execute(|| { // depositor now has 20, they can unbond to 10. 
assert_eq!(Pools::depositor_min_bond(), 10); - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); // now they can. - assert_ok!(Pools::unbond(Origin::signed(10), 10, 7)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 7)); // progress one era and unbond the leftover. CurrentEra::set(1); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 3)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 3)); assert_eq!( PoolMembers::::get(10).unwrap().unbonding_eras, @@ -3966,27 +4005,27 @@ mod withdraw_unbonded { // they can't unbond to a value below 10 other than 0.. assert_noop!( - Pools::unbond(Origin::signed(10), 10, 5), + Pools::unbond(RuntimeOrigin::signed(10), 10, 5), Error::::MinimumBondNotMet ); // but not even full, because they pool is not yet destroying. assert_noop!( - Pools::unbond(Origin::signed(10), 10, 10), + Pools::unbond(RuntimeOrigin::signed(10), 10, 10), Error::::MinimumBondNotMet ); // but now they can. unsafe_set_state(1, PoolState::Destroying); assert_noop!( - Pools::unbond(Origin::signed(10), 10, 5), + Pools::unbond(RuntimeOrigin::signed(10), 10, 5), Error::::MinimumBondNotMet ); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10)); // now the 7 should be free. CurrentEra::set(3); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); assert_eq!( pool_events_since_last_call(), @@ -4007,7 +4046,7 @@ mod withdraw_unbonded { // the 13 should be free now, and the member removed. CurrentEra::set(4); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); assert_eq!( pool_events_since_last_call(), @@ -4039,7 +4078,7 @@ mod create { Balances::make_free_balance_be(&11, StakingMock::minimum_bond() + ed); assert_ok!(Pools::create( - Origin::signed(11), + RuntimeOrigin::signed(11), StakingMock::minimum_bond(), 123, 456, @@ -4097,7 +4136,7 @@ mod create { fn create_errors_correctly() { ExtBuilder::default().with_check(0).build_and_execute(|| { assert_noop!( - Pools::create(Origin::signed(10), 420, 123, 456, 789), + Pools::create(RuntimeOrigin::signed(10), 420, 123, 456, 789), Error::::AccountBelongsToOtherPool ); @@ -4107,7 +4146,7 @@ mod create { // Then assert_noop!( - Pools::create(Origin::signed(11), 9, 123, 456, 789), + Pools::create(RuntimeOrigin::signed(11), 9, 123, 456, 789), Error::::MinimumBondNotMet ); @@ -4116,7 +4155,7 @@ mod create { // Then assert_noop!( - Pools::create(Origin::signed(11), 19, 123, 456, 789), + Pools::create(RuntimeOrigin::signed(11), 19, 123, 456, 789), Error::::MinimumBondNotMet ); @@ -4136,7 +4175,7 @@ mod create { // Then assert_noop!( - Pools::create(Origin::signed(11), 20, 123, 456, 789), + Pools::create(RuntimeOrigin::signed(11), 20, 123, 456, 789), Error::::MaxPools ); @@ -4153,7 +4192,10 @@ mod create { nominator: 11, state_toggler: 11, }); - assert_noop!(create.dispatch(Origin::signed(11)), Error::::MaxPoolMembers); + assert_noop!( + create.dispatch(RuntimeOrigin::signed(11)), + Error::::MaxPoolMembers + ); }); } } @@ -4166,27 +4208,27 @@ mod nominate { ExtBuilder::default().build_and_execute(|| { // Depositor can't nominate assert_noop!( - Pools::nominate(Origin::signed(10), 1, vec![21]), + Pools::nominate(RuntimeOrigin::signed(10), 1, vec![21]), Error::::NotNominator ); // State 
toggler can't nominate assert_noop!( - Pools::nominate(Origin::signed(902), 1, vec![21]), + Pools::nominate(RuntimeOrigin::signed(902), 1, vec![21]), Error::::NotNominator ); // Root can nominate - assert_ok!(Pools::nominate(Origin::signed(900), 1, vec![21])); + assert_ok!(Pools::nominate(RuntimeOrigin::signed(900), 1, vec![21])); assert_eq!(Nominations::get().unwrap(), vec![21]); // Nominator can nominate - assert_ok!(Pools::nominate(Origin::signed(901), 1, vec![31])); + assert_ok!(Pools::nominate(RuntimeOrigin::signed(901), 1, vec![31])); assert_eq!(Nominations::get().unwrap(), vec![31]); // Can't nominate for a pool that doesn't exist assert_noop!( - Pools::nominate(Origin::signed(902), 123, vec![21]), + Pools::nominate(RuntimeOrigin::signed(902), 123, vec![21]), Error::::PoolNotFound ); }); @@ -4204,16 +4246,16 @@ mod set_state { // Only the root and state toggler can change the state when the pool is ok to be open. assert_noop!( - Pools::set_state(Origin::signed(10), 1, PoolState::Blocked), + Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Blocked), Error::::CanNotChangeState ); assert_noop!( - Pools::set_state(Origin::signed(901), 1, PoolState::Blocked), + Pools::set_state(RuntimeOrigin::signed(901), 1, PoolState::Blocked), Error::::CanNotChangeState ); // Root can change state - assert_ok!(Pools::set_state(Origin::signed(900), 1, PoolState::Blocked)); + assert_ok!(Pools::set_state(RuntimeOrigin::signed(900), 1, PoolState::Blocked)); assert_eq!( pool_events_since_last_call(), @@ -4227,16 +4269,16 @@ mod set_state { assert_eq!(BondedPool::::get(1).unwrap().state, PoolState::Blocked); // State toggler can change state - assert_ok!(Pools::set_state(Origin::signed(902), 1, PoolState::Destroying)); + assert_ok!(Pools::set_state(RuntimeOrigin::signed(902), 1, PoolState::Destroying)); assert_eq!(BondedPool::::get(1).unwrap().state, PoolState::Destroying); // If the pool is destroying, then no one can set state assert_noop!( - Pools::set_state(Origin::signed(900), 1, PoolState::Blocked), + Pools::set_state(RuntimeOrigin::signed(900), 1, PoolState::Blocked), Error::::CanNotChangeState ); assert_noop!( - Pools::set_state(Origin::signed(902), 1, PoolState::Blocked), + Pools::set_state(RuntimeOrigin::signed(902), 1, PoolState::Blocked), Error::::CanNotChangeState ); @@ -4248,7 +4290,7 @@ mod set_state { bonded_pool.points = 100; bonded_pool.put(); // When - assert_ok!(Pools::set_state(Origin::signed(11), 1, PoolState::Destroying)); + assert_ok!(Pools::set_state(RuntimeOrigin::signed(11), 1, PoolState::Destroying)); // Then assert_eq!(BondedPool::::get(1).unwrap().state, PoolState::Destroying); @@ -4256,7 +4298,7 @@ mod set_state { Balances::make_free_balance_be(&default_bonded_account(), Balance::max_value() / 10); unsafe_set_state(1, PoolState::Open); // When - assert_ok!(Pools::set_state(Origin::signed(11), 1, PoolState::Destroying)); + assert_ok!(Pools::set_state(RuntimeOrigin::signed(11), 1, PoolState::Destroying)); // Then assert_eq!(BondedPool::::get(1).unwrap().state, PoolState::Destroying); @@ -4264,7 +4306,7 @@ mod set_state { // isn't destroying unsafe_set_state(1, PoolState::Open); assert_noop!( - Pools::set_state(Origin::signed(11), 1, PoolState::Blocked), + Pools::set_state(RuntimeOrigin::signed(11), 1, PoolState::Blocked), Error::::CanNotChangeState ); @@ -4287,28 +4329,28 @@ mod set_metadata { fn set_metadata_works() { ExtBuilder::default().build_and_execute(|| { // Root can set metadata - assert_ok!(Pools::set_metadata(Origin::signed(900), 1, vec![1, 1])); + 
assert_ok!(Pools::set_metadata(RuntimeOrigin::signed(900), 1, vec![1, 1])); assert_eq!(Metadata::::get(1), vec![1, 1]); // State toggler can set metadata - assert_ok!(Pools::set_metadata(Origin::signed(902), 1, vec![2, 2])); + assert_ok!(Pools::set_metadata(RuntimeOrigin::signed(902), 1, vec![2, 2])); assert_eq!(Metadata::::get(1), vec![2, 2]); // Depositor can't set metadata assert_noop!( - Pools::set_metadata(Origin::signed(10), 1, vec![3, 3]), + Pools::set_metadata(RuntimeOrigin::signed(10), 1, vec![3, 3]), Error::::DoesNotHavePermission ); // Nominator can't set metadata assert_noop!( - Pools::set_metadata(Origin::signed(901), 1, vec![3, 3]), + Pools::set_metadata(RuntimeOrigin::signed(901), 1, vec![3, 3]), Error::::DoesNotHavePermission ); // Metadata cannot be longer than `MaxMetadataLen` assert_noop!( - Pools::set_metadata(Origin::signed(900), 1, vec![1, 1, 1]), + Pools::set_metadata(RuntimeOrigin::signed(900), 1, vec![1, 1, 1]), Error::::MetadataExceedsMaxLen ); }); @@ -4323,7 +4365,7 @@ mod set_configs { ExtBuilder::default().build_and_execute(|| { // Setting works assert_ok!(Pools::set_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Set(1 as Balance), ConfigOp::Set(2 as Balance), ConfigOp::Set(3u32), @@ -4338,7 +4380,7 @@ mod set_configs { // Noop does nothing assert_storage_noop!(assert_ok!(Pools::set_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Noop, @@ -4348,7 +4390,7 @@ mod set_configs { // Removing works assert_ok!(Pools::set_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Remove, ConfigOp::Remove, ConfigOp::Remove, @@ -4380,7 +4422,7 @@ mod bond_extra { assert_eq!(Balances::free_balance(10), 100); // when - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(10))); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(10))); // then assert_eq!(Balances::free_balance(10), 90); @@ -4397,7 +4439,7 @@ mod bond_extra { ); // when - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::FreeBalance(20))); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::FreeBalance(20))); // then assert_eq!(Balances::free_balance(10), 70); @@ -4426,7 +4468,7 @@ mod bond_extra { assert_eq!(Balances::free_balance(10), 35); // when - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::Rewards)); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::Rewards)); // then assert_eq!(Balances::free_balance(10), 35); @@ -4469,7 +4511,7 @@ mod bond_extra { assert_eq!(Balances::free_balance(20), 20); // when - assert_ok!(Pools::bond_extra(Origin::signed(10), BondExtra::Rewards)); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(10), BondExtra::Rewards)); // then assert_eq!(Balances::free_balance(10), 35); @@ -4478,7 +4520,7 @@ mod bond_extra { assert_eq!(BondedPools::::get(1).unwrap().points, 30 + 1); // when - assert_ok!(Pools::bond_extra(Origin::signed(20), BondExtra::Rewards)); + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(20), BondExtra::Rewards)); // then assert_eq!(Balances::free_balance(20), 20); @@ -4522,7 +4564,7 @@ mod update_roles { // non-existent pools assert_noop!( Pools::update_roles( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, ConfigOp::Set(5), ConfigOp::Set(6), @@ -4534,7 +4576,7 @@ mod update_roles { // depositor cannot change roles. 
assert_noop!( Pools::update_roles( - Origin::signed(1), + RuntimeOrigin::signed(1), 1, ConfigOp::Set(5), ConfigOp::Set(6), @@ -4546,7 +4588,7 @@ mod update_roles { // nominator cannot change roles. assert_noop!( Pools::update_roles( - Origin::signed(901), + RuntimeOrigin::signed(901), 1, ConfigOp::Set(5), ConfigOp::Set(6), @@ -4557,7 +4599,7 @@ mod update_roles { // state-toggler assert_noop!( Pools::update_roles( - Origin::signed(902), + RuntimeOrigin::signed(902), 1, ConfigOp::Set(5), ConfigOp::Set(6), @@ -4568,7 +4610,7 @@ mod update_roles { // but root can assert_ok!(Pools::update_roles( - Origin::signed(900), + RuntimeOrigin::signed(900), 1, ConfigOp::Set(5), ConfigOp::Set(6), @@ -4599,7 +4641,7 @@ mod update_roles { // also root origin can assert_ok!(Pools::update_roles( - Origin::root(), + RuntimeOrigin::root(), 1, ConfigOp::Set(1), ConfigOp::Set(2), @@ -4626,7 +4668,7 @@ mod update_roles { // Noop works assert_ok!(Pools::update_roles( - Origin::root(), + RuntimeOrigin::root(), 1, ConfigOp::Set(11), ConfigOp::Noop, @@ -4654,7 +4696,7 @@ mod update_roles { // Remove works assert_ok!(Pools::update_roles( - Origin::root(), + RuntimeOrigin::root(), 1, ConfigOp::Set(69), ConfigOp::Remove, @@ -4730,7 +4772,7 @@ mod reward_counter_precision { // tad bit less. cannot be paid out. assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += expected_smallest_reward - 1)); - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_eq!(pool_events_since_last_call(), vec![]); // revert it. @@ -4740,7 +4782,7 @@ mod reward_counter_precision { // tad bit more. can be claimed. assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += expected_smallest_reward + 1)); - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_eq!( pool_events_since_last_call(), vec![Event::PaidOut { member: 10, pool_id: 1, payout: 1173 }] @@ -4776,7 +4818,7 @@ mod reward_counter_precision { // some whale now joins with the other half ot the total issuance. This will bloat all // the calculation regarding current reward counter. Balances::make_free_balance_be(&20, pool_bond * 2); - assert_ok!(Pools::join(Origin::signed(20), pool_bond, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), pool_bond, 1)); assert_eq!( pool_events_since_last_call(), @@ -4788,8 +4830,8 @@ mod reward_counter_precision { }] ); - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), @@ -4798,12 +4840,12 @@ mod reward_counter_precision { // now let a small member join with 10 DOTs. Balances::make_free_balance_be(&30, 20 * DOT); - assert_ok!(Pools::join(Origin::signed(30), 10 * DOT, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(30), 10 * DOT, 1)); // and give a reasonably small reward to the pool. assert_ok!(Balances::mutate_account(&default_reward_account(), |a| a.free += DOT)); - assert_ok!(Pools::claim_payout(Origin::signed(30))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(30))); assert_eq!( pool_events_since_last_call(), vec![ @@ -4841,7 +4883,10 @@ mod reward_counter_precision { // set to zero. In other tests that we want to assert a scenario won't fail, we should // also set the reward counters to some large value. 
Balances::make_free_balance_be(&20, pool_bond * 2); - assert_err!(Pools::join(Origin::signed(20), pool_bond, 1), Error::::OverflowRisk); + assert_err!( + Pools::join(RuntimeOrigin::signed(20), pool_bond, 1), + Error::::OverflowRisk + ); }) } @@ -4867,7 +4912,7 @@ mod reward_counter_precision { // and have a tiny fish join the pool as well.. Balances::make_free_balance_be(&20, 20 * DOT); - assert_ok!(Pools::join(Origin::signed(20), 10 * DOT, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10 * DOT, 1)); // earn some small rewards assert_ok!( @@ -4876,7 +4921,7 @@ mod reward_counter_precision { // no point in claiming for 20 (nonetheless, it should be harmless) assert!(pending_rewards(20).unwrap().is_zero()); - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_eq!( pool_events_since_last_call(), vec![ @@ -4896,7 +4941,7 @@ mod reward_counter_precision { Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000) ); assert!(pending_rewards(20).unwrap().is_zero()); - assert_ok!(Pools::claim_payout(Origin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); assert_eq!( pool_events_since_last_call(), vec![Event::PaidOut { member: 10, pool_id: 1, payout: 10000000 }] @@ -4907,8 +4952,8 @@ mod reward_counter_precision { Balances::mutate_account(&default_reward_account(), |a| a.free += DOT / 1000) ); assert_eq!(pending_rewards(20).unwrap(), 1); - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), vec![ @@ -4941,7 +4986,7 @@ mod reward_counter_precision { // and have a tiny fish join the pool as well.. Balances::make_free_balance_be(&20, 20 * DOT); - assert_ok!(Pools::join(Origin::signed(20), 10 * DOT, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10 * DOT, 1)); // earn some small rewards assert_ok!( @@ -4951,8 +4996,8 @@ mod reward_counter_precision { // if 20 claims now, their reward counter should stay the same, so that they have a // chance of claiming this if they let it accumulate. Also see // `if_small_member_waits_long_enough_they_will_earn_rewards` - assert_ok!(Pools::claim_payout(Origin::signed(10))); - assert_ok!(Pools::claim_payout(Origin::signed(20))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); assert_eq!( pool_events_since_last_call(), vec![ @@ -4998,7 +5043,7 @@ mod fuzz_test { const REWARD_AGENT_ACCOUNT: AccountId = 42; /// Grab random accounts, either known ones, or new ones. - fn random_signed_origin(rng: &mut R) -> (Origin, AccountId) { + fn random_signed_origin(rng: &mut R) -> (RuntimeOrigin, AccountId) { let count = PoolMembers::::count(); if rng.gen::() && count > 0 { // take an existing account. 
@@ -5011,11 +5056,11 @@ mod fuzz_test { let acc = if candidate == REWARD_AGENT_ACCOUNT { rng.gen::() } else { candidate }; - (Origin::signed(acc), acc) + (RuntimeOrigin::signed(acc), acc) } else { // create a new account let acc = rng.gen::(); - (Origin::signed(acc), acc) + (RuntimeOrigin::signed(acc), acc) } } @@ -5036,7 +5081,7 @@ mod fuzz_test { BondedPools::::iter_keys().collect::>().choose(&mut rng).map(|x| *x) } - fn random_call(mut rng: &mut R) -> (crate::pallet::Call, Origin) { + fn random_call(mut rng: &mut R) -> (crate::pallet::Call, RuntimeOrigin) { let op = rng.gen::(); let mut op_count = as frame_support::dispatch::GetCallName>::get_call_names() @@ -5133,7 +5178,7 @@ mod fuzz_test { } let pool_id = LastPoolId::::get(); let amount = 10 * ExistentialDeposit::get(); - let origin = Origin::signed(self.who); + let origin = RuntimeOrigin::signed(self.who); let _ = Balances::deposit_creating(&self.who, 10 * amount); self.pool_id = Some(pool_id); log::info!(target: "reward-agent", "🤖 reward agent joining in {} with {}", pool_id, amount); @@ -5148,7 +5193,7 @@ mod fuzz_test { return } let pre = Balances::free_balance(&42); - let origin = Origin::signed(42); + let origin = RuntimeOrigin::signed(42); assert_ok!(PoolsCall::::claim_payout {}.dispatch_bypass_filter(origin)); let post = Balances::free_balance(&42); diff --git a/frame/nomination-pools/test-staking/src/lib.rs b/frame/nomination-pools/test-staking/src/lib.rs index 8c6ecae937063..7d848e98174b4 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -35,11 +35,11 @@ fn pool_lifecycle_e2e() { assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. - assert_ok!(Pools::create(Origin::signed(10), 50, 10, 10, 10)); + assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); // have the pool nominate. - assert_ok!(Pools::nominate(Origin::signed(10), 1, vec![1, 2, 3])); + assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3])); assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 50),]); assert_eq!( @@ -51,8 +51,8 @@ fn pool_lifecycle_e2e() { ); // have two members join - assert_ok!(Pools::join(Origin::signed(20), 10, 1)); - assert_ok!(Pools::join(Origin::signed(21), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(21), 10, 1)); assert_eq!( staking_events_since_last_call(), @@ -67,17 +67,17 @@ fn pool_lifecycle_e2e() { ); // pool goes into destroying - assert_ok!(Pools::set_state(Origin::signed(10), 1, PoolState::Destroying)); + assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying)); // depositor cannot unbond yet. assert_noop!( - Pools::unbond(Origin::signed(10), 10, 50), + Pools::unbond(RuntimeOrigin::signed(10), 10, 50), PoolsError::::MinimumBondNotMet, ); // now the members want to unbond. 
- assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); - assert_ok!(Pools::unbond(Origin::signed(21), 21, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10)); assert_eq!(PoolMembers::::get(20).unwrap().unbonding_eras.len(), 1); assert_eq!(PoolMembers::::get(20).unwrap().points, 0); @@ -102,14 +102,14 @@ fn pool_lifecycle_e2e() { // depositor cannot still unbond assert_noop!( - Pools::unbond(Origin::signed(10), 10, 50), + Pools::unbond(RuntimeOrigin::signed(10), 10, 50), PoolsError::::MinimumBondNotMet, ); for e in 1..BondingDuration::get() { CurrentEra::::set(Some(e)); assert_noop!( - Pools::withdraw_unbonded(Origin::signed(20), 20, 0), + Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0), PoolsError::::CannotWithdrawAny ); } @@ -119,13 +119,13 @@ fn pool_lifecycle_e2e() { // depositor cannot still unbond assert_noop!( - Pools::unbond(Origin::signed(10), 10, 50), + Pools::unbond(RuntimeOrigin::signed(10), 10, 50), PoolsError::::MinimumBondNotMet, ); // but members can now withdraw. - assert_ok!(Pools::withdraw_unbonded(Origin::signed(20), 20, 0)); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(21), 21, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0)); assert!(PoolMembers::::get(20).is_none()); assert!(PoolMembers::::get(21).is_none()); @@ -146,12 +146,12 @@ fn pool_lifecycle_e2e() { // as soon as all members have left, the depositor can try to unbond, but since the // min-nominator intention is set, they must chill first. assert_noop!( - Pools::unbond(Origin::signed(10), 10, 50), + Pools::unbond(RuntimeOrigin::signed(10), 10, 50), pallet_staking::Error::::InsufficientBond ); - assert_ok!(Pools::chill(Origin::signed(10), 1)); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 50)); + assert_ok!(Pools::chill(RuntimeOrigin::signed(10), 1)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 50)); assert_eq!( staking_events_since_last_call(), @@ -164,7 +164,7 @@ fn pool_lifecycle_e2e() { // waiting another bonding duration: CurrentEra::::set(Some(BondingDuration::get() * 2)); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 1)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 1)); // pools is fully destroyed now. assert_eq!( @@ -190,7 +190,7 @@ fn pool_slash_e2e() { assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. - assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10)); + assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); @@ -205,8 +205,8 @@ fn pool_slash_e2e() { assert_eq!(Payee::::get(POOL1_BONDED), RewardDestination::Account(POOL1_REWARD)); // have two members join - assert_ok!(Pools::join(Origin::signed(20), 20, 1)); - assert_ok!(Pools::join(Origin::signed(21), 20, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 20, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(21), 20, 1)); assert_eq!( staking_events_since_last_call(), @@ -224,8 +224,8 @@ fn pool_slash_e2e() { CurrentEra::::set(Some(1)); // 20 / 80 of the total funds are unlocked, and safe from any further slash. 
- assert_ok!(Pools::unbond(Origin::signed(10), 10, 10)); - assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); assert_eq!( staking_events_since_last_call(), @@ -246,9 +246,9 @@ fn pool_slash_e2e() { // note: depositor cannot fully unbond at this point. // these funds will still get slashed. - assert_ok!(Pools::unbond(Origin::signed(10), 10, 10)); - assert_ok!(Pools::unbond(Origin::signed(20), 20, 10)); - assert_ok!(Pools::unbond(Origin::signed(21), 21, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10)); assert_eq!( staking_events_since_last_call(), @@ -290,7 +290,7 @@ fn pool_slash_e2e() { ); CurrentEra::::set(Some(3)); - assert_ok!(Pools::unbond(Origin::signed(21), 21, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, 10)); assert_eq!( PoolMembers::::get(21).unwrap(), @@ -310,8 +310,8 @@ fn pool_slash_e2e() { // now we start withdrawing. we do it all at once, at era 6 where 20 and 21 are fully free. CurrentEra::::set(Some(6)); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(20), 20, 0)); - assert_ok!(Pools::withdraw_unbonded(Origin::signed(21), 21, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(21), 21, 0)); assert_eq!( pool_events_since_last_call(), @@ -331,8 +331,8 @@ fn pool_slash_e2e() { ); // now, finally, we can unbond the depositor further than their current limit. - assert_ok!(Pools::set_state(Origin::signed(10), 1, PoolState::Destroying)); - assert_ok!(Pools::unbond(Origin::signed(10), 10, 20)); + assert_ok!(Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Destroying)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(10), 10, 20)); assert_eq!( staking_events_since_last_call(), @@ -357,7 +357,7 @@ fn pool_slash_e2e() { } ); // withdraw the depositor, they should lose 12 balance in total due to slash. - assert_ok!(Pools::withdraw_unbonded(Origin::signed(10), 10, 0)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 0)); assert_eq!( staking_events_since_last_call(), @@ -385,7 +385,7 @@ fn pool_slash_proportional() { assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. 
- assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10)); + assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); @@ -399,9 +399,9 @@ fn pool_slash_proportional() { // have two members join let bond = 20; - assert_ok!(Pools::join(Origin::signed(20), bond, 1)); - assert_ok!(Pools::join(Origin::signed(21), bond, 1)); - assert_ok!(Pools::join(Origin::signed(22), bond, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(21), bond, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(22), bond, 1)); assert_eq!( staking_events_since_last_call(), @@ -424,7 +424,7 @@ fn pool_slash_proportional() { CurrentEra::::set(Some(99)); // and unbond - assert_ok!(Pools::unbond(Origin::signed(20), 20, bond)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), @@ -442,7 +442,7 @@ fn pool_slash_proportional() { ); CurrentEra::::set(Some(100)); - assert_ok!(Pools::unbond(Origin::signed(21), 21, bond)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, bond)); assert_eq!( staking_events_since_last_call(), vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] @@ -459,7 +459,7 @@ fn pool_slash_proportional() { ); CurrentEra::::set(Some(101)); - assert_ok!(Pools::unbond(Origin::signed(22), 22, bond)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, bond)); assert_eq!( staking_events_since_last_call(), vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] @@ -516,7 +516,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. - assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10)); + assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); assert_eq!( pool_events_since_last_call(), @@ -528,7 +528,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { // have two members join let bond = 20; - assert_ok!(Pools::join(Origin::signed(20), bond, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); assert_eq!( staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, bond)] @@ -540,7 +540,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { // progress and unbond. CurrentEra::::set(Some(99)); - assert_ok!(Pools::unbond(Origin::signed(20), 20, bond)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), vec![StakingEvent::Unbonded(POOL1_BONDED, bond)] @@ -589,7 +589,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { assert_eq!(Staking::current_era(), None); // create the pool, we know this has id 1. 
- assert_ok!(Pools::create(Origin::signed(10), 40, 10, 10, 10)); + assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); assert_eq!( pool_events_since_last_call(), @@ -601,7 +601,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { // have two members join let bond = 20; - assert_ok!(Pools::join(Origin::signed(20), bond, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); assert_eq!( staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, bond)] @@ -613,7 +613,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { // progress and unbond. CurrentEra::::set(Some(99)); - assert_ok!(Pools::unbond(Origin::signed(20), 20, bond)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), vec![StakingEvent::Unbonded(POOL1_BONDED, bond)] diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index cb63ad04d1a38..39111b7f813c7 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -43,7 +43,7 @@ impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = AccountIndex; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; @@ -227,7 +227,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { // set some limit for nominations. assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), pallet_staking::ConfigOp::Set(10), // minimum nominator bond pallet_staking::ConfigOp::Noop, pallet_staking::ConfigOp::Noop, diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 43c248a09a0b8..c6d8589870f52 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -50,7 +50,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = AccountIndex; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 5b07aab5bd85e..31dac8d51d3b1 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -92,7 +92,7 @@ impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = (); type DbWeight = RocksDbWeight; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/preimage/src/benchmarking.rs b/frame/preimage/src/benchmarking.rs index e0d7e9614abbc..3c7be9db573f4 100644 --- a/frame/preimage/src/benchmarking.rs +++ b/frame/preimage/src/benchmarking.rs @@ -72,7 +72,7 @@ benchmarks! { let s in 0 .. T::MaxSize::get(); let (preimage, hash) = sized_preimage_and_hash::(s); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); - }: note_preimage(T::ManagerOrigin::successful_origin(), preimage) + }: note_preimage(T::ManagerOrigin::successful_origin(), preimage) verify { assert!(Preimage::::have_preimage(&hash)); } @@ -91,7 +91,7 @@ benchmarks! 
{ unnote_no_deposit_preimage { let (preimage, hash) = preimage_and_hash::(); assert_ok!(Preimage::::note_preimage(T::ManagerOrigin::successful_origin(), preimage)); - }: unnote_preimage(T::ManagerOrigin::successful_origin(), hash) + }: unnote_preimage(T::ManagerOrigin::successful_origin(), hash) verify { assert!(!Preimage::::have_preimage(&hash)); } @@ -102,7 +102,7 @@ benchmarks! { let noter = funded_account::("noter", 0); whitelist_account!(noter); assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter).into(), preimage)); - }: _(T::ManagerOrigin::successful_origin(), hash) + }: _(T::ManagerOrigin::successful_origin(), hash) verify { assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); } @@ -110,14 +110,14 @@ benchmarks! { request_no_deposit_preimage { let (preimage, hash) = preimage_and_hash::(); assert_ok!(Preimage::::note_preimage(T::ManagerOrigin::successful_origin(), preimage)); - }: request_preimage(T::ManagerOrigin::successful_origin(), hash) + }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); } // Cheap request - the preimage is not yet noted, so deposit to unreserve. request_unnoted_preimage { let (_, hash) = preimage_and_hash::(); - }: request_preimage(T::ManagerOrigin::successful_origin(), hash) + }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); } @@ -125,7 +125,7 @@ benchmarks! { request_requested_preimage { let (_, hash) = preimage_and_hash::(); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); - }: request_preimage(T::ManagerOrigin::successful_origin(), hash) + }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(2))); } @@ -135,7 +135,7 @@ benchmarks! { let (preimage, hash) = preimage_and_hash::(); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); assert_ok!(Preimage::::note_preimage(T::ManagerOrigin::successful_origin(), preimage)); - }: _(T::ManagerOrigin::successful_origin(), hash) + }: _(T::ManagerOrigin::successful_origin(), hash) verify { assert_eq!(StatusFor::::get(&hash), None); } @@ -143,7 +143,7 @@ benchmarks! { unrequest_unnoted_preimage { let (_, hash) = preimage_and_hash::(); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); - }: unrequest_preimage(T::ManagerOrigin::successful_origin(), hash) + }: unrequest_preimage(T::ManagerOrigin::successful_origin(), hash) verify { assert_eq!(StatusFor::::get(&hash), None); } @@ -152,7 +152,7 @@ benchmarks! { let (_, hash) = preimage_and_hash::(); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); - }: unrequest_preimage(T::ManagerOrigin::successful_origin(), hash) + }: unrequest_preimage(T::ManagerOrigin::successful_origin(), hash) verify { assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); } diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index 749e5924cc885..90f5ac175f540 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -86,7 +86,7 @@ pub mod pallet { /// An origin that can request a preimage be placed on-chain without a deposit or fee, or /// manage existing preimages. 
- type ManagerOrigin: EnsureOrigin; + type ManagerOrigin: EnsureOrigin; /// Max size allowed for a preimage. type MaxSize: Get; @@ -191,7 +191,9 @@ pub mod pallet { impl Pallet { /// Ensure that the origin is either the `ManagerOrigin` or a signed origin. - fn ensure_signed_or_manager(origin: T::Origin) -> Result, BadOrigin> { + fn ensure_signed_or_manager( + origin: T::RuntimeOrigin, + ) -> Result, BadOrigin> { if T::ManagerOrigin::ensure_origin(origin.clone()).is_ok() { return Ok(None) } diff --git a/frame/preimage/src/mock.rs b/frame/preimage/src/mock.rs index 75e8767f3e59f..e12598a35b4bb 100644 --- a/frame/preimage/src/mock.rs +++ b/frame/preimage/src/mock.rs @@ -57,7 +57,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = RocksDbWeight; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/preimage/src/tests.rs b/frame/preimage/src/tests.rs index 721bb128de239..e6b64ae16dd8c 100644 --- a/frame/preimage/src/tests.rs +++ b/frame/preimage/src/tests.rs @@ -26,7 +26,7 @@ use pallet_balances::Error as BalancesError; #[test] fn user_note_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_eq!(Balances::reserved_balance(2), 3); assert_eq!(Balances::free_balance(2), 97); @@ -35,11 +35,11 @@ fn user_note_preimage_works() { assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); assert_noop!( - Preimage::note_preimage(Origin::signed(2), vec![1]), + Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1]), Error::::AlreadyNoted ); assert_noop!( - Preimage::note_preimage(Origin::signed(0), vec![2]), + Preimage::note_preimage(RuntimeOrigin::signed(0), vec![2]), BalancesError::::InsufficientBalance ); }); @@ -48,7 +48,7 @@ fn user_note_preimage_works() { #[test] fn manager_note_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(Origin::signed(1), vec![1])); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::free_balance(1), 100); @@ -57,7 +57,7 @@ fn manager_note_preimage_works() { assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); assert_noop!( - Preimage::note_preimage(Origin::signed(1), vec![1]), + Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1]), Error::::AlreadyNoted ); }); @@ -66,19 +66,19 @@ fn manager_note_preimage_works() { #[test] fn user_unnote_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_noop!( - Preimage::unnote_preimage(Origin::signed(3), hashed([1])), + Preimage::unnote_preimage(RuntimeOrigin::signed(3), hashed([1])), Error::::NotAuthorized ); assert_noop!( - Preimage::unnote_preimage(Origin::signed(2), hashed([2])), + Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([2])), Error::::NotNoted ); - assert_ok!(Preimage::unnote_preimage(Origin::signed(2), hashed([1]))); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); assert_noop!( - Preimage::unnote_preimage(Origin::signed(2), hashed([1])), + Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1])), Error::::NotNoted ); @@ -91,10 +91,10 @@ fn user_unnote_preimage_works() { #[test] fn 
manager_unnote_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(Origin::signed(1), vec![1])); - assert_ok!(Preimage::unnote_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_noop!( - Preimage::unnote_preimage(Origin::signed(1), hashed([1])), + Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1])), Error::::NotNoted ); @@ -107,17 +107,17 @@ fn manager_unnote_preimage_works() { #[test] fn manager_unnote_user_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_noop!( - Preimage::unnote_preimage(Origin::signed(3), hashed([1])), + Preimage::unnote_preimage(RuntimeOrigin::signed(3), hashed([1])), Error::::NotAuthorized ); assert_noop!( - Preimage::unnote_preimage(Origin::signed(2), hashed([2])), + Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([2])), Error::::NotNoted ); - assert_ok!(Preimage::unnote_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1]))); let h = hashed([1]); assert!(!Preimage::have_preimage(&h)); @@ -128,10 +128,10 @@ fn manager_unnote_user_preimage_works() { #[test] fn requested_then_noted_preimage_cannot_be_unnoted() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(Origin::signed(1), vec![1])); - assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); + assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_noop!( - Preimage::unnote_preimage(Origin::signed(1), hashed([1])), + Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1])), Error::::Requested ); @@ -144,16 +144,16 @@ fn requested_then_noted_preimage_cannot_be_unnoted() { #[test] fn request_note_order_makes_no_difference() { let one_way = new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(Origin::signed(1), vec![1])); + assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), ) }); new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(Origin::signed(1), vec![1])); - assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); + assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); let other_way = ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), @@ -165,8 +165,8 @@ fn request_note_order_makes_no_difference() { #[test] fn requested_then_user_noted_preimage_is_free() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_eq!(Balances::reserved_balance(2), 0); assert_eq!(Balances::free_balance(2), 100); @@ -179,16 +179,16 @@ fn 
requested_then_user_noted_preimage_is_free() { #[test] fn request_user_note_order_makes_no_difference() { let one_way = new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), ) }); new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); - assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); let other_way = ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), @@ -200,20 +200,20 @@ fn request_user_note_order_makes_no_difference() { #[test] fn unrequest_preimage_works() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); - assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_noop!( - Preimage::unrequest_preimage(Origin::signed(1), hashed([2])), + Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([2])), Error::::NotRequested ); - assert_ok!(Preimage::unrequest_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert!(Preimage::have_preimage(&hashed([1]))); - assert_ok!(Preimage::unrequest_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_noop!( - Preimage::unrequest_preimage(Origin::signed(1), hashed([1])), + Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1])), Error::::NotRequested ); }); @@ -222,10 +222,10 @@ fn unrequest_preimage_works() { #[test] fn user_noted_then_requested_preimage_is_refunded_once_only() { new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1; 3])); - assert_ok!(Preimage::note_preimage(Origin::signed(2), vec![1])); - assert_ok!(Preimage::request_preimage(Origin::signed(1), hashed([1]))); - assert_ok!(Preimage::unrequest_preimage(Origin::signed(1), hashed([1]))); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1; 3])); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); + assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); // Still have reserve from `vec[1; 3]`. assert_eq!(Balances::reserved_balance(2), 5); assert_eq!(Balances::free_balance(2), 95); diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index dcc19e85085f8..af96069397255 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -113,7 +113,7 @@ pub mod pallet { /// The overarching call type. 
type RuntimeCall: Parameter - + Dispatchable + + Dispatchable + GetDispatchInfo + From> + IsSubType> @@ -819,7 +819,7 @@ impl Pallet { call: ::RuntimeCall, ) { // This is a freshly authenticated new account, the origin restrictions doesn't apply. - let mut origin: T::Origin = frame_system::RawOrigin::Signed(real).into(); + let mut origin: T::RuntimeOrigin = frame_system::RawOrigin::Signed(real).into(); origin.add_filter(move |c: &::RuntimeCall| { let c = ::RuntimeCall::from_ref(c); // We make sure the proxy call does access this pallet to change modify proxies. diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index 0025b1510526e..3ed80107e0702 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -61,7 +61,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -204,7 +204,7 @@ fn call_transfer(dest: u64, value: u64) -> RuntimeCall { #[test] fn announcement_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 1)); System::assert_last_event( ProxyEvent::ProxyAdded { delegator: 1, @@ -214,10 +214,10 @@ fn announcement_works() { } .into(), ); - assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(2), 3, ProxyType::Any, 1)); assert_eq!(Balances::reserved_balance(3), 0); - assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); let announcements = Announcements::::get(3); assert_eq!( announcements.0, @@ -225,7 +225,7 @@ fn announcement_works() { ); assert_eq!(Balances::reserved_balance(3), announcements.1); - assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, [2; 32].into())); let announcements = Announcements::::get(3); assert_eq!( announcements.0, @@ -236,20 +236,23 @@ fn announcement_works() { ); assert_eq!(Balances::reserved_balance(3), announcements.1); - assert_noop!(Proxy::announce(Origin::signed(3), 2, [3; 32].into()), Error::::TooMany); + assert_noop!( + Proxy::announce(RuntimeOrigin::signed(3), 2, [3; 32].into()), + Error::::TooMany + ); }); } #[test] fn remove_announcement_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); - assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); - assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); - assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, [2; 32].into())); let e = Error::::NotFound; - assert_noop!(Proxy::remove_announcement(Origin::signed(3), 1, [0; 32].into()), e); - assert_ok!(Proxy::remove_announcement(Origin::signed(3), 1, [1; 32].into())); + assert_noop!(Proxy::remove_announcement(RuntimeOrigin::signed(3), 1, [0; 32].into()), e); + assert_ok!(Proxy::remove_announcement(RuntimeOrigin::signed(3), 1, [1; 32].into())); let announcements = Announcements::::get(3); 
assert_eq!( announcements.0, @@ -262,15 +265,15 @@ fn remove_announcement_works() { #[test] fn reject_announcement_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); - assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); - assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); - assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, [1; 32].into())); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 2, [2; 32].into())); let e = Error::::NotFound; - assert_noop!(Proxy::reject_announcement(Origin::signed(1), 3, [0; 32].into()), e); + assert_noop!(Proxy::reject_announcement(RuntimeOrigin::signed(1), 3, [0; 32].into()), e); let e = Error::::NotFound; - assert_noop!(Proxy::reject_announcement(Origin::signed(4), 3, [1; 32].into()), e); - assert_ok!(Proxy::reject_announcement(Origin::signed(1), 3, [1; 32].into())); + assert_noop!(Proxy::reject_announcement(RuntimeOrigin::signed(4), 3, [1; 32].into()), e); + assert_ok!(Proxy::reject_announcement(RuntimeOrigin::signed(1), 3, [1; 32].into())); let announcements = Announcements::::get(3); assert_eq!( announcements.0, @@ -283,41 +286,44 @@ fn reject_announcement_works() { #[test] fn announcer_must_be_proxy() { new_test_ext().execute_with(|| { - assert_noop!(Proxy::announce(Origin::signed(2), 1, H256::zero()), Error::::NotProxy); + assert_noop!( + Proxy::announce(RuntimeOrigin::signed(2), 1, H256::zero()), + Error::::NotProxy + ); }); } #[test] fn delayed_requires_pre_announcement() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 1)); let call = Box::new(call_transfer(6, 1)); let e = Error::::Unannounced; - assert_noop!(Proxy::proxy(Origin::signed(2), 1, None, call.clone()), e); + assert_noop!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone()), e); let e = Error::::Unannounced; - assert_noop!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone()), e); + assert_noop!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 2, 1, None, call.clone()), e); let call_hash = BlakeTwo256::hash_of(&call); - assert_ok!(Proxy::announce(Origin::signed(2), 1, call_hash)); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(2), 1, call_hash)); system::Pallet::::set_block_number(2); - assert_ok!(Proxy::proxy_announced(Origin::signed(0), 2, 1, None, call.clone())); + assert_ok!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 2, 1, None, call.clone())); }); } #[test] fn proxy_announced_removes_announcement_and_returns_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 1)); - assert_ok!(Proxy::add_proxy(Origin::signed(2), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 1)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(2), 3, ProxyType::Any, 1)); let call = Box::new(call_transfer(6, 1)); let call_hash = BlakeTwo256::hash_of(&call); - assert_ok!(Proxy::announce(Origin::signed(3), 1, call_hash)); - assert_ok!(Proxy::announce(Origin::signed(3), 2, call_hash)); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 1, call_hash)); + assert_ok!(Proxy::announce(RuntimeOrigin::signed(3), 
2, call_hash)); // Too early to execute announced call let e = Error::::Unannounced; - assert_noop!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone()), e); + assert_noop!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 3, 1, None, call.clone()), e); system::Pallet::::set_block_number(2); - assert_ok!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone())); + assert_ok!(Proxy::proxy_announced(RuntimeOrigin::signed(0), 3, 1, None, call.clone())); let announcements = Announcements::::get(3); assert_eq!(announcements.0, vec![Announcement { real: 2, call_hash, height: 1 }]); assert_eq!(Balances::reserved_balance(3), announcements.1); @@ -328,16 +334,16 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { fn filtering_works() { new_test_ext().execute_with(|| { assert!(Balances::mutate_account(&1, |a| a.free = 1000).is_ok()); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0)); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 4, ProxyType::JustUtility, 0)); let call = Box::new(call_transfer(6, 1)); - assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); - assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); - assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); @@ -350,28 +356,28 @@ fn filtering_works() { index: 0, call: inner.clone(), })); - assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); - assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); let call = Box::new(RuntimeCall::Utility(UtilityCall::batch { calls: vec![*inner] })); - assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchCompleted.into(), ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); - assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) 
}.into(), ); - assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchInterrupted { index: 0, error: SystemError::CallFiltered.into() } .into(), @@ -384,16 +390,16 @@ fn filtering_works() { 0, ))); let call = Box::new(RuntimeCall::Utility(UtilityCall::batch { calls: vec![*inner] })); - assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchCompleted.into(), ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), ]); - assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone())); expect_events(vec![ UtilityEvent::BatchInterrupted { index: 0, error: SystemError::CallFiltered.into() } .into(), @@ -401,15 +407,15 @@ fn filtering_works() { ]); let call = Box::new(RuntimeCall::Proxy(ProxyCall::remove_proxies {})); - assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); expect_events(vec![ BalancesEvent::::Unreserved { who: 1, amount: 5 }.into(), ProxyEvent::ProxyExecuted { result: Ok(()) }.into(), @@ -420,27 +426,27 @@ fn filtering_works() { #[test] fn add_remove_proxies_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0)); assert_noop!( - Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0), + Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0), Error::::Duplicate ); assert_eq!(Balances::reserved_balance(1), 2); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 3); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 4, ProxyType::JustUtility, 0)); assert_eq!(Balances::reserved_balance(1), 5); assert_noop!( - Proxy::add_proxy(Origin::signed(1), 4, ProxyType::Any, 0), + Proxy::add_proxy(RuntimeOrigin::signed(1), 4, ProxyType::Any, 0), Error::::TooMany ); assert_noop!( - Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0), + Proxy::remove_proxy(RuntimeOrigin::signed(1), 3, 
ProxyType::JustTransfer, 0), Error::::NotFound ); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); + assert_ok!(Proxy::remove_proxy(RuntimeOrigin::signed(1), 4, ProxyType::JustUtility, 0)); System::assert_last_event( ProxyEvent::ProxyRemoved { delegator: 1, @@ -451,7 +457,7 @@ fn add_remove_proxies_works() { .into(), ); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); + assert_ok!(Proxy::remove_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 3); System::assert_last_event( ProxyEvent::ProxyRemoved { @@ -462,7 +468,7 @@ fn add_remove_proxies_works() { } .into(), ); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); + assert_ok!(Proxy::remove_proxy(RuntimeOrigin::signed(1), 2, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(1), 2); System::assert_last_event( ProxyEvent::ProxyRemoved { @@ -473,7 +479,7 @@ fn add_remove_proxies_works() { } .into(), ); - assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::remove_proxy(RuntimeOrigin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 0); System::assert_last_event( ProxyEvent::ProxyRemoved { @@ -485,7 +491,7 @@ fn add_remove_proxies_works() { .into(), ); assert_noop!( - Proxy::add_proxy(Origin::signed(1), 1, ProxyType::Any, 0), + Proxy::add_proxy(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0), Error::::NoSelfProxy ); }); @@ -494,10 +500,10 @@ fn add_remove_proxies_works() { #[test] fn cannot_add_proxy_without_balance() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(5), 3, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(5), 3, ProxyType::Any, 0)); assert_eq!(Balances::reserved_balance(5), 2); assert_noop!( - Proxy::add_proxy(Origin::signed(5), 4, ProxyType::Any, 0), + Proxy::add_proxy(RuntimeOrigin::signed(5), 4, ProxyType::Any, 0), BalancesError::::InsufficientBalance ); }); @@ -506,24 +512,24 @@ fn cannot_add_proxy_without_balance() { #[test] fn proxying_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); - assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 2, ProxyType::JustTransfer, 0)); + assert_ok!(Proxy::add_proxy(RuntimeOrigin::signed(1), 3, ProxyType::Any, 0)); let call = Box::new(call_transfer(6, 1)); assert_noop!( - Proxy::proxy(Origin::signed(4), 1, None, call.clone()), + Proxy::proxy(RuntimeOrigin::signed(4), 1, None, call.clone()), Error::::NotProxy ); assert_noop!( - Proxy::proxy(Origin::signed(2), 1, Some(ProxyType::Any), call.clone()), + Proxy::proxy(RuntimeOrigin::signed(2), 1, Some(ProxyType::Any), call.clone()), Error::::NotProxy ); - assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); let call = Box::new(RuntimeCall::System(SystemCall::set_code { code: vec![] })); - assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); @@ -533,11 +539,11 
@@ fn proxying_works() { value: 1, })); assert_ok!(RuntimeCall::Proxy(super::Call::new_call_variant_proxy(1, None, call.clone())) - .dispatch(Origin::signed(2))); + .dispatch(RuntimeOrigin::signed(2))); System::assert_last_event( ProxyEvent::ProxyExecuted { result: Err(SystemError::CallFiltered.into()) }.into(), ); - assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(3), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 2); }); @@ -546,7 +552,7 @@ fn proxying_works() { #[test] fn anonymous_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None); System::assert_last_event( ProxyEvent::AnonymousCreated { @@ -559,23 +565,23 @@ fn anonymous_works() { ); // other calls to anonymous allowed as long as they're not exactly the same. - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::JustTransfer, 0, 0)); - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 1)); + assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::JustTransfer, 0, 0)); + assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::Any, 0, 1)); let anon2 = Proxy::anonymous_account(&2, &ProxyType::Any, 0, None); - assert_ok!(Proxy::anonymous(Origin::signed(2), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(2), ProxyType::Any, 0, 0)); assert_noop!( - Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0), + Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0), Error::::Duplicate ); System::set_extrinsic_index(1); - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); System::set_extrinsic_index(0); System::set_block_number(2); - assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); let call = Box::new(call_transfer(6, 1)); - assert_ok!(Balances::transfer(Origin::signed(3), anon, 5)); - assert_ok!(Proxy::proxy(Origin::signed(1), anon, None, call)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), anon, 5)); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call)); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); @@ -586,18 +592,18 @@ fn anonymous_works() { 1, 0, ))); - assert_ok!(Proxy::proxy(Origin::signed(2), anon2, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(2), anon2, None, call.clone())); let de = DispatchError::from(Error::::NoPermission).stripped(); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Err(de) }.into()); assert_noop!( - Proxy::kill_anonymous(Origin::signed(1), 1, ProxyType::Any, 0, 1, 0), + Proxy::kill_anonymous(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0, 1, 0), Error::::NoPermission ); assert_eq!(Balances::free_balance(1), 0); - assert_ok!(Proxy::proxy(Origin::signed(1), anon, None, call.clone())); + assert_ok!(Proxy::proxy(RuntimeOrigin::signed(1), anon, None, call.clone())); assert_eq!(Balances::free_balance(1), 2); assert_noop!( - Proxy::proxy(Origin::signed(1), anon, None, call.clone()), + Proxy::proxy(RuntimeOrigin::signed(1), anon, None, 
call.clone()), Error::::NotProxy ); }); diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index bd86cc0d4029d..be970ba2a8422 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -197,7 +197,7 @@ mod tests { type BlockWeights = (); type BlockLength = BlockLength; type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 44b942d000012..bdd5d26373980 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -241,12 +241,12 @@ impl, I: 'static> GetMaxVoters for Pallet { /// Guard to ensure that the given origin is a member of the collective. The rank of the member is /// the `Success` value. pub struct EnsureRanked(PhantomData<(T, I)>); -impl, I: 'static, const MIN_RANK: u16> EnsureOrigin +impl, I: 'static, const MIN_RANK: u16> EnsureOrigin for EnsureRanked { type Success = Rank; - fn try_origin(o: T::Origin) -> Result { + fn try_origin(o: T::RuntimeOrigin) -> Result { let who = frame_system::EnsureSigned::try_origin(o)?; match Members::::get(&who) { Some(MemberRecord { rank, .. }) if rank >= MIN_RANK => Ok(rank), @@ -255,13 +255,13 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin } #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { + fn try_successful_origin() -> Result { let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> T::Origin { + fn successful_origin() -> T::RuntimeOrigin { match Self::try_successful_origin() { Ok(o) => o, Err(()) => { @@ -277,12 +277,12 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin /// Guard to ensure that the given origin is a member of the collective. The account ID of the /// member is the `Success` value. pub struct EnsureMember(PhantomData<(T, I)>); -impl, I: 'static, const MIN_RANK: u16> EnsureOrigin +impl, I: 'static, const MIN_RANK: u16> EnsureOrigin for EnsureMember { type Success = T::AccountId; - fn try_origin(o: T::Origin) -> Result { + fn try_origin(o: T::RuntimeOrigin) -> Result { let who = frame_system::EnsureSigned::try_origin(o)?; match Members::::get(&who) { Some(MemberRecord { rank, .. }) if rank >= MIN_RANK => Ok(who), @@ -291,13 +291,13 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin } #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { + fn try_successful_origin() -> Result { let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> T::Origin { + fn successful_origin() -> T::RuntimeOrigin { match Self::try_successful_origin() { Ok(o) => o, Err(()) => { @@ -313,12 +313,12 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin /// Guard to ensure that the given origin is a member of the collective. The pair of including both /// the account ID and the rank of the member is the `Success` value. 
pub struct EnsureRankedMember(PhantomData<(T, I)>); -impl, I: 'static, const MIN_RANK: u16> EnsureOrigin +impl, I: 'static, const MIN_RANK: u16> EnsureOrigin for EnsureRankedMember { type Success = (T::AccountId, Rank); - fn try_origin(o: T::Origin) -> Result { + fn try_origin(o: T::RuntimeOrigin) -> Result { let who = frame_system::EnsureSigned::try_origin(o)?; match Members::::get(&who) { Some(MemberRecord { rank, .. }) if rank >= MIN_RANK => Ok((who, rank)), @@ -327,13 +327,13 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin } #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { + fn try_successful_origin() -> Result { let who = IndexToId::::get(MIN_RANK, 0).ok_or(())?; Ok(frame_system::RawOrigin::Signed(who).into()) } #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> T::Origin { + fn successful_origin() -> T::RuntimeOrigin { match Self::try_successful_origin() { Ok(o) => o, Err(()) => { @@ -367,11 +367,11 @@ pub mod pallet { /// The origin required to add or promote a mmember. The success value indicates the /// maximum rank *to which* the promotion may be. - type PromoteOrigin: EnsureOrigin; + type PromoteOrigin: EnsureOrigin; /// The origin required to demote or remove a member. The success value indicates the /// maximum rank *from which* the demotion/removal may be. - type DemoteOrigin: EnsureOrigin; + type DemoteOrigin: EnsureOrigin; /// The polling system used for our voting. type Polls: Polling, Votes = Votes, Moment = Self::BlockNumber>; diff --git a/frame/ranked-collective/src/tests.rs b/frame/ranked-collective/src/tests.rs index 25413b490483c..68bb79f3d07f7 100644 --- a/frame/ranked-collective/src/tests.rs +++ b/frame/ranked-collective/src/tests.rs @@ -58,7 +58,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -239,10 +239,10 @@ fn basic_stuff() { #[test] fn member_lifecycle_works() { new_test_ext().execute_with(|| { - assert_ok!(Club::add_member(Origin::root(), 1)); - assert_ok!(Club::promote_member(Origin::root(), 1)); - assert_ok!(Club::demote_member(Origin::root(), 1)); - assert_ok!(Club::demote_member(Origin::root(), 1)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); assert_eq!(member_count(0), 0); assert_eq!(member_count(1), 0); }); @@ -251,29 +251,29 @@ fn member_lifecycle_works() { #[test] fn add_remove_works() { new_test_ext().execute_with(|| { - assert_noop!(Club::add_member(Origin::signed(1), 1), DispatchError::BadOrigin); - assert_ok!(Club::add_member(Origin::root(), 1)); + assert_noop!(Club::add_member(RuntimeOrigin::signed(1), 1), DispatchError::BadOrigin); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); assert_eq!(member_count(0), 1); - assert_ok!(Club::demote_member(Origin::root(), 1)); + assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); assert_eq!(member_count(0), 0); - assert_ok!(Club::add_member(Origin::root(), 1)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); assert_eq!(member_count(0), 1); - assert_ok!(Club::add_member(Origin::root(), 2)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); assert_eq!(member_count(0), 2); - assert_ok!(Club::add_member(Origin::root(), 3)); + 
assert_ok!(Club::add_member(RuntimeOrigin::root(), 3)); assert_eq!(member_count(0), 3); - assert_ok!(Club::demote_member(Origin::root(), 3)); + assert_ok!(Club::demote_member(RuntimeOrigin::root(), 3)); assert_eq!(member_count(0), 2); - assert_ok!(Club::demote_member(Origin::root(), 1)); + assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); assert_eq!(member_count(0), 1); - assert_ok!(Club::demote_member(Origin::root(), 2)); + assert_ok!(Club::demote_member(RuntimeOrigin::root(), 2)); assert_eq!(member_count(0), 0); }); } @@ -281,29 +281,29 @@ fn add_remove_works() { #[test] fn promote_demote_works() { new_test_ext().execute_with(|| { - assert_noop!(Club::add_member(Origin::signed(1), 1), DispatchError::BadOrigin); - assert_ok!(Club::add_member(Origin::root(), 1)); + assert_noop!(Club::add_member(RuntimeOrigin::signed(1), 1), DispatchError::BadOrigin); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); assert_eq!(member_count(0), 1); assert_eq!(member_count(1), 0); - assert_ok!(Club::add_member(Origin::root(), 2)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); assert_eq!(member_count(0), 2); assert_eq!(member_count(1), 0); - assert_ok!(Club::promote_member(Origin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); assert_eq!(member_count(0), 2); assert_eq!(member_count(1), 1); - assert_ok!(Club::promote_member(Origin::root(), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); assert_eq!(member_count(0), 2); assert_eq!(member_count(1), 2); - assert_ok!(Club::demote_member(Origin::root(), 1)); + assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); assert_eq!(member_count(0), 2); assert_eq!(member_count(1), 1); - assert_noop!(Club::demote_member(Origin::signed(1), 1), DispatchError::BadOrigin); - assert_ok!(Club::demote_member(Origin::root(), 1)); + assert_noop!(Club::demote_member(RuntimeOrigin::signed(1), 1), DispatchError::BadOrigin); + assert_ok!(Club::demote_member(RuntimeOrigin::root(), 1)); assert_eq!(member_count(0), 1); assert_eq!(member_count(1), 1); }); @@ -312,88 +312,100 @@ fn promote_demote_works() { #[test] fn promote_demote_by_rank_works() { new_test_ext().execute_with(|| { - assert_ok!(Club::add_member(Origin::root(), 1)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); for _ in 0..7 { - assert_ok!(Club::promote_member(Origin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); } // #1 can add #2 and promote to rank 1 - assert_ok!(Club::add_member(Origin::signed(1), 2)); - assert_ok!(Club::promote_member(Origin::signed(1), 2)); + assert_ok!(Club::add_member(RuntimeOrigin::signed(1), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::signed(1), 2)); // #2 as rank 1 cannot do anything privileged - assert_noop!(Club::add_member(Origin::signed(2), 3), BadOrigin); + assert_noop!(Club::add_member(RuntimeOrigin::signed(2), 3), BadOrigin); - assert_ok!(Club::promote_member(Origin::signed(1), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::signed(1), 2)); // #2 as rank 2 can add #3. - assert_ok!(Club::add_member(Origin::signed(2), 3)); + assert_ok!(Club::add_member(RuntimeOrigin::signed(2), 3)); // #2 as rank 2 cannot promote #3 to rank 1 - assert_noop!(Club::promote_member(Origin::signed(2), 3), Error::::NoPermission); + assert_noop!( + Club::promote_member(RuntimeOrigin::signed(2), 3), + Error::::NoPermission + ); // #1 as rank 7 can promote #2 only up to rank 5 and once there cannot demote them. 
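// Illustrative sketch, not part of this patch: how a pallet consumes one of the origin
// guards above once `EnsureOrigin` is implemented for `T::RuntimeOrigin`. The `Success`
// value of `T::PromoteOrigin` is the highest rank the caller may promote *to*, so a
// promotion extrinsic can check it roughly as below. This is a hypothetical body for
// illustration only, not the pallet's actual implementation, and it assumes the usual
// `#[pallet::call]` scaffolding around it.
#[pallet::weight(0)]
pub fn promote_member(origin: OriginFor<T>, who: T::AccountId) -> DispatchResult {
	// `OriginFor<T>` is now an alias for `<T as frame_system::Config>::RuntimeOrigin`,
	// so the guard checks the runtime origin directly and yields the caller's ceiling.
	let max_rank = T::PromoteOrigin::ensure_origin(origin)?;
	let record = Members::<T, I>::get(&who).ok_or(Error::<T, I>::NoPermission)?;
	// Callers may only promote members strictly below their own ceiling.
	ensure!(record.rank < max_rank, Error::<T, I>::NoPermission);
	// ... rank bump, index maintenance and event emission elided ...
	Ok(())
}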
- assert_ok!(Club::promote_member(Origin::signed(1), 2)); - assert_ok!(Club::promote_member(Origin::signed(1), 2)); - assert_ok!(Club::promote_member(Origin::signed(1), 2)); - assert_noop!(Club::promote_member(Origin::signed(1), 2), Error::::NoPermission); - assert_noop!(Club::demote_member(Origin::signed(1), 2), Error::::NoPermission); + assert_ok!(Club::promote_member(RuntimeOrigin::signed(1), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::signed(1), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::signed(1), 2)); + assert_noop!( + Club::promote_member(RuntimeOrigin::signed(1), 2), + Error::::NoPermission + ); + assert_noop!(Club::demote_member(RuntimeOrigin::signed(1), 2), Error::::NoPermission); // #2 as rank 5 can promote #3 only up to rank 3 and once there cannot demote them. - assert_ok!(Club::promote_member(Origin::signed(2), 3)); - assert_ok!(Club::promote_member(Origin::signed(2), 3)); - assert_ok!(Club::promote_member(Origin::signed(2), 3)); - assert_noop!(Club::promote_member(Origin::signed(2), 3), Error::::NoPermission); - assert_noop!(Club::demote_member(Origin::signed(2), 3), Error::::NoPermission); + assert_ok!(Club::promote_member(RuntimeOrigin::signed(2), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::signed(2), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::signed(2), 3)); + assert_noop!( + Club::promote_member(RuntimeOrigin::signed(2), 3), + Error::::NoPermission + ); + assert_noop!(Club::demote_member(RuntimeOrigin::signed(2), 3), Error::::NoPermission); // #2 can add #4 & #5 as rank 0 and #6 & #7 as rank 1. - assert_ok!(Club::add_member(Origin::signed(2), 4)); - assert_ok!(Club::add_member(Origin::signed(2), 5)); - assert_ok!(Club::add_member(Origin::signed(2), 6)); - assert_ok!(Club::promote_member(Origin::signed(2), 6)); - assert_ok!(Club::add_member(Origin::signed(2), 7)); - assert_ok!(Club::promote_member(Origin::signed(2), 7)); + assert_ok!(Club::add_member(RuntimeOrigin::signed(2), 4)); + assert_ok!(Club::add_member(RuntimeOrigin::signed(2), 5)); + assert_ok!(Club::add_member(RuntimeOrigin::signed(2), 6)); + assert_ok!(Club::promote_member(RuntimeOrigin::signed(2), 6)); + assert_ok!(Club::add_member(RuntimeOrigin::signed(2), 7)); + assert_ok!(Club::promote_member(RuntimeOrigin::signed(2), 7)); // #3 as rank 3 can demote/remove #4 & #5 but not #6 & #7 - assert_ok!(Club::demote_member(Origin::signed(3), 4)); - assert_ok!(Club::remove_member(Origin::signed(3), 5, 0)); - assert_noop!(Club::demote_member(Origin::signed(3), 6), Error::::NoPermission); - assert_noop!(Club::remove_member(Origin::signed(3), 7, 1), Error::::NoPermission); + assert_ok!(Club::demote_member(RuntimeOrigin::signed(3), 4)); + assert_ok!(Club::remove_member(RuntimeOrigin::signed(3), 5, 0)); + assert_noop!(Club::demote_member(RuntimeOrigin::signed(3), 6), Error::::NoPermission); + assert_noop!( + Club::remove_member(RuntimeOrigin::signed(3), 7, 1), + Error::::NoPermission + ); // #2 as rank 5 can demote/remove #6 & #7 - assert_ok!(Club::demote_member(Origin::signed(2), 6)); - assert_ok!(Club::remove_member(Origin::signed(2), 7, 1)); + assert_ok!(Club::demote_member(RuntimeOrigin::signed(2), 6)); + assert_ok!(Club::remove_member(RuntimeOrigin::signed(2), 7, 1)); }); } #[test] fn voting_works() { new_test_ext().execute_with(|| { - assert_ok!(Club::add_member(Origin::root(), 0)); - assert_ok!(Club::add_member(Origin::root(), 1)); - assert_ok!(Club::promote_member(Origin::root(), 1)); - assert_ok!(Club::add_member(Origin::root(), 2)); - 
assert_ok!(Club::promote_member(Origin::root(), 2)); - assert_ok!(Club::promote_member(Origin::root(), 2)); - assert_ok!(Club::add_member(Origin::root(), 3)); - assert_ok!(Club::promote_member(Origin::root(), 3)); - assert_ok!(Club::promote_member(Origin::root(), 3)); - assert_ok!(Club::promote_member(Origin::root(), 3)); - - assert_noop!(Club::vote(Origin::signed(0), 3, true), Error::::RankTooLow); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 0)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + + assert_noop!(Club::vote(RuntimeOrigin::signed(0), 3, true), Error::::RankTooLow); assert_eq!(tally(3), Tally::from_parts(0, 0, 0)); - assert_ok!(Club::vote(Origin::signed(1), 3, true)); + assert_ok!(Club::vote(RuntimeOrigin::signed(1), 3, true)); assert_eq!(tally(3), Tally::from_parts(1, 1, 0)); - assert_ok!(Club::vote(Origin::signed(1), 3, false)); + assert_ok!(Club::vote(RuntimeOrigin::signed(1), 3, false)); assert_eq!(tally(3), Tally::from_parts(0, 0, 1)); - assert_ok!(Club::vote(Origin::signed(2), 3, true)); + assert_ok!(Club::vote(RuntimeOrigin::signed(2), 3, true)); assert_eq!(tally(3), Tally::from_parts(1, 3, 1)); - assert_ok!(Club::vote(Origin::signed(2), 3, false)); + assert_ok!(Club::vote(RuntimeOrigin::signed(2), 3, false)); assert_eq!(tally(3), Tally::from_parts(0, 0, 4)); - assert_ok!(Club::vote(Origin::signed(3), 3, true)); + assert_ok!(Club::vote(RuntimeOrigin::signed(3), 3, true)); assert_eq!(tally(3), Tally::from_parts(1, 6, 4)); - assert_ok!(Club::vote(Origin::signed(3), 3, false)); + assert_ok!(Club::vote(RuntimeOrigin::signed(3), 3, false)); assert_eq!(tally(3), Tally::from_parts(0, 0, 10)); }); } @@ -401,59 +413,78 @@ fn voting_works() { #[test] fn cleanup_works() { new_test_ext().execute_with(|| { - assert_ok!(Club::add_member(Origin::root(), 1)); - assert_ok!(Club::promote_member(Origin::root(), 1)); - assert_ok!(Club::add_member(Origin::root(), 2)); - assert_ok!(Club::promote_member(Origin::root(), 2)); - assert_ok!(Club::add_member(Origin::root(), 3)); - assert_ok!(Club::promote_member(Origin::root(), 3)); - - assert_ok!(Club::vote(Origin::signed(1), 3, true)); - assert_ok!(Club::vote(Origin::signed(2), 3, false)); - assert_ok!(Club::vote(Origin::signed(3), 3, true)); - - assert_noop!(Club::cleanup_poll(Origin::signed(4), 3, 10), Error::::Ongoing); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + + assert_ok!(Club::vote(RuntimeOrigin::signed(1), 3, true)); + assert_ok!(Club::vote(RuntimeOrigin::signed(2), 3, false)); + assert_ok!(Club::vote(RuntimeOrigin::signed(3), 3, true)); + + assert_noop!(Club::cleanup_poll(RuntimeOrigin::signed(4), 3, 10), Error::::Ongoing); Polls::set( vec![(1, Completed(1, true)), (2, Completed(2, false)), (3, Completed(3, true))] 
.into_iter() .collect(), ); - assert_ok!(Club::cleanup_poll(Origin::signed(4), 3, 10)); + assert_ok!(Club::cleanup_poll(RuntimeOrigin::signed(4), 3, 10)); // NOTE: This will fail until #10016 is merged. - // assert_noop!(Club::cleanup_poll(Origin::signed(4), 3, 10), Error::::NoneRemaining); + // assert_noop!(Club::cleanup_poll(RuntimeOrigin::signed(4), 3, 10), + // Error::::NoneRemaining); }); } #[test] fn ensure_ranked_works() { new_test_ext().execute_with(|| { - assert_ok!(Club::add_member(Origin::root(), 1)); - assert_ok!(Club::promote_member(Origin::root(), 1)); - assert_ok!(Club::add_member(Origin::root(), 2)); - assert_ok!(Club::promote_member(Origin::root(), 2)); - assert_ok!(Club::promote_member(Origin::root(), 2)); - assert_ok!(Club::add_member(Origin::root(), 3)); - assert_ok!(Club::promote_member(Origin::root(), 3)); - assert_ok!(Club::promote_member(Origin::root(), 3)); - assert_ok!(Club::promote_member(Origin::root(), 3)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); use frame_support::traits::OriginTrait; type Rank1 = EnsureRanked; type Rank2 = EnsureRanked; type Rank3 = EnsureRanked; type Rank4 = EnsureRanked; - assert_eq!(Rank1::try_origin(Origin::signed(1)).unwrap(), 1); - assert_eq!(Rank1::try_origin(Origin::signed(2)).unwrap(), 2); - assert_eq!(Rank1::try_origin(Origin::signed(3)).unwrap(), 3); - assert_eq!(Rank2::try_origin(Origin::signed(1)).unwrap_err().as_signed().unwrap(), 1); - assert_eq!(Rank2::try_origin(Origin::signed(2)).unwrap(), 2); - assert_eq!(Rank2::try_origin(Origin::signed(3)).unwrap(), 3); - assert_eq!(Rank3::try_origin(Origin::signed(1)).unwrap_err().as_signed().unwrap(), 1); - assert_eq!(Rank3::try_origin(Origin::signed(2)).unwrap_err().as_signed().unwrap(), 2); - assert_eq!(Rank3::try_origin(Origin::signed(3)).unwrap(), 3); - assert_eq!(Rank4::try_origin(Origin::signed(1)).unwrap_err().as_signed().unwrap(), 1); - assert_eq!(Rank4::try_origin(Origin::signed(2)).unwrap_err().as_signed().unwrap(), 2); - assert_eq!(Rank4::try_origin(Origin::signed(3)).unwrap_err().as_signed().unwrap(), 3); + assert_eq!(Rank1::try_origin(RuntimeOrigin::signed(1)).unwrap(), 1); + assert_eq!(Rank1::try_origin(RuntimeOrigin::signed(2)).unwrap(), 2); + assert_eq!(Rank1::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); + assert_eq!( + Rank2::try_origin(RuntimeOrigin::signed(1)).unwrap_err().as_signed().unwrap(), + 1 + ); + assert_eq!(Rank2::try_origin(RuntimeOrigin::signed(2)).unwrap(), 2); + assert_eq!(Rank2::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); + assert_eq!( + Rank3::try_origin(RuntimeOrigin::signed(1)).unwrap_err().as_signed().unwrap(), + 1 + ); + assert_eq!( + Rank3::try_origin(RuntimeOrigin::signed(2)).unwrap_err().as_signed().unwrap(), + 2 + ); + assert_eq!(Rank3::try_origin(RuntimeOrigin::signed(3)).unwrap(), 3); + assert_eq!( + Rank4::try_origin(RuntimeOrigin::signed(1)).unwrap_err().as_signed().unwrap(), + 1 + ); + assert_eq!( + Rank4::try_origin(RuntimeOrigin::signed(2)).unwrap_err().as_signed().unwrap(), + 2 + ); + assert_eq!( + 
Rank4::try_origin(RuntimeOrigin::signed(3)).unwrap_err().as_signed().unwrap(), + 3 + ); }); } diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 7b85c119e68c5..0b6b7b0f51d9a 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -232,7 +232,7 @@ pub mod pallet { /// The overarching call type. type RuntimeCall: Parameter - + Dispatchable + + Dispatchable + GetDispatchInfo + From>; diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 6496ceb253b77..2a29390fdd20f 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -55,7 +55,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/recovery/src/tests.rs b/frame/recovery/src/tests.rs index a3f19e5a61bc8..b037a8110147d 100644 --- a/frame/recovery/src/tests.rs +++ b/frame/recovery/src/tests.rs @@ -20,8 +20,8 @@ use super::*; use frame_support::{assert_noop, assert_ok, bounded_vec, traits::Currency}; use mock::{ - new_test_ext, run_to_block, Balances, BalancesCall, MaxFriends, Origin, Recovery, RecoveryCall, - RuntimeCall, Test, + new_test_ext, run_to_block, Balances, BalancesCall, MaxFriends, Recovery, RecoveryCall, + RuntimeCall, RuntimeOrigin, Test, }; use sp_runtime::traits::BadOrigin; @@ -41,12 +41,12 @@ fn basic_setup_works() { fn set_recovered_works() { new_test_ext().execute_with(|| { // Not accessible by a normal user - assert_noop!(Recovery::set_recovered(Origin::signed(1), 5, 1), BadOrigin); + assert_noop!(Recovery::set_recovered(RuntimeOrigin::signed(1), 5, 1), BadOrigin); // Root can set a recovered account though - assert_ok!(Recovery::set_recovered(Origin::root(), 5, 1)); + assert_ok!(Recovery::set_recovered(RuntimeOrigin::root(), 5, 1)); // Account 1 should now be able to make a call through account 5 let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 1, value: 100 })); - assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); + assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); // Account 1 has successfully drained the funds from account 5 assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(5), 0); @@ -60,38 +60,46 @@ fn recovery_life_cycle_works() { let threshold = 3; let delay_period = 10; // Account 5 sets up a recovery configuration on their account - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends, threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + RuntimeOrigin::signed(5), + friends, + threshold, + delay_period + )); // Some time has passed, and the user lost their keys! 
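// Illustrative sketch, not part of this patch: what the `Dispatchable<RuntimeOrigin = ...>`
// bound on the recovery pallet's `Config::RuntimeCall` above is for. Because the call type
// names the runtime's `RuntimeOrigin`, the pallet can re-dispatch a boxed call under a
// substituted origin, which is the shape `as_recovered` takes in the surrounding tests.
// Hypothetical body for illustration only, not the pallet's actual implementation.
pub fn as_recovered(
	origin: OriginFor<T>,
	account: T::AccountId,
	call: Box<<T as Config>::RuntimeCall>,
) -> DispatchResult {
	// Check that the caller has successfully recovered `account` (lookup elided here).
	let _caller = ensure_signed(origin)?;
	// Dispatch the inner call as if it had been signed by the recovered account.
	call.dispatch(frame_system::RawOrigin::Signed(account).into())
		.map(|_| ())
		.map_err(|e| e.error)
}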
run_to_block(10); // Using account 1, the user begins the recovery process to recover the lost account - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); // Off chain, the user contacts their friends and asks them to vouch for the recovery // attempt - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 1)); // We met the threshold, lets try to recover the account...? - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); + assert_noop!( + Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), + Error::::DelayPeriod + ); // We need to wait at least the delay_period number of blocks before we can recover run_to_block(20); - assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(1), 5)); // Account 1 can use account 5 to close the active recovery process, claiming the deposited // funds used to initiate the recovery process into account 5. let call = Box::new(RuntimeCall::Recovery(RecoveryCall::close_recovery { rescuer: 1 })); - assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); + assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); // Account 1 can then use account 5 to remove the recovery configuration, claiming the // deposited funds used to create the recovery configuration into account 5. let call = Box::new(RuntimeCall::Recovery(RecoveryCall::remove_recovery {})); - assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); + assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); // Account 1 should now be able to make a call through account 5 to get all of their funds assert_eq!(Balances::free_balance(5), 110); let call = Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest: 1, value: 110 })); - assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); + assert_ok!(Recovery::as_recovered(RuntimeOrigin::signed(1), 5, call)); // All funds have been fully recovered! assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(5), 0); // Remove the proxy link. - assert_ok!(Recovery::cancel_recovered(Origin::signed(1), 5)); + assert_ok!(Recovery::cancel_recovered(RuntimeOrigin::signed(1), 5)); // All storage items are removed from the module assert!(!>::contains_key(&5, &1)); @@ -107,38 +115,52 @@ fn malicious_recovery_fails() { let threshold = 3; let delay_period = 10; // Account 5 sets up a recovery configuration on their account - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends, threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + RuntimeOrigin::signed(5), + friends, + threshold, + delay_period + )); // Some time has passed, and account 1 wants to try and attack this account! 
run_to_block(10); // Using account 1, the malicious user begins the recovery process on account 5 - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); // Off chain, the user **tricks** their friends and asks them to vouch for the recovery - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); // shame on you - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1)); // shame on you - assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 1)); // shame on you // We met the threshold, lets try to recover the account...? - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); + assert_noop!( + Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), + Error::::DelayPeriod + ); // Account 1 needs to wait... run_to_block(19); // One more block to wait! - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); + assert_noop!( + Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), + Error::::DelayPeriod + ); // Account 5 checks their account every `delay_period` and notices the malicious attack! // Account 5 can close the recovery process before account 1 can claim it - assert_ok!(Recovery::close_recovery(Origin::signed(5), 1)); + assert_ok!(Recovery::close_recovery(RuntimeOrigin::signed(5), 1)); // By doing so, account 5 has now claimed the deposit originally reserved by account 1 assert_eq!(Balances::total_balance(&1), 90); // Thanks for the free money! assert_eq!(Balances::total_balance(&5), 110); // The recovery process has been closed, so account 1 can't make the claim run_to_block(20); - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotStarted); + assert_noop!( + Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), + Error::::NotStarted + ); // Account 5 can remove their recovery config and pick some better friends - assert_ok!(Recovery::remove_recovery(Origin::signed(5))); + assert_ok!(Recovery::remove_recovery(RuntimeOrigin::signed(5))); assert_ok!(Recovery::create_recovery( - Origin::signed(5), + RuntimeOrigin::signed(5), vec![22, 33, 44], threshold, delay_period @@ -151,23 +173,23 @@ fn create_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // No friends assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![], 1, 0), + Recovery::create_recovery(RuntimeOrigin::signed(5), vec![], 1, 0), Error::::NotEnoughFriends ); // Zero threshold assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![2], 0, 0), + Recovery::create_recovery(RuntimeOrigin::signed(5), vec![2], 0, 0), Error::::ZeroThreshold ); // Threshold greater than friends length assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 4, 0), + Recovery::create_recovery(RuntimeOrigin::signed(5), vec![2, 3, 4], 4, 0), Error::::NotEnoughFriends ); // Too many friends assert_noop!( Recovery::create_recovery( - Origin::signed(5), + RuntimeOrigin::signed(5), vec![1; (MaxFriends::get() + 1) as usize], 1, 0 @@ -176,18 +198,18 @@ fn create_recovery_handles_basic_errors() { ); // Unsorted friends assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![3, 2, 4], 3, 0), + Recovery::create_recovery(RuntimeOrigin::signed(5), vec![3, 2, 4], 3, 0), Error::::NotSorted ); // Duplicate 
friends assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![2, 2, 4], 3, 0), + Recovery::create_recovery(RuntimeOrigin::signed(5), vec![2, 2, 4], 3, 0), Error::::NotSorted ); // Already configured - assert_ok!(Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10)); + assert_ok!(Recovery::create_recovery(RuntimeOrigin::signed(5), vec![2, 3, 4], 3, 10)); assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10), + Recovery::create_recovery(RuntimeOrigin::signed(5), vec![2, 3, 4], 3, 10), Error::::AlreadyRecoverable ); }); @@ -201,7 +223,7 @@ fn create_recovery_works() { let delay_period = 10; // Account 5 sets up a recovery configuration on their account assert_ok!(Recovery::create_recovery( - Origin::signed(5), + RuntimeOrigin::signed(5), friends.clone(), threshold, delay_period @@ -225,7 +247,7 @@ fn initiate_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // No recovery process set up for the account assert_noop!( - Recovery::initiate_recovery(Origin::signed(1), 5), + Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5), Error::::NotRecoverable ); // Create a recovery process for next test @@ -233,15 +255,15 @@ fn initiate_recovery_handles_basic_errors() { let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - Origin::signed(5), + RuntimeOrigin::signed(5), friends.clone(), threshold, delay_period )); // Same user cannot recover same account twice - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); assert_noop!( - Recovery::initiate_recovery(Origin::signed(1), 5), + Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5), Error::::AlreadyStarted ); // No double deposit @@ -257,13 +279,13 @@ fn initiate_recovery_works() { let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - Origin::signed(5), + RuntimeOrigin::signed(5), friends.clone(), threshold, delay_period )); // Recovery can be initiated - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); // Deposit is reserved assert_eq!(Balances::reserved_balance(1), 10); // Recovery status object is created correctly @@ -271,7 +293,7 @@ fn initiate_recovery_works() { ActiveRecovery { created: 0, deposit: 10, friends: Default::default() }; assert_eq!(>::get(&5, &1), Some(recovery_status)); // Multiple users can attempt to recover the same account - assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(2), 5)); }); } @@ -280,7 +302,7 @@ fn vouch_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // Cannot vouch for non-recoverable account assert_noop!( - Recovery::vouch_recovery(Origin::signed(2), 5, 1), + Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1), Error::::NotRecoverable ); // Create a recovery process for next tests @@ -288,21 +310,27 @@ fn vouch_recovery_handles_basic_errors() { let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - Origin::signed(5), + RuntimeOrigin::signed(5), friends.clone(), threshold, delay_period )); // Cannot vouch a recovery process that has not started - assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::NotStarted); + assert_noop!( + Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1), + Error::::NotStarted + ); // Initiate a recovery process - 
assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); // Cannot vouch if you are not a friend - assert_noop!(Recovery::vouch_recovery(Origin::signed(22), 5, 1), Error::::NotFriend); + assert_noop!( + Recovery::vouch_recovery(RuntimeOrigin::signed(22), 5, 1), + Error::::NotFriend + ); // Cannot vouch twice - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); assert_noop!( - Recovery::vouch_recovery(Origin::signed(2), 5, 1), + Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1), Error::::AlreadyVouched ); }); @@ -316,17 +344,17 @@ fn vouch_recovery_works() { let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - Origin::signed(5), + RuntimeOrigin::signed(5), friends.clone(), threshold, delay_period )); - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); // Vouching works - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); // Handles out of order vouches - assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1)); // Final recovery status object is updated correctly let recovery_status = ActiveRecovery { created: 0, deposit: 10, friends: bounded_vec![2, 3, 4] }; @@ -338,28 +366,40 @@ fn vouch_recovery_works() { fn claim_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // Cannot claim a non-recoverable account - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotRecoverable); + assert_noop!( + Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), + Error::::NotRecoverable + ); // Create a recovery process for the test let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - Origin::signed(5), + RuntimeOrigin::signed(5), friends.clone(), threshold, delay_period )); // Cannot claim an account which has not started the recovery process - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotStarted); - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_noop!( + Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), + Error::::NotStarted + ); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); // Cannot claim an account which has not passed the delay period - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); + assert_noop!( + Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), + Error::::DelayPeriod + ); run_to_block(11); // Cannot claim an account which has not passed the threshold number of votes - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1)); // Only 2/3 is not good enough - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::Threshold); + assert_noop!( + Recovery::claim_recovery(RuntimeOrigin::signed(1), 5), + Error::::Threshold + ); }); } @@ -371,32 +411,32 @@ fn claim_recovery_works() { let 
threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - Origin::signed(5), + RuntimeOrigin::signed(5), friends.clone(), threshold, delay_period )); - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 1)); run_to_block(11); // Account can be recovered. - assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(1), 5)); // Recovered storage item is correctly created assert_eq!(>::get(&1), Some(5)); // Account could be re-recovered in the case that the recoverer account also gets lost. - assert_ok!(Recovery::initiate_recovery(Origin::signed(4), 5)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 4)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 4)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 4)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(4), 5)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(2), 5, 4)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(3), 5, 4)); + assert_ok!(Recovery::vouch_recovery(RuntimeOrigin::signed(4), 5, 4)); run_to_block(21); // Account is re-recovered. - assert_ok!(Recovery::claim_recovery(Origin::signed(4), 5)); + assert_ok!(Recovery::claim_recovery(RuntimeOrigin::signed(4), 5)); // Recovered storage item is correctly updated assert_eq!(>::get(&4), Some(5)); }); @@ -406,7 +446,10 @@ fn claim_recovery_works() { fn close_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // Cannot close a non-active recovery - assert_noop!(Recovery::close_recovery(Origin::signed(5), 1), Error::::NotStarted); + assert_noop!( + Recovery::close_recovery(RuntimeOrigin::signed(5), 1), + Error::::NotStarted + ); }); } @@ -414,26 +457,35 @@ fn close_recovery_handles_basic_errors() { fn remove_recovery_works() { new_test_ext().execute_with(|| { // Cannot remove an unrecoverable account - assert_noop!(Recovery::remove_recovery(Origin::signed(5)), Error::::NotRecoverable); + assert_noop!( + Recovery::remove_recovery(RuntimeOrigin::signed(5)), + Error::::NotRecoverable + ); // Create and initiate a recovery process for the test let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; assert_ok!(Recovery::create_recovery( - Origin::signed(5), + RuntimeOrigin::signed(5), friends.clone(), threshold, delay_period )); - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(RuntimeOrigin::signed(2), 5)); // Cannot remove a recovery when there are active recoveries. - assert_noop!(Recovery::remove_recovery(Origin::signed(5)), Error::::StillActive); - assert_ok!(Recovery::close_recovery(Origin::signed(5), 1)); + assert_noop!( + Recovery::remove_recovery(RuntimeOrigin::signed(5)), + Error::::StillActive + ); + assert_ok!(Recovery::close_recovery(RuntimeOrigin::signed(5), 1)); // Still need to remove one more! 
- assert_noop!(Recovery::remove_recovery(Origin::signed(5)), Error::::StillActive); - assert_ok!(Recovery::close_recovery(Origin::signed(5), 2)); + assert_noop!( + Recovery::remove_recovery(RuntimeOrigin::signed(5)), + Error::::StillActive + ); + assert_ok!(Recovery::close_recovery(RuntimeOrigin::signed(5), 2)); // Finally removed - assert_ok!(Recovery::remove_recovery(Origin::signed(5))); + assert_ok!(Recovery::remove_recovery(RuntimeOrigin::signed(5))); }); } diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index 2116d56f4647b..45ec894e2c2c0 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -42,8 +42,8 @@ fn funded_account, I: 'static>(name: &'static str, index: u32) -> T caller } -fn create_referendum, I: 'static>() -> (T::Origin, ReferendumIndex) { - let origin: T::Origin = T::SubmitOrigin::successful_origin(); +fn create_referendum, I: 'static>() -> (T::RuntimeOrigin, ReferendumIndex) { + let origin: T::RuntimeOrigin = T::SubmitOrigin::successful_origin(); if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); whitelist_account!(caller); @@ -188,12 +188,12 @@ fn is_not_confirming, I: 'static>(index: ReferendumIndex) -> bool { benchmarks_instance_pallet! { submit { - let origin: T::Origin = T::SubmitOrigin::successful_origin(); + let origin: T::RuntimeOrigin = T::SubmitOrigin::successful_origin(); if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); whitelist_account!(caller); } - }: _( + }: _( origin, Box::new(RawOrigin::Root.into()), T::Hashing::hash_of(&0), @@ -205,7 +205,7 @@ benchmarks_instance_pallet! { place_decision_deposit_preparing { let (origin, index) = create_referendum::(); - }: place_decision_deposit(origin, index) + }: place_decision_deposit(origin, index) verify { assert!(Referenda::::ensure_ongoing(index).unwrap().decision_deposit.is_some()); } @@ -213,7 +213,7 @@ benchmarks_instance_pallet! { place_decision_deposit_queued { let (origin, index) = create_referendum::(); fill_queue::(index, 1, 90); - }: place_decision_deposit(origin, index) + }: place_decision_deposit(origin, index) verify { let track = Referenda::::ensure_ongoing(index).unwrap().track; assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); @@ -226,7 +226,7 @@ benchmarks_instance_pallet! { let track = Referenda::::ensure_ongoing(index).unwrap().track; assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); - }: place_decision_deposit(origin, index) + }: place_decision_deposit(origin, index) verify { assert_eq!(TrackQueue::::get(&track).len() as u32, T::MaxQueued::get()); assert!(TrackQueue::::get(&track).into_iter().all(|(i, _)| i != index)); @@ -236,7 +236,7 @@ benchmarks_instance_pallet! { let (origin, index) = create_referendum::(); skip_prepare_period::(index); make_passing::(index); - }: place_decision_deposit(origin, index) + }: place_decision_deposit(origin, index) verify { assert!(is_confirming::(index)); } @@ -244,7 +244,7 @@ benchmarks_instance_pallet! { place_decision_deposit_failing { let (origin, index) = create_referendum::(); skip_prepare_period::(index); - }: place_decision_deposit(origin, index) + }: place_decision_deposit(origin, index) verify { assert!(is_not_confirming::(index)); } @@ -253,7 +253,7 @@ benchmarks_instance_pallet! 
{ let (origin, index) = create_referendum::(); place_deposit::(index); assert_ok!(Referenda::::cancel(T::CancelOrigin::successful_origin(), index)); - }: _(origin, index) + }: _(origin, index) verify { assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(_, _, None))); } @@ -261,7 +261,7 @@ benchmarks_instance_pallet! { cancel { let (_origin, index) = create_referendum::(); place_deposit::(index); - }: _(T::CancelOrigin::successful_origin(), index) + }: _(T::CancelOrigin::successful_origin(), index) verify { assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Cancelled(..))); } @@ -269,7 +269,7 @@ benchmarks_instance_pallet! { kill { let (_origin, index) = create_referendum::(); place_deposit::(index); - }: _(T::KillOrigin::successful_origin(), index) + }: _(T::KillOrigin::successful_origin(), index) verify { assert_matches!(ReferendumInfoFor::::get(index), Some(ReferendumInfo::Killed(..))); } diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index a2a5aae4e617b..739f0dbc30ed4 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -147,7 +147,9 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + Sized { // System level stuff. - type RuntimeCall: Parameter + Dispatchable + From>; + type RuntimeCall: Parameter + + Dispatchable + + From>; type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. @@ -168,11 +170,11 @@ pub mod pallet { type Currency: ReservableCurrency; // Origins and unbalances. /// Origin from which proposals may be submitted. - type SubmitOrigin: EnsureOrigin; + type SubmitOrigin: EnsureOrigin; /// Origin from which any vote may be cancelled. - type CancelOrigin: EnsureOrigin; + type CancelOrigin: EnsureOrigin; /// Origin from which any vote may be killed. - type KillOrigin: EnsureOrigin; + type KillOrigin: EnsureOrigin; /// Handler for the unbalanced reduction when slashing a preimage deposit. type Slash: OnUnbalanced>; /// The counting type for votes. Usually just balance. @@ -217,7 +219,7 @@ pub mod pallet { > + TracksInfo< BalanceOf, Self::BlockNumber, - Origin = ::PalletsOrigin, + RuntimeOrigin = ::PalletsOrigin, >; } diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs index 70ca304eed3cb..920e529bf05ca 100644 --- a/frame/referenda/src/mock.rs +++ b/frame/referenda/src/mock.rs @@ -71,7 +71,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -103,7 +103,7 @@ impl pallet_preimage::Config for Test { } impl pallet_scheduler::Config for Test { type RuntimeEvent = RuntimeEvent; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type PalletsOrigin = OriginCaller; type RuntimeCall = RuntimeCall; type MaximumWeight = MaxWeight; @@ -148,7 +148,7 @@ impl SortedMembers for OneToFive { pub struct TestTracksInfo; impl TracksInfo for TestTracksInfo { type Id = u8; - type Origin = ::PalletsOrigin; + type RuntimeOrigin = ::PalletsOrigin; fn tracks() -> &'static [(Self::Id, TrackInfo)] { static DATA: [(u8, TrackInfo); 2] = [ ( @@ -198,7 +198,7 @@ impl TracksInfo for TestTracksInfo { ]; &DATA[..] 
} - fn track_for(id: &Self::Origin) -> Result { + fn track_for(id: &Self::RuntimeOrigin) -> Result { if let Ok(system_origin) = frame_system::RawOrigin::try_from(id.clone()) { match system_origin { frame_system::RawOrigin::Root => Ok(0), @@ -319,7 +319,7 @@ pub fn set_balance_proposal_hash(value: u64) -> H256 { #[allow(dead_code)] pub fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { Referenda::submit( - Origin::signed(who), + RuntimeOrigin::signed(who), Box::new(frame_system::RawOrigin::Root.into()), set_balance_proposal_hash(value), DispatchTime::After(delay), @@ -447,12 +447,12 @@ pub enum RefState { impl RefState { pub fn create(self) -> ReferendumIndex { assert_ok!(Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(frame_support::dispatch::RawOrigin::Root.into()), set_balance_proposal_hash(1), DispatchTime::At(10), )); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); if matches!(self, RefState::Confirming { immediate: true }) { set_tally(0, 100, 0); } diff --git a/frame/referenda/src/tests.rs b/frame/referenda/src/tests.rs index 69cd62f79159a..778d00e516693 100644 --- a/frame/referenda/src/tests.rs +++ b/frame/referenda/src/tests.rs @@ -42,14 +42,14 @@ fn basic_happy_path_works() { new_test_ext().execute_with(|| { // #1: submit assert_ok!(Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), set_balance_proposal_hash(1), DispatchTime::At(10), )); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(ReferendumCount::::get(), 1); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); run_to(4); assert_eq!(DecidingCount::::get(0), 0); run_to(5); @@ -63,7 +63,7 @@ fn basic_happy_path_works() { run_to(9); // #8: Should be confirmed & ended. assert_eq!(approved_since(0), 9); - assert_ok!(Referenda::refund_decision_deposit(Origin::signed(2), 0)); + assert_ok!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(2), 0)); run_to(12); // #9: Should not yet be enacted. assert_eq!(Balances::free_balance(&42), 0); @@ -78,7 +78,7 @@ fn insta_confirm_then_kill_works() { new_test_ext().execute_with(|| { let r = Confirming { immediate: true }.create(); run_to(6); - assert_ok!(Referenda::kill(Origin::root(), r)); + assert_ok!(Referenda::kill(RuntimeOrigin::root(), r)); assert_eq!(killed_since(r), 6); }); } @@ -173,24 +173,24 @@ fn queueing_works() { new_test_ext().execute_with(|| { // Submit a proposal into a track with a queue len of 1. assert_ok!(Referenda::submit( - Origin::signed(5), + RuntimeOrigin::signed(5), Box::new(RawOrigin::Root.into()), set_balance_proposal_hash(0), DispatchTime::After(0), )); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(5), 0)); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(5), 0)); run_to(2); // Submit 3 more proposals into the same queue. for i in 1..=4 { assert_ok!(Referenda::submit( - Origin::signed(i), + RuntimeOrigin::signed(i), Box::new(RawOrigin::Root.into()), set_balance_proposal_hash(i), DispatchTime::After(0), )); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(i), i as u32)); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(i), i as u32)); // TODO: decision deposit after some initial votes with a non-highest voted coming // first. 
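// Illustrative sketch, not part of this patch: with `TracksInfo` now keyed by the renamed
// `RuntimeOrigin` associated type (here the runtime's `PalletsOrigin`), `track_for` maps
// the origin a referendum will eventually be dispatched with to a track id. In the mock
// above a root origin selects track 0, which is what submissions carrying
// `Box::new(RawOrigin::Root.into())` in the surrounding tests rely on. A test-style check
// of that mapping might look like this (assumes the mock runtime's generated `OriginCaller`):
let root: OriginCaller = frame_system::RawOrigin::Root.into();
assert_eq!(TestTracksInfo::track_for(&root), Ok(0));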
} @@ -214,7 +214,7 @@ fn queueing_works() { println!("{:?}", Vec::<_>::from(TrackQueue::::get(0))); // Cancel the first. - assert_ok!(Referenda::cancel(Origin::signed(4), 0)); + assert_ok!(Referenda::cancel(RuntimeOrigin::signed(4), 0)); assert_eq!(cancelled_since(0), 6); // The other with the most approvals (#4) should be being decided. @@ -270,7 +270,7 @@ fn auto_timeout_should_happen_with_nothing_but_submit() { new_test_ext().execute_with(|| { // #1: submit assert_ok!(Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), set_balance_proposal_hash(1), DispatchTime::At(20), @@ -290,20 +290,20 @@ fn auto_timeout_should_happen_with_nothing_but_submit() { fn tracks_are_distinguished() { new_test_ext().execute_with(|| { assert_ok!(Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), set_balance_proposal_hash(1), DispatchTime::At(10), )); assert_ok!(Referenda::submit( - Origin::signed(2), + RuntimeOrigin::signed(2), Box::new(RawOrigin::None.into()), set_balance_proposal_hash(2), DispatchTime::At(20), )); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(3), 0)); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(4), 1)); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(3), 0)); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(4), 1)); let mut i = ReferendumInfoFor::::iter().collect::>(); i.sort_by_key(|x| x.0); @@ -354,7 +354,7 @@ fn submit_errors_work() { // No track for Signed origins. assert_noop!( Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(RawOrigin::Signed(2).into()), h, DispatchTime::At(10), @@ -365,7 +365,7 @@ fn submit_errors_work() { // No funds for deposit assert_noop!( Referenda::submit( - Origin::signed(10), + RuntimeOrigin::signed(10), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), @@ -379,21 +379,21 @@ fn submit_errors_work() { fn decision_deposit_errors_work() { new_test_ext().execute_with(|| { let e = Error::::NotOngoing; - assert_noop!(Referenda::place_decision_deposit(Origin::signed(2), 0), e); + assert_noop!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0), e); let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); let e = BalancesError::::InsufficientBalance; - assert_noop!(Referenda::place_decision_deposit(Origin::signed(10), 0), e); + assert_noop!(Referenda::place_decision_deposit(RuntimeOrigin::signed(10), 0), e); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); let e = Error::::HasDeposit; - assert_noop!(Referenda::place_decision_deposit(Origin::signed(2), 0), e); + assert_noop!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0), e); }); } @@ -401,24 +401,24 @@ fn decision_deposit_errors_work() { fn refund_deposit_works() { new_test_ext().execute_with(|| { let e = Error::::BadReferendum; - assert_noop!(Referenda::refund_decision_deposit(Origin::signed(1), 0), e); + assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(1), 0), e); let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); let e = Error::::NoDeposit; - assert_noop!(Referenda::refund_decision_deposit(Origin::signed(2), 0), 
e); + assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(2), 0), e); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); let e = Error::::Unfinished; - assert_noop!(Referenda::refund_decision_deposit(Origin::signed(3), 0), e); + assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(3), 0), e); run_to(11); - assert_ok!(Referenda::refund_decision_deposit(Origin::signed(3), 0)); + assert_ok!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(3), 0)); }); } @@ -427,16 +427,16 @@ fn cancel_works() { new_test_ext().execute_with(|| { let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); run_to(8); - assert_ok!(Referenda::cancel(Origin::signed(4), 0)); - assert_ok!(Referenda::refund_decision_deposit(Origin::signed(3), 0)); + assert_ok!(Referenda::cancel(RuntimeOrigin::signed(4), 0)); + assert_ok!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(3), 0)); assert_eq!(cancelled_since(0), 8); }); } @@ -446,16 +446,16 @@ fn cancel_errors_works() { new_test_ext().execute_with(|| { let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); - assert_noop!(Referenda::cancel(Origin::signed(1), 0), BadOrigin); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); + assert_noop!(Referenda::cancel(RuntimeOrigin::signed(1), 0), BadOrigin); run_to(11); - assert_noop!(Referenda::cancel(Origin::signed(4), 0), Error::::NotOngoing); + assert_noop!(Referenda::cancel(RuntimeOrigin::signed(4), 0), Error::::NotOngoing); }); } @@ -464,17 +464,17 @@ fn kill_works() { new_test_ext().execute_with(|| { let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); run_to(8); - assert_ok!(Referenda::kill(Origin::root(), 0)); + assert_ok!(Referenda::kill(RuntimeOrigin::root(), 0)); let e = Error::::NoDeposit; - assert_noop!(Referenda::refund_decision_deposit(Origin::signed(3), 0), e); + assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(3), 0), e); assert_eq!(killed_since(0), 8); }); } @@ -484,16 +484,16 @@ fn kill_errors_works() { new_test_ext().execute_with(|| { let h = set_balance_proposal_hash(1); assert_ok!(Referenda::submit( - Origin::signed(1), + RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), h, DispatchTime::At(10), )); - assert_ok!(Referenda::place_decision_deposit(Origin::signed(2), 0)); - assert_noop!(Referenda::kill(Origin::signed(4), 0), BadOrigin); + assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); + assert_noop!(Referenda::kill(RuntimeOrigin::signed(4), 0), BadOrigin); run_to(11); - assert_noop!(Referenda::kill(Origin::root(), 0), Error::::NotOngoing); + assert_noop!(Referenda::kill(RuntimeOrigin::root(), 0), Error::::NotOngoing); }); } diff --git 
a/frame/referenda/src/types.rs b/frame/referenda/src/types.rs index 984bd0642d959..a6311e5f925be 100644 --- a/frame/referenda/src/types.rs +++ b/frame/referenda/src/types.rs @@ -33,7 +33,8 @@ pub type NegativeImbalanceOf = <>::Currency as Currency< pub type CallOf = >::RuntimeCall; pub type VotesOf = >::Votes; pub type TallyOf = >::Tally; -pub type PalletsOriginOf = <::Origin as OriginTrait>::PalletsOrigin; +pub type PalletsOriginOf = + <::RuntimeOrigin as OriginTrait>::PalletsOrigin; pub type ReferendumInfoOf = ReferendumInfo< TrackIdOf, PalletsOriginOf, @@ -139,13 +140,13 @@ pub trait TracksInfo { type Id: Copy + Parameter + Ord + PartialOrd + Send + Sync + 'static + MaxEncodedLen; /// The origin type from which a track is implied. - type Origin; + type RuntimeOrigin; /// Return the array of known tracks and their information. fn tracks() -> &'static [(Self::Id, TrackInfo)]; /// Determine the voting track for the given `origin`. - fn track_for(origin: &Self::Origin) -> Result; + fn track_for(origin: &Self::RuntimeOrigin) -> Result; /// Return the track info for track `id`, by default this just looks it up in `Self::tracks()`. fn info(id: Self::Id) -> Option<&'static TrackInfo> { @@ -157,7 +158,7 @@ pub trait TracksInfo { #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ReferendumStatus< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - Origin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Parameter + Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, @@ -168,7 +169,7 @@ pub struct ReferendumStatus< /// The track of this referendum. pub(crate) track: TrackId, /// The origin for this referendum. - pub(crate) origin: Origin, + pub(crate) origin: RuntimeOrigin, /// The hash of the proposal up for referendum. pub(crate) proposal_hash: Hash, /// The time the proposal should be scheduled for enactment. @@ -194,7 +195,7 @@ pub struct ReferendumStatus< #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum ReferendumInfo< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - Origin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, @@ -204,7 +205,16 @@ pub enum ReferendumInfo< > { /// Referendum has been submitted and is being voted on. Ongoing( - ReferendumStatus, + ReferendumStatus< + TrackId, + RuntimeOrigin, + Moment, + Hash, + Balance, + Tally, + AccountId, + ScheduleAddress, + >, ), /// Referendum finished with approval. Submission deposit is held. 
Approved(Moment, Deposit, Option>), @@ -220,14 +230,14 @@ pub enum ReferendumInfo< impl< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - Origin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Parameter + Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, ScheduleAddress: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - > ReferendumInfo + > ReferendumInfo { /// Take the Decision Deposit from `self`, if there is one. Returns an `Err` if `self` is not /// in a valid state for the Decision Deposit to be refunded. diff --git a/frame/remark/src/mock.rs b/frame/remark/src/mock.rs index 6ce7115345e57..22467796cf37b 100644 --- a/frame/remark/src/mock.rs +++ b/frame/remark/src/mock.rs @@ -45,7 +45,7 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index fbd59555b5ebc..9397c66170425 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -31,7 +31,7 @@ use frame_system::Pallet as System; const BLOCK_NUMBER: u32 = 2; -type SystemOrigin = ::Origin; +type SystemOrigin = ::RuntimeOrigin; /// Add `n` named items to the schedule. /// diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 7e9b8e1b92fb7..143fa37a9261d 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -202,17 +202,19 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The aggregated origin which the dispatch will take. - type Origin: OriginTrait + type RuntimeOrigin: OriginTrait + From - + IsType<::Origin>; + + IsType<::RuntimeOrigin>; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin: From> + Codec + Clone + Eq + TypeInfo; /// The aggregated call type. type RuntimeCall: Parameter - + Dispatchable::Origin, PostInfo = PostDispatchInfo> - + GetDispatchInfo + + Dispatchable< + RuntimeOrigin = ::RuntimeOrigin, + PostInfo = PostDispatchInfo, + > + GetDispatchInfo + From>; /// The maximum weight that may be scheduled per block for any dispatchables of less @@ -221,7 +223,7 @@ pub mod pallet { type MaximumWeight: Get; /// Required origin to schedule or cancel calls. - type ScheduleOrigin: EnsureOrigin<::Origin>; + type ScheduleOrigin: EnsureOrigin<::RuntimeOrigin>; /// Compare the privileges of origins. /// @@ -354,9 +356,10 @@ pub mod pallet { let periodic = s.maybe_periodic.is_some(); let call_weight = call.get_dispatch_info().weight; let mut item_weight = T::WeightInfo::item(periodic, named, Some(resolved)); - let origin = - <::Origin as From>::from(s.origin.clone()) - .into(); + let origin = <::RuntimeOrigin as From>::from( + s.origin.clone(), + ) + .into(); if ensure_signed(origin).is_ok() { // Weights of Signed dispatches expect their signing account to be whitelisted. 
item_weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); @@ -431,7 +434,7 @@ pub mod pallet { call: Box>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::RuntimeOrigin::from(origin); Self::do_schedule( DispatchTime::At(when), maybe_periodic, @@ -446,7 +449,7 @@ pub mod pallet { #[pallet::weight(::WeightInfo::cancel(T::MaxScheduledPerBlock::get()))] pub fn cancel(origin: OriginFor, when: T::BlockNumber, index: u32) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::RuntimeOrigin::from(origin); Self::do_cancel(Some(origin.caller().clone()), (when, index))?; Ok(()) } @@ -462,7 +465,7 @@ pub mod pallet { call: Box>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::RuntimeOrigin::from(origin); Self::do_schedule_named( id, DispatchTime::At(when), @@ -478,7 +481,7 @@ pub mod pallet { #[pallet::weight(::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))] pub fn cancel_named(origin: OriginFor, id: Vec) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::RuntimeOrigin::from(origin); Self::do_cancel_named(Some(origin.caller().clone()), id)?; Ok(()) } @@ -497,7 +500,7 @@ pub mod pallet { call: Box>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::RuntimeOrigin::from(origin); Self::do_schedule( DispatchTime::After(after), maybe_periodic, @@ -523,7 +526,7 @@ pub mod pallet { call: Box>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; - let origin = ::Origin::from(origin); + let origin = ::RuntimeOrigin::from(origin); Self::do_schedule_named( id, DispatchTime::After(after), diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 6460d0f4c1cd1..6f6667590a6c3 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -70,7 +70,7 @@ pub mod logger { #[pallet::call] impl Pallet where - ::Origin: OriginTrait, + ::RuntimeOrigin: OriginTrait, { #[pallet::weight(*weight)] pub fn log(origin: OriginFor, i: u32, weight: Weight) -> DispatchResult { @@ -125,7 +125,7 @@ impl system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = RocksDbWeight; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; @@ -169,7 +169,7 @@ impl pallet_preimage::Config for Test { impl Config for Test { type RuntimeEvent = RuntimeEvent; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type PalletsOrigin = OriginCaller; type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index f1168e6e9d26f..f6db70ae42d49 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -52,7 +52,7 @@ fn scheduling_with_preimages_works() { RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); let hash = ::Hashing::hash_of(&call); let hashed = MaybeHashed::Hash(hash); - assert_ok!(Preimage::note_preimage(Origin::signed(0), call.encode())); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); 
assert!(Preimage::preimage_requested(&hash)); run_to_block(3); @@ -82,7 +82,7 @@ fn scheduling_with_preimage_postpones_correctly() { assert!(logger::log().is_empty()); // Register preimage. - assert_ok!(Preimage::note_preimage(Origin::signed(0), call.encode())); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); run_to_block(5); // #5 empty since postponement is 2 blocks. @@ -619,14 +619,21 @@ fn root_calls_works() { RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) .into(), ); - assert_ok!(Scheduler::schedule_named(Origin::root(), 1u32.encode(), 4, None, 127, call,)); - assert_ok!(Scheduler::schedule(Origin::root(), 4, None, 127, call2)); + assert_ok!(Scheduler::schedule_named( + RuntimeOrigin::root(), + 1u32.encode(), + 4, + None, + 127, + call, + )); + assert_ok!(Scheduler::schedule(RuntimeOrigin::root(), 4, None, 127, call2)); run_to_block(3); // Scheduled calls are in the agenda. assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named(Origin::root(), 1u32.encode())); - assert_ok!(Scheduler::cancel(Origin::root(), 4, 1)); + assert_ok!(Scheduler::cancel_named(RuntimeOrigin::root(), 1u32.encode())); + assert_ok!(Scheduler::cancel(RuntimeOrigin::root(), 4, 1)); // Scheduled calls are made NONE, so should not effect state run_to_block(100); assert!(logger::log().is_empty()); @@ -651,17 +658,17 @@ fn fails_to_schedule_task_in_the_past() { .into(), ); assert_err!( - Scheduler::schedule_named(Origin::root(), 1u32.encode(), 2, None, 127, call1), + Scheduler::schedule_named(RuntimeOrigin::root(), 1u32.encode(), 2, None, 127, call1), Error::::TargetBlockNumberInPast, ); assert_err!( - Scheduler::schedule(Origin::root(), 2, None, 127, call2), + Scheduler::schedule(RuntimeOrigin::root(), 2, None, 127, call2), Error::::TargetBlockNumberInPast, ); assert_err!( - Scheduler::schedule(Origin::root(), 3, None, 127, call3), + Scheduler::schedule(RuntimeOrigin::root(), 3, None, 127, call3), Error::::TargetBlockNumberInPast, ); }); diff --git a/frame/scored-pool/README.md b/frame/scored-pool/README.md index 56c6af916ecd0..455bae24e7951 100644 --- a/frame/scored-pool/README.md +++ b/frame/scored-pool/README.md @@ -58,7 +58,7 @@ pub mod pallet { let who = ensure_signed(origin)?; let _ = >::submit_candidacy( - T::Origin::from(Some(who.clone()).into()) + T::RuntimeOrigin::from(Some(who.clone()).into()) ); Ok(()) } diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index c9ba30df80b8b..a015c1c568153 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -75,7 +75,7 @@ //! let who = ensure_signed(origin)?; //! //! let _ = >::submit_candidacy( -//! T::Origin::from(Some(who.clone()).into()) +//! T::RuntimeOrigin::from(Some(who.clone()).into()) //! ); //! Ok(()) //! } @@ -183,13 +183,13 @@ pub mod pallet { type MembershipChanged: ChangeMembers; /// Allows a configurable origin type to set a score to a candidate in the pool. - type ScoreOrigin: EnsureOrigin; + type ScoreOrigin: EnsureOrigin; /// Required origin for removing a member (though can always be Root). /// Configurable origin which enables removing an entity. If the entity /// is part of the `Members` it is immediately replaced by the next /// highest scoring candidate, if available. 
- type KickOrigin: EnsureOrigin; + type KickOrigin: EnsureOrigin; } #[pallet::event] diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 20fa47baf639f..d6f653b32ad2d 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -61,7 +61,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; diff --git a/frame/scored-pool/src/tests.rs b/frame/scored-pool/src/tests.rs index 1a68891be0f50..8f4daff47cc44 100644 --- a/frame/scored-pool/src/tests.rs +++ b/frame/scored-pool/src/tests.rs @@ -41,11 +41,11 @@ fn query_membership_works() { fn submit_candidacy_must_not_work() { new_test_ext().execute_with(|| { assert_noop!( - ScoredPool::submit_candidacy(Origin::signed(99)), + ScoredPool::submit_candidacy(RuntimeOrigin::signed(99)), pallet_balances::Error::::InsufficientBalance, ); assert_noop!( - ScoredPool::submit_candidacy(Origin::signed(40)), + ScoredPool::submit_candidacy(RuntimeOrigin::signed(40)), Error::::AlreadyInPool ); }); @@ -58,7 +58,7 @@ fn submit_candidacy_works() { let who = 15; // when - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); assert_eq!(fetch_from_pool(15), Some((who, None))); // then @@ -72,11 +72,11 @@ fn scoring_works() { // given let who = 15; let score = 99; - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); // when let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, score)); + assert_ok!(ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), who, index, score)); // then assert_eq!(fetch_from_pool(who), Some((who, Some(score)))); @@ -93,7 +93,7 @@ fn scoring_same_element_with_same_score_works() { let score = 2; // when - assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, score)); + assert_ok!(ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), who, index, score)); // then assert_eq!(fetch_from_pool(who), Some((who, Some(score)))); @@ -109,7 +109,7 @@ fn kicking_works_only_for_authorized() { new_test_ext().execute_with(|| { let who = 40; let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_noop!(ScoredPool::kick(Origin::signed(99), who, index), BadOrigin); + assert_noop!(ScoredPool::kick(RuntimeOrigin::signed(99), who, index), BadOrigin); }); } @@ -123,7 +123,7 @@ fn kicking_works() { // when let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index)); + assert_ok!(ScoredPool::kick(RuntimeOrigin::signed(KickOrigin::get()), who, index)); // then assert_eq!(find_in_pool(who), None); @@ -138,14 +138,14 @@ fn unscored_entities_must_not_be_used_for_filling_members() { new_test_ext().execute_with(|| { // given // we submit a candidacy, score will be `None` - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(15))); + assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(15))); // when // we remove every scored member ScoredPool::pool().into_iter().for_each(|(who, score)| { if let Some(_) = score { let index = find_in_pool(who).expect("entity must be in pool") as u32; - 
assert_ok!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index)); + assert_ok!(ScoredPool::kick(RuntimeOrigin::signed(KickOrigin::get()), who, index)); } }); @@ -161,9 +161,9 @@ fn refreshing_works() { new_test_ext().execute_with(|| { // given let who = 15; - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99)); + assert_ok!(ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), who, index, 99)); // when ScoredPool::refresh_members(ScoredPool::pool(), ChangeReceiver::MembershipChanged); @@ -179,9 +179,9 @@ fn refreshing_happens_every_period() { new_test_ext().execute_with(|| { // given System::set_block_number(1); - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(15))); + assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(15))); let index = find_in_pool(15).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), 15, index, 99)); + assert_ok!(ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), 15, index, 99)); assert_eq!(ScoredPool::members(), vec![20, 40]); // when @@ -200,7 +200,7 @@ fn withdraw_candidacy_must_only_work_for_members() { let who = 77; let index = 0; assert_noop!( - ScoredPool::withdraw_candidacy(Origin::signed(who), index), + ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), index), Error::::WrongAccountIndex ); }); @@ -212,15 +212,15 @@ fn oob_index_should_abort() { let who = 40; let oob_index = ScoredPool::pool().len() as u32; assert_noop!( - ScoredPool::withdraw_candidacy(Origin::signed(who), oob_index), + ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), oob_index), Error::::InvalidIndex ); assert_noop!( - ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, oob_index, 99), + ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), who, oob_index, 99), Error::::InvalidIndex ); assert_noop!( - ScoredPool::kick(Origin::signed(KickOrigin::get()), who, oob_index), + ScoredPool::kick(RuntimeOrigin::signed(KickOrigin::get()), who, oob_index), Error::::InvalidIndex ); }); @@ -232,15 +232,15 @@ fn index_mismatches_should_abort() { let who = 40; let index = 3; assert_noop!( - ScoredPool::withdraw_candidacy(Origin::signed(who), index), + ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), index), Error::::WrongAccountIndex ); assert_noop!( - ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99), + ScoredPool::score(RuntimeOrigin::signed(ScoreOrigin::get()), who, index, 99), Error::::WrongAccountIndex ); assert_noop!( - ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index), + ScoredPool::kick(RuntimeOrigin::signed(KickOrigin::get()), who, index), Error::::WrongAccountIndex ); }); @@ -254,7 +254,7 @@ fn withdraw_unscored_candidacy_must_work() { // when let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); + assert_ok!(ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), index)); // then assert_eq!(fetch_from_pool(5), None); @@ -270,7 +270,7 @@ fn withdraw_scored_candidacy_must_work() { // when let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); + 
assert_ok!(ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), index)); // then assert_eq!(fetch_from_pool(who), None); @@ -286,12 +286,12 @@ fn candidacy_resubmitting_works() { let who = 15; // when - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); assert_eq!(ScoredPool::candidate_exists(who), true); let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); + assert_ok!(ScoredPool::withdraw_candidacy(RuntimeOrigin::signed(who), index)); assert_eq!(ScoredPool::candidate_exists(who), false); - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); // then assert_eq!(ScoredPool::candidate_exists(who), true); @@ -303,13 +303,18 @@ fn pool_candidates_exceeded() { new_test_ext().execute_with(|| { for i in [1, 2, 3, 4, 6] { let who = i as u64; - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + assert_ok!(ScoredPool::submit_candidacy(RuntimeOrigin::signed(who))); let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99)); + assert_ok!(ScoredPool::score( + RuntimeOrigin::signed(ScoreOrigin::get()), + who, + index, + 99 + )); } assert_noop!( - ScoredPool::submit_candidacy(Origin::signed(8)), + ScoredPool::submit_candidacy(RuntimeOrigin::signed(8)), Error::::TooManyMembers ); }); diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index de2f4ae23f423..60b17213d2f1c 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -52,7 +52,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = AccountIndex; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index f6d9e65c1e0b0..aa13eacba9564 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -248,7 +248,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index 7947a8a670b5a..43809cc3a9de0 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -21,8 +21,8 @@ use super::*; use crate::mock::{ authorities, before_session_end_called, force_new_session, new_test_ext, reset_before_session_end_called, session_changed, set_next_validators, set_session_length, - Origin, PreUpgradeMockSessionKeys, Session, SessionChanged, System, Test, TestSessionChanged, - TestValidatorIdOf, + PreUpgradeMockSessionKeys, RuntimeOrigin, Session, SessionChanged, System, Test, + TestSessionChanged, TestValidatorIdOf, }; use codec::Decode; @@ -67,7 +67,7 @@ fn keys_cleared_on_kill() { assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); assert!(System::is_provider_required(&1)); - assert_ok!(Session::purge_keys(Origin::signed(1))); + assert_ok!(Session::purge_keys(RuntimeOrigin::signed(1))); assert!(!System::is_provider_required(&1)); assert_eq!(Session::load_keys(&1), None); @@ -87,8 +87,8 @@ fn 
purge_keys_works_for_stash_id() { let id = DUMMY; assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); - assert_ok!(Session::purge_keys(Origin::signed(10))); - assert_ok!(Session::purge_keys(Origin::signed(2))); + assert_ok!(Session::purge_keys(RuntimeOrigin::signed(10))); + assert_ok!(Session::purge_keys(RuntimeOrigin::signed(2))); assert_eq!(Session::load_keys(&10), None); assert_eq!(Session::load_keys(&20), None); @@ -128,7 +128,7 @@ fn authorities_should_track_validators() { reset_before_session_end_called(); set_next_validators(vec![1, 2, 4]); - assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(4).into(), vec![])); + assert_ok!(Session::set_keys(RuntimeOrigin::signed(4), UintAuthorityId(4).into(), vec![])); force_new_session(); initialize_block(3); assert_eq!( @@ -194,7 +194,7 @@ fn session_change_should_work() { // Block 3: Set new key for validator 2; no visible change. initialize_block(3); - assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); + assert_ok!(Session::set_keys(RuntimeOrigin::signed(2), UintAuthorityId(5).into(), vec![])); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); // Block 4: Session rollover; no visible change. @@ -219,13 +219,13 @@ fn duplicates_are_not_allowed() { System::set_block_number(1); Session::on_initialize(1); assert_noop!( - Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]), + Session::set_keys(RuntimeOrigin::signed(4), UintAuthorityId(1).into(), vec![]), Error::::DuplicatedKey, ); - assert_ok!(Session::set_keys(Origin::signed(1), UintAuthorityId(10).into(), vec![])); + assert_ok!(Session::set_keys(RuntimeOrigin::signed(1), UintAuthorityId(10).into(), vec![])); // is fine now that 1 has migrated off. - assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![])); + assert_ok!(Session::set_keys(RuntimeOrigin::signed(4), UintAuthorityId(1).into(), vec![])); }); } @@ -268,7 +268,7 @@ fn session_changed_flag_works() { assert!(before_session_end_called()); reset_before_session_end_called(); - assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); + assert_ok!(Session::set_keys(RuntimeOrigin::signed(2), UintAuthorityId(5).into(), vec![])); force_new_session(); initialize_block(6); assert!(!session_changed()); @@ -276,7 +276,11 @@ fn session_changed_flag_works() { reset_before_session_end_called(); // changing the keys of a validator leads to change. - assert_ok!(Session::set_keys(Origin::signed(69), UintAuthorityId(69).into(), vec![])); + assert_ok!(Session::set_keys( + RuntimeOrigin::signed(69), + UintAuthorityId(69).into(), + vec![] + )); force_new_session(); initialize_block(7); assert!(session_changed()); @@ -355,7 +359,7 @@ fn session_keys_generate_output_works_as_set_keys_input() { new_test_ext().execute_with(|| { let new_keys = mock::MockSessionKeys::generate(None); assert_ok!(Session::set_keys( - Origin::signed(2), + RuntimeOrigin::signed(2), ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), vec![], )); diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 122dedabbcf0d..73a09490ea579 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -421,10 +421,10 @@ pub mod pallet { type MaxLockDuration: Get; /// The origin that is allowed to call `found`. - type FounderSetOrigin: EnsureOrigin; + type FounderSetOrigin: EnsureOrigin; /// The origin that is allowed to make suspension judgements. 
-    type SuspensionJudgementOrigin: EnsureOrigin;
+    type SuspensionJudgementOrigin: EnsureOrigin;
     /// The number of blocks between membership challenges.
     #[pallet::constant]
@@ -1268,19 +1268,19 @@ pub mod pallet {
 /// Simple ensure origin struct to filter for the founder account.
 pub struct EnsureFounder(sp_std::marker::PhantomData);
-impl EnsureOrigin for EnsureFounder {
+impl EnsureOrigin for EnsureFounder {
     type Success = T::AccountId;
-    fn try_origin(o: T::Origin) -> Result {
+    fn try_origin(o: T::RuntimeOrigin) -> Result {
         o.into().and_then(|o| match (o, Founder::::get()) {
             (frame_system::RawOrigin::Signed(ref who), Some(ref f)) if who == f => Ok(who.clone()),
-            (r, _) => Err(T::Origin::from(r)),
+            (r, _) => Err(T::RuntimeOrigin::from(r)),
         })
     }
     #[cfg(feature = "runtime-benchmarks")]
-    fn try_successful_origin() -> Result {
+    fn try_successful_origin() -> Result {
         let founder = Founder::::get().ok_or(())?;
-        Ok(T::Origin::from(frame_system::RawOrigin::Signed(founder)))
+        Ok(T::RuntimeOrigin::from(frame_system::RawOrigin::Signed(founder)))
     }
 }
diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs
index 1d9eebdda65ce..0b1b93aeae761 100644
--- a/frame/society/src/mock.rs
+++ b/frame/society/src/mock.rs
@@ -63,7 +63,7 @@ impl frame_system::Config for Test {
     type BlockWeights = ();
     type BlockLength = ();
     type DbWeight = ();
-    type Origin = Origin;
+    type RuntimeOrigin = RuntimeOrigin;
     type Index = u64;
     type BlockNumber = u64;
     type Hash = H256;
diff --git a/frame/society/src/tests.rs b/frame/society/src/tests.rs
index d394ddc9011b0..864735aa10cca 100644
--- a/frame/society/src/tests.rs
+++ b/frame/society/src/tests.rs
@@ -33,9 +33,9 @@ fn founding_works() {
     assert_eq!(Society::pot(), 0);
     // Account 1 is set as the founder origin
     // Account 5 cannot start a society
-    assert_noop!(Society::found(Origin::signed(5), 20, 100, vec![]), BadOrigin);
+    assert_noop!(Society::found(RuntimeOrigin::signed(5), 20, 100, vec![]), BadOrigin);
     // Account 1 can start a society, where 10 is the founding member
-    assert_ok!(Society::found(Origin::signed(1), 10, 100, b"be cool".to_vec()));
+    assert_ok!(Society::found(RuntimeOrigin::signed(1), 10, 100, b"be cool".to_vec()));
     // Society members only include 10
     assert_eq!(Society::members(), vec![10]);
     // 10 is the head of the society
@@ -51,7 +51,7 @@ fn founding_works() {
     assert_eq!(Society::pot(), 1000);
     // Cannot start another society
     assert_noop!(
-        Society::found(Origin::signed(1), 20, 100, vec![]),
+        Society::found(RuntimeOrigin::signed(1), 20, 100, vec![]),
         Error::::AlreadyFounded
     );
     });
 }
@@ -61,22 +61,22 @@ fn unfounding_works() {
     EnvBuilder::new().with_max_members(0).with_members(vec![]).execute(|| {
     // Account 1 sets the founder...
-    assert_ok!(Society::found(Origin::signed(1), 10, 100, vec![]));
+    assert_ok!(Society::found(RuntimeOrigin::signed(1), 10, 100, vec![]));
     // Account 2 cannot unfound it as it's not the founder.
-    assert_noop!(Society::unfound(Origin::signed(2)), Error::::NotFounder);
+    assert_noop!(Society::unfound(RuntimeOrigin::signed(2)), Error::::NotFounder);
     // Account 10 can, though.
-    assert_ok!(Society::unfound(Origin::signed(10)));
+    assert_ok!(Society::unfound(RuntimeOrigin::signed(10)));
     // 1 sets the founder to 20 this time
-    assert_ok!(Society::found(Origin::signed(1), 20, 100, vec![]));
+    assert_ok!(Society::found(RuntimeOrigin::signed(1), 20, 100, vec![]));
     // Bring in a new member...
- assert_ok!(Society::bid(Origin::signed(10), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(10), 0)); run_to_block(4); - assert_ok!(Society::vote(Origin::signed(20), 10, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(20), 10, true)); run_to_block(8); // Unfounding won't work now, even though it's from 20. - assert_noop!(Society::unfound(Origin::signed(20)), Error::::NotHead); + assert_noop!(Society::unfound(RuntimeOrigin::signed(20)), Error::::NotHead); }); } @@ -85,7 +85,7 @@ fn basic_new_member_works() { EnvBuilder::new().execute(|| { assert_eq!(Balances::free_balance(20), 50); // Bid causes Candidate Deposit to be reserved. - assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); assert_eq!(Balances::free_balance(20), 25); assert_eq!(Balances::reserved_balance(20), 25); // Rotate period every 4 blocks @@ -93,7 +93,7 @@ fn basic_new_member_works() { // 20 is now a candidate assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); // 10 (a member) can vote for the candidate - assert_ok!(Society::vote(Origin::signed(10), 20, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); // Rotate period every 4 blocks run_to_block(8); // 20 is now a member of the society @@ -108,10 +108,10 @@ fn basic_new_member_works() { fn bidding_works() { EnvBuilder::new().execute(|| { // Users make bids of various amounts - assert_ok!(Society::bid(Origin::signed(60), 1900)); - assert_ok!(Society::bid(Origin::signed(50), 500)); - assert_ok!(Society::bid(Origin::signed(40), 400)); - assert_ok!(Society::bid(Origin::signed(30), 300)); + assert_ok!(Society::bid(RuntimeOrigin::signed(60), 1900)); + assert_ok!(Society::bid(RuntimeOrigin::signed(50), 500)); + assert_ok!(Society::bid(RuntimeOrigin::signed(40), 400)); + assert_ok!(Society::bid(RuntimeOrigin::signed(30), 300)); // Rotate period run_to_block(4); // Pot is 1000 after "PeriodSpend" @@ -126,8 +126,8 @@ fn bidding_works() { ] ); // A member votes for these candidates to join the society - assert_ok!(Society::vote(Origin::signed(10), 30, true)); - assert_ok!(Society::vote(Origin::signed(10), 40, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 40, true)); run_to_block(8); // Candidates become members after a period rotation assert_eq!(Society::members(), vec![10, 30, 40]); @@ -137,7 +137,7 @@ fn bidding_works() { // Left over from the original bids is 50 who satisfies the condition of bid less than pot. assert_eq!(Society::candidates(), vec![create_bid(500, 50, BidKind::Deposit(25))]); // 40, now a member, can vote for 50 - assert_ok!(Society::vote(Origin::signed(40), 50, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(40), 50, true)); run_to_block(12); // 50 is now a member assert_eq!(Society::members(), vec![10, 30, 40, 50]); @@ -146,8 +146,8 @@ fn bidding_works() { assert_eq!(Balances::free_balance(Society::account_id()), 8_800); // No more candidates satisfy the requirements assert_eq!(Society::candidates(), vec![]); - assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around - // Next period + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); // Keep defender around + // Next period run_to_block(16); // Same members assert_eq!(Society::members(), vec![10, 30, 40, 50]); @@ -158,7 +158,7 @@ fn bidding_works() { // Candidate 60 now qualifies based on the increased pot size. 
assert_eq!(Society::candidates(), vec![create_bid(1900, 60, BidKind::Deposit(25))]); // Candidate 60 is voted in. - assert_ok!(Society::vote(Origin::signed(50), 60, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(50), 60, true)); run_to_block(20); // 60 joins as a member assert_eq!(Society::members(), vec![10, 30, 40, 50, 60]); @@ -172,15 +172,15 @@ fn bidding_works() { fn unbidding_works() { EnvBuilder::new().execute(|| { // 20 and 30 make bids - assert_ok!(Society::bid(Origin::signed(20), 1000)); - assert_ok!(Society::bid(Origin::signed(30), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(20), 1000)); + assert_ok!(Society::bid(RuntimeOrigin::signed(30), 0)); // Balances are reserved assert_eq!(Balances::free_balance(30), 25); assert_eq!(Balances::reserved_balance(30), 25); // Must know right position to unbid + cannot unbid someone else - assert_noop!(Society::unbid(Origin::signed(30), 1), Error::::BadPosition); + assert_noop!(Society::unbid(RuntimeOrigin::signed(30), 1), Error::::BadPosition); // Can unbid themselves with the right position - assert_ok!(Society::unbid(Origin::signed(30), 0)); + assert_ok!(Society::unbid(RuntimeOrigin::signed(30), 0)); // Balance is returned assert_eq!(Balances::free_balance(30), 50); assert_eq!(Balances::reserved_balance(30), 0); @@ -195,15 +195,15 @@ fn payout_works() { EnvBuilder::new().execute(|| { // Original balance of 50 assert_eq!(Balances::free_balance(20), 50); - assert_ok!(Society::bid(Origin::signed(20), 1000)); + assert_ok!(Society::bid(RuntimeOrigin::signed(20), 1000)); run_to_block(4); - assert_ok!(Society::vote(Origin::signed(10), 20, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); run_to_block(8); // payout not ready - assert_noop!(Society::payout(Origin::signed(20)), Error::::NoPayout); + assert_noop!(Society::payout(RuntimeOrigin::signed(20)), Error::::NoPayout); run_to_block(9); // payout should be here - assert_ok!(Society::payout(Origin::signed(20))); + assert_ok!(Society::payout(RuntimeOrigin::signed(20))); assert_eq!(Balances::free_balance(20), 1050); }); } @@ -212,7 +212,7 @@ fn payout_works() { fn basic_new_member_skeptic_works() { EnvBuilder::new().execute(|| { assert_eq!(Strikes::::get(10), 0); - assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); run_to_block(8); @@ -227,14 +227,14 @@ fn basic_new_member_reject_works() { // Starting Balance assert_eq!(Balances::free_balance(20), 50); // 20 makes a bid - assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); assert_eq!(Balances::free_balance(20), 25); assert_eq!(Balances::reserved_balance(20), 25); // Rotation Period run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); // We say no - assert_ok!(Society::vote(Origin::signed(10), 20, false)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, false)); run_to_block(8); // User is not added as member assert_eq!(Society::members(), vec![10]); @@ -248,19 +248,19 @@ fn basic_new_member_reject_works() { fn slash_payout_works() { EnvBuilder::new().execute(|| { assert_eq!(Balances::free_balance(20), 50); - assert_ok!(Society::bid(Origin::signed(20), 1000)); + assert_ok!(Society::bid(RuntimeOrigin::signed(20), 1000)); run_to_block(4); - assert_ok!(Society::vote(Origin::signed(10), 20, true)); + 
assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); run_to_block(8); // payout in queue assert_eq!(Payouts::::get(20), vec![(9, 1000)]); - assert_noop!(Society::payout(Origin::signed(20)), Error::::NoPayout); + assert_noop!(Society::payout(RuntimeOrigin::signed(20)), Error::::NoPayout); // slash payout assert_eq!(Society::slash_payout(&20, 500), 500); assert_eq!(Payouts::::get(20), vec![(9, 500)]); run_to_block(9); // payout should be here, but 500 less - assert_ok!(Society::payout(Origin::signed(20))); + assert_ok!(Society::payout(RuntimeOrigin::signed(20))); assert_eq!(Balances::free_balance(20), 550); }); } @@ -295,10 +295,10 @@ fn suspended_member_life_cycle_works() { assert_eq!(>::get(20), false); // Let's suspend account 20 by giving them 2 strikes by not voting - assert_ok!(Society::bid(Origin::signed(30), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(30), 0)); run_to_block(8); assert_eq!(Strikes::::get(20), 1); - assert_ok!(Society::bid(Origin::signed(40), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(40), 0)); run_to_block(16); // Strike 2 is accumulated, and 20 is suspended :( @@ -307,14 +307,17 @@ fn suspended_member_life_cycle_works() { // Suspended members cannot get payout Society::bump_payout(&20, 10, 100); - assert_noop!(Society::payout(Origin::signed(20)), Error::::NotMember); + assert_noop!(Society::payout(RuntimeOrigin::signed(20)), Error::::NotMember); // Normal people cannot make judgement - assert_noop!(Society::judge_suspended_member(Origin::signed(20), 20, true), BadOrigin); + assert_noop!( + Society::judge_suspended_member(RuntimeOrigin::signed(20), 20, true), + BadOrigin + ); // Suspension judgment origin can judge thee // Suspension judgement origin forgives the suspended member - assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, true)); + assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, true)); assert_eq!(>::get(20), false); assert_eq!(>::get(), vec![10, 20]); @@ -322,7 +325,7 @@ fn suspended_member_life_cycle_works() { Society::suspend_member(&20); assert_eq!(>::get(20), true); // Suspension judgement origin does not forgive the suspended member - assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, false)); + assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, false)); // Cleaned up assert_eq!(>::get(20), false); assert_eq!(>::get(), vec![10]); @@ -337,14 +340,14 @@ fn suspended_candidate_rejected_works() { assert_eq!(Balances::free_balance(20), 50); assert_eq!(Balances::free_balance(Society::account_id()), 10000); // 20 makes a bid - assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); assert_eq!(Balances::free_balance(20), 25); assert_eq!(Balances::reserved_balance(20), 25); // Rotation Period run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); // We say no - assert_ok!(Society::vote(Origin::signed(10), 20, false)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, false)); run_to_block(8); // User is not added as member assert_eq!(Society::members(), vec![10]); @@ -354,18 +357,22 @@ fn suspended_candidate_rejected_works() { // Normal user cannot make judgement on suspended candidate assert_noop!( - Society::judge_suspended_candidate(Origin::signed(20), 20, Judgement::Approve), + Society::judge_suspended_candidate(RuntimeOrigin::signed(20), 20, Judgement::Approve), BadOrigin ); // Suspension judgement origin makes no direct judgement - 
assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, Judgement::Rebid)); + assert_ok!(Society::judge_suspended_candidate( + RuntimeOrigin::signed(2), + 20, + Judgement::Rebid + )); // They are placed back in bid pool, repeat suspension process // Rotation Period run_to_block(12); assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); // We say no - assert_ok!(Society::vote(Origin::signed(10), 20, false)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, false)); run_to_block(16); // User is not added as member assert_eq!(Society::members(), vec![10]); @@ -374,7 +381,11 @@ fn suspended_candidate_rejected_works() { assert_eq!(Society::suspended_candidate(20).is_some(), true); // Suspension judgement origin rejects the candidate - assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, Judgement::Reject)); + assert_ok!(Society::judge_suspended_candidate( + RuntimeOrigin::signed(2), + 20, + Judgement::Reject + )); // User is slashed assert_eq!(Balances::free_balance(20), 25); assert_eq!(Balances::reserved_balance(20), 0); @@ -392,13 +403,16 @@ fn vouch_works() { // 10 is the only member assert_eq!(Society::members(), vec![10]); // A non-member cannot vouch - assert_noop!(Society::vouch(Origin::signed(1), 20, 1000, 100), Error::::NotMember); + assert_noop!( + Society::vouch(RuntimeOrigin::signed(1), 20, 1000, 100), + Error::::NotMember + ); // A member can though - assert_ok!(Society::vouch(Origin::signed(10), 20, 1000, 100)); + assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 1000, 100)); assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); // A member cannot vouch twice at the same time assert_noop!( - Society::vouch(Origin::signed(10), 30, 100, 0), + Society::vouch(RuntimeOrigin::signed(10), 30, 100, 0), Error::::AlreadyVouching ); // Vouching creates the right kind of bid @@ -407,7 +421,7 @@ fn vouch_works() { run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(1000, 20, BidKind::Vouch(10, 100))]); // Vote yes - assert_ok!(Society::vote(Origin::signed(10), 20, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); // Vouched user can win run_to_block(8); assert_eq!(Society::members(), vec![10, 20]); @@ -426,14 +440,14 @@ fn voucher_cannot_win_more_than_bid() { // 10 is the only member assert_eq!(Society::members(), vec![10]); // 10 vouches, but asks for more than the bid - assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 1000)); + assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 1000)); // Vouching creates the right kind of bid assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 1000))]); // Vouched user can become candidate run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(100, 20, BidKind::Vouch(10, 1000))]); // Vote yes - assert_ok!(Society::vote(Origin::signed(10), 20, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 20, true)); // Vouched user can win run_to_block(8); assert_eq!(Society::members(), vec![10, 20]); @@ -450,25 +464,25 @@ fn unvouch_works() { // 10 is the only member assert_eq!(Society::members(), vec![10]); // 10 vouches for 20 - assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); + assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 0)); // 20 has a bid assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); // 10 is vouched assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); // To unvouch, you must know the right bid position - 
assert_noop!(Society::unvouch(Origin::signed(10), 2), Error::::BadPosition); + assert_noop!(Society::unvouch(RuntimeOrigin::signed(10), 2), Error::::BadPosition); // 10 can unvouch with the right position - assert_ok!(Society::unvouch(Origin::signed(10), 0)); + assert_ok!(Society::unvouch(RuntimeOrigin::signed(10), 0)); // 20 no longer has a bid assert_eq!(>::get(), vec![]); // 10 is no longer vouching assert_eq!(>::get(10), None); // Cannot unvouch after they become candidate - assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); + assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 0)); run_to_block(4); assert_eq!(Society::candidates(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); - assert_noop!(Society::unvouch(Origin::signed(10), 0), Error::::BadPosition); + assert_noop!(Society::unvouch(RuntimeOrigin::signed(10), 0), Error::::BadPosition); // 10 is still vouching until candidate is approved or rejected assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); run_to_block(8); @@ -478,18 +492,22 @@ fn unvouch_works() { // User is stuck vouching until judgement origin resolves suspended candidate assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); // Judge denies candidate - assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, Judgement::Reject)); + assert_ok!(Society::judge_suspended_candidate( + RuntimeOrigin::signed(2), + 20, + Judgement::Reject + )); // 10 is banned from vouching assert_eq!(>::get(10), Some(VouchingStatus::Banned)); assert_eq!(Society::members(), vec![10]); // 10 cannot vouch again assert_noop!( - Society::vouch(Origin::signed(10), 30, 100, 0), + Society::vouch(RuntimeOrigin::signed(10), 30, 100, 0), Error::::AlreadyVouching ); // 10 cannot unvouch either, so they are banned forever. - assert_noop!(Society::unvouch(Origin::signed(10), 0), Error::::NotVouching); + assert_noop!(Society::unvouch(RuntimeOrigin::signed(10), 0), Error::::NotVouching); }); } @@ -499,13 +517,13 @@ fn unbid_vouch_works() { // 10 is the only member assert_eq!(Society::members(), vec![10]); // 10 vouches for 20 - assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); + assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 20, 100, 0)); // 20 has a bid assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); // 10 is vouched assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); // 20 doesn't want to be a member and can unbid themselves. - assert_ok!(Society::unbid(Origin::signed(20), 0)); + assert_ok!(Society::unbid(RuntimeOrigin::signed(20), 0)); // Everything is cleaned up assert_eq!(>::get(10), None); assert_eq!(>::get(), vec![]); @@ -520,22 +538,22 @@ fn founder_and_head_cannot_be_removed() { assert_eq!(Society::founder(), Some(10)); assert_eq!(Society::head(), Some(10)); // 10 can still accumulate strikes - assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); run_to_block(8); assert_eq!(Strikes::::get(10), 1); - assert_ok!(Society::bid(Origin::signed(30), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(30), 0)); run_to_block(16); assert_eq!(Strikes::::get(10), 2); // Awkwardly they can obtain more than MAX_STRIKES... 
- assert_ok!(Society::bid(Origin::signed(40), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(40), 0)); run_to_block(24); assert_eq!(Strikes::::get(10), 3); // Replace the head - assert_ok!(Society::bid(Origin::signed(50), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(50), 0)); run_to_block(28); - assert_ok!(Society::vote(Origin::signed(10), 50, true)); - assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 50, true)); + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); // Keep defender around run_to_block(32); assert_eq!(Society::members(), vec![10, 50]); assert_eq!(Society::head(), Some(50)); @@ -543,29 +561,29 @@ fn founder_and_head_cannot_be_removed() { assert_eq!(Society::founder(), Some(10)); // 50 can still accumulate strikes - assert_ok!(Society::bid(Origin::signed(60), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(60), 0)); run_to_block(40); assert_eq!(Strikes::::get(50), 1); - assert_ok!(Society::bid(Origin::signed(70), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(70), 0)); run_to_block(48); assert_eq!(Strikes::::get(50), 2); // Replace the head - assert_ok!(Society::bid(Origin::signed(80), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(80), 0)); run_to_block(52); - assert_ok!(Society::vote(Origin::signed(10), 80, true)); - assert_ok!(Society::vote(Origin::signed(50), 80, true)); - assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 80, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(50), 80, true)); + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); // Keep defender around run_to_block(56); assert_eq!(Society::members(), vec![10, 50, 80]); assert_eq!(Society::head(), Some(80)); assert_eq!(Society::founder(), Some(10)); // 50 can now be suspended for strikes - assert_ok!(Society::bid(Origin::signed(90), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(90), 0)); run_to_block(60); // The candidate is rejected, so voting approve will give a strike - assert_ok!(Society::vote(Origin::signed(50), 90, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(50), 90, true)); run_to_block(64); assert_eq!(Strikes::::get(50), 0); assert_eq!(>::get(50), true); @@ -592,19 +610,22 @@ fn challenges_work() { run_to_block(8); assert_eq!(Society::defender(), Some(30)); // They can always free vote for themselves - assert_ok!(Society::defender_vote(Origin::signed(30), true)); + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(30), true)); // If no one else votes, nothing happens run_to_block(16); assert_eq!(Society::members(), vec![10, 20, 30, 40]); // New challenge period assert_eq!(Society::defender(), Some(30)); // Non-member cannot challenge - assert_noop!(Society::defender_vote(Origin::signed(1), true), Error::::NotMember); + assert_noop!( + Society::defender_vote(RuntimeOrigin::signed(1), true), + Error::::NotMember + ); // 3 people say accept, 1 reject - assert_ok!(Society::defender_vote(Origin::signed(10), true)); - assert_ok!(Society::defender_vote(Origin::signed(20), true)); - assert_ok!(Society::defender_vote(Origin::signed(30), true)); - assert_ok!(Society::defender_vote(Origin::signed(40), false)); + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(20), true)); + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(30), true)); + 
assert_ok!(Society::defender_vote(RuntimeOrigin::signed(40), false)); run_to_block(24); // 20 survives assert_eq!(Society::members(), vec![10, 20, 30, 40]); @@ -616,10 +637,10 @@ fn challenges_work() { // One more time assert_eq!(Society::defender(), Some(30)); // 2 people say accept, 2 reject - assert_ok!(Society::defender_vote(Origin::signed(10), true)); - assert_ok!(Society::defender_vote(Origin::signed(20), true)); - assert_ok!(Society::defender_vote(Origin::signed(30), false)); - assert_ok!(Society::defender_vote(Origin::signed(40), false)); + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(10), true)); + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(20), true)); + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(30), false)); + assert_ok!(Society::defender_vote(RuntimeOrigin::signed(40), false)); run_to_block(32); // 20 is suspended assert_eq!(Society::members(), vec![10, 20, 40]); @@ -653,12 +674,12 @@ fn bad_vote_slash_works() { assert_eq!(>::get(30), vec![(5, 100)]); assert_eq!(>::get(40), vec![(5, 100)]); // Create a new bid - assert_ok!(Society::bid(Origin::signed(50), 1000)); + assert_ok!(Society::bid(RuntimeOrigin::signed(50), 1000)); run_to_block(4); - assert_ok!(Society::vote(Origin::signed(10), 50, false)); - assert_ok!(Society::vote(Origin::signed(20), 50, true)); - assert_ok!(Society::vote(Origin::signed(30), 50, false)); - assert_ok!(Society::vote(Origin::signed(40), 50, false)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 50, false)); + assert_ok!(Society::vote(RuntimeOrigin::signed(20), 50, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(30), 50, false)); + assert_ok!(Society::vote(RuntimeOrigin::signed(40), 50, false)); run_to_block(8); // Wrong voter gained a strike assert_eq!(>::get(10), 0); @@ -677,15 +698,15 @@ fn bad_vote_slash_works() { fn user_cannot_bid_twice() { EnvBuilder::new().execute(|| { // Cannot bid twice - assert_ok!(Society::bid(Origin::signed(20), 100)); - assert_noop!(Society::bid(Origin::signed(20), 100), Error::::AlreadyBid); + assert_ok!(Society::bid(RuntimeOrigin::signed(20), 100)); + assert_noop!(Society::bid(RuntimeOrigin::signed(20), 100), Error::::AlreadyBid); // Cannot bid when vouched - assert_ok!(Society::vouch(Origin::signed(10), 30, 100, 100)); - assert_noop!(Society::bid(Origin::signed(30), 100), Error::::AlreadyBid); + assert_ok!(Society::vouch(RuntimeOrigin::signed(10), 30, 100, 100)); + assert_noop!(Society::bid(RuntimeOrigin::signed(30), 100), Error::::AlreadyBid); // Cannot vouch when already bid assert_ok!(Society::add_member(&50)); assert_noop!( - Society::vouch(Origin::signed(50), 20, 100, 100), + Society::vouch(RuntimeOrigin::signed(50), 20, 100, 100), Error::::AlreadyBid ); }); @@ -697,7 +718,7 @@ fn vouching_handles_removed_member_with_bid() { // Add a member assert_ok!(Society::add_member(&20)); // Have that member vouch for a user - assert_ok!(Society::vouch(Origin::signed(20), 30, 1000, 100)); + assert_ok!(Society::vouch(RuntimeOrigin::signed(20), 30, 1000, 100)); // That user is now a bid and the member is vouching assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); @@ -708,7 +729,7 @@ fn vouching_handles_removed_member_with_bid() { assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); // Remove member - assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, false)); + 
assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, false)); // Bid is removed, vouching status is removed assert_eq!(>::get(), vec![]); assert_eq!(>::get(20), None); @@ -721,7 +742,7 @@ fn vouching_handles_removed_member_with_candidate() { // Add a member assert_ok!(Society::add_member(&20)); // Have that member vouch for a user - assert_ok!(Society::vouch(Origin::signed(20), 30, 1000, 100)); + assert_ok!(Society::vouch(RuntimeOrigin::signed(20), 30, 1000, 100)); // That user is now a bid and the member is vouching assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); @@ -735,12 +756,12 @@ fn vouching_handles_removed_member_with_candidate() { assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); // Remove member - assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, false)); + assert_ok!(Society::judge_suspended_member(RuntimeOrigin::signed(2), 20, false)); // Vouching status is removed, but candidate is still in the queue assert_eq!(>::get(20), None); assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); // Candidate wins - assert_ok!(Society::vote(Origin::signed(10), 30, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); run_to_block(8); assert_eq!(Society::members(), vec![10, 30]); // Payout does not go to removed member @@ -753,16 +774,19 @@ fn vouching_handles_removed_member_with_candidate() { fn votes_are_working() { EnvBuilder::new().execute(|| { // Users make bids of various amounts - assert_ok!(Society::bid(Origin::signed(50), 500)); - assert_ok!(Society::bid(Origin::signed(40), 400)); - assert_ok!(Society::bid(Origin::signed(30), 300)); + assert_ok!(Society::bid(RuntimeOrigin::signed(50), 500)); + assert_ok!(Society::bid(RuntimeOrigin::signed(40), 400)); + assert_ok!(Society::bid(RuntimeOrigin::signed(30), 300)); // Rotate period run_to_block(4); // A member votes for these candidates to join the society - assert_ok!(Society::vote(Origin::signed(10), 30, true)); - assert_ok!(Society::vote(Origin::signed(10), 40, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 40, true)); // You cannot vote for a non-candidate - assert_noop!(Society::vote(Origin::signed(10), 50, true), Error::::NotCandidate); + assert_noop!( + Society::vote(RuntimeOrigin::signed(10), 50, true), + Error::::NotCandidate + ); // Votes are stored assert_eq!(>::get(30, 10), Some(Vote::Approve)); assert_eq!(>::get(40, 10), Some(Vote::Approve)); @@ -784,7 +808,7 @@ fn max_limits_work() { for i in (100..1110).rev() { // Give them some funds let _ = Balances::make_free_balance_be(&(i as u128), 1000); - assert_ok!(Society::bid(Origin::signed(i as u128), i)); + assert_ok!(Society::bid(RuntimeOrigin::signed(i as u128), i)); } let bids = >::get(); // Length is 1000 @@ -810,7 +834,7 @@ fn max_limits_work() { // Fill up members with suspended candidates from the first rotation for i in 100..104 { assert_ok!(Society::judge_suspended_candidate( - Origin::signed(2), + RuntimeOrigin::signed(2), i, Judgement::Approve )); @@ -821,9 +845,9 @@ fn max_limits_work() { // However, a fringe scenario allows for in-progress candidates to increase the membership // pool, but it has no real after-effects. 
for i in Society::members().iter() { - assert_ok!(Society::vote(Origin::signed(*i), 110, true)); - assert_ok!(Society::vote(Origin::signed(*i), 111, true)); - assert_ok!(Society::vote(Origin::signed(*i), 112, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(*i), 110, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(*i), 111, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(*i), 112, true)); } // Rotate period run_to_block(12); @@ -832,7 +856,7 @@ fn max_limits_work() { // No candidates because full assert_eq!(Society::candidates().len(), 0); // Increase member limit - assert_ok!(Society::set_max_members(Origin::root(), 200)); + assert_ok!(Society::set_max_members(RuntimeOrigin::root(), 200)); // Rotate period run_to_block(16); // Candidates are back! @@ -847,11 +871,11 @@ fn zero_bid_works() { // * That zero bid is placed as head when accepted. EnvBuilder::new().execute(|| { // Users make bids of various amounts - assert_ok!(Society::bid(Origin::signed(60), 400)); - assert_ok!(Society::bid(Origin::signed(50), 300)); - assert_ok!(Society::bid(Origin::signed(30), 0)); - assert_ok!(Society::bid(Origin::signed(20), 0)); - assert_ok!(Society::bid(Origin::signed(40), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(60), 400)); + assert_ok!(Society::bid(RuntimeOrigin::signed(50), 300)); + assert_ok!(Society::bid(RuntimeOrigin::signed(30), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(20), 0)); + assert_ok!(Society::bid(RuntimeOrigin::signed(40), 0)); // Rotate period run_to_block(4); @@ -872,9 +896,9 @@ fn zero_bid_works() { vec![create_bid(0, 20, BidKind::Deposit(25)), create_bid(0, 40, BidKind::Deposit(25)),] ); // A member votes for these candidates to join the society - assert_ok!(Society::vote(Origin::signed(10), 30, true)); - assert_ok!(Society::vote(Origin::signed(10), 50, true)); - assert_ok!(Society::vote(Origin::signed(10), 60, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 30, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 50, true)); + assert_ok!(Society::vote(RuntimeOrigin::signed(10), 60, true)); run_to_block(8); // Candidates become members after a period rotation assert_eq!(Society::members(), vec![10, 30, 50, 60]); @@ -892,7 +916,7 @@ fn bids_ordered_correctly() { for j in 0..5 { // Give them some funds let _ = Balances::make_free_balance_be(&(100 + (i * 5 + j) as u128), 1000); - assert_ok!(Society::bid(Origin::signed(100 + (i * 5 + j) as u128), j)); + assert_ok!(Society::bid(RuntimeOrigin::signed(100 + (i * 5 + j) as u128), j)); } } diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index a79eac3c4dc46..c270b5933e647 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -953,7 +953,7 @@ benchmarks! 
{ #[cfg(test)] mod tests { use super::*; - use crate::mock::{Balances, ExtBuilder, Origin, Staking, Test}; + use crate::mock::{Balances, ExtBuilder, RuntimeOrigin, Staking, Test}; use frame_support::assert_ok; #[test] @@ -1000,7 +1000,11 @@ mod tests { let current_era = CurrentEra::::get().unwrap(); let original_free_balance = Balances::free_balance(&validator_stash); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), validator_stash, current_era)); + assert_ok!(Staking::payout_stakers( + RuntimeOrigin::signed(1337), + validator_stash, + current_era + )); let new_free_balance = Balances::free_balance(&validator_stash); assert!(original_free_balance < new_free_balance); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 5980e6b40d15d..5682184929a5b 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -131,7 +131,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = RocksDbWeight; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = AccountIndex; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; @@ -660,13 +660,22 @@ pub(crate) fn current_era() -> EraIndex { pub(crate) fn bond(stash: AccountId, ctrl: AccountId, val: Balance) { let _ = Balances::make_free_balance_be(&stash, val); let _ = Balances::make_free_balance_be(&ctrl, val); - assert_ok!(Staking::bond(Origin::signed(stash), ctrl, val, RewardDestination::Controller)); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(stash), + ctrl, + val, + RewardDestination::Controller + )); } pub(crate) fn bond_validator(stash: AccountId, ctrl: AccountId, val: Balance) { bond(stash, ctrl, val); - assert_ok!(Staking::validate(Origin::signed(ctrl), ValidatorPrefs::default())); - assert_ok!(Session::set_keys(Origin::signed(ctrl), SessionKeys { other: ctrl.into() }, vec![])); + assert_ok!(Staking::validate(RuntimeOrigin::signed(ctrl), ValidatorPrefs::default())); + assert_ok!(Session::set_keys( + RuntimeOrigin::signed(ctrl), + SessionKeys { other: ctrl.into() }, + vec![] + )); } pub(crate) fn bond_nominator( @@ -676,7 +685,7 @@ pub(crate) fn bond_nominator( target: Vec, ) { bond(stash, ctrl, val); - assert_ok!(Staking::nominate(Origin::signed(ctrl), target)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(ctrl), target)); } /// Progress to the given block, triggering session and era changes as we progress. @@ -846,7 +855,7 @@ pub(crate) fn make_all_reward_payment(era: EraIndex) { // reward validators for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { let ledger = >::get(&validator_controller).unwrap(); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), ledger.stash, era)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), ledger.stash, era)); } } diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index b8a457beb5eae..49f53efbb08b0 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -155,7 +155,7 @@ pub mod pallet { type SlashDeferDuration: Get; /// The origin which can cancel a deferred slash. Root can always do this. - type SlashCancelOrigin: EnsureOrigin; + type SlashCancelOrigin: EnsureOrigin; /// Interface for interacting with a session pallet. type SessionInterface: SessionInterface; @@ -608,18 +608,18 @@ pub mod pallet { "Stash does not have enough balance to bond." 
); frame_support::assert_ok!(>::bond( - T::Origin::from(Some(stash.clone()).into()), + T::RuntimeOrigin::from(Some(stash.clone()).into()), T::Lookup::unlookup(controller.clone()), balance, RewardDestination::Staked, )); frame_support::assert_ok!(match status { crate::StakerStatus::Validator => >::validate( - T::Origin::from(Some(controller.clone()).into()), + T::RuntimeOrigin::from(Some(controller.clone()).into()), Default::default(), ), crate::StakerStatus::Nominator(votes) => >::nominate( - T::Origin::from(Some(controller.clone()).into()), + T::RuntimeOrigin::from(Some(controller.clone()).into()), votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), ), _ => Ok(()), @@ -1476,7 +1476,7 @@ pub mod pallet { /// Needed to report an accurate weight for the dispatch. Trusted by `Root` to report an /// accurate number. /// - /// Origin must be root. + /// RuntimeOrigin must be root. /// /// # /// - E: Number of history depths removed, i.e. 10 -> 7 = 3 @@ -1593,7 +1593,7 @@ pub mod pallet { /// * `min_commission`: The minimum amount of commission that each validators must maintain. /// This is checked only upon calling `validate`. Existing validators are not affected. /// - /// Origin must be Root to call this function. + /// RuntimeOrigin must be Root to call this function. /// /// NOTE: Existing nominators and validators will not be affected by this update. /// to kick people under the new limits, `chill_other` should be called. diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 85badfac769af..cc2b37d2ba910 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -44,7 +44,7 @@ fn set_staking_configs_works() { ExtBuilder::default().build_and_execute(|| { // setting works assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Set(1_500), ConfigOp::Set(2_000), ConfigOp::Set(10), @@ -61,7 +61,7 @@ fn set_staking_configs_works() { // noop does nothing assert_storage_noop!(assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Noop, @@ -72,7 +72,7 @@ fn set_staking_configs_works() { // removing works assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Remove, ConfigOp::Remove, ConfigOp::Remove, @@ -98,22 +98,22 @@ fn force_unstake_works() { add_slash(&11); // Cant transfer assert_noop!( - Balances::transfer(Origin::signed(11), 1, 10), + Balances::transfer(RuntimeOrigin::signed(11), 1, 10), BalancesError::::LiquidityRestrictions ); // Force unstake requires root. - assert_noop!(Staking::force_unstake(Origin::signed(11), 11, 2), BadOrigin); + assert_noop!(Staking::force_unstake(RuntimeOrigin::signed(11), 11, 2), BadOrigin); // Force unstake needs correct number of slashing spans (for weight calculation) assert_noop!( - Staking::force_unstake(Origin::root(), 11, 0), + Staking::force_unstake(RuntimeOrigin::root(), 11, 0), Error::::IncorrectSlashingSpans ); // We now force them to unstake - assert_ok!(Staking::force_unstake(Origin::root(), 11, 2)); + assert_ok!(Staking::force_unstake(RuntimeOrigin::root(), 11, 2)); // No longer bonded. assert_eq!(Staking::bonded(&11), None); // Transfer works. - assert_ok!(Balances::transfer(Origin::signed(11), 1, 10)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(11), 1, 10)); }); } @@ -235,19 +235,19 @@ fn change_controller_works() { assert_eq!(Staking::bonded(&11), Some(10)); // 10 can control 11 who is initially a validator. 
- assert_ok!(Staking::chill(Origin::signed(10))); + assert_ok!(Staking::chill(RuntimeOrigin::signed(10))); // change controller - assert_ok!(Staking::set_controller(Origin::signed(11), 5)); + assert_ok!(Staking::set_controller(RuntimeOrigin::signed(11), 5)); assert_eq!(Staking::bonded(&11), Some(5)); mock::start_active_era(1); // 10 is no longer in control. assert_noop!( - Staking::validate(Origin::signed(10), ValidatorPrefs::default()), + Staking::validate(RuntimeOrigin::signed(10), ValidatorPrefs::default()), Error::::NotController, ); - assert_ok!(Staking::validate(Origin::signed(5), ValidatorPrefs::default())); + assert_ok!(Staking::validate(RuntimeOrigin::signed(5), ValidatorPrefs::default())); }) } @@ -382,9 +382,13 @@ fn staking_should_work() { // --- Block 2: start_session(2); // add a new candidate for being a validator. account 3 controlled by 4. - assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller)); - assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); - assert_ok!(Session::set_keys(Origin::signed(4), SessionKeys { other: 4.into() }, vec![])); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 1500, RewardDestination::Controller)); + assert_ok!(Staking::validate(RuntimeOrigin::signed(4), ValidatorPrefs::default())); + assert_ok!(Session::set_keys( + RuntimeOrigin::signed(4), + SessionKeys { other: 4.into() }, + vec![] + )); // No effects will be seen so far. assert_eq_uvec!(validator_controllers(), vec![20, 10]); @@ -408,7 +412,7 @@ fn staking_should_work() { assert_eq_uvec!(validator_controllers(), vec![20, 4]); // --- Block 6: Unstake 4 as a validator, freeing up the balance stashed in 3 // 4 will chill - Staking::chill(Origin::signed(4)).unwrap(); + Staking::chill(RuntimeOrigin::signed(4)).unwrap(); // --- Block 7: nothing. 4 is still there. start_session(7); @@ -447,20 +451,20 @@ fn blocking_and_kicking_works() { .build_and_execute(|| { // block validator 10/11 assert_ok!(Staking::validate( - Origin::signed(10), + RuntimeOrigin::signed(10), ValidatorPrefs { blocked: true, ..Default::default() } )); // attempt to nominate from 100/101... - assert_ok!(Staking::nominate(Origin::signed(100), vec![11])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(100), vec![11])); // should have worked since we're already nominated them assert_eq!(Nominators::::get(&101).unwrap().targets, vec![11]); // kick the nominator - assert_ok!(Staking::kick(Origin::signed(10), vec![101])); + assert_ok!(Staking::kick(RuntimeOrigin::signed(10), vec![101])); // should have been kicked now assert!(Nominators::::get(&101).unwrap().targets.is_empty()); // attempt to nominate from 100/101... assert_noop!( - Staking::nominate(Origin::signed(100), vec![11]), + Staking::nominate(RuntimeOrigin::signed(100), vec![11]), Error::::BadTarget ); }); @@ -506,7 +510,7 @@ fn no_candidate_emergency_condition() { ::MinimumValidatorCount::put(10); // try to chill - let res = Staking::chill(Origin::signed(10)); + let res = Staking::chill(RuntimeOrigin::signed(10)); assert_ok!(res); let current_era = CurrentEra::::get(); @@ -542,14 +546,26 @@ fn nominating_and_rewards_should_work() { assert_eq_uvec!(validator_controllers(), vec![40, 20]); // re-validate with 11 and 31. 
- assert_ok!(Staking::validate(Origin::signed(10), Default::default())); - assert_ok!(Staking::validate(Origin::signed(30), Default::default())); + assert_ok!(Staking::validate(RuntimeOrigin::signed(10), Default::default())); + assert_ok!(Staking::validate(RuntimeOrigin::signed(30), Default::default())); // Set payee to controller. - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - assert_ok!(Staking::set_payee(Origin::signed(20), RewardDestination::Controller)); - assert_ok!(Staking::set_payee(Origin::signed(30), RewardDestination::Controller)); - assert_ok!(Staking::set_payee(Origin::signed(40), RewardDestination::Controller)); + assert_ok!(Staking::set_payee( + RuntimeOrigin::signed(10), + RewardDestination::Controller + )); + assert_ok!(Staking::set_payee( + RuntimeOrigin::signed(20), + RewardDestination::Controller + )); + assert_ok!(Staking::set_payee( + RuntimeOrigin::signed(30), + RewardDestination::Controller + )); + assert_ok!(Staking::set_payee( + RuntimeOrigin::signed(40), + RewardDestination::Controller + )); // give the man some money let initial_balance = 1000; @@ -558,11 +574,21 @@ fn nominating_and_rewards_should_work() { } // bond two account pairs and state interest in nomination. - assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21, 31])); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(1), + 2, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(2), vec![11, 21, 31])); - assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 21, 41])); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(3), + 4, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![11, 21, 41])); // the total reward for era 0 let total_payout_0 = current_total_payout_for_duration(reward_time_per_era()); @@ -714,20 +740,28 @@ fn double_staking_should_fail() { let arbitrary_value = 5; // 2 = controller, 1 stashed => ok assert_ok!(Staking::bond( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, arbitrary_value, RewardDestination::default() )); // 4 = not used so far, 1 stashed => not allowed. assert_noop!( - Staking::bond(Origin::signed(1), 4, arbitrary_value, RewardDestination::default()), + Staking::bond( + RuntimeOrigin::signed(1), + 4, + arbitrary_value, + RewardDestination::default() + ), Error::::AlreadyBonded, ); // 1 = stashed => attempting to nominate should fail. - assert_noop!(Staking::nominate(Origin::signed(1), vec![1]), Error::::NotController); + assert_noop!( + Staking::nominate(RuntimeOrigin::signed(1), vec![1]), + Error::::NotController + ); // 2 = controller => nominating should work. - assert_ok!(Staking::nominate(Origin::signed(2), vec![1])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(2), vec![1])); }); } @@ -740,14 +774,19 @@ fn double_controlling_should_fail() { let arbitrary_value = 5; // 2 = controller, 1 stashed => ok assert_ok!(Staking::bond( - Origin::signed(1), + RuntimeOrigin::signed(1), 2, arbitrary_value, RewardDestination::default(), )); // 2 = controller, 3 stashed (Note that 2 is reused.) 
=> no-op assert_noop!( - Staking::bond(Origin::signed(3), 2, arbitrary_value, RewardDestination::default()), + Staking::bond( + RuntimeOrigin::signed(3), + 2, + arbitrary_value, + RewardDestination::default() + ), Error::::AlreadyPaired, ); }); @@ -913,14 +952,14 @@ fn cannot_transfer_staked_balance() { assert_eq!(Staking::eras_stakers(active_era(), 11).total, 1000); // Confirm account 11 cannot transfer as a result assert_noop!( - Balances::transfer(Origin::signed(11), 20, 1), + Balances::transfer(RuntimeOrigin::signed(11), 20, 1), BalancesError::::LiquidityRestrictions ); // Give account 11 extra free balance let _ = Balances::make_free_balance_be(&11, 10000); // Confirm that account 11 can now transfer some balance - assert_ok!(Balances::transfer(Origin::signed(11), 20, 1)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(11), 20, 1)); }); } @@ -938,10 +977,10 @@ fn cannot_transfer_staked_balance_2() { assert_eq!(Staking::eras_stakers(active_era(), 21).total, 1000); // Confirm account 21 can transfer at most 1000 assert_noop!( - Balances::transfer(Origin::signed(21), 20, 1001), + Balances::transfer(RuntimeOrigin::signed(21), 20, 1001), BalancesError::::LiquidityRestrictions ); - assert_ok!(Balances::transfer(Origin::signed(21), 20, 1000)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(21), 20, 1000)); }); } @@ -1133,7 +1172,7 @@ fn bond_extra_works() { let _ = Balances::make_free_balance_be(&11, 1000000); // Call the bond_extra function from controller, add only 100 - assert_ok!(Staking::bond_extra(Origin::signed(11), 100)); + assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), 100)); // There should be 100 more `total` and `active` in the ledger assert_eq!( Staking::ledger(&10), @@ -1147,7 +1186,7 @@ fn bond_extra_works() { ); // Call the bond_extra function with a large number, should handle it - assert_ok!(Staking::bond_extra(Origin::signed(11), Balance::max_value())); + assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(11), Balance::max_value())); // The full amount of the funds should now be in the total and active assert_eq!( Staking::ledger(&10), @@ -1172,7 +1211,7 @@ fn bond_extra_and_withdraw_unbonded_works() { // * Once the unbonding period is done, it can actually take the funds out of the stash. ExtBuilder::default().nominate(false).build_and_execute(|| { // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(10), RewardDestination::Controller)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1203,7 +1242,7 @@ fn bond_extra_and_withdraw_unbonded_works() { ); // deposit the extra 100 units - Staking::bond_extra(Origin::signed(11), 100).unwrap(); + Staking::bond_extra(RuntimeOrigin::signed(11), 100).unwrap(); assert_eq!( Staking::ledger(&10), @@ -1243,7 +1282,7 @@ fn bond_extra_and_withdraw_unbonded_works() { ); // Unbond almost all of the funds in stash. - Staking::unbond(Origin::signed(10), 1000).unwrap(); + Staking::unbond(RuntimeOrigin::signed(10), 1000).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1256,7 +1295,7 @@ fn bond_extra_and_withdraw_unbonded_works() { ); // Attempting to free the balances now will fail. 2 eras need to pass. 
- assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(10), 0)); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1272,7 +1311,7 @@ fn bond_extra_and_withdraw_unbonded_works() { mock::start_active_era(3); // nothing yet - assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(10), 0)); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1287,7 +1326,7 @@ fn bond_extra_and_withdraw_unbonded_works() { // trigger next era. mock::start_active_era(5); - assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(10), 0)); // Now the value is free and the staking ledger is updated. assert_eq!( Staking::ledger(&10), @@ -1311,7 +1350,7 @@ fn too_many_unbond_calls_should_not_work() { // There is only 1 chunk per era, so we need to be in a new era to create a chunk. current_era = i as u32; mock::start_active_era(current_era); - assert_ok!(Staking::unbond(Origin::signed(10), 1)); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1)); } current_era += 1; @@ -1319,24 +1358,24 @@ fn too_many_unbond_calls_should_not_work() { // This chunk is locked at `current_era` through `current_era + 2` (because BondingDuration // == 3). - assert_ok!(Staking::unbond(Origin::signed(10), 1)); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1)); assert_eq!( Staking::ledger(&10).unwrap().unlocking.len(), MaxUnlockingChunks::get() as usize ); // can't do more. - assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); + assert_noop!(Staking::unbond(RuntimeOrigin::signed(10), 1), Error::::NoMoreChunks); current_era += 2; mock::start_active_era(current_era); - assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); + assert_noop!(Staking::unbond(RuntimeOrigin::signed(10), 1), Error::::NoMoreChunks); // free up everything except the most recently added chunk. - assert_ok!(Staking::withdraw_unbonded(Origin::signed(10), 0)); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(10), 0)); assert_eq!(Staking::ledger(&10).unwrap().unlocking.len(), 1); // Can add again. - assert_ok!(Staking::unbond(Origin::signed(10), 1)); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1)); assert_eq!(Staking::ledger(&10).unwrap().unlocking.len(), 2); }) } @@ -1350,7 +1389,7 @@ fn rebond_works() { // * it can re-bond a portion of the funds scheduled to unlock. ExtBuilder::default().nominate(false).build_and_execute(|| { // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(10), RewardDestination::Controller)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1374,10 +1413,10 @@ fn rebond_works() { assert_eq!(active_era(), 2); // Try to rebond some funds. We get an error since no fund is unbonded. - assert_noop!(Staking::rebond(Origin::signed(10), 500), Error::::NoUnlockChunk); + assert_noop!(Staking::rebond(RuntimeOrigin::signed(10), 500), Error::::NoUnlockChunk); // Unbond almost all of the funds in stash. - Staking::unbond(Origin::signed(10), 900).unwrap(); + Staking::unbond(RuntimeOrigin::signed(10), 900).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1390,7 +1429,7 @@ fn rebond_works() { ); // Re-bond all the funds unbonded. 
- Staking::rebond(Origin::signed(10), 900).unwrap(); + Staking::rebond(RuntimeOrigin::signed(10), 900).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1403,7 +1442,7 @@ fn rebond_works() { ); // Unbond almost all of the funds in stash. - Staking::unbond(Origin::signed(10), 900).unwrap(); + Staking::unbond(RuntimeOrigin::signed(10), 900).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1416,7 +1455,7 @@ fn rebond_works() { ); // Re-bond part of the funds unbonded. - Staking::rebond(Origin::signed(10), 500).unwrap(); + Staking::rebond(RuntimeOrigin::signed(10), 500).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1429,7 +1468,7 @@ fn rebond_works() { ); // Re-bond the remainder of the funds unbonded. - Staking::rebond(Origin::signed(10), 500).unwrap(); + Staking::rebond(RuntimeOrigin::signed(10), 500).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1442,9 +1481,9 @@ fn rebond_works() { ); // Unbond parts of the funds in stash. - Staking::unbond(Origin::signed(10), 300).unwrap(); - Staking::unbond(Origin::signed(10), 300).unwrap(); - Staking::unbond(Origin::signed(10), 300).unwrap(); + Staking::unbond(RuntimeOrigin::signed(10), 300).unwrap(); + Staking::unbond(RuntimeOrigin::signed(10), 300).unwrap(); + Staking::unbond(RuntimeOrigin::signed(10), 300).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1457,7 +1496,7 @@ fn rebond_works() { ); // Re-bond part of the funds unbonded. - Staking::rebond(Origin::signed(10), 500).unwrap(); + Staking::rebond(RuntimeOrigin::signed(10), 500).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1476,7 +1515,7 @@ fn rebond_is_fifo() { // Rebond should proceed by reversing the most recent bond operations. ExtBuilder::default().nominate(false).build_and_execute(|| { // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(10), RewardDestination::Controller)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1499,7 +1538,7 @@ fn rebond_is_fifo() { mock::start_active_era(2); // Unbond some of the funds in stash. - Staking::unbond(Origin::signed(10), 400).unwrap(); + Staking::unbond(RuntimeOrigin::signed(10), 400).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1514,7 +1553,7 @@ fn rebond_is_fifo() { mock::start_active_era(3); // Unbond more of the funds in stash. - Staking::unbond(Origin::signed(10), 300).unwrap(); + Staking::unbond(RuntimeOrigin::signed(10), 300).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1532,7 +1571,7 @@ fn rebond_is_fifo() { mock::start_active_era(4); // Unbond yet more of the funds in stash. - Staking::unbond(Origin::signed(10), 200).unwrap(); + Staking::unbond(RuntimeOrigin::signed(10), 200).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1549,7 +1588,7 @@ fn rebond_is_fifo() { ); // Re-bond half of the unbonding funds. - Staking::rebond(Origin::signed(10), 400).unwrap(); + Staking::rebond(RuntimeOrigin::signed(10), 400).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1572,7 +1611,7 @@ fn rebond_emits_right_value_in_event() { // and the rebond event emits the actual value rebonded. ExtBuilder::default().nominate(false).build_and_execute(|| { // Set payee to controller. 
avoids confusion - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(10), RewardDestination::Controller)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1581,7 +1620,7 @@ fn rebond_emits_right_value_in_event() { mock::start_active_era(1); // Unbond almost all of the funds in stash. - Staking::unbond(Origin::signed(10), 900).unwrap(); + Staking::unbond(RuntimeOrigin::signed(10), 900).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1594,7 +1633,7 @@ fn rebond_emits_right_value_in_event() { ); // Re-bond less than the total - Staking::rebond(Origin::signed(10), 100).unwrap(); + Staking::rebond(RuntimeOrigin::signed(10), 100).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1609,7 +1648,7 @@ fn rebond_emits_right_value_in_event() { assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 100)); // Re-bond way more than available - Staking::rebond(Origin::signed(10), 100_000).unwrap(); + Staking::rebond(RuntimeOrigin::signed(10), 100_000).unwrap(); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -1699,11 +1738,14 @@ fn reap_stash_works() { // stash is not reapable assert_noop!( - Staking::reap_stash(Origin::signed(20), 11, 0), + Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0), Error::::FundedTarget ); // controller or any other account is not reapable - assert_noop!(Staking::reap_stash(Origin::signed(20), 10, 0), Error::::NotStash); + assert_noop!( + Staking::reap_stash(RuntimeOrigin::signed(20), 10, 0), + Error::::NotStash + ); // no easy way to cause an account to go below ED, we tweak their staking ledger // instead. @@ -1719,7 +1761,7 @@ fn reap_stash_works() { ); // reap-able - assert_ok!(Staking::reap_stash(Origin::signed(20), 11, 0)); + assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0)); // then assert!(!>::contains_key(&10)); @@ -1736,7 +1778,10 @@ fn switching_roles() { ExtBuilder::default().nominate(false).build_and_execute(|| { // Reset reward destination for i in &[10, 20] { - assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller)); + assert_ok!(Staking::set_payee( + RuntimeOrigin::signed(*i), + RewardDestination::Controller + )); } assert_eq_uvec!(validator_controllers(), vec![20, 10]); @@ -1747,16 +1792,20 @@ fn switching_roles() { } // add 2 nominators - assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 5])); + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 2, 2000, RewardDestination::Controller)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(2), vec![11, 5])); - assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 1])); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 500, RewardDestination::Controller)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![21, 1])); // add a new validator candidate - assert_ok!(Staking::bond(Origin::signed(5), 6, 1000, RewardDestination::Controller)); - assert_ok!(Staking::validate(Origin::signed(6), ValidatorPrefs::default())); - assert_ok!(Session::set_keys(Origin::signed(6), SessionKeys { other: 6.into() }, vec![])); + assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 6, 1000, RewardDestination::Controller)); + assert_ok!(Staking::validate(RuntimeOrigin::signed(6), 
ValidatorPrefs::default())); + assert_ok!(Session::set_keys( + RuntimeOrigin::signed(6), + SessionKeys { other: 6.into() }, + vec![] + )); mock::start_active_era(1); @@ -1764,8 +1813,12 @@ fn switching_roles() { assert_eq_uvec!(validator_controllers(), vec![6, 10]); // 2 decides to be a validator. Consequences: - assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default())); - assert_ok!(Session::set_keys(Origin::signed(2), SessionKeys { other: 2.into() }, vec![])); + assert_ok!(Staking::validate(RuntimeOrigin::signed(2), ValidatorPrefs::default())); + assert_ok!(Session::set_keys( + RuntimeOrigin::signed(2), + SessionKeys { other: 2.into() }, + vec![] + )); // new stakes: // 10: 1000 self vote // 20: 1000 self vote + 250 vote @@ -1817,15 +1870,20 @@ fn bond_with_no_staked_value() { .build_and_execute(|| { // Can't bond with 1 assert_noop!( - Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller), + Staking::bond(RuntimeOrigin::signed(1), 2, 1, RewardDestination::Controller), Error::::InsufficientBond, ); // bonded with absolute minimum value possible. - assert_ok!(Staking::bond(Origin::signed(1), 2, 5, RewardDestination::Controller)); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(1), + 2, + 5, + RewardDestination::Controller + )); assert_eq!(Balances::locks(&1)[0].amount, 5); // unbonding even 1 will cause all to be unbonded. - assert_ok!(Staking::unbond(Origin::signed(2), 1)); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(2), 1)); assert_eq!( Staking::ledger(2), Some(StakingLedger { @@ -1841,14 +1899,14 @@ fn bond_with_no_staked_value() { mock::start_active_era(2); // not yet removed. - assert_ok!(Staking::withdraw_unbonded(Origin::signed(2), 0)); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(2), 0)); assert!(Staking::ledger(2).is_some()); assert_eq!(Balances::locks(&1)[0].amount, 5); mock::start_active_era(3); // poof. Account 1 is removed from the staking system. - assert_ok!(Staking::withdraw_unbonded(Origin::signed(2), 0)); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(2), 0)); assert!(Staking::ledger(2).is_none()); assert_eq!(Balances::locks(&1).len(), 0); }); @@ -1862,16 +1920,24 @@ fn bond_with_little_staked_value_bounded() { .minimum_validator_count(1) .build_and_execute(|| { // setup - assert_ok!(Staking::chill(Origin::signed(30))); - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); + assert_ok!(Staking::chill(RuntimeOrigin::signed(30))); + assert_ok!(Staking::set_payee( + RuntimeOrigin::signed(10), + RewardDestination::Controller + )); let init_balance_2 = Balances::free_balance(&2); let init_balance_10 = Balances::free_balance(&10); // Stingy validator. 
- assert_ok!(Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller)); - assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default())); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(1), + 2, + 1, + RewardDestination::Controller + )); + assert_ok!(Staking::validate(RuntimeOrigin::signed(2), ValidatorPrefs::default())); assert_ok!(Session::set_keys( - Origin::signed(2), + RuntimeOrigin::signed(2), SessionKeys { other: 2.into() }, vec![] )); @@ -1944,11 +2010,21 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { let _ = Balances::make_free_balance_be(i, initial_balance); } - assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21, 31])); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(1), + 2, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(2), vec![11, 11, 11, 21, 31])); - assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 31])); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(3), + 4, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![21, 31])); // winners should be 21 and 31. Otherwise this election is taking duplicates into // account. @@ -1989,11 +2065,21 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { let _ = Balances::make_free_balance_be(i, initial_balance); } - assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21])); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(1), + 2, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(2), vec![11, 11, 11, 21])); - assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(4), vec![21])); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(3), + 4, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![21])); // winners should be 21 and 11. let supports = ::ElectionProvider::elect().unwrap(); @@ -2025,8 +2111,8 @@ fn phragmen_should_not_overflow() { // This is the maximum value that we can have as the outcome of CurrencyToVote. type Votes = u64; - let _ = Staking::chill(Origin::signed(10)); - let _ = Staking::chill(Origin::signed(20)); + let _ = Staking::chill(RuntimeOrigin::signed(10)); + let _ = Staking::chill(RuntimeOrigin::signed(20)); bond_validator(3, 2, Votes::max_value() as Balance); bond_validator(5, 4, Votes::max_value() as Balance); @@ -2067,7 +2153,7 @@ fn reward_validator_slashing_validator_does_not_overflow() { ErasStakers::::insert(0, 11, &exposure); ErasStakersClipped::::insert(0, 11, exposure); ErasValidatorReward::::insert(0, stake); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 0)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 0)); assert_eq!(Balances::total_balance(&11), stake * 2); // Set staker @@ -2075,7 +2161,8 @@ fn reward_validator_slashing_validator_does_not_overflow() { let _ = Balances::make_free_balance_be(&2, stake); // only slashes out of bonded stake are applied. without this line, it is 0. 
- Staking::bond(Origin::signed(2), 20000, stake - 1, RewardDestination::default()).unwrap(); + Staking::bond(RuntimeOrigin::signed(2), 20000, stake - 1, RewardDestination::default()) + .unwrap(); // Override exposure of 11 ErasStakers::::insert( 0, @@ -2151,7 +2238,7 @@ fn unbonded_balance_is_not_slashable() { // total amount staked is slashable. assert_eq!(Staking::slashable_balance_of(&11), 1000); - assert_ok!(Staking::unbond(Origin::signed(10), 800)); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 800)); // only the active portion. assert_eq!(Staking::slashable_balance_of(&11), 200); @@ -2207,7 +2294,7 @@ fn offence_forces_new_era() { #[test] fn offence_ensures_new_era_without_clobbering() { ExtBuilder::default().build_and_execute(|| { - assert_ok!(Staking::force_new_era_always(Origin::root())); + assert_ok!(Staking::force_new_era_always(RuntimeOrigin::root())); assert_eq!(Staking::force_era(), Forcing::ForceAlways); on_offence_now( @@ -2288,7 +2375,7 @@ fn slash_in_old_span_does_not_deselect() { mock::start_active_era(2); - Staking::validate(Origin::signed(10), Default::default()).unwrap(); + Staking::validate(RuntimeOrigin::signed(10), Default::default()).unwrap(); assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); assert!(!Session::validators().contains(&11)); @@ -2547,10 +2634,10 @@ fn garbage_collection_after_slashing() { // reap_stash respects num_slashing_spans so that weight is accurate assert_noop!( - Staking::reap_stash(Origin::signed(20), 11, 0), + Staking::reap_stash(RuntimeOrigin::signed(20), 11, 0), Error::::IncorrectSlashingSpans ); - assert_ok!(Staking::reap_stash(Origin::signed(20), 11, 2)); + assert_ok!(Staking::reap_stash(RuntimeOrigin::signed(20), 11, 2)); assert!(::SlashingSpans::get(&11).is_none()); assert_eq!(::SpanSlash::get(&(11, 0)).amount(), &0); @@ -2713,7 +2800,7 @@ fn slashes_are_summed_across_spans() { assert_eq!(Balances::free_balance(21), 1900); // 21 has been force-chilled. re-signal intent to validate. - Staking::validate(Origin::signed(20), Default::default()).unwrap(); + Staking::validate(RuntimeOrigin::signed(20), Default::default()).unwrap(); mock::start_active_era(4); @@ -2832,8 +2919,8 @@ fn retroactive_deferred_slashes_one_before() { // unbond at slash era. mock::start_active_era(2); - assert_ok!(Staking::chill(Origin::signed(10))); - assert_ok!(Staking::unbond(Origin::signed(10), 100)); + assert_ok!(Staking::chill(RuntimeOrigin::signed(10))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 100)); mock::start_active_era(3); on_offence_in_era( @@ -2865,7 +2952,7 @@ fn retroactive_deferred_slashes_one_before() { // their ledger has already been slashed. 
assert_eq!(Staking::ledger(10).unwrap().total, 900); - assert_ok!(Staking::unbond(Origin::signed(10), 1000)); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1000)); assert_eq!(Staking::ledger(10).unwrap().total, 900); }) } @@ -2891,8 +2978,8 @@ fn staker_cannot_bail_deferred_slash() { ); // now we chill - assert_ok!(Staking::chill(Origin::signed(100))); - assert_ok!(Staking::unbond(Origin::signed(100), 500)); + assert_ok!(Staking::chill(RuntimeOrigin::signed(100))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(100), 500)); assert_eq!(Staking::current_era().unwrap(), 1); assert_eq!(active_era(), 1); @@ -2927,7 +3014,9 @@ fn staker_cannot_bail_deferred_slash() { assert_eq!(active_era(), 3); // and cannot yet unbond: - assert_storage_noop!(assert!(Staking::withdraw_unbonded(Origin::signed(100), 0).is_ok())); + assert_storage_noop!(assert!( + Staking::withdraw_unbonded(RuntimeOrigin::signed(100), 0).is_ok() + )); assert_eq!( Ledger::::get(100).unwrap().unlocking.into_inner(), vec![UnlockChunk { era: 4u32, value: 500 as Balance }], @@ -2976,12 +3065,12 @@ fn remove_deferred() { // fails if empty assert_noop!( - Staking::cancel_deferred_slash(Origin::root(), 1, vec![]), + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![]), Error::::EmptyTargets ); // cancel one of them. - assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 4, vec![0])); + assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0])); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); @@ -3062,21 +3151,21 @@ fn remove_multi_deferred() { // fails if list is not sorted assert_noop!( - Staking::cancel_deferred_slash(Origin::root(), 1, vec![2, 0, 4]), + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![2, 0, 4]), Error::::NotSortedAndUnique ); // fails if list is not unique assert_noop!( - Staking::cancel_deferred_slash(Origin::root(), 1, vec![0, 2, 2]), + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![0, 2, 2]), Error::::NotSortedAndUnique ); // fails if bad index assert_noop!( - Staking::cancel_deferred_slash(Origin::root(), 1, vec![1, 2, 3, 4, 5]), + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![1, 2, 3, 4, 5]), Error::::InvalidSlashIndex ); - assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 4, vec![0, 2, 4])); + assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0, 2, 4])); let slashes = ::UnappliedSlashes::get(&4); assert_eq!(slashes.len(), 2); @@ -3126,7 +3215,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid assert!(nominations.submitted_in < last_slash); // actually re-bond the slashed validator - assert_ok!(Staking::validate(Origin::signed(10), Default::default())); + assert_ok!(Staking::validate(RuntimeOrigin::signed(10), Default::default())); mock::start_active_era(2); let exposure_11 = Staking::eras_stakers(active_era(), &11); @@ -3353,19 +3442,19 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // Last kept is 1: assert!(current_era - Staking::history_depth() == 1); assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, 0), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 0), // Fail: Era out of history Error::::InvalidEraToReward.with_weight(err_weight) ); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 2)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); + 
assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2)); assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, 2), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2), // Fail: Double claim Error::::AlreadyClaimed.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, active_era), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, active_era), // Fail: Era not finished yet Error::::InvalidEraToReward.with_weight(err_weight) ); @@ -3486,12 +3575,12 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( let balance = 10_000 + i as Balance; Balances::make_free_balance_be(&stash, balance); assert_ok!(Staking::bond( - Origin::signed(stash), + RuntimeOrigin::signed(stash), controller, balance, RewardDestination::Stash )); - assert_ok!(Staking::nominate(Origin::signed(controller), vec![11])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![11])); } mock::start_active_era(1); @@ -3519,16 +3608,16 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( fn set_history_depth_works() { ExtBuilder::default().build_and_execute(|| { mock::start_active_era(10); - Staking::set_history_depth(Origin::root(), 20, 0).unwrap(); + Staking::set_history_depth(RuntimeOrigin::root(), 20, 0).unwrap(); assert!(::ErasTotalStake::contains_key(10 - 4)); assert!(::ErasTotalStake::contains_key(10 - 5)); - Staking::set_history_depth(Origin::root(), 4, 0).unwrap(); + Staking::set_history_depth(RuntimeOrigin::root(), 4, 0).unwrap(); assert!(::ErasTotalStake::contains_key(10 - 4)); assert!(!::ErasTotalStake::contains_key(10 - 5)); - Staking::set_history_depth(Origin::root(), 3, 0).unwrap(); + Staking::set_history_depth(RuntimeOrigin::root(), 3, 0).unwrap(); assert!(!::ErasTotalStake::contains_key(10 - 4)); assert!(!::ErasTotalStake::contains_key(10 - 5)); - Staking::set_history_depth(Origin::root(), 8, 0).unwrap(); + Staking::set_history_depth(RuntimeOrigin::root(), 8, 0).unwrap(); assert!(!::ErasTotalStake::contains_key(10 - 4)); assert!(!::ErasTotalStake::contains_key(10 - 5)); }); @@ -3570,7 +3659,7 @@ fn test_payout_stakers() { let pre_payout_total_issuance = Balances::total_issuance(); RewardOnUnbalanceWasCalled::set(false); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); assert_eq_error_rate!( Balances::total_issuance(), pre_payout_total_issuance + actual_paid_out, @@ -3611,7 +3700,7 @@ fn test_payout_stakers() { mock::start_active_era(i); RewardOnUnbalanceWasCalled::set(false); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, i - 1)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, i - 1)); assert_eq_error_rate!( Balances::total_issuance(), pre_payout_total_issuance + actual_paid_out, @@ -3640,8 +3729,8 @@ fn test_payout_stakers() { } // We clean it up as history passes - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 15)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 98)); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -3654,9 +3743,9 @@ fn test_payout_stakers() { ); // Out of order claims works. 
- assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 69)); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 23)); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 42)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 69)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 23)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 42)); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -3696,12 +3785,12 @@ fn payout_stakers_handles_basic_errors() { // Wrong Era, too big assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, 2), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 2), Error::::InvalidEraToReward.with_weight(err_weight) ); // Wrong Staker assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 10, 1), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 10, 1), Error::::NotStash.with_weight(err_weight) ); @@ -3714,23 +3803,23 @@ fn payout_stakers_handles_basic_errors() { // We are at era 99, with history depth of 84 // We should be able to payout era 15 through 98 (84 total eras), but not 14 or 99. assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, 14), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 14), Error::::InvalidEraToReward.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, 99), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 99), Error::::InvalidEraToReward.with_weight(err_weight) ); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 15)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 98)); // Can't claim again assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, 15), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 15), Error::::AlreadyClaimed.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, 98), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 98), Error::::AlreadyClaimed.with_weight(err_weight) ); }); @@ -3780,7 +3869,7 @@ fn payout_stakers_handles_weight_refund() { // Collect payouts when there are no nominators let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 1 }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(20)); + let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); @@ -3793,7 +3882,7 @@ fn payout_stakers_handles_weight_refund() { // Collect payouts for an era where the validator did not receive any points. let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 2 }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(20)); + let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); @@ -3806,7 +3895,7 @@ fn payout_stakers_handles_weight_refund() { // Collect payouts when the validator has `half_max_nom_rewarded` nominators. 
let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 3 }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(20)); + let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), half_max_nom_rewarded_weight); @@ -3829,14 +3918,14 @@ fn payout_stakers_handles_weight_refund() { // Collect payouts when the validator had `half_max_nom_rewarded` nominators. let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(20)); + let result = call.dispatch(RuntimeOrigin::signed(20)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), max_nom_rewarded_weight); // Try and collect payouts for an era that has already been collected. let call = TestCall::Staking(StakingCall::payout_stakers { validator_stash: 11, era: 5 }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(20)); + let result = call.dispatch(RuntimeOrigin::signed(20)); assert!(result.is_err()); // When there is an error the consumed weight == weight when there are 0 nominator payouts. assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); @@ -3965,7 +4054,7 @@ fn payout_creates_controller() { bond_nominator(1234, 1337, 100, vec![11]); // kill controller - assert_ok!(Balances::transfer(Origin::signed(1337), 1234, 100)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1337), 1234, 100)); assert_eq!(Balances::free_balance(1337), 0); mock::start_active_era(1); @@ -3973,7 +4062,7 @@ fn payout_creates_controller() { // compute and ensure the reward amount is greater than zero. let _ = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(2); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); // Controller is created assert!(Balances::free_balance(1337) > 0); @@ -3991,7 +4080,7 @@ fn payout_to_any_account_works() { bond_nominator(1234, 1337, 100, vec![11]); // Update payout location - assert_ok!(Staking::set_payee(Origin::signed(1337), RewardDestination::Account(42))); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(1337), RewardDestination::Account(42))); // Reward Destination account doesn't exist assert_eq!(Balances::free_balance(42), 0); @@ -4001,7 +4090,7 @@ fn payout_to_any_account_works() { // compute and ensure the reward amount is greater than zero. let _ = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(2); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 1)); // Payment is successful assert!(Balances::free_balance(42) > 0); @@ -4129,8 +4218,8 @@ fn cannot_rebond_to_lower_than_ed() { ); // unbond all of it. must be chilled first. 
- assert_ok!(Staking::chill(Origin::signed(20))); - assert_ok!(Staking::unbond(Origin::signed(20), 10 * 1000)); + assert_ok!(Staking::chill(RuntimeOrigin::signed(20))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(20), 10 * 1000)); assert_eq!( Staking::ledger(&20).unwrap(), StakingLedger { @@ -4143,7 +4232,10 @@ fn cannot_rebond_to_lower_than_ed() { ); // now bond a wee bit more - assert_noop!(Staking::rebond(Origin::signed(20), 5), Error::::InsufficientBond); + assert_noop!( + Staking::rebond(RuntimeOrigin::signed(20), 5), + Error::::InsufficientBond + ); }) } @@ -4166,8 +4258,8 @@ fn cannot_bond_extra_to_lower_than_ed() { ); // unbond all of it. must be chilled first. - assert_ok!(Staking::chill(Origin::signed(20))); - assert_ok!(Staking::unbond(Origin::signed(20), 10 * 1000)); + assert_ok!(Staking::chill(RuntimeOrigin::signed(20))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(20), 10 * 1000)); assert_eq!( Staking::ledger(&20).unwrap(), StakingLedger { @@ -4181,7 +4273,7 @@ fn cannot_bond_extra_to_lower_than_ed() { // now bond a wee bit more assert_noop!( - Staking::bond_extra(Origin::signed(21), 5), + Staking::bond_extra(RuntimeOrigin::signed(21), 5), Error::::InsufficientBond, ); }) @@ -4207,9 +4299,9 @@ fn do_not_die_when_active_is_ed() { ); // when unbond all of it except ed. - assert_ok!(Staking::unbond(Origin::signed(20), 999 * ed)); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(20), 999 * ed)); start_active_era(3); - assert_ok!(Staking::withdraw_unbonded(Origin::signed(20), 100)); + assert_ok!(Staking::withdraw_unbonded(RuntimeOrigin::signed(20), 100)); // then assert_eq!( @@ -4312,7 +4404,7 @@ mod election_data_provider { ); // resubmit and it is back - assert_ok!(Staking::nominate(Origin::signed(100), vec![11, 21])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(100), vec![11, 21])); assert_eq!( ::electing_voters(None) .unwrap() @@ -4470,13 +4562,13 @@ mod election_data_provider { assert_eq!(staking_events().len(), 3); assert_eq!(*staking_events().last().unwrap(), Event::StakersElected); - Staking::force_no_eras(Origin::root()).unwrap(); + Staking::force_no_eras(RuntimeOrigin::root()).unwrap(); assert_eq!(Staking::next_election_prediction(System::block_number()), u64::MAX); - Staking::force_new_era_always(Origin::root()).unwrap(); + Staking::force_new_era_always(RuntimeOrigin::root()).unwrap(); assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5); - Staking::force_new_era(Origin::root()).unwrap(); + Staking::force_new_era(RuntimeOrigin::root()).unwrap(); assert_eq!(Staking::next_election_prediction(System::block_number()), 45 + 5); // Do a fail election @@ -4526,40 +4618,51 @@ fn min_bond_checks_work() { .min_validator_bond(1_500) .build_and_execute(|| { // 500 is not enough for any role - assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller)); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(3), + 4, + 500, + RewardDestination::Controller + )); assert_noop!( - Staking::nominate(Origin::signed(4), vec![1]), + Staking::nominate(RuntimeOrigin::signed(4), vec![1]), Error::::InsufficientBond ); assert_noop!( - Staking::validate(Origin::signed(4), ValidatorPrefs::default()), + Staking::validate(RuntimeOrigin::signed(4), ValidatorPrefs::default()), Error::::InsufficientBond, ); // 1000 is enough for nominator - assert_ok!(Staking::bond_extra(Origin::signed(3), 500)); - assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); + assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(3), 500)); + 
assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1])); assert_noop!( - Staking::validate(Origin::signed(4), ValidatorPrefs::default()), + Staking::validate(RuntimeOrigin::signed(4), ValidatorPrefs::default()), Error::::InsufficientBond, ); // 1500 is enough for validator - assert_ok!(Staking::bond_extra(Origin::signed(3), 500)); - assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); - assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); + assert_ok!(Staking::bond_extra(RuntimeOrigin::signed(3), 500)); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1])); + assert_ok!(Staking::validate(RuntimeOrigin::signed(4), ValidatorPrefs::default())); // Can't unbond anything as validator - assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::::InsufficientBond); + assert_noop!( + Staking::unbond(RuntimeOrigin::signed(4), 500), + Error::::InsufficientBond + ); // Once they are a nominator, they can unbond 500 - assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); - assert_ok!(Staking::unbond(Origin::signed(4), 500)); - assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::::InsufficientBond); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1])); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 500)); + assert_noop!( + Staking::unbond(RuntimeOrigin::signed(4), 500), + Error::::InsufficientBond + ); // Once they are chilled they can unbond everything - assert_ok!(Staking::chill(Origin::signed(4))); - assert_ok!(Staking::unbond(Origin::signed(4), 1000)); + assert_ok!(Staking::chill(RuntimeOrigin::signed(4))); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 1000)); }) } @@ -4585,21 +4688,21 @@ fn chill_other_works() { // Nominator assert_ok!(Staking::bond( - Origin::signed(a), + RuntimeOrigin::signed(a), b, 1000, RewardDestination::Controller )); - assert_ok!(Staking::nominate(Origin::signed(b), vec![1])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(b), vec![1])); // Validator assert_ok!(Staking::bond( - Origin::signed(c), + RuntimeOrigin::signed(c), d, 1500, RewardDestination::Controller )); - assert_ok!(Staking::validate(Origin::signed(d), ValidatorPrefs::default())); + assert_ok!(Staking::validate(RuntimeOrigin::signed(d), ValidatorPrefs::default())); } // To chill other users, we need to: @@ -4612,17 +4715,17 @@ fn chill_other_works() { // Can't chill these users assert_noop!( - Staking::chill_other(Origin::signed(1337), 1), + Staking::chill_other(RuntimeOrigin::signed(1337), 1), Error::::CannotChillOther ); assert_noop!( - Staking::chill_other(Origin::signed(1337), 3), + Staking::chill_other(RuntimeOrigin::signed(1337), 3), Error::::CannotChillOther ); // Change the minimum bond... but no limits. 
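// A reading aid for the `set_staking_configs` calls in these tests: the six `ConfigOp`
// arguments are, in order, min nominator bond, min validator bond, max nominator count,
// max validator count, chill threshold and min commission (order assumed from the pallet's
// call signature at this point in the series); `Set` writes a value, `Remove` clears it,
// and `Noop` leaves it untouched.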
assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Set(1_500), ConfigOp::Set(2_000), ConfigOp::Remove, @@ -4633,17 +4736,17 @@ fn chill_other_works() { // Still can't chill these users assert_noop!( - Staking::chill_other(Origin::signed(1337), 1), + Staking::chill_other(RuntimeOrigin::signed(1337), 1), Error::::CannotChillOther ); assert_noop!( - Staking::chill_other(Origin::signed(1337), 3), + Staking::chill_other(RuntimeOrigin::signed(1337), 3), Error::::CannotChillOther ); // Add limits, but no threshold assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Set(10), @@ -4654,17 +4757,17 @@ fn chill_other_works() { // Still can't chill these users assert_noop!( - Staking::chill_other(Origin::signed(1337), 1), + Staking::chill_other(RuntimeOrigin::signed(1337), 1), Error::::CannotChillOther ); assert_noop!( - Staking::chill_other(Origin::signed(1337), 3), + Staking::chill_other(RuntimeOrigin::signed(1337), 3), Error::::CannotChillOther ); // Add threshold, but no limits assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Remove, @@ -4675,17 +4778,17 @@ fn chill_other_works() { // Still can't chill these users assert_noop!( - Staking::chill_other(Origin::signed(1337), 1), + Staking::chill_other(RuntimeOrigin::signed(1337), 1), Error::::CannotChillOther ); assert_noop!( - Staking::chill_other(Origin::signed(1337), 3), + Staking::chill_other(RuntimeOrigin::signed(1337), 3), Error::::CannotChillOther ); // Add threshold and limits assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Set(10), @@ -4703,19 +4806,19 @@ fn chill_other_works() { for i in 6..15 { let b = 4 * i + 1; let d = 4 * i + 3; - assert_ok!(Staking::chill_other(Origin::signed(1337), b)); - assert_ok!(Staking::chill_other(Origin::signed(1337), d)); + assert_ok!(Staking::chill_other(RuntimeOrigin::signed(1337), b)); + assert_ok!(Staking::chill_other(RuntimeOrigin::signed(1337), d)); } // chill a nominator. Limit is not reached, not chill-able assert_eq!(Nominators::::count(), 7); assert_noop!( - Staking::chill_other(Origin::signed(1337), 1), + Staking::chill_other(RuntimeOrigin::signed(1337), 1), Error::::CannotChillOther ); // chill a validator. Limit is reached, chill-able. 
assert_eq!(Validators::::count(), 9); - assert_ok!(Staking::chill_other(Origin::signed(1337), 3)); + assert_ok!(Staking::chill_other(RuntimeOrigin::signed(1337), 3)); }) } @@ -4730,7 +4833,7 @@ fn capped_stakers_works() { // Change the maximums let max = 10; assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Set(10), ConfigOp::Set(10), ConfigOp::Set(max), @@ -4748,7 +4851,10 @@ fn capped_stakers_works() { RewardDestination::Controller, ) .unwrap(); - assert_ok!(Staking::validate(Origin::signed(controller), ValidatorPrefs::default())); + assert_ok!(Staking::validate( + RuntimeOrigin::signed(controller), + ValidatorPrefs::default() + )); some_existing_validator = controller; } @@ -4761,7 +4867,7 @@ fn capped_stakers_works() { .unwrap(); assert_noop!( - Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default()), + Staking::validate(RuntimeOrigin::signed(last_validator), ValidatorPrefs::default()), Error::::TooManyValidators, ); @@ -4774,7 +4880,7 @@ fn capped_stakers_works() { RewardDestination::Controller, ) .unwrap(); - assert_ok!(Staking::nominate(Origin::signed(controller), vec![1])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![1])); some_existing_nominator = controller; } @@ -4786,21 +4892,21 @@ fn capped_stakers_works() { ) .unwrap(); assert_noop!( - Staking::nominate(Origin::signed(last_nominator), vec![1]), + Staking::nominate(RuntimeOrigin::signed(last_nominator), vec![1]), Error::::TooManyNominators ); // Re-nominate works fine - assert_ok!(Staking::nominate(Origin::signed(some_existing_nominator), vec![1])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(some_existing_nominator), vec![1])); // Re-validate works fine assert_ok!(Staking::validate( - Origin::signed(some_existing_validator), + RuntimeOrigin::signed(some_existing_validator), ValidatorPrefs::default() )); // No problem when we set to `None` again assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Noop, ConfigOp::Noop, ConfigOp::Remove, @@ -4808,8 +4914,11 @@ fn capped_stakers_works() { ConfigOp::Noop, ConfigOp::Noop, )); - assert_ok!(Staking::nominate(Origin::signed(last_nominator), vec![1])); - assert_ok!(Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default())); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(last_nominator), vec![1])); + assert_ok!(Staking::validate( + RuntimeOrigin::signed(last_validator), + ValidatorPrefs::default() + )); }) } @@ -4818,7 +4927,7 @@ fn min_commission_works() { ExtBuilder::default().build_and_execute(|| { // account 10 controls the stash from account 11 assert_ok!(Staking::validate( - Origin::signed(10), + RuntimeOrigin::signed(10), ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } )); @@ -4832,7 +4941,7 @@ fn min_commission_works() { ); assert_ok!(Staking::set_staking_configs( - Origin::root(), + RuntimeOrigin::root(), ConfigOp::Remove, ConfigOp::Remove, ConfigOp::Remove, @@ -4844,7 +4953,7 @@ fn min_commission_works() { // can't make it less than 10 now assert_noop!( Staking::validate( - Origin::signed(10), + RuntimeOrigin::signed(10), ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } ), Error::::CommissionTooLow @@ -4852,12 +4961,12 @@ fn min_commission_works() { // can only change to higher. 
assert_ok!(Staking::validate( - Origin::signed(10), + RuntimeOrigin::signed(10), ValidatorPrefs { commission: Perbill::from_percent(10), blocked: false } )); assert_ok!(Staking::validate( - Origin::signed(10), + RuntimeOrigin::signed(10), ValidatorPrefs { commission: Perbill::from_percent(15), blocked: false } )); }) @@ -4940,7 +5049,7 @@ fn change_of_max_nominations() { assert_eq!(Staking::electing_voters(None).unwrap().len(), 3 + 1); // now one of them can revive themselves by re-nominating to a proper value. - assert_ok!(Staking::nominate(Origin::signed(71), vec![1])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(71), vec![1])); assert_eq!( Nominators::::iter() .map(|(k, n)| (k, n.targets.len())) @@ -4951,7 +5060,7 @@ fn change_of_max_nominations() { // or they can be chilled by any account. assert!(Nominators::::contains_key(101)); assert!(Nominators::::get(101).is_none()); - assert_ok!(Staking::chill_other(Origin::signed(70), 100)); + assert_ok!(Staking::chill_other(RuntimeOrigin::signed(70), 100)); assert!(!Nominators::::contains_key(101)); assert!(Nominators::::get(101).is_none()); }) @@ -4975,7 +5084,7 @@ mod sorted_list_provider { ); // when account 101 renominates - assert_ok!(Staking::nominate(Origin::signed(100), vec![41])); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(100), vec![41])); // then counts don't change assert_eq!(::VoterList::count(), pre_insert_voter_count); @@ -4998,7 +5107,7 @@ mod sorted_list_provider { assert_eq!(::VoterList::iter().collect::>(), vec![11, 21, 31]); // when account 11 re-validates - assert_ok!(Staking::validate(Origin::signed(10), Default::default())); + assert_ok!(Staking::validate(RuntimeOrigin::signed(10), Default::default())); // then counts don't change assert_eq!(::VoterList::count(), pre_insert_voter_count); @@ -5013,31 +5122,31 @@ fn force_apply_min_commission_works() { let prefs = |c| ValidatorPrefs { commission: Perbill::from_percent(c), blocked: false }; let validators = || Validators::::iter().collect::>(); ExtBuilder::default().build_and_execute(|| { - assert_ok!(Staking::validate(Origin::signed(30), prefs(10))); - assert_ok!(Staking::validate(Origin::signed(20), prefs(5))); + assert_ok!(Staking::validate(RuntimeOrigin::signed(30), prefs(10))); + assert_ok!(Staking::validate(RuntimeOrigin::signed(20), prefs(5))); // Given assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(0))]); MinCommission::::set(Perbill::from_percent(5)); // When applying to a commission greater than min - assert_ok!(Staking::force_apply_min_commission(Origin::signed(1), 31)); + assert_ok!(Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 31)); // Then the commission is not changed assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(0))]); // When applying to a commission that is equal to min - assert_ok!(Staking::force_apply_min_commission(Origin::signed(1), 21)); + assert_ok!(Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 21)); // Then the commission is not changed assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(0))]); // When applying to a commission that is less than the min - assert_ok!(Staking::force_apply_min_commission(Origin::signed(1), 11)); + assert_ok!(Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 11)); // Then the commission is bumped to the min assert_eq!(validators(), vec![(31, prefs(10)), (21, prefs(5)), (11, prefs(5))]); // When applying commission to a validator that doesn't exist then storage is not altered 
assert_noop!( - Staking::force_apply_min_commission(Origin::signed(1), 420), + Staking::force_apply_min_commission(RuntimeOrigin::signed(1), 420), Error::::NotStash ); }); diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 8c73f99be3aea..3f81d3c211a95 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -446,10 +446,10 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { /// Origin that can control the configurations of this pallet. - type ControlOrigin: frame_support::traits::EnsureOrigin; + type ControlOrigin: frame_support::traits::EnsureOrigin; /// Filter on which origin that trigger the manual migrations. - type SignedFilter: EnsureOrigin; + type SignedFilter: EnsureOrigin; /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -1084,7 +1084,7 @@ mod mock { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u32; @@ -1293,7 +1293,7 @@ mod test { // fails if the top key is too long. frame_support::assert_ok!(StateTrieMigration::continue_migrate( - Origin::signed(1), + RuntimeOrigin::signed(1), MigrationLimits { item: 50, size: 1 << 20 }, Bounded::max_value(), MigrationProcess::::get() @@ -1328,7 +1328,7 @@ mod test { // fails if the top key is too long. frame_support::assert_ok!(StateTrieMigration::continue_migrate( - Origin::signed(1), + RuntimeOrigin::signed(1), MigrationLimits { item: 50, size: 1 << 20 }, Bounded::max_value(), MigrationProcess::::get() @@ -1457,7 +1457,7 @@ mod test { // can't submit if limit is too high. frame_support::assert_err!( StateTrieMigration::continue_migrate( - Origin::signed(1), + RuntimeOrigin::signed(1), MigrationLimits { item: 5, size: sp_runtime::traits::Bounded::max_value() }, Bounded::max_value(), MigrationProcess::::get() @@ -1468,7 +1468,7 @@ mod test { // can't submit if poor. frame_support::assert_err!( StateTrieMigration::continue_migrate( - Origin::signed(2), + RuntimeOrigin::signed(2), MigrationLimits { item: 5, size: 100 }, 100, MigrationProcess::::get() @@ -1479,7 +1479,7 @@ mod test { // can't submit with bad witness. 
frame_support::assert_err_ignore_postinfo!( StateTrieMigration::continue_migrate( - Origin::signed(1), + RuntimeOrigin::signed(1), MigrationLimits { item: 5, size: 100 }, 100, MigrationTask { @@ -1500,7 +1500,7 @@ mod test { assert!(result.is_ok()); frame_support::assert_ok!(StateTrieMigration::continue_migrate( - Origin::signed(1), + RuntimeOrigin::signed(1), StateTrieMigration::signed_migration_max_limits().unwrap(), task.dyn_size, MigrationProcess::::get() @@ -1523,7 +1523,7 @@ mod test { let correct_witness = 3 + sp_core::storage::TRIE_VALUE_NODE_THRESHOLD * 3 + 1 + 2 + 3; new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( - Origin::signed(1), + RuntimeOrigin::signed(1), vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], correct_witness, )); @@ -1536,7 +1536,7 @@ mod test { new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { // works if the witness is an overestimate frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( - Origin::signed(1), + RuntimeOrigin::signed(1), vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], correct_witness + 99, )); @@ -1551,7 +1551,7 @@ mod test { // note that we don't expect this to be a noop -- we do slash. frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( - Origin::signed(1), + RuntimeOrigin::signed(1), vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], correct_witness - 1, ),); @@ -1569,7 +1569,7 @@ mod test { fn custom_migrate_child_works() { new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { frame_support::assert_ok!(StateTrieMigration::migrate_custom_child( - Origin::signed(1), + RuntimeOrigin::signed(1), StateTrieMigration::childify("chk1"), vec![b"key1".to_vec(), b"key2".to_vec()], 55 + 66, @@ -1585,7 +1585,7 @@ mod test { // note that we don't expect this to be a noop -- we do slash. frame_support::assert_ok!(StateTrieMigration::migrate_custom_child( - Origin::signed(1), + RuntimeOrigin::signed(1), StateTrieMigration::childify("chk1"), vec![b"key1".to_vec(), b"key2".to_vec()], 999999, // wrong witness diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 69eac2c1b93c4..75d15d23c680a 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -119,7 +119,7 @@ pub mod pallet { /// A sudo-able call. 
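For readability, here is the bound with its associated-type binding spelled out, as it reads after this change (a sketch reconstructed from the rename rather than a quoted line, so treat the exact binding as an assumption). The rename sits in the binding's key: `Origin = …` becomes `RuntimeOrigin = …`, matching the renamed associated type on `UnfilteredDispatchable`.

    type RuntimeCall: Parameter
        + UnfilteredDispatchable<RuntimeOrigin = Self::RuntimeOrigin>
        + GetDispatchInfo;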
type RuntimeCall: Parameter - + UnfilteredDispatchable + + UnfilteredDispatchable + GetDispatchInfo; } diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index 67c221b234de1..db2ad4d563910 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -125,7 +125,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index fbf4063c992e4..ae8f198736004 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -20,7 +20,7 @@ use super::*; use frame_support::{assert_noop, assert_ok, weights::Weight}; use mock::{ - new_test_ext, Logger, LoggerCall, Origin, RuntimeCall, RuntimeEvent as TestEvent, Sudo, + new_test_ext, Logger, LoggerCall, RuntimeCall, RuntimeEvent as TestEvent, RuntimeOrigin, Sudo, SudoCall, System, Test, }; @@ -43,7 +43,7 @@ fn sudo_basics() { i: 42, weight: Weight::from_ref_time(1_000), })); - assert_ok!(Sudo::sudo(Origin::signed(1), call)); + assert_ok!(Sudo::sudo(RuntimeOrigin::signed(1), call)); assert_eq!(Logger::i32_log(), vec![42i32]); // A privileged function should not work when `sudo` is passed a non-root `key` as `origin`. @@ -51,7 +51,7 @@ fn sudo_basics() { i: 42, weight: Weight::from_ref_time(1_000), })); - assert_noop!(Sudo::sudo(Origin::signed(2), call), Error::::RequireSudo); + assert_noop!(Sudo::sudo(RuntimeOrigin::signed(2), call), Error::::RequireSudo); }); } @@ -66,7 +66,7 @@ fn sudo_emits_events_correctly() { i: 42, weight: Weight::from_ref_time(1), })); - assert_ok!(Sudo::sudo(Origin::signed(1), call)); + assert_ok!(Sudo::sudo(RuntimeOrigin::signed(1), call)); System::assert_has_event(TestEvent::Sudo(Event::Sudid { sudo_result: Ok(()) })); }) } @@ -80,7 +80,7 @@ fn sudo_unchecked_weight_basics() { weight: Weight::from_ref_time(1_000), })); assert_ok!(Sudo::sudo_unchecked_weight( - Origin::signed(1), + RuntimeOrigin::signed(1), call, Weight::from_ref_time(1_000) )); @@ -92,7 +92,11 @@ fn sudo_unchecked_weight_basics() { weight: Weight::from_ref_time(1_000), })); assert_noop!( - Sudo::sudo_unchecked_weight(Origin::signed(2), call, Weight::from_ref_time(1_000)), + Sudo::sudo_unchecked_weight( + RuntimeOrigin::signed(2), + call, + Weight::from_ref_time(1_000) + ), Error::::RequireSudo, ); // `I32Log` is unchanged after unsuccessful call. @@ -122,7 +126,7 @@ fn sudo_unchecked_weight_emits_events_correctly() { weight: Weight::from_ref_time(1), })); assert_ok!(Sudo::sudo_unchecked_weight( - Origin::signed(1), + RuntimeOrigin::signed(1), call, Weight::from_ref_time(1_000) )); @@ -134,14 +138,14 @@ fn sudo_unchecked_weight_emits_events_correctly() { fn set_key_basics() { new_test_ext(1).execute_with(|| { // A root `key` can change the root `key` - assert_ok!(Sudo::set_key(Origin::signed(1), 2)); + assert_ok!(Sudo::set_key(RuntimeOrigin::signed(1), 2)); assert_eq!(Sudo::key(), Some(2u64)); }); new_test_ext(1).execute_with(|| { // A non-root `key` will trigger a `RequireSudo` error and a non-root `key` cannot change // the root `key`. - assert_noop!(Sudo::set_key(Origin::signed(2), 3), Error::::RequireSudo); + assert_noop!(Sudo::set_key(RuntimeOrigin::signed(2), 3), Error::::RequireSudo); }); } @@ -152,10 +156,10 @@ fn set_key_emits_events_correctly() { System::set_block_number(1); // A root `key` can change the root `key`. 
- assert_ok!(Sudo::set_key(Origin::signed(1), 2)); + assert_ok!(Sudo::set_key(RuntimeOrigin::signed(1), 2)); System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old_sudoer: Some(1) })); // Double check. - assert_ok!(Sudo::set_key(Origin::signed(2), 4)); + assert_ok!(Sudo::set_key(RuntimeOrigin::signed(2), 4)); System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old_sudoer: Some(2) })); }); } @@ -168,7 +172,7 @@ fn sudo_as_basics() { i: 42, weight: Weight::from_ref_time(1_000), })); - assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); + assert_ok!(Sudo::sudo_as(RuntimeOrigin::signed(1), 2, call)); assert!(Logger::i32_log().is_empty()); assert!(Logger::account_log().is_empty()); @@ -177,14 +181,14 @@ fn sudo_as_basics() { i: 42, weight: Weight::from_ref_time(1), })); - assert_noop!(Sudo::sudo_as(Origin::signed(3), 2, call), Error::::RequireSudo); + assert_noop!(Sudo::sudo_as(RuntimeOrigin::signed(3), 2, call), Error::::RequireSudo); // A non-privileged function will work when passed to `sudo_as` with the root `key`. let call = Box::new(RuntimeCall::Logger(LoggerCall::non_privileged_log { i: 42, weight: Weight::from_ref_time(1), })); - assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); + assert_ok!(Sudo::sudo_as(RuntimeOrigin::signed(1), 2, call)); assert_eq!(Logger::i32_log(), vec![42i32]); // The correct user makes the call within `sudo_as`. assert_eq!(Logger::account_log(), vec![2]); @@ -202,7 +206,7 @@ fn sudo_as_emits_events_correctly() { i: 42, weight: Weight::from_ref_time(1), })); - assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); + assert_ok!(Sudo::sudo_as(RuntimeOrigin::signed(1), 2, call)); System::assert_has_event(TestEvent::Sudo(Event::SudoAsDone { sudo_result: Ok(()) })); }); } diff --git a/frame/support/procedural/src/construct_runtime/expand/call.rs b/frame/support/procedural/src/construct_runtime/expand/call.rs index 101f0c371382f..8f07448f5785e 100644 --- a/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -162,12 +162,12 @@ pub fn expand_outer_dispatch( } } impl #scrate::dispatch::Dispatchable for RuntimeCall { - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Config = RuntimeCall; type Info = #scrate::dispatch::DispatchInfo; type PostInfo = #scrate::dispatch::PostDispatchInfo; - fn dispatch(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { - if !::filter_call(&origin, &self) { + fn dispatch(self, origin: RuntimeOrigin) -> #scrate::dispatch::DispatchResultWithPostInfo { + if !::filter_call(&origin, &self) { return #scrate::sp_std::result::Result::Err( #system_path::Error::<#runtime>::CallFiltered.into() ); @@ -177,8 +177,8 @@ pub fn expand_outer_dispatch( } } impl #scrate::traits::UnfilteredDispatchable for RuntimeCall { - type Origin = Origin; - fn dispatch_bypass_filter(self, origin: Origin) -> #scrate::dispatch::DispatchResultWithPostInfo { + type RuntimeOrigin = RuntimeOrigin; + fn dispatch_bypass_filter(self, origin: RuntimeOrigin) -> #scrate::dispatch::DispatchResultWithPostInfo { match self { #( #pallet_attrs diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index d234ca6415115..acbcb65a3e986 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -103,13 +103,13 @@ pub fn expand_outer_origin( /// #[doc = #doc_string] 
#[derive(Clone)] - pub struct Origin { + pub struct RuntimeOrigin { caller: OriginCaller, filter: #scrate::sp_std::rc::Rc::RuntimeCall) -> bool>>, } #[cfg(not(feature = "std"))] - impl #scrate::sp_std::fmt::Debug for Origin { + impl #scrate::sp_std::fmt::Debug for RuntimeOrigin { fn fmt( &self, fmt: &mut #scrate::sp_std::fmt::Formatter, @@ -119,7 +119,7 @@ pub fn expand_outer_origin( } #[cfg(feature = "std")] - impl #scrate::sp_std::fmt::Debug for Origin { + impl #scrate::sp_std::fmt::Debug for RuntimeOrigin { fn fmt( &self, fmt: &mut #scrate::sp_std::fmt::Formatter, @@ -131,7 +131,7 @@ pub fn expand_outer_origin( } } - impl #scrate::traits::OriginTrait for Origin { + impl #scrate::traits::OriginTrait for RuntimeOrigin { type Call = <#runtime as #system_path::Config>::RuntimeCall; type PalletsOrigin = OriginCaller; type AccountId = <#runtime as #system_path::Config>::AccountId; @@ -214,21 +214,21 @@ pub fn expand_outer_origin( // For backwards compatibility and ease of accessing these functions. #[allow(dead_code)] - impl Origin { + impl RuntimeOrigin { #[doc = #doc_string_none_origin] pub fn none() -> Self { - ::none() + ::none() } #[doc = #doc_string_root_origin] pub fn root() -> Self { - ::root() + ::root() } #[doc = #doc_string_signed_origin] pub fn signed(by: <#runtime as #system_path::Config>::AccountId) -> Self { - ::signed(by) + ::signed(by) } } @@ -251,7 +251,7 @@ pub fn expand_outer_origin( } } - impl From<#system_path::Origin<#runtime>> for Origin { + impl From<#system_path::Origin<#runtime>> for RuntimeOrigin { #[doc = #doc_string_runtime_origin] fn from(x: #system_path::Origin<#runtime>) -> Self { @@ -260,9 +260,9 @@ pub fn expand_outer_origin( } } - impl From for Origin { + impl From for RuntimeOrigin { fn from(x: OriginCaller) -> Self { - let mut o = Origin { + let mut o = RuntimeOrigin { caller: x, filter: #scrate::sp_std::rc::Rc::new(Box::new(|_| true)), }; @@ -273,9 +273,9 @@ pub fn expand_outer_origin( } } - impl From for #scrate::sp_std::result::Result<#system_path::Origin<#runtime>, Origin> { + impl From for #scrate::sp_std::result::Result<#system_path::Origin<#runtime>, RuntimeOrigin> { /// NOTE: converting to pallet origin loses the origin filter information. - fn from(val: Origin) -> Self { + fn from(val: RuntimeOrigin) -> Self { if let OriginCaller::system(l) = val.caller { Ok(l) } else { @@ -283,7 +283,7 @@ pub fn expand_outer_origin( } } } - impl From::AccountId>> for Origin { + impl From::AccountId>> for RuntimeOrigin { #[doc = #doc_string_runtime_origin_with_caller] fn from(x: Option<<#runtime as #system_path::Config>::AccountId>) -> Self { <#system_path::Origin<#runtime>>::from(x).into() @@ -374,7 +374,7 @@ fn expand_origin_pallet_conversions( } #attr - impl From<#pallet_origin> for Origin { + impl From<#pallet_origin> for RuntimeOrigin { #[doc = #doc_string] fn from(x: #pallet_origin) -> Self { let x: OriginCaller = x.into(); @@ -383,9 +383,9 @@ fn expand_origin_pallet_conversions( } #attr - impl From for #scrate::sp_std::result::Result<#pallet_origin, Origin> { + impl From for #scrate::sp_std::result::Result<#pallet_origin, RuntimeOrigin> { /// NOTE: converting to pallet origin loses the origin filter information. 
- fn from(val: Origin) -> Self { + fn from(val: RuntimeOrigin) -> Self { if let OriginCaller::#variant_name(l) = val.caller { Ok(l) } else { diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index d5c84fa37510d..18d5adee63ad6 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -278,10 +278,10 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { for #call_ident<#type_use_gen> #where_clause { - type Origin = #frame_system::pallet_prelude::OriginFor; + type RuntimeOrigin = #frame_system::pallet_prelude::OriginFor; fn dispatch_bypass_filter( self, - origin: Self::Origin + origin: Self::RuntimeOrigin ) -> #frame_support::dispatch::DispatchResultWithPostInfo { match self { #( diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs index f1e520952d872..8b4ab154b306a 100644 --- a/frame/support/procedural/src/pallet/parse/mod.rs +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -119,7 +119,7 @@ impl Def { let g = genesis_build::GenesisBuildDef::try_from(span, index, item)?; genesis_build = Some(g); }, - Some(PalletAttr::Origin(_)) if origin.is_none() => + Some(PalletAttr::RuntimeOrigin(_)) if origin.is_none() => origin = Some(origin::OriginDef::try_from(index, item)?), Some(PalletAttr::Inherent(_)) if inherent.is_none() => inherent = Some(inherent::InherentDef::try_from(index, item)?), @@ -394,7 +394,7 @@ enum PalletAttr { RuntimeCall(proc_macro2::Span), Error(proc_macro2::Span), RuntimeEvent(proc_macro2::Span), - Origin(proc_macro2::Span), + RuntimeOrigin(proc_macro2::Span), Inherent(proc_macro2::Span), Storage(proc_macro2::Span), GenesisConfig(proc_macro2::Span), @@ -413,7 +413,7 @@ impl PalletAttr { Self::RuntimeCall(span) => *span, Self::Error(span) => *span, Self::RuntimeEvent(span) => *span, - Self::Origin(span) => *span, + Self::RuntimeOrigin(span) => *span, Self::Inherent(span) => *span, Self::Storage(span) => *span, Self::GenesisConfig(span) => *span, @@ -447,7 +447,7 @@ impl syn::parse::Parse for PalletAttr { } else if lookahead.peek(keyword::event) { Ok(PalletAttr::RuntimeEvent(content.parse::()?.span())) } else if lookahead.peek(keyword::origin) { - Ok(PalletAttr::Origin(content.parse::()?.span())) + Ok(PalletAttr::RuntimeOrigin(content.parse::()?.span())) } else if lookahead.peek(keyword::inherent) { Ok(PalletAttr::Inherent(content.parse::()?.span())) } else if lookahead.peek(keyword::storage) { diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index cacd599fa008c..db2bc90658ee2 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -650,7 +650,7 @@ impl PaysFee for (u64, Pays) { /// # use frame_support::dispatch; /// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::RuntimeOrigin { /// /// // Private functions are dispatchable, but not available to other /// // FRAME pallets. @@ -677,7 +677,7 @@ impl PaysFee for (u64, Pays) { /// * `Module`: The struct generated by the macro, with type `Config`. /// * `Call`: The enum generated for every pallet, which implements /// [`Callable`](./dispatch/trait.Callable.html). -/// * `origin`: Alias of `T::Origin`. +/// * `origin`: Alias of `T::RuntimeOrigin`. /// * `Result`: The expected return type from pallet functions. 
/// /// The first parameter of dispatchable functions must always be `origin`. @@ -693,7 +693,7 @@ impl PaysFee for (u64, Pays) { /// # use frame_support::dispatch; /// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::RuntimeOrigin { /// #[weight = 0] /// fn my_long_function(origin) -> dispatch::DispatchResult { /// // Your implementation @@ -728,7 +728,7 @@ impl PaysFee for (u64, Pays) { /// # use frame_support::{weights::Weight, dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}}; /// # use frame_system::{Config, ensure_signed}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::RuntimeOrigin { /// #[weight = 1_000_000] /// fn my_long_function(origin, do_expensive_calc: bool) -> DispatchResultWithPostInfo { /// ensure_signed(origin).map_err(|e| e.with_weight(Weight::from_ref_time(100_000)))?; @@ -758,7 +758,7 @@ impl PaysFee for (u64, Pays) { /// # use frame_support::transactional; /// # use frame_system::Config; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::RuntimeOrigin { /// #[weight = 0] /// #[transactional] /// fn my_short_function(origin) { @@ -779,7 +779,7 @@ impl PaysFee for (u64, Pays) { /// # use frame_support::dispatch; /// # use frame_system::{Config, ensure_signed, ensure_root}; /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::RuntimeOrigin { /// #[weight = 0] /// fn my_privileged_function(origin) -> dispatch::DispatchResult { /// ensure_root(origin)?; @@ -819,7 +819,7 @@ impl PaysFee for (u64, Pays) { /// pub trait Config: frame_system::Config {} /// /// decl_module! { -/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { +/// pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::RuntimeOrigin { /// // Your implementation /// } /// } @@ -831,7 +831,7 @@ impl PaysFee for (u64, Pays) { /// /// ## Where clause /// -/// Besides the default `origin: T::Origin`, you can also pass other bounds to the module +/// Besides the default `origin: T::RuntimeOrigin`, you can also pass other bounds to the module /// declaration. This where bound will be replicated to all types generated by this macro. The /// chaining of multiple trait bounds with `+` is not supported. If multiple bounds for one type are /// required, it needs to be split up into multiple bounds. @@ -844,7 +844,7 @@ impl PaysFee for (u64, Pays) { /// pub trait Config: system::Config where Self::AccountId: From {} /// /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { +/// pub struct Module for enum Call where origin: T::RuntimeOrigin, T::AccountId: From { /// // Your implementation /// } /// } @@ -1885,7 +1885,7 @@ macro_rules! decl_module { ) ); }; - // Ignore any ident which is not `origin` with type `T::Origin`. + // Ignore any ident which is not `origin` with type `T::RuntimeOrigin`. (@normalize $(#[$attr:meta])* pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> @@ -1906,7 +1906,7 @@ macro_rules! decl_module { $(#[weight = $weight:expr])? 
$(#[$fn_attr:meta])* $fn_vis:vis fn $fn_name:ident( - $origin:ident : T::Origin $( , $( #[$codec_attr:ident] )* $param_name:ident : $param:ty )* $(,)? + $origin:ident : T::RuntimeOrigin $( , $( #[$codec_attr:ident] )* $param_name:ident : $param:ty )* $(,)? ) $( -> $result:ty )* { $( $impl:tt )* } $($rest:tt)* ) => { @@ -2906,8 +2906,8 @@ macro_rules! decl_module { impl<$trait_instance: $trait_name $(, $instance: $instantiable)?> $crate::traits::UnfilteredDispatchable for $call_type<$trait_instance $(, $instance)?> where $( $other_where_bounds )* { - type Origin = $origin_type; - fn dispatch_bypass_filter(self, _origin: Self::Origin) -> $crate::dispatch::DispatchResultWithPostInfo { + type RuntimeOrigin = $origin_type; + fn dispatch_bypass_filter(self, _origin: Self::RuntimeOrigin) -> $crate::dispatch::DispatchResultWithPostInfo { match self { $( $call_type::$fn_name { $( $param_name ),* } => { @@ -3200,7 +3200,7 @@ mod tests { type AccountId; type RuntimeCall; type BaseCallFilter; - type Origin: crate::traits::OriginTrait; + type RuntimeOrigin: crate::traits::OriginTrait; type BlockNumber: Into; type PalletInfo: crate::traits::PalletInfo; type DbWeight: Get; @@ -3212,7 +3212,7 @@ mod tests { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system = system, T::AccountId: From { + pub struct Module for enum Call where origin: T::RuntimeOrigin, system = system, T::AccountId: From { /// Hi, this is a comment. #[weight = 0] fn aux_0(_origin) -> DispatchResult { unreachable!() } @@ -3347,7 +3347,7 @@ mod tests { } impl system::Config for TraitImpl { - type Origin = OuterOrigin; + type RuntimeOrigin = OuterOrigin; type AccountId = u32; type RuntimeCall = (); type BaseCallFilter = frame_support::traits::Everything; @@ -3500,7 +3500,7 @@ mod weight_tests { use sp_weights::RuntimeDbWeight; pub trait Config: 'static { - type Origin; + type RuntimeOrigin; type Balance; type BlockNumber; type DbWeight: Get; @@ -3517,7 +3517,7 @@ mod weight_tests { } impl Config for TraitImpl { - type Origin = u32; + type RuntimeOrigin = u32; type BlockNumber = u32; type Balance = u32; type DbWeight = DbWeight; @@ -3525,7 +3525,7 @@ mod weight_tests { } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { // no arguments, fixed weight #[weight = 1000] fn f00(_origin) { unimplemented!(); } diff --git a/frame/support/src/error.rs b/frame/support/src/error.rs index 0ffe4334e2e30..337bd75895c2c 100644 --- a/frame/support/src/error.rs +++ b/frame/support/src/error.rs @@ -51,7 +51,7 @@ pub use sp_runtime::traits::{BadOrigin, LookupError}; /// // exported in the metadata. /// /// decl_module! { -/// pub struct Module for enum Call where origin: T::Origin { +/// pub struct Module for enum Call where origin: T::RuntimeOrigin { /// type Error = MyError; /// /// #[weight = 0] diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 31b95de5f413d..57f29e592b242 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -845,7 +845,7 @@ pub mod tests { pub trait Config: 'static { type BlockNumber: Codec + EncodeLike + Default + TypeInfo; - type Origin; + type RuntimeOrigin; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } @@ -856,7 +856,7 @@ pub mod tests { use super::Config; decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} } } @@ -888,7 +888,7 @@ pub mod tests { impl Config for Test { type BlockNumber = u32; - type Origin = u32; + type RuntimeOrigin = u32; type PalletInfo = PanicPalletInfo; type DbWeight = (); } diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index 4ffe32651a9cc..c95dcee9d7e5c 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -512,14 +512,14 @@ mod test_iterators { use codec::{Decode, Encode}; pub trait Config: 'static { - type Origin; + type RuntimeOrigin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } crate::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index d190145ea4c00..f6c8eaa270bb3 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -354,14 +354,14 @@ mod test_iterators { use codec::{Decode, Encode}; pub trait Config: 'static { - type Origin; + type RuntimeOrigin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } crate::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] diff --git a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index ca893f44b3cb0..334e9b9e24e86 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -47,21 +47,21 @@ mod tests { struct Runtime; pub trait Config: 'static { - type Origin; + type RuntimeOrigin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } impl Config for Runtime { - type Origin = u32; + type RuntimeOrigin = u32; type BlockNumber = u32; type PalletInfo = crate::tests::PanicPalletInfo; type DbWeight = (); } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} } crate::decl_storage! { diff --git a/frame/support/src/storage/generator/nmap.rs b/frame/support/src/storage/generator/nmap.rs index f1d0f9a5f0801..79f3d72044e28 100755 --- a/frame/support/src/storage/generator/nmap.rs +++ b/frame/support/src/storage/generator/nmap.rs @@ -460,14 +460,14 @@ mod test_iterators { use codec::{Decode, Encode}; pub trait Config: 'static { - type Origin; + type RuntimeOrigin; type BlockNumber; type PalletInfo: crate::traits::PalletInfo; type DbWeight: crate::traits::Get; } crate::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} } #[derive(PartialEq, Eq, Clone, Encode, Decode)] diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs index afc819aa454e5..c0e7e32a5529e 100644 --- a/frame/support/src/traits/dispatch.rs +++ b/frame/support/src/traits/dispatch.rs @@ -229,14 +229,14 @@ impl< /// Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by /// `construct_runtime`. pub trait UnfilteredDispatchable { - /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). - type Origin; + /// The origin type of the runtime, (i.e. `frame_system::Config::RuntimeOrigin`). + type RuntimeOrigin; /// Dispatch this call but do not check the filter in origin. - fn dispatch_bypass_filter(self, origin: Self::Origin) -> DispatchResultWithPostInfo; + fn dispatch_bypass_filter(self, origin: Self::RuntimeOrigin) -> DispatchResultWithPostInfo; } -/// Methods available on `frame_system::Config::Origin`. +/// Methods available on `frame_system::Config::RuntimeOrigin`. pub trait OriginTrait: Sized { /// Runtime call type, as in `frame_system::Config::Call` type Call; diff --git a/frame/support/src/traits/schedule.rs b/frame/support/src/traits/schedule.rs index 39ebbb78321d6..0dbbbd9e2a553 100644 --- a/frame/support/src/traits/schedule.rs +++ b/frame/support/src/traits/schedule.rs @@ -132,7 +132,7 @@ pub mod v1 { use super::*; /// A type that can be used as a scheduler. - pub trait Anon { + pub trait Anon { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + Debug + TypeInfo + MaxEncodedLen; @@ -143,7 +143,7 @@ pub mod v1 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: Origin, + origin: RuntimeOrigin, call: Call, ) -> Result; @@ -177,7 +177,7 @@ pub mod v1 { } /// A type that can be used as a scheduler. - pub trait Named { + pub trait Named { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug + MaxEncodedLen; @@ -189,7 +189,7 @@ pub mod v1 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: Origin, + origin: RuntimeOrigin, call: Call, ) -> Result; @@ -215,9 +215,9 @@ pub mod v1 { fn next_dispatch_time(id: Vec) -> Result; } - impl Anon for T + impl Anon for T where - T: v2::Anon, + T: v2::Anon, { type Address = T::Address; @@ -225,7 +225,7 @@ pub mod v1 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: Origin, + origin: RuntimeOrigin, call: Call, ) -> Result { let c = MaybeHashed::::Value(call); @@ -248,9 +248,9 @@ pub mod v1 { } } - impl Named for T + impl Named for T where - T: v2::Named, + T: v2::Named, { type Address = T::Address; @@ -259,7 +259,7 @@ pub mod v1 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: Origin, + origin: RuntimeOrigin, call: Call, ) -> Result { let c = MaybeHashed::::Value(call); @@ -287,7 +287,7 @@ pub mod v2 { use super::*; /// A type that can be used as a scheduler. - pub trait Anon { + pub trait Anon { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + Debug + TypeInfo + MaxEncodedLen; /// A means of expressing a call by the hash of its encoded data. 
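The scheduler traits keep their shape; only the generic parameter previously called `Origin` is now `RuntimeOrigin`. A minimal sketch of code written against the `v1::Anon` form shown above (hypothetical helper with assumed imports, not part of the patch):

    use frame_support::traits::schedule::{v1::Anon, DispatchTime};
    use sp_runtime::DispatchError;

    /// Schedule `call` once at `when`, to be dispatched with `origin`.
    fn schedule_once<S, BlockNumber, Call, RuntimeOrigin>(
        when: DispatchTime<BlockNumber>,
        origin: RuntimeOrigin,
        call: Call,
    ) -> Result<S::Address, DispatchError>
    where
        S: Anon<BlockNumber, Call, RuntimeOrigin>,
    {
        // Not periodic, highest priority (0); the fourth argument is the renamed
        // `RuntimeOrigin` the call will eventually be dispatched with.
        S::schedule(when, None, 0, origin, call)
    }

Call sites are unchanged apart from the parameter's type name; the rename only aligns the generic with `frame_system::Config::RuntimeOrigin`.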
@@ -300,7 +300,7 @@ pub mod v2 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: Origin, + origin: RuntimeOrigin, call: MaybeHashed, ) -> Result; @@ -334,7 +334,7 @@ pub mod v2 { } /// A type that can be used as a scheduler. - pub trait Named { + pub trait Named { /// An address which can be used for removing a scheduled task. type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug + MaxEncodedLen; /// A means of expressing a call by the hash of its encoded data. @@ -348,7 +348,7 @@ pub mod v2 { when: DispatchTime, maybe_periodic: Option>, priority: Priority, - origin: Origin, + origin: RuntimeOrigin, call: MaybeHashed, ) -> Result; diff --git a/frame/support/test/compile_pass/src/lib.rs b/frame/support/test/compile_pass/src/lib.rs index 6e4d2363b2f7c..b46f6c48a6d99 100644 --- a/frame/support/test/compile_pass/src/lib.rs +++ b/frame/support/test/compile_pass/src/lib.rs @@ -65,7 +65,7 @@ impl frame_system::Config for Runtime { type BlockHashCount = ConstU64<2400>; type Version = Version; type AccountData = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type BlockNumber = BlockNumber; type AccountId = AccountId; type RuntimeEvent = RuntimeEvent; diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index dd3fbd1f3020d..0ceeed42ff982 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -25,7 +25,7 @@ /// The configuration trait pub trait Config: 'static { /// The runtime origin type. - type Origin: codec::Codec + codec::EncodeLike + Default + scale_info::TypeInfo; + type RuntimeOrigin: codec::Codec + codec::EncodeLike + Default + scale_info::TypeInfo; /// The block number type. type BlockNumber: codec::Codec + codec::EncodeLike + Default + scale_info::TypeInfo; /// The information about the pallet setup in the runtime. @@ -36,7 +36,7 @@ pub trait Config: 'static { frame_support::decl_module! { /// Some test module - pub struct Module for enum Call where origin: T::Origin, system=self {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self {} } /// A PalletInfo implementation which just panics. diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 4306314b4c005..b1ace12936241 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -49,7 +49,7 @@ mod module1 { frame_support::decl_module! { pub struct Module, I: Instance = DefaultInstance> for enum Call - where origin: ::Origin, system=system + where origin: ::RuntimeOrigin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -89,7 +89,7 @@ mod module2 { frame_support::decl_module! { pub struct Module for enum Call - where origin: ::Origin, system=system + where origin: ::RuntimeOrigin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -134,7 +134,7 @@ mod nested { frame_support::decl_module! { pub struct Module for enum Call - where origin: ::Origin, system=system + where origin: ::RuntimeOrigin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -180,7 +180,7 @@ pub mod module3 { frame_support::decl_module! 
{ pub struct Module for enum Call - where origin: ::Origin, system=system + where origin: ::RuntimeOrigin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -247,7 +247,7 @@ fn test_pub() -> AccountId { impl system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Hash = H256; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type BlockNumber = BlockNumber; type AccountId = AccountId; type RuntimeEvent = RuntimeEvent; diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs index fd6b51db07672..2ca9676406579 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.rs @@ -12,7 +12,7 @@ impl test_pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs b/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs index 706d444f23590..5691549c20f34 100644 --- a/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs +++ b/frame/support/test/tests/construct_runtime_ui/old_unsupported_pallet_decl.rs @@ -8,7 +8,7 @@ mod pallet_old { } decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::RuntimeOrigin {} } } diff --git a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs index f73c611d29686..b7ccadb5e3e58 100644 --- a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs +++ b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs @@ -46,7 +46,7 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs index f90af88d23a9b..abe5c4cfeb343 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs @@ -21,7 +21,7 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs index 60d4b166c41fd..9a6ac5c6251fb 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs @@ -21,7 +21,7 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = 
RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs index 5f7beed686339..4facf85e280c0 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs @@ -21,7 +21,7 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs index fb866c161ccb8..322fa2c297285 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs @@ -21,7 +21,7 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs index 26ca5d6eb4cb0..55cd5b545d6ba 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs @@ -21,7 +21,7 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index c692cd61bae8b..b93a2a92911ea 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -46,39 +46,6 @@ help: if you import `Origin`, refer to it directly 58 - } | -error[E0412]: cannot find type `Origin` in module `pallet` - --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 - | -49 | / construct_runtime! { -50 | | pub enum Runtime where -51 | | Block = Block, -52 | | NodeBlock = Block, -... | -57 | | } -58 | | } - | |_^ not found in `pallet` - | - = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) -help: consider importing one of these items - | -1 | use crate::Origin; - | -1 | use frame_system::Origin; - | -help: if you import `Origin`, refer to it directly - | -49 - construct_runtime! 
{ -50 - pub enum Runtime where -51 - Block = Block, -52 - NodeBlock = Block, -53 - UncheckedExtrinsic = UncheckedExtrinsic -54 - { -55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, -56 - Pallet: pallet::{Pallet, Origin}, -57 - } -58 - } - | - error[E0282]: type annotations needed --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 | diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs index 81c040c660451..0cf305a7dc055 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs @@ -21,7 +21,7 @@ impl pallet::Config for Runtime {} impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs index cc7c1ff219d8b..0f662b96e2b13 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs @@ -1,5 +1,5 @@ frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { fn integrity_test() {} fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 880695d9b77e2..47ff1c8af8cd2 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -2,7 +2,7 @@ error: `integrity_test` can only be passed once as input. --> tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { 3 | | fn integrity_test() {} 4 | | 5 | | fn integrity_test() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs index 18aaec12c5f39..ea0746b1c501c 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs @@ -1,5 +1,5 @@ frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { fn on_initialize() -> Weight { 0 } diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr index 369be77b8d249..bfefadee99403 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -2,7 +2,7 @@ error: `on_initialize` can only be passed once as input. --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { +2 | | pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { 3 | | fn on_initialize() -> Weight { 4 | | 0 ... | diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index 7ce43cd5d44d1..a4e420eb454b6 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -23,7 +23,7 @@ mod tests { use sp_io::TestExternalities; frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} } pub trait Config: frame_support_test::Config { @@ -94,7 +94,7 @@ mod tests { struct TraitImpl {} impl frame_support_test::Config for TraitImpl { - type Origin = u32; + type RuntimeOrigin = u32; type BlockNumber = u32; type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); @@ -617,7 +617,7 @@ mod test2 { pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} } type PairOf = (T, T); @@ -639,7 +639,7 @@ mod test2 { struct TraitImpl {} impl frame_support_test::Config for TraitImpl { - type Origin = u32; + type RuntimeOrigin = u32; type BlockNumber = u32; type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); @@ -695,7 +695,7 @@ mod test3 { pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} } frame_support::decl_storage! { trait Store for Module as Test { @@ -708,7 +708,7 @@ mod test3 { struct TraitImpl {} impl frame_support_test::Config for TraitImpl { - type Origin = u32; + type RuntimeOrigin = u32; type BlockNumber = u32; type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); @@ -726,7 +726,7 @@ mod test_append_and_len { pub trait Config: frame_support_test::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} } #[derive(PartialEq, Eq, Clone, Encode, Decode, scale_info::TypeInfo)] @@ -753,7 +753,7 @@ mod test_append_and_len { struct Test {} impl frame_support_test::Config for Test { - type Origin = u32; + type RuntimeOrigin = u32; type BlockNumber = u32; type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); diff --git a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs index db2cdbdc65492..af74823f64ca6 100644 --- a/frame/support/test/tests/decl_storage_ui/config_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_duplicate.rs @@ -18,7 +18,7 @@ pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} } frame_support::decl_storage!{ diff --git a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs index b804bf8980383..8805effd9e725 100644 --- a/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/config_get_duplicate.rs @@ -18,7 +18,7 @@ pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} } frame_support::decl_storage!{ diff --git a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs index bc03ff6b4a4f9..5e05887110537 100644 --- a/frame/support/test/tests/decl_storage_ui/get_duplicate.rs +++ b/frame/support/test/tests/decl_storage_ui/get_duplicate.rs @@ -18,7 +18,7 @@ pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} } frame_support::decl_storage!{ diff --git a/frame/support/test/tests/final_keys.rs b/frame/support/test/tests/final_keys.rs index c1723c6ad7a1b..a7ad88c801001 100644 --- a/frame/support/test/tests/final_keys.rs +++ b/frame/support/test/tests/final_keys.rs @@ -28,7 +28,7 @@ mod no_instance { pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} } frame_support::decl_storage! { @@ -53,7 +53,7 @@ mod instance { frame_support::decl_module! { pub struct Module, I: Instance = DefaultInstance> - for enum Call where origin: T::Origin, system=frame_support_test {} + for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} } frame_support::decl_storage! 
{ diff --git a/frame/support/test/tests/genesisconfig.rs b/frame/support/test/tests/genesisconfig.rs index 367c7236d0158..f0094b3188ae1 100644 --- a/frame/support/test/tests/genesisconfig.rs +++ b/frame/support/test/tests/genesisconfig.rs @@ -18,7 +18,7 @@ pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test {} } frame_support::decl_storage! { @@ -31,7 +31,7 @@ struct Test; impl frame_support_test::Config for Test { type BlockNumber = u32; - type Origin = (); + type RuntimeOrigin = (); type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); } diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index c9eebdaff6bc4..043959b67ee6e 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -51,14 +51,14 @@ mod module1 { ::BlockNumber: From, { type RuntimeEvent: From> + Into<::RuntimeEvent>; - type Origin: From>; + type RuntimeOrigin: From>; type SomeParameter: Get; type GenericType: Default + Clone + Codec + EncodeLike + TypeInfo; } frame_support::decl_module! { pub struct Module, I: Instance> for enum Call where - origin: ::Origin, + origin: ::RuntimeOrigin, system = system, T::BlockNumber: From { @@ -155,14 +155,14 @@ mod module2 { pub trait Config: system::Config { type Amount: Parameter + Default; type RuntimeEvent: From> + Into<::RuntimeEvent>; - type Origin: From>; + type RuntimeOrigin: From>; } impl, I: Instance> Currency for Module {} frame_support::decl_module! { pub struct Module, I: Instance=DefaultInstance> for enum Call where - origin: ::Origin, + origin: ::RuntimeOrigin, system = system { fn deposit_event() = default; @@ -228,41 +228,41 @@ mod module3 { } frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: ::Origin, system=system {} + pub struct Module for enum Call where origin: ::RuntimeOrigin, system=system {} } } impl module1::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type SomeParameter = ConstU32<100>; type GenericType = u32; } impl module1::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type SomeParameter = ConstU32<100>; type GenericType = u32; } impl module2::Config for Runtime { type Amount = u16; type RuntimeEvent = RuntimeEvent; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; } impl module2::Config for Runtime { type Amount = u32; type RuntimeEvent = RuntimeEvent; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; } impl module2::Config for Runtime { type Amount = u32; type RuntimeEvent = RuntimeEvent; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; } impl module2::Config for Runtime { type Amount = u64; type RuntimeEvent = RuntimeEvent; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; } impl module3::Config for Runtime { type Currency = Module2_2; @@ -277,7 +277,7 @@ pub type Index = u64; impl system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type Hash = H256; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type BlockNumber = BlockNumber; type AccountId = AccountId; type RuntimeEvent = RuntimeEvent; diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs index 0b24a6aa89828..5d24d54165c1f 100644 --- a/frame/support/test/tests/issue2219.rs +++ b/frame/support/test/tests/issue2219.rs @@ -86,7 +86,7 @@ mod module { pub trait Config: system::Config + TypeInfo {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=system {} + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=system {} } #[derive(Encode, Decode, Copy, Clone, Serialize, Deserialize)] @@ -160,7 +160,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic for enum Call - where origin: ::Origin, system=system + where origin: ::RuntimeOrigin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -80,7 +80,7 @@ pub mod module { frame_support::decl_module! 
{ pub struct Module for enum Call - where origin: ::Origin, system=system + where origin: ::RuntimeOrigin, system=system { #[weight = 0] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { @@ -146,7 +146,7 @@ impl Contains for BaseCallFilter { impl system::Config for RuntimeOriginTest { type BaseCallFilter = BaseCallFilter; type Hash = H256; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type BlockNumber = BlockNumber; type AccountId = u32; type RuntimeEvent = RuntimeEvent; @@ -178,40 +178,40 @@ fn origin_default_filter() { let accepted_call = nested::module::Call::fail {}.into(); let rejected_call = module::Call::fail {}.into(); - assert_eq!(Origin::root().filter_call(&accepted_call), true); - assert_eq!(Origin::root().filter_call(&rejected_call), true); - assert_eq!(Origin::none().filter_call(&accepted_call), true); - assert_eq!(Origin::none().filter_call(&rejected_call), false); - assert_eq!(Origin::signed(0).filter_call(&accepted_call), true); - assert_eq!(Origin::signed(0).filter_call(&rejected_call), false); - assert_eq!(Origin::from(Some(0)).filter_call(&accepted_call), true); - assert_eq!(Origin::from(Some(0)).filter_call(&rejected_call), false); - assert_eq!(Origin::from(None).filter_call(&accepted_call), true); - assert_eq!(Origin::from(None).filter_call(&rejected_call), false); - assert_eq!(Origin::from(nested::module::Origin).filter_call(&accepted_call), true); - assert_eq!(Origin::from(nested::module::Origin).filter_call(&rejected_call), false); - - let mut origin = Origin::from(Some(0)); + assert_eq!(RuntimeOrigin::root().filter_call(&accepted_call), true); + assert_eq!(RuntimeOrigin::root().filter_call(&rejected_call), true); + assert_eq!(RuntimeOrigin::none().filter_call(&accepted_call), true); + assert_eq!(RuntimeOrigin::none().filter_call(&rejected_call), false); + assert_eq!(RuntimeOrigin::signed(0).filter_call(&accepted_call), true); + assert_eq!(RuntimeOrigin::signed(0).filter_call(&rejected_call), false); + assert_eq!(RuntimeOrigin::from(Some(0)).filter_call(&accepted_call), true); + assert_eq!(RuntimeOrigin::from(Some(0)).filter_call(&rejected_call), false); + assert_eq!(RuntimeOrigin::from(None).filter_call(&accepted_call), true); + assert_eq!(RuntimeOrigin::from(None).filter_call(&rejected_call), false); + assert_eq!(RuntimeOrigin::from(nested::module::Origin).filter_call(&accepted_call), true); + assert_eq!(RuntimeOrigin::from(nested::module::Origin).filter_call(&rejected_call), false); + + let mut origin = RuntimeOrigin::from(Some(0)); origin.add_filter(|c| matches!(c, RuntimeCall::Module(_))); assert_eq!(origin.filter_call(&accepted_call), false); assert_eq!(origin.filter_call(&rejected_call), false); // Now test for root origin and filters: - let mut origin = Origin::from(Some(0)); - origin.set_caller_from(Origin::root()); + let mut origin = RuntimeOrigin::from(Some(0)); + origin.set_caller_from(RuntimeOrigin::root()); assert!(matches!(origin.caller, OriginCaller::system(system::RawOrigin::Root))); // Root origin bypass all filter. 
assert_eq!(origin.filter_call(&accepted_call), true); assert_eq!(origin.filter_call(&rejected_call), true); - origin.set_caller_from(Origin::from(Some(0))); + origin.set_caller_from(RuntimeOrigin::from(Some(0))); // Back to another signed origin, the filtered are now effective again assert_eq!(origin.filter_call(&accepted_call), true); assert_eq!(origin.filter_call(&rejected_call), false); - origin.set_caller_from(Origin::root()); + origin.set_caller_from(RuntimeOrigin::root()); origin.reset_filter(); // Root origin bypass all filter, even when they are reset. diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 4304aa1533a8b..05c1f14601862 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -552,8 +552,10 @@ pub mod pallet2 { #[frame_support::pallet] pub mod pallet3 { #[pallet::config] - pub trait Config: frame_system::Config::Origin> { - type Origin; + pub trait Config: + frame_system::Config::RuntimeOrigin> + { + type RuntimeOrigin; } #[pallet::pallet] @@ -578,7 +580,7 @@ frame_support::parameter_types!( impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; @@ -618,7 +620,7 @@ impl pallet4::Config for Runtime {} #[cfg(feature = "frame-feature-testing")] impl pallet3::Config for Runtime { - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; } pub type Header = sp_runtime::generic::Header; diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 2342f25fb98b9..398137d644ee4 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -70,7 +70,7 @@ mod pallet_old { ); decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::RuntimeOrigin { type Error = Error; fn deposit_event() = default; const SomeConst: T::Balance = T::SomeConst::get(); @@ -226,7 +226,7 @@ pub mod pallet { impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index b89ab410621e5..e8b5fe9fa33d4 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -56,7 +56,7 @@ mod pallet_old { decl_module! 
{ pub struct Module, I: Instance = DefaultInstance> for enum Call - where origin: T::Origin + where origin: T::RuntimeOrigin { type Error = Error; fn deposit_event() = default; @@ -210,7 +210,7 @@ impl frame_system::Config for Runtime { type BlockLength = (); type DbWeight = (); type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 3bbbc559ff9b8..7e05e2ecf783b 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -286,7 +286,7 @@ pub mod pallet2 { impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs index 788e852ba58e9..0066420566fe8 100644 --- a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -43,7 +43,7 @@ frame_support::decl_error!( ); frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::RuntimeOrigin { fn deposit_event() = default; type Error = Error; const Foo: u32 = u32::MAX; @@ -126,7 +126,7 @@ mod tests { impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = sp_core::H256; diff --git a/frame/support/test/tests/storage_layers.rs b/frame/support/test/tests/storage_layers.rs index a3e512302fe4f..6fbbb8ac67bd7 100644 --- a/frame/support/test/tests/storage_layers.rs +++ b/frame/support/test/tests/storage_layers.rs @@ -59,7 +59,7 @@ pub mod decl_pallet { pub trait Config: frame_system::Config {} frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::RuntimeOrigin { #[weight = 0] pub fn set_value(_origin, value: u32) { DeclValue::put(value); @@ -86,7 +86,7 @@ impl frame_system::Config for Runtime { type BlockLength = (); type DbWeight = (); type BaseCallFilter = frame_support::traits::Everything; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u32; type RuntimeCall = RuntimeCall; @@ -256,11 +256,11 @@ fn storage_layer_in_pallet_call() { TestExternalities::default().execute_with(|| { use sp_runtime::traits::Dispatchable; let call1 = RuntimeCall::MyPallet(pallet::Call::set_value { value: 2 }); - assert_ok!(call1.dispatch(Origin::signed(0))); + assert_ok!(call1.dispatch(RuntimeOrigin::signed(0))); assert_eq!(Value::::get(), 2); let call2 = RuntimeCall::MyPallet(pallet::Call::set_value { value: 1 }); - assert_noop!(call2.dispatch(Origin::signed(0)), Error::::Revert); + assert_noop!(call2.dispatch(RuntimeOrigin::signed(0)), Error::::Revert); }); } @@ -271,12 +271,15 @@ fn storage_layer_in_decl_pallet_call() { use sp_runtime::traits::Dispatchable; let call1 = RuntimeCall::DeclPallet(decl_pallet::Call::set_value { value: 2 }); - assert_ok!(call1.dispatch(Origin::signed(0))); + assert_ok!(call1.dispatch(RuntimeOrigin::signed(0))); assert_eq!(decl_pallet::DeclValue::get(), 2); let call2 = RuntimeCall::DeclPallet(decl_pallet::Call::set_value { value: 1 }); - assert_noop!(call2.dispatch(Origin::signed(0)), "Revert!"); + assert_noop!(call2.dispatch(RuntimeOrigin::signed(0)), "Revert!"); // Calling the function directly also works with storage layers. - assert_noop!(decl_pallet::Module::::set_value(Origin::signed(1), 1), "Revert!"); + assert_noop!( + decl_pallet::Module::::set_value(RuntimeOrigin::signed(1), 1), + "Revert!" + ); }); } diff --git a/frame/support/test/tests/storage_transaction.rs b/frame/support/test/tests/storage_transaction.rs index 9e597969d6c89..6fedd75019e37 100644 --- a/frame/support/test/tests/storage_transaction.rs +++ b/frame/support/test/tests/storage_transaction.rs @@ -31,7 +31,7 @@ use sp_std::result; pub trait Config: frame_support_test::Config {} frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin, system=frame_support_test { + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=frame_support_test { #[weight = 0] #[transactional] fn value_commits(_origin, v: u32) { @@ -57,7 +57,7 @@ frame_support::decl_storage! { struct Runtime; impl frame_support_test::Config for Runtime { - type Origin = u32; + type RuntimeOrigin = u32; type BlockNumber = u32; type PalletInfo = frame_support_test::PanicPalletInfo; type DbWeight = (); diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index 9736c74dc236d..eff41242917a0 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -22,7 +22,7 @@ use frame_support::{ }; pub trait Config: 'static + Eq + Clone { - type Origin: Into, Self::Origin>> + type RuntimeOrigin: Into, Self::RuntimeOrigin>> + From>; type BaseCallFilter: frame_support::traits::Contains; @@ -36,7 +36,7 @@ pub trait Config: 'static + Eq + Clone { } frame_support::decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system=self { + pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { #[weight = 0] fn noop(_origin) {} } diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 03d3a65812d56..0881b81eaca7d 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -76,7 +76,7 @@ impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = BlockLength; type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index 1c0a7efa03735..a7f28ca30fe87 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -43,7 +43,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = AccountIndex; type BlockNumber = BlockNumber; type RuntimeCall = RuntimeCall; diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index f60e3e7349d6b..36360f8fae2c2 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -215,8 +215,8 @@ pub mod pallet { #[pallet::constant] type BlockLength: Get; - /// The `Origin` type used by dispatchable calls. - type Origin: Into, Self::Origin>> + /// The `RuntimeOrigin` type used by dispatchable calls. + type RuntimeOrigin: Into, Self::RuntimeOrigin>> + From> + Clone + OriginTrait; @@ -1701,7 +1701,7 @@ pub mod pallet_prelude { pub use crate::{ensure_none, ensure_root, ensure_signed, ensure_signed_or_root}; /// Type alias for the `Origin` associated type of system config. - pub type OriginFor = ::Origin; + pub type OriginFor = ::RuntimeOrigin; /// Type alias for the `BlockNumber` associated type of system config. 
pub type BlockNumberFor = ::BlockNumber; diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index fdaaceaf8abe0..1c0511787eb76 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -93,7 +93,7 @@ impl Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index b6fdda8d9cbe2..92563f4ad1747 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -20,7 +20,7 @@ use frame_support::{ assert_noop, assert_ok, dispatch::{Pays, PostDispatchInfo, WithPostDispatchInfo}, }; -use mock::{Origin, *}; +use mock::{RuntimeOrigin, *}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, Header}, @@ -29,8 +29,8 @@ use sp_runtime::{ #[test] fn origin_works() { - let o = Origin::from(RawOrigin::::Signed(1u64)); - let x: Result, Origin> = o.into(); + let o = RuntimeOrigin::from(RawOrigin::::Signed(1u64)); + let x: Result, RuntimeOrigin> = o.into(); assert_eq!(x.unwrap(), RawOrigin::::Signed(1u64)); } @@ -638,16 +638,16 @@ fn ensure_signed_stuff_works() { } } - let signed_origin = Origin::signed(0u64); + let signed_origin = RuntimeOrigin::signed(0u64); assert_ok!(EnsureSigned::try_origin(signed_origin.clone())); assert_ok!(EnsureSignedBy::::try_origin(signed_origin)); #[cfg(feature = "runtime-benchmarks")] { - let successful_origin: Origin = EnsureSigned::successful_origin(); + let successful_origin: RuntimeOrigin = EnsureSigned::successful_origin(); assert_ok!(EnsureSigned::try_origin(successful_origin)); - let successful_origin: Origin = EnsureSignedBy::::successful_origin(); + let successful_origin: RuntimeOrigin = EnsureSignedBy::::successful_origin(); assert_ok!(EnsureSignedBy::::try_origin(successful_origin)); } } diff --git a/frame/timestamp/src/mock.rs b/frame/timestamp/src/mock.rs index 7d3da0e8a0b83..2208510f24fe5 100644 --- a/frame/timestamp/src/mock.rs +++ b/frame/timestamp/src/mock.rs @@ -55,7 +55,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/timestamp/src/tests.rs b/frame/timestamp/src/tests.rs index ef9fd6e39d4b5..6a76fbc4820e6 100644 --- a/frame/timestamp/src/tests.rs +++ b/frame/timestamp/src/tests.rs @@ -24,7 +24,7 @@ use frame_support::assert_ok; fn timestamp_works() { new_test_ext().execute_with(|| { crate::Now::::put(46); - assert_ok!(Timestamp::set(Origin::none(), 69)); + assert_ok!(Timestamp::set(RuntimeOrigin::none(), 69)); assert_eq!(Timestamp::now(), 69); assert_eq!(Some(69), get_captured_moment()); }); @@ -35,7 +35,7 @@ fn timestamp_works() { fn double_timestamp_should_fail() { new_test_ext().execute_with(|| { Timestamp::set_timestamp(42); - assert_ok!(Timestamp::set(Origin::none(), 69)); + assert_ok!(Timestamp::set(RuntimeOrigin::none(), 69)); }); } @@ -46,6 +46,6 @@ fn double_timestamp_should_fail() { fn block_period_minimum_enforced() { new_test_ext().execute_with(|| { crate::Now::::put(44); - let _ = Timestamp::set(Origin::none(), 46); + let _ = Timestamp::set(RuntimeOrigin::none(), 46); }); } diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs index 4956e2a095688..312424e5799ec 100644 --- 
a/frame/tips/src/benchmarking.rs +++ b/frame/tips/src/benchmarking.rs @@ -197,7 +197,7 @@ benchmarks_instance_pallet! { let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); ensure!(Tips::::contains_key(hash), "tip does not exist"); let reject_origin = T::RejectOrigin::successful_origin(); - }: _(reject_origin, hash) + }: _(reject_origin, hash) impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test); } diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index 9b721bd77a8bc..cb0b4458c7fba 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -65,7 +65,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -230,9 +230,9 @@ fn tip_hash() -> H256 { fn tip_new_cannot_be_used_twice() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 10)); assert_noop!( - Tips::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), + Tips::tip_new(RuntimeOrigin::signed(11), b"awesome.dot".to_vec(), 3, 10), Error::::AlreadyKnown ); }); @@ -242,23 +242,23 @@ fn tip_new_cannot_be_used_twice() { fn report_awesome_and_tip_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_ok!(Tips::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 3)); assert_eq!(Balances::reserved_balance(0), 12); assert_eq!(Balances::free_balance(0), 88); // other reports don't count. 
assert_noop!( - Tips::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), + Tips::report_awesome(RuntimeOrigin::signed(1), b"awesome.dot".to_vec(), 3), Error::::AlreadyKnown ); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(10), h, 10)); - assert_ok!(Tips::tip(Origin::signed(11), h, 10)); - assert_ok!(Tips::tip(Origin::signed(12), h, 10)); - assert_noop!(Tips::tip(Origin::signed(9), h, 10), BadOrigin); + assert_ok!(Tips::tip(RuntimeOrigin::signed(10), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10)); + assert_noop!(Tips::tip(RuntimeOrigin::signed(9), h, 10), BadOrigin); System::set_block_number(2); - assert_ok!(Tips::close_tip(Origin::signed(100), h.into())); + assert_ok!(Tips::close_tip(RuntimeOrigin::signed(100), h.into())); assert_eq!(Balances::reserved_balance(0), 0); assert_eq!(Balances::free_balance(0), 102); assert_eq!(Balances::free_balance(3), 8); @@ -269,15 +269,15 @@ fn report_awesome_and_tip_works() { fn report_awesome_from_beneficiary_and_tip_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); + assert_ok!(Tips::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 0)); assert_eq!(Balances::reserved_balance(0), 12); assert_eq!(Balances::free_balance(0), 88); let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u128)); - assert_ok!(Tips::tip(Origin::signed(10), h, 10)); - assert_ok!(Tips::tip(Origin::signed(11), h, 10)); - assert_ok!(Tips::tip(Origin::signed(12), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(10), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10)); System::set_block_number(2); - assert_ok!(Tips::close_tip(Origin::signed(100), h.into())); + assert_ok!(Tips::close_tip(RuntimeOrigin::signed(100), h.into())); assert_eq!(Balances::reserved_balance(0), 0); assert_eq!(Balances::free_balance(0), 110); }); @@ -291,30 +291,33 @@ fn close_tip_works() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 10)); let h = tip_hash(); assert_eq!(last_event(), TipEvent::NewTip { tip_hash: h }); - assert_ok!(Tips::tip(Origin::signed(11), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); - assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); + assert_noop!(Tips::close_tip(RuntimeOrigin::signed(0), h.into()), Error::::StillOpen); - assert_ok!(Tips::tip(Origin::signed(12), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10)); assert_eq!(last_event(), TipEvent::TipClosing { tip_hash: h }); - assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::Premature); + assert_noop!(Tips::close_tip(RuntimeOrigin::signed(0), h.into()), Error::::Premature); System::set_block_number(2); - assert_noop!(Tips::close_tip(Origin::none(), h.into()), BadOrigin); - assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); + assert_noop!(Tips::close_tip(RuntimeOrigin::none(), h.into()), BadOrigin); + assert_ok!(Tips::close_tip(RuntimeOrigin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); assert_eq!(last_event(), TipEvent::TipClosed { tip_hash: h, who: 3, payout: 10 }); - 
assert_noop!(Tips::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); + assert_noop!( + Tips::close_tip(RuntimeOrigin::signed(100), h.into()), + Error::::UnknownTip + ); }); } @@ -328,7 +331,7 @@ fn slash_tip_works() { assert_eq!(Balances::reserved_balance(0), 0); assert_eq!(Balances::free_balance(0), 100); - assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_ok!(Tips::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 3)); assert_eq!(Balances::reserved_balance(0), 12); assert_eq!(Balances::free_balance(0), 88); @@ -337,10 +340,10 @@ fn slash_tip_works() { assert_eq!(last_event(), TipEvent::NewTip { tip_hash: h }); // can't remove from any origin - assert_noop!(Tips::slash_tip(Origin::signed(0), h), BadOrigin); + assert_noop!(Tips::slash_tip(RuntimeOrigin::signed(0), h), BadOrigin); // can remove from root. - assert_ok!(Tips::slash_tip(Origin::root(), h)); + assert_ok!(Tips::slash_tip(RuntimeOrigin::root(), h)); assert_eq!(last_event(), TipEvent::TipSlashed { tip_hash: h, finder: 0, deposit: 12 }); // tipper slashed @@ -354,26 +357,32 @@ fn retract_tip_works() { new_test_ext().execute_with(|| { // with report awesome Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_ok!(Tips::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 3)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(10), h, 10)); - assert_ok!(Tips::tip(Origin::signed(11), h, 10)); - assert_ok!(Tips::tip(Origin::signed(12), h, 10)); - assert_noop!(Tips::retract_tip(Origin::signed(10), h), Error::::NotFinder); - assert_ok!(Tips::retract_tip(Origin::signed(0), h)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(10), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10)); + assert_noop!(Tips::retract_tip(RuntimeOrigin::signed(10), h), Error::::NotFinder); + assert_ok!(Tips::retract_tip(RuntimeOrigin::signed(0), h)); System::set_block_number(2); - assert_noop!(Tips::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); + assert_noop!( + Tips::close_tip(RuntimeOrigin::signed(0), h.into()), + Error::::UnknownTip + ); // with tip new Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 10)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(11), h, 10)); - assert_ok!(Tips::tip(Origin::signed(12), h, 10)); - assert_noop!(Tips::retract_tip(Origin::signed(0), h), Error::::NotFinder); - assert_ok!(Tips::retract_tip(Origin::signed(10), h)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10)); + assert_noop!(Tips::retract_tip(RuntimeOrigin::signed(0), h), Error::::NotFinder); + assert_ok!(Tips::retract_tip(RuntimeOrigin::signed(10), h)); System::set_block_number(2); - assert_noop!(Tips::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); + assert_noop!( + Tips::close_tip(RuntimeOrigin::signed(10), h.into()), + Error::::UnknownTip + ); }); } @@ -381,12 +390,12 @@ fn retract_tip_works() { fn tip_median_calculation_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); + 
assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 0)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(11), h, 10)); - assert_ok!(Tips::tip(Origin::signed(12), h, 1000000)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 1000000)); System::set_block_number(2); - assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); + assert_ok!(Tips::close_tip(RuntimeOrigin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); }); } @@ -395,17 +404,17 @@ fn tip_median_calculation_works() { fn tip_changing_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Tips::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); + assert_ok!(Tips::tip_new(RuntimeOrigin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); let h = tip_hash(); - assert_ok!(Tips::tip(Origin::signed(11), h, 10000)); - assert_ok!(Tips::tip(Origin::signed(12), h, 10000)); - assert_ok!(Tips::tip(Origin::signed(13), h, 0)); - assert_ok!(Tips::tip(Origin::signed(14), h, 0)); - assert_ok!(Tips::tip(Origin::signed(12), h, 1000)); - assert_ok!(Tips::tip(Origin::signed(11), h, 100)); - assert_ok!(Tips::tip(Origin::signed(10), h, 10)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 10000)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 10000)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(13), h, 0)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(14), h, 0)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(12), h, 1000)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(11), h, 100)); + assert_ok!(Tips::tip(RuntimeOrigin::signed(10), h, 10)); System::set_block_number(2); - assert_ok!(Tips::close_tip(Origin::signed(0), h.into())); + assert_ok!(Tips::close_tip(RuntimeOrigin::signed(0), h.into())); assert_eq!(Balances::free_balance(3), 10); }); } @@ -582,24 +591,24 @@ fn report_awesome_and_tip_works_second_instance() { assert_eq!(Balances::free_balance(&Treasury::account_id()), 101); assert_eq!(Balances::free_balance(&Treasury1::account_id()), 201); - assert_ok!(Tips1::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_ok!(Tips1::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 3)); // duplicate report in tips1 reports don't count. 
assert_noop!( - Tips1::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), + Tips1::report_awesome(RuntimeOrigin::signed(1), b"awesome.dot".to_vec(), 3), Error::::AlreadyKnown ); // but tips is separate - assert_ok!(Tips::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_ok!(Tips::report_awesome(RuntimeOrigin::signed(0), b"awesome.dot".to_vec(), 3)); let h = tip_hash(); - assert_ok!(Tips1::tip(Origin::signed(10), h, 10)); - assert_ok!(Tips1::tip(Origin::signed(11), h, 10)); - assert_ok!(Tips1::tip(Origin::signed(12), h, 10)); - assert_noop!(Tips1::tip(Origin::signed(9), h, 10), BadOrigin); + assert_ok!(Tips1::tip(RuntimeOrigin::signed(10), h, 10)); + assert_ok!(Tips1::tip(RuntimeOrigin::signed(11), h, 10)); + assert_ok!(Tips1::tip(RuntimeOrigin::signed(12), h, 10)); + assert_noop!(Tips1::tip(RuntimeOrigin::signed(9), h, 10), BadOrigin); System::set_block_number(2); - assert_ok!(Tips1::close_tip(Origin::signed(100), h.into())); + assert_ok!(Tips1::close_tip(RuntimeOrigin::signed(100), h.into())); // Treasury 1 unchanged assert_eq!(Balances::free_balance(&Treasury::account_id()), 101); // Treasury 2 gave the funds diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index 1f6113aabdddd..cdf7d17898145 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -87,7 +87,7 @@ impl frame_system::Config for Runtime { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -338,7 +338,7 @@ fn transaction_payment_in_asset_possible() { let asset_id = 1; let min_balance = 2; assert_ok!(Assets::force_create( - Origin::root(), + RuntimeOrigin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -391,7 +391,7 @@ fn transaction_payment_without_fee() { let asset_id = 1; let min_balance = 2; assert_ok!(Assets::force_create( - Origin::root(), + RuntimeOrigin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -444,7 +444,7 @@ fn asset_transaction_payment_with_tip_and_refund() { let asset_id = 1; let min_balance = 2; assert_ok!(Assets::force_create( - Origin::root(), + RuntimeOrigin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -496,7 +496,7 @@ fn payment_from_account_with_only_assets() { let asset_id = 1; let min_balance = 2; assert_ok!(Assets::force_create( - Origin::root(), + RuntimeOrigin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -555,7 +555,7 @@ fn payment_only_with_existing_sufficient_asset() { // create the non-sufficient asset let min_balance = 2; assert_ok!(Assets::force_create( - Origin::root(), + RuntimeOrigin::root(), asset_id, 42, /* owner */ false, /* is_sufficient */ @@ -580,7 +580,7 @@ fn converted_fee_is_never_zero_if_input_fee_is_not() { let asset_id = 1; let min_balance = 1; assert_ok!(Assets::force_create( - Origin::root(), + RuntimeOrigin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -645,7 +645,7 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { let asset_id = 1; let min_balance = 100; assert_ok!(Assets::force_create( - Origin::root(), + RuntimeOrigin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ @@ -702,7 +702,7 @@ fn post_dispatch_fee_is_zero_if_unsigned_pre_dispatch_fee_is_zero() { let asset_id = 1; let min_balance = 100; 
assert_ok!(Assets::force_create( - Origin::root(), + RuntimeOrigin::root(), asset_id, 42, /* owner */ true, /* is_sufficient */ diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 0447ea6bfd99d..1ad6a2b3b3b6f 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -906,7 +906,7 @@ mod tests { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index e623079ac863b..07144c5617113 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -95,7 +95,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// A dispatchable call. type RuntimeCall: Parameter - + Dispatchable + + Dispatchable + GetDispatchInfo + From>; /// The currency trait. diff --git a/frame/transaction-storage/src/mock.rs b/frame/transaction-storage/src/mock.rs index 1cb725e63e83b..8764b16c31d8d 100644 --- a/frame/transaction-storage/src/mock.rs +++ b/frame/transaction-storage/src/mock.rs @@ -51,7 +51,7 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; @@ -116,7 +116,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { pub fn run_to_block(n: u64, f: impl Fn() -> Option) { while System::block_number() < n { if let Some(proof) = f() { - TransactionStorage::check_proof(Origin::none(), proof).unwrap(); + TransactionStorage::check_proof(RuntimeOrigin::none(), proof).unwrap(); } TransactionStorage::on_finalize(System::block_number()); System::on_finalize(System::block_number()); diff --git a/frame/transaction-storage/src/tests.rs b/frame/transaction-storage/src/tests.rs index 4f5ce1c4b654d..01b71a7851ac3 100644 --- a/frame/transaction-storage/src/tests.rs +++ b/frame/transaction-storage/src/tests.rs @@ -95,7 +95,7 @@ fn checks_proof() { let proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap(); assert_noop!( - TransactionStorage::::check_proof(Origin::none(), proof,), + TransactionStorage::::check_proof(RuntimeOrigin::none(), proof,), Error::::UnexpectedProof, ); run_to_block(11, || None); @@ -103,13 +103,13 @@ fn checks_proof() { let invalid_proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; 1000]]).unwrap(); assert_noop!( - TransactionStorage::::check_proof(Origin::none(), invalid_proof,), + TransactionStorage::::check_proof(RuntimeOrigin::none(), invalid_proof,), Error::::InvalidProof, ); let proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap(); - assert_ok!(TransactionStorage::::check_proof(Origin::none(), proof)); + assert_ok!(TransactionStorage::::check_proof(RuntimeOrigin::none(), proof)); }); } diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index b3787e2063c45..d718a5fb89521 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -100,7 +100,7 @@ benchmarks_instance_pallet! 
{ )?; let proposal_id = Treasury::::proposal_count() - 1; let reject_origin = T::RejectOrigin::successful_origin(); - }: _(reject_origin, proposal_id) + }: _(reject_origin, proposal_id) approve_proposal { let p in 0 .. T::MaxApprovals::get() - 1; @@ -113,7 +113,7 @@ benchmarks_instance_pallet! { )?; let proposal_id = Treasury::::proposal_count() - 1; let approve_origin = T::ApproveOrigin::successful_origin(); - }: _(approve_origin, proposal_id) + }: _(approve_origin, proposal_id) remove_approval { let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); @@ -125,7 +125,7 @@ benchmarks_instance_pallet! { let proposal_id = Treasury::::proposal_count() - 1; Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; let reject_origin = T::RejectOrigin::successful_origin(); - }: _(reject_origin, proposal_id) + }: _(reject_origin, proposal_id) on_initialize_proposals { let p in 0 .. T::MaxApprovals::get(); diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index bebbcda2effb6..21b4d2b769c8b 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -149,10 +149,10 @@ pub mod pallet { type Currency: Currency + ReservableCurrency; /// Origin from which approvals must come. - type ApproveOrigin: EnsureOrigin; + type ApproveOrigin: EnsureOrigin; /// Origin from which rejections must come. - type RejectOrigin: EnsureOrigin; + type RejectOrigin: EnsureOrigin; /// The overarching event type. type RuntimeEvent: From> @@ -204,7 +204,7 @@ pub mod pallet { /// The origin required for approving spends from the treasury outside of the proposal /// process. The `Success` value is the maximum amount that this origin is allowed to /// spend at a time. - type SpendOrigin: EnsureOrigin>; + type SpendOrigin: EnsureOrigin>; } /// Number of proposals that have been made. diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index af77b419d67d2..9cfe147ec4ce4 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -60,7 +60,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type RuntimeCall = RuntimeCall; @@ -98,21 +98,21 @@ parameter_types! 
{ pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); } pub struct TestSpendOrigin; -impl frame_support::traits::EnsureOrigin for TestSpendOrigin { +impl frame_support::traits::EnsureOrigin for TestSpendOrigin { type Success = u64; - fn try_origin(o: Origin) -> Result { - Result::, Origin>::from(o).and_then(|o| match o { + fn try_origin(o: RuntimeOrigin) -> Result { + Result::, RuntimeOrigin>::from(o).and_then(|o| match o { frame_system::RawOrigin::Root => Ok(u64::max_value()), frame_system::RawOrigin::Signed(10) => Ok(5), frame_system::RawOrigin::Signed(11) => Ok(10), frame_system::RawOrigin::Signed(12) => Ok(20), frame_system::RawOrigin::Signed(13) => Ok(50), - r => Err(Origin::from(r)), + r => Err(RuntimeOrigin::from(r)), }) } #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(Origin::root()) + fn try_successful_origin() -> Result { + Ok(RuntimeOrigin::root()) } } @@ -158,21 +158,21 @@ fn genesis_config_works() { #[test] fn spend_origin_permissioning_works() { new_test_ext().execute_with(|| { - assert_noop!(Treasury::spend(Origin::signed(1), 1, 1), BadOrigin); + assert_noop!(Treasury::spend(RuntimeOrigin::signed(1), 1, 1), BadOrigin); assert_noop!( - Treasury::spend(Origin::signed(10), 6, 1), + Treasury::spend(RuntimeOrigin::signed(10), 6, 1), Error::::InsufficientPermission ); assert_noop!( - Treasury::spend(Origin::signed(11), 11, 1), + Treasury::spend(RuntimeOrigin::signed(11), 11, 1), Error::::InsufficientPermission ); assert_noop!( - Treasury::spend(Origin::signed(12), 21, 1), + Treasury::spend(RuntimeOrigin::signed(12), 21, 1), Error::::InsufficientPermission ); assert_noop!( - Treasury::spend(Origin::signed(13), 51, 1), + Treasury::spend(RuntimeOrigin::signed(13), 51, 1), Error::::InsufficientPermission ); }); @@ -183,13 +183,13 @@ fn spend_origin_works() { new_test_ext().execute_with(|| { // Check that accumulate works when we have Some value in Dummy already. 
Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::spend(Origin::signed(10), 5, 6)); - assert_ok!(Treasury::spend(Origin::signed(10), 5, 6)); - assert_ok!(Treasury::spend(Origin::signed(10), 5, 6)); - assert_ok!(Treasury::spend(Origin::signed(10), 5, 6)); - assert_ok!(Treasury::spend(Origin::signed(11), 10, 6)); - assert_ok!(Treasury::spend(Origin::signed(12), 20, 6)); - assert_ok!(Treasury::spend(Origin::signed(13), 50, 6)); + assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6)); + assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6)); + assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6)); + assert_ok!(Treasury::spend(RuntimeOrigin::signed(10), 5, 6)); + assert_ok!(Treasury::spend(RuntimeOrigin::signed(11), 10, 6)); + assert_ok!(Treasury::spend(RuntimeOrigin::signed(12), 20, 6)); + assert_ok!(Treasury::spend(RuntimeOrigin::signed(13), 50, 6)); >::on_initialize(1); assert_eq!(Balances::free_balance(6), 0); @@ -212,7 +212,7 @@ fn minting_works() { #[test] fn spend_proposal_takes_min_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); assert_eq!(Balances::free_balance(0), 99); assert_eq!(Balances::reserved_balance(0), 1); }); @@ -221,7 +221,7 @@ fn spend_proposal_takes_min_deposit() { #[test] fn spend_proposal_takes_proportional_deposit() { new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); assert_eq!(Balances::free_balance(0), 95); assert_eq!(Balances::reserved_balance(0), 5); }); @@ -231,7 +231,7 @@ fn spend_proposal_takes_proportional_deposit() { fn spend_proposal_fails_when_proposer_poor() { new_test_ext().execute_with(|| { assert_noop!( - Treasury::propose_spend(Origin::signed(2), 100, 3), + Treasury::propose_spend(RuntimeOrigin::signed(2), 100, 3), Error::::InsufficientProposersBalance, ); }); @@ -242,8 +242,8 @@ fn accepted_spend_proposal_ignored_outside_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); >::on_initialize(1); assert_eq!(Balances::free_balance(3), 0); @@ -269,8 +269,8 @@ fn rejected_spend_proposal_ignored_on_spend_period() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 0); @@ -283,23 +283,32 @@ fn reject_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); - assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::::InvalidIndex); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 
0)); + assert_noop!( + Treasury::reject_proposal(RuntimeOrigin::root(), 0), + Error::::InvalidIndex + ); }); } #[test] fn reject_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { - assert_noop!(Treasury::reject_proposal(Origin::root(), 0), Error::::InvalidIndex); + assert_noop!( + Treasury::reject_proposal(RuntimeOrigin::root(), 0), + Error::::InvalidIndex + ); }); } #[test] fn accept_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { - assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::InvalidIndex); + assert_noop!( + Treasury::approve_proposal(RuntimeOrigin::root(), 0), + Error::::InvalidIndex + ); }); } @@ -308,9 +317,12 @@ fn accept_already_rejected_spend_proposal_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::root(), 0)); - assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::InvalidIndex); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(RuntimeOrigin::root(), 0)); + assert_noop!( + Treasury::approve_proposal(RuntimeOrigin::root(), 0), + Error::::InvalidIndex + ); }); } @@ -320,8 +332,8 @@ fn accepted_spend_proposal_enacted_on_spend_period() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); >::on_initialize(2); assert_eq!(Balances::free_balance(3), 100); @@ -335,8 +347,8 @@ fn pot_underflow_should_not_diminish() { Balances::make_free_balance_be(&Treasury::account_id(), 101); assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 150, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed @@ -357,14 +369,14 @@ fn treasury_account_doesnt_get_deleted() { assert_eq!(Treasury::pot(), 100); let treasury_balance = Balances::free_balance(&Treasury::account_id()); - assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), treasury_balance, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), Treasury::pot(), 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); >::on_initialize(4); assert_eq!(Treasury::pot(), 0); // Pot is emptied @@ -387,10 +399,10 @@ fn inexistent_account_works() { assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist assert_eq!(Treasury::pot(), 0); // Pot is empty - assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); - 
assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 1)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 99, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 1, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 1)); >::on_initialize(2); assert_eq!(Treasury::pot(), 0); // Pot hasn't changed assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed @@ -432,14 +444,14 @@ fn max_approvals_limited() { Balances::make_free_balance_be(&0, u64::MAX); for _ in 0..::MaxApprovals::get() { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); } // One too many will fail - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); assert_noop!( - Treasury::approve_proposal(Origin::root(), 0), + Treasury::approve_proposal(RuntimeOrigin::root(), 0), Error::::TooManyApprovals ); }); @@ -450,14 +462,14 @@ fn remove_already_removed_approval_fails() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + assert_ok!(Treasury::propose_spend(RuntimeOrigin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(RuntimeOrigin::root(), 0)); assert_eq!(Treasury::approvals(), vec![0]); - assert_ok!(Treasury::remove_approval(Origin::root(), 0)); + assert_ok!(Treasury::remove_approval(RuntimeOrigin::root(), 0)); assert_eq!(Treasury::approvals(), vec![]); assert_noop!( - Treasury::remove_approval(Origin::root(), 0), + Treasury::remove_approval(RuntimeOrigin::root(), 0), Error::::ProposalNotApproved ); }); diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index b02a10270dd7d..d519eedd2787f 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -105,12 +105,12 @@ pub mod pallet { /// The origin which may forcibly create or destroy an item or otherwise alter privileged /// attributes. - type ForceOrigin: EnsureOrigin; + type ForceOrigin: EnsureOrigin; /// Standard collection creation is only allowed if the origin attempting it and the /// collection are in this set. 
type CreateOrigin: EnsureOriginWithArg< - Self::Origin, + Self::RuntimeOrigin, Self::CollectionId, Success = Self::AccountId, >; diff --git a/frame/uniques/src/mock.rs b/frame/uniques/src/mock.rs index 6808c48007a92..d6ed5cc5cc23e 100644 --- a/frame/uniques/src/mock.rs +++ b/frame/uniques/src/mock.rs @@ -49,7 +49,7 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; diff --git a/frame/uniques/src/tests.rs b/frame/uniques/src/tests.rs index 5ac677758316a..7af54ddbb188c 100644 --- a/frame/uniques/src/tests.rs +++ b/frame/uniques/src/tests.rs @@ -92,14 +92,14 @@ fn basic_setup_works() { #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); assert_eq!(items(), vec![(1, 0, 42)]); - assert_ok!(Uniques::force_create(Origin::root(), 1, 2, true)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 1, 2, true)); assert_eq!(collections(), vec![(1, 0), (2, 1)]); - assert_ok!(Uniques::mint(Origin::signed(2), 1, 69, 1)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(2), 1, 69, 1)); assert_eq!(items(), vec![(1, 0, 42), (1, 1, 69)]); }); } @@ -108,32 +108,37 @@ fn basic_minting_should_work() { fn lifecycle_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_ok!(Uniques::create(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(Balances::reserved_balance(&1), 2); assert_eq!(collections(), vec![(1, 0)]); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0, 0], false)); + assert_ok!(Uniques::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0, 0], + false + )); assert_eq!(Balances::reserved_balance(&1), 5); assert!(CollectionMetadataOf::::contains_key(0)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 10)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 10)); assert_eq!(Balances::reserved_balance(&1), 6); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 20)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 69, 20)); assert_eq!(Balances::reserved_balance(&1), 7); assert_eq!(items(), vec![(10, 0, 42), (20, 0, 69)]); assert_eq!(Collection::::get(0).unwrap().items, 2); assert_eq!(Collection::::get(0).unwrap().item_metadatas, 0); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![42, 42], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![42, 42], false)); assert_eq!(Balances::reserved_balance(&1), 10); assert!(ItemMetadataOf::::contains_key(0, 42)); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![69, 69], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![69, 69], false)); assert_eq!(Balances::reserved_balance(&1), 13); assert!(ItemMetadataOf::::contains_key(0, 69)); let w = Collection::::get(0).unwrap().destroy_witness(); assert_eq!(w.items, 2); assert_eq!(w.item_metadatas, 2); - assert_ok!(Uniques::destroy(Origin::signed(1), 0, w)); + assert_ok!(Uniques::destroy(RuntimeOrigin::signed(1), 0, w)); 
assert_eq!(Balances::reserved_balance(&1), 0); assert!(!Collection::::contains_key(0)); @@ -151,19 +156,19 @@ fn lifecycle_should_work() { fn destroy_with_bad_witness_should_not_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_ok!(Uniques::create(RuntimeOrigin::signed(1), 0, 1)); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); - assert_noop!(Uniques::destroy(Origin::signed(1), 0, w), Error::::BadWitness); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_noop!(Uniques::destroy(RuntimeOrigin::signed(1), 0, w), Error::::BadWitness); }); } #[test] fn mint_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); assert_eq!(Uniques::owner(0, 42).unwrap(), 1); assert_eq!(collections(), vec![(1, 0)]); assert_eq!(items(), vec![(1, 0, 42)]); @@ -173,54 +178,66 @@ fn mint_should_work() { #[test] fn transfer_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 3)); assert_eq!(items(), vec![(3, 0, 42)]); - assert_noop!(Uniques::transfer(Origin::signed(2), 0, 42, 4), Error::::NoPermission); + assert_noop!( + Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 4), + Error::::NoPermission + ); - assert_ok!(Uniques::approve_transfer(Origin::signed(3), 0, 42, 2)); - assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 4)); + assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(3), 0, 42, 2)); + assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 4)); }); } #[test] fn freezing_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); - assert_ok!(Uniques::freeze(Origin::signed(1), 0, 42)); - assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::freeze(RuntimeOrigin::signed(1), 0, 42)); + assert_noop!(Uniques::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::Frozen); - assert_ok!(Uniques::thaw(Origin::signed(1), 0, 42)); - assert_ok!(Uniques::freeze_collection(Origin::signed(1), 0)); - assert_noop!(Uniques::transfer(Origin::signed(1), 0, 42, 2), Error::::Frozen); + assert_ok!(Uniques::thaw(RuntimeOrigin::signed(1), 0, 42)); + assert_ok!(Uniques::freeze_collection(RuntimeOrigin::signed(1), 0)); + assert_noop!(Uniques::transfer(RuntimeOrigin::signed(1), 0, 42, 2), Error::::Frozen); - assert_ok!(Uniques::thaw_collection(Origin::signed(1), 0)); - assert_ok!(Uniques::transfer(Origin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::thaw_collection(RuntimeOrigin::signed(1), 0)); + assert_ok!(Uniques::transfer(RuntimeOrigin::signed(1), 0, 42, 2)); }); } #[test] fn origin_guards_should_work() { 
new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); Balances::make_free_balance_be(&2, 100); - assert_ok!(Uniques::set_accept_ownership(Origin::signed(2), Some(0))); + assert_ok!(Uniques::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); + assert_noop!( + Uniques::transfer_ownership(RuntimeOrigin::signed(2), 0, 2), + Error::::NoPermission + ); + assert_noop!( + Uniques::set_team(RuntimeOrigin::signed(2), 0, 2, 2, 2), + Error::::NoPermission + ); + assert_noop!(Uniques::freeze(RuntimeOrigin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!(Uniques::thaw(RuntimeOrigin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!( + Uniques::mint(RuntimeOrigin::signed(2), 0, 69, 2), + Error::::NoPermission + ); assert_noop!( - Uniques::transfer_ownership(Origin::signed(2), 0, 2), + Uniques::burn(RuntimeOrigin::signed(2), 0, 42, None), Error::::NoPermission ); - assert_noop!(Uniques::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); - assert_noop!(Uniques::freeze(Origin::signed(2), 0, 42), Error::::NoPermission); - assert_noop!(Uniques::thaw(Origin::signed(2), 0, 42), Error::::NoPermission); - assert_noop!(Uniques::mint(Origin::signed(2), 0, 69, 2), Error::::NoPermission); - assert_noop!(Uniques::burn(Origin::signed(2), 0, 42, None), Error::::NoPermission); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_noop!(Uniques::destroy(Origin::signed(2), 0, w), Error::::NoPermission); + assert_noop!(Uniques::destroy(RuntimeOrigin::signed(2), 0, w), Error::::NoPermission); }); } @@ -230,14 +247,14 @@ fn transfer_owner_should_work() { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); Balances::make_free_balance_be(&3, 100); - assert_ok!(Uniques::create(Origin::signed(1), 0, 1)); + assert_ok!(Uniques::create(RuntimeOrigin::signed(1), 0, 1)); assert_eq!(collections(), vec![(1, 0)]); assert_noop!( - Uniques::transfer_ownership(Origin::signed(1), 0, 2), + Uniques::transfer_ownership(RuntimeOrigin::signed(1), 0, 2), Error::::Unaccepted ); - assert_ok!(Uniques::set_accept_ownership(Origin::signed(2), Some(0))); - assert_ok!(Uniques::transfer_ownership(Origin::signed(1), 0, 2)); + assert_ok!(Uniques::set_accept_ownership(RuntimeOrigin::signed(2), Some(0))); + assert_ok!(Uniques::transfer_ownership(RuntimeOrigin::signed(1), 0, 2)); assert_eq!(collections(), vec![(2, 0)]); assert_eq!(Balances::total_balance(&1), 98); @@ -245,18 +262,23 @@ fn transfer_owner_should_work() { assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Balances::reserved_balance(&2), 2); - assert_ok!(Uniques::set_accept_ownership(Origin::signed(1), Some(0))); + assert_ok!(Uniques::set_accept_ownership(RuntimeOrigin::signed(1), Some(0))); assert_noop!( - Uniques::transfer_ownership(Origin::signed(1), 0, 1), + Uniques::transfer_ownership(RuntimeOrigin::signed(1), 0, 1), Error::::NoPermission ); // Mint and set metadata now and make sure that deposit gets transferred back. 
- assert_ok!(Uniques::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); - assert_ok!(Uniques::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false)); - assert_ok!(Uniques::set_accept_ownership(Origin::signed(3), Some(0))); - assert_ok!(Uniques::transfer_ownership(Origin::signed(2), 0, 3)); + assert_ok!(Uniques::set_collection_metadata( + RuntimeOrigin::signed(2), + 0, + bvec![0u8; 20], + false + )); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Uniques::set_accept_ownership(RuntimeOrigin::signed(3), Some(0))); + assert_ok!(Uniques::transfer_ownership(RuntimeOrigin::signed(2), 0, 3)); assert_eq!(collections(), vec![(3, 0)]); assert_eq!(Balances::total_balance(&2), 57); assert_eq!(Balances::total_balance(&3), 145); @@ -266,7 +288,7 @@ fn transfer_owner_should_work() { // 2's acceptence from before is reset when it became owner, so it cannot be transfered // without a fresh acceptance. assert_noop!( - Uniques::transfer_ownership(Origin::signed(3), 0, 2), + Uniques::transfer_ownership(RuntimeOrigin::signed(3), 0, 2), Error::::Unaccepted ); }); @@ -275,14 +297,14 @@ fn transfer_owner_should_work() { #[test] fn set_team_should_work() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); - - assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 2)); - assert_ok!(Uniques::freeze(Origin::signed(4), 0, 42)); - assert_ok!(Uniques::thaw(Origin::signed(3), 0, 42)); - assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 3)); - assert_ok!(Uniques::burn(Origin::signed(3), 0, 42, None)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); + + assert_ok!(Uniques::mint(RuntimeOrigin::signed(2), 0, 42, 2)); + assert_ok!(Uniques::freeze(RuntimeOrigin::signed(4), 0, 42)); + assert_ok!(Uniques::thaw(RuntimeOrigin::signed(3), 0, 42)); + assert_ok!(Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 3)); + assert_ok!(Uniques::burn(RuntimeOrigin::signed(3), 0, 42, None)); }); } @@ -291,59 +313,89 @@ fn set_collection_metadata_should_work() { new_test_ext().execute_with(|| { // Cannot add metadata to unknown item assert_noop!( - Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false), + Uniques::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 20], false), Error::::UnknownCollection, ); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); // Cannot add metadata to unowned item assert_noop!( - Uniques::set_collection_metadata(Origin::signed(2), 0, bvec![0u8; 20], false), + Uniques::set_collection_metadata(RuntimeOrigin::signed(2), 0, bvec![0u8; 20], false), Error::::NoPermission, ); // Successfully add metadata and take deposit Balances::make_free_balance_be(&1, 30); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 20], false)); + assert_ok!(Uniques::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0u8; 20], + false + )); assert_eq!(Balances::free_balance(&1), 9); assert!(CollectionMetadataOf::::contains_key(0)); // Force origin works, too. 
- assert_ok!(Uniques::set_collection_metadata(Origin::root(), 0, bvec![0u8; 18], false)); + assert_ok!(Uniques::set_collection_metadata( + RuntimeOrigin::root(), + 0, + bvec![0u8; 18], + false + )); // Update deposit - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false)); + assert_ok!(Uniques::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0u8; 15], + false + )); assert_eq!(Balances::free_balance(&1), 14); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 25], false)); + assert_ok!(Uniques::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0u8; 25], + false + )); assert_eq!(Balances::free_balance(&1), 4); // Cannot over-reserve assert_noop!( - Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 40], false), + Uniques::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 40], false), BalancesError::::InsufficientBalance, ); // Can't set or clear metadata once frozen - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], true)); + assert_ok!(Uniques::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0u8; 15], + true + )); assert_noop!( - Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0u8; 15], false), + Uniques::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![0u8; 15], false), Error::::Frozen, ); assert_noop!( - Uniques::clear_collection_metadata(Origin::signed(1), 0), + Uniques::clear_collection_metadata(RuntimeOrigin::signed(1), 0), Error::::Frozen ); // Clear Metadata - assert_ok!(Uniques::set_collection_metadata(Origin::root(), 0, bvec![0u8; 15], false)); + assert_ok!(Uniques::set_collection_metadata( + RuntimeOrigin::root(), + 0, + bvec![0u8; 15], + false + )); assert_noop!( - Uniques::clear_collection_metadata(Origin::signed(2), 0), + Uniques::clear_collection_metadata(RuntimeOrigin::signed(2), 0), Error::::NoPermission ); assert_noop!( - Uniques::clear_collection_metadata(Origin::signed(1), 1), + Uniques::clear_collection_metadata(RuntimeOrigin::signed(1), 1), Error::::UnknownCollection ); - assert_ok!(Uniques::clear_collection_metadata(Origin::signed(1), 0)); + assert_ok!(Uniques::clear_collection_metadata(RuntimeOrigin::signed(1), 0)); assert!(!CollectionMetadataOf::::contains_key(0)); }); } @@ -354,53 +406,56 @@ fn set_item_metadata_should_work() { Balances::make_free_balance_be(&1, 30); // Cannot add metadata to unknown item - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); // Cannot add metadata to unowned item assert_noop!( - Uniques::set_metadata(Origin::signed(2), 0, 42, bvec![0u8; 20], false), + Uniques::set_metadata(RuntimeOrigin::signed(2), 0, 42, bvec![0u8; 20], false), Error::::NoPermission, ); // Successfully add metadata and take deposit - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 20], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 20], false)); assert_eq!(Balances::free_balance(&1), 8); assert!(ItemMetadataOf::::contains_key(0, 42)); // Force origin works, too. 
- assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 18], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 18], false)); // Update deposit - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], false)); assert_eq!(Balances::free_balance(&1), 13); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 25], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 25], false)); assert_eq!(Balances::free_balance(&1), 3); // Cannot over-reserve assert_noop!( - Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 40], false), + Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 40], false), BalancesError::::InsufficientBalance, ); // Can't set or clear metadata once frozen - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], true)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], true)); assert_noop!( - Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0u8; 15], false), + Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0u8; 15], false), Error::::Frozen, ); - assert_noop!(Uniques::clear_metadata(Origin::signed(1), 0, 42), Error::::Frozen); + assert_noop!( + Uniques::clear_metadata(RuntimeOrigin::signed(1), 0, 42), + Error::::Frozen + ); // Clear Metadata - assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 15], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::root(), 0, 42, bvec![0u8; 15], false)); assert_noop!( - Uniques::clear_metadata(Origin::signed(2), 0, 42), + Uniques::clear_metadata(RuntimeOrigin::signed(2), 0, 42), Error::::NoPermission ); assert_noop!( - Uniques::clear_metadata(Origin::signed(1), 1, 42), + Uniques::clear_metadata(RuntimeOrigin::signed(1), 1, 42), Error::::UnknownCollection ); - assert_ok!(Uniques::clear_metadata(Origin::signed(1), 0, 42)); + assert_ok!(Uniques::clear_metadata(RuntimeOrigin::signed(1), 0, 42)); assert!(!ItemMetadataOf::::contains_key(0, 42)); }); } @@ -410,11 +465,23 @@ fn set_attribute_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![1], bvec![0])); + assert_ok!(Uniques::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + bvec![0], + bvec![0] + )); + assert_ok!(Uniques::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + bvec![1], + bvec![0] + )); assert_eq!( attributes(0), vec![ @@ -425,7 +492,13 @@ fn set_attribute_should_work() { ); assert_eq!(Balances::reserved_balance(1), 9); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0; 10])); + assert_ok!(Uniques::set_attribute( + RuntimeOrigin::signed(1), + 0, + None, + bvec![0], + bvec![0; 10] + )); assert_eq!( attributes(0), vec![ @@ -436,7 +509,7 @@ fn set_attribute_should_work() { ); assert_eq!(Balances::reserved_balance(1), 18); - assert_ok!(Uniques::clear_attribute(Origin::signed(1), 0, Some(0), bvec![1])); + 
assert_ok!(Uniques::clear_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![1])); assert_eq!( attributes(0), vec![(None, bvec![0], bvec![0; 10]), (Some(0), bvec![0], bvec![0]),] @@ -444,7 +517,7 @@ fn set_attribute_should_work() { assert_eq!(Balances::reserved_balance(1), 15); let w = Collection::::get(0).unwrap().destroy_witness(); - assert_ok!(Uniques::destroy(Origin::signed(1), 0, w)); + assert_ok!(Uniques::destroy(RuntimeOrigin::signed(1), 0, w)); assert_eq!(attributes(0), vec![]); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -455,11 +528,23 @@ fn set_attribute_should_respect_freeze() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0])); + assert_ok!(Uniques::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + bvec![0], + bvec![0] + )); + assert_ok!(Uniques::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(1), + bvec![0], + bvec![0] + )); assert_eq!( attributes(0), vec![ @@ -470,15 +555,33 @@ fn set_attribute_should_respect_freeze() { ); assert_eq!(Balances::reserved_balance(1), 9); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![], true)); + assert_ok!(Uniques::set_collection_metadata(RuntimeOrigin::signed(1), 0, bvec![], true)); let e = Error::::Frozen; - assert_noop!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0]), e); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1])); + assert_noop!( + Uniques::set_attribute(RuntimeOrigin::signed(1), 0, None, bvec![0], bvec![0]), + e + ); + assert_ok!(Uniques::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(0), + bvec![0], + bvec![1] + )); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 0, bvec![], true)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 0, bvec![], true)); let e = Error::::Frozen; - assert_noop!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![1]), e); - assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![1])); + assert_noop!( + Uniques::set_attribute(RuntimeOrigin::signed(1), 0, Some(0), bvec![0], bvec![1]), + e + ); + assert_ok!(Uniques::set_attribute( + RuntimeOrigin::signed(1), + 0, + Some(1), + bvec![0], + bvec![1] + )); }); } @@ -487,32 +590,42 @@ fn force_item_status_should_work() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 69, 2)); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 1)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 69, 2)); + 
assert_ok!(Uniques::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0; 20], + false + )); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 65); // force item status to be free holding - assert_ok!(Uniques::force_item_status(Origin::root(), 0, 1, 1, 1, 1, true, false)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 142, 1)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 169, 2)); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 142, bvec![0; 20], false)); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 169, bvec![0; 20], false)); + assert_ok!(Uniques::force_item_status(RuntimeOrigin::root(), 0, 1, 1, 1, 1, true, false)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 142, 1)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 169, 2)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 142, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 169, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 65); - assert_ok!(Uniques::redeposit(Origin::signed(1), 0, bvec![0, 42, 50, 69, 100])); + assert_ok!(Uniques::redeposit(RuntimeOrigin::signed(1), 0, bvec![0, 42, 50, 69, 100])); assert_eq!(Balances::reserved_balance(1), 63); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 42, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 42, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 42); - assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); + assert_ok!(Uniques::set_metadata(RuntimeOrigin::signed(1), 0, 69, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 21); - assert_ok!(Uniques::set_collection_metadata(Origin::signed(1), 0, bvec![0; 20], false)); + assert_ok!(Uniques::set_collection_metadata( + RuntimeOrigin::signed(1), + 0, + bvec![0; 20], + false + )); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -521,23 +634,29 @@ fn force_item_status_should_work() { fn burn_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, false)); - assert_ok!(Uniques::set_team(Origin::signed(1), 0, 2, 3, 4)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, false)); + assert_ok!(Uniques::set_team(RuntimeOrigin::signed(1), 0, 2, 3, 4)); assert_noop!( - Uniques::burn(Origin::signed(5), 0, 42, Some(5)), + Uniques::burn(RuntimeOrigin::signed(5), 0, 42, Some(5)), Error::::UnknownCollection ); - assert_ok!(Uniques::mint(Origin::signed(2), 0, 42, 5)); - assert_ok!(Uniques::mint(Origin::signed(2), 0, 69, 5)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(2), 0, 42, 5)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(2), 0, 69, 5)); assert_eq!(Balances::reserved_balance(1), 2); - assert_noop!(Uniques::burn(Origin::signed(0), 0, 42, None), Error::::NoPermission); - assert_noop!(Uniques::burn(Origin::signed(5), 0, 42, Some(6)), Error::::WrongOwner); + assert_noop!( + Uniques::burn(RuntimeOrigin::signed(0), 0, 42, None), + Error::::NoPermission + ); + assert_noop!( + Uniques::burn(RuntimeOrigin::signed(5), 0, 42, Some(6)), + Error::::WrongOwner + ); - assert_ok!(Uniques::burn(Origin::signed(5), 0, 42, Some(5))); - assert_ok!(Uniques::burn(Origin::signed(3), 0, 69, Some(5))); + assert_ok!(Uniques::burn(RuntimeOrigin::signed(5), 
0, 42, Some(5))); + assert_ok!(Uniques::burn(RuntimeOrigin::signed(3), 0, 69, Some(5))); assert_eq!(Balances::reserved_balance(1), 0); }); } @@ -545,31 +664,37 @@ fn burn_works() { #[test] fn approval_lifecycle_works() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); - assert_ok!(Uniques::transfer(Origin::signed(3), 0, 42, 4)); - assert_noop!(Uniques::transfer(Origin::signed(3), 0, 42, 3), Error::::NoPermission); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 4)); + assert_noop!( + Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 3), + Error::::NoPermission + ); assert!(Item::::get(0, 42).unwrap().approved.is_none()); - assert_ok!(Uniques::approve_transfer(Origin::signed(4), 0, 42, 2)); - assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 2)); + assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(4), 0, 42, 2)); + assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 2)); }); } #[test] fn approved_account_gets_reset_after_transfer() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); - assert_ok!(Uniques::transfer(Origin::signed(2), 0, 42, 5)); + assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::transfer(RuntimeOrigin::signed(2), 0, 42, 5)); // this shouldn't work because we have just transfered the item to another account. - assert_noop!(Uniques::transfer(Origin::signed(3), 0, 42, 4), Error::::NoPermission); + assert_noop!( + Uniques::transfer(RuntimeOrigin::signed(3), 0, 42, 4), + Error::::NoPermission + ); // The new owner can transfer fine: - assert_ok!(Uniques::transfer(Origin::signed(5), 0, 42, 6)); + assert_ok!(Uniques::transfer(RuntimeOrigin::signed(5), 0, 42, 6)); }); } @@ -581,47 +706,50 @@ fn approved_account_gets_reset_after_buy_item() { Balances::make_free_balance_be(&2, 100); - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, item, 1)); - assert_ok!(Uniques::approve_transfer(Origin::signed(1), 0, item, 5)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, item, 1)); + assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(1), 0, item, 5)); - assert_ok!(Uniques::set_price(Origin::signed(1), 0, item, Some(price), None)); + assert_ok!(Uniques::set_price(RuntimeOrigin::signed(1), 0, item, Some(price), None)); - assert_ok!(Uniques::buy_item(Origin::signed(2), 0, item, price)); + assert_ok!(Uniques::buy_item(RuntimeOrigin::signed(2), 0, item, price)); // this shouldn't work because the item has been bough and the approved account should be // reset. 
- assert_noop!(Uniques::transfer(Origin::signed(5), 0, item, 4), Error::::NoPermission); + assert_noop!( + Uniques::transfer(RuntimeOrigin::signed(5), 0, item, 4), + Error::::NoPermission + ); }); } #[test] fn cancel_approval_works() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); assert_noop!( - Uniques::cancel_approval(Origin::signed(2), 1, 42, None), + Uniques::cancel_approval(RuntimeOrigin::signed(2), 1, 42, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::signed(2), 0, 43, None), + Uniques::cancel_approval(RuntimeOrigin::signed(2), 0, 43, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::signed(3), 0, 42, None), + Uniques::cancel_approval(RuntimeOrigin::signed(3), 0, 42, None), Error::::NoPermission ); assert_noop!( - Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(4)), + Uniques::cancel_approval(RuntimeOrigin::signed(2), 0, 42, Some(4)), Error::::WrongDelegate ); - assert_ok!(Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(3))); + assert_ok!(Uniques::cancel_approval(RuntimeOrigin::signed(2), 0, 42, Some(3))); assert_noop!( - Uniques::cancel_approval(Origin::signed(2), 0, 42, None), + Uniques::cancel_approval(RuntimeOrigin::signed(2), 0, 42, None), Error::::NoDelegate ); }); @@ -630,26 +758,26 @@ fn cancel_approval_works() { #[test] fn cancel_approval_works_with_admin() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); assert_noop!( - Uniques::cancel_approval(Origin::signed(1), 1, 42, None), + Uniques::cancel_approval(RuntimeOrigin::signed(1), 1, 42, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::signed(1), 0, 43, None), + Uniques::cancel_approval(RuntimeOrigin::signed(1), 0, 43, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(4)), + Uniques::cancel_approval(RuntimeOrigin::signed(1), 0, 42, Some(4)), Error::::WrongDelegate ); - assert_ok!(Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(3))); + assert_ok!(Uniques::cancel_approval(RuntimeOrigin::signed(1), 0, 42, Some(3))); assert_noop!( - Uniques::cancel_approval(Origin::signed(1), 0, 42, None), + Uniques::cancel_approval(RuntimeOrigin::signed(1), 0, 42, None), Error::::NoDelegate ); }); @@ -658,26 +786,26 @@ fn cancel_approval_works_with_admin() { #[test] fn cancel_approval_works_with_force() { new_test_ext().execute_with(|| { - assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); - assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), 0, 1, true)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(1), 0, 42, 2)); - assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); + 
assert_ok!(Uniques::approve_transfer(RuntimeOrigin::signed(2), 0, 42, 3)); assert_noop!( - Uniques::cancel_approval(Origin::root(), 1, 42, None), + Uniques::cancel_approval(RuntimeOrigin::root(), 1, 42, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::root(), 0, 43, None), + Uniques::cancel_approval(RuntimeOrigin::root(), 0, 43, None), Error::::UnknownCollection ); assert_noop!( - Uniques::cancel_approval(Origin::root(), 0, 42, Some(4)), + Uniques::cancel_approval(RuntimeOrigin::root(), 0, 42, Some(4)), Error::::WrongDelegate ); - assert_ok!(Uniques::cancel_approval(Origin::root(), 0, 42, Some(3))); + assert_ok!(Uniques::cancel_approval(RuntimeOrigin::root(), 0, 42, Some(3))); assert_noop!( - Uniques::cancel_approval(Origin::root(), 0, 42, None), + Uniques::cancel_approval(RuntimeOrigin::root(), 0, 42, None), Error::::NoDelegate ); }); @@ -691,11 +819,11 @@ fn max_supply_should_work() { let max_supply = 2; // validate set_collection_max_supply - assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), collection_id, user_id, true)); assert!(!CollectionMaxSupply::::contains_key(collection_id)); assert_ok!(Uniques::set_collection_max_supply( - Origin::signed(user_id), + RuntimeOrigin::signed(user_id), collection_id, max_supply )); @@ -708,7 +836,7 @@ fn max_supply_should_work() { assert_noop!( Uniques::set_collection_max_supply( - Origin::signed(user_id), + RuntimeOrigin::signed(user_id), collection_id, max_supply + 1 ), @@ -716,16 +844,16 @@ fn max_supply_should_work() { ); // validate we can't mint more to max supply - assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, 0, user_id)); - assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, 1, user_id)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_id), collection_id, 0, user_id)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_id), collection_id, 1, user_id)); assert_noop!( - Uniques::mint(Origin::signed(user_id), collection_id, 2, user_id), + Uniques::mint(RuntimeOrigin::signed(user_id), collection_id, 2, user_id), Error::::MaxSupplyReached ); // validate we remove the CollectionMaxSupply record when we destroy the collection assert_ok!(Uniques::destroy( - Origin::signed(user_id), + RuntimeOrigin::signed(user_id), collection_id, Collection::::get(collection_id).unwrap().destroy_witness() )); @@ -741,13 +869,13 @@ fn set_price_should_work() { let item_1 = 1; let item_2 = 2; - assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_id, true)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), collection_id, user_id, true)); - assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_1, user_id)); - assert_ok!(Uniques::mint(Origin::signed(user_id), collection_id, item_2, user_id)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_id), collection_id, item_1, user_id)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_id), collection_id, item_2, user_id)); assert_ok!(Uniques::set_price( - Origin::signed(user_id), + RuntimeOrigin::signed(user_id), collection_id, item_1, Some(1), @@ -755,7 +883,7 @@ fn set_price_should_work() { )); assert_ok!(Uniques::set_price( - Origin::signed(user_id), + RuntimeOrigin::signed(user_id), collection_id, item_2, Some(2), @@ -778,7 +906,13 @@ fn set_price_should_work() { })); // validate we can unset the price - assert_ok!(Uniques::set_price(Origin::signed(user_id), collection_id, item_2, None, None)); + 
assert_ok!(Uniques::set_price( + RuntimeOrigin::signed(user_id), + collection_id, + item_2, + None, + None + )); assert!(events().contains(&Event::::ItemPriceRemoved { collection: collection_id, item: item_2 @@ -805,14 +939,14 @@ fn buy_item_should_work() { Balances::make_free_balance_be(&user_2, initial_balance); Balances::make_free_balance_be(&user_3, initial_balance); - assert_ok!(Uniques::force_create(Origin::root(), collection_id, user_1, true)); + assert_ok!(Uniques::force_create(RuntimeOrigin::root(), collection_id, user_1, true)); - assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_1, user_1)); - assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_2, user_1)); - assert_ok!(Uniques::mint(Origin::signed(user_1), collection_id, item_3, user_1)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_1), collection_id, item_1, user_1)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_1), collection_id, item_2, user_1)); + assert_ok!(Uniques::mint(RuntimeOrigin::signed(user_1), collection_id, item_3, user_1)); assert_ok!(Uniques::set_price( - Origin::signed(user_1), + RuntimeOrigin::signed(user_1), collection_id, item_1, Some(price_1), @@ -820,7 +954,7 @@ fn buy_item_should_work() { )); assert_ok!(Uniques::set_price( - Origin::signed(user_1), + RuntimeOrigin::signed(user_1), collection_id, item_2, Some(price_2), @@ -829,12 +963,17 @@ fn buy_item_should_work() { // can't buy for less assert_noop!( - Uniques::buy_item(Origin::signed(user_2), collection_id, item_1, 1), + Uniques::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_1, 1), Error::::BidTooLow ); // pass the higher price to validate it will still deduct correctly - assert_ok!(Uniques::buy_item(Origin::signed(user_2), collection_id, item_1, price_1 + 1,)); + assert_ok!(Uniques::buy_item( + RuntimeOrigin::signed(user_2), + collection_id, + item_1, + price_1 + 1, + )); // validate the new owner & balances let item = Item::::get(collection_id, item_1).unwrap(); @@ -844,18 +983,23 @@ fn buy_item_should_work() { // can't buy from yourself assert_noop!( - Uniques::buy_item(Origin::signed(user_1), collection_id, item_2, price_2), + Uniques::buy_item(RuntimeOrigin::signed(user_1), collection_id, item_2, price_2), Error::::NoPermission ); // can't buy when the item is listed for a specific buyer assert_noop!( - Uniques::buy_item(Origin::signed(user_2), collection_id, item_2, price_2), + Uniques::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_2, price_2), Error::::NoPermission ); // can buy when I'm a whitelisted buyer - assert_ok!(Uniques::buy_item(Origin::signed(user_3), collection_id, item_2, price_2,)); + assert_ok!(Uniques::buy_item( + RuntimeOrigin::signed(user_3), + collection_id, + item_2, + price_2, + )); assert!(events().contains(&Event::::ItemBought { collection: collection_id, @@ -870,14 +1014,14 @@ fn buy_item_should_work() { // can't buy when item is not for sale assert_noop!( - Uniques::buy_item(Origin::signed(user_2), collection_id, item_3, price_2), + Uniques::buy_item(RuntimeOrigin::signed(user_2), collection_id, item_3, price_2), Error::::NotForSale ); // ensure we can't buy an item when the collection or an item is frozen { assert_ok!(Uniques::set_price( - Origin::signed(user_1), + RuntimeOrigin::signed(user_1), collection_id, item_3, Some(price_1), @@ -885,26 +1029,32 @@ fn buy_item_should_work() { )); // freeze collection - assert_ok!(Uniques::freeze_collection(Origin::signed(user_1), collection_id)); + 
assert_ok!(Uniques::freeze_collection(RuntimeOrigin::signed(user_1), collection_id)); let buy_item_call = mock::RuntimeCall::Uniques(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1, }); - assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); + assert_noop!( + buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), + Error::::Frozen + ); - assert_ok!(Uniques::thaw_collection(Origin::signed(user_1), collection_id)); + assert_ok!(Uniques::thaw_collection(RuntimeOrigin::signed(user_1), collection_id)); // freeze item - assert_ok!(Uniques::freeze(Origin::signed(user_1), collection_id, item_3)); + assert_ok!(Uniques::freeze(RuntimeOrigin::signed(user_1), collection_id, item_3)); let buy_item_call = mock::RuntimeCall::Uniques(crate::Call::::buy_item { collection: collection_id, item: item_3, bid_price: price_1, }); - assert_noop!(buy_item_call.dispatch(Origin::signed(user_2)), Error::::Frozen); + assert_noop!( + buy_item_call.dispatch(RuntimeOrigin::signed(user_2)), + Error::::Frozen + ); } }); } diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index c29eded25aa5b..07bc14951cb3b 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -30,7 +30,7 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { } benchmarks! { - where_clause { where ::PalletsOrigin: Clone } + where_clause { where ::PalletsOrigin: Clone } batch { let c in 0 .. 1000; let mut calls: Vec<::RuntimeCall> = Vec::new(); @@ -68,8 +68,8 @@ benchmarks! { dispatch_as { let caller = account("caller", SEED, SEED); let call = Box::new(frame_system::Call::remark { remark: vec![] }.into()); - let origin: T::Origin = RawOrigin::Signed(caller).into(); - let pallets_origin: ::PalletsOrigin = origin.caller().clone(); + let origin: T::RuntimeOrigin = RawOrigin::Signed(caller).into(); + let pallets_origin: ::PalletsOrigin = origin.caller().clone(); let pallets_origin = Into::::into(pallets_origin); }: _(RawOrigin::Root, Box::new(pallets_origin), call) diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 08fb34ed9e3c1..bc19b5625a71a 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -87,17 +87,17 @@ pub mod pallet { /// The overarching call type. type RuntimeCall: Parameter - + Dispatchable + + Dispatchable + GetDispatchInfo + From> - + UnfilteredDispatchable + + UnfilteredDispatchable + IsSubType> + IsType<::RuntimeCall>; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin: Parameter + - Into<::Origin> + - IsType<<::Origin as frame_support::traits::OriginTrait>::PalletsOrigin>; + Into<::RuntimeOrigin> + + IsType<<::RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin>; /// Weight information for extrinsics in this pallet. 
type WeightInfo: WeightInfo; diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 93b80672f55e2..65d9c966ebdfd 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -106,7 +106,7 @@ impl frame_system::Config for Test { type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -199,12 +199,16 @@ fn call_foobar(err: bool, start_weight: Weight, end_weight: Option) -> R fn as_derivative_works() { new_test_ext().execute_with(|| { let sub_1_0 = Utility::derivative_account_id(1, 0); - assert_ok!(Balances::transfer(Origin::signed(1), sub_1_0, 5)); + assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), sub_1_0, 5)); assert_err_ignore_postinfo!( - Utility::as_derivative(Origin::signed(1), 1, Box::new(call_transfer(6, 3)),), + Utility::as_derivative(RuntimeOrigin::signed(1), 1, Box::new(call_transfer(6, 3)),), BalancesError::::InsufficientBalance ); - assert_ok!(Utility::as_derivative(Origin::signed(1), 0, Box::new(call_transfer(2, 3)),)); + assert_ok!(Utility::as_derivative( + RuntimeOrigin::signed(1), + 0, + Box::new(call_transfer(2, 3)), + )); assert_eq!(Balances::free_balance(sub_1_0), 2); assert_eq!(Balances::free_balance(2), 13); }); @@ -224,7 +228,7 @@ fn as_derivative_handles_weight_refund() { call: Box::new(inner_call), }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), info.weight); @@ -235,7 +239,7 @@ fn as_derivative_handles_weight_refund() { call: Box::new(inner_call), }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); // Diff is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight - diff); @@ -247,7 +251,7 @@ fn as_derivative_handles_weight_refund() { call: Box::new(inner_call), }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_noop!( result, DispatchErrorWithPostInfo { @@ -267,7 +271,7 @@ fn as_derivative_handles_weight_refund() { call: Box::new(inner_call), }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_noop!( result, DispatchErrorWithPostInfo { @@ -287,7 +291,7 @@ fn as_derivative_filters() { new_test_ext().execute_with(|| { assert_err_ignore_postinfo!( Utility::as_derivative( - Origin::signed(1), + RuntimeOrigin::signed(1), 1, Box::new(RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { dest: 2, @@ -310,7 +314,7 @@ fn batch_with_root_works() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_ok!(Utility::batch( - Origin::root(), + RuntimeOrigin::root(), vec![ RuntimeCall::Balances(BalancesCall::force_transfer { source: 1, @@ -337,7 +341,7 @@ fn batch_with_signed_works() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_ok!(Utility::batch( - Origin::signed(1), + RuntimeOrigin::signed(1), vec![call_transfer(2, 5), call_transfer(2, 5)] ),); assert_eq!(Balances::free_balance(1), 0); @@ -349,7 +353,7 @@ fn batch_with_signed_works() { fn batch_with_signed_filters() { new_test_ext().execute_with(|| { 
assert_ok!(Utility::batch( - Origin::signed(1), + RuntimeOrigin::signed(1), vec![RuntimeCall::Balances(pallet_balances::Call::transfer_keep_alive { dest: 2, value: 1 @@ -371,7 +375,7 @@ fn batch_early_exit_works() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_ok!(Utility::batch( - Origin::signed(1), + RuntimeOrigin::signed(1), vec![call_transfer(2, 5), call_transfer(2, 10), call_transfer(2, 5),] ),); assert_eq!(Balances::free_balance(1), 5); @@ -409,7 +413,7 @@ fn batch_handles_weight_refund() { let batch_calls = vec![inner_call; batch_len as usize]; let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), info.weight); @@ -418,7 +422,7 @@ fn batch_handles_weight_refund() { let batch_calls = vec![inner_call; batch_len as usize]; let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); // Diff is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); @@ -429,7 +433,7 @@ fn batch_handles_weight_refund() { let batch_calls = vec![good_call, bad_call]; let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); System::assert_last_event( utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), @@ -444,7 +448,7 @@ fn batch_handles_weight_refund() { let batch_len = batch_calls.len() as u64; let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); System::assert_last_event( utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), @@ -457,7 +461,7 @@ fn batch_handles_weight_refund() { let batch_calls = vec![good_call, bad_call.clone(), bad_call]; let call = RuntimeCall::Utility(UtilityCall::batch { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); System::assert_last_event( utility::Event::BatchInterrupted { index: 1, error: DispatchError::Other("") }.into(), @@ -476,7 +480,7 @@ fn batch_all_works() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_ok!(Utility::batch_all( - Origin::signed(1), + RuntimeOrigin::signed(1), vec![call_transfer(2, 5), call_transfer(2, 5)] ),); assert_eq!(Balances::free_balance(1), 0); @@ -496,7 +500,7 @@ fn batch_all_revert() { calls: vec![call_transfer(2, 5), call_transfer(2, 10), call_transfer(2, 5)], }); assert_noop!( - batch_all_calls.dispatch(Origin::signed(1)), + batch_all_calls.dispatch(RuntimeOrigin::signed(1)), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { actual_weight: Some( @@ -525,7 +529,7 @@ fn batch_all_handles_weight_refund() { let batch_calls = vec![inner_call; batch_len as usize]; let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); let 
info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); assert_eq!(extract_actual_weight(&result, &info), info.weight); @@ -534,7 +538,7 @@ fn batch_all_handles_weight_refund() { let batch_calls = vec![inner_call; batch_len as usize]; let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_ok!(result); // Diff is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); @@ -545,7 +549,7 @@ fn batch_all_handles_weight_refund() { let batch_calls = vec![good_call, bad_call]; let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); // No weight is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight); @@ -557,7 +561,7 @@ fn batch_all_handles_weight_refund() { let batch_len = batch_calls.len() as u64; let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); @@ -567,7 +571,7 @@ fn batch_all_handles_weight_refund() { let batch_calls = vec![good_call, bad_call.clone(), bad_call]; let call = RuntimeCall::Utility(UtilityCall::batch_all { calls: batch_calls }); let info = call.get_dispatch_info(); - let result = call.dispatch(Origin::signed(1)); + let result = call.dispatch(RuntimeOrigin::signed(1)); assert_err_ignore_postinfo!(result, "The cake is a lie."); assert_eq!( extract_actual_weight(&result, &info), @@ -590,7 +594,7 @@ fn batch_all_does_not_nest() { assert_eq!(Balances::free_balance(2), 10); // A nested batch_all call will not pass the filter, and fail with `BadOrigin`. assert_noop!( - Utility::batch_all(Origin::signed(1), vec![batch_all.clone()]), + Utility::batch_all(RuntimeOrigin::signed(1), vec![batch_all.clone()]), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { actual_weight: Some(::WeightInfo::batch_all(1) + info.weight), @@ -606,7 +610,7 @@ fn batch_all_does_not_nest() { let batch_nested = RuntimeCall::Utility(UtilityCall::batch { calls: vec![batch_all] }); // Batch will end with `Ok`, but does not actually execute as we can see from the event // and balances. 
- assert_ok!(Utility::batch_all(Origin::signed(1), vec![batch_nested])); + assert_ok!(Utility::batch_all(RuntimeOrigin::signed(1), vec![batch_nested])); System::assert_has_event( utility::Event::BatchInterrupted { index: 0, @@ -623,8 +627,14 @@ fn batch_all_does_not_nest() { fn batch_limit() { new_test_ext().execute_with(|| { let calls = vec![RuntimeCall::System(SystemCall::remark { remark: vec![] }); 40_000]; - assert_noop!(Utility::batch(Origin::signed(1), calls.clone()), Error::::TooManyCalls); - assert_noop!(Utility::batch_all(Origin::signed(1), calls), Error::::TooManyCalls); + assert_noop!( + Utility::batch(RuntimeOrigin::signed(1), calls.clone()), + Error::::TooManyCalls + ); + assert_noop!( + Utility::batch_all(RuntimeOrigin::signed(1), calls), + Error::::TooManyCalls + ); }); } @@ -634,7 +644,7 @@ fn force_batch_works() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_ok!(Utility::force_batch( - Origin::signed(1), + RuntimeOrigin::signed(1), vec![ call_transfer(2, 5), call_foobar(true, Weight::from_ref_time(75), None), @@ -650,12 +660,12 @@ fn force_batch_works() { assert_eq!(Balances::free_balance(2), 20); assert_ok!(Utility::force_batch( - Origin::signed(2), + RuntimeOrigin::signed(2), vec![call_transfer(1, 5), call_transfer(1, 5),] ),); System::assert_last_event(utility::Event::BatchCompleted.into()); - assert_ok!(Utility::force_batch(Origin::signed(1), vec![call_transfer(2, 50),]),); + assert_ok!(Utility::force_batch(RuntimeOrigin::signed(1), vec![call_transfer(2, 50),]),); System::assert_last_event(utility::Event::BatchCompletedWithErrors.into()); }); } diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs index cc4a3523459d7..c4e520b37c8c8 100644 --- a/frame/vesting/src/mock.rs +++ b/frame/vesting/src/mock.rs @@ -67,7 +67,7 @@ impl frame_system::Config for Test { type OnNewAccount = (); type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type PalletInfo = PalletInfo; type SS58Prefix = (); type SystemWeightInfo = (); diff --git a/frame/whitelist/src/benchmarking.rs b/frame/whitelist/src/benchmarking.rs index 9e55d38fd8140..e0a758b2ddfdf 100644 --- a/frame/whitelist/src/benchmarking.rs +++ b/frame/whitelist/src/benchmarking.rs @@ -33,7 +33,7 @@ benchmarks! { whitelist_call { let origin = T::WhitelistOrigin::successful_origin(); let call_hash = Default::default(); - }: _(origin, call_hash) + }: _(origin, call_hash) verify { ensure!( WhitelistedCall::::contains_key(call_hash), @@ -50,7 +50,7 @@ benchmarks! { let call_hash = Default::default(); Pallet::::whitelist_call(origin.clone(), call_hash) .expect("whitelisting call must be successful"); - }: _(origin, call_hash) + }: _(origin, call_hash) verify { ensure!( !WhitelistedCall::::contains_key(call_hash), @@ -82,7 +82,7 @@ benchmarks! { let encoded_call = encoded_call.try_into().expect("encoded_call must be small enough"); T::PreimageProvider::note_preimage(encoded_call); - }: _(origin, call_hash, call_weight) + }: _(origin, call_hash, call_weight) verify { ensure!( !WhitelistedCall::::contains_key(call_hash), @@ -105,7 +105,7 @@ benchmarks! 
{ Pallet::::whitelist_call(origin.clone(), call_hash) .expect("whitelisting call must be successful"); - }: _(origin, Box::new(call)) + }: _(origin, Box::new(call)) verify { ensure!( !WhitelistedCall::::contains_key(call_hash), diff --git a/frame/whitelist/src/lib.rs b/frame/whitelist/src/lib.rs index c5621a239f3a6..55d5c42b8f4bc 100644 --- a/frame/whitelist/src/lib.rs +++ b/frame/whitelist/src/lib.rs @@ -66,7 +66,7 @@ pub mod pallet { /// The overarching call type. type RuntimeCall: IsType<::RuntimeCall> - + Dispatchable + + Dispatchable + GetDispatchInfo + FullCodec + TypeInfo @@ -74,10 +74,10 @@ pub mod pallet { + Parameter; /// Required origin for whitelisting a call. - type WhitelistOrigin: EnsureOrigin; + type WhitelistOrigin: EnsureOrigin; /// Required origin for dispatching whitelisted call with root origin. - type DispatchWhitelistedOrigin: EnsureOrigin; + type DispatchWhitelistedOrigin: EnsureOrigin; /// The handler of pre-images. // NOTE: recipient is only needed for benchmarks. diff --git a/frame/whitelist/src/mock.rs b/frame/whitelist/src/mock.rs index b6eac316bc113..44aea86be6f19 100644 --- a/frame/whitelist/src/mock.rs +++ b/frame/whitelist/src/mock.rs @@ -58,7 +58,7 @@ impl frame_system::Config for Test { type BlockWeights = (); type BlockLength = (); type DbWeight = (); - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Index = u64; type BlockNumber = u64; type Hash = H256; diff --git a/frame/whitelist/src/tests.rs b/frame/whitelist/src/tests.rs index ebb111149fd42..fd6558e83f30e 100644 --- a/frame/whitelist/src/tests.rs +++ b/frame/whitelist/src/tests.rs @@ -32,35 +32,35 @@ fn test_whitelist_call_and_remove() { let call_hash = ::Hashing::hash(&encoded_call[..]); assert_noop!( - Whitelist::remove_whitelisted_call(Origin::root(), call_hash), + Whitelist::remove_whitelisted_call(RuntimeOrigin::root(), call_hash), crate::Error::::CallIsNotWhitelisted, ); assert_noop!( - Whitelist::whitelist_call(Origin::signed(1), call_hash), + Whitelist::whitelist_call(RuntimeOrigin::signed(1), call_hash), DispatchError::BadOrigin, ); - assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); + assert_ok!(Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash)); assert!(Preimage::preimage_requested(&call_hash)); assert_noop!( - Whitelist::whitelist_call(Origin::root(), call_hash), + Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash), crate::Error::::CallAlreadyWhitelisted, ); assert_noop!( - Whitelist::remove_whitelisted_call(Origin::signed(1), call_hash), + Whitelist::remove_whitelisted_call(RuntimeOrigin::signed(1), call_hash), DispatchError::BadOrigin, ); - assert_ok!(Whitelist::remove_whitelisted_call(Origin::root(), call_hash)); + assert_ok!(Whitelist::remove_whitelisted_call(RuntimeOrigin::root(), call_hash)); assert!(!Preimage::preimage_requested(&call_hash)); assert_noop!( - Whitelist::remove_whitelisted_call(Origin::root(), call_hash), + Whitelist::remove_whitelisted_call(RuntimeOrigin::root(), call_hash), crate::Error::::CallIsNotWhitelisted, ); }); @@ -75,41 +75,45 @@ fn test_whitelist_call_and_execute() { let call_hash = ::Hashing::hash(&encoded_call[..]); assert_noop!( - Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight), + Whitelist::dispatch_whitelisted_call(RuntimeOrigin::root(), call_hash, call_weight), crate::Error::::CallIsNotWhitelisted, ); - assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); + assert_ok!(Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash)); assert_noop!( - 
Whitelist::dispatch_whitelisted_call(Origin::signed(1), call_hash, call_weight), + Whitelist::dispatch_whitelisted_call(RuntimeOrigin::signed(1), call_hash, call_weight), DispatchError::BadOrigin, ); assert_noop!( - Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight), + Whitelist::dispatch_whitelisted_call(RuntimeOrigin::root(), call_hash, call_weight), crate::Error::::UnavailablePreImage, ); - assert_ok!(Preimage::note_preimage(Origin::root(), encoded_call)); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::root(), encoded_call)); assert!(Preimage::preimage_requested(&call_hash)); assert_noop!( Whitelist::dispatch_whitelisted_call( - Origin::root(), + RuntimeOrigin::root(), call_hash, call_weight - Weight::from_ref_time(1) ), crate::Error::::InvalidCallWeightWitness, ); - assert_ok!(Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight)); + assert_ok!(Whitelist::dispatch_whitelisted_call( + RuntimeOrigin::root(), + call_hash, + call_weight + )); assert!(!Preimage::preimage_requested(&call_hash)); assert_noop!( - Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight), + Whitelist::dispatch_whitelisted_call(RuntimeOrigin::root(), call_hash, call_weight), crate::Error::::CallIsNotWhitelisted, ); }); @@ -126,10 +130,14 @@ fn test_whitelist_call_and_execute_failing_call() { let encoded_call = call.encode(); let call_hash = ::Hashing::hash(&encoded_call[..]); - assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); - assert_ok!(Preimage::note_preimage(Origin::root(), encoded_call)); + assert_ok!(Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash)); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::root(), encoded_call)); assert!(Preimage::preimage_requested(&call_hash)); - assert_ok!(Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight)); + assert_ok!(Whitelist::dispatch_whitelisted_call( + RuntimeOrigin::root(), + call_hash, + call_weight + )); assert!(!Preimage::preimage_requested(&call_hash)); }); } @@ -142,18 +150,18 @@ fn test_whitelist_call_and_execute_without_note_preimage() { })); let call_hash = ::Hashing::hash_of(&call); - assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); + assert_ok!(Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash)); assert!(Preimage::preimage_requested(&call_hash)); assert_ok!(Whitelist::dispatch_whitelisted_call_with_preimage( - Origin::root(), + RuntimeOrigin::root(), call.clone() )); assert!(!Preimage::preimage_requested(&call_hash)); assert_noop!( - Whitelist::dispatch_whitelisted_call_with_preimage(Origin::root(), call), + Whitelist::dispatch_whitelisted_call_with_preimage(RuntimeOrigin::root(), call), crate::Error::::CallIsNotWhitelisted, ); }); @@ -171,11 +179,11 @@ fn test_whitelist_call_and_execute_decode_consumes_all() { let call_hash = ::Hashing::hash(&call[..]); - assert_ok!(Preimage::note_preimage(Origin::root(), call)); - assert_ok!(Whitelist::whitelist_call(Origin::root(), call_hash)); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::root(), call)); + assert_ok!(Whitelist::whitelist_call(RuntimeOrigin::root(), call_hash)); assert_noop!( - Whitelist::dispatch_whitelisted_call(Origin::root(), call_hash, call_weight), + Whitelist::dispatch_whitelisted_call(RuntimeOrigin::root(), call_hash, call_weight), crate::Error::::UndecodableCall, ); }); diff --git a/primitives/runtime/src/generic/checked_extrinsic.rs b/primitives/runtime/src/generic/checked_extrinsic.rs index 5d6c657a68977..fd7745c6031ff 100644 --- 
a/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/primitives/runtime/src/generic/checked_extrinsic.rs @@ -39,12 +39,13 @@ pub struct CheckedExtrinsic { pub function: Call, } -impl traits::Applyable for CheckedExtrinsic +impl traits::Applyable + for CheckedExtrinsic where AccountId: Member + MaybeDisplay, - Call: Member + Dispatchable, + Call: Member + Dispatchable, Extra: SignedExtension, - Origin: From>, + RuntimeOrigin: From>, { type Call = Call; @@ -78,7 +79,7 @@ where U::pre_dispatch(&self.function)?; (None, None) }; - let res = self.function.dispatch(Origin::from(maybe_who)); + let res = self.function.dispatch(RuntimeOrigin::from(maybe_who)); let post_info = match res { Ok(info) => info, Err(err) => err.post_info, diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 003fa62c9e088..d16a37e6a2059 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -358,8 +358,15 @@ where impl Applyable for TestXt where - Call: - 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable, + Call: 'static + + Sized + + Send + + Sync + + Clone + + Eq + + Codec + + Debug + + Dispatchable, Extra: SignedExtension, Origin: From>, { diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 495dfa66c5462..1c48b1933431d 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -1018,7 +1018,7 @@ pub trait Dispatchable { /// Every function call from your runtime has an origin, which specifies where the extrinsic was /// generated from. In the case of a signed extrinsic (transaction), the origin contains an /// identifier for the caller. The origin can be empty in the case of an inherent extrinsic. - type Origin; + type RuntimeOrigin; /// ... type Config; /// An opaque set of information attached to the transaction. This could be constructed anywhere @@ -1029,7 +1029,8 @@ pub trait Dispatchable { /// with information about a `Dispatchable` that is ownly known post dispatch. type PostInfo: Eq + PartialEq + Clone + Copy + Encode + Decode + Printable; /// Actually dispatch this call and return the result of it. - fn dispatch(self, origin: Self::Origin) -> crate::DispatchResultWithInfo; + fn dispatch(self, origin: Self::RuntimeOrigin) + -> crate::DispatchResultWithInfo; } /// Shortcut to reference the `Info` type of a `Dispatchable`. 
@@ -1038,11 +1039,14 @@ pub type DispatchInfoOf = ::Info; pub type PostDispatchInfoOf = ::PostInfo; impl Dispatchable for () { - type Origin = (); + type RuntimeOrigin = (); type Config = (); type Info = (); type PostInfo = (); - fn dispatch(self, _origin: Self::Origin) -> crate::DispatchResultWithInfo { + fn dispatch( + self, + _origin: Self::RuntimeOrigin, + ) -> crate::DispatchResultWithInfo { panic!("This implementation should not be used for actual dispatch."); } } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 4ed159fd7e2ed..5601de632eae3 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -233,11 +233,14 @@ impl ExtrinsicT for Extrinsic { } impl sp_runtime::traits::Dispatchable for Extrinsic { - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type Config = (); type Info = (); type PostInfo = (); - fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + fn dispatch( + self, + _origin: Self::RuntimeOrigin, + ) -> sp_runtime::DispatchResultWithInfo { panic!("This implementation should not be used for actual dispatch."); } } @@ -441,22 +444,22 @@ impl GetRuntimeBlockType for Runtime { } #[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq, TypeInfo, MaxEncodedLen)] -pub struct Origin; +pub struct RuntimeOrigin; -impl From> for Origin { +impl From> for RuntimeOrigin { fn from(_o: frame_system::Origin) -> Self { unimplemented!("Not required in tests!") } } -impl From for Result, Origin> { - fn from(_origin: Origin) -> Result, Origin> { +impl From for Result, RuntimeOrigin> { + fn from(_origin: RuntimeOrigin) -> Result, RuntimeOrigin> { unimplemented!("Not required in tests!") } } -impl frame_support::traits::OriginTrait for Origin { +impl frame_support::traits::OriginTrait for RuntimeOrigin { type Call = ::RuntimeCall; - type PalletsOrigin = Origin; + type PalletsOrigin = RuntimeOrigin; type AccountId = ::AccountId; fn add_filter(&mut self, _filter: impl Fn(&Self::Call) -> bool + 'static) { @@ -584,7 +587,7 @@ impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; - type Origin = Origin; + type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = Extrinsic; type Index = u64; type BlockNumber = u64; diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 45a6f66416d8c..0cb3e7d70f6df 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -40,7 +40,7 @@ const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::RuntimeOrigin {} } decl_storage! { diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index 2ee007c84f0aa..ad90c24167587 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -47,7 +47,7 @@ use sp_storage::{StorageData, StorageKey}; /// # struct TestRuntime; /// # /// # decl_module! 
{ -/// # pub struct Module for enum Call where origin: T::Origin {} +/// # pub struct Module for enum Call where origin: T::RuntimeOrigin {} /// # } /// # /// pub type Loc = (i64, i64, i64); From 409167ef427e1b1b32a0cb9c05117e8fe73a7b1a Mon Sep 17 00:00:00 2001 From: Xavier Lau Date: Wed, 21 Sep 2022 14:12:20 +0800 Subject: [PATCH 154/348] Use `array-bytes` for All Array/Bytes/Hex Operations (#12190) * Use `array-bytes` for All Array/Bytes/Hex Operations Signed-off-by: Xavier Lau * Reorder * Self Review * Format * Fix Tests * Bump `array-bytes` * Optimize large test res Signed-off-by: Xavier Lau Co-authored-by: parity-processbot <> --- Cargo.lock | 76 +++++------ bin/node-template/runtime/Cargo.toml | 2 - bin/node/bench/Cargo.toml | 2 +- bin/node/bench/src/generator.rs | 6 +- bin/node/cli/Cargo.toml | 2 +- bin/node/cli/src/chain_spec.rs | 56 ++++---- bin/node/runtime/Cargo.toml | 2 - client/beefy/Cargo.toml | 2 +- client/beefy/src/lib.rs | 5 +- client/beefy/src/tests.rs | 2 +- client/cli/Cargo.toml | 2 +- client/cli/src/commands/generate_node_key.rs | 4 +- client/cli/src/commands/inspect_key.rs | 8 +- client/cli/src/commands/inspect_node_key.rs | 3 +- client/cli/src/commands/sign.rs | 2 +- client/cli/src/commands/utils.rs | 15 +-- client/cli/src/commands/vanity.rs | 4 +- client/cli/src/commands/verify.rs | 4 +- client/cli/src/error.rs | 10 +- client/cli/src/params/node_key_params.rs | 2 +- client/executor/Cargo.toml | 2 +- client/executor/src/integration_tests/mod.rs | 35 ++--- client/finality-grandpa/Cargo.toml | 2 +- .../finality-grandpa/src/communication/mod.rs | 5 +- .../src/communication/tests.rs | 2 +- client/keystore/Cargo.toml | 2 +- client/keystore/src/local.rs | 8 +- client/network/Cargo.toml | 2 +- client/network/light/Cargo.toml | 2 +- .../light/src/light_client_requests.rs | 5 +- client/network/src/config.rs | 2 +- client/network/src/protocol.rs | 9 +- client/network/src/transactions.rs | 5 +- client/network/sync/Cargo.toml | 2 +- .../network/sync/src/block_request_handler.rs | 5 +- .../network/sync/src/state_request_handler.rs | 5 +- .../network/sync/src/warp_request_handler.rs | 5 +- client/offchain/Cargo.toml | 2 +- client/offchain/src/api.rs | 16 +-- client/service/test/Cargo.toml | 3 +- client/service/test/src/client/mod.rs | 35 ++--- client/transaction-pool/Cargo.toml | 2 +- client/transaction-pool/tests/pool.rs | 2 +- frame/alliance/Cargo.toml | 6 +- frame/beefy-mmr/Cargo.toml | 6 +- frame/beefy-mmr/primitives/Cargo.toml | 7 +- frame/beefy-mmr/primitives/src/lib.rs | 82 +++++++---- frame/beefy-mmr/src/tests.rs | 54 ++++---- frame/benchmarking/Cargo.toml | 2 +- frame/benchmarking/src/lib.rs | 8 +- frame/contracts/Cargo.toml | 2 +- frame/contracts/src/exec.rs | 10 +- frame/contracts/src/wasm/mod.rs | 27 ++-- frame/executive/Cargo.toml | 2 +- frame/executive/src/lib.rs | 31 +++-- frame/merkle-mountain-range/Cargo.toml | 2 +- frame/transaction-storage/Cargo.toml | 4 +- frame/transaction-storage/src/benchmarking.rs | 127 +++++++++--------- primitives/beefy/Cargo.toml | 3 +- primitives/beefy/src/commitment.rs | 71 ++-------- primitives/beefy/src/witness.rs | 14 +- .../beefy/test-res/large-raw-commitment | Bin 0 -> 44638 bytes primitives/core/Cargo.toml | 7 +- primitives/core/src/crypto.rs | 12 +- primitives/core/src/ecdsa.rs | 29 ++-- primitives/core/src/ed25519.rs | 29 ++-- primitives/core/src/sr25519.rs | 52 +++---- primitives/merkle-mountain-range/Cargo.toml | 2 +- primitives/merkle-mountain-range/src/lib.rs | 11 +- primitives/state-machine/Cargo.toml | 2 +- 
primitives/state-machine/src/basic.rs | 8 +- .../src/overlayed_changes/mod.rs | 8 +- primitives/state-machine/src/testing.rs | 6 +- primitives/trie/Cargo.toml | 2 +- primitives/trie/src/lib.rs | 27 ++-- test-utils/client/Cargo.toml | 2 +- test-utils/client/src/lib.rs | 2 +- utils/frame/benchmarking-cli/Cargo.toml | 2 +- .../frame/benchmarking-cli/src/block/bench.rs | 4 +- 79 files changed, 520 insertions(+), 510 deletions(-) create mode 100644 primitives/beefy/test-res/large-raw-commitment diff --git a/Cargo.lock b/Cargo.lock index d04f82f4bbecc..5791c8762d955 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,6 +112,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "698b65a961a9d730fb45b6b0327e20207810c9f61ee421b082b27ba003f49e2b" +[[package]] +name = "array-bytes" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a913633b0c922e6b745072795f50d90ebea78ba31a57e2ac8c2fc7b50950949" + [[package]] name = "arrayref" version = "0.3.6" @@ -446,12 +452,12 @@ dependencies = [ name = "beefy-gadget" version = "4.0.0-dev" dependencies = [ + "array-bytes", "async-trait", "beefy-primitives", "fnv", "futures", "futures-timer", - "hex", "log", "parity-scale-codec", "parking_lot 0.12.1", @@ -513,10 +519,9 @@ dependencies = [ name = "beefy-merkle-tree" version = "4.0.0-dev" dependencies = [ + "array-bytes", "beefy-primitives", "env_logger", - "hex", - "hex-literal", "log", "sp-api", "tiny-keccak", @@ -526,8 +531,7 @@ dependencies = [ name = "beefy-primitives" version = "4.0.0-dev" dependencies = [ - "hex", - "hex-literal", + "array-bytes", "parity-scale-codec", "scale-info", "sp-api", @@ -2112,9 +2116,9 @@ dependencies = [ name = "frame-benchmarking" version = "4.0.0-dev" dependencies = [ + "array-bytes", "frame-support", "frame-system", - "hex-literal", "linregress", "log", "parity-scale-codec", @@ -2138,6 +2142,7 @@ name = "frame-benchmarking-cli" version = "4.0.0-dev" dependencies = [ "Inflector", + "array-bytes", "chrono", "clap 3.1.18", "comfy-table", @@ -2147,7 +2152,6 @@ dependencies = [ "gethostname", "handlebars", "hash-db", - "hex", "itertools", "kvdb", "lazy_static", @@ -2238,10 +2242,10 @@ dependencies = [ name = "frame-executive" version = "4.0.0-dev" dependencies = [ + "array-bytes", "frame-support", "frame-system", "frame-try-runtime", - "hex-literal", "pallet-balances", "pallet-transaction-payment", "parity-scale-codec", @@ -2854,12 +2858,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hex-literal" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ebdb29d2ea9ed0083cd8cece49bbd968021bd99b0849edb4a9a7ee0fdf6a4e0" - [[package]] name = "hex_fmt" version = "0.3.0" @@ -3355,7 +3353,6 @@ dependencies = [ "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", - "hex-literal", "log", "node-primitives", "pallet-alliance", @@ -4637,12 +4634,12 @@ dependencies = [ name = "node-bench" version = "0.9.0-dev" dependencies = [ + "array-bytes", "clap 3.1.18", "derive_more", "fs_extra", "futures", "hash-db", - "hex", "kitchensink-runtime", "kvdb", "kvdb-rocksdb", @@ -4674,6 +4671,7 @@ dependencies = [ name = "node-cli" version = "3.0.0-dev" dependencies = [ + "array-bytes", "assert_cmd", "async-std", "clap 3.1.18", @@ -4683,7 +4681,6 @@ dependencies = [ "frame-system", 
"frame-system-rpc-runtime-api", "futures", - "hex-literal", "jsonrpsee", "kitchensink-runtime", "log", @@ -4912,7 +4909,6 @@ dependencies = [ "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", - "hex-literal", "pallet-aura", "pallet-balances", "pallet-grandpa", @@ -5181,11 +5177,10 @@ dependencies = [ name = "pallet-alliance" version = "4.0.0-dev" dependencies = [ + "array-bytes", "frame-benchmarking", "frame-support", "frame-system", - "hex", - "hex-literal", "log", "pallet-balances", "pallet-collective", @@ -5416,12 +5411,11 @@ dependencies = [ name = "pallet-beefy-mmr" version = "4.0.0-dev" dependencies = [ + "array-bytes", "beefy-merkle-tree", "beefy-primitives", "frame-support", "frame-system", - "hex", - "hex-literal", "log", "pallet-beefy", "pallet-mmr", @@ -5493,13 +5487,13 @@ dependencies = [ name = "pallet-contracts" version = "4.0.0-dev" dependencies = [ + "array-bytes", "assert_matches", "bitflags", "env_logger", "frame-benchmarking", "frame-support", "frame-system", - "hex-literal", "impl-trait-for-tuples", "log", "pallet-balances", @@ -5860,12 +5854,12 @@ dependencies = [ name = "pallet-mmr" version = "4.0.0-dev" dependencies = [ + "array-bytes", "ckb-merkle-mountain-range", "env_logger", "frame-benchmarking", "frame-support", "frame-system", - "hex-literal", "itertools", "parity-scale-codec", "scale-info", @@ -6453,10 +6447,10 @@ dependencies = [ name = "pallet-transaction-storage" version = "4.0.0-dev" dependencies = [ + "array-bytes", "frame-benchmarking", "frame-support", "frame-system", - "hex-literal", "log", "pallet-balances", "parity-scale-codec", @@ -7897,11 +7891,11 @@ dependencies = [ name = "sc-cli" version = "0.10.0-dev" dependencies = [ + "array-bytes", "chrono", "clap 3.1.18", "fdlimit", "futures", - "hex", "libp2p", "log", "names", @@ -8243,9 +8237,9 @@ dependencies = [ name = "sc-executor" version = "0.10.0-dev" dependencies = [ + "array-bytes", "criterion", "env_logger", - "hex-literal", "lazy_static", "lru", "num_cpus", @@ -8339,6 +8333,7 @@ name = "sc-finality-grandpa" version = "0.10.0-dev" dependencies = [ "ahash", + "array-bytes", "assert_matches", "async-trait", "dyn-clone", @@ -8346,7 +8341,6 @@ dependencies = [ "fork-tree", "futures", "futures-timer", - "hex", "log", "parity-scale-codec", "parking_lot 0.12.1", @@ -8427,8 +8421,8 @@ dependencies = [ name = "sc-keystore" version = "4.0.0-dev" dependencies = [ + "array-bytes", "async-trait", - "hex", "parking_lot 0.12.1", "serde_json", "sp-application-crypto", @@ -8442,6 +8436,7 @@ dependencies = [ name = "sc-network" version = "0.10.0-dev" dependencies = [ + "array-bytes", "assert_matches", "async-std", "async-trait", @@ -8454,7 +8449,6 @@ dependencies = [ "fork-tree", "futures", "futures-timer", - "hex", "ip_network", "libp2p", "linked-hash-map", @@ -8565,8 +8559,8 @@ dependencies = [ name = "sc-network-light" version = "0.10.0-dev" dependencies = [ + "array-bytes", "futures", - "hex", "libp2p", "log", "parity-scale-codec", @@ -8585,9 +8579,9 @@ dependencies = [ name = "sc-network-sync" version = "0.10.0-dev" dependencies = [ + "array-bytes", "fork-tree", "futures", - "hex", "libp2p", "log", "lru", @@ -8647,11 +8641,11 @@ dependencies = [ name = "sc-offchain" version = "4.0.0-dev" dependencies = [ + "array-bytes", "bytes", "fnv", "futures", "futures-timer", - "hex", "hyper", "hyper-rustls", "lazy_static", @@ -8875,10 +8869,9 @@ dependencies = [ name = "sc-service-test" version = "2.0.0" dependencies = [ + "array-bytes", "fdlimit", "futures", - "hex", - "hex-literal", 
"log", "parity-scale-codec", "parking_lot 0.12.1", @@ -9019,11 +9012,11 @@ dependencies = [ name = "sc-transaction-pool" version = "4.0.0-dev" dependencies = [ + "array-bytes", "assert_matches", "criterion", "futures", "futures-timer", - "hex", "linked-hash-map", "log", "parity-scale-codec", @@ -9761,6 +9754,7 @@ dependencies = [ name = "sp-core" version = "6.0.0" dependencies = [ + "array-bytes", "base58", "bitflags", "blake2", @@ -9771,8 +9765,6 @@ dependencies = [ "futures", "hash-db", "hash256-std-hasher", - "hex", - "hex-literal", "impl-serde", "lazy_static", "libsecp256k1", @@ -9953,7 +9945,7 @@ dependencies = [ name = "sp-mmr-primitives" version = "4.0.0-dev" dependencies = [ - "hex-literal", + "array-bytes", "log", "parity-scale-codec", "serde", @@ -10171,9 +10163,9 @@ dependencies = [ name = "sp-state-machine" version = "0.12.0" dependencies = [ + "array-bytes", "assert_matches", "hash-db", - "hex-literal", "log", "num-traits", "parity-scale-codec", @@ -10288,10 +10280,10 @@ name = "sp-trie" version = "6.0.0" dependencies = [ "ahash", + "array-bytes", "criterion", "hash-db", "hashbrown 0.12.3", - "hex-literal", "lazy_static", "lru", "memory-db", @@ -10594,9 +10586,9 @@ dependencies = [ name = "substrate-test-client" version = "2.0.1" dependencies = [ + "array-bytes", "async-trait", "futures", - "hex", "parity-scale-codec", "sc-client-api", "sc-client-db", diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index c6c4b2ea911ec..45ab4939e311c 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -46,7 +46,6 @@ pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-fe # Used for runtime benchmarking frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking", optional = true } frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } -hex-literal = { version = "0.3.4", optional = true } # Local Dependencies pallet-template = { version = "4.0.0-dev", default-features = false, path = "../pallets/template" } @@ -93,7 +92,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", - "hex-literal", "pallet-balances/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-template/runtime-benchmarks", diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 878c5d07c8e78..a9c367ae8aa3d 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -11,6 +11,7 @@ repository = "https://github.com/paritytech/substrate/" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +array-bytes = "4.1" clap = { version = "3.1.18", features = ["derive"] } log = "0.4.17" node-primitives = { version = "2.0.0", path = "../primitives" } @@ -34,7 +35,6 @@ sp-tracing = { version = "5.0.0", path = "../../../primitives/tracing" } hash-db = "0.15.2" tempfile = "3.1.0" fs_extra = "1" -hex = "0.4.0" rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 863928c719429..76bd3a3240c51 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -37,8 +37,10 @@ pub fn 
generate_trie( let (db, overlay) = { let mut overlay = HashMap::new(); overlay.insert( - hex::decode("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") - .expect("null key is valid"), + array_bytes::hex2bytes( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", + ) + .expect("null key is valid"), Some(vec![0]), ); let mut trie = SimpleTrie { db, overlay: &mut overlay }; diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index a435945168e0d..edb434e3c04e3 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -34,12 +34,12 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies +array-bytes = "4.1" clap = { version = "3.1.18", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.0.0" } serde = { version = "1.0.136", features = ["derive"] } jsonrpsee = { version = "0.15.1", features = ["server"] } futures = "0.3.21" -hex-literal = "0.3.4" log = "0.4.17" rand = "0.8" diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 77e2f73dd6e18..8d74f2bde0f44 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -19,7 +19,6 @@ //! Substrate chain configurations. use grandpa_primitives::AuthorityId as GrandpaId; -use hex_literal::hex; use kitchensink_runtime::{ constants::currency::*, wasm_binary_unwrap, AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, Block, CouncilConfig, DemocracyConfig, ElectionsConfig, GrandpaConfig, @@ -98,84 +97,83 @@ fn staging_testnet_config_genesis() -> GenesisConfig { )> = vec![ ( // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy - hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), + array_bytes::hex_n_into_unchecked("9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"), // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq - hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), + array_bytes::hex_n_into_unchecked("781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"), // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC - hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"] + array_bytes::hex2array_unchecked("9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332") .unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106") .unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106") .unchecked_into(), // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + array_bytes::hex2array_unchecked("6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106") .unchecked_into(), ), ( // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 - hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), + array_bytes::hex_n_into_unchecked("68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"), // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF - hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), + 
array_bytes::hex_n_into_unchecked("c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"), // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE - hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"] + array_bytes::hex2array_unchecked("7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f") .unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e") .unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e") .unchecked_into(), // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + array_bytes::hex2array_unchecked("482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e") .unchecked_into(), ), ( // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp - hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), + array_bytes::hex_n_into_unchecked("547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"), // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 - hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), + array_bytes::hex_n_into_unchecked("9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"), // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d - hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"] + array_bytes::hex2array_unchecked("5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440") .unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a") .unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a") .unchecked_into(), // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + array_bytes::hex2array_unchecked("482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a") .unchecked_into(), ), ( // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 - hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), + array_bytes::hex_n_into_unchecked("f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"), // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn - hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), + array_bytes::hex_n_into_unchecked("66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"), // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 - hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"] + array_bytes::hex2array_unchecked("3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef") .unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378") 
.unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378") .unchecked_into(), // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + array_bytes::hex2array_unchecked("00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378") .unchecked_into(), ), ]; // generated with secret: subkey inspect "$secret"/fir - let root_key: AccountId = hex![ + let root_key: AccountId = array_bytes::hex_n_into_unchecked( // 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo - "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809" - ] - .into(); + "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809", + ); let endowed_accounts: Vec = vec![root_key.clone()]; diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 6a81c16780240..d52b1aaccfcfa 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -21,7 +21,6 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = ] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } static_assertions = "1.1.0" -hex-literal = { version = "0.3.4", optional = true } log = { version = "0.4.17", default-features = false } # primitives @@ -252,7 +251,6 @@ runtime-benchmarks = [ "pallet-vesting/runtime-benchmarks", "pallet-whitelist/runtime-benchmarks", "frame-system-benchmarking/runtime-benchmarks", - "hex-literal", ] try-runtime = [ "frame-try-runtime", diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index 5cc2952e26ba5..47a3be859cbbb 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -9,12 +9,12 @@ description = "BEEFY Client gadget for substrate" homepage = "https://substrate.io" [dependencies] +array-bytes = "4.1" async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } fnv = "1.0.6" futures = "0.3" futures-timer = "3.0.1" -hex = "0.4.2" log = "0.4" parking_lot = "0.12.1" thiserror = "1.0" diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index cdb7fca10320d..41eeec43d64bd 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -69,9 +69,10 @@ pub(crate) mod beefy_protocol_name { genesis_hash: &Hash, chain_spec: &Box, ) -> ProtocolName { + let genesis_hash = genesis_hash.as_ref(); let chain_prefix = match chain_spec.fork_id() { - Some(fork_id) => format!("/{}/{}", hex::encode(genesis_hash), fork_id), - None => format!("/{}", hex::encode(genesis_hash)), + Some(fork_id) => format!("/{}/{}", array_bytes::bytes2hex("", genesis_hash), fork_id), + None => format!("/{}", array_bytes::bytes2hex("", genesis_hash)), }; format!("{}{}", chain_prefix, NAME).into() } diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index e0058e5238d6a..26c85592ecb85 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -99,7 +99,7 @@ fn beefy_protocol_name() { // Create protocol name using random genesis hash. 
let genesis_hash = H256::random(); - let expected = format!("/{}/beefy/1", hex::encode(genesis_hash)); + let expected = format!("/{}/beefy/1", array_bytes::bytes2hex("", genesis_hash.as_ref())); let proto_name = beefy_protocol_name::standard_name(&genesis_hash, &chain_spec); assert_eq!(proto_name.to_string(), expected); diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 50d110d40eabd..e5cd6167596c0 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +array-bytes = "4.1" chrono = "0.4.10" clap = { version = "3.1.18", features = ["derive"] } fdlimit = "0.2.1" futures = "0.3.21" -hex = "0.4.2" libp2p = "0.46.1" log = "0.4.17" names = { version = "0.13.0", default-features = false } diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs index 6b2f12531458c..3afd99d60a123 100644 --- a/client/cli/src/commands/generate_node_key.rs +++ b/client/cli/src/commands/generate_node_key.rs @@ -57,7 +57,7 @@ impl GenerateNodeKeyCmd { let file_data = if self.bin { secret.as_ref().to_owned() } else { - hex::encode(secret.as_ref()).into_bytes() + array_bytes::bytes2hex("", secret.as_ref()).into_bytes() }; match &self.file { @@ -85,6 +85,6 @@ mod tests { assert!(generate.run().is_ok()); let mut buf = String::new(); assert!(file.read_to_string(&mut buf).is_ok()); - assert!(hex::decode(buf).is_ok()); + assert!(array_bytes::hex2bytes(&buf).is_ok()); } } diff --git a/client/cli/src/commands/inspect_key.rs b/client/cli/src/commands/inspect_key.rs index 14bb059503df9..59f105dc52a5c 100644 --- a/client/cli/src/commands/inspect_key.rs +++ b/client/cli/src/commands/inspect_key.rs @@ -127,7 +127,7 @@ fn expect_public_from_phrase( ) -> Result<(), Error> { let secret_uri = SecretUri::from_str(suri).map_err(|e| format!("{:?}", e))?; let expected_public = if let Some(public) = expect_public.strip_prefix("0x") { - let hex_public = hex::decode(&public) + let hex_public = array_bytes::hex2bytes(public) .map_err(|_| format!("Invalid expected public key hex: `{}`", expect_public))?; Pair::Public::try_from(&hex_public) .map_err(|_| format!("Invalid expected public key: `{}`", expect_public))? @@ -208,7 +208,7 @@ mod tests { .expect("Valid") .0 .public(); - let valid_public_hex = format!("0x{}", hex::encode(valid_public.as_slice())); + let valid_public_hex = array_bytes::bytes2hex("0x", valid_public.as_slice()); let valid_accountid = format!("{}", valid_public.into_account()); // It should fail with the invalid public key @@ -226,7 +226,7 @@ mod tests { .0 .public(); let valid_public_hex_with_password = - format!("0x{}", hex::encode(&valid_public_with_password.as_slice())); + array_bytes::bytes2hex("0x", valid_public_with_password.as_slice()); let valid_accountid_with_password = format!("{}", &valid_public_with_password.into_account()); @@ -248,7 +248,7 @@ mod tests { .0 .public(); let valid_public_hex_with_password_and_derivation = - format!("0x{}", hex::encode(&valid_public_with_password_and_derivation.as_slice())); + array_bytes::bytes2hex("0x", valid_public_with_password_and_derivation.as_slice()); // They should still be valid, because we check the base secret key. 
check_cmd(&seed_with_password_and_derivation, &valid_public_hex_with_password, true); diff --git a/client/cli/src/commands/inspect_node_key.rs b/client/cli/src/commands/inspect_node_key.rs index e1617c1d085df..a9dff9c760d33 100644 --- a/client/cli/src/commands/inspect_node_key.rs +++ b/client/cli/src/commands/inspect_node_key.rs @@ -66,7 +66,8 @@ impl InspectNodeKeyCmd { if !self.bin { // With hex input, give to the user a bit of tolerance about whitespaces let keyhex = String::from_utf8_lossy(&file_data); - file_data = hex::decode(keyhex.trim()).map_err(|_| "failed to decode secret as hex")?; + file_data = array_bytes::hex2bytes(keyhex.trim()) + .map_err(|_| "failed to decode secret as hex")?; } let secret = diff --git a/client/cli/src/commands/sign.rs b/client/cli/src/commands/sign.rs index e0a5fce353ef4..7d93390326717 100644 --- a/client/cli/src/commands/sign.rs +++ b/client/cli/src/commands/sign.rs @@ -70,7 +70,7 @@ fn sign( message: Vec, ) -> error::Result { let pair = utils::pair_from_suri::

(suri, password)?; - Ok(hex::encode(pair.sign(&message))) + Ok(array_bytes::bytes2hex("", pair.sign(&message).as_ref())) } #[cfg(test)] diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs index 95849065471b4..1ce2b23221691 100644 --- a/client/cli/src/commands/utils.rs +++ b/client/cli/src/commands/utils.rs @@ -203,7 +203,7 @@ where Pair: sp_core::Pair, Pair::Public: Into, { - let public = decode_hex(public_str)?; + let public = array_bytes::hex2bytes(public_str)?; let public_key = Pair::Public::try_from(&public) .map_err(|_| "Failed to construct public key from given hex")?; @@ -273,26 +273,17 @@ where format!("0x{}", HexDisplay::from(&public_key.into().into_account().as_ref())) } -/// helper method for decoding hex -pub fn decode_hex>(message: T) -> Result, Error> { - let mut message = message.as_ref(); - if message[..2] == [b'0', b'x'] { - message = &message[2..] - } - Ok(hex::decode(message)?) -} - /// checks if message is Some, otherwise reads message from stdin and optionally decodes hex pub fn read_message(msg: Option<&String>, should_decode: bool) -> Result, Error> { let mut message = vec![]; match msg { Some(m) => { - message = decode_hex(m)?; + message = array_bytes::hex2bytes(m.as_str())?; }, None => { std::io::stdin().lock().read_to_end(&mut message)?; if should_decode { - message = decode_hex(&message)?; + message = array_bytes::hex2bytes(array_bytes::hex_bytes2hex_str(&message)?)?; } }, } diff --git a/client/cli/src/commands/vanity.rs b/client/cli/src/commands/vanity.rs index 6a1bf77f6c8b0..289dc1705c3a3 100644 --- a/client/cli/src/commands/vanity.rs +++ b/client/cli/src/commands/vanity.rs @@ -178,7 +178,7 @@ mod tests { #[test] fn test_generation_with_single_char() { let seed = generate_key::("ab", default_ss58_version()).unwrap(); - assert!(sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) + assert!(sr25519::Pair::from_seed_slice(&array_bytes::hex2bytes_unchecked(&seed)) .unwrap() .public() .to_ss58check() @@ -190,7 +190,7 @@ mod tests { let seed = generate_key::("ab", Ss58AddressFormatRegistry::PolkadotAccount.into()) .unwrap(); - assert!(sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) + assert!(sr25519::Pair::from_seed_slice(&array_bytes::hex2bytes_unchecked(&seed)) .unwrap() .public() .to_ss58check_with_version(Ss58AddressFormatRegistry::PolkadotAccount.into()) diff --git a/client/cli/src/commands/verify.rs b/client/cli/src/commands/verify.rs index b004a948a7a48..6389cdbde2c17 100644 --- a/client/cli/src/commands/verify.rs +++ b/client/cli/src/commands/verify.rs @@ -55,7 +55,7 @@ impl VerifyCmd { /// Run the command pub fn run(&self) -> error::Result<()> { let message = utils::read_message(self.message.as_ref(), self.hex)?; - let sig_data = utils::decode_hex(&self.sig)?; + let sig_data = array_bytes::hex2bytes(&self.sig)?; let uri = utils::read_uri(self.uri.as_ref())?; let uri = if let Some(uri) = uri.strip_prefix("0x") { uri } else { &uri }; @@ -71,7 +71,7 @@ where let signature = Pair::Signature::try_from(&sig_data).map_err(|_| error::Error::SignatureFormatInvalid)?; - let pubkey = if let Ok(pubkey_vec) = hex::decode(uri) { + let pubkey = if let Ok(pubkey_vec) = array_bytes::hex2bytes(uri) { Pair::Public::from_slice(pubkey_vec.as_slice()) .map_err(|_| error::Error::KeyFormatInvalid)? 
} else { diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index f38a95e0115f1..a0f843e73bf53 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -69,8 +69,8 @@ pub enum Error { #[error("Key storage issue encountered")] KeyStorage(#[from] sc_keystore::Error), - #[error("Invalid hexadecimal string data")] - HexDataConversion(#[from] hex::FromHexError), + #[error("Invalid hexadecimal string data, {0:?}")] + HexDataConversion(array_bytes::Error), /// Application specific error chain sequence forwarder. #[error(transparent)] @@ -97,3 +97,9 @@ impl From for Error { Error::InvalidUri(e) } } + +impl From for Error { + fn from(e: array_bytes::Error) -> Error { + Error::HexDataConversion(e) + } +} diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index d51b6143ed393..00ce9e8027aab 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -176,7 +176,7 @@ mod tests { let file = tmp.path().join("mysecret").to_path_buf(); let key = ed25519::SecretKey::generate(); - fs::write(&file, hex::encode(key.as_ref())).expect("Writes secret key"); + fs::write(&file, array_bytes::bytes2hex("", key.as_ref())).expect("Writes secret key"); check_key(file.clone(), &key); fs::write(&file, &key).expect("Writes secret key"); diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 2fe7e822b36c3..264db0e89b1c8 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -37,8 +37,8 @@ sp-version = { version = "5.0.0", path = "../../primitives/version" } sp-wasm-interface = { version = "6.0.0", path = "../../primitives/wasm-interface" } [dev-dependencies] +array-bytes = "4.1" wat = "1.0" -hex-literal = "0.3.4" sc-runtime-test = { version = "2.0.0", path = "runtime-test" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } sp-state-machine = { version = "0.12.0", path = "../../primitives/state-machine" } diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 25280f856f2a5..9ffb7f502f5c6 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -21,7 +21,6 @@ mod linux; mod sandbox; use codec::{Decode, Encode}; -use hex_literal::hex; use sc_executor_common::{error::Error, runtime_blob::RuntimeBlob, wasm_runtime::WasmModule}; use sc_runtime_test::wasm_binary_unwrap; use sp_core::{ @@ -391,16 +390,18 @@ fn sha2_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); assert_eq!( call_in_wasm("test_sha2_256", &[0], wasm_method, &mut ext,).unwrap(), - hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") - .to_vec() - .encode(), + array_bytes::hex2bytes_unchecked( + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ) + .encode(), ); assert_eq!( call_in_wasm("test_sha2_256", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) .unwrap(), - hex!("c0535e4be2b79ffd93291305436bf889314e4a3faec05ecffcbb7df31ad9e51a") - .to_vec() - .encode(), + array_bytes::hex2bytes_unchecked( + "c0535e4be2b79ffd93291305436bf889314e4a3faec05ecffcbb7df31ad9e51a" + ) + .encode(), ); } @@ -410,16 +411,18 @@ fn twox_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); assert_eq!( call_in_wasm("test_twox_256", &[0], wasm_method, &mut ext,).unwrap(), - hex!("99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a") - .to_vec() - .encode(), + 
array_bytes::hex2bytes_unchecked( + "99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a" + ) + .encode(), ); assert_eq!( call_in_wasm("test_twox_256", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) .unwrap(), - hex!("b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74") - .to_vec() - .encode(), + array_bytes::hex2bytes_unchecked( + "b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74" + ) + .encode(), ); } @@ -429,12 +432,12 @@ fn twox_128_should_work(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); assert_eq!( call_in_wasm("test_twox_128", &[0], wasm_method, &mut ext,).unwrap(), - hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), + array_bytes::hex2bytes_unchecked("99e9d85137db46ef4bbea33613baafd5").encode(), ); assert_eq!( call_in_wasm("test_twox_128", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) .unwrap(), - hex!("b27dfd7f223f177f2a13647b533599af").to_vec().encode(), + array_bytes::hex2bytes_unchecked("b27dfd7f223f177f2a13647b533599af").encode(), ); } @@ -704,7 +707,7 @@ fn parallel_execution(wasm_method: WasmExecutionMethod) { &[0], ) .unwrap(), - hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), + array_bytes::hex2bytes_unchecked("99e9d85137db46ef4bbea33613baafd5").encode() ); }) }) diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 5950b108ca4ab..83c6051946aff 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -15,12 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ahash = "0.7.6" +array-bytes = "4.1" async-trait = "0.1.57" dyn-clone = "1.0" finality-grandpa = { version = "0.16.0", features = ["derive-codec"] } futures = "0.3.21" futures-timer = "3.0.1" -hex = "0.4.2" log = "0.4.17" parity-scale-codec = { version = "3.0.0", features = ["derive"] } parking_lot = "0.12.1" diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index d211c10ae62be..12cb2601f4c26 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -83,9 +83,10 @@ pub mod grandpa_protocol_name { genesis_hash: &Hash, chain_spec: &Box, ) -> ProtocolName { + let genesis_hash = genesis_hash.as_ref(); let chain_prefix = match chain_spec.fork_id() { - Some(fork_id) => format!("/{}/{}", hex::encode(genesis_hash), fork_id), - None => format!("/{}", hex::encode(genesis_hash)), + Some(fork_id) => format!("/{}/{}", array_bytes::bytes2hex("", genesis_hash), fork_id), + None => format!("/{}", array_bytes::bytes2hex("", genesis_hash)), }; format!("{}{}", chain_prefix, NAME).into() } diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 1f607e8d68c02..b73f6cdecdd4f 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -646,7 +646,7 @@ fn grandpa_protocol_name() { // Create protocol name using random genesis hash. 
let genesis_hash = sp_core::H256::random(); - let expected = format!("/{}/grandpa/1", hex::encode(genesis_hash)); + let expected = format!("/{}/grandpa/1", array_bytes::bytes2hex("", genesis_hash.as_ref())); let proto_name = grandpa_protocol_name::standard_name(&genesis_hash, &chain_spec); assert_eq!(proto_name.to_string(), expected); diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 9b7afba759c60..ff963f9d446f6 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -14,8 +14,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +array-bytes = "4.1" async-trait = "0.1.57" -hex = "0.4.0" parking_lot = "0.12.1" serde_json = "1.0.85" thiserror = "1.0" diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 19be6715ffe3f..54ff6a5b164a8 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -512,8 +512,8 @@ impl KeystoreInner { /// Returns `None` if the keystore only exists in-memory and there isn't any path to provide. fn key_file_path(&self, public: &[u8], key_type: KeyTypeId) -> Option { let mut buf = self.path.as_ref()?.clone(); - let key_type = hex::encode(key_type.0); - let key = hex::encode(public); + let key_type = array_bytes::bytes2hex("", &key_type.0); + let key = array_bytes::bytes2hex("", public); buf.push(key_type + key.as_str()); Some(buf) } @@ -534,7 +534,7 @@ impl KeystoreInner { // skip directories and non-unicode file names (hex is unicode) if let Some(name) = path.file_name().and_then(|n| n.to_str()) { - match hex::decode(name) { + match array_bytes::hex2bytes(name) { Ok(ref hex) if hex.len() > 4 => { if hex[0..4] != id.0 { continue @@ -739,7 +739,7 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); - let file_name = temp_dir.path().join(hex::encode(&SR25519.0[..2])); + let file_name = temp_dir.path().join(array_bytes::bytes2hex("", &SR25519.0[..2])); fs::write(file_name, "test").expect("Invalid file is written"); assert!(SyncCryptoStore::sr25519_public_keys(&store, SR25519).is_empty()); diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index ae115220d6843..e96749df40aa2 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -14,6 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +array-bytes = "4.1" async-trait = "0.1" asynchronous-codec = "0.6" bitflags = "1.3.2" @@ -24,7 +25,6 @@ either = "1.5.3" fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" -hex = "0.4.0" ip_network = "0.4.1" libp2p = "0.46.1" linked_hash_set = "0.1.3" diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml index c1a0fb4759320..c2a77c3b577ba 100644 --- a/client/network/light/Cargo.toml +++ b/client/network/light/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] +array-bytes = "4.1" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" -hex = "0.4.0" libp2p = "0.46.1" log = "0.4.16" prost = "0.10" diff --git a/client/network/light/src/light_client_requests.rs b/client/network/light/src/light_client_requests.rs index b58426cf15992..61b549d0f0984 100644 --- a/client/network/light/src/light_client_requests.rs +++ b/client/network/light/src/light_client_requests.rs @@ -27,10 +27,11 @@ use std::time::Duration; /// Generate the light client protocol name from the genesis hash and fork id. 
fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + let genesis_hash = genesis_hash.as_ref(); if let Some(fork_id) = fork_id { - format!("/{}/{}/light/2", hex::encode(genesis_hash), fork_id) + format!("/{}/{}/light/2", array_bytes::bytes2hex("", genesis_hash), fork_id) } else { - format!("/{}/light/2", hex::encode(genesis_hash)) + format!("/{}/light/2", array_bytes::bytes2hex("", genesis_hash)) } } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index d7ca8b48a7c88..202a628884d79 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -585,7 +585,7 @@ impl NodeKeyConfig { f, |mut b| match String::from_utf8(b.to_vec()).ok().and_then(|s| { if s.len() == 64 { - hex::decode(&s).ok() + array_bytes::hex2bytes(&s).ok() } else { None } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 17187f2d63c1c..c7a3cf4b2160f 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -369,10 +369,15 @@ where let block_announces_protocol = { let genesis_hash = chain.hash(0u32.into()).ok().flatten().expect("Genesis block exists; qed"); + let genesis_hash = genesis_hash.as_ref(); if let Some(fork_id) = fork_id { - format!("/{}/{}/block-announces/1", hex::encode(genesis_hash), fork_id) + format!( + "/{}/{}/block-announces/1", + array_bytes::bytes2hex("", genesis_hash), + fork_id + ) } else { - format!("/{}/block-announces/1", hex::encode(genesis_hash)) + format!("/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash)) } }; diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 1cf532f33ddc6..da4547aefeab3 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -143,10 +143,11 @@ impl TransactionsHandlerPrototype { genesis_hash: Hash, fork_id: Option, ) -> Self { + let genesis_hash = genesis_hash.as_ref(); let protocol_name = if let Some(fork_id) = fork_id { - format!("/{}/{}/transactions/1", hex::encode(genesis_hash), fork_id) + format!("/{}/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash), fork_id) } else { - format!("/{}/transactions/1", hex::encode(genesis_hash)) + format!("/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash)) }; let legacy_protocol_name = format!("/{}/transactions/1", protocol_id.as_ref()); diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 7c8f8adafd214..269214aeff3f7 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -17,11 +17,11 @@ targets = ["x86_64-unknown-linux-gnu"] prost-build = "0.10" [dependencies] +array-bytes = "4.1" codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" -hex = "0.4.0" libp2p = "0.46.1" log = "0.4.17" lru = "0.7.5" diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index cc61be2b57256..a6b39591e7945 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -80,10 +80,11 @@ pub fn generate_protocol_config>( /// Generate the block protocol name from the genesis hash and fork id. 
fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + let genesis_hash = genesis_hash.as_ref(); if let Some(fork_id) = fork_id { - format!("/{}/{}/sync/2", hex::encode(genesis_hash), fork_id) + format!("/{}/{}/sync/2", array_bytes::bytes2hex("", genesis_hash), fork_id) } else { - format!("/{}/sync/2", hex::encode(genesis_hash)) + format!("/{}/sync/2", array_bytes::bytes2hex("", genesis_hash)) } } diff --git a/client/network/sync/src/state_request_handler.rs b/client/network/sync/src/state_request_handler.rs index 6cf6482a44f8b..abbbcad2e378f 100644 --- a/client/network/sync/src/state_request_handler.rs +++ b/client/network/sync/src/state_request_handler.rs @@ -69,10 +69,11 @@ pub fn generate_protocol_config>( /// Generate the state protocol name from the genesis hash and fork id. fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + let genesis_hash = genesis_hash.as_ref(); if let Some(fork_id) = fork_id { - format!("/{}/{}/state/2", hex::encode(genesis_hash), fork_id) + format!("/{}/{}/state/2", array_bytes::bytes2hex("", genesis_hash), fork_id) } else { - format!("/{}/state/2", hex::encode(genesis_hash)) + format!("/{}/state/2", array_bytes::bytes2hex("", genesis_hash)) } } diff --git a/client/network/sync/src/warp_request_handler.rs b/client/network/sync/src/warp_request_handler.rs index 394bc68449099..e675bf45cad91 100644 --- a/client/network/sync/src/warp_request_handler.rs +++ b/client/network/sync/src/warp_request_handler.rs @@ -54,10 +54,11 @@ pub fn generate_request_response_config>( /// Generate the grandpa warp sync protocol name from the genesi hash and fork id. fn generate_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> String { + let genesis_hash = genesis_hash.as_ref(); if let Some(fork_id) = fork_id { - format!("/{}/{}/sync/warp", hex::encode(genesis_hash), fork_id) + format!("/{}/{}/sync/warp", array_bytes::bytes2hex("", genesis_hash), fork_id) } else { - format!("/{}/sync/warp", hex::encode(genesis_hash)) + format!("/{}/sync/warp", array_bytes::bytes2hex("", genesis_hash)) } } diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index ff97f29961155..5ebb21406efbf 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -13,12 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +array-bytes = "4.1" bytes = "1.1" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" -hex = "0.4" hyper = { version = "0.14.16", features = ["stream", "http2"] } hyper-rustls = { version = "0.23.0", features = ["http2"] } libp2p = { version = "0.46.1", default-features = false } diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 6d6c52c989c34..7d3dd8302f343 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -79,8 +79,8 @@ impl offchain::DbExternalities for Db { tracing::debug!( target: "offchain-worker::storage", ?kind, - key = ?hex::encode(key), - value = ?hex::encode(value), + key = ?array_bytes::bytes2hex("", key), + value = ?array_bytes::bytes2hex("", value), "Write", ); match kind { @@ -93,7 +93,7 @@ impl offchain::DbExternalities for Db { tracing::debug!( target: "offchain-worker::storage", ?kind, - key = ?hex::encode(key), + key = ?array_bytes::bytes2hex("", key), "Clear", ); match kind { @@ -112,9 +112,9 @@ impl offchain::DbExternalities for Db { tracing::debug!( target: "offchain-worker::storage", ?kind, - key = 
?hex::encode(key), - new_value = ?hex::encode(new_value), - old_value = ?old_value.as_ref().map(hex::encode), + key = ?array_bytes::bytes2hex("", key), + new_value = ?array_bytes::bytes2hex("", new_value), + old_value = ?old_value.as_ref().map(|s| array_bytes::bytes2hex("", s)), "CAS", ); match kind { @@ -132,8 +132,8 @@ impl offchain::DbExternalities for Db { tracing::debug!( target: "offchain-worker::storage", ?kind, - key = ?hex::encode(key), - result = ?result.as_ref().map(hex::encode), + key = ?array_bytes::bytes2hex("", key), + result = ?result.as_ref().map(|s| array_bytes::bytes2hex("", s)), "Read", ); result diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 92df5381c202b..1f934a6e5355f 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -12,10 +12,9 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +array-bytes = "4.1" fdlimit = "0.2.1" futures = "0.3.21" -hex = "0.4" -hex-literal = "0.3.4" log = "0.4.17" parity-scale-codec = "3.0.0" parking_lot = "0.12.1" diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index f02b1321d2922..cb648664b4006 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -17,7 +17,6 @@ // along with this program. If not, see . use futures::executor::block_on; -use hex_literal::hex; use parity_scale_codec::{Decode, Encode, Joiner}; use sc_block_builder::BlockBuilderProvider; use sc_client_api::{ @@ -155,7 +154,9 @@ fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec = client @@ -1604,25 +1605,29 @@ fn storage_keys_iter_prefix_and_start_key_works() { .collect(); assert_eq!( res, - [child_root.clone(), hex!("3a636f6465").to_vec(), hex!("3a686561707061676573").to_vec(),] + [ + child_root.clone(), + array_bytes::hex2bytes_unchecked("3a636f6465"), + array_bytes::hex2bytes_unchecked("3a686561707061676573"), + ] ); let res: Vec<_> = client .storage_keys_iter( &BlockId::Number(0), Some(&prefix), - Some(&StorageKey(hex!("3a636f6465").to_vec())), + Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), ) .unwrap() .map(|x| x.0) .collect(); - assert_eq!(res, [hex!("3a686561707061676573").to_vec()]); + assert_eq!(res, [array_bytes::hex2bytes_unchecked("3a686561707061676573")]); let res: Vec<_> = client .storage_keys_iter( &BlockId::Number(0), Some(&prefix), - Some(&StorageKey(hex!("3a686561707061676573").to_vec())), + Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a686561707061676573"))), ) .unwrap() .map(|x| x.0) @@ -1653,13 +1658,13 @@ fn storage_keys_iter_prefix_and_start_key_works() { fn storage_keys_iter_works() { let client = substrate_test_runtime_client::new(); - let prefix = StorageKey(hex!("").to_vec()); + let prefix = StorageKey(array_bytes::hex2bytes_unchecked("")); let res: Vec<_> = client .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) .unwrap() .take(8) - .map(|x| hex::encode(&x.0)) + .map(|x| array_bytes::bytes2hex("", &x.0)) .collect(); assert_eq!( res, @@ -1679,11 +1684,11 @@ fn storage_keys_iter_works() { .storage_keys_iter( &BlockId::Number(0), Some(&prefix), - Some(&StorageKey(hex!("3a636f6465").to_vec())), + Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), ) .unwrap() .take(7) - .map(|x| hex::encode(&x.0)) + .map(|x| array_bytes::bytes2hex("", &x.0)) .collect(); assert_eq!( res, @@ -1702,13 +1707,13 @@ fn storage_keys_iter_works() { .storage_keys_iter( &BlockId::Number(0), 
Some(&prefix), - Some(&StorageKey( - hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), - )), + Some(&StorageKey(array_bytes::hex2bytes_unchecked( + "79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d", + ))), ) .unwrap() .take(5) - .map(|x| hex::encode(x.0)) + .map(|x| array_bytes::bytes2hex("", &x.0)) .collect(); assert_eq!( res, diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 9aa05694b6619..cd7cb297e8c4a 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -34,9 +34,9 @@ sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" } [dev-dependencies] +array-bytes = "4.1" assert_matches = "1.3.0" criterion = "0.3" -hex = "0.4" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index 17c2cfa8a1e06..f04a27cf81e1d 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -880,7 +880,7 @@ fn should_not_accept_old_signatures() { // generated with schnorrkel 0.1.1 from `_bytes` let old_singature = sp_core::sr25519::Signature::try_from( - &hex::decode( + &array_bytes::hex2bytes( "c427eb672e8c441c86d31f1a81b22b43102058e9ce237cabe9897ea5099ffd426\ cd1c6a1f4f2869c3df57901d36bedcb295657adb3a4355add86ed234eb83108", ) diff --git a/frame/alliance/Cargo.toml b/frame/alliance/Cargo.toml index 24e047091009b..399822a2215f5 100644 --- a/frame/alliance/Cargo.toml +++ b/frame/alliance/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -hex = { version = "0.4", default-features = false, features = ["alloc"], optional = true } +array-bytes = { version = "4.1", optional = true } sha2 = { version = "0.10.1", default-features = false, optional = true } log = { version = "0.4.14", default-features = false } @@ -33,7 +33,7 @@ pallet-identity = { version = "4.0.0-dev", path = "../identity", default-feature pallet-collective = { version = "4.0.0-dev", path = "../collective", default-features = false, optional = true } [dev-dependencies] -hex-literal = "0.3.1" +array-bytes = "4.1" sha2 = "0.10.1" pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-collective = { version = "4.0.0-dev", path = "../collective" } @@ -55,7 +55,7 @@ std = [ "pallet-identity/std", ] runtime-benchmarks = [ - "hex", + "array-bytes", "sha2", "frame-benchmarking/runtime-benchmarks", "sp-runtime/runtime-benchmarks", diff --git a/frame/beefy-mmr/Cargo.toml b/frame/beefy-mmr/Cargo.toml index e8366943c85b0..62fabd387a167 100644 --- a/frame/beefy-mmr/Cargo.toml +++ b/frame/beefy-mmr/Cargo.toml @@ -9,8 +9,8 @@ repository = "https://github.com/paritytech/substrate" homepage = "https://substrate.io" [dependencies] +array-bytes = { version = "4.1", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -hex = { version = "0.4", optional = true } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", optional = true } @@ -27,18 +27,18 @@ sp-runtime = 
{ version = "6.0.0", default-features = false, path = "../../primit sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -hex-literal = "0.3" +array-bytes = "4.1" sp-staking = { version = "4.0.0-dev", path = "../../primitives/staking" } [features] default = ["std"] std = [ + "array-bytes", "beefy-merkle-tree/std", "beefy-primitives/std", "codec/std", "frame-support/std", "frame-system/std", - "hex", "log/std", "pallet-beefy/std", "pallet-mmr/std", diff --git a/frame/beefy-mmr/primitives/Cargo.toml b/frame/beefy-mmr/primitives/Cargo.toml index f30a418def042..1aa2573c7f680 100644 --- a/frame/beefy-mmr/primitives/Cargo.toml +++ b/frame/beefy-mmr/primitives/Cargo.toml @@ -9,7 +9,7 @@ description = "A no-std/Substrate compatible library to construct binary merkle homepage = "https://substrate.io" [dependencies] -hex = { version = "0.4", default-features = false, optional = true } +array-bytes = { version = "4.1", optional = true } log = { version = "0.4", default-features = false, optional = true } tiny-keccak = { version = "2.0.2", features = ["keccak"], optional = true } @@ -17,12 +17,11 @@ beefy-primitives = { version = "4.0.0-dev", default-features = false, path = ".. sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } [dev-dependencies] +array-bytes = "4.1" env_logger = "0.9" -hex = "0.4" -hex-literal = "0.3" [features] -debug = ["hex", "hex/std", "log"] +debug = ["array-bytes", "log"] default = ["debug", "keccak", "std"] keccak = ["tiny-keccak"] std = [ diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs index 664fd18199dd0..38831d7914715 100644 --- a/frame/beefy-mmr/primitives/src/lib.rs +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -235,7 +235,11 @@ where #[cfg(feature = "debug")] log::debug!( "[merkle_proof] Proof: {:?}", - collect_proof.proof.iter().map(hex::encode).collect::>() + collect_proof + .proof + .iter() + .map(|s| array_bytes::bytes2hex("", s)) + .collect::>() ); MerkleProof { root, proof: collect_proof.proof, number_of_leaves, leaf_index, leaf } @@ -308,10 +312,10 @@ where #[cfg(feature = "debug")] log::debug!( "[verify_proof]: (a, b) {:?}, {:?} => {:?} ({:?}) hash", - hex::encode(a), - hex::encode(b), - hex::encode(hash), - hex::encode(combined) + array_bytes::bytes2hex("", &a), + array_bytes::bytes2hex("", &b), + array_bytes::bytes2hex("", &hash), + array_bytes::bytes2hex("", &combined) ); position /= 2; width = ((width - 1) / 2) + 1; @@ -348,7 +352,11 @@ where visitor.visit(index, &a, &b); #[cfg(feature = "debug")] - log::debug!(" {:?}\n {:?}", a.as_ref().map(hex::encode), b.as_ref().map(hex::encode)); + log::debug!( + " {:?}\n {:?}", + a.as_ref().map(|s| array_bytes::bytes2hex("", s)), + b.as_ref().map(|s| array_bytes::bytes2hex("", s)) + ); index += 2; match (a, b) { @@ -369,7 +377,7 @@ where #[cfg(feature = "debug")] log::debug!( "[merkelize_row] Next: {:?}", - next.iter().map(hex::encode).collect::>() + next.iter().map(|s| array_bytes::bytes2hex("", s)).collect::>() ); return Err(next) }, @@ -395,7 +403,6 @@ sp_api::decl_runtime_apis! 
{ #[cfg(test)] mod tests { use super::*; - use hex_literal::hex; #[test] fn should_generate_empty_root() { @@ -408,7 +415,7 @@ mod tests { // then assert_eq!( - hex::encode(&out), + array_bytes::bytes2hex("", &out), "0000000000000000000000000000000000000000000000000000000000000000" ); } @@ -417,14 +424,16 @@ mod tests { fn should_generate_single_root() { // given let _ = env_logger::try_init(); - let data = vec![hex!("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b")]; + let data = vec![array_bytes::hex2array_unchecked::<20>( + "E04CC55ebEE1cBCE552f250e85c57B70B2E2625b", + )]; // when let out = merkle_root::(data); // then assert_eq!( - hex::encode(&out), + array_bytes::bytes2hex("", &out), "aeb47a269393297f4b0a3c9c9cfd00c7a4195255274cf39d83dabc2fcc9ff3d7" ); } @@ -434,8 +443,8 @@ mod tests { // given let _ = env_logger::try_init(); let data = vec![ - hex!("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b"), - hex!("25451A4de12dcCc2D166922fA938E900fCc4ED24"), + array_bytes::hex2array_unchecked::<20>("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b"), + array_bytes::hex2array_unchecked::<20>("25451A4de12dcCc2D166922fA938E900fCc4ED24"), ]; // when @@ -443,7 +452,7 @@ mod tests { // then assert_eq!( - hex::encode(&out), + array_bytes::bytes2hex("", &out), "697ea2a8fe5b03468548a7a413424a6292ab44a82a6f5cc594c3fa7dda7ce402" ); } @@ -452,7 +461,7 @@ mod tests { fn should_generate_root_complex() { let _ = env_logger::try_init(); let test = |root, data| { - assert_eq!(hex::encode(&merkle_root::(data)), root); + assert_eq!(array_bytes::bytes2hex("", &merkle_root::(data)), root); }; test( @@ -511,11 +520,19 @@ mod tests { )); // then - assert_eq!(hex::encode(proof0.root), hex::encode(proof1.root)); - assert_eq!(hex::encode(proof2.root), hex::encode(proof1.root)); + assert_eq!( + array_bytes::bytes2hex("", &proof0.root), + array_bytes::bytes2hex("", &proof1.root) + ); + assert_eq!( + array_bytes::bytes2hex("", &proof2.root), + array_bytes::bytes2hex("", &proof1.root) + ); assert!(!verify_proof::( - &hex!("fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239"), + &array_bytes::hex2array_unchecked( + "fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239" + ), proof0.proof, data.len(), proof0.leaf_index, @@ -779,17 +796,19 @@ mod tests { "0xA4cDc98593CE52d01Fe5Ca47CB3dA5320e0D7592", "0xc26B34D375533fFc4c5276282Fa5D660F3d8cbcB", ]; - let root = hex!("72b0acd7c302a84f1f6b6cefe0ba7194b7398afb440e1b44a9dbbe270394ca53"); + let root = array_bytes::hex2array_unchecked( + "72b0acd7c302a84f1f6b6cefe0ba7194b7398afb440e1b44a9dbbe270394ca53", + ); let data = addresses .into_iter() - .map(|address| hex::decode(&address[2..]).unwrap()) + .map(|address| array_bytes::hex2bytes_unchecked(&address)) .collect::>(); for l in 0..data.len() { // when let proof = merkle_proof::(data.clone(), l); - assert_eq!(hex::encode(&proof.root), hex::encode(&root)); + assert_eq!(array_bytes::bytes2hex("", &proof.root), array_bytes::bytes2hex("", &root)); assert_eq!(proof.leaf_index, l); assert_eq!(&proof.leaf, &data[l]); @@ -810,14 +829,25 @@ mod tests { MerkleProof { root, proof: vec![ - hex!("340bcb1d49b2d82802ddbcf5b85043edb3427b65d09d7f758fbc76932ad2da2f"), - hex!("ba0580e5bd530bc93d61276df7969fb5b4ae8f1864b4a28c280249575198ff1f"), - hex!("d02609d2bbdb28aa25f58b85afec937d5a4c85d37925bce6d0cf802f9d76ba79"), - hex!("ae3f8991955ed884613b0a5f40295902eea0e0abe5858fc520b72959bc016d4e"), + array_bytes::hex2array_unchecked( + "340bcb1d49b2d82802ddbcf5b85043edb3427b65d09d7f758fbc76932ad2da2f" + ), + array_bytes::hex2array_unchecked( + 
"ba0580e5bd530bc93d61276df7969fb5b4ae8f1864b4a28c280249575198ff1f" + ), + array_bytes::hex2array_unchecked( + "d02609d2bbdb28aa25f58b85afec937d5a4c85d37925bce6d0cf802f9d76ba79" + ), + array_bytes::hex2array_unchecked( + "ae3f8991955ed884613b0a5f40295902eea0e0abe5858fc520b72959bc016d4e" + ), ], number_of_leaves: data.len(), leaf_index: data.len() - 1, - leaf: hex!("c26B34D375533fFc4c5276282Fa5D660F3d8cbcB").to_vec(), + leaf: array_bytes::hex2array_unchecked::<20>( + "c26B34D375533fFc4c5276282Fa5D660F3d8cbcB" + ) + .to_vec(), } ); } diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs index eaa50004ae848..b9851d9eef6cb 100644 --- a/frame/beefy-mmr/src/tests.rs +++ b/frame/beefy-mmr/src/tests.rs @@ -22,7 +22,6 @@ use beefy_primitives::{ ValidatorSet, }; use codec::{Decode, Encode}; -use hex_literal::hex; use sp_core::H256; use sp_io::TestExternalities; @@ -70,9 +69,9 @@ fn should_contain_mmr_digest() { beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(1), mock_beefy_id(2)], 1).unwrap() )), - beefy_log(ConsensusLog::MmrRoot( - hex!("95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc").into() - )) + beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( + "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" + ))) ] ); @@ -85,15 +84,15 @@ fn should_contain_mmr_digest() { beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(1), mock_beefy_id(2)], 1).unwrap() )), - beefy_log(ConsensusLog::MmrRoot( - hex!("95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc").into() - )), + beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( + "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" + ))), beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new(vec![mock_beefy_id(3), mock_beefy_id(4)], 2).unwrap() )), - beefy_log(ConsensusLog::MmrRoot( - hex!("a73271a0974f1e67d6e9b8dd58e506177a2e556519a330796721e98279a753e2").into() - )), + beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( + "a73271a0974f1e67d6e9b8dd58e506177a2e556519a330796721e98279a753e2" + ))), ] ); }); @@ -120,11 +119,13 @@ fn should_contain_valid_leaf_data() { beefy_next_authority_set: BeefyNextAuthoritySet { id: 2, len: 2, - root: hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5") - .into(), + root: array_bytes::hex_n_into_unchecked( + "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5" + ) }, - leaf_extra: hex!("55b8e9e1cc9f0db7776fac0ca66318ef8acfb8ec26db11e373120583e07ee648") - .to_vec(), + leaf_extra: array_bytes::hex2bytes_unchecked( + "55b8e9e1cc9f0db7776fac0ca66318ef8acfb8ec26db11e373120583e07ee648" + ) } ); @@ -143,11 +144,13 @@ fn should_contain_valid_leaf_data() { beefy_next_authority_set: BeefyNextAuthoritySet { id: 3, len: 2, - root: hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5") - .into(), + root: array_bytes::hex_n_into_unchecked( + "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5" + ) }, - leaf_extra: hex!("55b8e9e1cc9f0db7776fac0ca66318ef8acfb8ec26db11e373120583e07ee648") - .to_vec() + leaf_extra: array_bytes::hex2bytes_unchecked( + "55b8e9e1cc9f0db7776fac0ca66318ef8acfb8ec26db11e373120583e07ee648" + ) } ); } @@ -161,8 +164,9 @@ fn should_update_authorities() { // check current authority set assert_eq!(0, auth_set.id); assert_eq!(2, auth_set.len); - let want: H256 = - hex!("176e73f1bf656478b728e28dd1a7733c98621b8acf830bff585949763dca7a96").into(); + let want = 
array_bytes::hex_n_into_unchecked::( + "176e73f1bf656478b728e28dd1a7733c98621b8acf830bff585949763dca7a96", + ); assert_eq!(want, auth_set.root); // next authority set should have same validators but different id @@ -180,8 +184,9 @@ fn should_update_authorities() { assert_eq!(1, auth_set.id); // check next auth set assert_eq!(2, next_auth_set.id); - let want: H256 = - hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5").into(); + let want = array_bytes::hex_n_into_unchecked::( + "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5", + ); assert_eq!(2, next_auth_set.len); assert_eq!(want, next_auth_set.root); @@ -195,8 +200,9 @@ fn should_update_authorities() { assert_eq!(2, auth_set.id); // check next auth set assert_eq!(3, next_auth_set.id); - let want: H256 = - hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5").into(); + let want = array_bytes::hex_n_into_unchecked::( + "9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5", + ); assert_eq!(2, next_auth_set.len); assert_eq!(want, next_auth_set.root); }); diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 49603915ca6da..61aa2b9b900c6 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -31,7 +31,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives sp-storage = { version = "6.0.0", default-features = false, path = "../../primitives/storage" } [dev-dependencies] -hex-literal = "0.3.4" +array-bytes = "4.1" rusty-fork = { version = "0.3.0", default-features = false } sp-keystore = { version = "0.12.0", path = "../../primitives/keystore" } diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 4401fb059f90b..a221eccb82c85 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -1695,13 +1695,13 @@ pub fn show_benchmark_debug_info( /// use frame_benchmarking::TrackedStorageKey; /// let whitelist: Vec = vec![ /// // Block Number -/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), +/// array_bytes::hex_into_unchecked("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac"), /// // Total Issuance -/// hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), +/// array_bytes::hex_into_unchecked("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80"), /// // Execution Phase -/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), +/// array_bytes::hex_into_unchecked("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a"), /// // Event Count -/// hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), +/// array_bytes::hex_into_unchecked("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850"), /// ]; /// ``` /// diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index e9a90504d7db7..30fbad680ebe5 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -45,9 +45,9 @@ sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../p sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] +array-bytes = "4.1" assert_matches = "1" env_logger = "0.9" -hex-literal = "0.3" pretty_assertions = "1" wat = "1" diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 
2b5cf6353256c..38b47a915cddc 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1379,7 +1379,6 @@ mod tests { use codec::{Decode, Encode}; use frame_support::{assert_err, assert_ok, parameter_types}; use frame_system::{EventRecord, Phase}; - use hex_literal::hex; use pallet_contracts_primitives::ReturnFlags; use pretty_assertions::assert_eq; use sp_core::Bytes; @@ -3210,13 +3209,12 @@ mod tests { #[test] fn ecdsa_to_eth_address_returns_proper_value() { let bob_ch = MockLoader::insert(Call, |ctx, _| { - let pubkey_compressed: [u8; 33] = - hex!("028db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd91")[..] - .try_into() - .unwrap(); + let pubkey_compressed = array_bytes::hex2array_unchecked( + "028db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd91", + ); assert_eq!( ctx.ext.ecdsa_to_eth_address(&pubkey_compressed).unwrap(), - hex!("09231da7b19A016f9e576d23B16277062F4d46A8")[..] + array_bytes::hex2array_unchecked::<20>("09231da7b19A016f9e576d23B16277062F4d46A8") ); exec_success() }); diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 085bc5a8c6423..126a37e9401ec 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -275,7 +275,6 @@ mod tests { }; use assert_matches::assert_matches; use frame_support::{assert_ok, dispatch::DispatchResultWithPostInfo, weights::Weight}; - use hex_literal::hex; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; use sp_core::{Bytes, H256}; @@ -1828,10 +1827,9 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: Bytes( - hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F") - .to_vec() - ), + data: array_bytes::hex_into_unchecked( + "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" + ) }, ); } @@ -1901,7 +1899,9 @@ mod tests { flags: ReturnFlags::empty(), data: Bytes( ( - hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F"), + array_bytes::hex2array_unchecked::<32>( + "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" + ), 42u64, ) .encode() @@ -2110,7 +2110,7 @@ mod tests { fn seal_return_with_success_status() { let output = execute( CODE_RETURN_WITH_DATA, - hex!("00000000445566778899").to_vec(), + array_bytes::hex2bytes_unchecked("00000000445566778899"), MockExt::default(), ) .unwrap(); @@ -2119,7 +2119,7 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: Bytes(hex!("445566778899").to_vec()), + data: Bytes(array_bytes::hex2bytes_unchecked("445566778899")), } ); assert!(!output.did_revert()); @@ -2127,15 +2127,18 @@ mod tests { #[test] fn return_with_revert_status() { - let output = - execute(CODE_RETURN_WITH_DATA, hex!("010000005566778899").to_vec(), MockExt::default()) - .unwrap(); + let output = execute( + CODE_RETURN_WITH_DATA, + array_bytes::hex2bytes_unchecked("010000005566778899"), + MockExt::default(), + ) + .unwrap(); assert_eq!( output, ExecReturnValue { flags: ReturnFlags::REVERT, - data: Bytes(hex!("5566778899").to_vec()), + data: Bytes(array_bytes::hex2bytes_unchecked("5566778899")), } ); assert!(output.did_revert()); diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index 1ae22ddbb0260..cd5c793ee2ee0 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -27,7 +27,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives sp-tracing = { version = "5.0.0", default-features = false, path = 
"../../primitives/tracing" } [dev-dependencies] -hex-literal = "0.3.4" +array-bytes = "4.1" pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../transaction-payment" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 4926384e42018..01b71aab745d5 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -602,8 +602,6 @@ where mod tests { use super::*; - use hex_literal::hex; - use sp_core::H256; use sp_runtime::{ generic::{DigestItem, Era}, @@ -947,11 +945,15 @@ mod tests { fn block_import_works() { block_import_works_inner( new_test_ext_v0(1), - hex!("1039e1a4bd0cf5deefe65f313577e70169c41c7773d6acf31ca8d671397559f5").into(), + array_bytes::hex_n_into_unchecked( + "1039e1a4bd0cf5deefe65f313577e70169c41c7773d6acf31ca8d671397559f5", + ), ); block_import_works_inner( new_test_ext(1), - hex!("75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5").into(), + array_bytes::hex_n_into_unchecked( + "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", + ), ); } fn block_import_works_inner(mut ext: sp_io::TestExternalities, state_root: H256) { @@ -961,10 +963,9 @@ mod tests { parent_hash: [69u8; 32].into(), number: 1, state_root, - extrinsics_root: hex!( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" - ) - .into(), + extrinsics_root: array_bytes::hex_n_into_unchecked( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", + ), digest: Digest { logs: vec![] }, }, extrinsics: vec![], @@ -981,10 +982,9 @@ mod tests { parent_hash: [69u8; 32].into(), number: 1, state_root: [0u8; 32].into(), - extrinsics_root: hex!( - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" - ) - .into(), + extrinsics_root: array_bytes::hex_n_into_unchecked( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314", + ), digest: Digest { logs: vec![] }, }, extrinsics: vec![], @@ -1000,10 +1000,9 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!( - "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5" - ) - .into(), + state_root: array_bytes::hex_n_into_unchecked( + "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", + ), extrinsics_root: [0u8; 32].into(), digest: Digest { logs: vec![] }, }, diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index 43d7d6ba2eed7..91ec19d731094 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -25,8 +25,8 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primit sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] +array-bytes = "4.1" env_logger = "0.9" -hex-literal = "0.3" itertools = "0.10.3" [features] diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml index 61b71b000a616..a6e177af1853d 100644 --- a/frame/transaction-storage/Cargo.toml +++ b/frame/transaction-storage/Cargo.toml @@ -13,8 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +array-bytes = { version = "4.1", optional = true } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -hex-literal = { version = "0.3.4", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde 
= { version = "1.0.136", optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } @@ -34,7 +34,7 @@ sp-transaction-storage-proof = { version = "4.0.0-dev", default-features = true, [features] default = ["std"] -runtime-benchmarks = ["frame-benchmarking/runtime-benchmarks", "hex-literal"] +runtime-benchmarks = ["array-bytes", "frame-benchmarking/runtime-benchmarks"] std = [ "log/std", "frame-benchmarking?/std", diff --git a/frame/transaction-storage/src/benchmarking.rs b/frame/transaction-storage/src/benchmarking.rs index cab4c92317bb5..c7fbd00fb565d 100644 --- a/frame/transaction-storage/src/benchmarking.rs +++ b/frame/transaction-storage/src/benchmarking.rs @@ -40,67 +40,68 @@ use crate::Pallet as TransactionStorage; // build_proof(hash.as_slice(), transactions).unwrap().encode() // ``` // while hardforcing target chunk key in `build_proof` to [22, 21, 1, 0]. -const PROOF: &[u8] = &hex_literal::hex!( - " - 0104000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000000 - 0000000000000000000000000000000000000000000000000000000000000000000000000000 - 00000000000000000000000000000000000000000000000000000000000014cd0780ffff8030 - 2eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba0080302eb0a6d2 - f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15 - f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1 - 004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e304 - 8cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697 - eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a - 30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302e - b0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b - 834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e7 - 29d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c10046 - 57e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf2 - 06d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb1 - 53f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba - bd058077778010fd81bc1359802f0b871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de - 808da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff338ad7120b0256c28380221ce17f - 19117affa96e077905fe48a99723a065969c638593b7d9ab57b538438010fd81bc1359802f0b - 871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de808da338e6b722f7bf2051901bd5bc - cee5e71d5cf6b1faff338ad7120b0256c283008010fd81bc1359802f0b871aeb95e4410a8ec9 - 2b93af10ea767a2027cf4734e8de808da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff - 338ad7120b0256c28380221ce17f19117affa96e077905fe48a99723a065969c638593b7d9ab - 57b538438010fd81bc1359802f0b871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de80 - 8da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff338ad7120b0256c28380221ce17f19 - 117affa96e077905fe48a99723a065969c638593b7d9ab57b53843cd0780ffff804509f59593 - fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c00804509f59593fd47b1a9 - 7189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba6 - 
5a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0 - 346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f983 - 6e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf89 - 1a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c8045 - 09f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd - 47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189 - 127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a56 - 49cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb03466 - 37f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e15 - 5eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a93 - 9c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939ccd0780ff - ff8078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e - 776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea - 05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f - 015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d - 06feafa3610fc44a5b2ef543cb81008078916e776c64ccea05e958559f015c082d9d06feafa3 - 610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b - 2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb81 - 8078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e77 - 6c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05 - e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f01 - 5c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06 - feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610f - c44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef5 - 43cb811044010000 - " -); +const PROOF: &str = "\ + 0104000000000000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000000000000000\ + 00000000000000000000000000000000000000000000000000000000000014cd0780ffff8030\ + 2eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba0080302eb0a6d2\ + f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15\ + f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1\ + 004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e304\ + 8cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697\ + eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a\ + 30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302e\ + b0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b\ + 834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e7\ + 29d1c1004657e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c10046\ + 57e3048cf206d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf2\ + 06d697eeb153f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb1\ + 53f61a30ba80302eb0a6d2f63b834d15f1e729d1c1004657e3048cf206d697eeb153f61a30ba\ + 
bd058077778010fd81bc1359802f0b871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de\ + 808da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff338ad7120b0256c28380221ce17f\ + 19117affa96e077905fe48a99723a065969c638593b7d9ab57b538438010fd81bc1359802f0b\ + 871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de808da338e6b722f7bf2051901bd5bc\ + cee5e71d5cf6b1faff338ad7120b0256c283008010fd81bc1359802f0b871aeb95e4410a8ec9\ + 2b93af10ea767a2027cf4734e8de808da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff\ + 338ad7120b0256c28380221ce17f19117affa96e077905fe48a99723a065969c638593b7d9ab\ + 57b538438010fd81bc1359802f0b871aeb95e4410a8ec92b93af10ea767a2027cf4734e8de80\ + 8da338e6b722f7bf2051901bd5bccee5e71d5cf6b1faff338ad7120b0256c28380221ce17f19\ + 117affa96e077905fe48a99723a065969c638593b7d9ab57b53843cd0780ffff804509f59593\ + fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c00804509f59593fd47b1a9\ + 7189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba6\ + 5a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0\ + 346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f983\ + 6e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf89\ + 1a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c8045\ + 09f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd\ + 47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189\ + 127ba65a5649cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a56\ + 49cfb0346637f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb03466\ + 37f9836e155eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e15\ + 5eaf891a939c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a93\ + 9c804509f59593fd47b1a97189127ba65a5649cfb0346637f9836e155eaf891a939ccd0780ff\ + ff8078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e\ + 776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea\ + 05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f\ + 015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d\ + 06feafa3610fc44a5b2ef543cb81008078916e776c64ccea05e958559f015c082d9d06feafa3\ + 610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b\ + 2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb81\ + 8078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e77\ + 6c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05\ + e958559f015c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f01\ + 5c082d9d06feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06\ + feafa3610fc44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610f\ + c44a5b2ef543cb818078916e776c64ccea05e958559f015c082d9d06feafa3610fc44a5b2ef5\ + 43cb811044010000\ +"; +fn proof() -> Vec { + array_bytes::hex2bytes_unchecked(PROOF) +} type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -159,8 +160,8 @@ benchmarks! 
{ )?; } run_to_block::(StoragePeriod::::get() + T::BlockNumber::one()); - let mut encoded_proof = PROOF; - let proof = TransactionStorageProof::decode(&mut encoded_proof).unwrap(); + let encoded_proof = proof(); + let proof = TransactionStorageProof::decode(&mut &*encoded_proof).unwrap(); }: check_proof(RawOrigin::None, proof) verify { assert_last_event::(Event::ProofChecked.into()); diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml index 320a30770ab42..1c1afde36ce5c 100644 --- a/primitives/beefy/Cargo.toml +++ b/primitives/beefy/Cargo.toml @@ -22,8 +22,7 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] -hex = "0.4.3" -hex-literal = "0.3" +array-bytes = "4.1" sp-keystore = { version = "0.12.0", path = "../keystore" } [features] diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs index ddf58474e77a0..4880d4b69ab01 100644 --- a/primitives/beefy/src/commitment.rs +++ b/primitives/beefy/src/commitment.rs @@ -301,7 +301,6 @@ impl From> for VersionedFinalityProof { #[cfg(test)] mod tests { - use sp_core::{keccak_256, Pair}; use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; @@ -314,6 +313,8 @@ mod tests { type TestSignedCommitment = SignedCommitment; type TestVersionedFinalityProof = VersionedFinalityProof; + const LARGE_RAW_COMMITMENT: &[u8] = include_bytes!("../test-res/large-raw-commitment"); + // The mock signatures are equivalent to the ones produced by the BEEFY keystore fn mock_signatures() -> (crypto::Signature, crypto::Signature) { let store: SyncCryptoStorePtr = KeyStore::new().into(); @@ -351,7 +352,7 @@ mod tests { assert_eq!(decoded, Ok(commitment)); assert_eq!( encoded, - hex_literal::hex!( + array_bytes::hex2bytes_unchecked( "046d68343048656c6c6f20576f726c6421050000000000000000000000000000000000000000000000" ) ); @@ -379,12 +380,14 @@ mod tests { assert_eq!(decoded, Ok(signed)); assert_eq!( encoded, - hex_literal::hex!( - "046d68343048656c6c6f20576f726c6421050000000000000000000000000000000000000000000000 - 04300400000008558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746 - cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba012d6e1f8105c337a86cdd9a - aacdc496577f3db8c55ef9e6fd48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487bca2324b - 6a0046395a71681be3d0c2a00" + array_bytes::hex2bytes_unchecked( + "\ + 046d68343048656c6c6f20576f726c64210500000000000000000000000000000000000000000000000\ + 4300400000008558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746c\ + c321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba012d6e1f8105c337a86cdd9aa\ + acdc496577f3db8c55ef9e6fd48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487bca2324b6\ + a0046395a71681be3d0c2a00\ + " ) ); } @@ -481,56 +484,6 @@ mod tests { // then assert_eq!(decoded, Ok(signed)); - assert_eq!( - encoded, - hex_literal::hex!( - "046d68343048656c6c6f20576f726c6421050000000000000000000000000000000000000000000000 - 05020000000000000000000000000000000000000000000000000000000000000000000000000000000 - 000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - fffffffffff0000040000b10a558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed - 4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad812 - 
79df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d1 - 0dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2 - ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01 - 558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e9 - 9a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72 - d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bc - b7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc3 - 21f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc9855 - 80e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0 - c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da - 8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279d - f0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd - 3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac8 - 0a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558 - 455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a8 - 30e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d94 - 8d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb78 - 16f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f - 2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e - 4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33 - c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da848 - 0c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df07 - 95cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd - 68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a0 - 9abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455 - ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e - 314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1 - 107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f - 9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f231 - 9a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb - 75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86 - e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c7 - 46cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795c - c985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68c - e3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09ab - ed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad8 - 1279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314 - d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107 - b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba - 
01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d11
07b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830
cb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75
d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f231
9a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb781
6f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d9
48d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01" - ) - ); + assert_eq!(encoded, LARGE_RAW_COMMITMENT); } } diff --git a/primitives/beefy/src/witness.rs b/primitives/beefy/src/witness.rs index aae0608150534..cebfc3de85049 100644 --- a/primitives/beefy/src/witness.rs +++ b/primitives/beefy/src/witness.rs @@ -148,12 +148,14 @@ mod tests { assert_eq!(decoded, Ok(witness)); assert_eq!( encoded, - hex_literal::hex!( - "046d683048656c6c6f20576f726c642105000000000000000000000000000000000000000000000010 - 0000010110000001558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c - 746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01012d6e1f8105c337a86 - cdd9aaacdc496577f3db8c55ef9e6fd48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487bc - a2324b6a0046395a71681be3d0c2a00" + array_bytes::hex2bytes_unchecked( + "\ + 046d683048656c6c6f20576f726c642105000000000000000000000000000000000000000000000010\ + 0000010110000001558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c\ + 746cc321f2319a5e99a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01012d6e1f8105c337a8\ + 6cdd9aaacdc496577f3db8c55ef9e6fd48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487b\ + ca2324b6a0046395a71681be3d0c2a00\ + " ) ); } diff --git a/primitives/beefy/test-res/large-raw-commitment b/primitives/beefy/test-res/large-raw-commitment new file mode 100644 index 0000000000000000000000000000000000000000..d5dbbe402a88e734367395e5af0305f5a47a0872 GIT binary patch literal 44638 zcmeI)F-t;G7zW^@=#W%8gtJb8lR;zccL^uSp_~*Pt_adlGzHavP;nQep($tzT#6_V zC+DVyK-!$@58T!5x#JxiMDIQ4JMa5sx}C#n)Q;m`>9p66Tjgv>zOw1$`$>26f19{M z$b|5itKZfiZuZBktFyPe#%}HDyuXa*h3m>|Zfp8;GKsb?;#v9kVAvc?s$YBaoyAAz z^Y~?UbpJhizbvkwQ$E0V;CJA^;CbMg=)K_GXnkOfWSwZOY` + AsRef<[u8]> + Public + Derive> Ss58Codec for T { let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; let s = cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS); let addr = if let Some(stripped) = s.strip_prefix("0x") { - let d = hex::decode(stripped).map_err(|_| PublicError::InvalidFormat)?; + let d = array_bytes::hex2bytes(stripped).map_err(|_| PublicError::InvalidFormat)?; Self::from_slice(&d).map_err(|()| PublicError::BadLength)? } else { Self::from_ss58check(s)? 
@@ -614,10 +614,7 @@ impl sp_std::str::FromStr for AccountId32 { fn from_str(s: &str) -> Result { let hex_or_ss58_without_prefix = s.trim_start_matches("0x"); if hex_or_ss58_without_prefix.len() == 64 { - let mut bytes = [0u8; 32]; - hex::decode_to_slice(hex_or_ss58_without_prefix, &mut bytes) - .map_err(|_| "invalid hex address.") - .map(|_| Self::from(bytes)) + array_bytes::hex_n_into(hex_or_ss58_without_prefix).map_err(|_| "invalid hex address.") } else { Self::from_ss58check(s).map_err(|_| "invalid ss58 address.") } @@ -943,7 +940,7 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { password_override.or_else(|| password.as_ref().map(|p| p.expose_secret().as_str())); let (root, seed) = if let Some(stripped) = phrase.expose_secret().strip_prefix("0x") { - hex::decode(stripped) + array_bytes::hex2bytes(stripped) .ok() .and_then(|seed_vec| { let mut seed = Self::Seed::default(); @@ -1127,7 +1124,6 @@ pub mod key_types { mod tests { use super::*; use crate::DeriveJunction; - use hex_literal::hex; #[derive(Clone, Eq, PartialEq, Debug)] enum TestPair { @@ -1269,7 +1265,7 @@ mod tests { fn interpret_std_seed_should_work() { assert_eq!( TestPair::from_string("0x0123456789abcdef", None), - Ok(TestPair::Seed(hex!["0123456789abcdef"][..].to_owned())) + Ok(TestPair::Seed(array_bytes::hex2bytes_unchecked("0123456789abcdef"))) ); } diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index d56f65fd289e7..ca6b800625bc2 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -229,7 +229,7 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_str(&hex::encode(self)) + serializer.serialize_str(&array_bytes::bytes2hex("", self.as_ref())) } } @@ -239,7 +239,7 @@ impl<'de> Deserialize<'de> for Signature { where D: Deserializer<'de>, { - let signature_hex = hex::decode(&String::deserialize(deserializer)?) + let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) 
.map_err(|e| de::Error::custom(format!("{:?}", e)))?; Signature::try_from(signature_hex.as_ref()) .map_err(|e| de::Error::custom(format!("{:?}", e))) @@ -597,7 +597,6 @@ mod test { set_default_ss58_version, PublicError, Ss58AddressFormat, Ss58AddressFormatRegistry, DEV_PHRASE, }; - use hex_literal::hex; use serde_json; #[test] @@ -612,31 +611,35 @@ mod test { #[test] fn seed_and_derive_should_work() { - let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); + let seed = array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + ); let pair = Pair::from_seed(&seed); assert_eq!(pair.seed(), seed); let path = vec![DeriveJunction::Hard([0u8; 32])]; let derived = pair.derive(path.into_iter(), None).ok().unwrap(); assert_eq!( derived.0.seed(), - hex!("b8eefc4937200a8382d00050e050ced2d4ab72cc2ef1b061477afb51564fdd61") + array_bytes::hex2array_unchecked::<32>( + "b8eefc4937200a8382d00050e050ced2d4ab72cc2ef1b061477afb51564fdd61" + ) ); } #[test] fn test_vector_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", )); let public = pair.public(); assert_eq!( public, Public::from_full( - &hex!("8db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd913ebbe148dd17c56551a52952371071a6c604b3f3abe8f2c8fa742158ea6dd7d4")[..], + &array_bytes::hex2bytes_unchecked("8db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd913ebbe148dd17c56551a52952371071a6c604b3f3abe8f2c8fa742158ea6dd7d4"), ).unwrap(), ); let message = b""; - let signature = hex!("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); + let signature = array_bytes::hex2array_unchecked("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); let signature = Signature::from_raw(signature); assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); @@ -653,11 +656,11 @@ mod test { assert_eq!( public, Public::from_full( - &hex!("8db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd913ebbe148dd17c56551a52952371071a6c604b3f3abe8f2c8fa742158ea6dd7d4")[..], + &array_bytes::hex2bytes_unchecked("8db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd913ebbe148dd17c56551a52952371071a6c604b3f3abe8f2c8fa742158ea6dd7d4"), ).unwrap(), ); let message = b""; - let signature = hex!("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); + let signature = array_bytes::hex2array_unchecked("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); let signature = Signature::from_raw(signature); assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); @@ -680,10 +683,10 @@ mod test { assert_eq!( public, Public::from_full( - &hex!("5676109c54b9a16d271abeb4954316a40a32bcce023ac14c8e26e958aa68fba995840f3de562156558efbfdac3f16af0065e5f66795f4dd8262a228ef8c6d813")[..], + &array_bytes::hex2bytes_unchecked("5676109c54b9a16d271abeb4954316a40a32bcce023ac14c8e26e958aa68fba995840f3de562156558efbfdac3f16af0065e5f66795f4dd8262a228ef8c6d813"), ).unwrap(), ); - let message = 
hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let message = array_bytes::hex2bytes_unchecked("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); let signature = pair.sign(&message[..]); println!("Correct signature: {:?}", signature); assert!(Pair::verify(&signature, &message[..], &public)); diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index 0553cf4843df5..e85eb87c9fd83 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -228,7 +228,7 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_str(&hex::encode(self)) + serializer.serialize_str(&array_bytes::bytes2hex("", self.as_ref())) } } @@ -238,7 +238,7 @@ impl<'de> Deserialize<'de> for Signature { where D: Deserializer<'de>, { - let signature_hex = hex::decode(&String::deserialize(deserializer)?) + let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e)))?; Signature::try_from(signature_hex.as_ref()) .map_err(|e| de::Error::custom(format!("{:?}", e))) @@ -551,7 +551,6 @@ impl CryptoType for Pair { mod test { use super::*; use crate::crypto::DEV_PHRASE; - use hex_literal::hex; use serde_json; #[test] @@ -566,31 +565,35 @@ mod test { #[test] fn seed_and_derive_should_work() { - let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); + let seed = array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + ); let pair = Pair::from_seed(&seed); assert_eq!(pair.seed(), seed); let path = vec![DeriveJunction::Hard([0u8; 32])]; let derived = pair.derive(path.into_iter(), None).ok().unwrap().0; assert_eq!( derived.seed(), - hex!("ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c") + array_bytes::hex2array_unchecked::<32>( + "ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c" + ) ); } #[test] fn test_vector_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", )); let public = pair.public(); assert_eq!( public, - Public::from_raw(hex!( + Public::from_raw(array_bytes::hex2array_unchecked( "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" )) ); let message = b""; - let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); + let signature = array_bytes::hex2array_unchecked("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); let signature = Signature::from_raw(signature); assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); @@ -606,12 +609,12 @@ mod test { let public = pair.public(); assert_eq!( public, - Public::from_raw(hex!( + Public::from_raw(array_bytes::hex2array_unchecked( "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" )) ); let message = b""; - let signature = 
hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); + let signature = array_bytes::hex2array_unchecked("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); let signature = Signature::from_raw(signature); assert!(pair.sign(&message[..]) == signature); assert!(Pair::verify(&signature, &message[..], &public)); @@ -633,11 +636,11 @@ mod test { let public = pair.public(); assert_eq!( public, - Public::from_raw(hex!( + Public::from_raw(array_bytes::hex2array_unchecked( "2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee" )) ); - let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let message = array_bytes::hex2bytes_unchecked("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); let signature = pair.sign(&message[..]); println!("Correct signature: {:?}", signature); assert!(Pair::verify(&signature, &message[..], &public)); diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index ef033c2099b5f..9064fb7427393 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -233,7 +233,7 @@ impl Serialize for Signature { where S: Serializer, { - serializer.serialize_str(&hex::encode(self)) + serializer.serialize_str(&array_bytes::bytes2hex("", self.as_ref())) } } @@ -243,7 +243,7 @@ impl<'de> Deserialize<'de> for Signature { where D: Deserializer<'de>, { - let signature_hex = hex::decode(&String::deserialize(deserializer)?) + let signature_hex = array_bytes::hex2bytes(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e)))?; Signature::try_from(signature_hex.as_ref()) .map_err(|e| de::Error::custom(format!("{:?}", e))) @@ -664,7 +664,6 @@ pub fn verify_batch( mod compatibility_test { use super::*; use crate::crypto::DEV_PHRASE; - use hex_literal::hex; // NOTE: tests to ensure addresses that are created with the `0.1.x` version (pre-audit) are // still functional. 
@@ -673,7 +672,9 @@ mod compatibility_test { fn derive_soft_known_pair_should_work() { let pair = Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None).unwrap(); // known address of DEV_PHRASE with 1.1 - let known = hex!("d6c71059dbbe9ad2b0ed3f289738b800836eb425544ce694825285b958ca755e"); + let known = array_bytes::hex2bytes_unchecked( + "d6c71059dbbe9ad2b0ed3f289738b800836eb425544ce694825285b958ca755e", + ); assert_eq!(pair.public().to_raw_vec(), known); } @@ -681,17 +682,19 @@ mod compatibility_test { fn derive_hard_known_pair_should_work() { let pair = Pair::from_string(&format!("{}//Alice", DEV_PHRASE), None).unwrap(); // known address of DEV_PHRASE with 1.1 - let known = hex!("d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"); + let known = array_bytes::hex2bytes_unchecked( + "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d", + ); assert_eq!(pair.public().to_raw_vec(), known); } #[test] fn verify_known_old_message_should_work() { - let public = Public::from_raw(hex!( - "b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918" + let public = Public::from_raw(array_bytes::hex2array_unchecked( + "b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918", )); // signature generated by the 1.1 version with the same ^^ public key. - let signature = Signature::from_raw(hex!( + let signature = Signature::from_raw(array_bytes::hex2array_unchecked( "5a9755f069939f45d96aaf125cf5ce7ba1db998686f87f2fb3cbdea922078741a73891ba265f70c31436e18a9acd14d189d73c12317ab6c313285cd938453202" )); let message = b"Verifying that I am the owner of 5G9hQLdsKQswNPgB499DeA5PkFBbgkLPJWkkS6FAM6xGQ8xD. Hash: 221455a3\n"; @@ -704,7 +707,6 @@ mod compatibility_test { mod test { use super::*; use crate::crypto::{Ss58Codec, DEV_ADDRESS, DEV_PHRASE}; - use hex_literal::hex; use serde_json; #[test] @@ -745,8 +747,8 @@ mod test { #[test] fn derive_soft_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", )); let derive_1 = pair.derive(Some(DeriveJunction::soft(1)).into_iter(), None).unwrap().0; let derive_1b = pair.derive(Some(DeriveJunction::soft(1)).into_iter(), None).unwrap().0; @@ -757,8 +759,8 @@ mod test { #[test] fn derive_hard_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", )); let derive_1 = pair.derive(Some(DeriveJunction::hard(1)).into_iter(), None).unwrap().0; let derive_1b = pair.derive(Some(DeriveJunction::hard(1)).into_iter(), None).unwrap().0; @@ -769,8 +771,8 @@ mod test { #[test] fn derive_soft_public_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", )); let path = Some(DeriveJunction::soft(1)); let pair_1 = pair.derive(path.into_iter(), None).unwrap().0; @@ -780,8 +782,8 @@ mod test { #[test] fn derive_hard_public_should_fail() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( + 
"9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", )); let path = Some(DeriveJunction::hard(1)); assert!(pair.public().derive(path.into_iter()).is_none()); @@ -789,13 +791,13 @@ mod test { #[test] fn sr_test_vector_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + let pair = Pair::from_seed(&array_bytes::hex2array_unchecked( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", )); let public = pair.public(); assert_eq!( public, - Public::from_raw(hex!( + Public::from_raw(array_bytes::hex2array_unchecked( "44a996beb1eef7bdcab976ab6d2ca26104834164ecf28fb375600576fcc6eb0f" )) ); @@ -840,11 +842,11 @@ mod test { let public = pair.public(); assert_eq!( public, - Public::from_raw(hex!( + Public::from_raw(array_bytes::hex2array_unchecked( "741c08a06f41c596608f6774259bd9043304adfa5d3eea62760bd9be97634d63" )) ); - let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let message = array_bytes::hex2bytes_unchecked("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); let signature = pair.sign(&message[..]); assert!(Pair::verify(&signature, &message[..], &public)); } @@ -865,11 +867,11 @@ mod test { // schnorrkel-js. // // This is to make sure that the wasm library is compatible. - let pk = Pair::from_seed(&hex!( - "0000000000000000000000000000000000000000000000000000000000000000" + let pk = Pair::from_seed(&array_bytes::hex2array_unchecked( + "0000000000000000000000000000000000000000000000000000000000000000", )); let public = pk.public(); - let js_signature = Signature::from_raw(hex!( + let js_signature = Signature::from_raw(array_bytes::hex2array_unchecked( "28a854d54903e056f89581c691c1f7d2ff39f8f896c9e9c22475e60902cc2b3547199e0e91fa32902028f2ca2355e8cdd16cfe19ba5e8b658c94aa80f3b81a00" )); assert!(Pair::verify_deprecated(&js_signature, b"SUBSTRATE", &public)); diff --git a/primitives/merkle-mountain-range/Cargo.toml b/primitives/merkle-mountain-range/Cargo.toml index 2be3f592b2b20..e7e203942e845 100644 --- a/primitives/merkle-mountain-range/Cargo.toml +++ b/primitives/merkle-mountain-range/Cargo.toml @@ -22,7 +22,7 @@ sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] -hex-literal = "0.3" +array-bytes = "4.1" [features] default = ["std"] diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 8a2e901aefddf..29a7e3d1a6fb6 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -535,11 +535,16 @@ mod tests { cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>() ); // check encoding correctness - assert_eq!(&encoded[0], &hex_literal::hex!("00343048656c6c6f20576f726c6421")); + assert_eq!( + &encoded[0], + &array_bytes::hex2bytes_unchecked("00343048656c6c6f20576f726c6421") + ); assert_eq!( encoded[1].as_slice(), - hex_literal::hex!("01c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd") - .as_ref() + array_bytes::hex2bytes_unchecked( + "01c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd" + ) + .as_slice() ); } diff --git a/primitives/state-machine/Cargo.toml 
b/primitives/state-machine/Cargo.toml index 5de6eb7112eea..860bca2a9de18 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -31,7 +31,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-trie = { version = "6.0.0", default-features = false, path = "../trie" } [dev-dependencies] -hex-literal = "0.3.4" +array-bytes = "4.1" pretty_assertions = "1.2.1" rand = "0.7.2" sp-runtime = { version = "6.0.0", path = "../runtime" } diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 236a515a2412d..fdc50e3f8f207 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -356,7 +356,6 @@ impl sp_externalities::ExtensionStore for BasicExternalities { #[cfg(test)] mod tests { use super::*; - use hex_literal::hex; use sp_core::{ map, storage::{well_known_keys::CODE, Storage, StorageChild}, @@ -368,10 +367,11 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - const ROOT: [u8; 32] = - hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); + let root = array_bytes::hex2bytes_unchecked( + "39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa", + ); - assert_eq!(&ext.storage_root(StateVersion::default())[..], &ROOT); + assert_eq!(&ext.storage_root(StateVersion::default())[..], &root); } #[test] diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 001b4b656c34e..ef5c02d2ec1fa 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -743,7 +743,6 @@ impl<'a> OverlayedExtensions<'a> { mod tests { use super::*; use crate::{ext::Ext, InMemoryBackend}; - use hex_literal::hex; use sp_core::{traits::Externalities, Blake2Hasher}; use std::collections::BTreeMap; @@ -870,10 +869,11 @@ mod tests { let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); - const ROOT: [u8; 32] = - hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); + let root = array_bytes::hex2bytes_unchecked( + "39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa", + ); - assert_eq!(&ext.storage_root(state_version)[..], &ROOT); + assert_eq!(&ext.storage_root(state_version)[..], &root); } #[test] diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 46b7573d9d1a6..e94d34b5560cd 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -334,7 +334,6 @@ where #[cfg(test)] mod tests { use super::*; - use hex_literal::hex; use sp_core::{storage::ChildInfo, traits::Externalities, H256}; use sp_runtime::traits::BlakeTwo256; @@ -346,8 +345,9 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - let root = - H256::from(hex!("ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489")); + let root = array_bytes::hex_n_into_unchecked::( + "ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489", + ); assert_eq!(H256::from_slice(ext.storage_root(Default::default()).as_slice()), root); } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 
291615c9354c1..6d2d57b590e6a 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -36,8 +36,8 @@ sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] +array-bytes = "4.1" criterion = "0.3.3" -hex-literal = "0.3.4" trie-bench = "0.31.0" trie-standardmap = "0.15.2" sp-runtime = { version = "6.0.0", path = "../runtime" } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index fafa2a2891ce4..c2ab12dbee14c 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -542,7 +542,6 @@ mod tests { use super::*; use codec::{Compact, Decode, Encode}; use hash_db::{HashDB, Hasher}; - use hex_literal::hex; use sp_core::Blake2Hasher; use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; @@ -845,8 +844,14 @@ mod tests { } fn iterator_works_inner() { let pairs = vec![ - (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), - (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), + ( + array_bytes::hex2bytes_unchecked("0103000000000000000464"), + array_bytes::hex2bytes_unchecked("0400000000"), + ), + ( + array_bytes::hex2bytes_unchecked("0103000000000000000469"), + array_bytes::hex2bytes_unchecked("0401000000"), + ), ]; let mut mdb = MemoryDB::default(); @@ -859,7 +864,7 @@ mod tests { let mut iter_pairs = Vec::new(); for pair in iter { let (key, value) = pair.unwrap(); - iter_pairs.push((key, value.to_vec())); + iter_pairs.push((key, value)); } assert_eq!(pairs, iter_pairs); @@ -868,15 +873,15 @@ mod tests { #[test] fn proof_non_inclusion_works() { let pairs = vec![ - (hex!("0102").to_vec(), hex!("01").to_vec()), - (hex!("0203").to_vec(), hex!("0405").to_vec()), + (array_bytes::hex2bytes_unchecked("0102"), array_bytes::hex2bytes_unchecked("01")), + (array_bytes::hex2bytes_unchecked("0203"), array_bytes::hex2bytes_unchecked("0405")), ]; let mut memdb = MemoryDB::default(); let mut root = Default::default(); populate_trie::(&mut memdb, &mut root, &pairs); - let non_included_key: Vec = hex!("0909").to_vec(); + let non_included_key: Vec = array_bytes::hex2bytes_unchecked("0909"); let proof = generate_trie_proof::(&memdb, root, &[non_included_key.clone()]) .unwrap(); @@ -893,7 +898,7 @@ mod tests { assert!(verify_trie_proof::>( &root, &proof, - &[(non_included_key, Some(hex!("1010").to_vec()))], + &[(non_included_key, Some(array_bytes::hex2bytes_unchecked("1010")))], ) .is_err()); } @@ -901,8 +906,8 @@ mod tests { #[test] fn proof_inclusion_works() { let pairs = vec![ - (hex!("0102").to_vec(), hex!("01").to_vec()), - (hex!("0203").to_vec(), hex!("0405").to_vec()), + (array_bytes::hex2bytes_unchecked("0102"), array_bytes::hex2bytes_unchecked("01")), + (array_bytes::hex2bytes_unchecked("0203"), array_bytes::hex2bytes_unchecked("0405")), ]; let mut memdb = MemoryDB::default(); @@ -932,7 +937,7 @@ mod tests { assert!(verify_trie_proof::( &root, &proof, - &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] + &[(array_bytes::hex2bytes_unchecked("4242"), Some(pairs[0].1.clone()))] ) .is_err()); diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index f8a1f437d45e1..fcac37441ba98 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -12,10 +12,10 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] +array-bytes = "4.1" async-trait = "0.1.57" codec = { package = 
"parity-scale-codec", version = "3.0.0" } futures = "0.3.21" -hex = "0.4" serde = "1.0.136" serde_json = "1.0.85" sc-client-api = { version = "4.0.0-dev", path = "../../client/api" } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 3115be58425e6..be4549c9957c0 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -346,7 +346,7 @@ impl RpcHandlersExt for RpcHandlers { "params": ["0x{}"], "id": 0 }}"#, - hex::encode(extrinsic.encode()) + array_bytes::bytes2hex("", &extrinsic.encode()) )) .await .expect("valid JSON-RPC request object; qed"); diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 9e5cb12070af9..108d0d338c2b3 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -13,13 +13,13 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +array-bytes = "4.1" chrono = "0.4" clap = { version = "3.1.18", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } comfy-table = { version = "6.0.0", default-features = false } handlebars = "4.2.2" hash-db = "0.15.2" -hex = "0.4.3" Inflector = "0.11.4" itertools = "0.10.3" kvdb = "0.11.0" diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index 36215c8a0586d..32282b314b804 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -131,7 +131,9 @@ where fn consumed_weight(&self, block: &BlockId) -> Result { // Hard-coded key for System::BlockWeight. It could also be passed in as argument // for the benchmark, but I think this should work as well. - let hash = hex::decode("26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96")?; + let hash = array_bytes::hex2bytes( + "26aa394eea5630e07c48ae0c9558cef734abf5cb34d6244378cddbf18e849d96", + )?; let key = StorageKey(hash); let mut raw_weight = &self From 4e53bdc85f240162e554bb2b22461822ef38af76 Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Wed, 21 Sep 2022 10:57:02 +0200 Subject: [PATCH 155/348] Bound staking storage items (#12230) * replace pallet level unboundedness to individual storage items * bound structs * bounding history depth * defensive error * use the era history depth from config * clean up history depth from storage in v11 * keep the name HistoryDepth for the new configuration value * use u32 for history depth in node runtime * improve doc comments * add HistoryDepth to mock runtimes with pallet-staking * rustfmt * refactor and doc improve * apply re-benchmarked weight for staking * pr feedback improvements * test for claimed rewards following the expected bounds * refactor test to calculate first and last reward era programmatically * verify previous eras cannot be claimed * add migration v12 * ".git/.scripts/bench-bot.sh" pallet dev pallet_staking * fix compiler error * corrupting history depth does not lead to catastrophic issue * fix new line * remove unused import * fmt * add test to document scenario where history depth is reduced without migration * fmt * Update frame/staking/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/staking/src/migrations.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * doc for all storage items bounded by HistoryDepth * Update frame/staking/src/pallet/mod.rs Co-authored-by: Kian Paimani 
<5588131+kianenigma@users.noreply.github.com> * Update frame/staking/src/tests.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * pr feedback fixes * Update frame/staking/src/tests.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * remove extra checks * fix merge * fmt Co-authored-by: command-bot <> Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: kianenigma --- bin/node/runtime/src/lib.rs | 2 + frame/babe/src/mock.rs | 1 + frame/grandpa/src/mock.rs | 1 + .../nomination-pools/benchmarking/src/mock.rs | 1 + .../nomination-pools/test-staking/src/mock.rs | 1 + frame/offences/benchmarking/src/mock.rs | 1 + frame/session/benchmarking/src/mock.rs | 1 + frame/staking/src/benchmarking.rs | 21 +- frame/staking/src/lib.rs | 38 +- frame/staking/src/migrations.rs | 52 +- frame/staking/src/mock.rs | 2 + frame/staking/src/pallet/impls.rs | 25 +- frame/staking/src/pallet/mod.rs | 109 ++-- frame/staking/src/slashing.rs | 4 +- frame/staking/src/tests.rs | 348 ++++++++--- frame/staking/src/weights.rs | 545 +++++++++--------- 16 files changed, 669 insertions(+), 483 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 21ed23d6244a5..0d8f00550ed1a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -535,6 +535,7 @@ parameter_types! { pub const MaxNominatorRewardedPerValidator: u32 = 256; pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub OffchainRepeat: BlockNumber = 5; + pub HistoryDepth: u32 = 84; } pub struct StakingBenchmarkingConfig; @@ -572,6 +573,7 @@ impl pallet_staking::Config for Runtime { // This a placeholder, to be introduced in the next PR as an instance of bags-list type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = HistoryDepth; type OnStakerSlash = NominationPools; type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = StakingBenchmarkingConfig; diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 8264cd11ddc88..46aeabe743fe2 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -204,6 +204,7 @@ impl pallet_staking::Config for Test { type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = ConstU32<84>; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index a26000c0f7f2c..573a74d2bfae8 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -208,6 +208,7 @@ impl pallet_staking::Config for Test { type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = ConstU32<84>; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index 7841686114dfd..0f617f0bf6f4b 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -115,6 +115,7 @@ impl pallet_staking::Config for Runtime { type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = 
ConstU32<32>; + type HistoryDepth = ConstU32<84>; type OnStakerSlash = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index 39111b7f813c7..8c04e40d71df4 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -129,6 +129,7 @@ impl pallet_staking::Config for Runtime { type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = ConstU32<84>; type OnStakerSlash = Pools; type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index c6d8589870f52..c4c1d2aca8d07 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -182,6 +182,7 @@ impl pallet_staking::Config for Test { type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = ConstU32<84>; type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 60b17213d2f1c..f86ddfeedc08b 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -174,6 +174,7 @@ impl pallet_staking::Config for Test { type ElectionProvider = onchain::UnboundedExecution; type GenesisElectionProvider = Self::ElectionProvider; type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = ConstU32<84>; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type OnStakerSlash = (); diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index c270b5933e647..1ea05bba3b579 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -660,25 +660,6 @@ benchmarks! { assert!(original_bonded < new_bonded); } - set_history_depth { - let e in 1 .. 100; - HistoryDepth::::put(e); - CurrentEra::::put(e); - let dummy = || -> T::AccountId { codec::Decode::decode(&mut TrailingZeroInput::zeroes()).unwrap() }; - for i in 0 .. e { - >::insert(i, dummy(), Exposure::>::default()); - >::insert(i, dummy(), Exposure::>::default()); - >::insert(i, dummy(), ValidatorPrefs::default()); - >::insert(i, BalanceOf::::one()); - >::insert(i, EraRewardPoints::::default()); - >::insert(i, BalanceOf::::one()); - ErasStartSessionIndex::::insert(i, i); - } - }: _(RawOrigin::Root, EraIndex::zero(), u32::MAX) - verify { - assert_eq!(HistoryDepth::::get(), 0); - } - reap_stash { let s in 1 .. MAX_SPANS; // clean up any existing state. @@ -698,7 +679,7 @@ benchmarks! 
{ active: T::Currency::minimum_balance() - One::one(), total: T::Currency::minimum_balance() - One::one(), unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: Default::default(), }; Ledger::::insert(&controller, l); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 4ff9e99306727..56fcb1965bc13 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -299,12 +299,12 @@ pub mod weights; mod pallet; -use codec::{Decode, Encode, HasCompact}; +use codec::{Decode, Encode, HasCompact, MaxEncodedLen}; use frame_support::{ parameter_types, traits::{Currency, Defensive, Get}, weights::Weight, - BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, + BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -354,7 +354,7 @@ parameter_types! { } /// Information regarding the active era (era in used in session). -#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ActiveEraInfo { /// Index of era. pub index: EraIndex, @@ -395,7 +395,7 @@ pub enum StakerStatus { } /// A destination account for payment. -#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub enum RewardDestination { /// Pay into the stash account, increasing the amount at stake accordingly. Staked, @@ -416,7 +416,7 @@ impl Default for RewardDestination { } /// Preference of what happens regarding validation. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, Default)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, Default, MaxEncodedLen)] pub struct ValidatorPrefs { /// Reward that validator takes up-front; only the rest is split between themselves and /// nominators. @@ -429,8 +429,8 @@ pub struct ValidatorPrefs { } /// Just a Balance/BlockNumber tuple to encode when a chunk of funds will be unlocked. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -pub struct UnlockChunk { +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub struct UnlockChunk { /// Amount of funds to be unlocked. #[codec(compact)] value: Balance, @@ -440,7 +440,16 @@ pub struct UnlockChunk { } /// The ledger of a (bonded) stash. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebugNoBound, TypeInfo)] +#[derive( + PartialEqNoBound, + EqNoBound, + CloneNoBound, + Encode, + Decode, + RuntimeDebugNoBound, + TypeInfo, + MaxEncodedLen, +)] #[scale_info(skip_type_params(T))] pub struct StakingLedger { /// The stash account whose balance is actually locked and at stake. @@ -459,7 +468,7 @@ pub struct StakingLedger { pub unlocking: BoundedVec>, MaxUnlockingChunks>, /// List of eras for which the stakers behind a validator have claimed rewards. Only updated /// for validators. - pub claimed_rewards: Vec, + pub claimed_rewards: BoundedVec, } impl StakingLedger { @@ -470,7 +479,7 @@ impl StakingLedger { total: Zero::zero(), active: Zero::zero(), unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: Default::default(), } } @@ -676,7 +685,9 @@ impl StakingLedger { } /// A record of the nominations made by a specific account. 
-#[derive(PartialEqNoBound, EqNoBound, Clone, Encode, Decode, RuntimeDebugNoBound, TypeInfo)] +#[derive( + PartialEqNoBound, EqNoBound, Clone, Encode, Decode, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen, +)] #[codec(mel_bound())] #[scale_info(skip_type_params(T))] pub struct Nominations { @@ -850,7 +861,7 @@ impl VoterBagsList + V12_0_0, // remove `HistoryDepth`. } impl Default for Releases { diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 427570e5cf14f..f47545af694cf 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -20,6 +20,53 @@ use super::*; use frame_election_provider_support::SortedListProvider; use frame_support::traits::OnRuntimeUpgrade; +pub mod v12 { + use super::*; + use frame_support::{pallet_prelude::ValueQuery, storage_alias}; + + #[storage_alias] + type HistoryDepth = StorageValue, u32, ValueQuery>; + + /// Clean up `HistoryDepth` from storage. + /// + /// We will be depending on the configurable value of `HistoryDepth` post + /// this release. + pub struct MigrateToV12(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV12 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + frame_support::ensure!( + T::HistoryDepth::get() == HistoryDepth::::get(), + "Provided value of HistoryDepth should be same as the existing storage value" + ); + + Ok(Default::default()) + } + + fn on_runtime_upgrade() -> frame_support::weights::Weight { + if StorageVersion::::get() == Releases::V11_0_0 { + HistoryDepth::::kill(); + StorageVersion::::put(Releases::V12_0_0); + + log!(info, "v12 applied successfully"); + T::DbWeight::get().reads_writes(1, 2) + } else { + log!(warn, "Skipping v12, should be removed"); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + frame_support::ensure!( + StorageVersion::::get() == crate::Releases::V12_0_0, + "v12 not applied" + ); + Ok(()) + } + } +} + pub mod v11 { use super::*; use frame_support::{ @@ -82,11 +129,6 @@ pub mod v11 { #[cfg(feature = "try-runtime")] fn post_upgrade(_state: Vec) -> Result<(), &'static str> { - frame_support::ensure!( - StorageVersion::::get() == crate::Releases::V11_0_0, - "wrong version after the upgrade" - ); - let old_pallet_name = N::get(); let new_pallet_name =
<P as PalletInfoAccess>
::name(); diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 5682184929a5b..385087f9bec41 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -236,6 +236,7 @@ const THRESHOLDS: [sp_npos_elections::VoteWeight; 9] = parameter_types! { pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; pub static MaxNominations: u32 = 16; + pub static HistoryDepth: u32 = 80; pub static RewardOnUnbalanceWasCalled: bool = false; pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); } @@ -301,6 +302,7 @@ impl crate::pallet::pallet::Config for Test { type VoterList = VoterBagsList; type TargetList = UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = HistoryDepth; type OnStakerSlash = OnStakerSlashMock; type BenchmarkingConfig = TestBenchmarkingConfig; type WeightInfo = (); diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 62faaaf68ef62..abb8649169fdd 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -25,8 +25,8 @@ use frame_support::{ dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - Currency, CurrencyToVote, Defensive, EstimateNextNewSession, Get, Imbalance, - LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons, + Currency, CurrencyToVote, Defensive, DefensiveResult, EstimateNextNewSession, Get, + Imbalance, LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons, }, weights::Weight, }; @@ -101,7 +101,7 @@ impl Pallet { Error::::InvalidEraToReward .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) })?; - let history_depth = Self::history_depth(); + let history_depth = T::HistoryDepth::get(); ensure!( era <= current_era && era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward @@ -123,11 +123,18 @@ impl Pallet { ledger .claimed_rewards .retain(|&x| x >= current_era.saturating_sub(history_depth)); + match ledger.claimed_rewards.binary_search(&era) { Ok(_) => return Err(Error::::AlreadyClaimed .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))), - Err(pos) => ledger.claimed_rewards.insert(pos, era), + Err(pos) => ledger + .claimed_rewards + .try_insert(pos, era) + // Since we retain era entries in `claimed_rewards` only upto + // `HistoryDepth`, following bound is always expected to be + // satisfied. + .defensive_map_err(|_| Error::::BoundNotMet)?, } let exposure = >::get(&era, &ledger.stash); @@ -417,7 +424,7 @@ impl Pallet { ErasStartSessionIndex::::insert(&new_planned_era, &start_session_index); // Clean old era information. 
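The `MigrateToV12` struct introduced in migrations.rs above only runs once a runtime schedules it. A hedged sketch of that wiring, assuming the conventional runtime-crate aliases `Runtime`, `Block` and `AllPalletsWithSystem` (none of which are part of this patch) and that the pallet's `migrations` module stays publicly re-exported:

pub type Migrations = (pallet_staking::migrations::v12::MigrateToV12<Runtime>,);

pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    Migrations,
>;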
- if let Some(old_era) = new_planned_era.checked_sub(Self::history_depth() + 1) { + if let Some(old_era) = new_planned_era.checked_sub(T::HistoryDepth::get() + 1) { Self::clear_era_information(old_era); } @@ -966,7 +973,7 @@ impl ElectionDataProvider for Pallet { active: stake, total: stake, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: Default::default(), }, ); @@ -984,7 +991,7 @@ impl ElectionDataProvider for Pallet { active: stake, total: stake, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: Default::default(), }, ); Self::do_add_validator( @@ -1025,7 +1032,7 @@ impl ElectionDataProvider for Pallet { active: stake, total: stake, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: Default::default(), }, ); Self::do_add_validator( @@ -1046,7 +1053,7 @@ impl ElectionDataProvider for Pallet { active: stake, total: stake, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: Default::default(), }, ); Self::do_add_nominator( diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 49f53efbb08b0..d113c0bb2243c 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -22,10 +22,12 @@ use frame_support::{ dispatch::Codec, pallet_prelude::*, traits::{ - Currency, CurrencyToVote, Defensive, DefensiveSaturating, EnsureOrigin, - EstimateNextNewSession, Get, LockIdentifier, LockableCurrency, OnUnbalanced, UnixTime, + Currency, CurrencyToVote, Defensive, DefensiveResult, DefensiveSaturating, EnsureOrigin, + EstimateNextNewSession, Get, LockIdentifier, LockableCurrency, OnUnbalanced, TryCollect, + UnixTime, }, weights::Weight, + BoundedVec, }; use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; use sp_runtime::{ @@ -58,7 +60,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(crate) trait Store)] - #[pallet::without_storage_info] pub struct Pallet(_); /// Possible operations on the configuration values of this pallet. @@ -124,6 +125,28 @@ pub mod pallet { #[pallet::constant] type MaxNominations: Get; + /// Number of eras to keep in history. + /// + /// Following information is kept for eras in `[current_era - + /// HistoryDepth, current_era]`: `ErasStakers`, `ErasStakersClipped`, + /// `ErasValidatorPrefs`, `ErasValidatorReward`, `ErasRewardPoints`, + /// `ErasTotalStake`, `ErasStartSessionIndex`, + /// `StakingLedger.claimed_rewards`. + /// + /// Must be more than the number of eras delayed by session. + /// I.e. active era must always be in history. I.e. `active_era > + /// current_era - history_depth` must be guaranteed. + /// + /// If migrating an existing pallet from storage value to config value, + /// this should be set to same value or greater as in storage. + /// + /// Note: `HistoryDepth` is used as the upper bound for the `BoundedVec` + /// item `StakingLedger.claimed_rewards`. Setting this value lower than + /// the existing value can lead to inconsistencies and will need to be + /// handled properly in a migration. + #[pallet::constant] + type HistoryDepth: Get; + /// Tokens have been minted and are unused for validator-reward. /// See [Era payout](./index.html#era-payout). type RewardRemainder: OnUnbalanced>; @@ -230,22 +253,6 @@ pub mod pallet { type WeightInfo: WeightInfo; } - #[pallet::type_value] - pub(crate) fn HistoryDepthOnEmpty() -> u32 { - 84u32 - } - - /// Number of eras to keep in history. - /// - /// Information is kept for eras in `[current_era - history_depth; current_era]`. 
- /// - /// Must be more than the number of eras delayed by session otherwise. I.e. active era must - /// always be in history. I.e. `active_era > current_era - history_depth` must be - /// guaranteed. - #[pallet::storage] - #[pallet::getter(fn history_depth)] - pub(crate) type HistoryDepth = StorageValue<_, u32, ValueQuery, HistoryDepthOnEmpty>; - /// The ideal number of staking participants. #[pallet::storage] #[pallet::getter(fn validator_count)] @@ -261,6 +268,7 @@ pub mod pallet { /// invulnerables) and restricted to testnets. #[pallet::storage] #[pallet::getter(fn invulnerables)] + #[pallet::unbounded] pub type Invulnerables = StorageValue<_, Vec, ValueQuery>; /// Map from all locked "stash" accounts to the controller account. @@ -364,6 +372,7 @@ pub mod pallet { /// If stakers hasn't been set or has been removed then empty exposure is returned. #[pallet::storage] #[pallet::getter(fn eras_stakers)] + #[pallet::unbounded] pub type ErasStakers = StorageDoubleMap< _, Twox64Concat, @@ -386,6 +395,7 @@ pub mod pallet { /// Is it removed after `HISTORY_DEPTH` eras. /// If stakers hasn't been set or has been removed then empty exposure is returned. #[pallet::storage] + #[pallet::unbounded] #[pallet::getter(fn eras_stakers_clipped)] pub type ErasStakersClipped = StorageDoubleMap< _, @@ -425,6 +435,7 @@ pub mod pallet { /// Rewards for the last `HISTORY_DEPTH` eras. /// If reward hasn't been set or has been removed then 0 reward is returned. #[pallet::storage] + #[pallet::unbounded] #[pallet::getter(fn eras_reward_points)] pub type ErasRewardPoints = StorageMap<_, Twox64Concat, EraIndex, EraRewardPoints, ValueQuery>; @@ -456,6 +467,7 @@ pub mod pallet { /// All unapplied slashes that are queued for later. #[pallet::storage] + #[pallet::unbounded] pub type UnappliedSlashes = StorageMap< _, Twox64Concat, @@ -469,6 +481,7 @@ pub mod pallet { /// Must contains information for eras for the range: /// `[active_era - bounding_duration; active_era]` #[pallet::storage] + #[pallet::unbounded] pub(crate) type BondedEras = StorageValue<_, Vec<(EraIndex, SessionIndex)>, ValueQuery>; @@ -491,6 +504,7 @@ pub mod pallet { /// Slashing spans for stash accounts. #[pallet::storage] + #[pallet::unbounded] pub(crate) type SlashingSpans = StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; @@ -522,6 +536,7 @@ pub mod pallet { /// whether a given validator has previously offended using binary search. It gets cleared when /// the era ends. #[pallet::storage] + #[pallet::unbounded] #[pallet::getter(fn offending_validators)] pub type OffendingValidators = StorageValue<_, Vec<(u32, bool)>, ValueQuery>; @@ -540,7 +555,6 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { - pub history_depth: u32, pub validator_count: u32, pub minimum_validator_count: u32, pub invulnerables: Vec, @@ -559,7 +573,6 @@ pub mod pallet { impl Default for GenesisConfig { fn default() -> Self { GenesisConfig { - history_depth: 84u32, validator_count: Default::default(), minimum_validator_count: Default::default(), invulnerables: Default::default(), @@ -578,7 +591,6 @@ pub mod pallet { #[pallet::genesis_build] impl GenesisBuild for GenesisConfig { fn build(&self) { - HistoryDepth::::put(self.history_depth); ValidatorCount::::put(self.validator_count); MinimumValidatorCount::::put(self.minimum_validator_count); Invulnerables::::put(&self.invulnerables); @@ -729,6 +741,8 @@ pub mod pallet { TooManyValidators, /// Commission is too low. Must be at least `MinCommission`. 
CommissionTooLow, + /// Some bound is not met. + BoundNotMet, } #[pallet::hooks] @@ -830,7 +844,7 @@ pub mod pallet { >::insert(&stash, payee); let current_era = CurrentEra::::get().unwrap_or(0); - let history_depth = Self::history_depth(); + let history_depth = T::HistoryDepth::get(); let last_reward_era = current_era.saturating_sub(history_depth); let stash_balance = T::Currency::free_balance(&stash); @@ -841,7 +855,12 @@ pub mod pallet { total: value, active: value, unlocking: Default::default(), - claimed_rewards: (last_reward_era..current_era).collect(), + claimed_rewards: (last_reward_era..current_era) + .try_collect() + // Since last_reward_era is calculated as `current_era - + // HistoryDepth`, following bound is always expected to be + // satisfied. + .defensive_map_err(|_| Error::::BoundNotMet)?, }; Self::update_ledger(&controller, &item); Ok(()) @@ -1466,48 +1485,6 @@ pub mod pallet { Ok(Some(T::WeightInfo::rebond(removed_chunks)).into()) } - /// Set `HistoryDepth` value. This function will delete any history information - /// when `HistoryDepth` is reduced. - /// - /// Parameters: - /// - `new_history_depth`: The new history depth you would like to set. - /// - `era_items_deleted`: The number of items that will be deleted by this dispatch. This - /// should report all the storage items that will be deleted by clearing old era history. - /// Needed to report an accurate weight for the dispatch. Trusted by `Root` to report an - /// accurate number. - /// - /// RuntimeOrigin must be root. - /// - /// # - /// - E: Number of history depths removed, i.e. 10 -> 7 = 3 - /// - Weight: O(E) - /// - DB Weight: - /// - Reads: Current Era, History Depth - /// - Writes: History Depth - /// - Clear Prefix Each: Era Stakers, EraStakersClipped, ErasValidatorPrefs - /// - Writes Each: ErasValidatorReward, ErasRewardPoints, ErasTotalStake, - /// ErasStartSessionIndex - /// # - #[pallet::weight(T::WeightInfo::set_history_depth(*_era_items_deleted))] - pub fn set_history_depth( - origin: OriginFor, - #[pallet::compact] new_history_depth: EraIndex, - #[pallet::compact] _era_items_deleted: u32, - ) -> DispatchResult { - ensure_root(origin)?; - if let Some(current_era) = Self::current_era() { - HistoryDepth::::mutate(|history_depth| { - let last_kept = current_era.saturating_sub(*history_depth); - let new_last_kept = current_era.saturating_sub(new_history_depth); - for era_index in last_kept..new_last_kept { - Self::clear_era_information(era_index); - } - *history_depth = new_history_depth - }) - } - Ok(()) - } - /// Remove all data structures concerning a staker/stash once it is at a state where it can /// be considered `dust` in the staking system. The requirements are: /// diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 7372c4390f816..f3272a25fab5c 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -53,7 +53,7 @@ use crate::{ BalanceOf, Config, Error, Exposure, NegativeImbalanceOf, Pallet, Perbill, SessionInterface, Store, UnappliedSlash, }; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ensure, traits::{Currency, Defensive, Get, Imbalance, OnUnbalanced}, @@ -182,7 +182,7 @@ impl SlashingSpans { } /// A slashing-span record for a particular stash. 
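The reworked `bond` above pre-fills `claimed_rewards` with `TryCollect` instead of an unbounded `collect`. A small sketch of that conversion with made-up numbers (a depth of 84 and a current era of 100, not values taken from the patch):

use frame_support::{traits::{ConstU32, TryCollect}, BoundedVec};

fn prefilled_claimed_rewards_sketch() {
    let history_depth: u32 = 84;
    let current_era: u32 = 100;
    let last_reward_era = current_era.saturating_sub(history_depth); // 16
    // The half-open range 16..100 has exactly `history_depth` elements, so it always fits.
    let claimed: Result<BoundedVec<u32, ConstU32<84>>, _> =
        (last_reward_era..current_era).try_collect();
    assert_eq!(claimed.map(|v| v.len()), Ok(84));
}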
-#[derive(Encode, Decode, Default, TypeInfo)] +#[derive(Encode, Decode, Default, TypeInfo, MaxEncodedLen)] pub(crate) struct SpanRecord { slashed: Balance, paid_out: Balance, diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index cc2b37d2ba910..6798a78030f9e 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -148,14 +148,14 @@ fn basic_setup_works() { // Account 10 controls the stash from account 11, which is 100 * balance_factor units assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { + Staking::ledger(&10).unwrap(), + StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![] - }) + claimed_rewards: bounded_vec![], + } ); // Account 20 controls the stash from account 21, which is 200 * balance_factor units assert_eq!( @@ -165,7 +165,7 @@ fn basic_setup_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], }) ); // Account 1 does not control any stash @@ -188,7 +188,7 @@ fn basic_setup_works() { total: 500, active: 500, unlocking: Default::default(), - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], }) ); assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); @@ -433,7 +433,7 @@ fn staking_should_work() { total: 1500, active: 1500, unlocking: Default::default(), - claimed_rewards: vec![0], + claimed_rewards: bounded_vec![0], }) ); // e.g. it cannot reserve more than 500 that it has free from the total 2000 @@ -1022,7 +1022,7 @@ fn reward_destination_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1045,7 +1045,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: vec![0], + claimed_rewards: bounded_vec![0], }) ); @@ -1073,7 +1073,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: vec![0, 1], + claimed_rewards: bounded_vec![0, 1], }) ); @@ -1102,7 +1102,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: Default::default(), - claimed_rewards: vec![0, 1, 2], + claimed_rewards: bounded_vec![0, 1, 2], }) ); // Check that amount in staked account is NOT increased. @@ -1164,7 +1164,7 @@ fn bond_extra_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1181,7 +1181,7 @@ fn bond_extra_works() { total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1195,7 +1195,7 @@ fn bond_extra_works() { total: 1000000, active: 1000000, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); }); @@ -1233,7 +1233,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); assert_eq!( @@ -1251,7 +1251,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); // Exposure is a snapshot! only updated after the next era update. 
@@ -1272,7 +1272,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 1000 + 100, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); // Exposure is now updated. @@ -1290,7 +1290,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], }), ); @@ -1303,7 +1303,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], }), ); @@ -1319,7 +1319,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 100, unlocking: bounded_vec![UnlockChunk { value: 1000, era: 2 + 3 }], - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], }), ); @@ -1335,7 +1335,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 100, active: 100, unlocking: Default::default(), - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], }), ); }) @@ -1405,7 +1405,7 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1424,7 +1424,7 @@ fn rebond_works() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 2 + 3 }], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1437,7 +1437,7 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1450,7 +1450,7 @@ fn rebond_works() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 5 }], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1463,7 +1463,7 @@ fn rebond_works() { total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 5 }], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1476,7 +1476,7 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1491,7 +1491,7 @@ fn rebond_works() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 5 }], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1504,7 +1504,7 @@ fn rebond_works() { total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 5 }], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); }) @@ -1531,7 +1531,7 @@ fn rebond_is_fifo() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1546,7 +1546,7 @@ fn rebond_is_fifo() { total: 1000, active: 600, unlocking: bounded_vec![UnlockChunk { value: 400, era: 2 + 3 }], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1564,7 +1564,7 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 300, era: 3 + 3 }, ], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1583,7 +1583,7 @@ fn rebond_is_fifo() { UnlockChunk { value: 300, era: 3 + 3 }, UnlockChunk { value: 200, era: 4 + 3 }, ], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1599,7 +1599,7 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 100, era: 3 + 3 }, ], - claimed_rewards: vec![], + 
claimed_rewards: bounded_vec![], }) ); }) @@ -1628,7 +1628,7 @@ fn rebond_emits_right_value_in_event() { total: 1000, active: 100, unlocking: bounded_vec![UnlockChunk { value: 900, era: 1 + 3 }], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -1641,7 +1641,7 @@ fn rebond_emits_right_value_in_event() { total: 1000, active: 200, unlocking: bounded_vec![UnlockChunk { value: 800, era: 1 + 3 }], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); // Event emitted should be correct @@ -1656,7 +1656,7 @@ fn rebond_emits_right_value_in_event() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); // Event emitted should be correct, only 800 @@ -1692,7 +1692,7 @@ fn reward_to_stake_works() { total: 69, active: 69, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }, ); @@ -1756,7 +1756,7 @@ fn reap_stash_works() { total: 5, active: 5, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }, ); @@ -1891,7 +1891,7 @@ fn bond_with_no_staked_value() { active: 0, total: 5, unlocking: bounded_vec![UnlockChunk { value: 5, era: 3 }], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); @@ -2990,7 +2990,7 @@ fn staker_cannot_bail_deferred_slash() { active: 0, total: 500, stash: 101, - claimed_rewards: Default::default(), + claimed_rewards: bounded_vec![], unlocking: bounded_vec![UnlockChunk { era: 4u32, value: 500 }], } ); @@ -3397,7 +3397,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // * double claim of one era fails ExtBuilder::default().nominate(true).build_and_execute(|| { // Consumed weight for all payout_stakers dispatches that fail - let err_weight = weights::SubstrateWeight::::payout_stakers_alive_staked(0); + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); let init_balance_10 = Balances::total_balance(&10); let init_balance_100 = Balances::total_balance(&100); @@ -3432,7 +3432,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { assert!(total_payout_2 != total_payout_0); assert!(total_payout_2 != total_payout_1); - mock::start_active_era(Staking::history_depth() + 1); + mock::start_active_era(HistoryDepth::get() + 1); let active_era = active_era(); @@ -3440,7 +3440,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { let current_era = Staking::current_era().unwrap(); // Last kept is 1: - assert!(current_era - Staking::history_depth() == 1); + assert!(current_era - HistoryDepth::get() == 1); assert_noop!( Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 0), // Fail: Era out of history @@ -3604,25 +3604,6 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( }); } -#[test] -fn set_history_depth_works() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(10); - Staking::set_history_depth(RuntimeOrigin::root(), 20, 0).unwrap(); - assert!(::ErasTotalStake::contains_key(10 - 4)); - assert!(::ErasTotalStake::contains_key(10 - 5)); - Staking::set_history_depth(RuntimeOrigin::root(), 4, 0).unwrap(); - assert!(::ErasTotalStake::contains_key(10 - 4)); - assert!(!::ErasTotalStake::contains_key(10 - 5)); - Staking::set_history_depth(RuntimeOrigin::root(), 3, 0).unwrap(); - assert!(!::ErasTotalStake::contains_key(10 - 4)); - assert!(!::ErasTotalStake::contains_key(10 - 5)); - Staking::set_history_depth(RuntimeOrigin::root(), 8, 0).unwrap(); - 
assert!(!::ErasTotalStake::contains_key(10 - 4)); - assert!(!::ErasTotalStake::contains_key(10 - 5)); - }); -} - #[test] fn test_payout_stakers() { // Test that payout_stakers work in general, including that only the top @@ -3686,7 +3667,7 @@ fn test_payout_stakers() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![1] + claimed_rewards: bounded_vec![1] }) ); @@ -3717,11 +3698,15 @@ fn test_payout_stakers() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (1..=14).collect() + claimed_rewards: (1..=14).collect::>().try_into().unwrap() }) ); - for i in 16..100 { + let last_era = 99; + let history_depth = HistoryDepth::get(); + let expected_last_reward_era = last_era - 1; + let expected_start_reward_era = last_era - history_depth; + for i in 16..=last_era { Staking::reward_by_ids(vec![(11, 1)]); // compute and ensure the reward amount is greater than zero. let _ = current_total_payout_for_duration(reward_time_per_era()); @@ -3729,8 +3714,16 @@ fn test_payout_stakers() { } // We clean it up as history passes - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 15)); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 98)); + assert_ok!(Staking::payout_stakers( + RuntimeOrigin::signed(1337), + 11, + expected_start_reward_era + )); + assert_ok!(Staking::payout_stakers( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era + )); assert_eq!( Staking::ledger(&10), Some(StakingLedger { @@ -3738,7 +3731,7 @@ fn test_payout_stakers() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![15, 98] + claimed_rewards: bounded_vec![expected_start_reward_era, expected_last_reward_era] }) ); @@ -3753,7 +3746,13 @@ fn test_payout_stakers() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![15, 23, 42, 69, 98] + claimed_rewards: bounded_vec![ + expected_start_reward_era, + 23, + 42, + 69, + expected_last_reward_era + ] }) ); }); @@ -3764,7 +3763,7 @@ fn payout_stakers_handles_basic_errors() { // Here we will test payouts handle all errors. ExtBuilder::default().has_stakers(false).build_and_execute(|| { // Consumed weight for all payout_stakers dispatches that fail - let err_weight = weights::SubstrateWeight::::payout_stakers_alive_staked(0); + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); // Same setup as the test above let balance = 1000; @@ -3794,32 +3793,47 @@ fn payout_stakers_handles_basic_errors() { Error::::NotStash.with_weight(err_weight) ); - for i in 3..100 { + let last_era = 99; + for i in 3..=last_era { Staking::reward_by_ids(vec![(11, 1)]); // compute and ensure the reward amount is greater than zero. let _ = current_total_payout_for_duration(reward_time_per_era()); mock::start_active_era(i); } - // We are at era 99, with history depth of 84 - // We should be able to payout era 15 through 98 (84 total eras), but not 14 or 99. + + let history_depth = HistoryDepth::get(); + let expected_last_reward_era = last_era - 1; + let expected_start_reward_era = last_era - history_depth; + + // We are at era last_era=99. Given history_depth=80, we should be able + // to payout era starting from expected_start_reward_era=19 through + // expected_last_reward_era=98 (80 total eras), but not 18 or 99. 
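Spelling out the arithmetic behind the comment above, using the `HistoryDepth` of 80 that the mock runtime sets in this patch (the function name is only for illustration):

fn payout_window_sketch() {
    let last_era: u32 = 99;
    let history_depth: u32 = 80;
    let expected_start_reward_era = last_era - history_depth; // 19: oldest claimable era
    let expected_last_reward_era = last_era - 1; // 98: era 99 is still ongoing
    assert_eq!(expected_start_reward_era, 19);
    assert_eq!(expected_last_reward_era, 98);
    // 19 through 98 inclusive is exactly `history_depth` claimable eras.
    assert_eq!(expected_last_reward_era - expected_start_reward_era + 1, 80);
}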
assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 14), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_start_reward_era - 1), Error::::InvalidEraToReward.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 99), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_last_reward_era + 1), Error::::InvalidEraToReward.with_weight(err_weight) ); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 15)); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 98)); + assert_ok!(Staking::payout_stakers( + RuntimeOrigin::signed(1337), + 11, + expected_start_reward_era + )); + assert_ok!(Staking::payout_stakers( + RuntimeOrigin::signed(1337), + 11, + expected_last_reward_era + )); // Can't claim again assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 15), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_start_reward_era), Error::::AlreadyClaimed.with_weight(err_weight) ); assert_noop!( - Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, 98), + Staking::payout_stakers(RuntimeOrigin::signed(1337), 11, expected_last_reward_era), Error::::AlreadyClaimed.with_weight(err_weight) ); }); @@ -3944,7 +3958,7 @@ fn bond_during_era_correctly_populates_claimed_rewards() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }) ); mock::start_active_era(5); @@ -3956,10 +3970,14 @@ fn bond_during_era_correctly_populates_claimed_rewards() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (0..5).collect(), + claimed_rewards: (0..5).collect::>().try_into().unwrap(), }) ); - mock::start_active_era(99); + + // make sure only era upto history depth is stored + let current_era = 99; + let last_reward_era = 99 - HistoryDepth::get(); + mock::start_active_era(current_era); bond_validator(13, 12, 1000); assert_eq!( Staking::ledger(&12), @@ -3968,7 +3986,10 @@ fn bond_during_era_correctly_populates_claimed_rewards() { total: 1000, active: 1000, unlocking: Default::default(), - claimed_rewards: (15..99).collect(), + claimed_rewards: (last_reward_era..current_era) + .collect::>() + .try_into() + .unwrap(), }) ); }); @@ -4213,7 +4234,7 @@ fn cannot_rebond_to_lower_than_ed() { total: 10 * 1000, active: 10 * 1000, unlocking: Default::default(), - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], } ); @@ -4227,7 +4248,7 @@ fn cannot_rebond_to_lower_than_ed() { total: 10 * 1000, active: 0, unlocking: bounded_vec![UnlockChunk { value: 10 * 1000, era: 3 }], - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], } ); @@ -4253,7 +4274,7 @@ fn cannot_bond_extra_to_lower_than_ed() { total: 10 * 1000, active: 10 * 1000, unlocking: Default::default(), - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], } ); @@ -4267,7 +4288,7 @@ fn cannot_bond_extra_to_lower_than_ed() { total: 10 * 1000, active: 0, unlocking: bounded_vec![UnlockChunk { value: 10 * 1000, era: 3 }], - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], } ); @@ -4294,7 +4315,7 @@ fn do_not_die_when_active_is_ed() { total: 1000 * ed, active: 1000 * ed, unlocking: Default::default(), - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], } ); @@ -4311,7 +4332,7 @@ fn do_not_die_when_active_is_ed() { total: ed, active: ed, unlocking: Default::default(), - claimed_rewards: vec![] + claimed_rewards: bounded_vec![], } ); }) @@ -5162,7 +5183,7 @@ fn 
proportional_slash_stop_slashing_if_remaining_zero() { active: 20, // we have some chunks, but they are not affected. unlocking: bounded_vec![c(1, 10), c(2, 10)], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }; assert_eq!(BondingDuration::get(), 3); @@ -5180,7 +5201,7 @@ fn proportional_ledger_slash_works() { total: 10, active: 10, unlocking: bounded_vec![], - claimed_rewards: vec![], + claimed_rewards: bounded_vec![], }; assert_eq!(BondingDuration::get(), 3); @@ -5396,3 +5417,146 @@ fn proportional_ledger_slash_works() { BTreeMap::from([(4, 0), (5, value_slashed), (6, 0), (7, 0)]) ); } + +#[test] +fn pre_bonding_era_cannot_be_claimed() { + // Verifies initial conditions of mock + ExtBuilder::default().nominate(false).build_and_execute(|| { + let history_depth = HistoryDepth::get(); + // jump to some era above history_depth + let mut current_era = history_depth + 10; + let last_reward_era = current_era - 1; + let start_reward_era = current_era - history_depth; + + // put some money in stash=3 and controller=4. + for i in 3..5 { + let _ = Balances::make_free_balance_be(&i, 2000); + } + + mock::start_active_era(current_era); + + // add a new candidate for being a validator. account 3 controlled by 4. + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 1500, RewardDestination::Controller)); + + let claimed_rewards: BoundedVec<_, _> = + (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); + assert_eq!( + Staking::ledger(&4).unwrap(), + StakingLedger { + stash: 3, + total: 1500, + active: 1500, + unlocking: Default::default(), + claimed_rewards, + } + ); + + // start next era + current_era = current_era + 1; + mock::start_active_era(current_era); + + // claiming reward for last era in which validator was active works + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(4), 3, current_era - 1)); + + // consumed weight for all payout_stakers dispatches that fail + let err_weight = ::WeightInfo::payout_stakers_alive_staked(0); + // cannot claim rewards for an era before bonding occured as it is + // already marked as claimed. + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(4), 3, current_era - 2), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // decoding will fail now since Staking Ledger is in corrupt state + HistoryDepth::set(history_depth - 1); + assert_eq!(Staking::ledger(&4), None); + + // make sure stakers still cannot claim rewards that they are not meant to + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(4), 3, current_era - 2), + Error::::NotController + ); + + // fix the corrupted state for post conditions check + HistoryDepth::set(history_depth); + }); +} + +#[test] +fn reducing_history_depth_without_migration() { + // Verifies initial conditions of mock + ExtBuilder::default().nominate(false).build_and_execute(|| { + let original_history_depth = HistoryDepth::get(); + let mut current_era = original_history_depth + 10; + let last_reward_era = current_era - 1; + let start_reward_era = current_era - original_history_depth; + + // put some money in (stash, controller)=(3,4),(5,6). + for i in 3..7 { + let _ = Balances::make_free_balance_be(&i, 2000); + } + + // start current era + mock::start_active_era(current_era); + + // add a new candidate for being a staker. account 3 controlled by 4. + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 1500, RewardDestination::Controller)); + + // all previous era before the bonding action should be marked as + // claimed. 
+ let claimed_rewards: BoundedVec<_, _> = + (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); + assert_eq!( + Staking::ledger(&4).unwrap(), + StakingLedger { + stash: 3, + total: 1500, + active: 1500, + unlocking: Default::default(), + claimed_rewards, + } + ); + + // next era + current_era = current_era + 1; + mock::start_active_era(current_era); + + // claiming reward for last era in which validator was active works + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(4), 3, current_era - 1)); + + // next era + current_era = current_era + 1; + mock::start_active_era(current_era); + + // history_depth reduced without migration + let history_depth = original_history_depth - 1; + HistoryDepth::set(history_depth); + // claiming reward does not work anymore + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(4), 3, current_era - 1), + Error::::NotController + ); + + // new stakers can still bond + assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 6, 1200, RewardDestination::Controller)); + + // new staking ledgers created will be bounded by the current history depth + let last_reward_era = current_era - 1; + let start_reward_era = current_era - history_depth; + let claimed_rewards: BoundedVec<_, _> = + (start_reward_era..=last_reward_era).collect::>().try_into().unwrap(); + assert_eq!( + Staking::ledger(&6).unwrap(), + StakingLedger { + stash: 5, + total: 1200, + active: 1200, + unlocking: Default::default(), + claimed_rewards, + } + ); + + // fix the corrupted state for post conditions check + HistoryDepth::set(original_history_depth); + }); +} diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 09c2d8b5de113..5070232365d3e 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,22 +18,24 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-09-19, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_staking // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --pallet=pallet_staking +// --chain=dev // --output=./frame/staking/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -65,7 +67,6 @@ pub trait WeightInfo { fn payout_stakers_dead_controller(n: u32, ) -> Weight; fn payout_stakers_alive_staked(n: u32, ) -> Weight; fn rebond(l: u32, ) -> Weight; - fn set_history_depth(e: u32, ) -> Weight; fn reap_stash(s: u32, ) -> Weight; fn new_era(v: u32, n: u32, ) -> Weight; fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight; @@ -82,21 +83,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - Weight::from_ref_time(43_992_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) + Weight::from_ref_time(52_281_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterBagsList ListNodes (r:3 w:3) + // Storage: VoterBagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - Weight::from_ref_time(75_827_000 as u64) + Weight::from_ref_time(89_748_000 as u64) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().writes(7 as u64)) } @@ -106,11 +106,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) + // Storage: VoterBagsList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterBagsList ListBags (r:2 w:2) fn unbond() -> Weight { - Weight::from_ref_time(81_075_000 as u64) + Weight::from_ref_time(94_782_000 as u64) .saturating_add(T::DbWeight::get().reads(12 as u64)) .saturating_add(T::DbWeight::get().writes(8 as u64)) } @@ -118,10 +118,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `s` is `[0, 100]`. 
fn withdraw_unbonded_update(s: u32, ) -> Weight { - Weight::from_ref_time(35_763_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(57_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(44_499_000 as u64) + // Standard Error: 387 + .saturating_add(Weight::from_ref_time(72_726 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -132,14 +133,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) - fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - Weight::from_ref_time(66_938_000 as u64) + /// The range of component `s` is `[0, 100]`. + fn withdraw_unbonded_kill(s: u32, ) -> Weight { + Weight::from_ref_time(83_230_000 as u64) + // Standard Error: 612 + .saturating_add(Weight::from_ref_time(10_289 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(13 as u64)) .saturating_add(T::DbWeight::get().writes(11 as u64)) } @@ -150,23 +154,25 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxValidatorsCount (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListNodes (r:1 w:1) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:1 w:1) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - Weight::from_ref_time(52_943_000 as u64) + Weight::from_ref_time(64_430_000 as u64) .saturating_add(T::DbWeight::get().reads(11 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) + /// The range of component `k` is `[1, 128]`. 
fn kick(k: u32, ) -> Weight { - Weight::from_ref_time(23_264_000 as u64) - // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(8_006_000 as u64).saturating_mul(k as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + Weight::from_ref_time(42_030_000 as u64) + // Standard Error: 5_873 + .saturating_add(Weight::from_ref_time(6_747_156 as u64).saturating_mul(k as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(k as u64))) + .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) } // Storage: Staking Ledger (r:1 w:0) @@ -176,15 +182,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) + /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - Weight::from_ref_time(56_596_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(3_644_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(12 as u64)) + Weight::from_ref_time(70_834_000 as u64) + // Standard Error: 4_405 + .saturating_add(Weight::from_ref_time(2_583_120 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(13 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -192,53 +199,54 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - Weight::from_ref_time(51_117_000 as u64) + Weight::from_ref_time(63_328_000 as u64) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - Weight::from_ref_time(11_223_000 as u64) + Weight::from_ref_time(17_763_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - Weight::from_ref_time(19_826_000 as u64) + Weight::from_ref_time(26_163_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - Weight::from_ref_time(3_789_000 as u64) + Weight::from_ref_time(4_842_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - Weight::from_ref_time(3_793_000 as u64) + Weight::from_ref_time(4_974_000 as u64) 
.saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - Weight::from_ref_time(3_802_000 as u64) + Weight::from_ref_time(5_039_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - Weight::from_ref_time(3_762_000 as u64) + Weight::from_ref_time(4_950_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking Invulnerables (r:0 w:1) + /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { - Weight::from_ref_time(4_318_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(10_000 as u64).saturating_mul(v as u64)) + Weight::from_ref_time(5_150_000 as u64) + // Standard Error: 30 + .saturating_add(Weight::from_ref_time(10_540 as u64).saturating_mul(v as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) @@ -246,32 +254,33 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) + /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { - Weight::from_ref_time(65_265_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_029_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(76_282_000 as u64) + // Standard Error: 2_397 + .saturating_add(Weight::from_ref_time(1_137_934 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(11 as u64)) - .saturating_add(T::DbWeight::get().writes(12 as u64)) + .saturating_add(T::DbWeight::get().writes(11 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Staking UnappliedSlashes (r:1 w:1) + /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { - Weight::from_ref_time(903_312_000 as u64) - // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(4_968_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(93_690_000 as u64) + // Standard Error: 43_203 + .saturating_add(Weight::from_ref_time(6_149_862 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) // Storage: Staking Bonded (r:2 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -280,17 +289,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasValidatorPrefs (r:1 w:0) // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) + /// The range of component `n` is `[1, 256]`. 
fn payout_stakers_dead_controller(n: u32, ) -> Weight { - Weight::from_ref_time(87_569_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(24_232_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(10 as u64)) + Weight::from_ref_time(151_647_000 as u64) + // Standard Error: 8_237 + .saturating_add(Weight::from_ref_time(21_296_415 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(12 as u64)) .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) // Storage: Staking Bonded (r:2 w:0) // Storage: Staking Ledger (r:2 w:2) @@ -300,45 +309,30 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:2 w:2) + /// The range of component `n` is `[1, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { - Weight::from_ref_time(98_839_000 as u64) - // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(34_480_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(11 as u64)) + Weight::from_ref_time(197_310_000 as u64) + // Standard Error: 13_818 + .saturating_add(Weight::from_ref_time(30_053_043 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(15 as u64)) .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(n as u64))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) + // Storage: VoterBagsList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterBagsList ListBags (r:2 w:2) + /// The range of component `l` is `[1, 32]`. 
fn rebond(l: u32, ) -> Weight { - Weight::from_ref_time(74_865_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(64_000 as u64).saturating_mul(l as u64)) + Weight::from_ref_time(89_469_000 as u64) + // Standard Error: 1_197 + .saturating_add(Weight::from_ref_time(74_212 as u64).saturating_mul(l as u64)) .saturating_add(T::DbWeight::get().reads(9 as u64)) .saturating_add(T::DbWeight::get().writes(8 as u64)) } - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking HistoryDepth (r:1 w:1) - // Storage: Staking ErasStakersClipped (r:0 w:2) - // Storage: Staking ErasValidatorPrefs (r:0 w:2) - // Storage: Staking ErasValidatorReward (r:0 w:1) - // Storage: Staking ErasRewardPoints (r:0 w:1) - // Storage: Staking ErasStakers (r:0 w:2) - // Storage: Staking ErasTotalStake (r:0 w:1) - // Storage: Staking ErasStartSessionIndex (r:0 w:1) - fn set_history_depth(e: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 62_000 - .saturating_add(Weight::from_ref_time(22_829_000 as u64).saturating_mul(e as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(e as u64))) - } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) @@ -346,24 +340,25 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) + /// The range of component `s` is `[1, 100]`. 
fn reap_stash(s: u32, ) -> Weight { - Weight::from_ref_time(70_933_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_021_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(88_333_000 as u64) + // Standard Error: 1_567 + .saturating_add(Weight::from_ref_time(1_101_099 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(12 as u64)) - .saturating_add(T::DbWeight::get().writes(12 as u64)) + .saturating_add(T::DbWeight::get().writes(13 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } - // Storage: BagsList CounterForListNodes (r:1 w:0) + // Storage: VoterBagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: BagsList ListBags (r:200 w:0) - // Storage: BagsList ListNodes (r:101 w:0) + // Storage: VoterBagsList ListBags (r:200 w:0) + // Storage: VoterBagsList ListNodes (r:101 w:0) // Storage: Staking Nominators (r:101 w:0) // Storage: Staking Validators (r:2 w:0) // Storage: Staking Bonded (r:101 w:0) @@ -372,52 +367,54 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ValidatorCount (r:1 w:0) // Storage: Staking MinimumValidatorCount (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:1) - // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasStakersClipped (r:0 w:1) // Storage: Staking ErasValidatorPrefs (r:0 w:1) // Storage: Staking ErasStakers (r:0 w:1) // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) + /// The range of component `v` is `[1, 10]`. + /// The range of component `n` is `[1, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 897_000 - .saturating_add(Weight::from_ref_time(213_100_000 as u64).saturating_mul(v as u64)) - // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(31_123_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(208 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(v as u64))) + Weight::from_ref_time(572_530_000 as u64) + // Standard Error: 3_166_542 + .saturating_add(Weight::from_ref_time(101_354_358 as u64).saturating_mul(v as u64)) + // Standard Error: 315_238 + .saturating_add(Weight::from_ref_time(15_565_319 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(261 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(v as u64))) .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(v as u64))) } - // Storage: BagsList CounterForListNodes (r:1 w:0) + // Storage: VoterBagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) - // Storage: BagsList ListBags (r:200 w:0) - // Storage: BagsList ListNodes (r:1500 w:0) + // Storage: VoterBagsList ListBags (r:200 w:0) + // Storage: VoterBagsList ListNodes (r:1500 w:0) // Storage: Staking Nominators (r:1500 w:0) // Storage: Staking Validators (r:500 w:0) // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) - fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(23_745_000 as u64).saturating_mul(v as u64)) - // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(22_497_000 
as u64).saturating_mul(n as u64)) - // Standard Error: 3_968_000 - .saturating_add(Weight::from_ref_time(20_676_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(202 as u64)) - .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(v as u64))) - .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + /// The range of component `v` is `[500, 1000]`. + /// The range of component `n` is `[500, 1000]`. + /// The range of component `s` is `[1, 20]`. + fn get_npos_voters(v: u32, n: u32, _s: u32, ) -> Weight { + Weight::from_ref_time(24_930_788_000 as u64) + // Standard Error: 266_386 + .saturating_add(Weight::from_ref_time(6_687_552 as u64).saturating_mul(v as u64)) + // Standard Error: 266_386 + .saturating_add(Weight::from_ref_time(6_839_134 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(6722 as u64)) + .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(v as u64))) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) } + // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) + /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 36_000 - .saturating_add(Weight::from_ref_time(8_097_000 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + Weight::from_ref_time(4_864_727_000 as u64) + // Standard Error: 54_240 + .saturating_add(Weight::from_ref_time(3_319_738 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().reads(502 as u64)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -426,7 +423,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - Weight::from_ref_time(7_041_000 as u64) + Weight::from_ref_time(10_141_000 as u64) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:0 w:1) @@ -436,7 +433,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - Weight::from_ref_time(6_495_000 as u64) + Weight::from_ref_time(8_993_000 as u64) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) @@ -446,18 +443,18 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CounterForNominators (r:1 w:1) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - Weight::from_ref_time(62_014_000 as u64) + Weight::from_ref_time(79_082_000 as u64) .saturating_add(T::DbWeight::get().reads(11 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - 
Weight::from_ref_time(12_814_000 as u64) + Weight::from_ref_time(19_265_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -468,21 +465,20 @@ impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - Weight::from_ref_time(43_992_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) + Weight::from_ref_time(52_281_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterBagsList ListNodes (r:3 w:3) + // Storage: VoterBagsList ListBags (r:2 w:2) fn bond_extra() -> Weight { - Weight::from_ref_time(75_827_000 as u64) + Weight::from_ref_time(89_748_000 as u64) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().writes(7 as u64)) } @@ -492,11 +488,11 @@ impl WeightInfo for () { // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) + // Storage: VoterBagsList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterBagsList ListBags (r:2 w:2) fn unbond() -> Weight { - Weight::from_ref_time(81_075_000 as u64) + Weight::from_ref_time(94_782_000 as u64) .saturating_add(RocksDbWeight::get().reads(12 as u64)) .saturating_add(RocksDbWeight::get().writes(8 as u64)) } @@ -504,10 +500,11 @@ impl WeightInfo for () { // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - Weight::from_ref_time(35_763_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(57_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(44_499_000 as u64) + // Standard Error: 387 + .saturating_add(Weight::from_ref_time(72_726 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -518,14 +515,17 @@ impl WeightInfo for () { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) - fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - Weight::from_ref_time(66_938_000 as u64) + /// The range of component `s` is `[0, 100]`. 
+ fn withdraw_unbonded_kill(s: u32, ) -> Weight { + Weight::from_ref_time(83_230_000 as u64) + // Standard Error: 612 + .saturating_add(Weight::from_ref_time(10_289 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(13 as u64)) .saturating_add(RocksDbWeight::get().writes(11 as u64)) } @@ -536,23 +536,25 @@ impl WeightInfo for () { // Storage: Staking MaxValidatorsCount (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListNodes (r:1 w:1) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:1 w:1) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - Weight::from_ref_time(52_943_000 as u64) + Weight::from_ref_time(64_430_000 as u64) .saturating_add(RocksDbWeight::get().reads(11 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) + /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { - Weight::from_ref_time(23_264_000 as u64) - // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(8_006_000 as u64).saturating_mul(k as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + Weight::from_ref_time(42_030_000 as u64) + // Standard Error: 5_873 + .saturating_add(Weight::from_ref_time(6_747_156 as u64).saturating_mul(k as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(k as u64))) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) } // Storage: Staking Ledger (r:1 w:0) @@ -562,15 +564,16 @@ impl WeightInfo for () { // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) + /// The range of component `n` is `[1, 16]`. 
fn nominate(n: u32, ) -> Weight { - Weight::from_ref_time(56_596_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(3_644_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(12 as u64)) + Weight::from_ref_time(70_834_000 as u64) + // Standard Error: 4_405 + .saturating_add(Weight::from_ref_time(2_583_120 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(13 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -578,53 +581,54 @@ impl WeightInfo for () { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - Weight::from_ref_time(51_117_000 as u64) + Weight::from_ref_time(63_328_000 as u64) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - Weight::from_ref_time(11_223_000 as u64) + Weight::from_ref_time(17_763_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - Weight::from_ref_time(19_826_000 as u64) + Weight::from_ref_time(26_163_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - Weight::from_ref_time(3_789_000 as u64) + Weight::from_ref_time(4_842_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - Weight::from_ref_time(3_793_000 as u64) + Weight::from_ref_time(4_974_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - Weight::from_ref_time(3_802_000 as u64) + Weight::from_ref_time(5_039_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - Weight::from_ref_time(3_762_000 as u64) + Weight::from_ref_time(4_950_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking Invulnerables (r:0 w:1) + /// The range of component `v` is `[0, 1000]`. 
fn set_invulnerables(v: u32, ) -> Weight { - Weight::from_ref_time(4_318_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(10_000 as u64).saturating_mul(v as u64)) + Weight::from_ref_time(5_150_000 as u64) + // Standard Error: 30 + .saturating_add(Weight::from_ref_time(10_540 as u64).saturating_mul(v as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) @@ -632,32 +636,33 @@ impl WeightInfo for () { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:2) + /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { - Weight::from_ref_time(65_265_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_029_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(76_282_000 as u64) + // Standard Error: 2_397 + .saturating_add(Weight::from_ref_time(1_137_934 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(11 as u64)) - .saturating_add(RocksDbWeight::get().writes(12 as u64)) + .saturating_add(RocksDbWeight::get().writes(11 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Staking UnappliedSlashes (r:1 w:1) + /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { - Weight::from_ref_time(903_312_000 as u64) - // Standard Error: 56_000 - .saturating_add(Weight::from_ref_time(4_968_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(93_690_000 as u64) + // Standard Error: 43_203 + .saturating_add(Weight::from_ref_time(6_149_862 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) // Storage: Staking Bonded (r:2 w:0) // Storage: Staking Ledger (r:1 w:1) @@ -666,17 +671,17 @@ impl WeightInfo for () { // Storage: Staking ErasValidatorPrefs (r:1 w:0) // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) + /// The range of component `n` is `[1, 256]`. 
fn payout_stakers_dead_controller(n: u32, ) -> Weight { - Weight::from_ref_time(87_569_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(24_232_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(10 as u64)) + Weight::from_ref_time(151_647_000 as u64) + // Standard Error: 8_237 + .saturating_add(Weight::from_ref_time(21_296_415 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(12 as u64)) .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) // Storage: Staking Bonded (r:2 w:0) // Storage: Staking Ledger (r:2 w:2) @@ -686,45 +691,30 @@ impl WeightInfo for () { // Storage: Staking Payee (r:2 w:0) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:2 w:2) + /// The range of component `n` is `[1, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { - Weight::from_ref_time(98_839_000 as u64) - // Standard Error: 21_000 - .saturating_add(Weight::from_ref_time(34_480_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(11 as u64)) + Weight::from_ref_time(197_310_000 as u64) + // Standard Error: 13_818 + .saturating_add(Weight::from_ref_time(30_053_043 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(15 as u64)) .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(n as u64))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) + // Storage: VoterBagsList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterBagsList ListBags (r:2 w:2) + /// The range of component `l` is `[1, 32]`. 
fn rebond(l: u32, ) -> Weight { - Weight::from_ref_time(74_865_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(64_000 as u64).saturating_mul(l as u64)) + Weight::from_ref_time(89_469_000 as u64) + // Standard Error: 1_197 + .saturating_add(Weight::from_ref_time(74_212 as u64).saturating_mul(l as u64)) .saturating_add(RocksDbWeight::get().reads(9 as u64)) .saturating_add(RocksDbWeight::get().writes(8 as u64)) } - // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking HistoryDepth (r:1 w:1) - // Storage: Staking ErasStakersClipped (r:0 w:2) - // Storage: Staking ErasValidatorPrefs (r:0 w:2) - // Storage: Staking ErasValidatorReward (r:0 w:1) - // Storage: Staking ErasRewardPoints (r:0 w:1) - // Storage: Staking ErasStakers (r:0 w:2) - // Storage: Staking ErasTotalStake (r:0 w:1) - // Storage: Staking ErasStartSessionIndex (r:0 w:1) - fn set_history_depth(e: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 62_000 - .saturating_add(Weight::from_ref_time(22_829_000 as u64).saturating_mul(e as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(e as u64))) - } // Storage: System Account (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:1 w:1) @@ -732,24 +722,25 @@ impl WeightInfo for () { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) + /// The range of component `s` is `[1, 100]`. 
fn reap_stash(s: u32, ) -> Weight { - Weight::from_ref_time(70_933_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_021_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(88_333_000 as u64) + // Standard Error: 1_567 + .saturating_add(Weight::from_ref_time(1_101_099 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(12 as u64)) - .saturating_add(RocksDbWeight::get().writes(12 as u64)) + .saturating_add(RocksDbWeight::get().writes(13 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } - // Storage: BagsList CounterForListNodes (r:1 w:0) + // Storage: VoterBagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: BagsList ListBags (r:200 w:0) - // Storage: BagsList ListNodes (r:101 w:0) + // Storage: VoterBagsList ListBags (r:200 w:0) + // Storage: VoterBagsList ListNodes (r:101 w:0) // Storage: Staking Nominators (r:101 w:0) // Storage: Staking Validators (r:2 w:0) // Storage: Staking Bonded (r:101 w:0) @@ -758,52 +749,54 @@ impl WeightInfo for () { // Storage: Staking ValidatorCount (r:1 w:0) // Storage: Staking MinimumValidatorCount (r:1 w:0) // Storage: Staking CurrentEra (r:1 w:1) - // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Staking ErasStakersClipped (r:0 w:1) // Storage: Staking ErasValidatorPrefs (r:0 w:1) // Storage: Staking ErasStakers (r:0 w:1) // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) + /// The range of component `v` is `[1, 10]`. + /// The range of component `n` is `[1, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 897_000 - .saturating_add(Weight::from_ref_time(213_100_000 as u64).saturating_mul(v as u64)) - // Standard Error: 45_000 - .saturating_add(Weight::from_ref_time(31_123_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(208 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(v as u64))) + Weight::from_ref_time(572_530_000 as u64) + // Standard Error: 3_166_542 + .saturating_add(Weight::from_ref_time(101_354_358 as u64).saturating_mul(v as u64)) + // Standard Error: 315_238 + .saturating_add(Weight::from_ref_time(15_565_319 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(261 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(v as u64))) .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(v as u64))) } - // Storage: BagsList CounterForListNodes (r:1 w:0) + // Storage: VoterBagsList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) - // Storage: BagsList ListBags (r:200 w:0) - // Storage: BagsList ListNodes (r:1500 w:0) + // Storage: VoterBagsList ListBags (r:200 w:0) + // Storage: VoterBagsList ListNodes (r:1500 w:0) // Storage: Staking Nominators (r:1500 w:0) // Storage: Staking Validators (r:500 w:0) // Storage: Staking Bonded (r:1500 w:0) // Storage: Staking Ledger (r:1500 w:0) - fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 116_000 - .saturating_add(Weight::from_ref_time(23_745_000 as u64).saturating_mul(v as u64)) - // Standard Error: 116_000 - 
.saturating_add(Weight::from_ref_time(22_497_000 as u64).saturating_mul(n as u64)) - // Standard Error: 3_968_000 - .saturating_add(Weight::from_ref_time(20_676_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(202 as u64)) - .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(v as u64))) - .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + /// The range of component `v` is `[500, 1000]`. + /// The range of component `n` is `[500, 1000]`. + /// The range of component `s` is `[1, 20]`. + fn get_npos_voters(v: u32, n: u32, _s: u32, ) -> Weight { + Weight::from_ref_time(24_930_788_000 as u64) + // Standard Error: 266_386 + .saturating_add(Weight::from_ref_time(6_687_552 as u64).saturating_mul(v as u64)) + // Standard Error: 266_386 + .saturating_add(Weight::from_ref_time(6_839_134 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(6722 as u64)) + .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(v as u64))) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) } + // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) + /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 36_000 - .saturating_add(Weight::from_ref_time(8_097_000 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + Weight::from_ref_time(4_864_727_000 as u64) + // Standard Error: 54_240 + .saturating_add(Weight::from_ref_time(3_319_738 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().reads(502 as u64)) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -812,7 +805,7 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - Weight::from_ref_time(7_041_000 as u64) + Weight::from_ref_time(10_141_000 as u64) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:0 w:1) @@ -822,7 +815,7 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - Weight::from_ref_time(6_495_000 as u64) + Weight::from_ref_time(8_993_000 as u64) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) @@ -832,18 +825,18 @@ impl WeightInfo for () { // Storage: Staking CounterForNominators (r:1 w:1) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: BagsList ListNodes (r:2 w:2) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterBagsList ListNodes (r:2 w:2) + // Storage: VoterBagsList ListBags (r:1 w:1) + // Storage: VoterBagsList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - Weight::from_ref_time(62_014_000 as u64) + Weight::from_ref_time(79_082_000 as u64) .saturating_add(RocksDbWeight::get().reads(11 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn 
force_apply_min_commission() -> Weight { - Weight::from_ref_time(12_814_000 as u64) + Weight::from_ref_time(19_265_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } From 9df7ac80f367e396135afb94150bdafbce88e094 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 21 Sep 2022 11:42:12 +0200 Subject: [PATCH 156/348] Introduce 'intermediate_insert' method to hide implementation details (#12215) Renaming from 'intermediate_take' to 'intermediate_remove' --- bin/node/cli/src/service.rs | 8 ++++---- client/consensus/babe/src/lib.rs | 17 +++++++---------- client/consensus/babe/src/tests.rs | 6 ++---- client/consensus/common/src/block_import.rs | 15 ++++++++++----- .../consensus/manual-seal/src/consensus/babe.rs | 13 ++++--------- client/consensus/pow/src/lib.rs | 13 ++++--------- client/consensus/pow/src/worker.rs | 6 +----- 7 files changed, 32 insertions(+), 46 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index c058e22fa7a97..f2b4feb58075f 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -587,7 +587,7 @@ mod tests { RuntimeAppPublic, }; use sp_timestamp; - use std::{borrow::Cow, sync::Arc}; + use std::sync::Arc; type AccountPublic = ::Signer; @@ -733,9 +733,9 @@ mod tests { let mut params = BlockImportParams::new(BlockOrigin::File, new_header); params.post_digests.push(item); params.body = Some(new_body); - params.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + params.insert_intermediate( + INTERMEDIATE_KEY, + BabeIntermediate:: { epoch_descriptor }, ); params.fork_choice = Some(ForkChoiceStrategy::LongestChain); diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index eca6654f6e4dc..aef4785b7bb81 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -67,7 +67,6 @@ #![warn(missing_docs)] use std::{ - borrow::Cow, collections::{HashMap, HashSet}, future::Future, pin::Pin, @@ -857,10 +856,8 @@ where import_block.body = Some(body); import_block.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, - ); + import_block + .insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor }); Ok(import_block) } @@ -1272,9 +1269,9 @@ where block.header = pre_header; block.post_digests.push(verified_info.seal); - block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + block.insert_intermediate( + INTERMEDIATE_KEY, + BabeIntermediate:: { epoch_descriptor }, ); block.post_hash = Some(hash); @@ -1426,7 +1423,7 @@ where match self.client.status(BlockId::Hash(hash)) { Ok(sp_blockchain::BlockStatus::InChain) => { // When re-importing existing block strip away intermediates. 
- let _ = block.take_intermediate::>(INTERMEDIATE_KEY); + let _ = block.remove_intermediate::>(INTERMEDIATE_KEY); block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); return self.inner.import_block(block, new_cache).await.map_err(Into::into) }, @@ -1495,7 +1492,7 @@ where }; let intermediate = - block.take_intermediate::>(INTERMEDIATE_KEY)?; + block.remove_intermediate::>(INTERMEDIATE_KEY)?; let epoch_descriptor = intermediate.epoch_descriptor; let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index ab3805138482c..a3467e0020200 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -642,10 +642,8 @@ fn propose_and_import_block( let mut import = BlockImportParams::new(BlockOrigin::Own, block.header); import.post_digests.push(seal); import.body = Some(block.extrinsics); - import.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, - ); + import + .insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor }); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); let import_result = block_on(block_import.import_block(import, Default::default())).unwrap(); diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index b8fc815cc5c5c..f888176addd2d 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -294,18 +294,23 @@ impl BlockImportParams { } } - /// Take intermediate by given key, and remove it from the processing list. - pub fn take_intermediate(&mut self, key: &[u8]) -> Result, Error> { + /// Insert intermediate by given key. + pub fn insert_intermediate(&mut self, key: &'static [u8], value: T) { + self.intermediates.insert(Cow::from(key), Box::new(value)); + } + + /// Remove and return intermediate by given key. + pub fn remove_intermediate(&mut self, key: &[u8]) -> Result { let (k, v) = self.intermediates.remove_entry(key).ok_or(Error::NoIntermediate)?; - v.downcast::().map_err(|v| { + v.downcast::().map(|v| *v).map_err(|v| { self.intermediates.insert(k, v); Error::InvalidIntermediate }) } /// Get a reference to a given intermediate. - pub fn intermediate(&self, key: &[u8]) -> Result<&T, Error> { + pub fn get_intermediate(&self, key: &[u8]) -> Result<&T, Error> { self.intermediates .get(key) .ok_or(Error::NoIntermediate)? @@ -314,7 +319,7 @@ impl BlockImportParams { } /// Get a mutable reference to a given intermediate. - pub fn intermediate_mut(&mut self, key: &[u8]) -> Result<&mut T, Error> { + pub fn get_intermediate_mut(&mut self, key: &[u8]) -> Result<&mut T, Error> { self.intermediates .get_mut(key) .ok_or(Error::NoIntermediate)? 
diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 300a96695c90a..206f5163a13cd 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -30,7 +30,7 @@ use sc_consensus_epochs::{ descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, }; use sp_keystore::SyncCryptoStorePtr; -use std::{borrow::Cow, marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc}; use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; use sp_api::{ProvideRuntimeApi, TransactionFor}; @@ -125,10 +125,8 @@ where // drop the lock drop(epoch_changes); - import_params.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, - ); + import_params + .insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor }); Ok((import_params, None)) } @@ -315,10 +313,7 @@ where }; } - params.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, - ); + params.insert_intermediate(INTERMEDIATE_KEY, BabeIntermediate:: { epoch_descriptor }); Ok(()) } diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 9b3ef501d2396..dcf069d617bab 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -65,10 +65,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT}, RuntimeString, }; -use std::{ - borrow::Cow, cmp::Ordering, collections::HashMap, marker::PhantomData, sync::Arc, - time::Duration, -}; +use std::{cmp::Ordering, collections::HashMap, marker::PhantomData, sync::Arc, time::Duration}; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -358,8 +355,8 @@ where let inner_seal = fetch_seal::(block.post_digests.last(), block.header.hash())?; - let intermediate = - block.take_intermediate::>(INTERMEDIATE_KEY)?; + let intermediate = block + .remove_intermediate::>(INTERMEDIATE_KEY)?; let difficulty = match intermediate.difficulty { Some(difficulty) => difficulty, @@ -455,9 +452,7 @@ where let intermediate = PowIntermediate:: { difficulty: None }; block.header = checked_header; block.post_digests.push(seal); - block - .intermediates - .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); + block.insert_intermediate(INTERMEDIATE_KEY, intermediate); block.post_hash = Some(hash); Ok((block, None)) diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index 750e78cd9a038..a00da6e7022fb 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -32,7 +32,6 @@ use sp_runtime::{ DigestItem, }; use std::{ - borrow::Cow, collections::HashMap, pin::Pin, sync::{ @@ -212,10 +211,7 @@ where let intermediate = PowIntermediate:: { difficulty: Some(build.metadata.difficulty), }; - - import_block - .intermediates - .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); + import_block.insert_intermediate(INTERMEDIATE_KEY, intermediate); let header = import_block.post_header(); let mut block_import = self.block_import.lock(); From ae4ba19c12e4fad5368558b2c49688256b15c9ba Mon Sep 17 00:00:00 2001 From: Koute Date: Wed, 21 Sep 2022 19:21:01 +0900 Subject: [PATCH 157/348] Bump `wasmtime` to 1.0.0 (#12317) --- Cargo.lock | 120 ++++++++++++++------------- client/executor/wasmtime/Cargo.toml | 5 +- primitives/wasm-interface/Cargo.toml | 2 +- 3 files changed, 65 insertions(+), 62 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5791c8762d955..5521e91de895d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1153,11 +1153,11 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.87.1" +version = "0.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f91425bea5a5ac6d76b788477064944a7e21f0e240fd93f6f368a774a3efdd1" +checksum = "b27bbd3e6c422cf6282b047bcdd51ecd9ca9f3497a3be0132ffa08e509b824b0" dependencies = [ - "cranelift-entity 0.87.1", + "cranelift-entity 0.88.0", ] [[package]] @@ -1179,14 +1179,16 @@ dependencies = [ [[package]] name = "cranelift-codegen" -version = "0.87.1" +version = "0.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b83b4bbf7bc96db77b7b5b5e41fafc4001536e9f0cbfd702ed7d4d8f848dc06" +checksum = "872f5d4557a411b087bd731df6347c142ae1004e6467a144a7e33662e5715a01" dependencies = [ - "cranelift-bforest 0.87.1", - "cranelift-codegen-meta 0.87.1", - "cranelift-codegen-shared 0.87.1", - "cranelift-entity 0.87.1", + "arrayvec 0.7.2", + "bumpalo", + "cranelift-bforest 0.88.0", + "cranelift-codegen-meta 0.88.0", + "cranelift-codegen-shared 0.88.0", + "cranelift-entity 0.88.0", "cranelift-isle", "gimli 0.26.1", "log", @@ -1207,11 +1209,11 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.87.1" +version = "0.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da02e8fff048c381b313a3dfef4deb2343976fb6d7acc8e7d9c86d4c93e3fa06" +checksum = "21b49fdebb29c62c1fc4da1eeebd609e9d530ecde24a9876def546275f73a244" dependencies = [ - "cranelift-codegen-shared 0.87.1", + "cranelift-codegen-shared 0.88.0", ] [[package]] @@ -1222,9 +1224,9 @@ checksum = "9dabb5fe66e04d4652e434195b45ae65b5c8172d520247b8f66d8df42b2b45dc" [[package]] name = "cranelift-codegen-shared" -version = "0.87.1" +version = "0.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9abc2a06e8fc29e36660ebbc9e2503e18a051057072acbb1e75e7f7cf19cb95e" +checksum = "5fc0c091e2db055d4d7f6b7cec2d2ead286bcfaea3357c6a52c2a2613a8cb5ac" [[package]] name = "cranelift-entity" @@ -1234,9 +1236,9 @@ checksum = "3329733e4d4b8e91c809efcaa4faee80bf66f20164e3dd16d707346bd3494799" [[package]] name = "cranelift-entity" -version = "0.87.1" +version = "0.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeced7874890fc25d85cacc5e626c4d67931c7c25aad1c2ad521684744c1ff5c" +checksum = "354a9597be87996c9b278655e68b8447f65dd907256855ad773864edee8d985c" dependencies = [ "serde", ] @@ -1255,11 +1257,11 @@ dependencies = [ [[package]] name = "cranelift-frontend" -version = "0.87.1" +version = "0.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1d301ccad6fce05d9c9793d433d225fafdd57661b98d268d8d162e9291ff2e" +checksum = "0cd8dd3fb8b82c772f4172e87ae1677b971676fffa7c4e3398e3047e650a266b" dependencies = [ - "cranelift-codegen 0.87.1", + "cranelift-codegen 0.88.0", "log", "smallvec", "target-lexicon", @@ -1267,34 +1269,34 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.87.1" +version = "0.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7b100db19320848986b4df1da19501dbddeb706a799f502222f72f889b0fab" +checksum = "b82527802b1f7d8da288adc28f1dc97ea52943f5871c041213f7b5035ac698a7" [[package]] name = "cranelift-native" -version = "0.87.1" +version = "0.88.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be18d8b976cddc822e52343f328b7593d26dd2f1aeadd90da071596a210d524" +checksum = "c30ba8b910f1be023af0c39109cb28a8809734942a6b3eecbf2de8993052ea5e" dependencies = [ - "cranelift-codegen 0.87.1", + "cranelift-codegen 0.88.0", "libc", "target-lexicon", ] [[package]] name = "cranelift-wasm" -version = "0.87.1" +version = "0.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f9e48bb632a2e189b38a9fa89fa5a6eea687a5a4c613bbef7c2b7522c3ad0e0" +checksum = "776a8916d201894aca9637a20814f1e11abc62acd5cfbe0b4eb2e63922756971" dependencies = [ - "cranelift-codegen 0.87.1", - "cranelift-entity 0.87.1", - "cranelift-frontend 0.87.1", + "cranelift-codegen 0.88.0", + "cranelift-entity 0.88.0", + "cranelift-frontend 0.88.0", "itertools", "log", "smallvec", - "wasmparser 0.88.0", + "wasmparser 0.89.1", "wasmtime-types", ] @@ -11858,18 +11860,18 @@ checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65" [[package]] name = "wasmparser" -version = "0.88.0" +version = "0.89.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb8cf7dd82407fe68161bedcd57fde15596f32ebf6e9b3bdbf3ae1da20e38e5e" +checksum = "ab5d3e08b13876f96dd55608d03cd4883a0545884932d5adf11925876c96daef" dependencies = [ "indexmap", ] [[package]] name = "wasmtime" -version = "0.40.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a020a3f6587fa7a7d98a021156177735ebb07212a6239a85ab5f14b2f728508f" +checksum = "8a10dc9784d8c3a33c970e3939180424955f08af2e7f20368ec02685a0e8f065" dependencies = [ "anyhow", "bincode", @@ -11884,7 +11886,7 @@ dependencies = [ "rayon", "serde", "target-lexicon", - "wasmparser 0.88.0", + "wasmparser 0.89.1", "wasmtime-cache", "wasmtime-cranelift", "wasmtime-environ", @@ -11895,18 +11897,18 @@ dependencies = [ [[package]] name = "wasmtime-asm-macros" -version = "0.40.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed4ada1fdd4d9a2aa37be652abcc31ae3188ad0efcefb4571ef4f785be2d777" +checksum = "ee4dbdc6daf68528cad1275ac91e3f51848ce9824385facc94c759f529decdf8" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "wasmtime-cache" -version = "0.40.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96a03a5732ef39b83943d9d72de8ac2d58623d3bfaaea4d9a92aea5fcd9acf5" +checksum = "9f507f3fa1ee1b2f9a83644e2514242b1dfe580782c0eb042f1ef70255bc4ffe" dependencies = [ "anyhow", "base64", @@ -11924,14 +11926,14 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "0.40.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc59c28fe895112db09e262fb9c483f9e7b82c78a82a6ded69567ccc0e9795b" +checksum = "8f03cf79d982fc68e94ba0bea6a300a3b94621c4eb9705eece0a4f06b235a3b5" dependencies = [ "anyhow", - "cranelift-codegen 0.87.1", - "cranelift-entity 0.87.1", - "cranelift-frontend 0.87.1", + "cranelift-codegen 0.88.0", + "cranelift-entity 0.88.0", + "cranelift-frontend 0.88.0", "cranelift-native", "cranelift-wasm", "gimli 0.26.1", @@ -11939,18 +11941,18 @@ dependencies = [ "object 0.29.0", "target-lexicon", "thiserror", - "wasmparser 0.88.0", + "wasmparser 0.89.1", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.40.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"11086e573d2635a45ac0d44697a8e4586e058cf1b190f76bea466ca2ec36c30a" +checksum = "5c587c62e91c5499df62012b87b88890d0eb470b2ffecc5964e9da967b70c77c" dependencies = [ "anyhow", - "cranelift-entity 0.87.1", + "cranelift-entity 0.88.0", "gimli 0.26.1", "indexmap", "log", @@ -11958,15 +11960,15 @@ dependencies = [ "serde", "target-lexicon", "thiserror", - "wasmparser 0.88.0", + "wasmparser 0.89.1", "wasmtime-types", ] [[package]] name = "wasmtime-jit" -version = "0.40.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5444a78b74144718633f8642eccd7c4858f4c6f0c98ae6a3668998adf177ba2" +checksum = "047839b5dabeae5424a078c19b8cc897e5943a7fadc69e3d888b9c9a897666b3" dependencies = [ "addr2line", "anyhow", @@ -11989,9 +11991,9 @@ dependencies = [ [[package]] name = "wasmtime-jit-debug" -version = "0.40.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2bf6a667d2a29b2b0ed42bcf7564f00c595d92c24acb4d241c7c4d950b1910c" +checksum = "b299569abf6f99b7b8e020afaf84a700e8636c6a42e242069267322cd5818235" dependencies = [ "object 0.29.0", "once_cell", @@ -12000,9 +12002,9 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "0.40.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee064ce7b563cc201cdf3bb1cc4b233f386d8c57a96e55f4c4afe6103f4bd6a1" +checksum = "ae79e0515160bd5abee5df50a16c4eb8db9f71b530fc988ae1d9ce34dcb8dd01" dependencies = [ "anyhow", "cc", @@ -12025,14 +12027,14 @@ dependencies = [ [[package]] name = "wasmtime-types" -version = "0.40.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e104bd9e625181d53ead85910bbc0863aa5f0c6ef96836fe9a5cc65da11b69" +checksum = "790cf43ee8e2d5dad1780af30f00d7a972b74725fb1e4f90c28d62733819b185" dependencies = [ - "cranelift-entity 0.87.1", + "cranelift-entity 0.88.0", "serde", "thiserror", - "wasmparser 0.88.0", + "wasmparser 0.89.1", ] [[package]] diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 94ad25bdc948c..fc6d5db14aa1d 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -21,7 +21,7 @@ parity-wasm = "0.45" # When bumping wasmtime do not forget to also bump rustix # to exactly the same version as used by wasmtime! -wasmtime = { version = "0.40.1", default-features = false, features = [ +wasmtime = { version = "1.0.0", default-features = false, features = [ "cache", "cranelift", "jitdump", @@ -35,7 +35,8 @@ sp-runtime-interface = { version = "6.0.0", path = "../../../primitives/runtime- sp-sandbox = { version = "0.10.0-dev", path = "../../../primitives/sandbox" } sp-wasm-interface = { version = "6.0.0", features = ["wasmtime"], path = "../../../primitives/wasm-interface" } -# Here we include the rustix crate used by wasmtime just to enable its 'use-libc' flag. +# Here we include the rustix crate in the exactly same semver-compatible version as used by +# wasmtime and enable its 'use-libc' flag. 
# # By default rustix directly calls the appropriate syscalls completely bypassing libc; # this doesn't have any actual benefits for us besides making it harder to debug memory diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index 05ccad88ec37a..d61c74f20222c 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", optional = true } wasmi = { version = "0.13", optional = true } -wasmtime = { version = "0.40.1", default-features = false, optional = true } +wasmtime = { version = "1.0.0", default-features = false, optional = true } sp-std = { version = "4.0.0", default-features = false, path = "../std" } [features] From be976154078ae8df932802f32350bfaff78a1e57 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 21 Sep 2022 13:08:56 +0200 Subject: [PATCH 158/348] Format templates and fix `--steps` default value (#12286) * Cleaup imports Signed-off-by: Oliver Tale-Yazdi * Format template Signed-off-by: Oliver Tale-Yazdi * Set steps min value to two Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- .../benchmarking-cli/src/overhead/weights.hbs | 21 ++++++++++++++----- .../frame/benchmarking-cli/src/pallet/mod.rs | 2 +- .../benchmarking-cli/src/pallet/template.hbs | 2 +- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/utils/frame/benchmarking-cli/src/overhead/weights.hbs b/utils/frame/benchmarking-cli/src/overhead/weights.hbs index 86b2c0b0e0cdb..8d1a369372721 100644 --- a/utils/frame/benchmarking-cli/src/overhead/weights.hbs +++ b/utils/frame/benchmarking-cli/src/overhead/weights.hbs @@ -39,7 +39,6 @@ parameter_types! { #[cfg(test)] mod test_weights { - use super::*; use sp_weights::constants; /// Checks that the weight exists and is sane. @@ -51,14 +50,26 @@ mod test_weights { {{#if (eq short_name "block")}} // At least 100 µs. - assert!(w.ref_time() >= 100u64 * constants::WEIGHT_PER_MICROS.ref_time(), "Weight should be at least 100 µs."); + assert!( + w.ref_time() >= 100u64 * constants::WEIGHT_PER_MICROS.ref_time(), + "Weight should be at least 100 µs." + ); // At most 50 ms. - assert!(w.ref_time() <= 50u64 * constants::WEIGHT_PER_MILLIS.ref_time(), "Weight should be at most 50 ms."); + assert!( + w.ref_time() <= 50u64 * constants::WEIGHT_PER_MILLIS.ref_time(), + "Weight should be at most 50 ms." + ); {{else}} // At least 10 µs. - assert!(w.ref_time() >= 10u64 * constants::WEIGHT_PER_MICROS.ref_time(), "Weight should be at least 10 µs."); + assert!( + w.ref_time() >= 10u64 * constants::WEIGHT_PER_MICROS.ref_time(), + "Weight should be at least 10 µs." + ); // At most 1 ms. - assert!(w.ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), "Weight should be at most 1 ms."); + assert!( + w.ref_time() <= constants::WEIGHT_PER_MILLIS.ref_time(), + "Weight should be at most 1 ms." + ); {{/if}} } } diff --git a/utils/frame/benchmarking-cli/src/pallet/mod.rs b/utils/frame/benchmarking-cli/src/pallet/mod.rs index 7beaf321a2927..b8c1f7b905c0c 100644 --- a/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -43,7 +43,7 @@ pub struct PalletCmd { pub extrinsic: Option, /// Select how many samples we should take across the variable components. 
- #[clap(short, long, default_value = "1")] + #[clap(short, long, default_value = "2")] pub steps: u32, /// Indicates lowest values for each of the component ranges. diff --git a/utils/frame/benchmarking-cli/src/pallet/template.hbs b/utils/frame/benchmarking-cli/src/pallet/template.hbs index 6e016c47ef010..fbe435edf8fc3 100644 --- a/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -15,7 +15,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::{traits::Get, weights::{Weight}}; +use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; /// Weight functions for `{{pallet}}`. From 493b58bd4a475080d428ce47193ee9ea9757a808 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 21 Sep 2022 19:31:22 +0200 Subject: [PATCH 159/348] Make automatic storage deposits resistant against changing deposit prices (#12083) * Require `FixedPointOperand` for Balances * Delay deposit calculation * Make refunds pro rata of consumed storage * Add storage migration * Fix clippy * Add liquidity checks * Fixe delayed deposit limit enforcement * Defer charges * Import Vec * Add try-runtime hooks for migration * Fix warning * Adapt to new OnRuntimeUpgrade trait * Apply suggestions from code review Co-authored-by: Sasha Gryaznov * fmt * Apply suggestions from code review Co-authored-by: Sasha Gryaznov * More suggestions from code review Co-authored-by: Sasha Gryaznov --- bin/node/runtime/src/lib.rs | 1 + frame/balances/src/lib.rs | 5 +- .../fixtures/create_storage_and_call.wat | 55 ++ frame/contracts/src/benchmarking/mod.rs | 18 +- frame/contracts/src/exec.rs | 23 +- frame/contracts/src/lib.rs | 33 +- frame/contracts/src/migration.rs | 198 ++++++- frame/contracts/src/storage.rs | 51 +- frame/contracts/src/storage/meter.rs | 495 ++++++++++-------- frame/contracts/src/tests.rs | 386 ++++++++++++-- frame/contracts/src/wasm/prepare.rs | 2 +- frame/support/src/traits/tokens/currency.rs | 4 +- 12 files changed, 958 insertions(+), 313 deletions(-) create mode 100644 frame/contracts/fixtures/create_storage_and_call.wat diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0d8f00550ed1a..d6a9798b4d9ca 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1706,6 +1706,7 @@ pub type Executive = frame_executive::Executive< type Migrations = ( pallet_nomination_pools::migration::v2::MigrateToV2, pallet_alliance::migration::Migration, + pallet_contracts::Migration, ); /// MMR helper types. diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 82d6f87dbde1a..4014efdd26e4e 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -185,7 +185,7 @@ use sp_runtime::{ AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Saturating, StaticLookup, Zero, }, - ArithmeticError, DispatchError, RuntimeDebug, + ArithmeticError, DispatchError, FixedPointOperand, RuntimeDebug, }; use sp_std::{cmp, fmt::Debug, mem, ops::BitOr, prelude::*, result}; pub use weights::WeightInfo; @@ -212,7 +212,8 @@ pub mod pallet { + MaybeSerializeDeserialize + Debug + MaxEncodedLen - + TypeInfo; + + TypeInfo + + FixedPointOperand; /// Handler for the unbalanced reduction when removing a dust account. 
type DustRemoval: OnUnbalanced>; diff --git a/frame/contracts/fixtures/create_storage_and_call.wat b/frame/contracts/fixtures/create_storage_and_call.wat new file mode 100644 index 0000000000000..2a1e53f7ce08a --- /dev/null +++ b/frame/contracts/fixtures/create_storage_and_call.wat @@ -0,0 +1,55 @@ +;; This calls another contract as passed as its account id. It also creates some storage. +(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32))) + (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "deploy")) + + (func (export "call") + ;; store length of input buffer + (i32.store (i32.const 0) (i32.const 512)) + + ;; copy input at address 4 + (call $seal_input (i32.const 4) (i32.const 0)) + + ;; create 4 byte of storage before calling + (call $seal_set_storage + (i32.const 0) ;; Pointer to storage key + (i32.const 0) ;; Pointer to value + (i32.const 4) ;; Size of value + ) + + ;; call passed contract + (call $assert (i32.eqz + (call $seal_call + (i32.const 0) ;; No flags + (i32.const 8) ;; Pointer to "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 512) ;; Pointer to the buffer with value to transfer + (i32.const 4) ;; Pointer to input data buffer address + (i32.const 4) ;; Length of input data buffer + (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + )) + + ;; create 8 byte of storage after calling + ;; item of 12 bytes because we override 4 bytes + (call $seal_set_storage + (i32.const 0) ;; Pointer to storage key + (i32.const 0) ;; Pointer to value + (i32.const 12) ;; Size of value + ) + ) +) diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 4a987fd8dde78..186ac6f63503e 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -87,7 +87,7 @@ where module: WasmModule, data: Vec, ) -> Result, &'static str> { - let value = T::Currency::minimum_balance(); + let value = Pallet::::min_balance(); T::Currency::make_free_balance_be(&caller, caller_funding::()); let salt = vec![0xff]; let addr = Contracts::::contract_address(&caller, &module.hash, &salt); @@ -257,7 +257,7 @@ benchmarks! { let instance = Contract::::with_caller( whitelisted_caller(), WasmModule::sized(c, Location::Deploy), vec![], )?; - let value = T::Currency::minimum_balance(); + let value = Pallet::::min_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); let callee = instance.addr; }: call(origin, callee, value, Weight::MAX, None, vec![]) @@ -280,7 +280,7 @@ benchmarks! { let c in 0 .. Perbill::from_percent(49).mul_ceil(T::MaxCodeLen::get()); let s in 0 .. code::max_pages::() * 64 * 1024; let salt = vec![42u8; s as usize]; - let value = T::Currency::minimum_balance(); + let value = Pallet::::min_balance(); let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::sized(c, Location::Call); @@ -307,7 +307,7 @@ benchmarks! { instantiate { let s in 0 .. 
code::max_pages::() * 64 * 1024; let salt = vec![42u8; s as usize]; - let value = T::Currency::minimum_balance(); + let value = Pallet::::min_balance(); let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::dummy(); @@ -338,7 +338,7 @@ benchmarks! { let instance = Contract::::with_caller( whitelisted_caller(), WasmModule::dummy(), vec![], )?; - let value = T::Currency::minimum_balance(); + let value = Pallet::::min_balance(); let origin = RawOrigin::Signed(instance.caller.clone()); let callee = instance.addr.clone(); let before = T::Currency::free_balance(&instance.account_id); @@ -767,13 +767,13 @@ benchmarks! { let instance = Contract::::new(code, vec![])?; let origin = RawOrigin::Signed(instance.caller.clone()); assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); - assert_eq!(T::Currency::free_balance(&instance.account_id), T::Currency::minimum_balance()); + assert_eq!(T::Currency::free_balance(&instance.account_id), Pallet::::min_balance()); assert_ne!(T::Currency::reserved_balance(&instance.account_id), 0u32.into()); }: call(origin, instance.addr.clone(), 0u32.into(), Weight::MAX, None, vec![]) verify { if r > 0 { assert_eq!(T::Currency::total_balance(&instance.account_id), 0u32.into()); - assert_eq!(T::Currency::total_balance(&beneficiary), T::Currency::minimum_balance()); + assert_eq!(T::Currency::total_balance(&beneficiary), Pallet::::min_balance()); } } @@ -1469,7 +1469,7 @@ benchmarks! { .collect::>(); let account_len = accounts.get(0).map(|i| i.encode().len()).unwrap_or(0); let account_bytes = accounts.iter().flat_map(|x| x.encode()).collect(); - let value = T::Currency::minimum_balance(); + let value = Pallet::::min_balance(); assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); @@ -1705,7 +1705,7 @@ benchmarks! { let hash_len = hashes.get(0).map(|x| x.encode().len()).unwrap_or(0); let hashes_bytes = hashes.iter().flat_map(|x| x.encode()).collect::>(); let hashes_len = hashes_bytes.len(); - let value = T::Currency::minimum_balance(); + let value = Pallet::::min_balance(); assert!(value > 0u32.into()); let value_bytes = value.encode(); let value_len = value_bytes.len(); diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 38b47a915cddc..6260dd41de707 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -813,6 +813,15 @@ where return Ok(output) } + // Storage limit is enforced as late as possible (when the last frame returns) so that + // the ordering of storage accesses does not matter. + if self.frames.is_empty() { + let frame = &mut self.first_frame; + frame.contract_info.load(&frame.account_id); + let contract = frame.contract_info.as_contract(); + frame.nested_storage.enforce_limit(contract)?; + } + let frame = self.top_frame(); let account_id = &frame.account_id; match (entry_point, delegated_code_hash) { @@ -911,12 +920,7 @@ where // it was invalidated. frame.contract_info.load(account_id); let mut contract = frame.contract_info.into_contract(); - prev.nested_storage.absorb( - frame.nested_storage, - &self.origin, - account_id, - contract.as_mut(), - ); + prev.nested_storage.absorb(frame.nested_storage, account_id, contract.as_mut()); // In case the contract wasn't terminated we need to persist changes made to it. 
if let Some(contract) = contract { @@ -954,7 +958,6 @@ where let mut contract = self.first_frame.contract_info.as_contract(); self.storage_meter.absorb( mem::take(&mut self.first_frame.nested_storage), - &self.origin, &self.first_frame.account_id, contract.as_deref_mut(), ); @@ -2354,10 +2357,10 @@ mod tests { let code_bob = MockLoader::insert(Call, |ctx, _| { if ctx.input_data[0] == 0 { let info = ctx.ext.contract_info(); - assert_eq!(info.storage_deposit, 0); - info.storage_deposit = 42; + assert_eq!(info.storage_byte_deposit, 0); + info.storage_byte_deposit = 42; assert_eq!(ctx.ext.call(Weight::zero(), CHARLIE, 0, vec![], true), exec_trapped()); - assert_eq!(ctx.ext.contract_info().storage_deposit, 42); + assert_eq!(ctx.ext.contract_info().storage_byte_deposit, 42); } exec_success() }); diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index abe91a9046ce2..fc44e4507ca00 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -87,12 +87,12 @@ mod gas; mod benchmarking; mod exec; +mod migration; mod schedule; mod storage; mod wasm; pub mod chain_extension; -pub mod migration; pub mod weights; #[cfg(test)] @@ -109,7 +109,10 @@ use codec::{Encode, HasCompact}; use frame_support::{ dispatch::{DispatchClass, Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo}, ensure, - traits::{ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time}, + traits::{ + tokens::fungible::Inspect, ConstU32, Contains, Currency, Get, Randomness, + ReservableCurrency, Time, + }, weights::Weight, BoundedVec, WeakBoundedVec, }; @@ -126,6 +129,7 @@ use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; pub use crate::{ exec::{Frame, VarSizedKey as StorageKey}, + migration::Migration, pallet::*, schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, }; @@ -225,7 +229,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(7); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(8); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -236,11 +240,12 @@ pub mod pallet { /// The time implementation used to supply timestamps to contracts through `seal_now`. type Time: Time; - /// The generator used to supply randomness to contracts through `seal_random`. + /// The generator used to supply randomness to contracts through `seal_random` type Randomness: Randomness; /// The currency in which fees are paid and contract balances are held. - type Currency: ReservableCurrency; + type Currency: ReservableCurrency + + Inspect>; /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -1036,7 +1041,7 @@ where }; let schedule = T::Schedule::get(); let result = ExecStack::>::run_call( - origin, + origin.clone(), dest, &mut gas_meter, &mut storage_meter, @@ -1045,7 +1050,11 @@ where data, debug_message, ); - InternalCallOutput { result, gas_meter, storage_deposit: storage_meter.into_deposit() } + InternalCallOutput { + result, + gas_meter, + storage_deposit: storage_meter.into_deposit(&origin), + } } /// Internal function that does the actual instantiation. 
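Note how the storage meter is no longer consumed silently: `into_deposit(&origin)` is where every deposit recorded across the call stack is finally settled, and (as the `storage/meter.rs` changes further below show) refunds are applied before charges so that a refund earned in one frame can pay for a charge incurred in another. The following is only a minimal, self-contained sketch of that settlement order, using placeholder types rather than the pallet's own `Charge`/`Deposit` machinery:

// Sketch only: the real implementation moves reserved currency between the
// origin and each contract; here a plain integer balance stands in for that.
enum Deposit {
    Charge(u128),
    Refund(u128),
}

fn settle(pending: &[Deposit], free_balance: &mut u128) {
    // Refunds first, so a later charge can be covered by an earlier refund.
    for d in pending {
        if let Deposit::Refund(amount) = d {
            *free_balance = free_balance.saturating_add(*amount);
        }
    }
    for d in pending {
        if let Deposit::Charge(amount) = d {
            *free_balance = free_balance.saturating_sub(*amount);
        }
    }
}
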
@@ -1089,7 +1098,7 @@ where value.saturating_add(extra_deposit), )?; let result = ExecStack::>::run_instantiate( - origin, + origin.clone(), executable, &mut gas_meter, &mut storage_meter, @@ -1100,17 +1109,23 @@ where debug_message, ); storage_deposit = storage_meter - .into_deposit() + .into_deposit(&origin) .saturating_add(&StorageDeposit::Charge(extra_deposit)); result }; InternalInstantiateOutput { result: try_exec(), gas_meter, storage_deposit } } + /// Deposit a pallet contracts event. Handles the conversion to the overarching event type. fn deposit_event(topics: Vec, event: Event) { >::deposit_event_indexed( &topics, ::RuntimeEvent::from(event).into(), ) } + + /// Return the existential deposit of [`Config::Currency`]. + fn min_balance() -> BalanceOf { + >>::minimum_balance() + } } diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index 0db285e81a91f..5ea821aac7682 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -18,37 +18,80 @@ use crate::{BalanceOf, CodeHash, Config, Pallet, TrieId, Weight}; use codec::{Decode, Encode}; use frame_support::{ - codec, pallet_prelude::*, storage::migration, storage_alias, traits::Get, Identity, - Twox64Concat, + codec, + pallet_prelude::*, + storage::migration, + storage_alias, + traits::{Get, OnRuntimeUpgrade}, + Identity, Twox64Concat, }; +use sp_runtime::traits::Saturating; use sp_std::{marker::PhantomData, prelude::*}; -/// Wrapper for all migrations of this pallet, based on `StorageVersion`. -pub fn migrate() -> Weight { - let version = StorageVersion::get::>(); - let mut weight = Weight::zero(); +/// Performs all necessary migrations based on `StorageVersion`. +pub struct Migration(PhantomData); +impl OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> Weight { + let version = StorageVersion::get::>(); + let mut weight = Weight::zero(); - if version < 4 { - weight = weight.saturating_add(v4::migrate::()); - StorageVersion::new(4).put::>(); - } + if version < 4 { + weight = weight.saturating_add(v4::migrate::()); + StorageVersion::new(4).put::>(); + } - if version < 5 { - weight = weight.saturating_add(v5::migrate::()); - StorageVersion::new(5).put::>(); - } + if version < 5 { + weight = weight.saturating_add(v5::migrate::()); + StorageVersion::new(5).put::>(); + } + + if version < 6 { + weight = weight.saturating_add(v6::migrate::()); + StorageVersion::new(6).put::>(); + } + + if version < 7 { + weight = weight.saturating_add(v7::migrate::()); + StorageVersion::new(7).put::>(); + } + + if version < 8 { + weight = weight.saturating_add(v8::migrate::()); + StorageVersion::new(8).put::>(); + } - if version < 6 { - weight = weight.saturating_add(v6::migrate::()); - StorageVersion::new(6).put::>(); + weight } - if version < 7 { - weight = weight.saturating_add(v7::migrate::()); - StorageVersion::new(7).put::>(); + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + let version = StorageVersion::get::>(); + + if version < 7 { + return Ok(vec![]) + } + + if version < 8 { + v8::pre_upgrade::()?; + } + + Ok(vec![]) } - weight + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + let version = StorageVersion::get::>(); + + if version < 7 { + return Ok(()) + } + + if version < 8 { + v8::post_upgrade::()?; + } + + Ok(()) + } } /// V4: `Schedule` is changed to be a config item rather than an in-storage value. 
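Since `migrate()` is no longer a free function, a runtime that previously called it from its own `on_runtime_upgrade` hook would now pass the exported `Migration` type to `Executive` instead. A rough sketch of that wiring follows, where `Runtime`, `Block` and `AllPalletsWithSystem` stand in for whatever the downstream runtime already defines (they are not part of this diff):

// Hypothetical runtime-side configuration, not part of this patch.
pub type Migrations = (pallet_contracts::Migration<Runtime>,);

pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
    Migrations,
>;
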
@@ -185,9 +228,9 @@ mod v6 { #[derive(Encode, Decode)] pub struct RawContractInfo { - trie_id: TrieId, - code_hash: CodeHash, - storage_deposit: Balance, + pub trie_id: TrieId, + pub code_hash: CodeHash, + pub storage_deposit: Balance, } #[derive(Encode, Decode)] @@ -199,7 +242,7 @@ mod v6 { refcount: u64, } - type ContractInfo = RawContractInfo, BalanceOf>; + pub type ContractInfo = RawContractInfo, BalanceOf>; #[storage_alias] type ContractInfoOf = StorageMap< @@ -266,3 +309,108 @@ mod v7 { T::DbWeight::get().reads_writes(1, 2) } } + +/// Update `ContractInfo` with new fields that track storage deposits. +mod v8 { + use super::*; + use sp_io::default_child_storage as child; + use v6::ContractInfo as OldContractInfo; + + #[derive(Encode, Decode)] + struct ContractInfo { + trie_id: TrieId, + code_hash: CodeHash, + storage_bytes: u32, + storage_items: u32, + storage_byte_deposit: BalanceOf, + storage_item_deposit: BalanceOf, + storage_base_deposit: BalanceOf, + } + + #[storage_alias] + type ContractInfoOf = + StorageMap, Twox64Concat, ::AccountId, V>; + + pub fn migrate() -> Weight { + let mut weight = Weight::zero(); + + >>::translate_values(|old: OldContractInfo| { + // Count storage items of this contract + let mut storage_bytes = 0u32; + let mut storage_items = 0u32; + let mut key = Vec::new(); + while let Some(next) = child::next_key(&old.trie_id, &key) { + key = next; + let mut val_out = []; + let len = child::read(&old.trie_id, &key, &mut val_out, 0) + .expect("The loop conditions checks for existence of the key; qed"); + storage_bytes.saturating_accrue(len); + storage_items.saturating_accrue(1); + } + + let storage_byte_deposit = + T::DepositPerByte::get().saturating_mul(storage_bytes.into()); + let storage_item_deposit = + T::DepositPerItem::get().saturating_mul(storage_items.into()); + let storage_base_deposit = old + .storage_deposit + .saturating_sub(storage_byte_deposit) + .saturating_sub(storage_item_deposit); + + // Reads: One read for each storage item plus the contract info itself. + // Writes: Only the new contract info. 
+ weight = weight + .saturating_add(T::DbWeight::get().reads_writes(u64::from(storage_items) + 1, 1)); + + Some(ContractInfo { + trie_id: old.trie_id, + code_hash: old.code_hash, + storage_bytes, + storage_items, + storage_byte_deposit, + storage_item_deposit, + storage_base_deposit, + }) + }); + + weight + } + + #[cfg(feature = "try-runtime")] + pub fn pre_upgrade() -> Result<(), &'static str> { + use frame_support::traits::ReservableCurrency; + for (key, value) in ContractInfoOf::>::iter() { + let reserved = T::Currency::reserved_balance(&key); + ensure!(reserved >= value.storage_deposit, "Reserved balance out of sync."); + } + Ok(()) + } + + #[cfg(feature = "try-runtime")] + pub fn post_upgrade() -> Result<(), &'static str> { + use frame_support::traits::ReservableCurrency; + for (key, value) in ContractInfoOf::>::iter() { + let reserved = T::Currency::reserved_balance(&key); + let stored = value + .storage_base_deposit + .saturating_add(value.storage_byte_deposit) + .saturating_add(value.storage_item_deposit); + ensure!(reserved >= stored, "Reserved balance out of sync."); + + let mut storage_bytes = 0u32; + let mut storage_items = 0u32; + let mut key = Vec::new(); + while let Some(next) = child::next_key(&value.trie_id, &key) { + key = next; + let mut val_out = []; + let len = child::read(&value.trie_id, &key, &mut val_out, 0) + .expect("The loop conditions checks for existence of the key; qed"); + storage_bytes.saturating_accrue(len); + storage_items.saturating_accrue(1); + } + ensure!(storage_bytes == value.storage_bytes, "Storage bytes do not match.",); + ensure!(storage_items == value.storage_items, "Storage items do not match.",); + } + Ok(()) + } +} diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index ec49a6325c125..c3fc0840d8649 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -34,31 +34,51 @@ use scale_info::TypeInfo; use sp_core::crypto::UncheckedFrom; use sp_io::KillStorageResult; use sp_runtime::{ - traits::{Hash, Zero}, + traits::{Hash, Saturating, Zero}, RuntimeDebug, }; use sp_std::{marker::PhantomData, prelude::*}; -pub type ContractInfo = RawContractInfo, BalanceOf>; - /// Information for managing an account and its sub trie abstraction. /// This is the required info to cache for an account. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -pub struct RawContractInfo { +#[scale_info(skip_type_params(T))] +pub struct ContractInfo { /// Unique ID for the subtree encoded as a bytes vector. pub trie_id: TrieId, /// The code associated with a given account. - pub code_hash: CodeHash, - /// The amount of balance that is currently deposited to pay for consumed storage. - pub storage_deposit: Balance, + pub code_hash: CodeHash, + /// How many bytes of storage are accumulated in this contract's child trie. + pub storage_bytes: u32, + /// How many items of storage are accumulated in this contract's child trie. + pub storage_items: u32, + /// This records to how much deposit the accumulated `storage_bytes` amount to. + pub storage_byte_deposit: BalanceOf, + /// This records to how much deposit the accumulated `storage_items` amount to. + pub storage_item_deposit: BalanceOf, + /// This records how much deposit is put down in order to pay for the contract itself. + /// + /// We need to store this information separately so it is not used when calculating any refunds + /// since the base deposit can only ever be refunded on contract termination. 
+ pub storage_base_deposit: BalanceOf, } -impl RawContractInfo { +impl ContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. #[cfg(test)] pub fn child_trie_info(&self) -> ChildInfo { child_trie_info(&self.trie_id[..]) } + + /// The deposit paying for the accumulated storage generated within the contract's child trie. + pub fn extra_deposit(&self) -> BalanceOf { + self.storage_byte_deposit.saturating_add(self.storage_item_deposit) + } + + /// Same as [`Self::extra_deposit`] but including the base deposit. + pub fn total_deposit(&self) -> BalanceOf { + self.extra_deposit().saturating_add(self.storage_base_deposit) + } } /// Associated child trie unique id is built from the hash part of the trie id. @@ -178,7 +198,7 @@ where }, (None, None) => (), } - storage_meter.charge(&diff)?; + storage_meter.charge(&diff); } match &new_value { @@ -200,14 +220,21 @@ where pub fn new_contract( account: &AccountIdOf, trie_id: TrieId, - ch: CodeHash, + code_hash: CodeHash, ) -> Result, DispatchError> { if >::contains_key(account) { return Err(Error::::DuplicateContract.into()) } - let contract = - ContractInfo:: { code_hash: ch, trie_id, storage_deposit: >::zero() }; + let contract = ContractInfo:: { + code_hash, + trie_id, + storage_bytes: 0, + storage_items: 0, + storage_byte_deposit: Zero::zero(), + storage_item_deposit: Zero::zero(), + storage_base_deposit: Zero::zero(), + }; Ok(contract) } diff --git a/frame/contracts/src/storage/meter.rs b/frame/contracts/src/storage/meter.rs index d89208812bcac..0a63eb42b86cb 100644 --- a/frame/contracts/src/storage/meter.rs +++ b/frame/contracts/src/storage/meter.rs @@ -17,17 +17,24 @@ //! This module contains functions to meter the storage deposit. -use crate::{storage::ContractInfo, BalanceOf, Config, Error}; +use crate::{storage::ContractInfo, BalanceOf, Config, Error, Inspect, Pallet}; use codec::Encode; use frame_support::{ dispatch::DispatchError, - traits::{tokens::BalanceStatus, Currency, ExistenceRequirement, Get, ReservableCurrency}, - DefaultNoBound, + ensure, + traits::{ + tokens::{BalanceStatus, WithdrawConsequence}, + Currency, ExistenceRequirement, Get, ReservableCurrency, + }, + DefaultNoBound, RuntimeDebugNoBound, }; use pallet_contracts_primitives::StorageDeposit as Deposit; use sp_core::crypto::UncheckedFrom; -use sp_runtime::traits::{Saturating, Zero}; -use sp_std::marker::PhantomData; +use sp_runtime::{ + traits::{Saturating, Zero}, + FixedPointNumber, FixedU128, +}; +use sp_std::{marker::PhantomData, vec::Vec}; /// Deposit that uses the native currency's balance type. pub type DepositOf = Deposit>; @@ -99,22 +106,25 @@ impl State for Root {} impl State for Nested {} /// A type that allows the metering of consumed or freed storage of a single contract call stack. -#[derive(DefaultNoBound)] -pub struct RawMeter, S: State> { +#[derive(DefaultNoBound, RuntimeDebugNoBound)] +pub struct RawMeter { /// The limit of how much balance this meter is allowed to consume. limit: BalanceOf, /// The amount of balance that was used in this meter and all of its already absorbed children. total_deposit: DepositOf, - /// The amount of balance that was used in this meter alone. - own_deposit: DepositOf, - /// Only when a contract was terminated we allow it to drop below the minimum balance. - terminated: bool, + /// The amount of storage changes that were recorded in this meter alone. + own_contribution: Contribution, + /// List of charges that should be applied at the end of a contract stack execution. 
+ /// + /// We only have one charge per contract hence the size of this vector is + /// limited by the maximum call depth. + charges: Vec>, /// Type parameters are only used in impls. _phantom: PhantomData<(E, S)>, } /// This type is used to describe a storage change when charging from the meter. -#[derive(Default)] +#[derive(Default, RuntimeDebugNoBound)] pub struct Diff { /// How many bytes were added to storage. pub bytes_added: u32, @@ -124,43 +134,120 @@ pub struct Diff { pub items_added: u32, /// How many storage items were removed from storage. pub items_removed: u32, - /// If set to true the derived deposit will always a `Charge` larger than the - /// the existential deposit. - pub require_ed: bool, } impl Diff { - /// Calculate how much of a charge or refund results from applying the diff. - pub fn to_deposit(&self) -> DepositOf { - let mut deposit = Deposit::default(); + /// Calculate how much of a charge or refund results from applying the diff and store it + /// in the passed `info` if any. + /// + /// # Note + /// + /// In case `None` is passed for `info` only charges are calculated. This is because refunds + /// are calculated pro rata of the existing storage within a contract and hence need extract + /// this information from the passed `info`. + pub fn update_contract(&self, info: Option<&mut ContractInfo>) -> DepositOf { let per_byte = T::DepositPerByte::get(); let per_item = T::DepositPerItem::get(); + let bytes_added = self.bytes_added.saturating_sub(self.bytes_removed); + let items_added = self.items_added.saturating_sub(self.items_removed); + let mut bytes_deposit = Deposit::Charge(per_byte.saturating_mul((bytes_added).into())); + let mut items_deposit = Deposit::Charge(per_item.saturating_mul((items_added).into())); + + // Without any contract info we can only calculate diffs which add storage + let info = if let Some(info) = info { + info + } else { + debug_assert_eq!(self.bytes_removed, 0); + debug_assert_eq!(self.items_removed, 0); + return bytes_deposit.saturating_add(&items_deposit) + }; - if self.bytes_added > self.bytes_removed { - deposit = deposit.saturating_add(&Deposit::Charge( - per_byte.saturating_mul((self.bytes_added - self.bytes_removed).into()), - )); - } else if self.bytes_removed > self.bytes_added { - deposit = deposit.saturating_add(&Deposit::Refund( - per_byte.saturating_mul((self.bytes_removed - self.bytes_added).into()), - )); + // Refunds are calculated pro rata based on the accumulated storage within the contract + let bytes_removed = self.bytes_removed.saturating_sub(self.bytes_added); + let items_removed = self.items_removed.saturating_sub(self.items_added); + let ratio = FixedU128::checked_from_rational(bytes_removed, info.storage_bytes) + .unwrap_or_default() + .min(FixedU128::from_u32(1)); + bytes_deposit = bytes_deposit + .saturating_add(&Deposit::Refund(ratio.saturating_mul_int(info.storage_byte_deposit))); + let ratio = FixedU128::checked_from_rational(items_removed, info.storage_items) + .unwrap_or_default() + .min(FixedU128::from_u32(1)); + items_deposit = items_deposit + .saturating_add(&Deposit::Refund(ratio.saturating_mul_int(info.storage_item_deposit))); + + // We need to update the contract info structure with the new deposits + info.storage_bytes = + info.storage_bytes.saturating_add(bytes_added).saturating_sub(bytes_removed); + info.storage_items = + info.storage_items.saturating_add(items_added).saturating_sub(items_removed); + match &bytes_deposit { + Deposit::Charge(amount) => + info.storage_byte_deposit = 
info.storage_byte_deposit.saturating_add(*amount), + Deposit::Refund(amount) => + info.storage_byte_deposit = info.storage_byte_deposit.saturating_sub(*amount), } + match &items_deposit { + Deposit::Charge(amount) => + info.storage_item_deposit = info.storage_item_deposit.saturating_add(*amount), + Deposit::Refund(amount) => + info.storage_item_deposit = info.storage_item_deposit.saturating_sub(*amount), + } + + bytes_deposit.saturating_add(&items_deposit) + } +} - if self.items_added > self.items_removed { - deposit = deposit.saturating_add(&Deposit::Charge( - per_item.saturating_mul((self.items_added - self.items_removed).into()), - )); - } else if self.items_removed > self.items_added { - deposit = deposit.saturating_add(&Deposit::Refund( - per_item.saturating_mul((self.items_removed - self.items_added).into()), - )); +impl Diff { + fn saturating_add(&self, rhs: &Self) -> Self { + Self { + bytes_added: self.bytes_added.saturating_add(rhs.bytes_added), + bytes_removed: self.bytes_removed.saturating_add(rhs.bytes_removed), + items_added: self.items_added.saturating_add(rhs.items_added), + items_removed: self.items_removed.saturating_add(rhs.items_removed), } + } +} + +/// Records information to charge or refund a plain account. +/// +/// All the charges are deferred to the end of a whole call stack. Reason is that by doing +/// this we can do all the refunds before doing any charge. This way a plain account can use +/// more deposit than it has balance as along as it is covered by a refund. This +/// essentially makes the order of storage changes irrelevant with regard to the deposit system. +#[derive(RuntimeDebugNoBound, Clone)] +struct Charge { + contract: T::AccountId, + amount: DepositOf, + terminated: bool, +} + +/// Records the storage changes of a storage meter. +#[derive(RuntimeDebugNoBound)] +enum Contribution { + /// The contract the meter belongs to is alive and accumulates changes using a [`Diff`]. + Alive(Diff), + /// The meter was checked against its limit using [`RawMeter::enforce_limit`] at the end of + /// its execution. In this process the [`Diff`] was converted into a [`Deposit`]. + Checked(DepositOf), + /// The contract was terminated. In this process the [`Diff`] was converted into a [`Deposit`] + /// in order to calculate the refund. + Terminated(DepositOf), +} - if self.require_ed { - deposit = deposit.max(Deposit::Charge(T::Currency::minimum_balance())) +impl Contribution { + /// See [`Diff::update_contract`]. + fn update_contract(&self, info: Option<&mut ContractInfo>) -> DepositOf { + match self { + Self::Alive(diff) => diff.update_contract::(info), + Self::Terminated(deposit) | Self::Checked(deposit) => deposit.clone(), } + } +} - deposit +impl Default for Contribution { + fn default() -> Self { + Self::Alive(Default::default()) } } @@ -178,6 +265,7 @@ where /// usage for this sub call separately. This is necessary because we want to exchange balance /// with the current contract we are interacting with. pub fn nested(&self) -> RawMeter { + debug_assert!(self.is_alive()); RawMeter { limit: self.available(), ..Default::default() } } @@ -192,45 +280,28 @@ where /// /// # Parameters /// - /// `absorbed`: The child storage meter that should be absorbed. - /// `origin`: The origin that spawned the original root meter. - /// `contract`: The contract that this sub call belongs to. - /// `info`: The info of the contract in question. `None` if the contract was terminated. + /// - `absorbed`: The child storage meter that should be absorbed. 
+ /// - `origin`: The origin that spawned the original root meter. + /// - `contract`: The contract that this sub call belongs to. + /// - `info`: The info of the contract in question. `None` if the contract was terminated. pub fn absorb( &mut self, - mut absorbed: RawMeter, - origin: &T::AccountId, + absorbed: RawMeter, contract: &T::AccountId, info: Option<&mut ContractInfo>, ) { - // Absorbing from an existing (non terminated) contract. - if let Some(info) = info { - match &mut absorbed.own_deposit { - Deposit::Charge(amount) => - info.storage_deposit = info.storage_deposit.saturating_add(*amount), - Deposit::Refund(amount) => { - // We need to make sure to never refund more than what was deposited and - // still leave the existential deposit inside the contract's account. - // This case can happen when costs change due to a runtime upgrade where - // increased costs could remove an account due to refunds. - let amount = { - let corrected_amount = (*amount).min( - info.storage_deposit.saturating_sub(T::Currency::minimum_balance()), - ); - let correction = (*amount).saturating_sub(corrected_amount); - absorbed.total_deposit = - absorbed.total_deposit.saturating_sub(&Deposit::Refund(correction)); - *amount = corrected_amount; - corrected_amount - }; - info.storage_deposit = info.storage_deposit.saturating_sub(amount); - }, - } - } - - self.total_deposit = self.total_deposit.saturating_add(&absorbed.total_deposit); - if !absorbed.own_deposit.is_zero() { - E::charge(origin, contract, &absorbed.own_deposit, absorbed.terminated); + let own_deposit = absorbed.own_contribution.update_contract(info); + self.total_deposit = self + .total_deposit + .saturating_add(&absorbed.total_deposit) + .saturating_add(&own_deposit); + if !own_deposit.is_zero() { + self.charges.extend_from_slice(&absorbed.charges); + self.charges.push(Charge { + contract: contract.clone(), + amount: own_deposit, + terminated: absorbed.is_terminated(), + }); } } @@ -238,6 +309,16 @@ where fn available(&self) -> BalanceOf { self.total_deposit.available(&self.limit) } + + /// True if the contract is alive. + fn is_alive(&self) -> bool { + matches!(self.own_contribution, Contribution::Alive(_)) + } + + /// True if the contract is terminated. + fn is_terminated(&self) -> bool { + matches!(self.own_contribution, Contribution::Terminated(_)) + } } /// Functions that only apply to the root state. @@ -260,11 +341,18 @@ where } /// The total amount of deposit that should change hands as result of the execution - /// that this meter was passed into. + /// that this meter was passed into. This will also perform all the charges accumulated + /// in the whole contract stack. /// /// This drops the root meter in order to make sure it is only called when the whole /// execution did finish. - pub fn into_deposit(self) -> DepositOf { + pub fn into_deposit(self, origin: &T::AccountId) -> DepositOf { + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Refund(_))) { + E::charge(origin, &charge.contract, &charge.amount, charge.terminated); + } + for charge in self.charges.iter().filter(|c| matches!(c.amount, Deposit::Charge(_))) { + E::charge(origin, &charge.contract, &charge.amount, charge.terminated); + } self.total_deposit } } @@ -277,18 +365,12 @@ where E: Ext, { /// Try to charge the `diff` from the meter. Fails if this would exceed the original limit. 
- pub fn charge(&mut self, diff: &Diff) -> Result, DispatchError> { - debug_assert!(!self.terminated); - let deposit = diff.to_deposit::(); - let total_deposit = self.total_deposit.saturating_add(&deposit); - if let Deposit::Charge(amount) = total_deposit { - if amount > self.limit { - return Err(>::StorageDepositLimitExhausted.into()) - } - } - self.total_deposit = total_deposit; - self.own_deposit = self.own_deposit.saturating_add(&deposit); - Ok(deposit) + pub fn charge(&mut self, diff: &Diff) { + debug_assert!(self.is_alive()); + match &mut self.own_contribution { + Contribution::Alive(own) => *own = own.saturating_add(diff), + _ => panic!("Charge is never called after termination; qed"), + }; } /// Charge from `origin` a storage deposit for contract instantiation. @@ -300,25 +382,22 @@ where contract: &T::AccountId, info: &mut ContractInfo, ) -> Result, DispatchError> { - debug_assert!(!self.terminated); - let deposit = Diff { - bytes_added: info.encoded_size() as u32, - items_added: 1, - require_ed: true, - ..Default::default() + debug_assert!(self.is_alive()); + let mut deposit = + Diff { bytes_added: info.encoded_size() as u32, items_added: 1, ..Default::default() } + .update_contract::(None); + + // Instantiate needs to transfer the minimum balance at least in order to pull the + // contract's account into existence. + deposit = deposit.max(Deposit::Charge(Pallet::::min_balance())); + if deposit.charge_or_zero() > self.limit { + return Err(>::StorageDepositLimitExhausted.into()) } - .to_deposit::(); - debug_assert!(matches!(deposit, Deposit::Charge(_))); - // We do not increase `own_deposit` because this will be charged later when the contract - // execution does conclude. - let total_deposit = self.total_deposit.saturating_add(&deposit); - if let Deposit::Charge(amount) = &total_deposit { - if amount > &self.limit { - return Err(>::StorageDepositLimitExhausted.into()) - } - } - info.storage_deposit = info.storage_deposit.saturating_add(deposit.charge_or_zero()); - self.total_deposit = total_deposit; + + // We do not increase `own_contribution` because this will be charged later when the + // contract execution does conclude and hence would lead to a double charge. + self.total_deposit = deposit.clone(); + info.storage_base_deposit = deposit.charge_or_zero(); if !deposit.is_zero() { // We need to charge immediately so that the account is created before the `value` // is transferred from the caller to the contract. @@ -331,34 +410,55 @@ where /// /// This will manipulate the meter so that all storage deposit accumulated in /// `contract_info` will be refunded to the `origin` of the meter. - pub fn terminate(&mut self, contract_info: &ContractInfo) { - debug_assert!(!self.terminated); - let refund = Deposit::Refund(contract_info.storage_deposit); - - // The deposit for `own_deposit` isn't persisted into the contract info until the current - // frame is dropped. This means that whatever changes were introduced during the - // current frame are dicarded when terminating. - self.total_deposit = - self.total_deposit.saturating_add(&refund).saturating_sub(&self.own_deposit); - self.own_deposit = refund; - self.terminated = true; + pub fn terminate(&mut self, info: &ContractInfo) { + debug_assert!(self.is_alive()); + self.own_contribution = Contribution::Terminated(Deposit::Refund(info.total_deposit())); + } + + /// [`Self::charge`] does not enforce the storage limit since we want to do this check as late + /// as possible to allow later refunds to offset earlier charges. 
+ /// + /// # Note + /// + /// We only need to call this **once** for every call stack and not for every cross contract + /// call. Hence this is only called when the last call frame returns. + pub fn enforce_limit( + &mut self, + info: Option<&mut ContractInfo>, + ) -> Result<(), DispatchError> { + let deposit = self.own_contribution.update_contract(info); + let total_deposit = self.total_deposit.saturating_add(&deposit); + // We don't want to override a `Terminated` with a `Checked`. + if self.is_alive() { + self.own_contribution = Contribution::Checked(deposit); + } + if let Deposit::Charge(amount) = total_deposit { + if amount > self.limit { + return Err(>::StorageDepositLimitExhausted.into()) + } + } + Ok(()) } } -impl Ext for ReservingExt { +impl Ext for ReservingExt +where + T: Config, + T::AccountId: UncheckedFrom + AsRef<[u8]>, +{ fn check_limit( origin: &T::AccountId, limit: Option>, min_leftover: BalanceOf, ) -> Result, DispatchError> { - let max = T::Currency::free_balance(origin) - .saturating_sub(T::Currency::minimum_balance()) - .saturating_sub(min_leftover); - match limit { - Some(limit) if limit <= max => Ok(limit), - None => Ok(max), - _ => Err(>::StorageDepositNotEnoughFunds.into()), - } + let max = T::Currency::reducible_balance(origin, true).saturating_sub(min_leftover); + let limit = limit.unwrap_or(max); + ensure!( + limit <= max && + matches!(T::Currency::can_withdraw(origin, limit), WithdrawConsequence::Success), + >::StorageDepositNotEnoughFunds, + ); + Ok(limit) } fn charge( @@ -393,6 +493,9 @@ impl Ext for ReservingExt { "Failed to transfer storage deposit {:?} from origin {:?} to contract {:?}: {:?}", amount, origin, contract, err, ); + if cfg!(debug_assertions) { + panic!("Unable to collect storage deposit. This is a bug."); + } } }, // For `Refund(_)` no error happen because the initial value transfer from the @@ -411,7 +514,7 @@ impl Ext for ReservingExt { // refund because we consider this unexpected behaviour. *amount.min( &T::Currency::reserved_balance(contract) - .saturating_sub(T::Currency::minimum_balance()), + .saturating_sub(Pallet::::min_balance()), ) }; let result = @@ -422,6 +525,9 @@ impl Ext for ReservingExt { "Failed to repatriate storage deposit {:?} from contract {:?} to origin {:?}: {:?}", amount, contract, origin, result, ); + if cfg!(debug_assertions) { + panic!("Unable to refund storage deposit. 
This is a bug."); + } } }, }; @@ -513,14 +619,26 @@ mod tests { TestExtTestValue::mutate(|ext| ext.clear()) } - fn new_info(deposit: BalanceOf) -> ContractInfo { + #[derive(Default)] + struct StorageInfo { + bytes: u32, + items: u32, + bytes_deposit: BalanceOf, + items_deposit: BalanceOf, + } + + fn new_info(info: StorageInfo) -> ContractInfo { use crate::storage::Storage; use sp_runtime::traits::Hash; ContractInfo:: { trie_id: >::generate_trie_id(&ALICE, 42), code_hash: ::Hashing::hash(b"42"), - storage_deposit: deposit, + storage_bytes: info.bytes, + storage_items: info.items, + storage_byte_deposit: info.bytes_deposit, + storage_item_deposit: info.items_deposit, + storage_base_deposit: Default::default(), } } @@ -548,8 +666,8 @@ mod tests { // an empty charge does not create a `Charge` entry let mut nested0 = meter.nested(); - nested0.charge(&Default::default()).unwrap(); - meter.absorb(nested0, &ALICE, &BOB, None); + nested0.charge(&Default::default()); + meter.absorb(nested0, &BOB, None); assert_eq!( TestExtTestValue::get(), @@ -560,79 +678,49 @@ mod tests { ) } - #[test] - fn existential_deposit_works() { - clear_ext(); - - let mut meter = TestMeter::new(&ALICE, Some(1_000), 0).unwrap(); - assert_eq!(meter.available(), 1_000); - - // a `Refund` will be turned into a `Charge(ed)` which is intended behaviour - let mut nested0 = meter.nested(); - nested0.charge(&Diff { require_ed: true, ..Default::default() }).unwrap(); - nested0 - .charge(&Diff { bytes_removed: 1, require_ed: true, ..Default::default() }) - .unwrap(); - meter.absorb(nested0, &ALICE, &BOB, None); - - assert_eq!( - TestExtTestValue::get(), - TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], - charges: vec![Charge { - origin: ALICE, - contract: BOB, - amount: Deposit::Charge(::Currency::minimum_balance() * 2), - terminated: false, - }] - } - ) - } - #[test] fn charging_works() { clear_ext(); - let min_balance = ::Currency::minimum_balance(); - - let mut meter = TestMeter::new(&ALICE, Some(1_000), 0).unwrap(); - assert_eq!(meter.available(), 1_000); + let mut meter = TestMeter::new(&ALICE, Some(100), 0).unwrap(); + assert_eq!(meter.available(), 100); - let mut nested0_info = new_info(100); + let mut nested0_info = + new_info(StorageInfo { bytes: 100, items: 5, bytes_deposit: 100, items_deposit: 10 }); let mut nested0 = meter.nested(); - nested0 - .charge(&Diff { - bytes_added: 10, - bytes_removed: 5, - items_added: 1, - items_removed: 2, - ..Default::default() - }) - .unwrap(); - nested0.charge(&Diff { bytes_removed: 1, ..Default::default() }).unwrap(); + nested0.charge(&Diff { + bytes_added: 108, + bytes_removed: 5, + items_added: 1, + items_removed: 2, + }); + nested0.charge(&Diff { bytes_removed: 99, ..Default::default() }); - let mut nested1_info = new_info(50); + let mut nested1_info = + new_info(StorageInfo { bytes: 100, items: 10, bytes_deposit: 100, items_deposit: 20 }); let mut nested1 = nested0.nested(); - nested1.charge(&Diff { items_removed: 5, ..Default::default() }).unwrap(); - nested0.absorb(nested1, &ALICE, &CHARLIE, Some(&mut nested1_info)); + nested1.charge(&Diff { items_removed: 5, ..Default::default() }); + nested0.absorb(nested1, &CHARLIE, Some(&mut nested1_info)); - // Trying to refund more than is available in the contract will cap the charge - // to (deposit_in_contract - ed). 
- let mut nested2_info = new_info(5); + let mut nested2_info = + new_info(StorageInfo { bytes: 100, items: 7, bytes_deposit: 100, items_deposit: 20 }); let mut nested2 = nested0.nested(); - nested2.charge(&Diff { bytes_removed: 7, ..Default::default() }).unwrap(); - nested0.absorb(nested2, &ALICE, &CHARLIE, Some(&mut nested2_info)); + nested2.charge(&Diff { items_removed: 7, ..Default::default() }); + nested0.absorb(nested2, &CHARLIE, Some(&mut nested2_info)); + + nested0.enforce_limit(Some(&mut nested0_info)).unwrap(); + meter.absorb(nested0, &BOB, Some(&mut nested0_info)); - meter.absorb(nested0, &ALICE, &BOB, Some(&mut nested0_info)); + meter.into_deposit(&ALICE); - assert_eq!(nested0_info.storage_deposit, 102); - assert_eq!(nested1_info.storage_deposit, 40); - assert_eq!(nested2_info.storage_deposit, min_balance); + assert_eq!(nested0_info.extra_deposit(), 112); + assert_eq!(nested1_info.extra_deposit(), 110); + assert_eq!(nested2_info.extra_deposit(), 100); assert_eq!( TestExtTestValue::get(), TestExt { - limit_checks: vec![LimitCheck { origin: ALICE, limit: 1_000, min_leftover: 0 }], + limit_checks: vec![LimitCheck { origin: ALICE, limit: 100, min_leftover: 0 }], charges: vec![ Charge { origin: ALICE, @@ -643,7 +731,7 @@ mod tests { Charge { origin: ALICE, contract: CHARLIE, - amount: Deposit::Refund(4), + amount: Deposit::Refund(20), terminated: false }, Charge { @@ -665,26 +753,25 @@ mod tests { assert_eq!(meter.available(), 1_000); let mut nested0 = meter.nested(); - nested0 - .charge(&Diff { - bytes_added: 5, - bytes_removed: 1, - items_added: 3, - items_removed: 1, - ..Default::default() - }) - .unwrap(); - nested0.charge(&Diff { items_added: 2, ..Default::default() }).unwrap(); - - let nested1_info = new_info(400); + nested0.charge(&Diff { + bytes_added: 5, + bytes_removed: 1, + items_added: 3, + items_removed: 1, + }); + nested0.charge(&Diff { items_added: 2, ..Default::default() }); + + let mut nested1_info = + new_info(StorageInfo { bytes: 100, items: 10, bytes_deposit: 100, items_deposit: 20 }); let mut nested1 = nested0.nested(); - nested1.charge(&Diff { items_removed: 5, ..Default::default() }).unwrap(); - nested1.charge(&Diff { bytes_added: 20, ..Default::default() }).unwrap(); + nested1.charge(&Diff { items_removed: 5, ..Default::default() }); + nested1.charge(&Diff { bytes_added: 20, ..Default::default() }); nested1.terminate(&nested1_info); - nested0.absorb(nested1, &ALICE, &CHARLIE, None); + nested0.enforce_limit(Some(&mut nested1_info)).unwrap(); + nested0.absorb(nested1, &CHARLIE, None); - meter.absorb(nested0, &ALICE, &BOB, None); - drop(meter); + meter.absorb(nested0, &BOB, None); + meter.into_deposit(&ALICE); assert_eq!( TestExtTestValue::get(), @@ -694,7 +781,7 @@ mod tests { Charge { origin: ALICE, contract: CHARLIE, - amount: Deposit::Refund(400), + amount: Deposit::Refund(120), terminated: true }, Charge { diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 12029abd59e46..a56e4f5564845 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -23,6 +23,7 @@ use crate::{ }, exec::{FixSizedKey, Frame}, storage::Storage, + tests::test_utils::{get_contract, get_contract_checked}, wasm::{PrefabWasmModule, ReturnCode as RuntimeReturnCode}, weights::WeightInfo, BalanceOf, Code, CodeStorage, Config, ContractInfoOf, DefaultAddressGenerator, @@ -36,8 +37,8 @@ use frame_support::{ parameter_types, storage::child, traits::{ - BalanceStatus, ConstU32, ConstU64, Contains, Currency, Get, OnIdle, OnInitialize, - 
ReservableCurrency, + BalanceStatus, ConstU32, ConstU64, Contains, Currency, Get, LockableCurrency, OnIdle, + OnInitialize, ReservableCurrency, WithdrawReasons, }, weights::{constants::WEIGHT_PER_SECOND, Weight}, }; @@ -76,7 +77,9 @@ frame_support::construct_runtime!( #[macro_use] pub mod test_utils { use super::{Balances, Hash, SysConfig, Test}; - use crate::{exec::AccountIdOf, storage::Storage, CodeHash, Config, ContractInfoOf, Nonce}; + use crate::{ + exec::AccountIdOf, storage::Storage, CodeHash, Config, ContractInfo, ContractInfoOf, Nonce, + }; use codec::Encode; use frame_support::traits::Currency; @@ -97,6 +100,12 @@ pub mod test_utils { pub fn get_balance(who: &AccountIdOf) -> u64 { Balances::free_balance(who) } + pub fn get_contract(addr: &AccountIdOf) -> ContractInfo { + get_contract_checked(addr).unwrap() + } + pub fn get_contract_checked(addr: &AccountIdOf) -> Option> { + ContractInfoOf::::get(addr) + } pub fn hash(s: &S) -> <::Hashing as Hash>::Output { <::Hashing as Hash>::hash_of(s) } @@ -105,6 +114,7 @@ pub mod test_utils { assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32); }}; } + macro_rules! assert_refcount { ( $code_hash:expr , $should:expr $(,)? ) => {{ let is = crate::OwnerInfoOf::::get($code_hash).map(|m| m.refcount()).unwrap(); @@ -693,7 +703,7 @@ fn instantiate_unique_trie_id() { vec![], vec![], )); - let trie_id = ContractInfoOf::::get(&addr).unwrap().trie_id; + let trie_id = get_contract(&addr).trie_id; // Try to instantiate it again without termination should yield an error. assert_err_ignore_postinfo!( @@ -731,7 +741,7 @@ fn instantiate_unique_trie_id() { )); // Trie ids shouldn't match or we might have a collision - assert_ne!(trie_id, ContractInfoOf::::get(&addr).unwrap().trie_id); + assert_ne!(trie_id, get_contract(&addr).trie_id); }); } @@ -752,7 +762,7 @@ fn storage_max_value_limit() { vec![], )); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - ContractInfoOf::::get(&addr).unwrap(); + get_contract(&addr); // Call contract with allowed storage value. assert_ok!(Contracts::call( @@ -962,7 +972,7 @@ fn cannot_self_destruct_through_draning() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. - assert_matches!(ContractInfoOf::::get(&addr), Some(_)); + get_contract(&addr); // Call BOB which makes it send all funds to the zero address // The contract code asserts that the transfer was successful @@ -1003,11 +1013,11 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated and has the minimum balance - let info = ContractInfoOf::::get(&addr).unwrap(); - assert_eq!(info.storage_deposit, min_balance); + assert_eq!(get_contract(&addr).total_deposit(), min_balance); + assert_eq!(get_contract(&addr).extra_deposit(), 0); assert_eq!(::Currency::total_balance(&addr), min_balance); - // Create 100 bytes of storage with a price of per byte + // Create 100 bytes of storage with a price of per byte and a single storage item of price 2 assert_ok!(Contracts::call( RuntimeOrigin::signed(ALICE), addr.clone(), @@ -1016,9 +1026,10 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { None, 100u32.to_le_bytes().to_vec() )); + assert_eq!(get_contract(&addr).total_deposit(), min_balance + 102); - // Increase the byte price and trigger a refund. 
This could potentially destroy the account - // because the refund removes the reserved existential deposit. This should not happen. + // Increase the byte price and trigger a refund. This should not have any influence because + // the removal is pro rata and exactly those 100 bytes should have been removed. DEPOSIT_PER_BYTE.with(|c| *c.borrow_mut() = 500); assert_ok!(Contracts::call( RuntimeOrigin::signed(ALICE), @@ -1032,8 +1043,9 @@ fn cannot_self_destruct_through_storage_refund_after_price_change() { // Make sure the account wasn't removed by the refund assert_eq!( ::Currency::total_balance(&addr), - ::Currency::minimum_balance(), + get_contract(&addr).total_deposit(), ); + assert_eq!(get_contract(&addr).extra_deposit(), 2,); }); } @@ -1086,9 +1098,6 @@ fn cannot_self_destruct_by_refund_after_slash() { // Make sure the account kept the minimum balance and was not destroyed assert_eq!(::Currency::total_balance(&addr), min_balance); - // even though it was not charged it is still substracted from the storage deposit tracker - assert_eq!(ContractInfoOf::::get(&addr).unwrap().storage_deposit, 550); - assert_eq!( System::events(), vec![ @@ -1142,7 +1151,7 @@ fn cannot_self_destruct_while_live() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. - assert_matches!(ContractInfoOf::::get(&addr), Some(_)); + get_contract(&addr); // Call BOB with input data, forcing it make a recursive call to itself to // self-destruct, resulting in a trap. @@ -1159,7 +1168,7 @@ fn cannot_self_destruct_while_live() { ); // Check that BOB is still there. - assert_matches!(ContractInfoOf::::get(&addr), Some(_)); + get_contract(&addr); }); } @@ -1183,7 +1192,7 @@ fn self_destruct_works() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. - assert_matches!(ContractInfoOf::::get(&addr), Some(_)); + get_contract(&addr); // Drop all previous events initialize_block(2); @@ -1198,7 +1207,7 @@ fn self_destruct_works() { assert_refcount!(&code_hash, 0); // Check that account is gone - assert!(ContractInfoOf::::get(&addr).is_none()); + assert!(get_contract_checked(&addr).is_none()); assert_eq!(Balances::total_balance(&addr), 0); // check that the beneficiary (django) got remaining balance @@ -1289,7 +1298,7 @@ fn destroy_contract_and_transfer_funds() { let addr_charlie = Contracts::contract_address(&addr_bob, &callee_code_hash, &[0x47, 0x11]); // Check that the CHARLIE contract has been instantiated. - assert_matches!(ContractInfoOf::::get(&addr_charlie), Some(_)); + get_contract(&addr_charlie); // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. assert_ok!(Contracts::call( @@ -1302,7 +1311,7 @@ fn destroy_contract_and_transfer_funds() { )); // Check that CHARLIE has moved on to the great beyond (ie. died). 
- assert!(ContractInfoOf::::get(&addr_charlie).is_none()); + assert!(get_contract_checked(&addr_charlie).is_none()); }); } @@ -1862,7 +1871,7 @@ fn lazy_removal_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let info = >::get(&addr).unwrap(); + let info = get_contract(&addr); let trie = &info.child_trie_info(); // Put value into the contracts child trie @@ -1931,7 +1940,7 @@ fn lazy_batch_removal_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[i]); - let info = >::get(&addr).unwrap(); + let info = get_contract(&addr); let trie = &info.child_trie_info(); // Put value into the contracts child trie @@ -1993,7 +2002,7 @@ fn lazy_removal_partial_remove_works() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let info = >::get(&addr).unwrap(); + let info = get_contract(&addr); // Put value into the contracts child trie for val in &vals { @@ -2105,7 +2114,7 @@ fn lazy_removal_does_no_run_on_low_remaining_weight() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let info = >::get(&addr).unwrap(); + let info = get_contract(&addr); let trie = &info.child_trie_info(); // Put value into the contracts child trie @@ -2173,7 +2182,7 @@ fn lazy_removal_does_not_use_all_weight() { ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let info = >::get(&addr).unwrap(); + let info = get_contract(&addr); let (weight_per_key, max_keys) = Storage::::deletion_budget(1, weight_limit); // We create a contract with one less storage item than we can remove within the limit @@ -2264,7 +2273,7 @@ fn deletion_queue_full() { ); // Contract should exist because removal failed - >::get(&addr).unwrap(); + get_contract(&addr); }); } @@ -2909,7 +2918,7 @@ fn instantiate_with_zero_balance_works() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. - assert_matches!(ContractInfoOf::::get(&addr), Some(_)); + get_contract(&addr); // Make sure the account exists even though no free balance was send assert_eq!(::Currency::free_balance(&addr), 0,); @@ -3002,7 +3011,7 @@ fn instantiate_with_below_existential_deposit_works() { let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Check that the BOB contract has been instantiated. 
- assert_matches!(ContractInfoOf::::get(&addr), Some(_)); + get_contract(&addr); // Make sure the account exists even though no free balance was send assert_eq!(::Currency::free_balance(&addr), 50,); @@ -3114,7 +3123,7 @@ fn storage_deposit_works() { // 4 is for creating 2 storage items let charged0 = 4 + 1_000 + 5_000; deposit += charged0; - assert_eq!(>::get(&addr).unwrap().storage_deposit, deposit); + assert_eq!(get_contract(&addr).total_deposit(), deposit); // Add more storage (but also remove some) assert_ok!(Contracts::call( @@ -3127,7 +3136,7 @@ fn storage_deposit_works() { )); let charged1 = 1_000 - 100; deposit += charged1; - assert_eq!(>::get(&addr).unwrap().storage_deposit, deposit); + assert_eq!(get_contract(&addr).total_deposit(), deposit); // Remove more storage (but also add some) assert_ok!(Contracts::call( @@ -3138,9 +3147,10 @@ fn storage_deposit_works() { None, (2_100u32, 900u32).encode(), )); - let refunded0 = 4_000 - 100; + // -1 for numeric instability + let refunded0 = 4_000 - 100 - 1; deposit -= refunded0; - assert_eq!(>::get(&addr).unwrap().storage_deposit, deposit); + assert_eq!(get_contract(&addr).total_deposit(), deposit); assert_eq!( System::events(), @@ -3253,7 +3263,7 @@ fn set_code_extrinsic() { // Drop previous events initialize_block(2); - assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); + assert_eq!(get_contract(&addr).code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); @@ -3262,7 +3272,7 @@ fn set_code_extrinsic() { Contracts::set_code(RuntimeOrigin::signed(ALICE), addr.clone(), new_code_hash), sp_runtime::traits::BadOrigin, ); - assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); + assert_eq!(get_contract(&addr).code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); assert_eq!(System::events(), vec![],); @@ -3272,7 +3282,7 @@ fn set_code_extrinsic() { Contracts::set_code(RuntimeOrigin::root(), BOB, new_code_hash), >::ContractNotFound, ); - assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); + assert_eq!(get_contract(&addr).code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); assert_eq!(System::events(), vec![],); @@ -3282,14 +3292,14 @@ fn set_code_extrinsic() { Contracts::set_code(RuntimeOrigin::root(), addr.clone(), Default::default()), >::CodeNotFound, ); - assert_eq!(>::get(&addr).unwrap().code_hash, code_hash); + assert_eq!(get_contract(&addr).code_hash, code_hash); assert_refcount!(&code_hash, 1); assert_refcount!(&new_code_hash, 0); assert_eq!(System::events(), vec![],); // successful call assert_ok!(Contracts::set_code(RuntimeOrigin::root(), addr.clone(), new_code_hash)); - assert_eq!(>::get(&addr).unwrap().code_hash, new_code_hash); + assert_eq!(get_contract(&addr).code_hash, new_code_hash); assert_refcount!(&code_hash, 0); assert_refcount!(&new_code_hash, 1); assert_eq!( @@ -3639,3 +3649,301 @@ fn set_code_hash() { ); }); } + +#[test] +fn storage_deposit_limit_is_enforced() { + let (wasm, code_hash) = compile_module::("store").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let min_balance = ::Currency::minimum_balance(); + + // Instantiate the BOB contract. 
+ assert_ok!(Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + + // Check that the BOB contract has been instantiated and has the minimum balance + assert_eq!(get_contract(&addr).total_deposit(), min_balance); + assert_eq!(::Currency::total_balance(&addr), min_balance); + + // Create 100 bytes of storage with a price of per byte + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(1)), + 100u32.to_le_bytes().to_vec() + ), + >::StorageDepositLimitExhausted, + ); + }); +} + +#[test] +fn storage_deposit_limit_is_enforced_late() { + let (wasm_caller, code_hash_caller) = + compile_module::("create_storage_and_call").unwrap(); + let (wasm_callee, code_hash_callee) = compile_module::("store").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + // Create both contracts: Constructors do nothing. + assert_ok!(Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + wasm_caller, + vec![], + vec![], + )); + let addr_caller = Contracts::contract_address(&ALICE, &code_hash_caller, &[]); + assert_ok!(Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + wasm_callee, + vec![], + vec![], + )); + let addr_callee = Contracts::contract_address(&ALICE, &code_hash_callee, &[]); + + // Create 100 bytes of storage with a price of per byte + // This is 100 Balance + 2 Balance for the item + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr_callee.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(102)), + 100u32.to_le_bytes().to_vec() + )); + + // We do not remove any storage but require 14 bytes of storage for the new + // storage created in the immediate contract. + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(ALICE), + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(5)), + 100u32 + .to_le_bytes() + .as_ref() + .iter() + .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) + .cloned() + .collect(), + ), + >::StorageDepositLimitExhausted, + ); + + // Allow for the additional 14 bytes but demand an additional byte in the callee contract. + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(ALICE), + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(14)), + 101u32 + .to_le_bytes() + .as_ref() + .iter() + .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) + .cloned() + .collect(), + ), + >::StorageDepositLimitExhausted, + ); + + // Refund in the callee contract but not enough to cover the 14 balance required by the + // caller. + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(ALICE), + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(0)), + 87u32 + .to_le_bytes() + .as_ref() + .iter() + .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) + .cloned() + .collect(), + ), + >::StorageDepositLimitExhausted, + ); + + let _ = Balances::make_free_balance_be(&ALICE, 1_000); + + // Send more than the sender has balance. 
+ assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(ALICE), + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(50)), + 1_200u32 + .to_le_bytes() + .as_ref() + .iter() + .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) + .cloned() + .collect(), + ), + >::StorageDepositLimitExhausted, + ); + + // Same as above but allow for the additional balance. + assert_ok!(Contracts::call( + RuntimeOrigin::signed(ALICE), + addr_caller.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(1)), + 87u32 + .to_le_bytes() + .as_ref() + .iter() + .chain(<_ as AsRef<[u8]>>::as_ref(&addr_callee)) + .cloned() + .collect(), + )); + }); +} + +#[test] +fn deposit_limit_honors_liquidity_restrictions() { + let (wasm, code_hash) = compile_module::("store").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = Balances::deposit_creating(&BOB, 1_000); + let min_balance = ::Currency::minimum_balance(); + + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + + // Check that the contract has been instantiated and has the minimum balance + assert_eq!(get_contract(&addr).total_deposit(), min_balance); + assert_eq!(::Currency::total_balance(&addr), min_balance); + + // check that the lock ins honored + Balances::set_lock([0; 8], &BOB, 1_000, WithdrawReasons::TRANSFER); + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(BOB), + addr.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(200)), + 100u32.to_le_bytes().to_vec() + ), + >::StorageDepositNotEnoughFunds, + ); + assert_eq!(Balances::free_balance(&BOB), 1_000); + }); +} + +#[test] +fn deposit_limit_honors_existential_deposit() { + let (wasm, code_hash) = compile_module::("store").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = Balances::deposit_creating(&BOB, 1_000); + let min_balance = ::Currency::minimum_balance(); + + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + + // Check that the contract has been instantiated and has the minimum balance + assert_eq!(get_contract(&addr).total_deposit(), min_balance); + assert_eq!(::Currency::total_balance(&addr), min_balance); + + // check that the deposit can't bring the account below the existential deposit + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(BOB), + addr.clone(), + 0, + GAS_LIMIT, + Some(codec::Compact(900)), + 100u32.to_le_bytes().to_vec() + ), + >::StorageDepositNotEnoughFunds, + ); + assert_eq!(Balances::free_balance(&BOB), 1_000); + }); +} + +#[test] +fn deposit_limit_honors_min_leftover() { + let (wasm, code_hash) = compile_module::("store").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = Balances::deposit_creating(&BOB, 1_000); + let min_balance = ::Currency::minimum_balance(); + + // Instantiate the BOB contract. 
+ assert_ok!(Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + + // Check that the contract has been instantiated and has the minimum balance + assert_eq!(get_contract(&addr).total_deposit(), min_balance); + assert_eq!(::Currency::total_balance(&addr), min_balance); + + // check that the minumum leftover (value send) is considered + assert_err_ignore_postinfo!( + Contracts::call( + RuntimeOrigin::signed(BOB), + addr.clone(), + 400, + GAS_LIMIT, + Some(codec::Compact(500)), + 100u32.to_le_bytes().to_vec() + ), + >::StorageDepositNotEnoughFunds, + ); + assert_eq!(Balances::free_balance(&BOB), 1_000); + }); +} diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 9c130b562c275..e8873f604c9c7 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -426,7 +426,7 @@ fn do_preparation( .saturating_add(original_code_len) .saturating_add(>::max_encoded_len()) as u32; let deposit = Diff { bytes_added, items_added: 3, ..Default::default() } - .to_deposit::() + .update_contract::(None) .charge_or_zero(); module.owner_info = Some(OwnerInfo { owner, deposit, refcount: 0 }); diff --git a/frame/support/src/traits/tokens/currency.rs b/frame/support/src/traits/tokens/currency.rs index 9a1634fd89313..d0beb66d34923 100644 --- a/frame/support/src/traits/tokens/currency.rs +++ b/frame/support/src/traits/tokens/currency.rs @@ -26,7 +26,7 @@ use crate::{ traits::Get, }; use codec::MaxEncodedLen; -use sp_runtime::traits::MaybeSerializeDeserialize; +use sp_runtime::{traits::MaybeSerializeDeserialize, FixedPointOperand}; use sp_std::fmt::Debug; mod reservable; @@ -37,7 +37,7 @@ pub use lockable::{LockIdentifier, LockableCurrency, VestingSchedule}; /// Abstraction over a fungible assets system. pub trait Currency { /// The balance of an account. - type Balance: Balance + MaybeSerializeDeserialize + Debug + MaxEncodedLen; + type Balance: Balance + MaybeSerializeDeserialize + Debug + MaxEncodedLen + FixedPointOperand; /// The opaque token type for an imbalance. This is returned by unbalanced operations /// and must be dealt with. It may be dropped but cannot be cloned. 
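The storage-deposit-limit tests above all exercise the `storage_deposit_limit` argument of `Contracts::call`. The snippet below is a usage sketch only, reusing fixtures from those tests (`ALICE`, `GAS_LIMIT`, the mock `Test` runtime, and `addr` pointing at an already instantiated `store` contract) and the mock's deposit prices of 1 per byte plus 2 per storage item; it is not part of the patch itself.

// Cap the storage deposit charged to the caller for this call at 102 units:
// 100 bytes of new storage at 1 per byte, plus 2 for the new storage item.
// If the contract tries to create more storage than the limit covers, the call
// fails with `Error::<Test>::StorageDepositLimitExhausted` and its effects are
// rolled back.
assert_ok!(Contracts::call(
	RuntimeOrigin::signed(ALICE),
	addr.clone(),
	0,                             // no balance transferred with the call
	GAS_LIMIT,
	Some(codec::Compact(102)),     // per-call storage deposit limit
	100u32.to_le_bytes().to_vec(), // input: ask the contract to store 100 bytes
));

Passing `None` for the limit, as the instantiation calls in these tests do, applies no call-specific cap; the deposit is then bounded only by what the caller can actually pay.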
From 1253de628bcb5d70b7ac7dd2d8c7b40abb751a28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 22 Sep 2022 09:41:16 +0200 Subject: [PATCH 160/348] pallet-utility: Disallow none origin (#12321) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: André Silva --- frame/utility/src/lib.rs | 6 +++--- frame/utility/src/tests.rs | 14 ++++++++++++-- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index bc19b5625a71a..819314f3d8454 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -203,7 +203,7 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_root(origin.clone()).is_ok(); + let is_root = ensure_signed_or_root(origin.clone())?.is_none(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); @@ -319,7 +319,7 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_root(origin.clone()).is_ok(); + let is_root = ensure_signed_or_root(origin.clone())?.is_none(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); @@ -426,7 +426,7 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_root(origin.clone()).is_ok(); + let is_root = ensure_signed_or_root(origin.clone())?.is_none(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 65d9c966ebdfd..ebd8dda81adbc 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -25,6 +25,7 @@ use crate as utility; use frame_support::{ assert_err_ignore_postinfo, assert_noop, assert_ok, dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable, Pays}, + error::BadOrigin, parameter_types, storage, traits::{ConstU32, ConstU64, Contains}, weights::Weight, @@ -651,7 +652,7 @@ fn force_batch_works() { call_transfer(2, 10), call_transfer(2, 5), ] - ),); + )); System::assert_last_event(utility::Event::BatchCompletedWithErrors.into()); System::assert_has_event( utility::Event::ItemFailed { error: DispatchError::Other("") }.into(), @@ -662,10 +663,19 @@ fn force_batch_works() { assert_ok!(Utility::force_batch( RuntimeOrigin::signed(2), vec![call_transfer(1, 5), call_transfer(1, 5),] - ),); + )); System::assert_last_event(utility::Event::BatchCompleted.into()); assert_ok!(Utility::force_batch(RuntimeOrigin::signed(1), vec![call_transfer(2, 50),]),); System::assert_last_event(utility::Event::BatchCompletedWithErrors.into()); }); } + +#[test] +fn none_origin_does_not_work() { + new_test_ext().execute_with(|| { + assert_noop!(Utility::force_batch(RuntimeOrigin::none(), vec![]), BadOrigin); + assert_noop!(Utility::batch(RuntimeOrigin::none(), vec![]), BadOrigin); + assert_noop!(Utility::batch_all(RuntimeOrigin::none(), vec![]), BadOrigin); + }) +} From cc4d5cc8654d280f03a13421669ba03632e14aa7 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Thu, 22 Sep 2022 16:53:51 +0800 Subject: [PATCH 161/348] Migrate remaining old decl_* macros to the new pallet attribute macros (#12271) * Migrate remaining old decl_* macros to the new pallet attribute macros Signed-off-by: koushiro * Apply review suggestions Signed-off-by: koushiro * Apply review 
suggestions Signed-off-by: koushiro * use pallet::storage * Fix dev rpc test Signed-off-by: koushiro * Fix service tests Signed-off-by: koushiro Signed-off-by: koushiro --- Cargo.lock | 2 + client/rpc/src/dev/tests.rs | 4 +- client/service/test/src/client/mod.rs | 9 ++- frame/benchmarking/src/tests.rs | 8 +- frame/benchmarking/src/tests_instance.rs | 83 ++++++++++---------- test-utils/runtime/src/genesismap.rs | 12 +-- test-utils/runtime/src/lib.rs | 2 + test-utils/runtime/src/system.rs | 97 ++++++++++++++++-------- utils/frame/rpc/support/Cargo.toml | 2 + utils/frame/rpc/support/src/lib.rs | 94 ++++++++++++++++++----- 10 files changed, 206 insertions(+), 107 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5521e91de895d..5172c3d02861d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10522,6 +10522,8 @@ dependencies = [ "sc-rpc-api", "scale-info", "serde", + "sp-core", + "sp-runtime", "sp-storage", "tokio", ] diff --git a/client/rpc/src/dev/tests.rs b/client/rpc/src/dev/tests.rs index f3b18690d0972..816f3cdfe6025 100644 --- a/client/rpc/src/dev/tests.rs +++ b/client/rpc/src/dev/tests.rs @@ -43,8 +43,8 @@ async fn block_stats_work() { .await .unwrap(), Some(BlockStats { - witness_len: 597, - witness_compact_len: 500, + witness_len: 630, + witness_compact_len: 534, block_len: 99, num_extrinsics: 0, }), diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index cb648664b4006..2ab1415f8ca31 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1663,7 +1663,7 @@ fn storage_keys_iter_works() { let res: Vec<_> = client .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) .unwrap() - .take(8) + .take(9) .map(|x| array_bytes::bytes2hex("", &x.0)) .collect(); assert_eq!( @@ -1675,6 +1675,7 @@ fn storage_keys_iter_works() { "1a560ecfd2a62c2b8521ef149d0804eb621050e3988ed97dca55f0d7c3e6aa34", "1d66850d32002979d67dd29dc583af5b2ae2a1f71c1f35ad90fff122be7a3824", "237498b98d8803334286e9f0483ef513098dd3c1c22ca21c4dc155b4ef6cc204", + "26aa394eea5630e07c48ae0c9558cef75e0621c4869aa60c02be9adcc98a0d1d", "29b9db10ec5bf7907d8f74b5e60aa8140c4fbdd8127a1ee5600cb98e5ec01729", "3a636f6465", ] @@ -1699,7 +1700,7 @@ fn storage_keys_iter_works() { "5c2d5fda66373dabf970e4fb13d277ce91c5233473321129d32b5a8085fa8133", "6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081", "66484000ed3f75c95fc7b03f39c20ca1e1011e5999278247d3b2f5e3c3273808", - "79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d", + "7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa", ] ); @@ -1708,7 +1709,7 @@ fn storage_keys_iter_works() { &BlockId::Number(0), Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked( - "79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d", + "7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa", ))), ) .unwrap() @@ -1718,11 +1719,11 @@ fn storage_keys_iter_works() { assert_eq!( res, [ - "7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa", "811ecfaadcf5f2ee1d67393247e2f71a1662d433e8ce7ff89fb0d4aa9561820b", "a93d74caa7ec34ea1b04ce1e5c090245f867d333f0f88278a451e45299654dc5", "a9ee1403384afbfc13f13be91ff70bfac057436212e53b9733914382ac942892", "cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f", + "e3b47b6c84c0493481f97c5197d2554f", ] ); } diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 3bbd06ea64f84..88a7d6d0286b2 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ 
-36,7 +36,7 @@ mod pallet_test { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); + pub struct Pallet(PhantomData); #[pallet::config] pub trait Config: frame_system::Config { @@ -46,21 +46,21 @@ mod pallet_test { } #[pallet::storage] - #[pallet::getter(fn heartbeat_after)] + #[pallet::getter(fn value)] pub(crate) type Value = StorageValue<_, u32, OptionQuery>; #[pallet::call] impl Pallet { #[pallet::weight(0)] pub fn set_value(origin: OriginFor, n: u32) -> DispatchResult { - let _sender = frame_system::ensure_signed(origin)?; + let _sender = ensure_signed(origin)?; Value::::put(n); Ok(()) } #[pallet::weight(0)] pub fn dummy(origin: OriginFor, _n: u32) -> DispatchResult { - let _sender = frame_system::ensure_none(origin)?; + let _sender = ensure_none(origin)?; Ok(()) } diff --git a/frame/benchmarking/src/tests_instance.rs b/frame/benchmarking/src/tests_instance.rs index 1fb122f031a76..7e1cd48840687 100644 --- a/frame/benchmarking/src/tests_instance.rs +++ b/frame/benchmarking/src/tests_instance.rs @@ -28,48 +28,52 @@ use sp_runtime::{ }; use sp_std::prelude::*; +#[frame_support::pallet] mod pallet_test { - use frame_support::pallet_prelude::Get; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; - frame_support::decl_storage! { - trait Store for Module, I: Instance = DefaultInstance> as Test where - ::OtherEvent: Into<>::RuntimeEvent> - { - pub Value get(fn value): Option; - } - } - - frame_support::decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where - origin: T::RuntimeOrigin, ::OtherEvent: Into<>::RuntimeEvent> - { - #[weight = 0] - fn set_value(origin, n: u32) -> frame_support::dispatch::DispatchResult { - let _sender = frame_system::ensure_signed(origin)?; - Value::::put(n); - Ok(()) - } - - #[weight = 0] - fn dummy(origin, _n: u32) -> frame_support::dispatch::DispatchResult { - let _sender = frame_system::ensure_none(origin)?; - Ok(()) - } - } - } + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData<(T, I)>); pub trait OtherConfig { type OtherEvent; } - pub trait Config: frame_system::Config + OtherConfig - where - Self::OtherEvent: Into<>::RuntimeEvent>, - { - type RuntimeEvent; + #[pallet::config] + pub trait Config: frame_system::Config + OtherConfig { + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; type LowerBound: Get; type UpperBound: Get; } + + #[pallet::storage] + #[pallet::getter(fn value)] + pub(crate) type Value, I: 'static = ()> = StorageValue<_, u32, OptionQuery>; + + #[pallet::event] + pub enum Event, I: 'static = ()> {} + + #[pallet::call] + impl, I: 'static> Pallet + where + ::OtherEvent: Into<>::RuntimeEvent>, + { + #[pallet::weight(0)] + pub fn set_value(origin: OriginFor, n: u32) -> DispatchResult { + let _sender = ensure_signed(origin)?; + Value::::put(n); + Ok(()) + } + + #[pallet::weight(0)] + pub fn dummy(origin: OriginFor, _n: u32) -> DispatchResult { + let _sender = ensure_none(origin)?; + Ok(()) + } + } } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -82,7 +86,7 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event}, - TestPallet: pallet_test::{Pallet, Call, Storage}, + TestPallet: pallet_test::{Pallet, Call, Storage, Event}, } ); @@ -90,18 +94,18 @@ impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = 
(); - type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; type Index = u64; type BlockNumber = u64; type Hash = H256; - type RuntimeCall = RuntimeCall; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; type RuntimeEvent = RuntimeEvent; type BlockHashCount = (); + type DbWeight = (); type Version = (); type PalletInfo = PalletInfo; type AccountData = (); @@ -110,7 +114,7 @@ impl frame_system::Config for Test { type SystemWeightInfo = (); type SS58Prefix = (); type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type MaxConsumers = ConstU32<16>; } impl pallet_test::Config for Test { @@ -130,15 +134,14 @@ fn new_test_ext() -> sp_io::TestExternalities { mod benchmarks { use super::pallet_test::{self, Value}; use crate::account; - use frame_support::{ensure, StorageValue}; + use frame_support::ensure; use frame_system::RawOrigin; use sp_std::prelude::*; // Additional used internally by the benchmark macro. use super::pallet_test::{Call, Config, Pallet}; - use frame_support::traits::Instance; - crate::benchmarks_instance! { + crate::benchmarks_instance_pallet! { where_clause { where ::OtherEvent: Clone @@ -151,7 +154,7 @@ mod benchmarks { let caller = account::("caller", 0, 0); }: _ (RawOrigin::Signed(caller), b.into()) verify { - assert_eq!(Value::::get(), Some(b)); + assert_eq!(Value::::get(), Some(b)); } other_name { diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 3ece5165e1757..42706730eb9ac 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -17,8 +17,9 @@ //! Tool for creating the genesis block. -use super::{system, wasm_binary_unwrap, AccountId, AuthorityId}; +use super::{system, wasm_binary_unwrap, AccountId, AuthorityId, Runtime}; use codec::{Encode, Joiner, KeyedVec}; +use frame_support::traits::GenesisBuild; use sc_service::client::genesis; use sp_core::{ map, @@ -80,10 +81,11 @@ impl GenesisConfig { // Assimilate the system genesis config. let mut storage = Storage { top: map, children_default: self.extra_storage.children_default.clone() }; - let config = system::GenesisConfig { authorities: self.authorities.clone() }; - config - .assimilate_storage(&mut storage) - .expect("Adding `system::GensisConfig` to the genesis"); + >::assimilate_storage( + &system::GenesisConfig { authorities: self.authorities.clone() }, + &mut storage, + ) + .expect("Adding `system::GensisConfig` to the genesis"); storage } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 5601de632eae3..a64e3f25ef041 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -610,6 +610,8 @@ impl frame_system::Config for Runtime { type MaxConsumers = ConstU32<16>; } +impl system::Config for Runtime {} + impl pallet_timestamp::Config for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 0cb3e7d70f6df..6e33d5c25fe6f 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -19,11 +19,11 @@ //! and depositing logs. 
use crate::{ - AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, + AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Runtime, Transfer, + H256 as Hash, }; use codec::{Decode, Encode, KeyedVec}; -use frame_support::{decl_module, decl_storage, storage}; -use frame_system::Config; +use frame_support::storage; use sp_core::storage::well_known_keys; use sp_io::{hashing::blake2_256, storage::root as storage_root, trie}; use sp_runtime::{ @@ -39,19 +39,51 @@ use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; -decl_module! { - pub struct Module for enum Call where origin: T::RuntimeOrigin {} -} +pub use self::pallet::*; + +#[frame_support::pallet] +mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::without_storage_info] + pub struct Pallet(PhantomData); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::storage] + pub type ExtrinsicData = StorageMap<_, Blake2_128Concat, u32, Vec, ValueQuery>; + + // The current block number being processed. Set by `execute_block`. + #[pallet::storage] + pub type Number = StorageValue<_, BlockNumber, OptionQuery>; -decl_storage! { - trait Store for Module as TestRuntime { - ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec; - // The current block number being processed. Set by `execute_block`. - Number get(fn number): Option; - ParentHash get(fn parent_hash): Hash; - NewAuthorities get(fn new_authorities): Option>; - StorageDigest get(fn storage_digest): Option; - Authorities get(fn authorities) config(): Vec; + #[pallet::storage] + pub type ParentHash = StorageValue<_, Hash, ValueQuery>; + + #[pallet::storage] + pub type NewAuthorities = StorageValue<_, Vec, OptionQuery>; + + #[pallet::storage] + pub type StorageDigest = StorageValue<_, Digest, OptionQuery>; + + #[pallet::storage] + pub type Authorities = StorageValue<_, Vec, ValueQuery>; + + #[pallet::genesis_config] + #[cfg_attr(feature = "std", derive(Default))] + pub struct GenesisConfig { + pub authorities: Vec, + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + >::put(self.authorities.clone()); + } } } @@ -69,9 +101,9 @@ pub fn nonce_of(who: AccountId) -> u64 { pub fn initialize_block(header: &Header) { // populate environment. 
- ::put(&header.number); - ::put(&header.parent_hash); - ::put(header.digest()); + >::put(&header.number); + >::put(&header.parent_hash); + >::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); // try to read something that depends on current header digest @@ -82,15 +114,15 @@ pub fn initialize_block(header: &Header) { } pub fn authorities() -> Vec { - Authorities::get() + >::get() } pub fn get_block_number() -> Option { - Number::get() + >::get() } pub fn take_block_number() -> Option { - Number::take() + >::take() } #[derive(Copy, Clone)] @@ -124,8 +156,8 @@ fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) -> Heade header.state_root = new_header.state_root; } else { info_expect_equal_hash(&new_header.state_root, &header.state_root); - assert!( - new_header.state_root == header.state_root, + assert_eq!( + new_header.state_root, header.state_root, "Storage root must match that calculated.", ); } @@ -134,8 +166,8 @@ fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) -> Heade header.extrinsics_root = new_header.extrinsics_root; } else { info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); - assert!( - new_header.extrinsics_root == header.extrinsics_root, + assert_eq!( + new_header.extrinsics_root, header.extrinsics_root, "Transaction trie root must be valid.", ); } @@ -187,7 +219,7 @@ pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap_or_default(); let result = execute_transaction_backend(&utx, extrinsic_index); - ExtrinsicData::insert(extrinsic_index, utx.encode()); + >::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result } @@ -196,13 +228,14 @@ pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { pub fn finalize_block() -> Header { use sp_core::storage::StateVersion; let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); - let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); + let txs: Vec<_> = (0..extrinsic_index).map(>::take).collect(); let extrinsics_root = trie::blake2_256_ordered_root(txs, StateVersion::V0); - let number = ::take().expect("Number is set by `initialize_block`"); - let parent_hash = ::take(); - let mut digest = ::take().expect("StorageDigest is set by `initialize_block`"); + let number = >::take().expect("Number is set by `initialize_block`"); + let parent_hash = >::take(); + let mut digest = + >::take().expect("StorageDigest is set by `initialize_block`"); - let o_new_authorities = ::take(); + let o_new_authorities = >::take(); // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. 
@@ -280,7 +313,7 @@ fn execute_store(data: Vec) -> ApplyExtrinsicResult { } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { - NewAuthorities::put(new_authorities.to_vec()); + >::put(new_authorities.to_vec()); Ok(Ok(())) } diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 2104774bd2605..38e40a33d9c7f 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -27,4 +27,6 @@ sp-storage = { version = "6.0.0", path = "../../../../primitives/storage" } scale-info = "2.1.1" jsonrpsee = { version = "0.15.1", features = ["ws-client", "jsonrpsee-types"] } tokio = "1.17.0" +sp-core = { version = "6.0.0", path = "../../../../primitives/core" } +sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } frame-system = { version = "4.0.0-dev", path = "../../../../frame/system" } diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index ad90c24167587..fdf6fe0be8172 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -34,50 +34,104 @@ use sp_storage::{StorageData, StorageKey}; /// # use jsonrpsee::core::Error as RpcError; /// # use jsonrpsee::ws_client::WsClientBuilder; /// # use codec::Encode; -/// # use frame_support::{decl_storage, decl_module}; +/// # use frame_support::{construct_runtime, traits::ConstU32}; /// # use substrate_frame_rpc_support::StorageQuery; -/// # use frame_system::Config; /// # use sc_rpc_api::state::StateApiClient; +/// # use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; /// # -/// # // Hash would normally be ::Hash, but we don't have -/// # // frame_system::Config implemented for TestRuntime. Here we just pretend. -/// # type Hash = (); +/// # construct_runtime!( +/// # pub enum TestRuntime where +/// # Block = frame_system::mocking::MockBlock, +/// # NodeBlock = frame_system::mocking::MockBlock, +/// # UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic, +/// # { +/// # System: frame_system::{Pallet, Call, Config, Storage, Event}, +/// # Test: pallet_test::{Pallet, Storage}, +/// # } +/// # ); /// # +/// # type Hash = sp_core::H256; /// # -/// # struct TestRuntime; -/// # -/// # decl_module! { -/// # pub struct Module for enum Call where origin: T::RuntimeOrigin {} +/// # impl frame_system::Config for TestRuntime { +/// # type BaseCallFilter = (); +/// # type BlockWeights = (); +/// # type BlockLength = (); +/// # type RuntimeOrigin = RuntimeOrigin; +/// # type RuntimeCall = RuntimeCall; +/// # type Index = u64; +/// # type BlockNumber = u64; +/// # type Hash = Hash; +/// # type Hashing = BlakeTwo256; +/// # type AccountId = u64; +/// # type Lookup = IdentityLookup; +/// # type Header = Header; +/// # type RuntimeEvent = RuntimeEvent; +/// # type BlockHashCount = (); +/// # type DbWeight = (); +/// # type Version = (); +/// # type PalletInfo = PalletInfo; +/// # type AccountData = (); +/// # type OnNewAccount = (); +/// # type OnKilledAccount = (); +/// # type SystemWeightInfo = (); +/// # type SS58Prefix = (); +/// # type OnSetCode = (); +/// # type MaxConsumers = ConstU32<16>; /// # } /// # +/// # impl pallet_test::Config for TestRuntime {} +/// # +/// /// pub type Loc = (i64, i64, i64); /// pub type Block = u8; /// /// // Note that all fields are marked pub. -/// decl_storage! 
{ -/// trait Store for Module as TestRuntime { -/// pub LastActionId: u64; -/// pub Voxels: map hasher(blake2_128_concat) Loc => Block; -/// pub Actions: map hasher(blake2_128_concat) u64 => Loc; -/// pub Prefab: double_map hasher(blake2_128_concat) u128, hasher(blake2_128_concat) (i8, i8, i8) => Block; -/// } +/// pub use self::pallet_test::*; +/// +/// #[frame_support::pallet] +/// mod pallet_test { +/// use super::*; +/// use frame_support::pallet_prelude::*; +/// +/// #[pallet::pallet] +/// #[pallet::generate_store(pub(super) trait Store)] +/// pub struct Pallet(PhantomData); +/// +/// #[pallet::config] +/// pub trait Config: frame_system::Config {} +/// +/// #[pallet::storage] +/// pub type LastActionId = StorageValue<_, u64, ValueQuery>; +/// +/// #[pallet::storage] +/// pub type Voxels = StorageMap<_, Blake2_128Concat, Loc, Block>; +/// +/// #[pallet::storage] +/// pub type Actions = StorageMap<_, Blake2_128Concat, u64, Loc>; +/// +/// #[pallet::storage] +/// pub type Prefab = StorageDoubleMap< +/// _, +/// Blake2_128Concat, u128, +/// Blake2_128Concat, (i8, i8, i8), Block +/// >; /// } /// /// #[tokio::main] /// async fn main() -> Result<(), RpcError> { /// let cl = WsClientBuilder::default().build("ws://[::1]:9944").await?; /// -/// let q = StorageQuery::value::(); +/// let q = StorageQuery::value::>(); /// let hash = None::; /// let _: Option = q.get(&cl, hash).await?; /// -/// let q = StorageQuery::map::((0, 0, 0)); +/// let q = StorageQuery::map::, _>((0, 0, 0)); /// let _: Option = q.get(&cl, hash).await?; /// -/// let q = StorageQuery::map::(12); +/// let q = StorageQuery::map::, _>(12); /// let _: Option = q.get(&cl, hash).await?; /// -/// let q = StorageQuery::double_map::(3, (0, 0, 0)); +/// let q = StorageQuery::double_map::, _, _>(3, (0, 0, 0)); /// let _: Option = q.get(&cl, hash).await?; /// /// Ok(()) From 1f720c11b90d45bba0c4ddad60d3b9a27ca85441 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Thu, 22 Sep 2022 10:15:31 -0400 Subject: [PATCH 162/348] Rename anonymous to pure proxy (#12283) * rename anon to pure proxy * remove old weight comments * fix merge * Update frame/proxy/src/lib.rs Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * fn pure -> fn create_pure Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- docs/CHANGELOG.md | 2 +- frame/proxy/src/benchmarking.rs | 24 +++---- frame/proxy/src/lib.rs | 109 +++++++++----------------------- frame/proxy/src/tests.rs | 30 ++++----- frame/proxy/src/weights.rs | 12 ++-- 5 files changed, 64 insertions(+), 113 deletions(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index c867a245739ff..25f8e582c78fc 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -360,7 +360,7 @@ Runtime ------- * Introduce stacked filtering (#6273) -* Allow "anonymous" proxied accounts (#6236) +* Allow "pure" proxied accounts (#6236) * Allow over-weight collective proposals to be closed (#6163) * Fix Election when ForceNone V1 (#6166) diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index f7e440d1f7d6f..58c0cb73011df 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -98,7 +98,7 @@ benchmarks! { let a in 0 .. T::MaxPending::get() - 1; let p in 1 .. 
(T::MaxProxies::get() - 1) => add_proxies::(p, None)?; // In this case the caller is the "target" proxy - let caller: T::AccountId = account("anonymous", 0, SEED); + let caller: T::AccountId = account("pure", 0, SEED); let delegate: T::AccountId = account("target", p - 1, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); T::Currency::make_free_balance_be(&delegate, BalanceOf::::max_value() / 2u32.into()); @@ -218,7 +218,7 @@ benchmarks! { assert_eq!(proxies.len() as u32, 0); } - anonymous { + create_pure { let p in 1 .. (T::MaxProxies::get() - 1) => add_proxies::(p, None)?; let caller: T::AccountId = whitelisted_caller(); }: _( @@ -228,22 +228,22 @@ benchmarks! { 0 ) verify { - let anon_account = Pallet::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); - assert_last_event::(Event::AnonymousCreated { - anonymous: anon_account, + let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); + assert_last_event::(Event::PureCreated { + pure: pure_account, who: caller, proxy_type: T::ProxyType::default(), disambiguation_index: 0, }.into()); } - kill_anonymous { + kill_pure { let p in 0 .. (T::MaxProxies::get() - 2); let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - Pallet::::anonymous( + Pallet::::create_pure( RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), T::BlockNumber::zero(), @@ -251,13 +251,13 @@ benchmarks! { )?; let height = system::Pallet::::block_number(); let ext_index = system::Pallet::::extrinsic_index().unwrap_or(0); - let anon = Pallet::::anonymous_account(&caller, &T::ProxyType::default(), 0, None); + let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); - add_proxies::(p, Some(anon.clone()))?; - ensure!(Proxies::::contains_key(&anon), "anon proxy not created"); - }: _(RawOrigin::Signed(anon.clone()), caller_lookup, T::ProxyType::default(), 0, height, ext_index) + add_proxies::(p, Some(pure_account.clone()))?; + ensure!(Proxies::::contains_key(&pure_account), "pure proxy not created"); + }: _(RawOrigin::Signed(pure_account.clone()), caller_lookup, T::ProxyType::default(), 0, height, ext_index) verify { - assert!(!Proxies::::contains_key(&anon)); + assert!(!Proxies::::contains_key(&pure_account)); } impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/proxy/src/lib.rs b/frame/proxy/src/lib.rs index af96069397255..5c07a2b012243 100644 --- a/frame/proxy/src/lib.rs +++ b/frame/proxy/src/lib.rs @@ -191,10 +191,6 @@ pub mod pallet { /// - `real`: The account that the proxy will make a call on behalf of. /// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. /// - `call`: The call to be made by the `real` account. - /// - /// # - /// Weight is a function of the number of proxies the user has (P). - /// # #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy(T::MaxProxies::get()) @@ -228,10 +224,6 @@ pub mod pallet { /// - `proxy_type`: The permissions allowed for this proxy account. /// - `delay`: The announcement period required of the initial proxy. Will generally be /// zero. - /// - /// # - /// Weight is a function of the number of proxies the user has (P). 
- /// # #[pallet::weight(T::WeightInfo::add_proxy(T::MaxProxies::get()))] pub fn add_proxy( origin: OriginFor, @@ -251,10 +243,6 @@ pub mod pallet { /// Parameters: /// - `proxy`: The account that the `caller` would like to remove as a proxy. /// - `proxy_type`: The permissions currently enabled for the removed proxy account. - /// - /// # - /// Weight is a function of the number of proxies the user has (P). - /// # #[pallet::weight(T::WeightInfo::remove_proxy(T::MaxProxies::get()))] pub fn remove_proxy( origin: OriginFor, @@ -271,12 +259,8 @@ pub mod pallet { /// /// The dispatch origin for this call must be _Signed_. /// - /// WARNING: This may be called on accounts created by `anonymous`, however if done, then + /// WARNING: This may be called on accounts created by `pure`, however if done, then /// the unreserved fees will be inaccessible. **All access to this account will be lost.** - /// - /// # - /// Weight is a function of the number of proxies the user has (P). - /// # #[pallet::weight(T::WeightInfo::remove_proxies(T::MaxProxies::get()))] pub fn remove_proxies(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; @@ -304,13 +288,8 @@ pub mod pallet { /// same sender, with the same parameters. /// /// Fails if there are insufficient funds to pay for deposit. - /// - /// # - /// Weight is a function of the number of proxies the user has (P). - /// # - /// TODO: Might be over counting 1 read - #[pallet::weight(T::WeightInfo::anonymous(T::MaxProxies::get()))] - pub fn anonymous( + #[pallet::weight(T::WeightInfo::create_pure(T::MaxProxies::get()))] + pub fn create_pure( origin: OriginFor, proxy_type: T::ProxyType, delay: T::BlockNumber, @@ -318,8 +297,8 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; - let anonymous = Self::anonymous_account(&who, &proxy_type, index, None); - ensure!(!Proxies::::contains_key(&anonymous), Error::::Duplicate); + let pure = Self::pure_account(&who, &proxy_type, index, None); + ensure!(!Proxies::::contains_key(&pure), Error::::Duplicate); let proxy_def = ProxyDefinition { delegate: who.clone(), proxy_type: proxy_type.clone(), delay }; @@ -329,9 +308,9 @@ pub mod pallet { let deposit = T::ProxyDepositBase::get() + T::ProxyDepositFactor::get(); T::Currency::reserve(&who, deposit)?; - Proxies::::insert(&anonymous, (bounded_proxies, deposit)); - Self::deposit_event(Event::AnonymousCreated { - anonymous, + Proxies::::insert(&pure, (bounded_proxies, deposit)); + Self::deposit_event(Event::PureCreated { + pure, who, proxy_type, disambiguation_index: index, @@ -340,28 +319,24 @@ pub mod pallet { Ok(()) } - /// Removes a previously spawned anonymous proxy. + /// Removes a previously spawned pure proxy. /// /// WARNING: **All access to this account will be lost.** Any funds held in it will be /// inaccessible. /// /// Requires a `Signed` origin, and the sender account must have been created by a call to - /// `anonymous` with corresponding parameters. - /// - /// - `spawner`: The account that originally called `anonymous` to create this account. - /// - `index`: The disambiguation index originally passed to `anonymous`. Probably `0`. - /// - `proxy_type`: The proxy type originally passed to `anonymous`. - /// - `height`: The height of the chain when the call to `anonymous` was processed. - /// - `ext_index`: The extrinsic index in which the call to `anonymous` was processed. 
- /// - /// Fails with `NoPermission` in case the caller is not a previously created anonymous - /// account whose `anonymous` call has corresponding parameters. - /// - /// # - /// Weight is a function of the number of proxies the user has (P). - /// # - #[pallet::weight(T::WeightInfo::kill_anonymous(T::MaxProxies::get()))] - pub fn kill_anonymous( + /// `pure` with corresponding parameters. + /// + /// - `spawner`: The account that originally called `pure` to create this account. + /// - `index`: The disambiguation index originally passed to `pure`. Probably `0`. + /// - `proxy_type`: The proxy type originally passed to `pure`. + /// - `height`: The height of the chain when the call to `pure` was processed. + /// - `ext_index`: The extrinsic index in which the call to `pure` was processed. + /// + /// Fails with `NoPermission` in case the caller is not a previously created pure + /// account whose `pure` call has corresponding parameters. + #[pallet::weight(T::WeightInfo::kill_pure(T::MaxProxies::get()))] + pub fn kill_pure( origin: OriginFor, spawner: AccountIdLookupOf, proxy_type: T::ProxyType, @@ -373,7 +348,7 @@ pub mod pallet { let spawner = T::Lookup::lookup(spawner)?; let when = (height, ext_index); - let proxy = Self::anonymous_account(&spawner, &proxy_type, index, Some(when)); + let proxy = Self::pure_account(&spawner, &proxy_type, index, Some(when)); ensure!(proxy == who, Error::::NoPermission); let (_, deposit) = Proxies::::take(&who); @@ -397,12 +372,6 @@ pub mod pallet { /// Parameters: /// - `real`: The account that the proxy will make a call on behalf of. /// - `call_hash`: The hash of the call to be made by the `real` account. - /// - /// # - /// Weight is a function of: - /// - A: the number of announcements made. - /// - P: the number of proxies the user has. - /// # #[pallet::weight(T::WeightInfo::announce(T::MaxPending::get(), T::MaxProxies::get()))] pub fn announce( origin: OriginFor, @@ -452,12 +421,6 @@ pub mod pallet { /// Parameters: /// - `real`: The account that the proxy will make a call on behalf of. /// - `call_hash`: The hash of the call to be made by the `real` account. - /// - /// # - /// Weight is a function of: - /// - A: the number of announcements made. - /// - P: the number of proxies the user has. - /// # #[pallet::weight(T::WeightInfo::remove_announcement( T::MaxPending::get(), T::MaxProxies::get() @@ -484,12 +447,6 @@ pub mod pallet { /// Parameters: /// - `delegate`: The account that previously announced the call. /// - `call_hash`: The hash of the call to be made. - /// - /// # - /// Weight is a function of: - /// - A: the number of announcements made. - /// - P: the number of proxies the user has. - /// # #[pallet::weight(T::WeightInfo::reject_announcement( T::MaxPending::get(), T::MaxProxies::get() @@ -519,12 +476,6 @@ pub mod pallet { /// - `real`: The account that the proxy will make a call on behalf of. /// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. /// - `call`: The call to be made by the `real` account. - /// - /// # - /// Weight is a function of: - /// - A: the number of announcements made. - /// - P: the number of proxies the user has. - /// # #[pallet::weight({ let di = call.get_dispatch_info(); (T::WeightInfo::proxy_announced(T::MaxPending::get(), T::MaxProxies::get()) @@ -565,10 +516,10 @@ pub mod pallet { pub enum Event { /// A proxy was executed correctly, with the given. 
ProxyExecuted { result: DispatchResult }, - /// Anonymous account has been created by new proxy with given + /// A pure account has been created by new proxy with given /// disambiguation index and proxy type. - AnonymousCreated { - anonymous: T::AccountId, + PureCreated { + pure: T::AccountId, who: T::AccountId, proxy_type: T::ProxyType, disambiguation_index: u16, @@ -642,7 +593,7 @@ pub mod pallet { } impl Pallet { - /// Calculate the address of an anonymous account. + /// Calculate the address of an pure account. /// /// - `who`: The spawner account. /// - `proxy_type`: The type of the proxy that the sender will be registered as over the @@ -651,9 +602,9 @@ impl Pallet { /// - `index`: A disambiguation index, in case this is called multiple times in the same /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just /// want to use `0`. - /// - `maybe_when`: The block height and extrinsic index of when the anonymous account was + /// - `maybe_when`: The block height and extrinsic index of when the pure account was /// created. None to use current block height and extrinsic index. - pub fn anonymous_account( + pub fn pure_account( who: &T::AccountId, proxy_type: &T::ProxyType, index: u16, @@ -830,9 +781,9 @@ impl Pallet { Some(Call::remove_proxy { ref proxy_type, .. }) if !def.proxy_type.is_superset(proxy_type) => false, - // Proxy call cannot remove all proxies or kill anonymous proxies unless it has full + // Proxy call cannot remove all proxies or kill pure proxies unless it has full // permissions. - Some(Call::remove_proxies { .. }) | Some(Call::kill_anonymous { .. }) + Some(Call::remove_proxies { .. }) | Some(Call::kill_pure { .. }) if def.proxy_type != T::ProxyType::default() => false, _ => def.proxy_type.filter(c), diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index 3ed80107e0702..17bc7bcb92ee1 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -550,13 +550,13 @@ fn proxying_works() { } #[test] -fn anonymous_works() { +fn pure_works() { new_test_ext().execute_with(|| { - assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); - let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None); + assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); + let anon = Proxy::pure_account(&1, &ProxyType::Any, 0, None); System::assert_last_event( - ProxyEvent::AnonymousCreated { - anonymous: anon, + ProxyEvent::PureCreated { + pure: anon, who: 1, proxy_type: ProxyType::Any, disambiguation_index: 0, @@ -564,20 +564,20 @@ fn anonymous_works() { .into(), ); - // other calls to anonymous allowed as long as they're not exactly the same. - assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::JustTransfer, 0, 0)); - assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::Any, 0, 1)); - let anon2 = Proxy::anonymous_account(&2, &ProxyType::Any, 0, None); - assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(2), ProxyType::Any, 0, 0)); + // other calls to pure allowed as long as they're not exactly the same. 
+ assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::JustTransfer, 0, 0)); + assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 1)); + let anon2 = Proxy::pure_account(&2, &ProxyType::Any, 0, None); + assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(2), ProxyType::Any, 0, 0)); assert_noop!( - Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0), + Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0), Error::::Duplicate ); System::set_extrinsic_index(1); - assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); System::set_extrinsic_index(0); System::set_block_number(2); - assert_ok!(Proxy::anonymous(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); let call = Box::new(call_transfer(6, 1)); assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), anon, 5)); @@ -585,7 +585,7 @@ fn anonymous_works() { System::assert_last_event(ProxyEvent::ProxyExecuted { result: Ok(()) }.into()); assert_eq!(Balances::free_balance(6), 1); - let call = Box::new(RuntimeCall::Proxy(ProxyCall::new_call_variant_kill_anonymous( + let call = Box::new(RuntimeCall::Proxy(ProxyCall::new_call_variant_kill_pure( 1, ProxyType::Any, 0, @@ -596,7 +596,7 @@ fn anonymous_works() { let de = DispatchError::from(Error::::NoPermission).stripped(); System::assert_last_event(ProxyEvent::ProxyExecuted { result: Err(de) }.into()); assert_noop!( - Proxy::kill_anonymous(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0, 1, 0), + Proxy::kill_pure(RuntimeOrigin::signed(1), 1, ProxyType::Any, 0, 1, 0), Error::::NoPermission ); assert_eq!(Balances::free_balance(1), 0); diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 2d409d977f875..25457d52f4391 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -52,8 +52,8 @@ pub trait WeightInfo { fn add_proxy(p: u32, ) -> Weight; fn remove_proxy(p: u32, ) -> Weight; fn remove_proxies(p: u32, ) -> Weight; - fn anonymous(p: u32, ) -> Weight; - fn kill_anonymous(p: u32, ) -> Weight; + fn create_pure(p: u32, ) -> Weight; + fn kill_pure(p: u32, ) -> Weight; } /// Weights for pallet_proxy using the Substrate node and recommended hardware. 
@@ -138,7 +138,7 @@ impl WeightInfo for SubstrateWeight { } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) - fn anonymous(p: u32, ) -> Weight { + fn create_pure(p: u32, ) -> Weight { Weight::from_ref_time(31_077_000 as u64) // Standard Error: 3_000 .saturating_add(Weight::from_ref_time(37_000 as u64).saturating_mul(p as u64)) @@ -146,7 +146,7 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) - fn kill_anonymous(p: u32, ) -> Weight { + fn kill_pure(p: u32, ) -> Weight { Weight::from_ref_time(24_657_000 as u64) // Standard Error: 2_000 .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(p as u64)) @@ -236,7 +236,7 @@ impl WeightInfo for () { } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) - fn anonymous(p: u32, ) -> Weight { + fn create_pure(p: u32, ) -> Weight { Weight::from_ref_time(31_077_000 as u64) // Standard Error: 3_000 .saturating_add(Weight::from_ref_time(37_000 as u64).saturating_mul(p as u64)) @@ -244,7 +244,7 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) - fn kill_anonymous(p: u32, ) -> Weight { + fn kill_pure(p: u32, ) -> Weight { Weight::from_ref_time(24_657_000 as u64) // Standard Error: 2_000 .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(p as u64)) From bffd6a3007e0163cff7458559105028a4d2031a0 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Fri, 23 Sep 2022 10:36:33 +0100 Subject: [PATCH 163/348] Fast Unstake Pallet (#12129) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add failing test for itamar * an ugly example of fast unstake * Revert "add failing test for itamar" This reverts commit bdd139989cb4dfa8cc6cc6b5ee470a711df32d25. 
* fast unstake wip * clean it up a bit * some comments * on_idle logic * fix * comment * new working version, checks all pass, looking good * some notes * add mock boilerplate * more boilerplate * simplify the weight stuff * ExtBuilder for pools * fmt * rm bags-list, simplify setup_works * mock + tests boilerplate * make some benchmarks work * mock boilerplate * tests boilerplate * run_to_block works * add Error enums * add test * note * make UnstakeRequest fields pub * some tests * fix origin * fmt * add fast_unstake_events_since_last_call * text * rewrite some benchmes and fix them -- the outcome is still strange * Fix weights * cleanup * Update frame/election-provider-support/solution-type/src/single_page.rs * fix build * Fix pools tests * iterate teset + mock * test unfinished * cleanup and add some tests * add test successful_multi_queue * comment * rm Head check * add TODO * complete successful_multi_queue * + test early_exit * fix a lot of things above the beautiful atlantic ocean 🌊 * seemingly it is finished now * Fix build * ".git/.scripts/fmt.sh" 1 * Fix slashing amount as well * better docs * abstract types * rm use * import * Update frame/nomination-pools/benchmarking/src/lib.rs Co-authored-by: Nitwit <47109040+nitwit69@users.noreply.github.com> * Update frame/fast-unstake/src/types.rs Co-authored-by: Nitwit <47109040+nitwit69@users.noreply.github.com> * Fix build * fmt * Update frame/fast-unstake/src/lib.rs Co-authored-by: Keith Yeung * make bounded * feedback from code review with Ankan * Update frame/fast-unstake/src/lib.rs Co-authored-by: Roman Useinov * Update frame/fast-unstake/src/lib.rs Co-authored-by: Roman Useinov * Update frame/fast-unstake/src/lib.rs Co-authored-by: Roman Useinov * Update frame/fast-unstake/src/lib.rs Co-authored-by: Roman Useinov * Update frame/fast-unstake/src/lib.rs Co-authored-by: Roman Useinov * Update frame/fast-unstake/src/lib.rs Co-authored-by: Roman Useinov * Update frame/fast-unstake/src/lib.rs Co-authored-by: Roman Useinov * Update frame/fast-unstake/src/lib.rs Co-authored-by: Roman Useinov * Update frame/fast-unstake/src/lib.rs Co-authored-by: Roman Useinov * Update frame/fast-unstake/src/mock.rs * update to master * some final review comments * fmt * fix clippy * remove unused * ".git/.scripts/fmt.sh" 1 * make it all build again * fmt * undo fishy change Co-authored-by: Ross Bulat Co-authored-by: command-bot <> Co-authored-by: Nitwit <47109040+nitwit69@users.noreply.github.com> Co-authored-by: Keith Yeung Co-authored-by: Roman Useinov --- Cargo.lock | 26 + Cargo.toml | 1 + bin/node/runtime/Cargo.toml | 4 + bin/node/runtime/src/lib.rs | 9 + .../election-provider-multi-phase/src/lib.rs | 11 + .../election-provider-multi-phase/src/mock.rs | 4 + frame/election-provider-support/src/lib.rs | 9 +- .../election-provider-support/src/onchain.rs | 8 + frame/fast-unstake/Cargo.toml | 70 ++ frame/fast-unstake/src/benchmarking.rs | 228 ++++ frame/fast-unstake/src/lib.rs | 505 ++++++++ frame/fast-unstake/src/mock.rs | 387 ++++++ frame/fast-unstake/src/tests.rs | 1036 +++++++++++++++++ frame/fast-unstake/src/types.rs | 119 ++ frame/fast-unstake/src/weights.rs | 210 ++++ .../nomination-pools/benchmarking/src/lib.rs | 36 +- frame/nomination-pools/src/lib.rs | 3 +- frame/nomination-pools/src/tests.rs | 2 +- frame/staking/src/lib.rs | 2 +- frame/staking/src/pallet/impls.rs | 4 +- frame/staking/src/pallet/mod.rs | 5 +- 21 files changed, 2650 insertions(+), 29 deletions(-) create mode 100644 frame/fast-unstake/Cargo.toml create mode 100644 
frame/fast-unstake/src/benchmarking.rs create mode 100644 frame/fast-unstake/src/lib.rs create mode 100644 frame/fast-unstake/src/mock.rs create mode 100644 frame/fast-unstake/src/tests.rs create mode 100644 frame/fast-unstake/src/types.rs create mode 100644 frame/fast-unstake/src/weights.rs diff --git a/Cargo.lock b/Cargo.lock index 5172c3d02861d..d1349ce488fa7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3376,6 +3376,7 @@ dependencies = [ "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", + "pallet-fast-unstake", "pallet-gilt", "pallet-grandpa", "pallet-identity", @@ -5718,6 +5719,31 @@ dependencies = [ "sp-tasks", ] +[[package]] +name = "pallet-fast-unstake" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-election-provider-support", + "frame-support", + "frame-system", + "log", + "pallet-balances", + "pallet-nomination-pools", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", + "sp-tracing", + "substrate-test-utils", +] + [[package]] name = "pallet-gilt" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 48a31940fd3bf..018355df6c9fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,6 +90,7 @@ members = [ "frame/contracts/rpc/runtime-api", "frame/conviction-voting", "frame/democracy", + "frame/fast-unstake", "frame/try-runtime", "frame/election-provider-multi-phase", "frame/election-provider-support", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index d52b1aaccfcfa..e722024231651 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -68,6 +68,7 @@ pallet-democracy = { version = "4.0.0-dev", default-features = false, path = ".. 
pallet-election-provider-multi-phase = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-multi-phase" } pallet-election-provider-support-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-support/benchmarking", optional = true } pallet-elections-phragmen = { version = "5.0.0-dev", default-features = false, path = "../../../frame/elections-phragmen" } +pallet-fast-unstake = { version = "4.0.0-dev", default-features = false, path = "../../../frame/fast-unstake" } pallet-gilt = { version = "4.0.0-dev", default-features = false, path = "../../../frame/gilt" } pallet-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../../frame/grandpa" } pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../../frame/im-online" } @@ -142,6 +143,7 @@ std = [ "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-elections-phragmen/std", + "pallet-fast-unstake/std", "frame-executive/std", "pallet-gilt/std", "pallet-grandpa/std", @@ -220,6 +222,7 @@ runtime-benchmarks = [ "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-election-provider-support-benchmarking/runtime-benchmarks", "pallet-elections-phragmen/runtime-benchmarks", + "pallet-fast-unstake/runtime-benchmarks", "pallet-gilt/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-identity/runtime-benchmarks", @@ -272,6 +275,7 @@ try-runtime = [ "pallet-democracy/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-elections-phragmen/try-runtime", + "pallet-fast-unstake/try-runtime", "pallet-gilt/try-runtime", "pallet-grandpa/try-runtime", "pallet-im-online/try-runtime", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d6a9798b4d9ca..8ed5f1c847f5e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -579,6 +579,13 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = StakingBenchmarkingConfig; } +impl pallet_fast_unstake::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SlashPerEra = ConstU128<{ DOLLARS }>; + type ControlOrigin = frame_system::EnsureRoot; + type WeightInfo = (); +} + parameter_types! { // phase durations. 1/4 of the last session for each. pub const SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; @@ -1655,6 +1662,7 @@ construct_runtime!( NominationPools: pallet_nomination_pools, RankedPolls: pallet_referenda::, RankedCollective: pallet_ranked_collective, + FastUnstake: pallet_fast_unstake, } ); @@ -1741,6 +1749,7 @@ mod benches { [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] [pallet_election_provider_support_benchmarking, EPSBench::] [pallet_elections_phragmen, Elections] + [pallet_fast_unstake, FastUnstake] [pallet_gilt, Gilt] [pallet_grandpa, Grandpa] [pallet_identity, Identity] diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 3c9b15dabb053..05353e5a3ac61 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -318,6 +318,10 @@ impl ElectionProvider for NoFallback { type DataProvider = T::DataProvider; type Error = &'static str; + fn ongoing() -> bool { + false + } + fn elect() -> Result, Self::Error> { // Do nothing, this will enable the emergency phase. 
Err("NoFallback.") @@ -1598,6 +1602,13 @@ impl ElectionProvider for Pallet { type Error = ElectionError; type DataProvider = T::DataProvider; + fn ongoing() -> bool { + match Self::current_phase() { + Phase::Off => false, + _ => true, + } + } + fn elect() -> Result, Self::Error> { match Self::do_elect() { Ok(supports) => { diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index d7affc14564f5..34aa2e1bbfc58 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -303,6 +303,10 @@ impl ElectionProvider for MockFallback { type Error = &'static str; type DataProvider = StakingMock; + fn ongoing() -> bool { + false + } + fn elect() -> Result, Self::Error> { Self::elect_with_bounds(Bounded::max_value(), Bounded::max_value()) } diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 9f60411435da0..0bf62bd8c35cd 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -136,7 +136,7 @@ //! type BlockNumber = BlockNumber; //! type Error = &'static str; //! type DataProvider = T::DataProvider; -//! +//! fn ongoing() -> bool { false } //! fn elect() -> Result, Self::Error> { //! Self::DataProvider::electable_targets(None) //! .map_err(|_| "failed to elect") @@ -370,6 +370,9 @@ pub trait ElectionProvider { BlockNumber = Self::BlockNumber, >; + /// Indicate if this election provider is currently ongoing an asynchronous election or not. + fn ongoing() -> bool; + /// Elect a new set of winners, without specifying any bounds on the amount of data fetched from /// [`Self::DataProvider`]. An implementation could nonetheless impose its own custom limits. /// @@ -420,6 +423,10 @@ where fn elect() -> Result, Self::Error> { Err(" cannot do anything.") } + + fn ongoing() -> bool { + false + } } /// A utility trait for something to implement `ElectionDataProvider` in a sensible way. 
diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index e477499f3c927..10c3519d03df6 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -138,6 +138,10 @@ impl ElectionProvider for UnboundedExecution { type Error = Error; type DataProvider = T::DataProvider; + fn ongoing() -> bool { + false + } + fn elect() -> Result, Self::Error> { // This should not be called if not in `std` mode (and therefore neither in genesis nor in // testing) @@ -167,6 +171,10 @@ impl ElectionProvider for BoundedExecution { type Error = Error; type DataProvider = T::DataProvider; + fn ongoing() -> bool { + false + } + fn elect() -> Result, Self::Error> { elect_with::(Some(T::VotersBound::get() as usize), Some(T::TargetsBound::get() as usize)) } diff --git a/frame/fast-unstake/Cargo.toml b/frame/fast-unstake/Cargo.toml new file mode 100644 index 0000000000000..1fa118dba4a8d --- /dev/null +++ b/frame/fast-unstake/Cargo.toml @@ -0,0 +1,70 @@ +[package] +name = "pallet-fast-unstake" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Unlicense" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME fast unstake pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +log = { version = "0.4.17", default-features = false } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } + +sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } +sp-staking = { default-features = false, path = "../../primitives/staking" } + +pallet-balances = { default-features = false, path = "../balances" } +pallet-timestamp = { default-features = false, path = "../timestamp" } +pallet-staking = { default-features = false, path = "../staking" } +pallet-nomination-pools = { default-features = false, path = "../nomination-pools" } +frame-election-provider-support = { default-features = false, path = "../election-provider-support" } + +frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } + +[dev-dependencies] +pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } +sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } +substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } +sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } + +[features] +default = ["std"] +std = [ + "codec/std", + "log/std", + "scale-info/std", + + "frame-support/std", + "frame-system/std", + + "sp-io/std", + "sp-staking/std", + "sp-runtime/std", + "sp-std/std", + + "pallet-staking/std", + "pallet-nomination-pools/std", + "pallet-balances/std", + "pallet-timestamp/std", + "frame-election-provider-support/std", + + "frame-benchmarking/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + 
"frame-system/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs new file mode 100644 index 0000000000000..68a3da0d40af3 --- /dev/null +++ b/frame/fast-unstake/src/benchmarking.rs @@ -0,0 +1,228 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking for pallet-fast-unstake. + +#![cfg(feature = "runtime-benchmarks")] + +use crate::{types::*, Pallet as FastUnstake, *}; +use frame_benchmarking::{benchmarks, whitelist_account}; +use frame_support::{ + assert_ok, + traits::{Currency, EnsureOrigin, Get, Hooks}, +}; +use frame_system::RawOrigin; +use pallet_nomination_pools::{Pallet as Pools, PoolId}; +use pallet_staking::Pallet as Staking; +use sp_runtime::traits::{StaticLookup, Zero}; +use sp_staking::EraIndex; +use sp_std::prelude::*; + +const USER_SEED: u32 = 0; +const DEFAULT_BACKER_PER_VALIDATOR: u32 = 128; +const MAX_VALIDATORS: u32 = 128; + +type CurrencyOf = ::Currency; + +fn l( + who: T::AccountId, +) -> <::Lookup as StaticLookup>::Source { + T::Lookup::unlookup(who) +} + +fn create_unexposed_nominator() -> T::AccountId { + let account = frame_benchmarking::account::("nominator_42", 0, USER_SEED); + fund_and_bond_account::(&account); + account +} + +fn fund_and_bond_account(account: &T::AccountId) { + let stake = CurrencyOf::::minimum_balance() * 100u32.into(); + CurrencyOf::::make_free_balance_be(&account, stake * 10u32.into()); + + let account_lookup = l::(account.clone()); + // bond and nominate ourselves, this will guarantee that we are not backing anyone. + assert_ok!(Staking::::bond( + RawOrigin::Signed(account.clone()).into(), + account_lookup.clone(), + stake, + pallet_staking::RewardDestination::Controller, + )); + assert_ok!(Staking::::nominate( + RawOrigin::Signed(account.clone()).into(), + vec![account_lookup] + )); +} + +pub(crate) fn fast_unstake_events() -> Vec> { + frame_system::Pallet::::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| ::RuntimeEvent::from(e).try_into().ok()) + .collect::>() +} + +fn setup_pool() -> PoolId { + let depositor = frame_benchmarking::account::("depositor_42", 0, USER_SEED); + let depositor_lookup = l::(depositor.clone()); + + let stake = Pools::::depositor_min_bond(); + CurrencyOf::::make_free_balance_be(&depositor, stake * 10u32.into()); + + Pools::::create( + RawOrigin::Signed(depositor.clone()).into(), + stake, + depositor_lookup.clone(), + depositor_lookup.clone(), + depositor_lookup, + ) + .unwrap(); + + pallet_nomination_pools::LastPoolId::::get() +} + +fn setup_staking(v: u32, until: EraIndex) { + let ed = CurrencyOf::::minimum_balance(); + + log!(debug, "registering {} validators and {} eras.", v, until); + + // our validators don't actually need to registered in staking -- just generate `v` random + // accounts. 
+ let validators = (0..v) + .map(|x| frame_benchmarking::account::("validator", x, USER_SEED)) + .collect::>(); + + for era in 0..=until { + let others = (0..DEFAULT_BACKER_PER_VALIDATOR) + .map(|s| { + let who = frame_benchmarking::account::("nominator", era, s); + let value = ed; + pallet_staking::IndividualExposure { who, value } + }) + .collect::>(); + let exposure = + pallet_staking::Exposure { total: Default::default(), own: Default::default(), others }; + validators.iter().for_each(|v| { + Staking::::add_era_stakers(era, v.clone(), exposure.clone()); + }); + } +} + +fn on_idle_full_block() { + let remaining_weight = ::BlockWeights::get().max_block; + FastUnstake::::on_idle(Zero::zero(), remaining_weight); +} + +benchmarks! { + // on_idle, we we don't check anyone, but fully unbond and move them to another pool. + on_idle_unstake { + let who = create_unexposed_nominator::(); + let pool_id = setup_pool::(); + assert_ok!(FastUnstake::::register_fast_unstake( + RawOrigin::Signed(who.clone()).into(), + Some(pool_id) + )); + ErasToCheckPerBlock::::put(1); + + // run on_idle once. This will check era 0. + assert_eq!(Head::::get(), None); + on_idle_full_block::(); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap(), maybe_pool_id: Some(pool_id) }) + ); + } + : { + on_idle_full_block::(); + } + verify { + assert!(matches!( + fast_unstake_events::().last(), + Some(Event::Unstaked { .. }) + )); + } + + // on_idle, when we check some number of eras, + on_idle_check { + // number of eras multiplied by validators in that era. + let x in (::BondingDuration::get() * 1) .. (::BondingDuration::get() * MAX_VALIDATORS); + + let v = x / ::BondingDuration::get(); + let u = ::BondingDuration::get(); + + ErasToCheckPerBlock::::put(u); + pallet_staking::CurrentEra::::put(u); + + // setup staking with v validators and u eras of data (0..=u) + setup_staking::(v, u); + let who = create_unexposed_nominator::(); + assert_ok!(FastUnstake::::register_fast_unstake( + RawOrigin::Signed(who.clone()).into(), + None, + )); + + // no one is queued thus far. + assert_eq!(Head::::get(), None); + } + : { + on_idle_full_block::(); + } + verify { + let checked: frame_support::BoundedVec<_, _> = (1..=u).rev().collect::>().try_into().unwrap(); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { stash: who.clone(), checked, maybe_pool_id: None }) + ); + assert!(matches!( + fast_unstake_events::().last(), + Some(Event::Checking { .. }) + )); + } + + register_fast_unstake { + let who = create_unexposed_nominator::(); + whitelist_account!(who); + assert_eq!(Queue::::count(), 0); + + } + :_(RawOrigin::Signed(who.clone()), None) + verify { + assert_eq!(Queue::::count(), 1); + } + + deregister { + let who = create_unexposed_nominator::(); + assert_ok!(FastUnstake::::register_fast_unstake( + RawOrigin::Signed(who.clone()).into(), + None + )); + assert_eq!(Queue::::count(), 1); + whitelist_account!(who); + } + :_(RawOrigin::Signed(who.clone())) + verify { + assert_eq!(Queue::::count(), 0); + } + + control { + let origin = ::ControlOrigin::successful_origin(); + } + : _(origin, 128) + verify {} + + impl_benchmark_test_suite!(Pallet, crate::mock::ExtBuilder::default().build(), crate::mock::Runtime) +} diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs new file mode 100644 index 0000000000000..51416808f48c8 --- /dev/null +++ b/frame/fast-unstake/src/lib.rs @@ -0,0 +1,505 @@ +// This file is part of Substrate. 
+
+// Copyright (C) 2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! A pallet that's designed to JUST do the following:
+//!
+//! If a nominator is not exposed in any `ErasStakers` (i.e. "has not actively backed any
+//! validators in the last `BondingDuration` eras"), then they can register themselves in this
+//! pallet, unstake faster than having to wait an entire bonding duration, and potentially move
+//! into a nomination pool.
+//!
+//! Appearing in the exposure of a validator means being exposed equal to that validator from the
+//! point of view of the staking system. This usually means earning rewards with the validator, and
+//! also being at the risk of slashing with the validator. This is equivalent to the "Active
+//! Nominator" role explained in the
+//! [February Staking Update](https://polkadot.network/blog/staking-update-february-2022/).
+//!
+//! This pallet works on the basis of `on_idle`, meaning that it provides no guarantee about when
+//! it will succeed, if at all. Moreover, the queue implementation is unordered. In case of
+//! congestion, no FIFO ordering is provided.
+//!
+//! Stakers who are certain about NOT being exposed can register themselves with
+//! [`Call::register_fast_unstake`]. This will chill and fully unbond the staker, and place them in
+//! the queue to be checked.
+//!
+//! Once queued, but not yet being actively processed, stakers can withdraw their request via
+//! [`Call::deregister`].
+//!
+//! Once queued, a staker wishing to unbond can perform no further action in pallet-staking. This is
+//! to prevent them from accidentally exposing themselves behind a validator etc.
+//!
+//! Once processed, if successful, no additional fee for the checking process is taken, and the
+//! staker is instantly unbonded. Optionally, if they have asked to join a pool, their *entire*
+//! stake is joined into their pool of choice.
+//!
+//! If unsuccessful, meaning that the staker was exposed sometime in the last `BondingDuration`
+//! eras, they will end up being slashed for the amount of wasted work they have inflicted on the
+//! chain.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+pub use pallet::*;
+
+#[cfg(test)]
+mod mock;
+
+#[cfg(test)]
+mod tests;
+
+// NOTE: enable benchmarking in tests as well.
+#[cfg(feature = "runtime-benchmarks")]
+mod benchmarking;
+mod types;
+pub mod weights;
+
+pub const LOG_TARGET: &'static str = "runtime::fast-unstake";
+
+// syntactic sugar for logging.
+#[macro_export]
+macro_rules! log {
+ ($level:tt, $patter:expr $(, $values:expr)* $(,)?)
=> { + log::$level!( + target: crate::LOG_TARGET, + concat!("[{:?}] 💨 ", $patter), >::block_number() $(, $values)* + ) + }; +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use crate::types::*; + use frame_election_provider_support::ElectionProvider; + use frame_support::pallet_prelude::*; + use frame_system::{pallet_prelude::*, RawOrigin}; + use pallet_nomination_pools::PoolId; + use pallet_staking::Pallet as Staking; + use sp_runtime::{ + traits::{Saturating, Zero}, + DispatchResult, + }; + use sp_staking::EraIndex; + use sp_std::{prelude::*, vec::Vec}; + use weights::WeightInfo; + + #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] + #[codec(mel_bound(T: Config))] + #[scale_info(skip_type_params(T))] + pub struct MaxChecking(sp_std::marker::PhantomData); + impl frame_support::traits::Get for MaxChecking { + fn get() -> u32 { + ::BondingDuration::get() + 1 + } + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: + frame_system::Config + + pallet_staking::Config< + CurrencyBalance = ::CurrencyBalance, + > + pallet_nomination_pools::Config + { + /// The overarching event type. + type RuntimeEvent: From> + + IsType<::RuntimeEvent> + + TryInto>; + + /// The amount of balance slashed per each era that was wastefully checked. + /// + /// A reasonable value could be `runtime_weight_to_fee(weight_per_era_check)`. + type SlashPerEra: Get>; + + /// The origin that can control this pallet. + type ControlOrigin: frame_support::traits::EnsureOrigin; + + /// The weight information of this pallet. + type WeightInfo: WeightInfo; + } + + /// The current "head of the queue" being unstaked. + #[pallet::storage] + pub type Head = + StorageValue<_, UnstakeRequest>, OptionQuery>; + + /// The map of all accounts wishing to be unstaked. + /// + /// Points the `AccountId` wishing to unstake to the optional `PoolId` they wish to join + /// thereafter. + #[pallet::storage] + pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, Option>; + + /// Number of eras to check per block. + /// + /// If set to 0, this pallet does absolutely nothing. + /// + /// Based on the amount of weight available at `on_idle`, up to this many eras of a single + /// nominator might be checked. + #[pallet::storage] + pub type ErasToCheckPerBlock = StorageValue<_, u32, ValueQuery>; + + /// The events of this pallet. + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A staker was unstaked. + Unstaked { stash: T::AccountId, maybe_pool_id: Option, result: DispatchResult }, + /// A staker was slashed for requesting fast-unstake whilst being exposed. + Slashed { stash: T::AccountId, amount: BalanceOf }, + /// A staker was partially checked for the given eras, but the process did not finish. + Checking { stash: T::AccountId, eras: Vec }, + /// Some internal error happened while migrating stash. They are removed as head as a + /// consequence. + Errored { stash: T::AccountId }, + /// An internal error happened. Operations will be paused now. + InternalError, + } + + #[pallet::error] + #[cfg_attr(test, derive(PartialEq))] + pub enum Error { + /// The provided Controller account was not found. + /// + /// This means that the given account is not bonded. + NotController, + /// The bonded account has already been queued. + AlreadyQueued, + /// The bonded account has active unlocking chunks. + NotFullyBonded, + /// The provided un-staker is not in the `Queue`. 
+ NotQueued,
+ /// The provided un-staker is already in Head, and cannot deregister.
+ AlreadyHead,
+ }
+
+ #[pallet::hooks]
+ impl<T: Config> Hooks<T::BlockNumber> for Pallet<T> {
+ fn on_idle(_: T::BlockNumber, remaining_weight: Weight) -> Weight {
+ if remaining_weight.any_lt(T::DbWeight::get().reads(2)) {
+ return Weight::from_ref_time(0)
+ }
+
+ Self::do_on_idle(remaining_weight)
+ }
+ }
+
+ #[pallet::call]
+ impl<T: Config> Pallet<T> {
+ /// Register oneself for fast-unstake.
+ ///
+ /// The dispatch origin of this call must be signed by the controller account, similar to
+ /// `staking::unbond`.
+ ///
+ /// The stash associated with the origin must have no ongoing unlocking chunks. If
+ /// successful, this will fully unbond and chill the stash. Then, it will enqueue the stash
+ /// to be checked in further blocks.
+ ///
+ /// If by the time this is called, the stash is actually eligible for fast-unstake, then
+ /// they are guaranteed to remain eligible, because the call will chill them as well.
+ ///
+ /// If the check works, the entire staking data is removed, i.e. the stash is fully
+ /// unstaked, and they potentially join a pool with their entire bonded stake.
+ ///
+ /// If the check fails, the stash remains chilled and waiting to be unbonded as with the
+ /// normal staking system, but they lose part of their unbonding chunks due to consuming
+ /// the chain's resources.
+ #[pallet::weight(<T as Config>::WeightInfo::register_fast_unstake())]
+ pub fn register_fast_unstake(
+ origin: OriginFor<T>,
+ maybe_pool_id: Option<PoolId>,
+ ) -> DispatchResult {
+ let ctrl = ensure_signed(origin)?;
+
+ let ledger =
+ pallet_staking::Ledger::<T>::get(&ctrl).ok_or(Error::<T>::NotController)?;
+ ensure!(!Queue::<T>::contains_key(&ledger.stash), Error::<T>::AlreadyQueued);
+ ensure!(
+ Head::<T>::get().map_or(true, |UnstakeRequest { stash, .. }| stash != ledger.stash),
+ Error::<T>::AlreadyHead
+ );
+ // second part of the && is defensive.
+ ensure!(
+ ledger.active == ledger.total && ledger.unlocking.is_empty(),
+ Error::<T>::NotFullyBonded
+ );
+
+ // chill and fully unstake.
+ Staking::<T>::chill(RawOrigin::Signed(ctrl.clone()).into())?;
+ Staking::<T>::unbond(RawOrigin::Signed(ctrl).into(), ledger.total)?;
+
+ // enqueue them.
+ Queue::<T>::insert(ledger.stash, maybe_pool_id);
+ Ok(())
+ }
+
+ /// Deregister oneself from the fast-unstake (this also cancels joining the pool if that
+ /// was supplied on `register_fast_unstake`).
+ ///
+ /// This is useful if one is registered, they are still waiting, and they change their mind.
+ ///
+ /// Note that the associated stash is still fully unbonded and chilled as a consequence of
+ /// calling `register_fast_unstake`. This should probably be followed by a call to
+ /// `Staking::rebond`.
+ #[pallet::weight(<T as Config>::WeightInfo::deregister())]
+ pub fn deregister(origin: OriginFor<T>) -> DispatchResult {
+ let ctrl = ensure_signed(origin)?;
+ let stash = pallet_staking::Ledger::<T>::get(&ctrl)
+ .map(|l| l.stash)
+ .ok_or(Error::<T>::NotController)?;
+ ensure!(Queue::<T>::contains_key(&stash), Error::<T>::NotQueued);
+ // NOTE: bind the head's stash under a different name; otherwise it would shadow `stash`
+ // and the comparison below would always be false.
+ ensure!(
+ Head::<T>::get()
+ .map_or(true, |UnstakeRequest { stash: head_stash, .. }| head_stash != stash),
+ Error::<T>::AlreadyHead
+ );
+ Queue::<T>::remove(stash);
+ Ok(())
+ }
+
+ /// Control the operation of this pallet.
+ ///
+ /// Dispatch origin must be signed by the [`Config::ControlOrigin`].
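+ ///
+ /// Note that setting `unchecked_eras_to_check` to zero effectively pauses the pallet, since
+ /// `on_idle` bails out early whenever `ErasToCheckPerBlock` is zero.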
+ #[pallet::weight(::WeightInfo::control())] + pub fn control(origin: OriginFor, unchecked_eras_to_check: EraIndex) -> DispatchResult { + let _ = T::ControlOrigin::ensure_origin(origin)?; + ErasToCheckPerBlock::::put(unchecked_eras_to_check); + Ok(()) + } + } + + impl Pallet { + /// process up to `remaining_weight`. + /// + /// Returns the actual weight consumed. + /// + /// Written for readability in mind, not efficiency. For example: + /// + /// 1. We assume this is only ever called once per `on_idle`. This is because we know that + /// in all use cases, even a single nominator cannot be unbonded in a single call. Multiple + /// calls to this function are thus not needed. + /// + /// 2. We will only mark a staker as unstaked if at the beginning of a check cycle, they are + /// found out to have no eras to check. At the end of a check cycle, even if they are fully + /// checked, we don't finish the process. + pub(crate) fn do_on_idle(remaining_weight: Weight) -> Weight { + let mut eras_to_check_per_block = ErasToCheckPerBlock::::get(); + if eras_to_check_per_block.is_zero() { + return T::DbWeight::get().reads(1) + } + + // NOTE: here we're assuming that the number of validators has only ever increased, + // meaning that the number of exposures to check is either this per era, or less. + let validator_count = pallet_staking::ValidatorCount::::get(); + + // determine the number of eras to check. This is based on both `ErasToCheckPerBlock` + // and `remaining_weight` passed on to us from the runtime executive. + let max_weight = |v, u| { + ::WeightInfo::on_idle_check(v * u) + .max(::WeightInfo::on_idle_unstake()) + }; + while max_weight(validator_count, eras_to_check_per_block).any_gt(remaining_weight) { + eras_to_check_per_block.saturating_dec(); + if eras_to_check_per_block.is_zero() { + log!(debug, "early existing because eras_to_check_per_block is zero"); + return T::DbWeight::get().reads(2) + } + } + + if ::ElectionProvider::ongoing() { + // NOTE: we assume `ongoing` does not consume any weight. + // there is an ongoing election -- we better not do anything. Imagine someone is not + // exposed anywhere in the last era, and the snapshot for the election is already + // taken. In this time period, we don't want to accidentally unstake them. + return T::DbWeight::get().reads(2) + } + + let UnstakeRequest { stash, mut checked, maybe_pool_id } = match Head::::take() + .or_else(|| { + // NOTE: there is no order guarantees in `Queue`. + Queue::::drain() + .map(|(stash, maybe_pool_id)| UnstakeRequest { + stash, + maybe_pool_id, + checked: Default::default(), + }) + .next() + }) { + None => { + // There's no `Head` and nothing in the `Queue`, nothing to do here. + return T::DbWeight::get().reads(4) + }, + Some(head) => head, + }; + + log!( + debug, + "checking {:?}, eras_to_check_per_block = {:?}, remaining_weight = {:?}", + stash, + eras_to_check_per_block, + remaining_weight + ); + + // the range that we're allowed to check in this round. + let current_era = pallet_staking::CurrentEra::::get().unwrap_or_default(); + let bonding_duration = ::BondingDuration::get(); + // prune all the old eras that we don't care about. This will help us keep the bound + // of `checked`. + checked.retain(|e| *e >= current_era.saturating_sub(bonding_duration)); + let unchecked_eras_to_check = { + // get the last available `bonding_duration` eras up to current era in reverse + // order. 
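+ // e.g. with the mock's `BondingDuration = 3` and `current_era = 3`, the full range is
+ // `0..=3`, i.e. `[3, 2, 1, 0]` once reversed -- never more than `bonding_duration + 1` eras.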
+ let total_check_range = (current_era.saturating_sub(bonding_duration)..= + current_era) + .rev() + .collect::>(); + debug_assert!( + total_check_range.len() <= (bonding_duration + 1) as usize, + "{:?}", + total_check_range + ); + + // remove eras that have already been checked, take a maximum of + // eras_to_check_per_block. + total_check_range + .into_iter() + .filter(|e| !checked.contains(e)) + .take(eras_to_check_per_block as usize) + .collect::>() + }; + + log!( + debug, + "{} eras to check: {:?}", + unchecked_eras_to_check.len(), + unchecked_eras_to_check + ); + + if unchecked_eras_to_check.is_empty() { + // `stash` is not exposed in any era now -- we can let go of them now. + let num_slashing_spans = Staking::::slashing_spans(&stash).iter().count() as u32; + + let ctrl = match pallet_staking::Bonded::::get(&stash) { + Some(ctrl) => ctrl, + None => { + Self::deposit_event(Event::::Errored { stash }); + return ::WeightInfo::on_idle_unstake() + }, + }; + + let ledger = match pallet_staking::Ledger::::get(ctrl) { + Some(ledger) => ledger, + None => { + Self::deposit_event(Event::::Errored { stash }); + return ::WeightInfo::on_idle_unstake() + }, + }; + + let unstake_result = pallet_staking::Pallet::::force_unstake( + RawOrigin::Root.into(), + stash.clone(), + num_slashing_spans, + ); + + let pool_stake_result = if let Some(pool_id) = maybe_pool_id { + pallet_nomination_pools::Pallet::::join( + RawOrigin::Signed(stash.clone()).into(), + ledger.total, + pool_id, + ) + } else { + Ok(()) + }; + + let result = unstake_result.and(pool_stake_result); + log!( + info, + "unstaked {:?}, maybe_pool {:?}, outcome: {:?}", + stash, + maybe_pool_id, + result + ); + + Self::deposit_event(Event::::Unstaked { stash, maybe_pool_id, result }); + ::WeightInfo::on_idle_unstake() + } else { + // eras remaining to be checked. + let mut eras_checked = 0u32; + let is_exposed = unchecked_eras_to_check.iter().any(|e| { + eras_checked.saturating_inc(); + Self::is_exposed_in_era(&stash, e) + }); + + log!( + debug, + "checked {:?} eras, exposed? {}, (v: {:?}, u: {:?})", + eras_checked, + is_exposed, + validator_count, + unchecked_eras_to_check.len() + ); + + // NOTE: you can be extremely unlucky and get slashed here: You are not exposed in + // the last 28 eras, have registered yourself to be unstaked, midway being checked, + // you are exposed. + if is_exposed { + let amount = T::SlashPerEra::get() + .saturating_mul(eras_checked.saturating_add(checked.len() as u32).into()); + pallet_staking::slashing::do_slash::( + &stash, + amount, + &mut Default::default(), + &mut Default::default(), + current_era, + ); + log!(info, "slashed {:?} by {:?}", stash, amount); + Self::deposit_event(Event::::Slashed { stash, amount }); + } else { + // Not exposed in these eras. + match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { + Ok(_) => { + Head::::put(UnstakeRequest { + stash: stash.clone(), + checked, + maybe_pool_id, + }); + Self::deposit_event(Event::::Checking { + stash, + eras: unchecked_eras_to_check, + }); + }, + Err(_) => { + // don't put the head back in -- there is an internal error in the + // pallet. + frame_support::defensive!("`checked is pruned via retain above`"); + ErasToCheckPerBlock::::put(0); + Self::deposit_event(Event::::InternalError); + }, + } + } + + ::WeightInfo::on_idle_check(validator_count * eras_checked) + } + } + + /// Checks whether an account `staker` has been exposed in an era. 
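+ ///
+ /// This walks `ErasStakers` for the given era, so its cost grows with the number of
+ /// validators in that era; that is the `validator_count` factor used in the
+ /// `on_idle_check` weight above.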
+ fn is_exposed_in_era(staker: &T::AccountId, era: &EraIndex) -> bool { + pallet_staking::ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { + validator == *staker || exposures.others.iter().any(|i| i.who == *staker) + }) + } + } +} diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs new file mode 100644 index 0000000000000..b9cf16e18e8d1 --- /dev/null +++ b/frame/fast-unstake/src/mock.rs @@ -0,0 +1,387 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{self as fast_unstake}; +use frame_support::{ + assert_ok, + pallet_prelude::*, + parameter_types, + traits::{ConstU64, ConstU8, Currency}, + weights::constants::WEIGHT_PER_SECOND, + PalletId, +}; +use sp_runtime::{ + traits::{Convert, IdentityLookup}, + FixedU128, +}; + +use frame_system::RawOrigin; +use pallet_staking::{Exposure, IndividualExposure, StakerStatus}; +use sp_std::prelude::*; + +pub type AccountId = u128; +pub type AccountIndex = u32; +pub type BlockNumber = u64; +pub type Balance = u128; +pub type T = Runtime; + +parameter_types! { + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND); +} + +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = AccountIndex; + type BlockNumber = BlockNumber; + type RuntimeCall = RuntimeCall; + type Hash = sp_core::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = sp_runtime::testing::Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<5>; + type WeightInfo = (); +} + +parameter_types! { + pub static ExistentialDeposit: Balance = 1; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<128>; + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +pallet_staking_reward_curve::build! 
{ + const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} + +parameter_types! { + pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; + pub static BondingDuration: u32 = 3; + pub static CurrentEra: u32 = 0; + pub static Ongoing: bool = false; +} + +pub struct MockElection; +impl frame_election_provider_support::ElectionProvider for MockElection { + type AccountId = AccountId; + type BlockNumber = BlockNumber; + type DataProvider = Staking; + type Error = (); + + fn ongoing() -> bool { + Ongoing::get() + } + + fn elect() -> Result, Self::Error> { + Err(()) + } +} + +impl pallet_staking::Config for Runtime { + type MaxNominations = ConstU32<16>; + type Currency = Balances; + type CurrencyBalance = Balance; + type UnixTime = pallet_timestamp::Pallet; + type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type RewardRemainder = (); + type RuntimeEvent = RuntimeEvent; + type Slash = (); + type Reward = (); + type SessionsPerEra = (); + type SlashDeferDuration = (); + type SlashCancelOrigin = frame_system::EnsureRoot; + type BondingDuration = BondingDuration; + type SessionInterface = (); + type EraPayout = pallet_staking::ConvertCurve; + type NextNewSession = (); + type HistoryDepth = ConstU32<84>; + type MaxNominatorRewardedPerValidator = ConstU32<64>; + type OffendingValidatorsThreshold = (); + type ElectionProvider = MockElection; + type GenesisElectionProvider = Self::ElectionProvider; + type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type TargetList = pallet_staking::UseValidatorsMap; + type MaxUnlockingChunks = ConstU32<32>; + type OnStakerSlash = Pools; + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type WeightInfo = (); +} + +pub struct BalanceToU256; +impl Convert for BalanceToU256 { + fn convert(n: Balance) -> sp_core::U256 { + n.into() + } +} + +pub struct U256ToBalance; +impl Convert for U256ToBalance { + fn convert(n: sp_core::U256) -> Balance { + n.try_into().unwrap() + } +} + +parameter_types! { + pub const PostUnbondingPoolsWindow: u32 = 10; + pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); + pub static MaxMetadataLen: u32 = 10; + pub static CheckLevel: u8 = 255; +} + +impl pallet_nomination_pools::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type Currency = Balances; + type CurrencyBalance = Balance; + type RewardCounter = FixedU128; + type BalanceToU256 = BalanceToU256; + type U256ToBalance = U256ToBalance; + type StakingInterface = Staking; + type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; + type MaxMetadataLen = MaxMetadataLen; + type MaxUnbonding = ConstU32<8>; + type MaxPointsToBalance = ConstU8<10>; + type PalletId = PoolsPalletId; +} + +parameter_types! 
{ + pub static SlashPerEra: u32 = 100; +} + +impl fast_unstake::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SlashPerEra = SlashPerEra; + type ControlOrigin = frame_system::EnsureRoot; + type WeightInfo = (); +} + +type Block = frame_system::mocking::MockBlock; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system, + Timestamp: pallet_timestamp, + Balances: pallet_balances, + Staking: pallet_staking, + Pools: pallet_nomination_pools, + FastUnstake: fast_unstake, + } +); + +parameter_types! { + static FastUnstakeEvents: u32 = 0; +} + +pub(crate) fn fast_unstake_events_since_last_call() -> Vec> { + let events = System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let RuntimeEvent::FastUnstake(inner) = e { Some(inner) } else { None }) + .collect::>(); + let already_seen = FastUnstakeEvents::get(); + FastUnstakeEvents::set(events.len() as u32); + events.into_iter().skip(already_seen as usize).collect() +} + +pub struct ExtBuilder { + exposed_nominators: Vec<(AccountId, AccountId, Balance)>, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { + exposed_nominators: vec![ + (1, 2, 100), + (3, 4, 100), + (5, 6, 100), + (7, 8, 100), + (9, 10, 100), + ], + } + } +} + +pub(crate) const VALIDATORS_PER_ERA: AccountId = 32; +pub(crate) const VALIDATOR_PREFIX: AccountId = 100; +pub(crate) const NOMINATORS_PER_VALIDATOR_PER_ERA: AccountId = 4; +pub(crate) const NOMINATOR_PREFIX: AccountId = 1000; + +impl ExtBuilder { + pub(crate) fn register_stakers_for_era(era: u32) { + // validators are prefixed with 100 and nominators with 1000 to prevent conflict. Make sure + // all the other accounts used in tests are below 100. Also ensure here that we don't + // overlap. + assert!(VALIDATOR_PREFIX + VALIDATORS_PER_ERA < NOMINATOR_PREFIX); + + (VALIDATOR_PREFIX..VALIDATOR_PREFIX + VALIDATORS_PER_ERA) + .map(|v| { + // for the sake of sanity, let's register this taker as an actual validator. + let others = (NOMINATOR_PREFIX.. + (NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA)) + .map(|n| IndividualExposure { who: n, value: 0 as Balance }) + .collect::>(); + (v, Exposure { total: 0, own: 0, others }) + }) + .for_each(|(validator, exposure)| { + pallet_staking::ErasStakers::::insert(era, validator, exposure); + }); + } + + pub(crate) fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let mut storage = + frame_system::GenesisConfig::default().build_storage::().unwrap(); + + // create one default pool. 
+ let _ = pallet_nomination_pools::GenesisConfig:: { ..Default::default() } + .assimilate_storage(&mut storage); + + let validators_range = VALIDATOR_PREFIX..VALIDATOR_PREFIX + VALIDATORS_PER_ERA; + let nominators_range = + NOMINATOR_PREFIX..NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA; + + let _ = pallet_balances::GenesisConfig:: { + balances: self + .exposed_nominators + .clone() + .into_iter() + .map(|(stash, _, balance)| (stash, balance * 2)) + .chain( + self.exposed_nominators + .clone() + .into_iter() + .map(|(_, ctrl, balance)| (ctrl, balance * 2)), + ) + .chain(validators_range.clone().map(|x| (x, 100))) + .chain(nominators_range.clone().map(|x| (x, 100))) + .collect::>(), + } + .assimilate_storage(&mut storage); + + let _ = pallet_staking::GenesisConfig:: { + stakers: self + .exposed_nominators + .into_iter() + .map(|(x, y, z)| (x, y, z, pallet_staking::StakerStatus::Nominator(vec![42]))) + .chain(validators_range.map(|x| (x, x, 100, StakerStatus::Validator))) + .chain(nominators_range.map(|x| (x, x, 100, StakerStatus::Nominator(vec![x])))) + .collect::>(), + ..Default::default() + } + .assimilate_storage(&mut storage); + + let mut ext = sp_io::TestExternalities::from(storage); + + ext.execute_with(|| { + // for events to be deposited. + frame_system::Pallet::::set_block_number(1); + + for era in 0..=(BondingDuration::get()) { + Self::register_stakers_for_era(era); + } + + // because we read this value as a measure of how many validators we have. + pallet_staking::ValidatorCount::::put(VALIDATORS_PER_ERA as u32); + + // make a pool + let amount_to_bond = Pools::depositor_min_bond(); + Balances::make_free_balance_be(&10, amount_to_bond * 5); + assert_ok!(Pools::create(RawOrigin::Signed(10).into(), amount_to_bond, 900, 901, 902)); + }); + ext + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(|| { + test(); + }) + } +} + +pub(crate) fn run_to_block(n: u64, on_idle: bool) { + let current_block = System::block_number(); + assert!(n > current_block); + while System::block_number() < n { + Balances::on_finalize(System::block_number()); + Staking::on_finalize(System::block_number()); + Pools::on_finalize(System::block_number()); + FastUnstake::on_finalize(System::block_number()); + + System::set_block_number(System::block_number() + 1); + + Balances::on_initialize(System::block_number()); + Staking::on_initialize(System::block_number()); + Pools::on_initialize(System::block_number()); + FastUnstake::on_initialize(System::block_number()); + if on_idle { + FastUnstake::on_idle(System::block_number(), BlockWeights::get().max_block); + } + } +} + +pub(crate) fn next_block(on_idle: bool) { + let current = System::block_number(); + run_to_block(current + 1, on_idle); +} + +pub fn assert_unstaked(stash: &AccountId) { + assert!(!pallet_staking::Bonded::::contains_key(stash)); + assert!(!pallet_staking::Payee::::contains_key(stash)); + assert!(!pallet_staking::Validators::::contains_key(stash)); + assert!(!pallet_staking::Nominators::::contains_key(stash)); +} diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs new file mode 100644 index 0000000000000..a51c1acdf06eb --- /dev/null +++ b/frame/fast-unstake/src/tests.rs @@ -0,0 +1,1036 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-fast-unstake. + +use super::*; +use crate::{mock::*, types::*, weights::WeightInfo, Event}; +use frame_support::{assert_noop, assert_ok, bounded_vec, pallet_prelude::*, traits::Currency}; +use pallet_nomination_pools::{BondedPools, LastPoolId, RewardPools}; +use pallet_staking::{CurrentEra, IndividualExposure, RewardDestination}; + +use sp_runtime::{traits::BadOrigin, DispatchError, ModuleError}; +use sp_staking::StakingInterface; + +#[test] +fn test_setup_works() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(BondedPools::::count(), 1); + assert_eq!(RewardPools::::count(), 1); + assert_eq!(Staking::bonding_duration(), 3); + let last_pool = LastPoolId::::get(); + assert_eq!(last_pool, 1); + }); +} + +#[test] +fn register_works() { + ExtBuilder::default().build_and_execute(|| { + // Controller account registers for fast unstake. + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + // Ensure stash is in the queue. + assert_ne!(Queue::::get(1), None); + }); +} + +#[test] +fn cannot_register_if_not_bonded() { + ExtBuilder::default().build_and_execute(|| { + // Mint accounts 1 and 2 with 200 tokens. + for _ in 1..2 { + let _ = Balances::make_free_balance_be(&1, 200); + } + // Attempt to fast unstake. + assert_noop!( + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1), Some(1_u32)), + Error::::NotController + ); + }); +} + +#[test] +fn cannot_register_if_in_queue() { + ExtBuilder::default().build_and_execute(|| { + // Insert some Queue item + Queue::::insert(1, Some(1_u32)); + // Cannot re-register, already in queue + assert_noop!( + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + Error::::AlreadyQueued + ); + }); +} + +#[test] +fn cannot_register_if_head() { + ExtBuilder::default().build_and_execute(|| { + // Insert some Head item for stash + Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![], maybe_pool_id: None }); + // Controller attempts to regsiter + assert_noop!( + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + Error::::AlreadyHead + ); + }); +} + +#[test] +fn cannot_register_if_has_unlocking_chunks() { + ExtBuilder::default().build_and_execute(|| { + // Start unbonding half of staked tokens + assert_ok!(Staking::unbond(RuntimeOrigin::signed(2), 50_u128)); + // Cannot register for fast unstake with unlock chunks active + assert_noop!( + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + Error::::NotFullyBonded + ); + }); +} + +#[test] +fn deregister_works() { + ExtBuilder::default().build_and_execute(|| { + // Controller account registers for fast unstake. + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + // Controller then changes mind and deregisters. + assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(2))); + // Ensure stash no longer exists in the queue. 
+ assert_eq!(Queue::::get(1), None); + }); +} + +#[test] +fn cannot_deregister_if_not_controller() { + ExtBuilder::default().build_and_execute(|| { + // Controller account registers for fast unstake. + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + // Stash tries to deregister. + assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(1)), Error::::NotController); + }); +} + +#[test] +fn cannot_deregister_if_not_queued() { + ExtBuilder::default().build_and_execute(|| { + // Controller tries to deregister without first registering + assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::NotQueued); + }); +} + +#[test] +fn cannot_deregister_already_head() { + ExtBuilder::default().build_and_execute(|| { + // Controller attempts to register, should fail + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + // Insert some Head item for stash. + Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![], maybe_pool_id: None }); + // Controller attempts to deregister + assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::AlreadyHead); + }); +} + +#[test] +fn control_works() { + ExtBuilder::default().build_and_execute(|| { + // account with control (root) origin wants to only check 1 era per block. + assert_ok!(FastUnstake::control(RuntimeOrigin::root(), 1_u32)); + }); +} + +#[test] +fn control_must_be_control_origin() { + ExtBuilder::default().build_and_execute(|| { + // account without control (root) origin wants to only check 1 era per block. + assert_noop!(FastUnstake::control(RuntimeOrigin::signed(1), 1_u32), BadOrigin); + }); +} + +mod on_idle { + use super::*; + + #[test] + fn early_exit() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(BondingDuration::get() + 1); + CurrentEra::::put(BondingDuration::get()); + + // set up Queue item + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); + assert_eq!(Queue::::get(1), Some(Some(1))); + + // call on_idle with no remaining weight + FastUnstake::on_idle(System::block_number(), Weight::from_ref_time(0)); + + // assert nothing changed in Queue and Head + assert_eq!(Head::::get(), None); + assert_eq!(Queue::::get(1), Some(Some(1))); + }); + } + + #[test] + fn respects_weight() { + ExtBuilder::default().build_and_execute(|| { + // we want to check all eras in one block... + ErasToCheckPerBlock::::put(BondingDuration::get() + 1); + CurrentEra::::put(BondingDuration::get()); + + // given + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); + assert_eq!(Queue::::get(1), Some(Some(1))); + + assert_eq!(Queue::::count(), 1); + assert_eq!(Head::::get(), None); + + // when: call fast unstake with not enough weight to process the whole thing, just one + // era. + let remaining_weight = ::WeightInfo::on_idle_check( + pallet_staking::ValidatorCount::::get() * 1, + ); + assert_eq!(FastUnstake::on_idle(0, remaining_weight), remaining_weight); + + // then + assert_eq!( + fast_unstake_events_since_last_call(), + vec![Event::Checking { stash: 1, eras: vec![3] }] + ); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + ); + + // when: another 1 era. 
+ let remaining_weight = ::WeightInfo::on_idle_check( + pallet_staking::ValidatorCount::::get() * 1, + ); + assert_eq!(FastUnstake::on_idle(0, remaining_weight), remaining_weight); + + // then: + assert_eq!( + fast_unstake_events_since_last_call(), + vec![Event::Checking { stash: 1, eras: bounded_vec![2] }] + ); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2], + maybe_pool_id: Some(1) + }) + ); + + // when: then 5 eras, we only need 2 more. + let remaining_weight = ::WeightInfo::on_idle_check( + pallet_staking::ValidatorCount::::get() * 5, + ); + assert_eq!( + FastUnstake::on_idle(0, remaining_weight), + // note the amount of weight consumed: 2 eras worth of weight. + ::WeightInfo::on_idle_check( + pallet_staking::ValidatorCount::::get() * 2, + ) + ); + + // then: + assert_eq!( + fast_unstake_events_since_last_call(), + vec![Event::Checking { stash: 1, eras: vec![1, 0] }] + ); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 1, 0], + maybe_pool_id: Some(1) + }) + ); + + // when: not enough weight to unstake: + let remaining_weight = + ::WeightInfo::on_idle_unstake() - Weight::from_ref_time(1); + assert_eq!(FastUnstake::on_idle(0, remaining_weight), Weight::from_ref_time(0)); + + // then nothing happens: + assert_eq!(fast_unstake_events_since_last_call(), vec![]); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 1, 0], + maybe_pool_id: Some(1) + }) + ); + + // when: enough weight to get over at least one iteration: then we are unblocked and can + // unstake. + let remaining_weight = ::WeightInfo::on_idle_check( + pallet_staking::ValidatorCount::::get() * 1, + ); + assert_eq!( + FastUnstake::on_idle(0, remaining_weight), + ::WeightInfo::on_idle_unstake() + ); + + // then we finish the unbonding: + assert_eq!( + fast_unstake_events_since_last_call(), + vec![Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) }] + ); + assert_eq!(Head::::get(), None,); + + assert_unstaked(&1); + }); + } + + #[test] + fn if_head_not_set_one_random_fetched_from_queue() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(BondingDuration::get() + 1); + CurrentEra::::put(BondingDuration::get()); + + // given + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10), None)); + + assert_eq!(Queue::::count(), 5); + assert_eq!(Head::::get(), None); + + // when + next_block(true); + + // then + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 1, 0], + maybe_pool_id: None + }) + ); + assert_eq!(Queue::::count(), 4); + + // when + next_block(true); + + // then + assert_eq!(Head::::get(), None,); + assert_eq!(Queue::::count(), 4); + + // when + next_block(true); + + // then + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 5, + checked: bounded_vec![3, 2, 1, 0], + maybe_pool_id: None + }), + ); + assert_eq!(Queue::::count(), 3); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, + Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) }, + Event::Checking { stash: 5, 
eras: vec![3, 2, 1, 0] } + ] + ); + }); + } + + #[test] + fn successful_multi_queue() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(BondingDuration::get() + 1); + CurrentEra::::put(BondingDuration::get()); + + // register multi accounts for fast unstake + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); + assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4), Some(1))); + assert_eq!(Queue::::get(3), Some(Some(1))); + + // assert 2 queue items are in Queue & None in Head to start with + assert_eq!(Queue::::count(), 2); + assert_eq!(Head::::get(), None); + + // process on idle and check eras for next Queue item + next_block(true); + + // process on idle & let go of current Head + next_block(true); + + // confirm Head / Queue items remaining + assert_eq!(Queue::::count(), 1); + assert_eq!(Head::::get(), None); + + // process on idle and check eras for next Queue item + next_block(true); + + // process on idle & let go of current Head + next_block(true); + + // Head & Queue should now be empty + assert_eq!(Head::::get(), None); + assert_eq!(Queue::::count(), 0); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, + Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) }, + Event::Checking { stash: 3, eras: vec![3, 2, 1, 0] }, + Event::Unstaked { stash: 3, maybe_pool_id: Some(1), result: Ok(()) }, + ] + ); + + assert_unstaked(&1); + assert_unstaked(&3); + }); + } + + #[test] + fn successful_unstake_without_pool_join() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(BondingDuration::get() + 1); + CurrentEra::::put(BondingDuration::get()); + + // register for fast unstake + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + assert_eq!(Queue::::get(1), Some(None)); + + // process on idle + next_block(true); + + // assert queue item has been moved to head + assert_eq!(Queue::::get(1), None); + + // assert head item present + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 1, 0], + maybe_pool_id: None + }) + ); + + next_block(true); + assert_eq!(Head::::get(), None,); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, + Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) } + ] + ); + assert_unstaked(&1); + }); + } + + #[test] + fn successful_unstake_joining_bad_pool() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(BondingDuration::get() + 1); + CurrentEra::::put(BondingDuration::get()); + + // register for fast unstake + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(0))); + assert_eq!(Queue::::get(1), Some(Some(0))); + + // process on idle + next_block(true); + + // assert queue item has been moved to head + assert_eq!(Queue::::get(1), None); + + // assert head item present + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 1, 0], + maybe_pool_id: Some(0) + }) + ); + + next_block(true); + assert_eq!(Head::::get(), None,); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, + Event::Unstaked { + stash: 1, + maybe_pool_id: Some(0), + result: Err(DispatchError::Module(ModuleError { + index: 4, + error: [0, 0, 0, 0], + message: None + 
})) + } + ] + ); + assert_unstaked(&1); + }); + } + + #[test] + fn successful_unstake_all_eras_per_block() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(BondingDuration::get() + 1); + CurrentEra::::put(BondingDuration::get()); + + // register for fast unstake + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_eq!(Queue::::get(1), Some(Some(1))); + + // process on idle + next_block(true); + + // assert queue item has been moved to head + assert_eq!(Queue::::get(1), None); + + // assert head item present + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 1, 0], + maybe_pool_id: Some(1) + }) + ); + + next_block(true); + assert_eq!(Head::::get(), None,); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, + Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + ] + ); + assert_unstaked(&1); + assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); + }); + } + + #[test] + fn successful_unstake_one_era_per_block() { + ExtBuilder::default().build_and_execute(|| { + // put 1 era per block + ErasToCheckPerBlock::::put(1); + CurrentEra::::put(BondingDuration::get()); + + // register for fast unstake + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_eq!(Queue::::get(1), Some(Some(1))); + + // process on idle + next_block(true); + + // assert queue item has been moved to head + assert_eq!(Queue::::get(1), None); + + // assert head item present + assert_eq!( + Head::::get(), + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + ); + + next_block(true); + + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2], + maybe_pool_id: Some(1) + }) + ); + + next_block(true); + + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 1], + maybe_pool_id: Some(1) + }) + ); + + next_block(true); + + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 1, 0], + maybe_pool_id: Some(1) + }) + ); + + next_block(true); + + assert_eq!(Head::::get(), None,); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Checking { stash: 1, eras: vec![3] }, + Event::Checking { stash: 1, eras: vec![2] }, + Event::Checking { stash: 1, eras: vec![1] }, + Event::Checking { stash: 1, eras: vec![0] }, + Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + ] + ); + assert_unstaked(&1); + assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); + }); + } + + #[test] + fn old_checked_era_pruned() { + // the only scenario where checked era pruning (checked.retain) comes handy is a follows: + // the whole vector is full and at capacity and in the next call we are ready to unstake, + // but then a new era happens. 
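+ // Concretely: `checked` is already `[3, 2, 1, 0]`, i.e. at the `MaxChecking = BondingDuration + 1`
+ // bound, when era 4 begins; era 0 then falls out of the check window, `checked.retain` prunes it,
+ // and era 4 can be appended without overflowing the bounded vec.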
+ ExtBuilder::default().build_and_execute(|| { + // given + ErasToCheckPerBlock::::put(1); + CurrentEra::::put(BondingDuration::get()); + + // register for fast unstake + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + assert_eq!(Queue::::get(1), Some(None)); + + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: None }) + ); + + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2], maybe_pool_id: None }) + ); + + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 1], + maybe_pool_id: None + }) + ); + + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 1, 0], + maybe_pool_id: None + }) + ); + + // when: a new era happens right before one is free. + CurrentEra::::put(CurrentEra::::get().unwrap() + 1); + ExtBuilder::register_stakers_for_era(CurrentEra::::get().unwrap()); + + // then + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + // note era 0 is pruned to keep the vector length sane. + checked: bounded_vec![3, 2, 1, 4], + maybe_pool_id: None + }) + ); + + next_block(true); + assert_eq!(Head::::get(), None); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Checking { stash: 1, eras: vec![3] }, + Event::Checking { stash: 1, eras: vec![2] }, + Event::Checking { stash: 1, eras: vec![1] }, + Event::Checking { stash: 1, eras: vec![0] }, + Event::Checking { stash: 1, eras: vec![4] }, + Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) } + ] + ); + assert_unstaked(&1); + }); + } + + #[test] + fn unstake_paused_mid_election() { + ExtBuilder::default().build_and_execute(|| { + // give: put 1 era per block + ErasToCheckPerBlock::::put(1); + CurrentEra::::put(BondingDuration::get()); + + // register for fast unstake + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + + // process 2 blocks + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + ); + + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2], + maybe_pool_id: Some(1) + }) + ); + + // when + Ongoing::set(true); + + // then nothing changes + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2], + maybe_pool_id: Some(1) + }) + ); + + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2], + maybe_pool_id: Some(1) + }) + ); + + // then we register a new era. + Ongoing::set(false); + CurrentEra::::put(CurrentEra::::get().unwrap() + 1); + ExtBuilder::register_stakers_for_era(CurrentEra::::get().unwrap()); + + // then we can progress again, but notice that the new era that had to be checked. + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 4], + maybe_pool_id: Some(1) + }) + ); + + // progress to end + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 1, + checked: bounded_vec![3, 2, 4, 1], + maybe_pool_id: Some(1) + }) + ); + + // but notice that we don't care about era 0 instead anymore! we're done. 
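+ // (after the new era, the relevant window is eras 1..=4; era 0 sits outside it, so the
+ // four eras already in `checked` are all that is needed before unstaking.)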
+ next_block(true); + assert_eq!(Head::::get(), None); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Checking { stash: 1, eras: vec![3] }, + Event::Checking { stash: 1, eras: vec![2] }, + Event::Checking { stash: 1, eras: vec![4] }, + Event::Checking { stash: 1, eras: vec![1] }, + Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + ] + ); + + assert_unstaked(&1); + assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); + }); + } + + #[test] + fn exposed_nominator_cannot_unstake() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + SlashPerEra::set(7); + CurrentEra::::put(BondingDuration::get()); + + // create an exposed nominator in era 1 + let exposed = 666 as AccountId; + pallet_staking::ErasStakers::::mutate(1, VALIDATORS_PER_ERA, |expo| { + expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); + }); + Balances::make_free_balance_be(&exposed, 100); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(exposed), + exposed, + 10, + RewardDestination::Staked + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); + + // register the exposed one. + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed), None)); + + // a few blocks later, we realize they are slashed + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: exposed, + checked: bounded_vec![3], + maybe_pool_id: None + }) + ); + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: exposed, + checked: bounded_vec![3, 2], + maybe_pool_id: None + }) + ); + next_block(true); + assert_eq!(Head::::get(), None); + + assert_eq!( + fast_unstake_events_since_last_call(), + // we slash them by 21, since we checked 3 eras in total (3, 2, 1). + vec![ + Event::Checking { stash: exposed, eras: vec![3] }, + Event::Checking { stash: exposed, eras: vec![2] }, + Event::Slashed { stash: exposed, amount: 3 * 7 } + ] + ); + }); + } + + #[test] + fn exposed_nominator_cannot_unstake_multi_check() { + ExtBuilder::default().build_and_execute(|| { + // same as the previous check, but we check 2 eras per block, and we make the exposed be + // exposed in era 0, so that it is detected halfway in a check era. + ErasToCheckPerBlock::::put(2); + SlashPerEra::set(7); + CurrentEra::::put(BondingDuration::get()); + + // create an exposed nominator in era 1 + let exposed = 666 as AccountId; + pallet_staking::ErasStakers::::mutate(0, VALIDATORS_PER_ERA, |expo| { + expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); + }); + Balances::make_free_balance_be(&exposed, 100); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(exposed), + exposed, + 10, + RewardDestination::Staked + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); + + // register the exposed one. + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed), None)); + + // a few blocks later, we realize they are slashed + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: exposed, + checked: bounded_vec![3, 2], + maybe_pool_id: None + }) + ); + next_block(true); + assert_eq!(Head::::get(), None); + + assert_eq!( + fast_unstake_events_since_last_call(), + // we slash them by 28, since we checked 4 eras in total. 
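+ // (2 eras were checked in the first block and 2 more in the second, so the slash is
+ // `SlashPerEra * (eras_checked + checked.len())` = 7 * (2 + 2) = 28.)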
+ vec![ + Event::Checking { stash: exposed, eras: vec![3, 2] }, + Event::Slashed { stash: exposed, amount: 4 * 7 } + ] + ); + }); + } + + #[test] + fn validators_cannot_bail() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(BondingDuration::get() + 1); + CurrentEra::::put(BondingDuration::get()); + + // a validator switches role and registers... + assert_ok!(Staking::nominate( + RuntimeOrigin::signed(VALIDATOR_PREFIX), + vec![VALIDATOR_PREFIX] + )); + assert_ok!(FastUnstake::register_fast_unstake( + RuntimeOrigin::signed(VALIDATOR_PREFIX), + None + )); + + // but they are indeed exposed! + assert!(pallet_staking::ErasStakers::::contains_key( + BondingDuration::get() - 1, + VALIDATOR_PREFIX + )); + + // process a block, this validator is exposed and has been slashed. + next_block(true); + assert_eq!(Head::::get(), None); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![Event::Slashed { stash: 100, amount: 100 }] + ); + }); + } + + #[test] + fn unexposed_validator_can_fast_unstake() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(BondingDuration::get() + 1); + CurrentEra::::put(BondingDuration::get()); + + // create a new validator that is 100% not exposed. + Balances::make_free_balance_be(&42, 100); + assert_ok!(Staking::bond(RuntimeOrigin::signed(42), 42, 10, RewardDestination::Staked)); + assert_ok!(Staking::validate(RuntimeOrigin::signed(42), Default::default())); + + // let them register: + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(42), None)); + + // 2 blocks are enough to unstake them. + next_block(true); + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stash: 42, + checked: bounded_vec![3, 2, 1, 0], + maybe_pool_id: None + }) + ); + next_block(true); + assert_eq!(Head::::get(), None); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Checking { stash: 42, eras: vec![3, 2, 1, 0] }, + Event::Unstaked { stash: 42, maybe_pool_id: None, result: Ok(()) } + ] + ); + }); + } +} + +mod signed_extension { + use super::*; + use sp_runtime::traits::SignedExtension; + + const STAKING_CALL: crate::mock::RuntimeCall = + crate::mock::RuntimeCall::Staking(pallet_staking::Call::::chill {}); + + #[test] + fn does_nothing_if_not_queued() { + ExtBuilder::default().build_and_execute(|| { + assert!(PreventStakingOpsIfUnbonding::::new() + .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) + .is_ok()); + }) + } + + #[test] + fn prevents_queued() { + ExtBuilder::default().build_and_execute(|| { + // given: stash for 2 is 1. + // when + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + + // then + // stash can't. + assert!(PreventStakingOpsIfUnbonding::::new() + .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) + .is_err()); + + // controller can't. + assert!(PreventStakingOpsIfUnbonding::::new() + .pre_dispatch(&2, &STAKING_CALL, &Default::default(), Default::default()) + .is_err()); + }) + } + + #[test] + fn prevents_head_stash() { + ExtBuilder::default().build_and_execute(|| { + // given: stash for 2 is 1.
+ // when + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + + ErasToCheckPerBlock::::put(1); + CurrentEra::::put(BondingDuration::get()); + next_block(true); + + assert_eq!( + Head::::get(), + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: None }) + ); + + // then + // stash can't + assert!(PreventStakingOpsIfUnbonding::::new() + .pre_dispatch(&2, &STAKING_CALL, &Default::default(), Default::default()) + .is_err()); + + // controller can't + assert!(PreventStakingOpsIfUnbonding::::new() + .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) + .is_err()); + }) + } +} diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs new file mode 100644 index 0000000000000..ae8702e56a842 --- /dev/null +++ b/frame/fast-unstake/src/types.rs @@ -0,0 +1,119 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Types used in the Fast Unstake pallet. + +use crate::*; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::{ + traits::{Currency, Get, IsSubType}, + BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, +}; +use pallet_nomination_pools::PoolId; +use scale_info::TypeInfo; +use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidityError}; +use sp_staking::EraIndex; +use sp_std::{fmt::Debug, prelude::*}; + +pub type BalanceOf = <::Currency as Currency< + ::AccountId, +>>::Balance; + +/// An unstake request. +#[derive( + Encode, Decode, EqNoBound, PartialEqNoBound, Clone, TypeInfo, RuntimeDebugNoBound, MaxEncodedLen, +)] +pub struct UnstakeRequest> { + /// Their stash account. + pub(crate) stash: AccountId, + /// The list of eras for which they have been checked. + pub(crate) checked: BoundedVec, + /// The pool they wish to join, if any. + pub(crate) maybe_pool_id: Option, +} + +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo, RuntimeDebugNoBound)] +#[scale_info(skip_type_params(T))] +pub struct PreventStakingOpsIfUnbonding(sp_std::marker::PhantomData); + +#[cfg(test)] +impl PreventStakingOpsIfUnbonding { + pub fn new() -> Self { + Self(Default::default()) + } +} + +impl sp_runtime::traits::SignedExtension + for PreventStakingOpsIfUnbonding +where + ::RuntimeCall: IsSubType>, +{ + type AccountId = T::AccountId; + type Call = ::RuntimeCall; + type AdditionalSigned = (); + type Pre = (); + const IDENTIFIER: &'static str = "PreventStakingOpsIfUnbonding"; + + fn additional_signed(&self) -> Result { + Ok(()) + } + + fn pre_dispatch( + self, + // NOTE: we want to prevent this stash-controller pair from doing anything in the + // staking system as long as they are registered here. + stash_or_controller: &Self::AccountId, + call: &Self::Call, + _info: &sp_runtime::traits::DispatchInfoOf, + _len: usize, + ) -> Result { + // we don't check this in the tx-pool as it requires a storage read. 
+ if >>::is_sub_type(call).is_some() { + let check_stash = |stash: &T::AccountId| { + if Queue::::contains_key(&stash) || + Head::::get().map_or(false, |u| &u.stash == stash) + { + Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) + } else { + Ok(()) + } + }; + match ( + // mapped from controller. + pallet_staking::Ledger::::get(&stash_or_controller), + // mapped from stash. + pallet_staking::Bonded::::get(&stash_or_controller), + ) { + (Some(ledger), None) => { + // it is a controller. + check_stash(&ledger.stash) + }, + (_, Some(_)) => { + // it's a stash. + let stash = stash_or_controller; + check_stash(stash) + }, + (None, None) => { + // They are not a staker -- let them execute. + Ok(()) + }, + } + } else { + Ok(()) + } + } +} diff --git a/frame/fast-unstake/src/weights.rs b/frame/fast-unstake/src/weights.rs new file mode 100644 index 0000000000000..04857d0dcc865 --- /dev/null +++ b/frame/fast-unstake/src/weights.rs @@ -0,0 +1,210 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_fast_unstake +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2022-09-07, STEPS: `10`, REPEAT: 1, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `Kians-MacBook-Pro-2.local`, CPU: `` +//! EXECUTION: Some(Native), WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 + +// Executed Command: +// target/release/substrate +// benchmark +// pallet +// --steps=10 +// --repeat=1 +// --pallet=pallet_fast_unstake +// --extrinsic=* +// --execution=native +// --output +// weight.rs +// --template +// ./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_fast_unstake. +pub trait WeightInfo { + fn on_idle_unstake() -> Weight; + fn on_idle_check(x: u32, ) -> Weight; + fn register_fast_unstake() -> Weight; + fn deregister() -> Weight; + fn control() -> Weight; +} + +/// Weights for pallet_fast_unstake using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + // Storage: Staking ValidatorCount (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + // Storage: FastUnstake Head (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking Bonded (r:2 w:1) + // Storage: Staking Ledger (r:2 w:2) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: System Account (r:3 w:2) + // Storage: Balances Locks (r:2 w:2) + // Storage: NominationPools MinJoinBond (r:1 w:0) + // Storage: NominationPools PoolMembers (r:1 w:1) + // Storage: NominationPools BondedPools (r:1 w:1) + // Storage: NominationPools RewardPools (r:1 w:1) + // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) + // Storage: NominationPools MaxPoolMembers (r:1 w:0) + // Storage: NominationPools CounterForPoolMembers (r:1 w:1) + // Storage: BagsList ListNodes (r:1 w:0) + // Storage: Staking Payee (r:0 w:1) + fn on_idle_unstake() -> Weight { + Weight::from_ref_time(102_000_000 as u64) + .saturating_add(T::DbWeight::get().reads(25 as u64)) + .saturating_add(T::DbWeight::get().writes(13 as u64)) + } + // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + // Storage: Staking ValidatorCount (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + // Storage: FastUnstake Head (r:1 w:1) + // Storage: FastUnstake Queue (r:2 w:1) + // Storage: FastUnstake CounterForQueue (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking ErasStakers (r:1344 w:0) + /// The range of component `x` is `[672, 86016]`. + fn on_idle_check(x: u32, ) -> Weight { + Weight::from_ref_time(0 as u64) + // Standard Error: 244_000 + .saturating_add(Weight::from_ref_time(13_913_000 as u64).saturating_mul(x as u64)) + .saturating_add(T::DbWeight::get().reads(585 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(x as u64))) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + } + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Nominators (r:1 w:1) + // Storage: FastUnstake Queue (r:1 w:1) + // Storage: FastUnstake Head (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:1 w:1) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: FastUnstake CounterForQueue (r:1 w:1) + fn register_fast_unstake() -> Weight { + Weight::from_ref_time(57_000_000 as u64) + .saturating_add(T::DbWeight::get().reads(12 as u64)) + .saturating_add(T::DbWeight::get().writes(9 as u64)) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: FastUnstake Queue (r:1 w:1) + // Storage: FastUnstake Head (r:1 w:0) + // Storage: FastUnstake CounterForQueue (r:1 w:1) + fn deregister() -> Weight { + Weight::from_ref_time(15_000_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) + } + // Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) + fn control() -> Weight { + Weight::from_ref_time(3_000_000 as u64) + .saturating_add(T::DbWeight::get().writes(1 as u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + // Storage: Staking ValidatorCount 
(r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + // Storage: FastUnstake Head (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking Bonded (r:2 w:1) + // Storage: Staking Ledger (r:2 w:2) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: System Account (r:3 w:2) + // Storage: Balances Locks (r:2 w:2) + // Storage: NominationPools MinJoinBond (r:1 w:0) + // Storage: NominationPools PoolMembers (r:1 w:1) + // Storage: NominationPools BondedPools (r:1 w:1) + // Storage: NominationPools RewardPools (r:1 w:1) + // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) + // Storage: NominationPools MaxPoolMembers (r:1 w:0) + // Storage: NominationPools CounterForPoolMembers (r:1 w:1) + // Storage: BagsList ListNodes (r:1 w:0) + // Storage: Staking Payee (r:0 w:1) + fn on_idle_unstake() -> Weight { + Weight::from_ref_time(102_000_000 as u64) + .saturating_add(RocksDbWeight::get().reads(25 as u64)) + .saturating_add(RocksDbWeight::get().writes(13 as u64)) + } + // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + // Storage: Staking ValidatorCount (r:1 w:0) + // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + // Storage: FastUnstake Head (r:1 w:1) + // Storage: FastUnstake Queue (r:2 w:1) + // Storage: FastUnstake CounterForQueue (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking ErasStakers (r:1344 w:0) + /// The range of component `x` is `[672, 86016]`. + fn on_idle_check(x: u32, ) -> Weight { + Weight::from_ref_time(0 as u64) + // Standard Error: 244_000 + .saturating_add(Weight::from_ref_time(13_913_000 as u64).saturating_mul(x as u64)) + .saturating_add(RocksDbWeight::get().reads(585 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(x as u64))) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + } + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Nominators (r:1 w:1) + // Storage: FastUnstake Queue (r:1 w:1) + // Storage: FastUnstake Head (r:1 w:0) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: BagsList ListNodes (r:1 w:1) + // Storage: BagsList ListBags (r:1 w:1) + // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: FastUnstake CounterForQueue (r:1 w:1) + fn register_fast_unstake() -> Weight { + Weight::from_ref_time(57_000_000 as u64) + .saturating_add(RocksDbWeight::get().reads(12 as u64)) + .saturating_add(RocksDbWeight::get().writes(9 as u64)) + } + // Storage: Staking Ledger (r:1 w:0) + // Storage: FastUnstake Queue (r:1 w:1) + // Storage: FastUnstake Head (r:1 w:0) + // Storage: FastUnstake CounterForQueue (r:1 w:1) + fn deregister() -> Weight { + Weight::from_ref_time(15_000_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) + } + // Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) + fn control() -> Weight { + Weight::from_ref_time(3_000_000 as u64) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) + } +} diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index 040a9fa8e828e..c31bcb1546ecd 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs @@ -52,12 +52,6 @@ pub trait Config: pub struct Pallet(Pools); 
-fn min_create_bond() -> BalanceOf { - MinCreateBond::::get() - .max(T::StakingInterface::minimum_bond()) - .max(CurrencyOf::::minimum_balance()) -} - fn create_funded_user_with_balance( string: &'static str, n: u32, @@ -220,7 +214,7 @@ impl ListScenario { frame_benchmarking::benchmarks! { join { - let origin_weight = min_create_bond::() * 2u32.into(); + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); // setup the worst case list scenario. let scenario = ListScenario::::new(origin_weight, true)?; @@ -246,7 +240,7 @@ frame_benchmarking::benchmarks! { } bond_extra_transfer { - let origin_weight = min_create_bond::() * 2u32.into(); + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let scenario = ListScenario::::new(origin_weight, true)?; let extra = scenario.dest_weight - origin_weight; @@ -261,7 +255,7 @@ frame_benchmarking::benchmarks! { } bond_extra_reward { - let origin_weight = min_create_bond::() * 2u32.into(); + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let scenario = ListScenario::::new(origin_weight, true)?; let extra = (scenario.dest_weight - origin_weight).max(CurrencyOf::::minimum_balance()); @@ -279,7 +273,7 @@ frame_benchmarking::benchmarks! { } claim_payout { - let origin_weight = min_create_bond::() * 2u32.into(); + let origin_weight = Pools::::depositor_min_bond() * 2u32.into(); let ed = CurrencyOf::::minimum_balance(); let (depositor, pool_account) = create_pool_account::(0, origin_weight); let reward_account = Pools::::create_reward_account(1); @@ -309,7 +303,7 @@ frame_benchmarking::benchmarks! { unbond { // The weight the nominator will start at. The value used here is expected to be // significantly higher than the first position in a list (e.g. the first bag threshold). - let origin_weight = min_create_bond::() * 200u32.into(); + let origin_weight = Pools::::depositor_min_bond() * 200u32.into(); let scenario = ListScenario::::new(origin_weight, false)?; let amount = origin_weight - scenario.dest_weight; @@ -340,7 +334,7 @@ frame_benchmarking::benchmarks! { pool_withdraw_unbonded { let s in 0 .. MAX_SPANS; - let min_create_bond = min_create_bond::(); + let min_create_bond = Pools::::depositor_min_bond(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); // Add a new member @@ -382,7 +376,7 @@ frame_benchmarking::benchmarks! { withdraw_unbonded_update { let s in 0 .. MAX_SPANS; - let min_create_bond = min_create_bond::(); + let min_create_bond = Pools::::depositor_min_bond(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); // Add a new member @@ -428,7 +422,7 @@ frame_benchmarking::benchmarks! { withdraw_unbonded_kill { let s in 0 .. MAX_SPANS; - let min_create_bond = min_create_bond::(); + let min_create_bond = Pools::::depositor_min_bond(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); let depositor_lookup = T::Lookup::unlookup(depositor.clone()); @@ -493,14 +487,14 @@ frame_benchmarking::benchmarks! 
{ } create { - let min_create_bond = min_create_bond::(); + let min_create_bond = Pools::::depositor_min_bond(); let depositor: T::AccountId = account("depositor", USER_SEED, 0); let depositor_lookup = T::Lookup::unlookup(depositor.clone()); // Give the depositor some balance to bond CurrencyOf::::make_free_balance_be(&depositor, min_create_bond * 2u32.into()); - // Make sure no pools exist as a pre-condition for our verify checks + // Make sure no Pools exist at a pre-condition for our verify checks assert_eq!(RewardPools::::count(), 0); assert_eq!(BondedPools::::count(), 0); @@ -540,7 +534,7 @@ frame_benchmarking::benchmarks! { let n in 1 .. T::MaxNominations::get(); // Create a pool - let min_create_bond = min_create_bond::() * 2u32.into(); + let min_create_bond = Pools::::depositor_min_bond() * 2u32.into(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); // Create some accounts to nominate. For the sake of benchmarking they don't need to be @@ -577,7 +571,7 @@ frame_benchmarking::benchmarks! { set_state { // Create a pool - let min_create_bond = min_create_bond::(); + let min_create_bond = Pools::::depositor_min_bond(); let (depositor, pool_account) = create_pool_account::(0, min_create_bond); BondedPools::::mutate(&1, |maybe_pool| { // Force the pool into an invalid state @@ -595,7 +589,7 @@ frame_benchmarking::benchmarks! { let n in 1 .. ::MaxMetadataLen::get(); // Create a pool - let (depositor, pool_account) = create_pool_account::(0, min_create_bond::() * 2u32.into()); + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into()); // Create metadata of the max possible size let metadata: Vec = (0..n).map(|_| 42).collect(); @@ -624,7 +618,7 @@ frame_benchmarking::benchmarks! { update_roles { let first_id = pallet_nomination_pools::LastPoolId::::get() + 1; - let (root, _) = create_pool_account::(0, min_create_bond::() * 2u32.into()); + let (root, _) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into()); let random: T::AccountId = account("but is anything really random in computers..?", 0, USER_SEED); }:_( RuntimeOrigin::Signed(root.clone()), @@ -646,7 +640,7 @@ frame_benchmarking::benchmarks! { chill { // Create a pool - let (depositor, pool_account) = create_pool_account::(0, min_create_bond::() * 2u32.into()); + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into()); // Nominate with the pool. let validators: Vec<_> = (0..T::MaxNominations::get()) diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index dfe4bf1931068..28d10ce573401 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -2184,10 +2184,11 @@ impl Pallet { /// /// It is essentially `max { MinNominatorBond, MinCreateBond, MinJoinBond }`, where the former /// is coming from the staking pallet and the latter two are configured in this pallet. - fn depositor_min_bond() -> BalanceOf { + pub fn depositor_min_bond() -> BalanceOf { T::StakingInterface::minimum_bond() .max(MinCreateBond::::get()) .max(MinJoinBond::::get()) + .max(T::Currency::minimum_balance()) } /// Remove everything related to the given bonded pool. 
/// diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 7b8d14164d63f..5074a7ffa695a 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -2771,7 +2771,7 @@ mod unbond { #[test] fn partial_unbond_era_tracking() { - ExtBuilder::default().build_and_execute(|| { + ExtBuilder::default().ed(1).build_and_execute(|| { // to make the depositor capable of withdrawing. StakingMinBond::set(1); MinCreateBond::::set(1); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 56fcb1965bc13..df568d6b596ba 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -563,7 +563,7 @@ impl StakingLedger { /// /// This calls `Config::OnStakerSlash::on_slash` with information as to how the slash was /// applied. - fn slash( + pub fn slash( &mut self, slash_amount: BalanceOf, minimum_balance: BalanceOf, diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index abb8649169fdd..399f50aaed865 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -653,10 +653,10 @@ impl Pallet { #[cfg(feature = "runtime-benchmarks")] pub fn add_era_stakers( current_era: EraIndex, - controller: T::AccountId, + stash: T::AccountId, exposure: Exposure>, ) { - >::insert(¤t_era, &controller, &exposure); + >::insert(¤t_era, &stash, &exposure); } #[cfg(feature = "runtime-benchmarks")] diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index d113c0bb2243c..4db3870c62d8b 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -504,6 +504,7 @@ pub mod pallet { /// Slashing spans for stash accounts. #[pallet::storage] + #[pallet::getter(fn slashing_spans)] #[pallet::unbounded] pub(crate) type SlashingSpans = StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; @@ -656,8 +657,8 @@ pub mod pallet { EraPaid(EraIndex, BalanceOf, BalanceOf), /// The nominator has been rewarded by this amount. \[stash, amount\] Rewarded(T::AccountId, BalanceOf), - /// One validator (and its nominators) has been slashed by the given amount. - /// \[validator, amount\] + /// One staker (and potentially its nominators) has been slashed by the given amount. + /// \[staker, amount\] Slashed(T::AccountId, BalanceOf), /// An old slashing report from a prior era was discarded because it could /// not be processed. \[session_index\] From 397da2753081b3cbe4cbb0893f17b4188f0d5997 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 23 Sep 2022 12:42:36 +0200 Subject: [PATCH 164/348] bench: Use `_` instead of `::` in auto-generated file names (#12332) * Replace :: with _ in auto-generated file names Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Shawn Tabrizi --- frame/benchmarking/README.md | 2 +- .../benchmarking-cli/src/pallet/command.rs | 15 ++++++------ .../benchmarking-cli/src/pallet/writer.rs | 24 ++++++++++++++----- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/frame/benchmarking/README.md b/frame/benchmarking/README.md index 6316cd5903c8b..76673c5f69b33 100644 --- a/frame/benchmarking/README.md +++ b/frame/benchmarking/README.md @@ -185,7 +185,7 @@ Then you can run a benchmark like so: ``` This will output a file `pallet_name.rs` which implements the `WeightInfo` trait you should include -in your pallet. Each blockchain should generate their own benchmark file with their custom +in your pallet. 
Double colons `::` will be replaced with a `_` in the output name if you specify a directory. Each blockchain should generate their own benchmark file with their custom implementation of the `WeightInfo` trait. This means that you will be able to use these modular Substrate pallets while still keeping your network safe for your specific configuration and requirements. diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 0fc7cc4d783f7..6870ec386d23d 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -228,7 +228,7 @@ impl PalletCmd { let mut component_ranges = HashMap::<(Vec, Vec), Vec>::new(); for (pallet, extrinsic, components) in benchmarks_to_run { - log::info!( + println!( "Starting benchmark: {}::{}", String::from_utf8(pallet.clone()).expect("Encoded from String; qed"), String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"), @@ -376,7 +376,7 @@ impl PalletCmd { if let Ok(elapsed) = timer.elapsed() { if elapsed >= time::Duration::from_secs(5) { timer = time::SystemTime::now(); - log::info!( + println!( "Running Benchmark: {}.{}({} args) {}/{} {}/{}", String::from_utf8(pallet.clone()) .expect("Encoded from String; qed"), @@ -398,16 +398,17 @@ impl PalletCmd { // are together. let batches: Vec = combine_batches(batches, batches_db); - // Create the weights.rs file. - if let Some(output_path) = &self.output { - writer::write_results(&batches, &storage_info, &component_ranges, output_path, self)?; - } - // Jsonify the result and write it to a file or stdout if desired. if !self.jsonify(&batches)? { // Print the summary only if `jsonify` did not write to stdout. self.print_summary(&batches, &storage_info) } + + // Create the weights.rs file. + if let Some(output_path) = &self.output { + writer::write_results(&batches, &storage_info, &component_ranges, output_path, self)?; + } + Ok(()) } diff --git a/utils/frame/benchmarking-cli/src/pallet/writer.rs b/utils/frame/benchmarking-cli/src/pallet/writer.rs index a601c09eb5033..cd52ffc329d1c 100644 --- a/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -24,6 +24,7 @@ use std::{ }; use inflector::Inflector; +use itertools::Itertools; use serde::Serialize; use crate::{pallet::command::ComponentRange, shared::UnderscoreHelper, PalletCmd}; @@ -314,18 +315,21 @@ pub(crate) fn write_results( // Organize results by pallet into a JSON map let all_results = map_results(batches, storage_info, component_ranges, &analysis_choice)?; + let mut created_files = Vec::new(); + for ((pallet, instance), results) in all_results.iter() { let mut file_path = path.clone(); // If a user only specified a directory... if file_path.is_dir() { + // Start with "path/to/pallet_name". + let mut file_name = pallet.clone(); // Check if there might be multiple instances benchmarked. if all_results.keys().any(|(p, i)| p == pallet && i != instance) { - // Create new file: "path/to/pallet_name_instance_name.rs". - file_path.push(pallet.clone() + "_" + instance.to_snake_case().as_str()); - } else { - // Create new file: "path/to/pallet_name.rs". - file_path.push(pallet.clone()); + // Append "_instance_name". + file_name = format!("{}_{}", file_name, instance.to_snake_case()); } + // "mod::pallet_name.rs" becomes "mod_pallet_name.rs". 
+ file_path.push(file_name.replace("::", "_")); file_path.set_extension("rs"); } @@ -342,10 +346,18 @@ pub(crate) fn write_results( benchmarks: results.clone(), }; - let mut output_file = fs::File::create(file_path)?; + let mut output_file = fs::File::create(&file_path)?; handlebars .render_template_to_write(&template, &hbs_data, &mut output_file) .map_err(|e| io_error(&e.to_string()))?; + println!("Created file: {:?}", &file_path); + created_files.push(file_path); + } + + for file in created_files.iter().duplicates() { + // This can happen when there are multiple instances of a pallet deployed + // and `--output` forces the output of all instances into the same file. + println!("Multiple benchmarks were written to the same file: {:?}.", file); } Ok(()) } From 8161ee1aa1ccd122150e40a8aef23df3d0a16d02 Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Fri, 23 Sep 2022 16:30:55 +0200 Subject: [PATCH 165/348] [Feature] Sequential migration execution for try-runtime (#12319) * [Feature] Sequential migration execution for try-runtime * remove unused * guards * reinstate encode/decode * proper feature gate * proper test feature gate * Update frame/support/src/traits/hooks.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/support/src/traits/hooks.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * fix tests * redo Tuple tests * Update frame/support/src/traits/hooks.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * use parameter_types for testing * lint fix * Update frame/support/src/traits/hooks.rs Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> * Update frame/support/src/traits/hooks.rs Co-authored-by: Oliver Tale-Yazdi * eloquent feature gate * redo tests * more fixes * properly handle pre/post errors * remove some tests and fix the others * add format import * import fix * more import fixes Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: parity-processbot <> --- frame/executive/src/lib.rs | 8 -- frame/support/src/traits/hooks.rs | 169 +++++++++++++++++++----------- 2 files changed, 106 insertions(+), 71 deletions(-) diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 01b71aab745d5..a41c82da5757c 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -287,15 +287,7 @@ where /// /// This should only be used for testing. 
pub fn try_runtime_upgrade() -> Result { - // ensure both `pre_upgrade` and `post_upgrade` won't change the storage root - let state = { - let _guard = frame_support::StorageNoopGuard::default(); - <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::pre_upgrade().unwrap() - }; let weight = Self::execute_on_runtime_upgrade(); - let _guard = frame_support::StorageNoopGuard::default(); - <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::post_upgrade(state) - .unwrap(); Ok(weight) } } diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index eb1106e4c383f..3415682c0b382 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -22,7 +22,7 @@ use impl_trait_for_tuples::impl_for_tuples; use sp_runtime::traits::AtLeast32BitUnsigned; use sp_std::prelude::*; -#[cfg(feature = "try-runtime")] +#[cfg(all(feature = "try-runtime", test))] use codec::{Decode, Encode}; /// The block initialization trait. @@ -165,6 +165,7 @@ pub trait OnRuntimeUpgrade { #[cfg_attr(all(feature = "tuples-96", not(feature = "tuples-128")), impl_for_tuples(96))] #[cfg_attr(feature = "tuples-128", impl_for_tuples(128))] impl OnRuntimeUpgrade for Tuple { + #[cfg(not(feature = "try-runtime"))] fn on_runtime_upgrade() -> Weight { let mut weight = Weight::zero(); for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); @@ -172,20 +173,42 @@ impl OnRuntimeUpgrade for Tuple { } #[cfg(feature = "try-runtime")] + /// We are executing pre- and post-checks sequentially in order to be able to test several + /// consecutive migrations for the same pallet without errors. Therefore pre and post upgrade + /// hooks for tuples are a noop. + fn on_runtime_upgrade() -> Weight { + use scale_info::prelude::format; + + let mut weight = Weight::zero(); + // migration index in the tuple, start with 1 for better readability + let mut i = 1; + for_tuples!( #( + let _guard = frame_support::StorageNoopGuard::default(); + // we want to panic if any checks fail right here right now. + let state = Tuple::pre_upgrade().expect(&format!("PreUpgrade failed for migration #{}", i)); + drop(_guard); + + weight = weight.saturating_add(Tuple::on_runtime_upgrade()); + + let _guard = frame_support::StorageNoopGuard::default(); + // we want to panic if any checks fail right here right now. 
+ Tuple::post_upgrade(state).expect(&format!("PostUpgrade failed for migration #{}", i)); + drop(_guard); + + i += 1; + )* ); + weight + } + + #[cfg(feature = "try-runtime")] + /// noop fn pre_upgrade() -> Result, &'static str> { - let mut state: Vec> = Vec::default(); - for_tuples!( #( state.push(Tuple::pre_upgrade()?); )* ); - Ok(state.encode()) + Ok(Vec::new()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let state: Vec> = Decode::decode(&mut state.as_slice()) - .expect("the state parameter should be the same as pre_upgrade generated"); - let mut state_iter = state.into_iter(); - for_tuples!( #( Tuple::post_upgrade( - state_iter.next().expect("the state parameter should be the same as pre_upgrade generated") - )?; )* ); + /// noop + fn post_upgrade(_state: Vec) -> Result<(), &'static str> { Ok(()) } } @@ -342,7 +365,9 @@ mod tests { #[test] fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { + use sp_io::TestExternalities; struct Test; + impl OnInitialize for Test { fn on_initialize(_n: u8) -> Weight { Weight::from_ref_time(10) @@ -354,8 +379,10 @@ mod tests { } } - assert_eq!(<(Test, Test)>::on_initialize(0), Weight::from_ref_time(20)); - assert_eq!(<(Test, Test)>::on_runtime_upgrade(), Weight::from_ref_time(40)); + TestExternalities::default().execute_with(|| { + assert_eq!(<(Test, Test)>::on_initialize(0), Weight::from_ref_time(20)); + assert_eq!(<(Test, Test)>::on_runtime_upgrade(), Weight::from_ref_time(40)); + }); } #[test] @@ -416,91 +443,107 @@ mod tests { #[cfg(feature = "try-runtime")] #[test] + #[allow(dead_code)] fn on_runtime_upgrade_tuple() { + use frame_support::parameter_types; + use sp_io::TestExternalities; + struct Test1; struct Test2; struct Test3; + parameter_types! 
{ + static Test1Assertions: u8 = 0; + static Test2Assertions: u8 = 0; + static Test3Assertions: u8 = 0; + static EnableSequentialTest: bool = false; + static SequentialAssertions: u8 = 0; + } + impl OnRuntimeUpgrade for Test1 { fn pre_upgrade() -> Result, &'static str> { Ok("Test1".encode()) } fn post_upgrade(state: Vec) -> Result<(), &'static str> { let s: String = Decode::decode(&mut state.as_slice()).unwrap(); + Test1Assertions::mutate(|val| *val += 1); + if EnableSequentialTest::get() { + SequentialAssertions::mutate(|val| *val += 1); + } assert_eq!(s, "Test1"); Ok(()) } } + impl OnRuntimeUpgrade for Test2 { fn pre_upgrade() -> Result, &'static str> { Ok(100u32.encode()) } fn post_upgrade(state: Vec) -> Result<(), &'static str> { let s: u32 = Decode::decode(&mut state.as_slice()).unwrap(); + Test2Assertions::mutate(|val| *val += 1); + if EnableSequentialTest::get() { + assert_eq!(SequentialAssertions::get(), 1); + SequentialAssertions::mutate(|val| *val += 1); + } assert_eq!(s, 100); Ok(()) } } + impl OnRuntimeUpgrade for Test3 { fn pre_upgrade() -> Result, &'static str> { Ok(true.encode()) } fn post_upgrade(state: Vec) -> Result<(), &'static str> { let s: bool = Decode::decode(&mut state.as_slice()).unwrap(); + Test3Assertions::mutate(|val| *val += 1); + if EnableSequentialTest::get() { + assert_eq!(SequentialAssertions::get(), 2); + SequentialAssertions::mutate(|val| *val += 1); + } assert_eq!(s, true); Ok(()) } } - type TestEmpty = (); - let origin_state = ::pre_upgrade().unwrap(); - let states: Vec> = Decode::decode(&mut origin_state.as_slice()).unwrap(); - assert!(states.is_empty()); - ::post_upgrade(origin_state).unwrap(); - - type Test1Tuple = (Test1,); - let origin_state = ::pre_upgrade().unwrap(); - let states: Vec> = Decode::decode(&mut origin_state.as_slice()).unwrap(); - assert_eq!(states.len(), 1); - assert_eq!( - ::decode(&mut states[0].as_slice()).unwrap(), - "Test1".to_owned() - ); - ::post_upgrade(origin_state).unwrap(); - - type Test123 = (Test1, Test2, Test3); - let origin_state = ::pre_upgrade().unwrap(); - let states: Vec> = Decode::decode(&mut origin_state.as_slice()).unwrap(); - assert_eq!( - ::decode(&mut states[0].as_slice()).unwrap(), - "Test1".to_owned() - ); - assert_eq!(::decode(&mut states[1].as_slice()).unwrap(), 100u32); - assert_eq!(::decode(&mut states[2].as_slice()).unwrap(), true); - ::post_upgrade(origin_state).unwrap(); - - type Test321 = (Test3, Test2, Test1); - let origin_state = ::pre_upgrade().unwrap(); - let states: Vec> = Decode::decode(&mut origin_state.as_slice()).unwrap(); - assert_eq!(::decode(&mut states[0].as_slice()).unwrap(), true); - assert_eq!(::decode(&mut states[1].as_slice()).unwrap(), 100u32); - assert_eq!( - ::decode(&mut states[2].as_slice()).unwrap(), - "Test1".to_owned() - ); - ::post_upgrade(origin_state).unwrap(); - - type TestNested123 = (Test1, (Test2, Test3)); - let origin_state = ::pre_upgrade().unwrap(); - let states: Vec> = Decode::decode(&mut origin_state.as_slice()).unwrap(); - assert_eq!( - ::decode(&mut states[0].as_slice()).unwrap(), - "Test1".to_owned() - ); - // nested state for (Test2, Test3) - let nested_states: Vec> = Decode::decode(&mut states[1].as_slice()).unwrap(); - assert_eq!(::decode(&mut nested_states[0].as_slice()).unwrap(), 100u32); - assert_eq!(::decode(&mut nested_states[1].as_slice()).unwrap(), true); - ::post_upgrade(origin_state).unwrap(); + TestExternalities::default().execute_with(|| { + type TestEmpty = (); + let origin_state = ::pre_upgrade().unwrap(); + assert!(origin_state.is_empty()); 
+ ::post_upgrade(origin_state).unwrap(); + + type Test1Tuple = (Test1,); + let origin_state = ::pre_upgrade().unwrap(); + assert!(origin_state.is_empty()); + ::post_upgrade(origin_state).unwrap(); + assert_eq!(Test1Assertions::get(), 0); + ::on_runtime_upgrade(); + assert_eq!(Test1Assertions::take(), 1); + + type Test321 = (Test3, Test2, Test1); + ::on_runtime_upgrade(); + assert_eq!(Test1Assertions::take(), 1); + assert_eq!(Test2Assertions::take(), 1); + assert_eq!(Test3Assertions::take(), 1); + + // enable sequential tests + EnableSequentialTest::mutate(|val| *val = true); + + type Test123 = (Test1, Test2, Test3); + ::on_runtime_upgrade(); + assert_eq!(Test1Assertions::take(), 1); + assert_eq!(Test2Assertions::take(), 1); + assert_eq!(Test3Assertions::take(), 1); + + // reset assertions + SequentialAssertions::take(); + + type TestNested123 = (Test1, (Test2, Test3)); + ::on_runtime_upgrade(); + assert_eq!(Test1Assertions::take(), 1); + assert_eq!(Test2Assertions::take(), 1); + assert_eq!(Test3Assertions::take(), 1); + }); } } From b34936a38731c7f70536f7ce2502024d0d573424 Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Fri, 23 Sep 2022 16:42:24 +0200 Subject: [PATCH 166/348] [Fix] parameter_types! dead code errors (#12340) * [Fix] parameter_types! dead code errors * mark the whole impl allow(unused) --- frame/support/src/lib.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 57f29e592b242..51aa05261dac3 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -361,9 +361,9 @@ macro_rules! parameter_types { } }; (IMPL_STORAGE $name:ident, $type:ty, $value:expr $(, $ty_params:ident)*) => { + #[allow(unused)] impl< $($ty_params),* > $name< $($ty_params),* > { /// Returns the key for this parameter type. - #[allow(unused)] pub fn key() -> [u8; 16] { $crate::sp_core_hashing_proc_macro::twox_128!(b":", $name, b":") } @@ -372,7 +372,6 @@ macro_rules! parameter_types { /// /// This needs to be executed in an externalities provided /// environment. - #[allow(unused)] pub fn set(value: &$type) { $crate::storage::unhashed::put(&Self::key(), value); } @@ -448,6 +447,7 @@ macro_rules! parameter_types_impl_thread_local { } /// Mutate the internal value in place. + #[allow(unused)] pub fn mutate R>(mutate: F) -> R{ let mut current = Self::get(); let result = mutate(&mut current); @@ -456,6 +456,7 @@ macro_rules! parameter_types_impl_thread_local { } /// Get current value and replace with initial value of the parameter type. 
+ #[allow(unused)] pub fn take() -> $type { let current = Self::get(); Self::set($value); From ffcd5232a89b556dcf558db429c6c3d9a020cc27 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 23 Sep 2022 19:51:57 +0200 Subject: [PATCH 167/348] Independence of Slot-based algorithms from system Timestamp (#12224) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove timestamp from SlotInfo * Expose as millis instead of secs * Nits * Fix test after field removal * Yet another test fix * On the fly timestamp computation * Removed slot timestamp from logs * Removed reference to timestamp from slots subsystem * Slot based algorithm tests do not require timstamp inherent anymore * Remove junk files * Further tests cleanup * Trigger pipeline * Apply code suggestions * Trigger pipeline Co-authored-by: André Silva --- Cargo.lock | 1 - bin/node-template/node/src/service.rs | 4 +- bin/node/cli/src/service.rs | 4 +- client/consensus/aura/src/lib.rs | 26 ++++++------- client/consensus/babe/src/tests.rs | 22 +++++------ client/consensus/slots/Cargo.toml | 1 - client/consensus/slots/src/lib.rs | 56 +++++++++------------------ client/consensus/slots/src/slots.rs | 14 +------ primitives/timestamp/src/lib.rs | 25 ++++++------ 9 files changed, 56 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d1349ce488fa7..74784eb9a6c24 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8246,7 +8246,6 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", - "sp-timestamp", "substrate-test-runtime-client", "thiserror", ] diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index caa01761636df..6ec9a33749a69 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -126,7 +126,7 @@ pub fn new_partial( slot_duration, ); - Ok((timestamp, slot)) + Ok((slot, timestamp)) }, spawner: &task_manager.spawn_essential_handle(), registry: config.prometheus_registry(), @@ -269,7 +269,7 @@ pub fn new_full(mut config: Configuration) -> Result slot_duration, ); - Ok((timestamp, slot)) + Ok((slot, timestamp)) }, force_authoring, backoff_authoring_blocks, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index f2b4feb58075f..a3098eac6402f 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -223,7 +223,7 @@ pub fn new_partial( let uncles = sp_authorship::InherentDataProvider::<::Header>::check_inherents(); - Ok((timestamp, slot, uncles)) + Ok((slot, timestamp, uncles)) }, &task_manager.spawn_essential_handle(), config.prometheus_registry(), @@ -453,7 +453,7 @@ pub fn new_full_base( &parent, )?; - Ok((timestamp, slot, uncles, storage_proof)) + Ok((slot, timestamp, uncles, storage_proof)) } }, force_authoring, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 0bdc663815051..c538200bb315c 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -569,7 +569,7 @@ mod tests { traits::{Block as BlockT, Header as _}, Digest, }; - use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; + use sp_timestamp::Timestamp; use std::{ task::Poll, time::{Duration, Instant}, @@ -579,6 +579,8 @@ mod tests { TestClient, }; + const SLOT_DURATION_MS: u64 = 1000; + type Error = sp_blockchain::Error; struct DummyFactory(Arc); @@ -619,8 +621,6 @@ mod tests { } } - const SLOT_DURATION: u64 = 1000; - type AuraVerifier = import_queue::AuraVerifier< PeersFullClient, AuthorityPair, @@ 
-628,7 +628,7 @@ mod tests { dyn CreateInherentDataProviders< TestBlock, (), - InherentDataProviders = (TimestampInherentDataProvider, InherentDataProvider), + InherentDataProviders = (InherentDataProvider,), >, >, >; @@ -648,17 +648,15 @@ mod tests { let client = client.as_client(); let slot_duration = slot_duration(&*client).expect("slot duration available"); - assert_eq!(slot_duration.as_millis() as u64, SLOT_DURATION); + assert_eq!(slot_duration.as_millis() as u64, SLOT_DURATION_MS); import_queue::AuraVerifier::new( client, Box::new(|_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - SlotDuration::from_millis(6000), + Timestamp::current(), + SlotDuration::from_millis(SLOT_DURATION_MS), ); - - Ok((timestamp, slot)) + Ok((slot,)) }), CheckForEquivocation::Yes, None, @@ -736,13 +734,12 @@ mod tests { sync_oracle: DummyOracle, justification_sync_link: (), create_inherent_data_providers: |_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - SlotDuration::from_millis(6000), + Timestamp::current(), + SlotDuration::from_millis(SLOT_DURATION_MS), ); - Ok((timestamp, slot)) + Ok((slot,)) }, force_authoring: false, backoff_authoring_blocks: Some( @@ -875,7 +872,6 @@ mod tests { let res = executor::block_on(worker.on_slot(SlotInfo { slot: 0.into(), - timestamp: 0.into(), ends_at: Instant::now() + Duration::from_secs(100), inherent_data: InherentData::new(), duration: Duration::from_millis(1000), diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index a3467e0020200..58f5e7b8eb6d4 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -43,7 +43,7 @@ use sp_runtime::{ generic::{Digest, DigestItem}, traits::Block as BlockT, }; -use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; +use sp_timestamp::Timestamp; use std::{cell::RefCell, task::Poll, time::Duration}; type Item = DigestItem; @@ -68,6 +68,8 @@ type Mutator = Arc; type BabeBlockImport = PanickingBlockImport>>; +const SLOT_DURATION_MS: u64 = 1000; + #[derive(Clone)] struct DummyFactory { client: Arc, @@ -239,7 +241,7 @@ pub struct TestVerifier { dyn CreateInherentDataProviders< TestBlock, (), - InherentDataProviders = (TimestampInherentDataProvider, InherentDataProvider), + InherentDataProviders = (InherentDataProvider,), >, >, >, @@ -321,13 +323,11 @@ impl TestNetFactory for BabeTestNet { client: client.clone(), select_chain: longest_chain, create_inherent_data_providers: Box::new(|_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - SlotDuration::from_millis(6000), + Timestamp::current(), + SlotDuration::from_millis(SLOT_DURATION_MS), ); - - Ok((timestamp, slot)) + Ok((slot,)) }), config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), @@ -433,13 +433,11 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static env: environ, sync_oracle: DummyOracle, create_inherent_data_providers: Box::new(|_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); let slot = InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - SlotDuration::from_millis(6000), + Timestamp::current(), + SlotDuration::from_millis(SLOT_DURATION_MS), ); - - 
Ok((timestamp, slot)) + Ok((slot,)) }), force_authoring: false, backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 208e31971257d..fae499ad7c7c6 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -31,7 +31,6 @@ sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machine" } -sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index b9f8c03f2ac88..7c5d5d4a73bc1 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -42,7 +42,6 @@ use sp_consensus::{Proposal, Proposer, SelectChain, SyncOracle}; use sp_consensus_slots::{Slot, SlotDuration}; use sp_inherents::CreateInherentDataProviders; use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; -use sp_timestamp::Timestamp; use std::{fmt::Debug, ops::Deref, time::Duration}; /// The changes that need to applied to the storage to create the state for a block. @@ -252,7 +251,7 @@ pub trait SimpleSlotWorker { where Self: Sync, { - let (timestamp, slot) = (slot_info.timestamp, slot_info.slot); + let slot = slot_info.slot; let telemetry = self.telemetry(); let logging_target = self.logging_target(); @@ -316,23 +315,14 @@ pub trait SimpleSlotWorker { return None } - debug!( - target: logging_target, - "Starting authorship at slot {}; timestamp = {}", slot, *timestamp, - ); + debug!(target: logging_target, "Starting authorship at slot: {slot}"); - telemetry!( - telemetry; - CONSENSUS_DEBUG; - "slots.starting_authorship"; - "slot_num" => *slot, - "timestamp" => *timestamp, - ); + telemetry!(telemetry; CONSENSUS_DEBUG; "slots.starting_authorship"; "slot_num" => slot); let proposer = match self.proposer(&slot_info.chain_head).await { Ok(p) => p, Err(err) => { - warn!(target: logging_target, "Unable to author block in slot {:?}: {}", slot, err,); + warn!(target: logging_target, "Unable to author block in slot {slot:?}: {err}"); telemetry!( telemetry; @@ -440,44 +430,35 @@ impl + Send + Sync, B: BlockT> /// Slot specific extension that the inherent data provider needs to implement. pub trait InherentDataProviderExt { - /// The current timestamp that will be found in the - /// [`InherentData`](`sp_inherents::InherentData`). - fn timestamp(&self) -> Timestamp; - /// The current slot that will be found in the [`InherentData`](`sp_inherents::InherentData`). fn slot(&self) -> Slot; } /// Small macro for implementing `InherentDataProviderExt` for inherent data provider tuple. macro_rules! impl_inherent_data_provider_ext_tuple { - ( T, S $(, $TN:ident)* $( , )?) => { - impl InherentDataProviderExt for (T, S, $($TN),*) + ( S $(, $TN:ident)* $( , )?) 
=> { + impl InherentDataProviderExt for (S, $($TN),*) where - T: Deref, S: Deref, { - fn timestamp(&self) -> Timestamp { - *self.0.deref() - } - fn slot(&self) -> Slot { - *self.1.deref() + *self.0.deref() } } } } -impl_inherent_data_provider_ext_tuple!(T, S); -impl_inherent_data_provider_ext_tuple!(T, S, A); -impl_inherent_data_provider_ext_tuple!(T, S, A, B); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H, I); -impl_inherent_data_provider_ext_tuple!(T, S, A, B, C, D, E, F, G, H, I, J); +impl_inherent_data_provider_ext_tuple!(S); +impl_inherent_data_provider_ext_tuple!(S, A); +impl_inherent_data_provider_ext_tuple!(S, A, B); +impl_inherent_data_provider_ext_tuple!(S, A, B, C); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I); +impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I, J); /// Start a new slot worker. /// @@ -806,7 +787,6 @@ mod test { super::slots::SlotInfo { slot: slot.into(), duration: SLOT_DURATION, - timestamp: Default::default(), inherent_data: Default::default(), ends_at: Instant::now() + SLOT_DURATION, chain_head: Header::new( diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index accf24b6b4e78..f3dc485a8e819 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -50,8 +50,6 @@ pub fn time_until_next_slot(slot_duration: Duration) -> Duration { pub struct SlotInfo { /// The slot number as found in the inherent data. pub slot: Slot, - /// Current timestamp as found in the inherent data. - pub timestamp: sp_timestamp::Timestamp, /// The instant at which the slot ends. pub ends_at: Instant, /// The inherent data. @@ -72,7 +70,6 @@ impl SlotInfo { /// `ends_at` is calculated using `timestamp` and `duration`. 
pub fn new( slot: Slot, - timestamp: sp_timestamp::Timestamp, inherent_data: InherentData, duration: Duration, chain_head: B::Header, @@ -80,7 +77,6 @@ impl SlotInfo { ) -> Self { Self { slot, - timestamp, inherent_data, duration, chain_head, @@ -175,7 +171,6 @@ where ); } - let timestamp = inherent_data_providers.timestamp(); let slot = inherent_data_providers.slot(); let inherent_data = inherent_data_providers.create_inherent_data()?; @@ -183,14 +178,7 @@ where if slot > self.last_slot { self.last_slot = slot; - break Ok(SlotInfo::new( - slot, - timestamp, - inherent_data, - self.slot_duration, - chain_head, - None, - )) + break Ok(SlotInfo::new(slot, inherent_data, self.slot_duration, chain_head, None)) } } } diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index b98a87c37f69d..d88b1839babe6 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -56,6 +56,17 @@ impl Timestamp { pub fn checked_sub(self, other: Self) -> Option { self.0.checked_sub(other.0).map(Self) } + + /// The current timestamp using the system time. + #[cfg(feature = "std")] + pub fn current() -> Self { + use std::time::SystemTime; + + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .expect("Current time is always after unix epoch; qed") + .into() + } } impl sp_std::ops::Deref for Timestamp { @@ -165,18 +176,6 @@ impl TimestampInherentData for InherentData { } } -/// The current timestamp using the system time. -/// -/// This timestamp is the time since the UNIX epoch. -#[cfg(feature = "std")] -fn current_timestamp() -> std::time::Duration { - use std::time::SystemTime; - - let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH) - .expect("Current time is always after unix epoch; qed") -} - /// Provide duration since unix epoch in millisecond for timestamp inherent. #[cfg(feature = "std")] pub struct InherentDataProvider { @@ -190,7 +189,7 @@ impl InherentDataProvider { pub fn from_system_time() -> Self { Self { max_drift: std::time::Duration::from_secs(60).into(), - timestamp: current_timestamp().into(), + timestamp: Timestamp::current(), } } From fb779212ca6b59bd158d72deeab2502cb9670cca Mon Sep 17 00:00:00 2001 From: Frederik Gartenmeister Date: Sat, 24 Sep 2022 08:59:44 +0200 Subject: [PATCH 168/348] Const impls of base arithmetics for `Weights` with `u64` (#12322) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Const impls * Adding explainatory comments * Update primitives/weights/src/weight_v2.rs Doc comment suggestions Co-authored-by: Bastian Köcher * Update primitives/weights/src/weight_v2.rs Doc comment suggestions Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- primitives/weights/src/weight_v2.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index a4e4da4f8a7b3..af0f469ebaaeb 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -160,6 +160,34 @@ impl Weight { Self { ref_time: 0 } } + /// Constant version of Add with u64. + /// + /// Is only overflow safe when evaluated at compile-time. + pub const fn add(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time + scalar } + } + + /// Constant version of Sub with u64. + /// + /// Is only overflow safe when evaluated at compile-time. 
+ pub const fn sub(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time - scalar } + } + + /// Constant version of Div with u64. + /// + /// Is only overflow safe when evaluated at compile-time. + pub const fn div(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time / scalar } + } + + /// Constant version of Mul with u64. + /// + /// Is only overflow safe when evaluated at compile-time. + pub const fn mul(self, scalar: u64) -> Self { + Self { ref_time: self.ref_time * scalar } + } + /// Returns true if any of `self`'s constituent weights is strictly greater than that of the /// `other`'s, otherwise returns false. pub const fn any_gt(self, other: Self) -> bool { From 9476d2157a0d266677dee144a8a7bc44a6b821e1 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Sat, 24 Sep 2022 17:17:44 +0200 Subject: [PATCH 169/348] Add base-weight to `System::Extrinsic*` events (#12329) * Add base-weight to events Signed-off-by: Oliver Tale-Yazdi * Fix test Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- bin/node/executor/tests/basic.rs | 13 +- frame/system/src/extensions/check_weight.rs | 12 +- frame/system/src/lib.rs | 8 +- frame/system/src/mock.rs | 1 + frame/system/src/tests.rs | 312 +++++++++++--------- 5 files changed, 198 insertions(+), 148 deletions(-) diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 99a9b83596acf..fc4e138faafc2 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -311,10 +311,19 @@ fn full_native_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); - let transfer_weight = default_transfer_call().get_dispatch_info().weight; + let transfer_weight = default_transfer_call().get_dispatch_info().weight.saturating_add( + ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic, + ); let timestamp_weight = pallet_timestamp::Call::set:: { now: Default::default() } .get_dispatch_info() - .weight; + .weight + .saturating_add( + ::BlockWeights::get() + .get(DispatchClass::Mandatory) + .base_extrinsic, + ); executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 466231b8455ec..15a88913cd337 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -342,7 +342,7 @@ mod tests { .get(DispatchClass::Operational) .max_total .unwrap_or_else(|| weights.max_block); - let base_weight = weights.get(DispatchClass::Normal).base_extrinsic; + let base_weight = weights.get(DispatchClass::Operational).base_extrinsic; let weight = operational_limit - base_weight; let okay = @@ -378,11 +378,11 @@ mod tests { // Max normal is 768 (75%) // 10 is taken for block execution weight // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) - // And Operational can be 256 to produce a full block (-5 for base) + // And Operational can be 246 to produce a full block (-10 for base) let max_normal = DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let rest_operational = DispatchInfo { - weight: Weight::from_ref_time(251), + weight: Weight::from_ref_time(246), class: DispatchClass::Operational, ..Default::default() }; @@ -406,7 +406,7 @@ mod tests { let max_normal = DispatchInfo { weight: Weight::from_ref_time(753), ..Default::default() }; let rest_operational = DispatchInfo { - weight: 
Weight::from_ref_time(251), + weight: Weight::from_ref_time(246), class: DispatchClass::Operational, ..Default::default() }; @@ -414,7 +414,7 @@ mod tests { let len = 0_usize; assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - // Extra 15 here from block execution + base extrinsic weight + // Extra 20 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), Weight::from_ref_time(266)); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); @@ -433,7 +433,7 @@ mod tests { ..Default::default() }; let dispatch_operational = DispatchInfo { - weight: Weight::from_ref_time(251), + weight: Weight::from_ref_time(246), class: DispatchClass::Operational, ..Default::default() }; diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 36360f8fae2c2..dc74157da79de 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1509,9 +1509,15 @@ impl Pallet { } /// To be called immediately after an extrinsic has been applied. + /// + /// Emits an `ExtrinsicSuccess` or `ExtrinsicFailed` event depending on the outcome. + /// The emitted event contains the post-dispatch corrected weight including + /// the base-weight for its dispatch class. pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, mut info: DispatchInfo) { - info.weight = extract_actual_weight(r, &info); + info.weight = extract_actual_weight(r, &info) + .saturating_add(T::BlockWeights::get().get(info.class).base_extrinsic); info.pays_fee = extract_actual_pays_fee(r, &info); + Self::deposit_event(match r { Ok(_) => Event::ExtrinsicSuccess { dispatch_info: info }, Err(err) => { diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index 1c0511787eb76..b6fc121612050 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -67,6 +67,7 @@ parameter_types! 
{ weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT); }) .for_class(DispatchClass::Operational, |weights| { + weights.base_extrinsic = Weight::from_ref_time(10); weights.max_total = Some(MAX_BLOCK_WEIGHT); weights.reserved = Some( MAX_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAX_BLOCK_WEIGHT diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 92563f4ad1747..c42131c450228 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -169,6 +169,10 @@ fn deposit_event_should_work() { }] ); + let normal_base = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; + System::reset_events(); System::initialize(&2, &[0u8; 32].into(), &Default::default()); System::deposit_event(SysEvent::NewAccount { account: 32 }); @@ -194,14 +198,17 @@ fn deposit_event_should_work() { }, EventRecord { phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess { dispatch_info: Default::default() }.into(), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } + } + .into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: SysEvent::ExtrinsicFailed { dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: Default::default() + dispatch_info: DispatchInfo { weight: normal_base, ..Default::default() } } .into(), topics: vec![] @@ -223,6 +230,9 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { System::initialize(&1, &[0u8; 32].into(), &Default::default()); System::note_finished_initialize(); + let normal_base = ::BlockWeights::get() + .get(DispatchClass::Normal) + .base_extrinsic; let pre_info = DispatchInfo { weight: Weight::from_ref_time(1000), ..Default::default() }; System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); System::note_applied_extrinsic(&Ok(Some(1000).into()), pre_info); @@ -267,144 +277,168 @@ fn deposit_event_uses_actual_weight_and_pays_fee() { }), pre_info, ); + // Also works for operational. 
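+		// The mock gives the operational class its own `base_extrinsic`, so the event for
+		// this extrinsic should report `300 + operational_base` rather than `300 + normal_base`.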
+ let operational_base = ::BlockWeights::get() + .get(DispatchClass::Operational) + .base_extrinsic; + assert!(normal_base != operational_base, "Test pre-condition violated"); + let pre_info = DispatchInfo { + weight: Weight::from_ref_time(1000), + class: DispatchClass::Operational, + ..Default::default() + }; + System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(300), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(2), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(3), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(4), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(5), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(6), - event: SysEvent::ExtrinsicSuccess { - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(500), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(7), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(999), - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(8), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(1000), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(9), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(800), - pays_fee: Pays::Yes, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - EventRecord { - phase: Phase::ApplyExtrinsic(10), - event: SysEvent::ExtrinsicFailed { - dispatch_error: DispatchError::BadOrigin.into(), - dispatch_info: DispatchInfo { - weight: Weight::from_ref_time(800), - pays_fee: Pays::No, - ..Default::default() - }, - } - .into(), - topics: vec![] - }, - ] - ); + let got = System::events(); + let want = vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(300).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], 
+ }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(3), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(4), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(5), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(6), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(500).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(7), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(999).saturating_add(normal_base), + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(8), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(1000).saturating_add(normal_base), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(9), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(800).saturating_add(normal_base), + pays_fee: Pays::Yes, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(10), + event: SysEvent::ExtrinsicFailed { + dispatch_error: DispatchError::BadOrigin.into(), + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(800).saturating_add(normal_base), + pays_fee: Pays::No, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(11), + event: SysEvent::ExtrinsicSuccess { + dispatch_info: DispatchInfo { + weight: Weight::from_ref_time(300).saturating_add(operational_base), + class: DispatchClass::Operational, + ..Default::default() + }, + } + .into(), + topics: vec![], + }, + ]; + for (i, event) in want.into_iter().enumerate() { + assert_eq!(got[i], event, "Event mismatch at index {}", i); + } }); } From d0214e7c77de639d60301d682ced7e155def15da Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Sat, 24 Sep 2022 21:32:35 +0200 Subject: [PATCH 170/348] re add the migration checks for staking 
(#12330) Co-authored-by: parity-processbot <> --- frame/staking/src/migrations.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index f47545af694cf..8f37ae30dd056 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -35,6 +35,11 @@ pub mod v12 { impl OnRuntimeUpgrade for MigrateToV12 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, &'static str> { + frame_support::ensure!( + StorageVersion::::get() == Releases::V11_0_0, + "Expected v11 before upgrading to v12" + ); + frame_support::ensure!( T::HistoryDepth::get() == HistoryDepth::::get(), "Provided value of HistoryDepth should be same as the existing storage value" @@ -129,6 +134,11 @@ pub mod v11 { #[cfg(feature = "try-runtime")] fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + frame_support::ensure!( + StorageVersion::::get() == crate::Releases::V11_0_0, + "wrong version after the upgrade" + ); + let old_pallet_name = N::get(); let new_pallet_name =

::name(); From 4219b3ab45ff2201eb8aecbbb444f9ae055f528e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michael=20M=C3=BCller?= Date: Sun, 25 Sep 2022 11:24:35 +0200 Subject: [PATCH 171/348] Allow specifying immediate finalize for `manual-seal` (#12106) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Alexander Theißen --- client/consensus/manual-seal/src/lib.rs | 54 +++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index c5dd169e281f2..4672e7275a56b 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -247,6 +247,60 @@ pub async fn run_instant_seal( .await } +/// Runs the background authorship task for the instant seal engine. +/// instant-seal creates a new block for every transaction imported into +/// the transaction pool. +/// +/// This function will finalize the block immediately as well. If you don't +/// want this behavior use `run_instant_seal` instead. +pub async fn run_instant_seal_and_finalize( + InstantSealParams { + block_import, + env, + client, + pool, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }: InstantSealParams, +) where + B: BlockT + 'static, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Proposer: Proposer>, + SC: SelectChain + 'static, + TransactionFor: 'static, + TP: TransactionPool, + CIDP: CreateInherentDataProviders, + P: Send + Sync + 'static, +{ + // Creates and finalizes blocks as soon as transactions are imported + // into the transaction pool. + let commands_stream = pool.import_notification_stream().map(|_| EngineCommand::SealNewBlock { + create_empty: false, + finalize: true, + parent_hash: None, + sender: None, + }); + + run_manual_seal(ManualSealParams { + block_import, + env, + client, + pool, + commands_stream, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }) + .await +} + #[cfg(test)] mod tests { use super::*; From badc92ac20dbf006595a6af9418da9942527cbd7 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Sun, 25 Sep 2022 23:22:54 +0200 Subject: [PATCH 172/348] Removed OuterCall alias & doc fixes (#12349) --- frame/alliance/src/lib.rs | 2 +- frame/collective/src/lib.rs | 6 +++--- .../election-provider-multi-phase/src/unsigned.rs | 14 +++++++------- frame/multisig/src/benchmarking.rs | 2 +- frame/ranked-collective/src/lib.rs | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 82c7e21dba3af..2ef6718538122 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -243,7 +243,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// The outer call dispatch type. + /// The runtime call dispatch type. type Proposal: Parameter + Dispatchable + From> diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index e934924033552..ae68ae2fe3e16 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -180,10 +180,10 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config { - /// The outer origin type. + /// The runtime origin type. type RuntimeOrigin: From>; - /// The outer call dispatch type. + /// The runtime call dispatch type. 
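+		/// In a typical runtime this ends up being the runtime's `RuntimeCall` enum.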
type Proposal: Parameter + Dispatchable< RuntimeOrigin = >::RuntimeOrigin, @@ -191,7 +191,7 @@ pub mod pallet { > + From> + GetDispatchInfo; - /// The outer event type. + /// The runtime event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index cf8df237bafb0..833f80c90d13e 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -856,7 +856,7 @@ mod tests { use crate::{ mock::{ roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, - MinerMaxWeight, MultiPhase, Runtime, RuntimeCall as OuterCall, RuntimeOrigin, System, + MinerMaxWeight, MultiPhase, Runtime, RuntimeCall, RuntimeOrigin, System, TestNposSolution, TrimHelpers, UnsignedPhase, }, CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, @@ -1070,8 +1070,8 @@ mod tests { raw_solution: Box::new(solution.clone()), witness: witness(), }; - let outer_call: OuterCall = call.into(); - let _ = outer_call.dispatch(RuntimeOrigin::none()); + let runtime_call: RuntimeCall = call.into(); + let _ = runtime_call.dispatch(RuntimeOrigin::none()); }) } @@ -1096,8 +1096,8 @@ mod tests { raw_solution: Box::new(solution.clone()), witness: correct_witness, }; - let outer_call: OuterCall = call.into(); - let _ = outer_call.dispatch(RuntimeOrigin::none()); + let runtime_call: RuntimeCall = call.into(); + let _ = runtime_call.dispatch(RuntimeOrigin::none()); }) } @@ -1560,7 +1560,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); let call = extrinsic.call; - assert!(matches!(call, OuterCall::MultiPhase(Call::submit_unsigned { .. }))); + assert!(matches!(call, RuntimeCall::MultiPhase(Call::submit_unsigned { .. }))); }) } @@ -1577,7 +1577,7 @@ mod tests { let encoded = pool.read().transactions[0].clone(); let extrinsic = Extrinsic::decode(&mut &*encoded).unwrap(); let call = match extrinsic.call { - OuterCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, + RuntimeCall::MultiPhase(call @ Call::submit_unsigned { .. }) => call, _ => panic!("bad call: unexpected submission"), }; diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index 8d0651002305b..c0b0097b07236 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -41,7 +41,7 @@ fn setup_multi( signatories.push(signatory); } signatories.sort(); - // Must first convert to outer call type. + // Must first convert to runtime call type. let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); let call_data = OpaqueCall::::from_encoded(call.encode()); diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index bdd5d26373980..fa3a473fe7d73 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -361,7 +361,7 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - /// The outer event type. + /// The runtime event type. 
type RuntimeEvent: From> + IsType<::RuntimeEvent>; From fbd7e5aa99e20b7019576ecf598bca2d7019cddd Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Mon, 26 Sep 2022 08:05:05 +0200 Subject: [PATCH 173/348] [Enhancement] Remove optional Pool subscription from fast-unstake (#12344) * [Enhancement] Remove optional Pool subscription from fast-unstake * remove nomination-pools pallet dependency * fixes * more fixes * more fixes * more fixes --- Cargo.lock | 1 - frame/fast-unstake/Cargo.toml | 2 - frame/fast-unstake/src/benchmarking.rs | 30 +-- frame/fast-unstake/src/lib.rs | 95 ++------ frame/fast-unstake/src/mock.rs | 50 +--- frame/fast-unstake/src/tests.rs | 302 +++++++------------------ frame/fast-unstake/src/types.rs | 3 - 7 files changed, 105 insertions(+), 378 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 74784eb9a6c24..6ea79a120361e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5729,7 +5729,6 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "pallet-nomination-pools", "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", diff --git a/frame/fast-unstake/Cargo.toml b/frame/fast-unstake/Cargo.toml index 1fa118dba4a8d..69aeaff35993c 100644 --- a/frame/fast-unstake/Cargo.toml +++ b/frame/fast-unstake/Cargo.toml @@ -28,7 +28,6 @@ sp-staking = { default-features = false, path = "../../primitives/staking" } pallet-balances = { default-features = false, path = "../balances" } pallet-timestamp = { default-features = false, path = "../timestamp" } pallet-staking = { default-features = false, path = "../staking" } -pallet-nomination-pools = { default-features = false, path = "../nomination-pools" } frame-election-provider-support = { default-features = false, path = "../election-provider-support" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } @@ -55,7 +54,6 @@ std = [ "sp-std/std", "pallet-staking/std", - "pallet-nomination-pools/std", "pallet-balances/std", "pallet-timestamp/std", "frame-election-provider-support/std", diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs index 68a3da0d40af3..5690d5ce6f29f 100644 --- a/frame/fast-unstake/src/benchmarking.rs +++ b/frame/fast-unstake/src/benchmarking.rs @@ -26,7 +26,6 @@ use frame_support::{ traits::{Currency, EnsureOrigin, Get, Hooks}, }; use frame_system::RawOrigin; -use pallet_nomination_pools::{Pallet as Pools, PoolId}; use pallet_staking::Pallet as Staking; use sp_runtime::traits::{StaticLookup, Zero}; use sp_staking::EraIndex; @@ -76,25 +75,6 @@ pub(crate) fn fast_unstake_events() -> Vec> { .collect::>() } -fn setup_pool() -> PoolId { - let depositor = frame_benchmarking::account::("depositor_42", 0, USER_SEED); - let depositor_lookup = l::(depositor.clone()); - - let stake = Pools::::depositor_min_bond(); - CurrencyOf::::make_free_balance_be(&depositor, stake * 10u32.into()); - - Pools::::create( - RawOrigin::Signed(depositor.clone()).into(), - stake, - depositor_lookup.clone(), - depositor_lookup.clone(), - depositor_lookup, - ) - .unwrap(); - - pallet_nomination_pools::LastPoolId::::get() -} - fn setup_staking(v: u32, until: EraIndex) { let ed = CurrencyOf::::minimum_balance(); @@ -131,10 +111,8 @@ benchmarks! { // on_idle, we we don't check anyone, but fully unbond and move them to another pool. 
on_idle_unstake { let who = create_unexposed_nominator::(); - let pool_id = setup_pool::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), - Some(pool_id) )); ErasToCheckPerBlock::::put(1); @@ -143,7 +121,7 @@ benchmarks! { on_idle_full_block::(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap(), maybe_pool_id: Some(pool_id) }) + Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap() }) ); } : { @@ -172,7 +150,6 @@ benchmarks! { let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), - None, )); // no one is queued thus far. @@ -185,7 +162,7 @@ benchmarks! { let checked: frame_support::BoundedVec<_, _> = (1..=u).rev().collect::>().try_into().unwrap(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked, maybe_pool_id: None }) + Some(UnstakeRequest { stash: who.clone(), checked }) ); assert!(matches!( fast_unstake_events::().last(), @@ -199,7 +176,7 @@ benchmarks! { assert_eq!(Queue::::count(), 0); } - :_(RawOrigin::Signed(who.clone()), None) + :_(RawOrigin::Signed(who.clone())) verify { assert_eq!(Queue::::count(), 1); } @@ -208,7 +185,6 @@ benchmarks! { let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), - None )); assert_eq!(Queue::::count(), 1); whitelist_account!(who); diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 51416808f48c8..5acc9940debf1 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -19,8 +19,7 @@ //! //! If a nominator is not exposed in any `ErasStakers` (i.e. "has not actively backed any //! validators in the last `BondingDuration` days"), then they can register themselves in this -//! pallet, unstake faster than having to wait an entire bonding duration, and potentially move -//! into a nomination pool. +//! pallet, unstake faster than having to wait an entire bonding duration. //! //! Appearing in the exposure of a validator means being exposed equal to that validator from the //! point of view of the staking system. This usually means earning rewards with the validator, and @@ -43,8 +42,7 @@ //! to prevent them from accidentally exposing themselves behind a validator etc. //! //! Once processed, if successful, no additional fee for the checking process is taken, and the -//! staker is instantly unbonded. Optionally, if they have asked to join a pool, their *entire* -//! stake is joined into their pool of choice. +//! staker is instantly unbonded. //! //! If unsuccessful, meaning that the staker was exposed sometime in the last `BondingDuration` eras //! they will end up being slashed for the amount of wasted work they have inflicted on the chian. @@ -85,7 +83,6 @@ pub mod pallet { use frame_election_provider_support::ElectionProvider; use frame_support::pallet_prelude::*; use frame_system::{pallet_prelude::*, RawOrigin}; - use pallet_nomination_pools::PoolId; use pallet_staking::Pallet as Staking; use sp_runtime::{ traits::{Saturating, Zero}, @@ -109,12 +106,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: - frame_system::Config - + pallet_staking::Config< - CurrencyBalance = ::CurrencyBalance, - > + pallet_nomination_pools::Config - { + pub trait Config: frame_system::Config + pallet_staking::Config { /// The overarching event type. 
type RuntimeEvent: From> + IsType<::RuntimeEvent> @@ -139,10 +131,9 @@ pub mod pallet { /// The map of all accounts wishing to be unstaked. /// - /// Points the `AccountId` wishing to unstake to the optional `PoolId` they wish to join - /// thereafter. + /// Keeps track of `AccountId` wishing to unstake. #[pallet::storage] - pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, Option>; + pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, ()>; /// Number of eras to check per block. /// @@ -158,7 +149,7 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// A staker was unstaked. - Unstaked { stash: T::AccountId, maybe_pool_id: Option, result: DispatchResult }, + Unstaked { stash: T::AccountId, result: DispatchResult }, /// A staker was slashed for requesting fast-unstake whilst being exposed. Slashed { stash: T::AccountId, amount: BalanceOf }, /// A staker was partially checked for the given eras, but the process did not finish. @@ -213,16 +204,13 @@ pub mod pallet { /// they are guaranteed to remain eligible, because the call will chill them as well. /// /// If the check works, the entire staking data is removed, i.e. the stash is fully - /// unstaked, and they potentially join a pool with their entire bonded stake. + /// unstaked. /// /// If the check fails, the stash remains chilled and waiting for being unbonded as in with /// the normal staking system, but they lose part of their unbonding chunks due to consuming /// the chain's resources. #[pallet::weight(::WeightInfo::register_fast_unstake())] - pub fn register_fast_unstake( - origin: OriginFor, - maybe_pool_id: Option, - ) -> DispatchResult { + pub fn register_fast_unstake(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; let ledger = @@ -243,12 +231,11 @@ pub mod pallet { Staking::::unbond(RawOrigin::Signed(ctrl).into(), ledger.total)?; // enqueue them. - Queue::::insert(ledger.stash, maybe_pool_id); + Queue::::insert(ledger.stash, ()); Ok(()) } - /// Deregister oneself from the fast-unstake (also cancels joining the pool if that was - /// supplied on `register_fast_unstake` . + /// Deregister oneself from the fast-unstake. /// /// This is useful if one is registered, they are still waiting, and they change their mind. /// @@ -327,17 +314,12 @@ pub mod pallet { return T::DbWeight::get().reads(2) } - let UnstakeRequest { stash, mut checked, maybe_pool_id } = match Head::::take() - .or_else(|| { - // NOTE: there is no order guarantees in `Queue`. - Queue::::drain() - .map(|(stash, maybe_pool_id)| UnstakeRequest { - stash, - maybe_pool_id, - checked: Default::default(), - }) - .next() - }) { + let UnstakeRequest { stash, mut checked } = match Head::::take().or_else(|| { + // NOTE: there is no order guarantees in `Queue`. + Queue::::drain() + .map(|(stash, _)| UnstakeRequest { stash, checked: Default::default() }) + .next() + }) { None => { // There's no `Head` and nothing in the `Queue`, nothing to do here. return T::DbWeight::get().reads(4) @@ -392,48 +374,15 @@ pub mod pallet { // `stash` is not exposed in any era now -- we can let go of them now. 
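+				// Here "letting go" just force-unstakes the stash; the freed funds stay with the
+				// staker and are not bonded into a nomination pool on their behalf.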
let num_slashing_spans = Staking::::slashing_spans(&stash).iter().count() as u32; - let ctrl = match pallet_staking::Bonded::::get(&stash) { - Some(ctrl) => ctrl, - None => { - Self::deposit_event(Event::::Errored { stash }); - return ::WeightInfo::on_idle_unstake() - }, - }; - - let ledger = match pallet_staking::Ledger::::get(ctrl) { - Some(ledger) => ledger, - None => { - Self::deposit_event(Event::::Errored { stash }); - return ::WeightInfo::on_idle_unstake() - }, - }; - - let unstake_result = pallet_staking::Pallet::::force_unstake( + let result = pallet_staking::Pallet::::force_unstake( RawOrigin::Root.into(), stash.clone(), num_slashing_spans, ); - let pool_stake_result = if let Some(pool_id) = maybe_pool_id { - pallet_nomination_pools::Pallet::::join( - RawOrigin::Signed(stash.clone()).into(), - ledger.total, - pool_id, - ) - } else { - Ok(()) - }; + log!(info, "unstaked {:?}, outcome: {:?}", stash, result); - let result = unstake_result.and(pool_stake_result); - log!( - info, - "unstaked {:?}, maybe_pool {:?}, outcome: {:?}", - stash, - maybe_pool_id, - result - ); - - Self::deposit_event(Event::::Unstaked { stash, maybe_pool_id, result }); + Self::deposit_event(Event::::Unstaked { stash, result }); ::WeightInfo::on_idle_unstake() } else { // eras remaining to be checked. @@ -471,11 +420,7 @@ pub mod pallet { // Not exposed in these eras. match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { Ok(_) => { - Head::::put(UnstakeRequest { - stash: stash.clone(), - checked, - maybe_pool_id, - }); + Head::::put(UnstakeRequest { stash: stash.clone(), checked }); Self::deposit_event(Event::::Checking { stash, eras: unchecked_eras_to_check, diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index b9cf16e18e8d1..62f343709e245 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -17,19 +17,10 @@ use crate::{self as fast_unstake}; use frame_support::{ - assert_ok, - pallet_prelude::*, - parameter_types, - traits::{ConstU64, ConstU8, Currency}, - weights::constants::WEIGHT_PER_SECOND, - PalletId, -}; -use sp_runtime::{ - traits::{Convert, IdentityLookup}, - FixedU128, + pallet_prelude::*, parameter_types, traits::ConstU64, weights::constants::WEIGHT_PER_SECOND, }; +use sp_runtime::traits::{Convert, IdentityLookup}; -use frame_system::RawOrigin; use pallet_staking::{Exposure, IndividualExposure, StakerStatus}; use sp_std::prelude::*; @@ -153,7 +144,7 @@ impl pallet_staking::Config for Runtime { type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; - type OnStakerSlash = Pools; + type OnStakerSlash = (); type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; type WeightInfo = (); } @@ -172,29 +163,6 @@ impl Convert for U256ToBalance { } } -parameter_types! 
{ - pub const PostUnbondingPoolsWindow: u32 = 10; - pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); - pub static MaxMetadataLen: u32 = 10; - pub static CheckLevel: u8 = 255; -} - -impl pallet_nomination_pools::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = (); - type Currency = Balances; - type CurrencyBalance = Balance; - type RewardCounter = FixedU128; - type BalanceToU256 = BalanceToU256; - type U256ToBalance = U256ToBalance; - type StakingInterface = Staking; - type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; - type MaxMetadataLen = MaxMetadataLen; - type MaxUnbonding = ConstU32<8>; - type MaxPointsToBalance = ConstU8<10>; - type PalletId = PoolsPalletId; -} - parameter_types! { pub static SlashPerEra: u32 = 100; } @@ -218,7 +186,6 @@ frame_support::construct_runtime!( Timestamp: pallet_timestamp, Balances: pallet_balances, Staking: pallet_staking, - Pools: pallet_nomination_pools, FastUnstake: fast_unstake, } ); @@ -287,10 +254,6 @@ impl ExtBuilder { let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); - // create one default pool. - let _ = pallet_nomination_pools::GenesisConfig:: { ..Default::default() } - .assimilate_storage(&mut storage); - let validators_range = VALIDATOR_PREFIX..VALIDATOR_PREFIX + VALIDATORS_PER_ERA; let nominators_range = NOMINATOR_PREFIX..NOMINATOR_PREFIX + NOMINATORS_PER_VALIDATOR_PER_ERA; @@ -337,11 +300,6 @@ impl ExtBuilder { // because we read this value as a measure of how many validators we have. pallet_staking::ValidatorCount::::put(VALIDATORS_PER_ERA as u32); - - // make a pool - let amount_to_bond = Pools::depositor_min_bond(); - Balances::make_free_balance_be(&10, amount_to_bond * 5); - assert_ok!(Pools::create(RawOrigin::Signed(10).into(), amount_to_bond, 900, 901, 902)); }); ext } @@ -359,14 +317,12 @@ pub(crate) fn run_to_block(n: u64, on_idle: bool) { while System::block_number() < n { Balances::on_finalize(System::block_number()); Staking::on_finalize(System::block_number()); - Pools::on_finalize(System::block_number()); FastUnstake::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); Balances::on_initialize(System::block_number()); Staking::on_initialize(System::block_number()); - Pools::on_initialize(System::block_number()); FastUnstake::on_initialize(System::block_number()); if on_idle { FastUnstake::on_idle(System::block_number(), BlockWeights::get().max_block); diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs index a51c1acdf06eb..5586443ce797c 100644 --- a/frame/fast-unstake/src/tests.rs +++ b/frame/fast-unstake/src/tests.rs @@ -20,20 +20,15 @@ use super::*; use crate::{mock::*, types::*, weights::WeightInfo, Event}; use frame_support::{assert_noop, assert_ok, bounded_vec, pallet_prelude::*, traits::Currency}; -use pallet_nomination_pools::{BondedPools, LastPoolId, RewardPools}; use pallet_staking::{CurrentEra, IndividualExposure, RewardDestination}; -use sp_runtime::{traits::BadOrigin, DispatchError, ModuleError}; +use sp_runtime::traits::BadOrigin; use sp_staking::StakingInterface; #[test] fn test_setup_works() { ExtBuilder::default().build_and_execute(|| { - assert_eq!(BondedPools::::count(), 1); - assert_eq!(RewardPools::::count(), 1); assert_eq!(Staking::bonding_duration(), 3); - let last_pool = LastPoolId::::get(); - assert_eq!(last_pool, 1); }); } @@ -41,7 +36,7 @@ fn test_setup_works() { fn register_works() { ExtBuilder::default().build_and_execute(|| { // Controller account 
registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Ensure stash is in the queue. assert_ne!(Queue::::get(1), None); }); @@ -56,7 +51,7 @@ fn cannot_register_if_not_bonded() { } // Attempt to fast unstake. assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(1)), Error::::NotController ); }); @@ -66,10 +61,10 @@ fn cannot_register_if_not_bonded() { fn cannot_register_if_in_queue() { ExtBuilder::default().build_and_execute(|| { // Insert some Queue item - Queue::::insert(1, Some(1_u32)); + Queue::::insert(1, ()); // Cannot re-register, already in queue assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), Error::::AlreadyQueued ); }); @@ -79,10 +74,10 @@ fn cannot_register_if_in_queue() { fn cannot_register_if_head() { ExtBuilder::default().build_and_execute(|| { // Insert some Head item for stash - Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![], maybe_pool_id: None }); + Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); // Controller attempts to regsiter assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), Error::::AlreadyHead ); }); @@ -95,7 +90,7 @@ fn cannot_register_if_has_unlocking_chunks() { assert_ok!(Staking::unbond(RuntimeOrigin::signed(2), 50_u128)); // Cannot register for fast unstake with unlock chunks active assert_noop!( - FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32)), + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), Error::::NotFullyBonded ); }); @@ -105,7 +100,7 @@ fn cannot_register_if_has_unlocking_chunks() { fn deregister_works() { ExtBuilder::default().build_and_execute(|| { // Controller account registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Controller then changes mind and deregisters. assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(2))); // Ensure stash no longer exists in the queue. @@ -117,7 +112,7 @@ fn deregister_works() { fn cannot_deregister_if_not_controller() { ExtBuilder::default().build_and_execute(|| { // Controller account registers for fast unstake. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Stash tries to deregister. assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(1)), Error::::NotController); }); @@ -135,9 +130,9 @@ fn cannot_deregister_if_not_queued() { fn cannot_deregister_already_head() { ExtBuilder::default().build_and_execute(|| { // Controller attempts to register, should fail - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Insert some Head item for stash. 
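+		// (a head entry records just the stash and the eras that have already been checked)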
- Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![], maybe_pool_id: None }); + Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); // Controller attempts to deregister assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::AlreadyHead); }); @@ -169,15 +164,15 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // set up Queue item - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // call on_idle with no remaining weight FastUnstake::on_idle(System::block_number(), Weight::from_ref_time(0)); // assert nothing changed in Queue and Head assert_eq!(Head::::get(), None); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_eq!(Queue::::get(1), Some(())); }); } @@ -189,8 +184,8 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); assert_eq!(Queue::::count(), 1); assert_eq!(Head::::get(), None); @@ -209,7 +204,7 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); // when: another 1 era. @@ -225,11 +220,7 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); // when: then 5 eras, we only need 2 more. 
@@ -251,11 +242,7 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); // when: not enough weight to unstake: @@ -267,11 +254,7 @@ mod on_idle { assert_eq!(fast_unstake_events_since_last_call(), vec![]); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); // when: enough weight to get over at least one iteration: then we are unblocked and can @@ -287,7 +270,7 @@ mod on_idle { // then we finish the unbonding: assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) }] + vec![Event::Unstaked { stash: 1, result: Ok(()) }] ); assert_eq!(Head::::get(), None,); @@ -302,11 +285,11 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8), None)); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10))); assert_eq!(Queue::::count(), 5); assert_eq!(Head::::get(), None); @@ -317,11 +300,7 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); assert_eq!(Queue::::count(), 4); @@ -338,11 +317,7 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 5, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }), + Some(UnstakeRequest { stash: 5, checked: bounded_vec![3, 2, 1, 0] }), ); assert_eq!(Queue::::count(), 3); @@ -350,7 +325,7 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) }, + Event::Unstaked { stash: 1, result: Ok(()) }, Event::Checking { stash: 5, eras: vec![3, 2, 1, 0] } ] ); @@ -364,10 +339,10 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register multi accounts for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1))); - assert_eq!(Queue::::get(1), Some(Some(1))); - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4), Some(1))); - assert_eq!(Queue::::get(3), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); + assert_eq!(Queue::::get(3), Some(())); // assert 2 queue items are in Queue & None in Head to start with assert_eq!(Queue::::count(), 2); @@ -397,9 +372,9 @@ mod on_idle { fast_unstake_events_since_last_call(), 
vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) }, + Event::Unstaked { stash: 1, result: Ok(()) }, Event::Checking { stash: 3, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 3, maybe_pool_id: Some(1), result: Ok(()) }, + Event::Unstaked { stash: 3, result: Ok(()) }, ] ); @@ -409,14 +384,14 @@ mod on_idle { } #[test] - fn successful_unstake_without_pool_join() { + fn successful_unstake() { ExtBuilder::default().build_and_execute(|| { ErasToCheckPerBlock::::put(BondingDuration::get() + 1); CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); - assert_eq!(Queue::::get(1), Some(None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // process on idle next_block(true); @@ -427,11 +402,7 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); @@ -441,55 +412,7 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) } - ] - ); - assert_unstaked(&1); - }); - } - - #[test] - fn successful_unstake_joining_bad_pool() { - ExtBuilder::default().build_and_execute(|| { - ErasToCheckPerBlock::::put(BondingDuration::get() + 1); - CurrentEra::::put(BondingDuration::get()); - - // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(0))); - assert_eq!(Queue::::get(1), Some(Some(0))); - - // process on idle - next_block(true); - - // assert queue item has been moved to head - assert_eq!(Queue::::get(1), None); - - // assert head item present - assert_eq!( - Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(0) - }) - ); - - next_block(true); - assert_eq!(Head::::get(), None,); - - assert_eq!( - fast_unstake_events_since_last_call(), - vec![ - Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { - stash: 1, - maybe_pool_id: Some(0), - result: Err(DispatchError::Module(ModuleError { - index: 4, - error: [0, 0, 0, 0], - message: None - })) - } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); @@ -503,8 +426,8 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // process on idle next_block(true); @@ -515,11 +438,7 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); @@ -529,11 +448,10 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); - 
assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); }); } @@ -545,8 +463,8 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); - assert_eq!(Queue::::get(1), Some(Some(1))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); // process on idle next_block(true); @@ -557,40 +475,28 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); @@ -604,11 +510,10 @@ mod on_idle { Event::Checking { stash: 1, eras: vec![2] }, Event::Checking { stash: 1, eras: vec![1] }, Event::Checking { stash: 1, eras: vec![0] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); - assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); }); } @@ -623,39 +528,31 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); - assert_eq!(Queue::::get(1), Some(None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(Queue::::get(1), Some(())); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: None }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2], maybe_pool_id: None }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) ); // when: a new era happens right before one is free. @@ -670,7 +567,6 @@ mod on_idle { stash: 1, // note era 0 is pruned to keep the vector length sane. 
checked: bounded_vec![3, 2, 1, 4], - maybe_pool_id: None }) ); @@ -685,7 +581,7 @@ mod on_idle { Event::Checking { stash: 1, eras: vec![1] }, Event::Checking { stash: 1, eras: vec![0] }, Event::Checking { stash: 1, eras: vec![4] }, - Event::Unstaked { stash: 1, maybe_pool_id: None, result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); @@ -700,23 +596,19 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // register for fast unstake - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), Some(1_u32))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // process 2 blocks next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: Some(1) }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); // when @@ -726,21 +618,13 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) ); // then we register a new era. @@ -752,22 +636,14 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 4], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4] }) ); // progress to end next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 1, - checked: bounded_vec![3, 2, 4, 1], - maybe_pool_id: Some(1) - }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4, 1] }) ); // but notice that we don't care about era 0 instead anymore! we're done. @@ -781,12 +657,11 @@ mod on_idle { Event::Checking { stash: 1, eras: vec![2] }, Event::Checking { stash: 1, eras: vec![4] }, Event::Checking { stash: 1, eras: vec![1] }, - Event::Unstaked { stash: 1, maybe_pool_id: Some(1), result: Ok(()) } + Event::Unstaked { stash: 1, result: Ok(()) } ] ); assert_unstaked(&1); - assert!(pallet_nomination_pools::PoolMembers::::contains_key(&1)); }); } @@ -812,26 +687,18 @@ mod on_idle { assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); // register the exposed one. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); // a few blocks later, we realize they are slashed next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: exposed, - checked: bounded_vec![3], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3] }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: exposed, - checked: bounded_vec![3, 2], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -872,17 +739,13 @@ mod on_idle { assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); // register the exposed one. 
- assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); // a few blocks later, we realize they are slashed next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: exposed, - checked: bounded_vec![3, 2], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -909,10 +772,7 @@ mod on_idle { RuntimeOrigin::signed(VALIDATOR_PREFIX), vec![VALIDATOR_PREFIX] )); - assert_ok!(FastUnstake::register_fast_unstake( - RuntimeOrigin::signed(VALIDATOR_PREFIX), - None - )); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(VALIDATOR_PREFIX))); // but they indeed are exposed! assert!(pallet_staking::ErasStakers::::contains_key( @@ -943,17 +803,13 @@ mod on_idle { assert_ok!(Staking::validate(RuntimeOrigin::signed(42), Default::default())); // let them register: - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(42), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(42))); // 2 block's enough to unstake them. next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { - stash: 42, - checked: bounded_vec![3, 2, 1, 0], - maybe_pool_id: None - }) + Some(UnstakeRequest { stash: 42, checked: bounded_vec![3, 2, 1, 0] }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -962,7 +818,7 @@ mod on_idle { fast_unstake_events_since_last_call(), vec![ Event::Checking { stash: 42, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 42, maybe_pool_id: None, result: Ok(()) } + Event::Unstaked { stash: 42, result: Ok(()) } ] ); }); @@ -990,7 +846,7 @@ mod signed_extension { ExtBuilder::default().build_and_execute(|| { // given: stash for 2 is 1. // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // then // stash can't. @@ -1010,7 +866,7 @@ mod signed_extension { ExtBuilder::default().build_and_execute(|| { // given: stash for 2 is 1. // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2), None)); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); ErasToCheckPerBlock::::put(1); CurrentEra::::put(BondingDuration::get()); @@ -1018,7 +874,7 @@ mod signed_extension { assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3], maybe_pool_id: None }) + Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) ); // then diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index ae8702e56a842..e8d538dce4802 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -23,7 +23,6 @@ use frame_support::{ traits::{Currency, Get, IsSubType}, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; -use pallet_nomination_pools::PoolId; use scale_info::TypeInfo; use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidityError}; use sp_staking::EraIndex; @@ -42,8 +41,6 @@ pub struct UnstakeRequest, - /// The pool they wish to join, if any. 
- pub(crate) maybe_pool_id: Option, } #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo, RuntimeDebugNoBound)] From a0ec652e341f694f182a3c5fcc80d4c3fb280003 Mon Sep 17 00:00:00 2001 From: ZhiYong Date: Mon, 26 Sep 2022 15:46:59 +0800 Subject: [PATCH 174/348] Remove discarded blocks and states from database by default (#11983) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 1.Add pruning param "canonical" in sc-cli. 2.Make PruningMode's default value to ArchiveCanonical. * Update tests in sc-state-db. * Update tests in sc-state-db. * 1.Add a new value `AllWithNonFinalized` in `enum BlocksPruning` which Corresponds to `blocks_pruning 0` in CLI . 2.Change value `All` to `AllFinalized` in `enum BlocksPruning` and make it to keep full finalized block history. * Make some corresponding adjustments based on the content in the conversation. * Update client/db/src/lib.rs Co-authored-by: Bastian Köcher * Apply suggestions from code review. * 1.Change `blocks_pruning` to be like `state_pruning` . * Fmt and add some doc. * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher * Update client/cli/src/params/pruning_params.rs Co-authored-by: Bastian Köcher * Update doc. * Change `new_test_with_tx_storage` to take `BlocksPruning`. * Fmt Co-authored-by: Bastian Köcher --- bin/node/cli/benches/block_production.rs | 2 +- bin/node/cli/benches/transaction_pool.rs | 2 +- bin/node/testing/src/bench.rs | 2 +- client/cli/src/config.rs | 4 +- client/cli/src/params/pruning_params.rs | 31 +++- client/db/benches/state_access.rs | 2 +- client/db/src/lib.rs | 215 +++++++++++++++++++---- client/service/test/src/client/mod.rs | 4 +- client/service/test/src/lib.rs | 2 +- test-utils/client/src/lib.rs | 5 +- 10 files changed, 211 insertions(+), 58 deletions(-) diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 0a734fa447448..4fcebb123d9e3 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -74,7 +74,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, wasm_method: WasmExecutionMethod::Compiled { instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index e6084fba8242a..a8839642ddc26 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -68,7 +68,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, chain_spec: spec, wasm_method: WasmExecutionMethod::Interpreted, // NOTE: we enforce the use of the native runtime to make the errors more debuggable diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 7980cc102fb38..59f1fa94c9b20 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -392,7 +392,7 @@ impl BenchDb { trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: 
Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), - blocks_pruning: sc_client_db::BlocksPruning::All, + blocks_pruning: sc_client_db::BlocksPruning::KeepAll, }; let task_executor = TaskExecutor::new(); diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index bc5941914de89..fad2ec7bc4a93 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -251,11 +251,11 @@ pub trait CliConfiguration: Sized { /// Get the block pruning mode. /// /// By default this is retrieved from `block_pruning` if it is available. Otherwise its - /// `BlocksPruning::All`. + /// `BlocksPruning::KeepFinalized`. fn blocks_pruning(&self) -> Result { self.pruning_params() .map(|x| x.blocks_pruning()) - .unwrap_or_else(|| Ok(BlocksPruning::All)) + .unwrap_or_else(|| Ok(BlocksPruning::KeepFinalized)) } /// Get the chain ID (string). diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 34a0982e63d95..b764e4722e94d 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -30,13 +30,16 @@ pub struct PruningParams { /// or for all of the canonical blocks (i.e 'archive-canonical'). #[clap(alias = "pruning", long, value_name = "PRUNING_MODE")] pub state_pruning: Option, - /// Specify the number of finalized blocks to keep in the database. + /// Specify the blocks pruning mode, a number of blocks to keep or 'archive'. /// - /// Default is to keep all blocks. + /// Default is to keep all finalized blocks. + /// otherwise, all blocks can be kept (i.e 'archive'), + /// or for all canonical blocks (i.e 'archive-canonical'), + /// or for the last N blocks (i.e a number). /// /// NOTE: only finalized blocks are subject for removal! #[clap(alias = "keep-blocks", long, value_name = "COUNT")] - pub blocks_pruning: Option, + pub blocks_pruning: Option, } impl PruningParams { @@ -46,9 +49,12 @@ impl PruningParams { .as_ref() .map(|s| match s.as_str() { "archive" => Ok(PruningMode::ArchiveAll), + "archive-canonical" => Ok(PruningMode::ArchiveCanonical), bc => bc .parse() - .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string())) + .map_err(|_| { + error::Error::Input("Invalid state pruning mode specified".to_string()) + }) .map(PruningMode::blocks_pruning), }) .transpose() @@ -56,9 +62,18 @@ impl PruningParams { /// Get the block pruning value from the parameters pub fn blocks_pruning(&self) -> error::Result { - Ok(match self.blocks_pruning { - Some(n) => BlocksPruning::Some(n), - None => BlocksPruning::All, - }) + match self.blocks_pruning.as_ref() { + Some(bp) => match bp.as_str() { + "archive" => Ok(BlocksPruning::KeepAll), + "archive-canonical" => Ok(BlocksPruning::KeepFinalized), + bc => bc + .parse() + .map_err(|_| { + error::Error::Input("Invalid blocks pruning mode specified".to_string()) + }) + .map(BlocksPruning::Some), + }, + None => Ok(BlocksPruning::KeepFinalized), + } } } diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index 78aed7858e342..714dda82d61b7 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -122,7 +122,7 @@ fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend trie_cache_maximum_size, state_pruning: Some(PruningMode::ArchiveAll), source: DatabaseSource::ParityDb { path }, - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, }; Backend::new(settings, 100).expect("Creates backend") diff --git a/client/db/src/lib.rs 
b/client/db/src/lib.rs index 79ef7e9b6625d..32c4c9ef85ed9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -320,10 +320,12 @@ pub struct DatabaseSettings { } /// Block pruning settings. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, PartialEq)] pub enum BlocksPruning { - /// Keep full block history. - All, + /// Keep full block history, of every block that was ever imported. + KeepAll, + /// Keep full finalized block history. + KeepFinalized, /// Keep N recent finalized blocks. Some(u32), } @@ -1061,19 +1063,27 @@ impl Backend { /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self { - Self::new_test_with_tx_storage(blocks_pruning, canonicalization_delay) + Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay) } /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test_with_tx_storage(blocks_pruning: u32, canonicalization_delay: u64) -> Self { + pub fn new_test_with_tx_storage( + blocks_pruning: BlocksPruning, + canonicalization_delay: u64, + ) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); + let state_pruning = match blocks_pruning { + BlocksPruning::KeepAll => PruningMode::ArchiveAll, + BlocksPruning::KeepFinalized => PruningMode::ArchiveCanonical, + BlocksPruning::Some(n) => PruningMode::blocks_pruning(n), + }; let db_setting = DatabaseSettings { trie_cache_maximum_size: Some(16 * 1024 * 1024), - state_pruning: Some(PruningMode::blocks_pruning(blocks_pruning)), + state_pruning: Some(state_pruning), source: DatabaseSource::Custom { db, require_create_flag: true }, - blocks_pruning: BlocksPruning::Some(blocks_pruning), + blocks_pruning, }; Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") @@ -1707,32 +1717,47 @@ impl Backend { finalized: NumberFor, displaced: &FinalizationOutcome>, ) -> ClientResult<()> { - if let BlocksPruning::Some(blocks_pruning) = self.blocks_pruning { - // Always keep the last finalized block - let keep = std::cmp::max(blocks_pruning, 1); - if finalized >= keep.into() { - let number = finalized.saturating_sub(keep.into()); - self.prune_block(transaction, BlockId::::number(number))?; - } + match self.blocks_pruning { + BlocksPruning::KeepAll => {}, + BlocksPruning::Some(blocks_pruning) => { + // Always keep the last finalized block + let keep = std::cmp::max(blocks_pruning, 1); + if finalized >= keep.into() { + let number = finalized.saturating_sub(keep.into()); + self.prune_block(transaction, BlockId::::number(number))?; + } + self.prune_displaced_branches(transaction, finalized, displaced)?; + }, + BlocksPruning::KeepFinalized => { + self.prune_displaced_branches(transaction, finalized, displaced)?; + }, + } + Ok(()) + } - // Also discard all blocks from displaced branches - for h in displaced.leaves() { - let mut number = finalized; - let mut hash = *h; - // Follow displaced chains back until we reach a finalized block. - // Since leaves are discarded due to finality, they can't have parents - // that are canonical, but not yet finalized. So we stop deleting as soon as - // we reach canonical chain. - while self.blockchain.hash(number)? != Some(hash) { - let id = BlockId::::hash(hash); - match self.blockchain.header(id)? 
{ - Some(header) => { - self.prune_block(transaction, id)?; - number = header.number().saturating_sub(One::one()); - hash = *header.parent_hash(); - }, - None => break, - } + fn prune_displaced_branches( + &self, + transaction: &mut Transaction, + finalized: NumberFor, + displaced: &FinalizationOutcome>, + ) -> ClientResult<()> { + // Discard all blocks from displaced branches + for h in displaced.leaves() { + let mut number = finalized; + let mut hash = *h; + // Follow displaced chains back until we reach a finalized block. + // Since leaves are discarded due to finality, they can't have parents + // that are canonical, but not yet finalized. So we stop deleting as soon as + // we reach canonical chain. + while self.blockchain.hash(number)? != Some(hash) { + let id = BlockId::::hash(hash); + match self.blockchain.header(id)? { + Some(header) => { + self.prune_block(transaction, id)?; + number = header.number().saturating_sub(One::one()); + hash = *header.parent_hash(); + }, + None => break, } } } @@ -1752,6 +1777,13 @@ impl Backend { columns::BODY, id, )?; + utils::remove_from_db( + transaction, + &*self.storage.db, + columns::KEY_LOOKUP, + columns::JUSTIFICATIONS, + id, + )?; if let Some(index) = read_db(&*self.storage.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)? { @@ -2506,7 +2538,7 @@ pub(crate) mod tests { trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Some(PruningMode::blocks_pruning(1)), source: DatabaseSource::Custom { db: backing, require_create_flag: false }, - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepFinalized, }, 0, ) @@ -3176,7 +3208,7 @@ pub(crate) mod tests { #[test] fn prune_blocks_on_finalize() { - let backend = Backend::::new_test_with_tx_storage(2, 0); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 0); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3210,9 +3242,114 @@ pub(crate) mod tests { assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); } + #[test] + fn prune_blocks_on_finalize_in_keep_all() { + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 0); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; + } + + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + for i in 1..3 { + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + } + backend.commit_operation(op).unwrap(); + + let bc = backend.blockchain(); + assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + } + + #[test] + fn prune_blocks_on_finalize_with_fork_in_keep_all() { + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 10); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; 
+ } + + // insert a fork at block 2 + let fork_hash_root = insert_block( + &backend, + 2, + blocks[1], + None, + sp_core::H256::random(), + vec![2.into()], + None, + ) + .unwrap(); + insert_block( + &backend, + 3, + fork_hash_root, + None, + H256::random(), + vec![3.into(), 11.into()], + None, + ) + .unwrap(); + + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + op.mark_head(BlockId::Hash(blocks[4])).unwrap(); + backend.commit_operation(op).unwrap(); + + let bc = backend.blockchain(); + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap()); + + for i in 1..5 { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(blocks[i])).unwrap(); + op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + backend.commit_operation(op).unwrap(); + } + + assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + + assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap()); + assert_eq!(bc.info().best_number, 4); + for i in 0..5 { + assert!(bc.hash(i).unwrap().is_some()); + } + } + #[test] fn prune_blocks_on_finalize_with_fork() { - let backend = Backend::::new_test_with_tx_storage(2, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3273,7 +3410,7 @@ pub(crate) mod tests { #[test] fn indexed_data_block_body() { - let backend = Backend::::new_test_with_tx_storage(1, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); @@ -3315,7 +3452,7 @@ pub(crate) mod tests { #[test] fn index_invalid_size() { - let backend = Backend::::new_test_with_tx_storage(1, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); @@ -3350,7 +3487,7 @@ pub(crate) mod tests { #[test] fn renew_transaction_storage() { - let backend = Backend::::new_test_with_tx_storage(2, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); let x1 = ExtrinsicWrapper::from(0u64).encode(); @@ -3397,7 +3534,7 @@ pub(crate) mod tests { #[test] fn remove_leaf_block_works() { - let backend = Backend::::new_test_with_tx_storage(2, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..2 { diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 2ab1415f8ca31..e0f47110d9046 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1200,7 +1200,7 @@ fn doesnt_import_blocks_that_revert_finality() { DatabaseSettings { trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::ArchiveAll), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepAll, 
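(Editor's note, not part of the patch: the hunks above rework `BlocksPruning`; the sketch below restates the new variants and the `--blocks-pruning` CLI mapping from `pruning_params.rs` as a small, self-contained illustration. The local enum and parser are simplified stand-ins for the real `sc-client-db`/`sc-cli` items.)

// Illustrative stand-in for sc_client_db::BlocksPruning after this patch.
#[derive(Debug, Clone, Copy, PartialEq)]
enum BlocksPruning {
    /// Keep every block that was ever imported ("archive").
    KeepAll,
    /// Keep the full finalized chain only ("archive-canonical", the new default).
    KeepFinalized,
    /// Keep only the last N finalized blocks.
    Some(u32),
}

// Mirrors the `blocks_pruning()` parsing added to pruning_params.rs above.
fn blocks_pruning(arg: Option<&str>) -> Result<BlocksPruning, String> {
    match arg {
        None => Ok(BlocksPruning::KeepFinalized),
        Some("archive") => Ok(BlocksPruning::KeepAll),
        Some("archive-canonical") => Ok(BlocksPruning::KeepFinalized),
        Some(n) => n
            .parse()
            .map(BlocksPruning::Some)
            .map_err(|_| "Invalid blocks pruning mode specified".to_string()),
    }
}

fn main() {
    assert_eq!(blocks_pruning(None), Ok(BlocksPruning::KeepFinalized));
    assert_eq!(blocks_pruning(Some("archive")), Ok(BlocksPruning::KeepAll));
    assert_eq!(blocks_pruning(Some("256")), Ok(BlocksPruning::Some(256)));
}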
source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, @@ -1426,7 +1426,7 @@ fn returns_status_for_pruned_blocks() { DatabaseSettings { trie_cache_maximum_size: Some(1 << 20), state_pruning: Some(PruningMode::blocks_pruning(1)), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepFinalized, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, u64::MAX, diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 11c1cbaf7afb1..23245d46cba10 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -237,7 +237,7 @@ fn node_config< database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Default::default(), - blocks_pruning: BlocksPruning::All, + blocks_pruning: BlocksPruning::KeepFinalized, chain_spec: Box::new((*spec).clone()), wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, wasm_runtime_overrides: Default::default(), diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index be4549c9957c0..d3e71f0ad28d6 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -26,7 +26,7 @@ pub use sc_client_api::{ execution_extensions::{ExecutionExtensions, ExecutionStrategies}, BadBlocks, ForkBlocks, }; -pub use sc_client_db::{self, Backend}; +pub use sc_client_db::{self, Backend, BlocksPruning}; pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod}; pub use sc_service::{client, RpcHandlers}; pub use sp_consensus; @@ -102,7 +102,8 @@ impl /// Create new `TestClientBuilder` with default backend and storage chain mode pub fn with_tx_storage(blocks_pruning: u32) -> Self { - let backend = Arc::new(Backend::new_test_with_tx_storage(blocks_pruning, 0)); + let backend = + Arc::new(Backend::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), 0)); Self::with_backend(backend) } } From 0dbeaa0e98e78a848046a815f979bfce67f98c6d Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 26 Sep 2022 12:08:34 +0100 Subject: [PATCH 175/348] re-export weight file for fast-unstsake pallet (#12352) --- frame/fast-unstake/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 5acc9940debf1..9bfb29f8457fa 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -90,7 +90,7 @@ pub mod pallet { }; use sp_staking::EraIndex; use sp_std::{prelude::*, vec::Vec}; - use weights::WeightInfo; + pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] #[codec(mel_bound(T: Config))] From f360c87073cb5f59b7b55cfbfef5d91a40d1a217 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Mon, 26 Sep 2022 15:10:09 +0300 Subject: [PATCH 176/348] Move transactions protocol to its own crate (#12264) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Move transaction protocol to its own crate * Update Cargo.lock * Fix binaries * Update client/network/transactions/src/lib.rs Co-authored-by: Dmitry Markin * Update client/service/src/builder.rs Co-authored-by: Bastian Köcher * Apply review comments * Revert one change and apply cargo-fmt * Remove Transaction from Message * Add array-bytes * trigger CI * Add comment about codec index Co-authored-by: Dmitry 
Markin Co-authored-by: Bastian Köcher --- Cargo.lock | 24 ++ bin/node-template/node/src/service.rs | 3 +- bin/node/cli/src/service.rs | 3 +- client/beefy/Cargo.toml | 1 + client/beefy/src/lib.rs | 4 +- client/cli/Cargo.toml | 1 + client/cli/src/params/network_params.rs | 5 +- client/finality-grandpa/src/lib.rs | 8 +- client/network/common/Cargo.toml | 3 + client/network/common/src/config.rs | 128 +++++++++++ client/network/{ => common}/src/error.rs | 3 +- client/network/common/src/lib.rs | 8 + client/network/common/src/service.rs | 29 --- client/network/{ => common}/src/utils.rs | 0 client/network/src/config.rs | 209 +----------------- client/network/src/discovery.rs | 3 +- client/network/src/lib.rs | 13 +- client/network/src/peer_info.rs | 2 +- client/network/src/protocol.rs | 13 +- client/network/src/protocol/message.rs | 10 +- client/network/src/service.rs | 77 ++----- client/network/src/service/tests.rs | 56 ++--- client/network/test/src/lib.rs | 14 +- client/network/transactions/Cargo.toml | 28 +++ client/network/transactions/src/config.rs | 98 ++++++++ .../src/lib.rs} | 108 ++++----- client/service/Cargo.toml | 1 + client/service/src/builder.rs | 62 ++++-- client/service/src/config.rs | 6 +- client/service/src/error.rs | 3 +- client/service/src/lib.rs | 7 +- client/service/test/src/lib.rs | 7 +- 32 files changed, 466 insertions(+), 471 deletions(-) rename client/network/{ => common}/src/error.rs (96%) rename client/network/{ => common}/src/utils.rs (100%) create mode 100644 client/network/transactions/Cargo.toml create mode 100644 client/network/transactions/src/config.rs rename client/network/{src/transactions.rs => transactions/src/lib.rs} (84%) diff --git a/Cargo.lock b/Cargo.lock index 6ea79a120361e..a9a0eef551179 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -468,6 +468,7 @@ dependencies = [ "sc-finality-grandpa", "sc-keystore", "sc-network", + "sc-network-common", "sc-network-gossip", "sc-network-test", "sc-utils", @@ -7934,6 +7935,7 @@ dependencies = [ "sc-client-db", "sc-keystore", "sc-network", + "sc-network-common", "sc-service", "sc-telemetry", "sc-tracing", @@ -8547,7 +8549,9 @@ dependencies = [ "bitflags", "bytes", "futures", + "futures-timer", "libp2p", + "linked_hash_set", "parity-scale-codec", "prost-build 0.10.4", "sc-consensus", @@ -8558,6 +8562,7 @@ dependencies = [ "sp-consensus", "sp-finality-grandpa", "sp-runtime", + "substrate-prometheus-endpoint", "thiserror", ] @@ -8663,6 +8668,24 @@ dependencies = [ "substrate-test-runtime-client", ] +[[package]] +name = "sc-network-transactions" +version = "0.10.0-dev" +dependencies = [ + "array-bytes", + "futures", + "hex", + "libp2p", + "log", + "parity-scale-codec", + "pin-project", + "sc-network-common", + "sc-peerset", + "sp-consensus", + "sp-runtime", + "substrate-prometheus-endpoint", +] + [[package]] name = "sc-offchain" version = "4.0.0-dev" @@ -8851,6 +8874,7 @@ dependencies = [ "sc-network-common", "sc-network-light", "sc-network-sync", + "sc-network-transactions", "sc-offchain", "sc-rpc", "sc-rpc-server", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 6ec9a33749a69..96de6e17f3bfd 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -191,7 +191,7 @@ pub fn new_full(mut config: Configuration) -> Result Vec::default(), )); - let (network, system_rpc_tx, network_starter) = + let (network, system_rpc_tx, tx_handler_controller, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, 
client: client.clone(), @@ -238,6 +238,7 @@ pub fn new_full(mut config: Configuration) -> Result rpc_builder: rpc_extensions_builder, backend, system_rpc_tx, + tx_handler_controller, config, telemetry: telemetry.as_mut(), })?; diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index a3098eac6402f..6c29f0c08ee13 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -354,7 +354,7 @@ pub fn new_full_base( Vec::default(), )); - let (network, system_rpc_tx, network_starter) = + let (network, system_rpc_tx, tx_handler_controller, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), @@ -392,6 +392,7 @@ pub fn new_full_base( transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, system_rpc_tx, + tx_handler_controller, telemetry: telemetry.as_mut(), })?; diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index 47a3be859cbbb..a125d4c8d4f07 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -27,6 +27,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-finality-grandpa = { version = "0.10.0-dev", path = "../../client/finality-grandpa" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" } sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 41eeec43d64bd..ad527b2929585 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -83,8 +83,8 @@ pub(crate) mod beefy_protocol_name { /// For standard protocol name see [`beefy_protocol_name::standard_name`]. 
pub fn beefy_peers_set_config( protocol_name: ProtocolName, -) -> sc_network::config::NonDefaultSetConfig { - let mut cfg = sc_network::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); +) -> sc_network_common::config::NonDefaultSetConfig { + let mut cfg = sc_network_common::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); cfg.allow_non_reserved(25, 25); cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index e5cd6167596c0..37a8fd2e0b64d 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -34,6 +34,7 @@ sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-service = { version = "0.10.0-dev", default-features = false, path = "../service" } sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 74c2db92c3215..0450b5f0e2566 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -19,11 +19,10 @@ use crate::{arg_enums::SyncMode, params::node_key_params::NodeKeyParams}; use clap::Args; use sc_network::{ - config::{ - NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig, - }, + config::{NetworkConfiguration, NodeKeyConfig}, multiaddr::Protocol, }; +use sc_network_common::config::{NonReservedPeerMode, SetConfig, TransportConfig}; use sc_service::{ config::{Multiaddr, MultiaddrWithPeerId}, ChainSpec, ChainType, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 7e47b70bd6b98..d5c05fea78aa2 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -688,18 +688,18 @@ pub struct GrandpaParams { /// For standard protocol name see [`crate::protocol_standard_name`]. pub fn grandpa_peers_set_config( protocol_name: ProtocolName, -) -> sc_network::config::NonDefaultSetConfig { +) -> sc_network_common::config::NonDefaultSetConfig { use communication::grandpa_protocol_name; - sc_network::config::NonDefaultSetConfig { + sc_network_common::config::NonDefaultSetConfig { notifications_protocol: protocol_name, fallback_names: grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. 
max_notification_size: 1024 * 1024, - set_config: sc_network::config::SetConfig { + set_config: sc_network_common::config::SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), - non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny, + non_reserved_mode: sc_network_common::config::NonReservedPeerMode::Deny, }, } } diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 47d43e8b4b03f..1ee7b15538366 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -24,7 +24,10 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" +futures-timer = "3.0.2" libp2p = "0.46.1" +linked_hash_set = "0.1.3" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } smallvec = "1.8.0" sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index 8b7e045780d7d..fb23cd0174922 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -18,6 +18,8 @@ //! Configuration of the networking layer. +use crate::protocol; + use libp2p::{multiaddr, Multiaddr, PeerId}; use std::{fmt, str, str::FromStr}; @@ -171,3 +173,129 @@ impl From for ParseErr { Self::MultiaddrParse(err) } } + +/// Configuration for a set of nodes. +#[derive(Clone, Debug)] +pub struct SetConfig { + /// Maximum allowed number of incoming substreams related to this set. + pub in_peers: u32, + /// Number of outgoing substreams related to this set that we're trying to maintain. + pub out_peers: u32, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// Whether nodes that aren't in [`SetConfig::reserved_nodes`] are accepted or automatically + /// refused. + pub non_reserved_mode: NonReservedPeerMode, +} + +impl Default for SetConfig { + fn default() -> Self { + Self { + in_peers: 25, + out_peers: 75, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + } + } +} + +/// Extension to [`SetConfig`] for sets that aren't the default set. +/// +/// > **Note**: As new fields might be added in the future, please consider using the `new` method +/// > and modifiers instead of creating this struct manually. +#[derive(Clone, Debug)] +pub struct NonDefaultSetConfig { + /// Name of the notifications protocols of this set. A substream on this set will be + /// considered established once this protocol is open. + /// + /// > **Note**: This field isn't present for the default set, as this is handled internally + /// > by the networking code. + pub notifications_protocol: protocol::ProtocolName, + /// If the remote reports that it doesn't support the protocol indicated in the + /// `notifications_protocol` field, then each of these fallback names will be tried one by + /// one. + /// + /// If a fallback is used, it will be reported in + /// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback` + pub fallback_names: Vec, + /// Maximum allowed size of single notifications. + pub max_notification_size: u64, + /// Base configuration. + pub set_config: SetConfig, +} + +impl NonDefaultSetConfig { + /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. 
+ pub fn new(notifications_protocol: protocol::ProtocolName, max_notification_size: u64) -> Self { + Self { + notifications_protocol, + max_notification_size, + fallback_names: Vec::new(), + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + } + } + + /// Modifies the configuration to allow non-reserved nodes. + pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) { + self.set_config.in_peers = in_peers; + self.set_config.out_peers = out_peers; + self.set_config.non_reserved_mode = NonReservedPeerMode::Accept; + } + + /// Add a node to the list of reserved nodes. + pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) { + self.set_config.reserved_nodes.push(peer); + } + + /// Add a list of protocol names used for backward compatibility. + /// + /// See the explanations in [`NonDefaultSetConfig::fallback_names`]. + pub fn add_fallback_names(&mut self, fallback_names: Vec) { + self.fallback_names.extend(fallback_names); + } +} + +/// Configuration for the transport layer. +#[derive(Clone, Debug)] +pub enum TransportConfig { + /// Normal transport mode. + Normal { + /// If true, the network will use mDNS to discover other libp2p nodes on the local network + /// and connect to them if they support the same chain. + enable_mdns: bool, + + /// If true, allow connecting to private IPv4 addresses (as defined in + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have + /// been passed in `::sc_network::config::NetworkConfiguration::boot_nodes`. + allow_private_ipv4: bool, + }, + + /// Only allow connections within the same process. + /// Only addresses of the form `/memory/...` will be supported. + MemoryOnly, +} + +/// The policy for connections to non-reserved peers. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum NonReservedPeerMode { + /// Accept them. This is the default. + Accept, + /// Deny them. + Deny, +} + +impl NonReservedPeerMode { + /// Attempt to parse the peer mode from a string. + pub fn parse(s: &str) -> Option { + match s { + "accept" => Some(Self::Accept), + "deny" => Some(Self::Deny), + _ => None, + } + } +} diff --git a/client/network/src/error.rs b/client/network/common/src/error.rs similarity index 96% rename from client/network/src/error.rs rename to client/network/common/src/error.rs index b4287ffbd55db..4326b1af52836 100644 --- a/client/network/src/error.rs +++ b/client/network/common/src/error.rs @@ -18,9 +18,8 @@ //! Substrate network possible errors. -use crate::config::TransportConfig; +use crate::{config::TransportConfig, protocol::ProtocolName}; use libp2p::{Multiaddr, PeerId}; -use sc_network_common::protocol::ProtocolName; use std::fmt; diff --git a/client/network/common/src/lib.rs b/client/network/common/src/lib.rs index 3a30d24900199..36e67f11e5cff 100644 --- a/client/network/common/src/lib.rs +++ b/client/network/common/src/lib.rs @@ -19,8 +19,16 @@ //! Common data structures of the networking layer. 
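(Editor's note, not part of the patch: after this move the peer-set configuration types live in `sc-network-common`, so downstream crates such as BEEFY and GRANDPA build them as sketched below. This mirrors `beefy_peers_set_config` from the hunk above, assumes `sc-network-common` as a dependency, and is illustrative only.)

use sc_network_common::{config::NonDefaultSetConfig, protocol::ProtocolName};

// Build a notifications peer-set the way the BEEFY/GRANDPA helpers above now do:
// `new()` starts with zero slots and reserved-only peers; opt back in explicitly.
fn my_peers_set_config(protocol_name: ProtocolName) -> NonDefaultSetConfig {
    let mut cfg = NonDefaultSetConfig::new(protocol_name, 1024 * 1024);
    cfg.allow_non_reserved(25, 25);
    cfg
}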
pub mod config; +pub mod error; pub mod message; pub mod protocol; pub mod request_responses; pub mod service; pub mod sync; +pub mod utils; + +/// Minimum Requirements for a Hash within Networking +pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} + +impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static +{} diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs index 88583832e4c38..aa4967ba51700 100644 --- a/client/network/common/src/service.rs +++ b/client/network/common/src/service.rs @@ -604,35 +604,6 @@ where } } -/// Provides ability to propagate transactions over the network. -pub trait NetworkTransaction { - /// You may call this when new transactions are imported by the transaction pool. - /// - /// All transactions will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - fn trigger_repropagate(&self); - - /// You must call when new transaction is imported by the transaction pool. - /// - /// This transaction will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - fn propagate_transaction(&self, hash: H); -} - -impl NetworkTransaction for Arc -where - T: ?Sized, - T: NetworkTransaction, -{ - fn trigger_repropagate(&self) { - T::trigger_repropagate(self) - } - - fn propagate_transaction(&self, hash: H) { - T::propagate_transaction(self, hash) - } -} - /// Provides ability to announce blocks to the network. pub trait NetworkBlock { /// Make sure an important block is propagated to peers. diff --git a/client/network/src/utils.rs b/client/network/common/src/utils.rs similarity index 100% rename from client/network/src/utils.rs rename to client/network/common/src/utils.rs diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 202a628884d79..b2adfa81d065b 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -27,24 +27,24 @@ pub use sc_network_common::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, sync::warp::WarpSyncProvider, + ExHashT, }; pub use libp2p::{build_multiaddr, core::PublicKey, identity}; -use crate::ExHashT; - use core::{fmt, iter}; -use futures::future; use libp2p::{ identity::{ed25519, Keypair}, multiaddr, Multiaddr, }; use prometheus_endpoint::Registry; use sc_consensus::ImportQueue; -use sc_network_common::{config::MultiaddrWithPeerId, protocol::ProtocolName, sync::ChainSync}; +use sc_network_common::{ + config::{MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig}, + sync::ChainSync, +}; use sp_runtime::traits::Block as BlockT; use std::{ - collections::HashMap, error::Error, fs, future::Future, @@ -52,16 +52,14 @@ use std::{ net::Ipv4Addr, path::{Path, PathBuf}, pin::Pin, - str, sync::Arc, }; use zeroize::Zeroize; /// Network initialization parameters. -pub struct Params +pub struct Params where B: BlockT + 'static, - H: ExHashT, { /// Assigned role for our node (full, light, ...). pub role: Role, @@ -70,21 +68,12 @@ where /// default. pub executor: Option + Send>>) + Send>>, - /// How to spawn the background task dedicated to the transactions handler. - pub transactions_handler_executor: Box + Send>>) + Send>, - /// Network layer configuration. pub network_config: NetworkConfiguration, /// Client that contains the blockchain. pub chain: Arc, - /// Pool of transactions. 
- /// - /// The network worker will fetch transactions from this object in order to propagate them on - /// the network. - pub transaction_pool: Arc>, - /// Legacy name of the protocol to use on the wire. Should be different for each chain. pub protocol_id: ProtocolId, @@ -166,66 +155,6 @@ impl fmt::Display for Role { } } -/// Result of the transaction import. -#[derive(Clone, Copy, Debug)] -pub enum TransactionImport { - /// Transaction is good but already known by the transaction pool. - KnownGood, - /// Transaction is good and not yet known. - NewGood, - /// Transaction is invalid. - Bad, - /// Transaction import was not performed. - None, -} - -/// Future resolving to transaction import result. -pub type TransactionImportFuture = Pin + Send>>; - -/// Transaction pool interface -pub trait TransactionPool: Send + Sync { - /// Get transactions from the pool that are ready to be propagated. - fn transactions(&self) -> Vec<(H, B::Extrinsic)>; - /// Get hash of transaction. - fn hash_of(&self, transaction: &B::Extrinsic) -> H; - /// Import a transaction into the pool. - /// - /// This will return future. - fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture; - /// Notify the pool about transactions broadcast. - fn on_broadcasted(&self, propagations: HashMap>); - /// Get transaction by hash. - fn transaction(&self, hash: &H) -> Option; -} - -/// Dummy implementation of the [`TransactionPool`] trait for a transaction pool that is always -/// empty and discards all incoming transactions. -/// -/// Requires the "hash" type to implement the `Default` trait. -/// -/// Useful for testing purposes. -pub struct EmptyTransactionPool; - -impl TransactionPool for EmptyTransactionPool { - fn transactions(&self) -> Vec<(H, B::Extrinsic)> { - Vec::new() - } - - fn hash_of(&self, _transaction: &B::Extrinsic) -> H { - Default::default() - } - - fn import(&self, _transaction: B::Extrinsic) -> TransactionImportFuture { - Box::pin(future::ready(TransactionImport::KnownGood)) - } - - fn on_broadcasted(&self, _: HashMap>) {} - - fn transaction(&self, _h: &H) -> Option { - None - } -} - /// Sync operation mode. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum SyncMode { @@ -394,132 +323,6 @@ impl NetworkConfiguration { } } -/// Configuration for a set of nodes. -#[derive(Clone, Debug)] -pub struct SetConfig { - /// Maximum allowed number of incoming substreams related to this set. - pub in_peers: u32, - /// Number of outgoing substreams related to this set that we're trying to maintain. - pub out_peers: u32, - /// List of reserved node addresses. - pub reserved_nodes: Vec, - /// Whether nodes that aren't in [`SetConfig::reserved_nodes`] are accepted or automatically - /// refused. - pub non_reserved_mode: NonReservedPeerMode, -} - -impl Default for SetConfig { - fn default() -> Self { - Self { - in_peers: 25, - out_peers: 75, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Accept, - } - } -} - -/// Extension to [`SetConfig`] for sets that aren't the default set. -/// -/// > **Note**: As new fields might be added in the future, please consider using the `new` method -/// > and modifiers instead of creating this struct manually. -#[derive(Clone, Debug)] -pub struct NonDefaultSetConfig { - /// Name of the notifications protocols of this set. A substream on this set will be - /// considered established once this protocol is open. - /// - /// > **Note**: This field isn't present for the default set, as this is handled internally - /// > by the networking code. 
- pub notifications_protocol: ProtocolName, - /// If the remote reports that it doesn't support the protocol indicated in the - /// `notifications_protocol` field, then each of these fallback names will be tried one by - /// one. - /// - /// If a fallback is used, it will be reported in - /// [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. - pub fallback_names: Vec, - /// Maximum allowed size of single notifications. - pub max_notification_size: u64, - /// Base configuration. - pub set_config: SetConfig, -} - -impl NonDefaultSetConfig { - /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. - pub fn new(notifications_protocol: ProtocolName, max_notification_size: u64) -> Self { - Self { - notifications_protocol, - max_notification_size, - fallback_names: Vec::new(), - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - } - } - - /// Modifies the configuration to allow non-reserved nodes. - pub fn allow_non_reserved(&mut self, in_peers: u32, out_peers: u32) { - self.set_config.in_peers = in_peers; - self.set_config.out_peers = out_peers; - self.set_config.non_reserved_mode = NonReservedPeerMode::Accept; - } - - /// Add a node to the list of reserved nodes. - pub fn add_reserved(&mut self, peer: MultiaddrWithPeerId) { - self.set_config.reserved_nodes.push(peer); - } - - /// Add a list of protocol names used for backward compatibility. - /// - /// See the explanations in [`NonDefaultSetConfig::fallback_names`]. - pub fn add_fallback_names(&mut self, fallback_names: Vec) { - self.fallback_names.extend(fallback_names); - } -} - -/// Configuration for the transport layer. -#[derive(Clone, Debug)] -pub enum TransportConfig { - /// Normal transport mode. - Normal { - /// If true, the network will use mDNS to discover other libp2p nodes on the local network - /// and connect to them if they support the same chain. - enable_mdns: bool, - - /// If true, allow connecting to private IPv4 addresses (as defined in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have - /// been passed in [`NetworkConfiguration::boot_nodes`]. - allow_private_ipv4: bool, - }, - - /// Only allow connections within the same process. - /// Only addresses of the form `/memory/...` will be supported. - MemoryOnly, -} - -/// The policy for connections to non-reserved peers. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum NonReservedPeerMode { - /// Accept them. This is the default. - Accept, - /// Deny them. - Deny, -} - -impl NonReservedPeerMode { - /// Attempt to parse the peer mode from a string. - pub fn parse(s: &str) -> Option { - match s { - "accept" => Some(Self::Accept), - "deny" => Some(Self::Deny), - _ => None, - } - } -} - /// The configuration of a node's secret key, describing the type of key /// and how it is obtained. A node's identity keypair is the result of /// the evaluation of the node key configuration. diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index ab93662968dc2..8422e34485125 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -46,7 +46,6 @@ //! active mechanism that asks nodes for the addresses they are listening on. Whenever we learn //! of a node's address, you must call `add_self_reported_address`. 
-use crate::utils::LruHashSet; use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; @@ -72,7 +71,7 @@ use libp2p::{ }, }; use log::{debug, error, info, trace, warn}; -use sc_network_common::config::ProtocolId; +use sc_network_common::{config::ProtocolId, utils::LruHashSet}; use sp_core::hexdisplay::HexDisplay; use std::{ cmp, diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 320104d0f9554..d17f47328b804 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -251,12 +251,9 @@ mod protocol; mod request_responses; mod service; mod transport; -mod utils; pub mod config; -pub mod error; pub mod network_state; -pub mod transactions; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; @@ -269,8 +266,8 @@ pub use sc_network_common::{ request_responses::{IfDisconnected, RequestFailure}, service::{ KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkRequest, NetworkSigner, - NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, - NetworkTransaction, Signature, SigningError, + NetworkStateInfo, NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, Signature, + SigningError, }, sync::{ warp::{WarpSyncPhase, WarpSyncProgress}, @@ -295,9 +292,3 @@ const MAX_CONNECTIONS_PER_PEER: usize = 2; /// The maximum number of concurrent established connections that were incoming. const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; - -/// Minimum Requirements for a Hash within Networking -pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} - -impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static -{} diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index d668cb25ea455..c62c2ea1c5d98 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -16,7 +16,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::utils::interval; use fnv::FnvHashMap; use futures::prelude::*; use libp2p::{ @@ -33,6 +32,7 @@ use libp2p::{ Multiaddr, }; use log::{debug, error, trace}; +use sc_network_common::utils::interval; use smallvec::SmallVec; use std::{ collections::hash_map::Entry, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c7a3cf4b2160f..fbf651de9d49a 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -16,10 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{ - config, error, - utils::{interval, LruHashSet}, -}; +use crate::config; use bytes::Bytes; use codec::{Decode, DecodeAll, Encode}; @@ -45,7 +42,8 @@ use sc_consensus::import_queue::{ BlockImportError, BlockImportStatus, IncomingBlock, RuntimeOrigin, }; use sc_network_common::{ - config::ProtocolId, + config::{NonReservedPeerMode, ProtocolId}, + error, protocol::ProtocolName, request_responses::RequestFailure, sync::{ @@ -57,6 +55,7 @@ use sc_network_common::{ OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PollBlockAnnounceValidation, SyncStatus, }, + utils::{interval, LruHashSet}, }; use sp_arithmetic::traits::SaturatedConversion; use sp_consensus::BlockOrigin; @@ -341,7 +340,7 @@ where bootnodes, reserved_nodes: default_sets_reserved.clone(), reserved_only: network_config.default_peers_set.non_reserved_mode == - config::NonReservedPeerMode::Deny, + NonReservedPeerMode::Deny, }); for set_cfg in &network_config.extra_sets { @@ -352,7 +351,7 @@ where } let reserved_only = - set_cfg.set_config.non_reserved_mode == config::NonReservedPeerMode::Deny; + set_cfg.set_config.non_reserved_mode == NonReservedPeerMode::Deny; sets.push(sc_peerset::SetConfig { in_peers: set_cfg.set_config.in_peers, diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 50c4a264a5f95..3e1281753b82c 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -36,9 +36,6 @@ pub type Message = generic::Message< ::Extrinsic, >; -/// A set of transactions. -pub type Transactions = Vec; - /// Remote call response. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct RemoteCallResponse { @@ -59,7 +56,7 @@ pub struct RemoteReadResponse { /// Generic types. pub mod generic { - use super::{RemoteCallResponse, RemoteReadResponse, Transactions}; + use super::{RemoteCallResponse, RemoteReadResponse}; use bitflags::bitflags; use codec::{Decode, Encode, Input, Output}; use sc_client_api::StorageProof; @@ -146,9 +143,10 @@ pub mod generic { BlockResponse(BlockResponse), /// Block announce. BlockAnnounce(BlockAnnounce

), - /// Transactions. - Transactions(Transactions), /// Consensus protocol message. + // NOTE: index is incremented by 1 due to transaction-related + // message that was removed + #[codec(index = 6)] Consensus(ConsensusMessage), /// Remote method call request. RemoteCallRequest(RemoteCallRequest), diff --git a/client/network/src/service.rs b/client/network/src/service.rs index dceb57d9e695c..180482e75ece2 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -29,9 +29,8 @@ use crate::{ behaviour::{self, Behaviour, BehaviourOut}, - config::{Params, TransportConfig}, + config::Params, discovery::DiscoveryConfig, - error::Error, network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, @@ -39,7 +38,7 @@ use crate::{ self, message::generic::Roles, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready, }, - transactions, transport, ExHashT, ReputationChange, + transport, ReputationChange, }; use codec::Encode as _; @@ -60,7 +59,8 @@ use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_network_common::{ - config::MultiaddrWithPeerId, + config::{MultiaddrWithPeerId, TransportConfig}, + error::Error, protocol::{ event::{DhtEvent, Event}, ProtocolName, @@ -73,6 +73,7 @@ use sc_network_common::{ NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, }, sync::{SyncState, SyncStatus}, + ExHashT, }; use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -101,7 +102,7 @@ mod out_events; mod tests; pub use libp2p::identity::{error::DecodingError, Keypair, PublicKey}; -use sc_network_common::service::{NetworkBlock, NetworkRequest, NetworkTransaction}; +use sc_network_common::service::{NetworkBlock, NetworkRequest}; /// Substrate network service. Handles network IO and manages connectivity. pub struct NetworkService { @@ -121,7 +122,7 @@ pub struct NetworkService { /// nodes it should be connected to or not. peerset: PeersetHandle, /// Channel that sends messages to the actual worker. - to_worker: TracingUnboundedSender>, + to_worker: TracingUnboundedSender>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. peers_notifications_sinks: Arc>>, @@ -144,7 +145,7 @@ where /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new(mut params: Params) -> Result { + pub fn new(mut params: Params) -> Result { // Private and public keys configuration. 
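(Editor's note, not part of the patch: the `#[codec(index = 6)]` added in the `message.rs` hunk above keeps `Consensus` at its pre-removal wire index once the transactions variant is dropped. The self-contained sketch below shows the general technique with simplified stand-in types; it assumes the `parity-scale-codec` crate, aliased as `codec` with the `derive` feature, as in this repository.)

use codec::{Decode, Encode};

// Variants of a SCALE-encoded enum get implicit indices 0, 1, 2, ... in declaration
// order, so removing a variant would silently renumber the ones after it. Pinning
// the old index keeps old and new nodes wire-compatible.
#[derive(Encode, Decode, Debug, PartialEq)]
enum Message {
    Status(u32), // implicit index 0
    // variants 1..=4 elided in this sketch; a transactions variant at index 5 was removed
    #[codec(index = 6)]
    Consensus(Vec<u8>), // still encodes with index 6, as before the removal
}

fn main() {
    let encoded = Message::Consensus(vec![1, 2, 3]).encode();
    // The first byte of the encoding is the variant index: unchanged at 6.
    assert_eq!(encoded[0], 6);
    let decoded = Message::decode(&mut &encoded[..]).expect("round-trips");
    assert_eq!(decoded, Message::Consensus(vec![1, 2, 3]));
}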
let local_identity = params.network_config.node_key.clone().into_keypair()?; let local_public = local_identity.public(); @@ -215,21 +216,6 @@ where fs::create_dir_all(path)?; } - let transactions_handler_proto = transactions::TransactionsHandlerPrototype::new( - params.protocol_id.clone(), - params - .chain - .hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - params.fork_id.clone(), - ); - params - .network_config - .extra_sets - .insert(0, transactions_handler_proto.set_config()); - info!( target: "sub-libp2p", "🏷 Local node identity is: {}", @@ -244,11 +230,8 @@ where params.protocol_id.clone(), ¶ms.fork_id, ¶ms.network_config, - iter::once(Vec::new()) - .chain( - (0..params.network_config.extra_sets.len() - 1) - .map(|_| default_notif_handshake_message.clone()), - ) + (0..params.network_config.extra_sets.len()) + .map(|_| default_notif_handshake_message.clone()) .collect(), params.metrics_registry.as_ref(), params.chain_sync, @@ -465,13 +448,6 @@ where _marker: PhantomData, }); - let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( - service.clone(), - params.transaction_pool, - params.metrics_registry.as_ref(), - )?; - (params.transactions_handler_executor)(tx_handler.run().boxed()); - Ok(NetworkWorker { external_addresses, num_connected, @@ -482,9 +458,9 @@ where from_service, event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, - tx_handler_controller, metrics, boot_node_ids, + _marker: Default::default(), }) } @@ -1149,20 +1125,6 @@ where } } -impl NetworkTransaction for NetworkService -where - B: BlockT + 'static, - H: ExHashT, -{ - fn trigger_repropagate(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); - } - - fn propagate_transaction(&self, hash: H) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); - } -} - impl NetworkBlock> for NetworkService where B: BlockT + 'static, @@ -1249,9 +1211,7 @@ impl<'a> NotificationSenderReadyT for NotificationSenderReady<'a> { /// Messages sent from the `NetworkService` to the `NetworkWorker`. /// /// Each entry corresponds to a method of `NetworkService`. -enum ServiceToWorkerMsg { - PropagateTransaction(H), - PropagateTransactions, +enum ServiceToWorkerMsg { RequestJustification(B::Hash, NumberFor), ClearJustificationRequests, AnnounceBlock(B::Hash, Option>), @@ -1309,7 +1269,7 @@ where /// The import queue that was passed at initialization. import_queue: Box>, /// Messages from the [`NetworkService`] that must be processed. - from_service: TracingUnboundedReceiver>, + from_service: TracingUnboundedReceiver>, /// Senders for events that happen on the network. event_streams: out_events::OutChannels, /// Prometheus network metrics. @@ -1319,8 +1279,9 @@ where /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. peers_notifications_sinks: Arc>>, - /// Controller for the handler of incoming and outgoing transactions. - tx_handler_controller: transactions::TransactionsHandlerController, + /// Marker to pin the `H` generic. Serves no purpose except to not break backwards + /// compatibility. 
+ _marker: PhantomData, } impl Future for NetworkWorker @@ -1376,10 +1337,6 @@ where .behaviour_mut() .user_protocol_mut() .clear_justification_requests(), - ServiceToWorkerMsg::PropagateTransaction(hash) => - this.tx_handler_controller.propagate_transaction(hash), - ServiceToWorkerMsg::PropagateTransactions => - this.tx_handler_controller.propagate_transactions(), ServiceToWorkerMsg::GetValue(key) => this.network_service.behaviour_mut().get_value(key), ServiceToWorkerMsg::PutValue(key, value) => @@ -1922,8 +1879,6 @@ where SyncState::Downloading => true, }; - this.tx_handler_controller.set_gossip_enabled(!is_major_syncing); - this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); if let Some(metrics) = this.metrics.as_ref() { diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index a9505c5341c3d..c8f137f79c6dc 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -21,7 +21,7 @@ use crate::{config, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; use sc_network_common::{ - config::{MultiaddrWithPeerId, ProtocolId}, + config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, protocol::event::Event, service::{NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo}, }; @@ -135,12 +135,8 @@ fn build_test_full_node( let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, - transactions_handler_executor: Box::new(|task| { - async_std::task::spawn(task); - }), network_config, chain: client.clone(), - transaction_pool: Arc::new(config::EmptyTransactionPool), protocol_id, fork_id, import_queue, @@ -178,23 +174,23 @@ fn build_nodes_one_proto() -> ( let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, set_config: Default::default(), }], listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { + set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, peer_id: node1.local_peer_id(), @@ -203,7 +199,7 @@ fn build_nodes_one_proto() -> ( }, }], listen_addresses: vec![], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -368,13 +364,13 @@ fn lots_of_incoming_peers_works() { let (main_node, _) = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { in_peers: u32::MAX, ..Default::default() }, + set_config: SetConfig { in_peers: u32::MAX, ..Default::default() }, }], - transport: 
config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -387,11 +383,11 @@ fn lots_of_incoming_peers_works() { for _ in 0..32 { let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![], - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { + set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr.clone(), peer_id: main_node_peer_id, @@ -399,7 +395,7 @@ fn lots_of_incoming_peers_works() { ..Default::default() }, }], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -504,23 +500,23 @@ fn fallback_name_working() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: NEW_PROTOCOL_NAME.into(), fallback_names: vec![PROTOCOL_NAME.into()], max_notification_size: 1024 * 1024, set_config: Default::default(), }], listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); let (_, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![config::NonDefaultSetConfig { + extra_sets: vec![NonDefaultSetConfig { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: config::SetConfig { + set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, peer_id: node1.local_peer_id(), @@ -529,7 +525,7 @@ fn fallback_name_working() { }, }], listen_addresses: vec![], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }); @@ -572,7 +568,7 @@ fn ensure_listen_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -599,7 +595,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, boot_nodes: vec![boot_node], ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); @@ -632,11 +628,8 @@ fn ensure_reserved_node_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, - default_peers_set: config::SetConfig { - reserved_nodes: vec![reserved_node], - ..Default::default() - }, + transport: TransportConfig::MemoryOnly, + default_peers_set: SetConfig { reserved_nodes: vec![reserved_node], ..Default::default() }, ..config::NetworkConfiguration::new("test-node", "test-client", 
Default::default(), None) }); } @@ -652,10 +645,7 @@ fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - default_peers_set: config::SetConfig { - reserved_nodes: vec![reserved_node], - ..Default::default() - }, + default_peers_set: SetConfig { reserved_nodes: vec![reserved_node], ..Default::default() }, ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -668,7 +658,7 @@ fn ensure_public_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, + transport: TransportConfig::MemoryOnly, public_addresses: vec![public_address], ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index e78b91a4e04ee..2f6b788e368b3 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -47,16 +47,14 @@ use sc_consensus::{ ForkChoiceStrategy, ImportResult, JustificationImport, JustificationSyncLink, LongestChain, Verifier, }; -pub use sc_network::config::EmptyTransactionPool; use sc_network::{ - config::{ - NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, Role, SyncMode, - TransportConfig, - }, + config::{NetworkConfiguration, Role, SyncMode}, Multiaddr, NetworkService, NetworkWorker, }; use sc_network_common::{ - config::{MultiaddrWithPeerId, ProtocolId}, + config::{ + MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, TransportConfig, + }, protocol::ProtocolName, service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, @@ -879,12 +877,8 @@ where let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, - transactions_handler_executor: Box::new(|task| { - async_std::task::spawn(task); - }), network_config, chain: client.clone(), - transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, fork_id, import_queue, diff --git a/client/network/transactions/Cargo.toml b/client/network/transactions/Cargo.toml new file mode 100644 index 0000000000000..5578bb2c7191e --- /dev/null +++ b/client/network/transactions/Cargo.toml @@ -0,0 +1,28 @@ +[package] +description = "Substrate transaction protocol" +name = "sc-network-transactions" +version = "0.10.0-dev" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors = ["Parity Technologies "] +edition = "2021" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-network-transactions" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +array-bytes = "4.1" +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } +futures = "0.3.21" +hex = "0.4.0" +libp2p = "0.46.1" +log = "0.4.17" +pin-project = "1.0.10" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } +sc-network-common = { version = "0.10.0-dev", path = "../common" } +sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +sp-runtime = { version = "6.0.0", path = 
"../../../primitives/runtime" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git a/client/network/transactions/src/config.rs b/client/network/transactions/src/config.rs new file mode 100644 index 0000000000000..abb8cccd301ac --- /dev/null +++ b/client/network/transactions/src/config.rs @@ -0,0 +1,98 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Configuration of the transaction protocol + +use futures::prelude::*; +use sc_network_common::ExHashT; +use sp_runtime::traits::Block as BlockT; +use std::{collections::HashMap, future::Future, pin::Pin, time}; + +/// Interval at which we propagate transactions; +pub(crate) const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); + +/// Maximum number of known transaction hashes to keep for a peer. +/// +/// This should be approx. 2 blocks full of transactions for the network to function properly. +pub(crate) const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. + +/// Maximum allowed size for a transactions notification. +pub(crate) const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; + +/// Maximum number of transaction validation request we keep at any moment. +pub(crate) const MAX_PENDING_TRANSACTIONS: usize = 8192; + +/// Result of the transaction import. +#[derive(Clone, Copy, Debug)] +pub enum TransactionImport { + /// Transaction is good but already known by the transaction pool. + KnownGood, + /// Transaction is good and not yet known. + NewGood, + /// Transaction is invalid. + Bad, + /// Transaction import was not performed. + None, +} + +/// Future resolving to transaction import result. +pub type TransactionImportFuture = Pin + Send>>; + +/// Transaction pool interface +pub trait TransactionPool: Send + Sync { + /// Get transactions from the pool that are ready to be propagated. + fn transactions(&self) -> Vec<(H, B::Extrinsic)>; + /// Get hash of transaction. + fn hash_of(&self, transaction: &B::Extrinsic) -> H; + /// Import a transaction into the pool. + /// + /// This will return future. + fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture; + /// Notify the pool about transactions broadcast. + fn on_broadcasted(&self, propagations: HashMap>); + /// Get transaction by hash. + fn transaction(&self, hash: &H) -> Option; +} + +/// Dummy implementation of the [`TransactionPool`] trait for a transaction pool that is always +/// empty and discards all incoming transactions. +/// +/// Requires the "hash" type to implement the `Default` trait. +/// +/// Useful for testing purposes. 
+pub struct EmptyTransactionPool; + +impl TransactionPool for EmptyTransactionPool { + fn transactions(&self) -> Vec<(H, B::Extrinsic)> { + Vec::new() + } + + fn hash_of(&self, _transaction: &B::Extrinsic) -> H { + Default::default() + } + + fn import(&self, _transaction: B::Extrinsic) -> TransactionImportFuture { + Box::pin(future::ready(TransactionImport::KnownGood)) + } + + fn on_broadcasted(&self, _: HashMap>) {} + + fn transaction(&self, _h: &H) -> Option { + None + } +} diff --git a/client/network/src/transactions.rs b/client/network/transactions/src/lib.rs similarity index 84% rename from client/network/src/transactions.rs rename to client/network/transactions/src/lib.rs index da4547aefeab3..b75bd411b39c4 100644 --- a/client/network/src/transactions.rs +++ b/client/network/transactions/src/lib.rs @@ -26,27 +26,22 @@ //! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a //! `Future` that processes transactions. -use crate::{ - config::{self, TransactionImport, TransactionImportFuture, TransactionPool}, - error, - protocol::message, - service::NetworkService, - utils::{interval, LruHashSet}, - ExHashT, -}; - +use crate::config::*; use codec::{Decode, Encode}; use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network_common::{ - config::ProtocolId, + config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, + error, protocol::{ event::{Event, ObservedRole}, ProtocolName, }, service::{NetworkEventStream, NetworkNotification, NetworkPeers}, + utils::{interval, LruHashSet}, + ExHashT, }; use sp_runtime::traits::Block as BlockT; use std::{ @@ -54,27 +49,14 @@ use std::{ iter, num::NonZeroUsize, pin::Pin, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, + sync::Arc, task::Poll, - time, }; -/// Interval at which we propagate transactions; -const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); - -/// Maximum number of known transaction hashes to keep for a peer. -/// -/// This should be approx. 2 blocks full of transactions for the network to function properly. -const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. +pub mod config; -/// Maximum allowed size for a transactions notification. -const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; - -/// Maximum number of transaction validation request we keep at any moment. -const MAX_PENDING_TRANSACTIONS: usize = 8192; +/// A set of transactions. +pub type Transactions = Vec; mod rep { use sc_peerset::ReputationChange as Rep; @@ -141,7 +123,7 @@ impl TransactionsHandlerPrototype { pub fn new>( protocol_id: ProtocolId, genesis_hash: Hash, - fork_id: Option, + fork_id: Option<&str>, ) -> Self { let genesis_hash = genesis_hash.as_ref(); let protocol_name = if let Some(fork_id) = fork_id { @@ -158,16 +140,16 @@ impl TransactionsHandlerPrototype { } /// Returns the configuration of the set to put in the network configuration. 
- pub fn set_config(&self) -> config::NonDefaultSetConfig { - config::NonDefaultSetConfig { + pub fn set_config(&self) -> NonDefaultSetConfig { + NonDefaultSetConfig { notifications_protocol: self.protocol_name.clone(), fallback_names: self.fallback_protocol_names.clone(), max_notification_size: MAX_TRANSACTIONS_SIZE, - set_config: config::SetConfig { + set_config: SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), - non_reserved_mode: config::NonReservedPeerMode::Deny, + non_reserved_mode: NonReservedPeerMode::Deny, }, } } @@ -176,23 +158,25 @@ impl TransactionsHandlerPrototype { /// the behaviour of the handler while it's running. /// /// Important: the transactions handler is initially disabled and doesn't gossip transactions. - /// You must call [`TransactionsHandlerController::set_gossip_enabled`] to enable it. - pub fn build( + /// Gossiping is enabled when major syncing is done. + pub fn build< + B: BlockT + 'static, + H: ExHashT, + S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, + >( self, - service: Arc>, + service: S, transaction_pool: Arc>, metrics_registry: Option<&Registry>, - ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { + ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { let event_stream = service.event_stream("transactions-handler"); let (to_handler, from_controller) = mpsc::unbounded(); - let gossip_enabled = Arc::new(AtomicBool::new(false)); let handler = TransactionsHandler { protocol_name: self.protocol_name, propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), pending_transactions: FuturesUnordered::new(), pending_transactions_peers: HashMap::new(), - gossip_enabled: gossip_enabled.clone(), service, event_stream, peers: HashMap::new(), @@ -205,7 +189,7 @@ impl TransactionsHandlerPrototype { }, }; - let controller = TransactionsHandlerController { to_handler, gossip_enabled }; + let controller = TransactionsHandlerController { to_handler }; Ok((handler, controller)) } @@ -214,15 +198,9 @@ impl TransactionsHandlerPrototype { /// Controls the behaviour of a [`TransactionsHandler`] it is connected to. pub struct TransactionsHandlerController { to_handler: mpsc::UnboundedSender>, - gossip_enabled: Arc, } impl TransactionsHandlerController { - /// Controls whether transactions are being gossiped on the network. - pub fn set_gossip_enabled(&mut self, enabled: bool) { - self.gossip_enabled.store(enabled, Ordering::Relaxed); - } - /// You may call this when new transactions are imported by the transaction pool. /// /// All transactions will be fetched from the `TransactionPool` that was passed at @@ -246,7 +224,11 @@ enum ToHandler { } /// Handler for transactions. Call [`TransactionsHandler::run`] to start the processing. -pub struct TransactionsHandler { +pub struct TransactionsHandler< + B: BlockT + 'static, + H: ExHashT, + S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, +> { protocol_name: ProtocolName, /// Interval at which we call `propagate_transactions`. propagate_timeout: Pin + Send>>, @@ -258,13 +240,12 @@ pub struct TransactionsHandler { /// multiple times concurrently. pending_transactions_peers: HashMap>, /// Network service to use to send messages and manage peers. - service: Arc>, + service: S, /// Stream of networking events. event_stream: Pin + Send>>, // All connected peers peers: HashMap>, transaction_pool: Arc>, - gossip_enabled: Arc, from_controller: mpsc::UnboundedReceiver>, /// Prometheus metrics. 
metrics: Option, @@ -278,7 +259,12 @@ struct Peer { role: ObservedRole, } -impl TransactionsHandler { +impl TransactionsHandler +where + B: BlockT + 'static, + H: ExHashT, + S: NetworkPeers + NetworkEventStream + NetworkNotification + sp_consensus::SyncOracle, +{ /// Turns the [`TransactionsHandler`] into a future that should run forever and not be /// interrupted. pub async fn run(mut self) { @@ -360,9 +346,9 @@ impl TransactionsHandler { continue } - if let Ok(m) = as Decode>::decode( - &mut message.as_ref(), - ) { + if let Ok(m) = + as Decode>::decode(&mut message.as_ref()) + { self.on_transactions(remote, m); } else { warn!(target: "sub-libp2p", "Failed to decode transactions list"); @@ -376,10 +362,10 @@ impl TransactionsHandler { } /// Called when peer sends us new transactions - fn on_transactions(&mut self, who: PeerId, transactions: message::Transactions) { - // Accept transactions only when enabled - if !self.gossip_enabled.load(Ordering::Relaxed) { - trace!(target: "sync", "{} Ignoring transactions while disabled", who); + fn on_transactions(&mut self, who: PeerId, transactions: Transactions) { + // Accept transactions only when node is not major syncing + if self.service.is_major_syncing() { + trace!(target: "sync", "{} Ignoring transactions while major syncing", who); return } @@ -428,10 +414,11 @@ impl TransactionsHandler { /// Propagate one transaction. pub fn propagate_transaction(&mut self, hash: &H) { - // Accept transactions only when enabled - if !self.gossip_enabled.load(Ordering::Relaxed) { + // Accept transactions only when node is not major syncing + if self.service.is_major_syncing() { return } + debug!(target: "sync", "Propagating transaction [{:?}]", hash); if let Some(transaction) = self.transaction_pool.transaction(hash) { let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); @@ -479,10 +466,11 @@ impl TransactionsHandler { /// Call when we must propagate ready transactions to peers. 
fn propagate_transactions(&mut self) { - // Accept transactions only when enabled - if !self.gossip_enabled.load(Ordering::Relaxed) { + // Accept transactions only when node is not major syncing + if self.service.is_major_syncing() { return } + debug!(target: "sync", "Propagating transactions"); let transactions = self.transaction_pool.transactions(); let propagated_to = self.do_propagate_transactions(&transactions); diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 3c574ef13c8e6..e46c65cf018f5 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -56,6 +56,7 @@ sc-network-bitswap = { version = "0.10.0-dev", path = "../network/bitswap" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-light = { version = "0.10.0-dev", path = "../network/light" } sc-network-sync = { version = "0.10.0-dev", path = "../network/sync" } +sc-network-transactions = { version = "0.10.0-dev", path = "../network/transactions" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 5a2f4cf978b41..dfd532a14c172 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -40,7 +40,7 @@ use sc_keystore::LocalKeystore; use sc_network::{config::SyncMode, NetworkService}; use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{ - service::{NetworkStateInfo, NetworkStatusProvider, NetworkTransaction}, + service::{NetworkStateInfo, NetworkStatusProvider}, sync::warp::WarpSyncProvider, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -326,7 +326,6 @@ where pub trait SpawnTaskNetwork: sc_offchain::NetworkProvider + NetworkStateInfo - + NetworkTransaction + NetworkStatusProvider + Send + Sync @@ -339,7 +338,6 @@ where Block: BlockT, T: sc_offchain::NetworkProvider + NetworkStateInfo - + NetworkTransaction + NetworkStatusProvider + Send + Sync @@ -368,6 +366,9 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub network: Arc>, /// A Sender for RPC requests. pub system_rpc_tx: TracingUnboundedSender>, + /// Controller for transactions handlers + pub tx_handler_controller: + sc_network_transactions::TransactionsHandlerController<::Hash>, /// Telemetry instance for this node. pub telemetry: Option<&'a mut Telemetry>, } @@ -446,6 +447,7 @@ where rpc_builder, network, system_rpc_tx, + tx_handler_controller, telemetry, } = params; @@ -481,7 +483,11 @@ where spawn_handle.spawn( "on-transaction-imported", Some("transaction-pool"), - transaction_notifications(transaction_pool.clone(), network.clone(), telemetry.clone()), + transaction_notifications( + transaction_pool.clone(), + tx_handler_controller, + telemetry.clone(), + ), ); // Prometheus metrics. 
@@ -544,20 +550,21 @@ where Ok(rpc_handlers) } -async fn transaction_notifications( +async fn transaction_notifications( transaction_pool: Arc, - network: Network, + tx_handler_controller: sc_network_transactions::TransactionsHandlerController< + ::Hash, + >, telemetry: Option, ) where Block: BlockT, ExPool: MaintainedTransactionPool::Hash>, - Network: NetworkTransaction<::Hash> + Send + Sync, { // transaction notifications transaction_pool .import_notification_stream() .for_each(move |hash| { - network.propagate_transaction(hash); + tx_handler_controller.propagate_transaction(hash); let status = transaction_pool.status(); telemetry!( telemetry; @@ -719,6 +726,7 @@ pub fn build_network( ( Arc::Hash>>, TracingUnboundedSender>, + sc_network_transactions::TransactionsHandlerController<::Hash>, NetworkStarter, ), Error, @@ -761,9 +769,6 @@ where } } - let transaction_pool_adapter = - Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }); - let protocol_id = config.protocol_id(); let block_announce_validator = if let Some(f) = block_announce_validator_builder { @@ -845,7 +850,7 @@ where protocol_config })); - let network_params = sc_network::config::Params { + let mut network_params = sc_network::config::Params { role: config.role.clone(), executor: { let spawn_handle = Clone::clone(&spawn_handle); @@ -853,16 +858,9 @@ where spawn_handle.spawn("libp2p-node", Some("networking"), fut); })) }, - transactions_handler_executor: { - let spawn_handle = Clone::clone(&spawn_handle); - Box::new(move |fut| { - spawn_handle.spawn("network-transactions-handler", Some("networking"), fut); - }) - }, network_config: config.network.clone(), chain: client.clone(), - transaction_pool: transaction_pool_adapter as _, - protocol_id, + protocol_id: protocol_id.clone(), fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), @@ -877,10 +875,32 @@ where .collect::>(), }; + // crate transactions protocol and add it to the list of supported protocols of `network_params` + let transactions_handler_proto = sc_network_transactions::TransactionsHandlerPrototype::new( + protocol_id.clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + config.chain_spec.fork_id(), + ); + network_params + .network_config + .extra_sets + .insert(0, transactions_handler_proto.set_config()); + let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); + let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( + network.clone(), + Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }), + config.prometheus_config.as_ref().map(|config| &config.registry), + )?; + spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run()); + let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); let future = build_network_future( @@ -928,7 +948,7 @@ where future.await }); - Ok((network, system_rpc_tx, NetworkStarter(network_start_tx))) + Ok((network, system_rpc_tx, tx_handler_controller, NetworkStarter(network_start_tx))) } /// Object used to start the network. 
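For downstream node authors the observable change is the extra element returned by `build_network` and the new `tx_handler_controller` field on `SpawnTasksParams`. A rough sketch of the updated service wiring follows; apart from `tx_handler_controller` every name here (`config`, `client`, `backend`, `task_manager`, `keystore_container`, `transaction_pool`, `rpc_builder`, `telemetry` and the remaining params fields) is assumed pre-existing service-builder boilerplate, not something introduced by this patch.

	// Sketch only: the new element is `tx_handler_controller`.
	let (network, system_rpc_tx, tx_handler_controller, network_starter) =
		sc_service::build_network(sc_service::BuildNetworkParams {
			config: &config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			block_announce_validator_builder: None,
			warp_sync: None,
		})?;

	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
		config,
		client: client.clone(),
		backend,
		task_manager: &mut task_manager,
		keystore: keystore_container.sync_keystore(),
		transaction_pool,
		rpc_builder,
		network: network.clone(),
		system_rpc_tx,
		// Forward the controller so the "on-transaction-imported" task can drive
		// the transactions protocol that now lives in its own crate.
		tx_handler_controller,
		telemetry: telemetry.as_mut(),
	})?;

	network_starter.start_network();

Keeping the controller out of `NetworkService` is what allows the transactions protocol to live in `sc-network-transactions` while the service crate merely forwards pool import notifications to it.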
diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 44153e3b914f3..bca0697bcbd08 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -24,13 +24,11 @@ pub use sc_executor::WasmExecutionMethod; #[cfg(feature = "wasmtime")] pub use sc_executor::WasmtimeInstantiationStrategy; pub use sc_network::{ - config::{ - NetworkConfiguration, NodeKeyConfig, NonDefaultSetConfig, Role, SetConfig, TransportConfig, - }, + config::{NetworkConfiguration, NodeKeyConfig, Role}, Multiaddr, }; pub use sc_network_common::{ - config::{MultiaddrWithPeerId, ProtocolId}, + config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, diff --git a/client/service/src/error.rs b/client/service/src/error.rs index 0d702c7f37b98..001a83922d776 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -19,7 +19,6 @@ //! Errors that can occur during the service operation. use sc_keystore; -use sc_network; use sp_blockchain; use sp_consensus; @@ -41,7 +40,7 @@ pub enum Error { Consensus(#[from] sp_consensus::Error), #[error(transparent)] - Network(#[from] sc_network::error::Error), + Network(#[from] sc_network_common::error::Error), #[error(transparent)] Keystore(#[from] sc_keystore::Error), diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 19358c1e5bc4c..091b4bbe9fe5f 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -72,7 +72,7 @@ pub use sc_chain_spec::{ pub use sc_consensus::ImportQueue; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] -pub use sc_network::config::{TransactionImport, TransactionImportFuture}; +pub use sc_network_transactions::config::{TransactionImport, TransactionImportFuture}; pub use sc_rpc::{ RandomIntegerSubscriptionId, RandomStringSubscriptionId, RpcSubscriptionIdProvider, }; @@ -148,7 +148,7 @@ async fn build_network_future< + Send + Sync + 'static, - H: sc_network::ExHashT, + H: sc_network_common::ExHashT, >( role: Role, mut network: sc_network::NetworkWorker, @@ -415,7 +415,8 @@ where .collect() } -impl sc_network::config::TransactionPool for TransactionPoolAdapter +impl sc_network_transactions::config::TransactionPool + for TransactionPoolAdapter where C: HeaderBackend + BlockBackend diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 23245d46cba10..5d29d34a3cbf2 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -22,12 +22,9 @@ use futures::{task::Poll, Future, TryFutureExt as _}; use log::{debug, info}; use parking_lot::Mutex; use sc_client_api::{Backend, CallExecutor}; -use sc_network::{ - config::{NetworkConfiguration, TransportConfig}, - multiaddr, -}; +use sc_network::{config::NetworkConfiguration, multiaddr}; use sc_network_common::{ - config::MultiaddrWithPeerId, + config::{MultiaddrWithPeerId, TransportConfig}, service::{NetworkBlock, NetworkPeers, NetworkStateInfo}, }; use sc_service::{ From 519fbaae886e2773b37363970433d36cbb47d853 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 27 Sep 2022 08:08:14 +0100 Subject: [PATCH 177/348] =?UTF-8?q?export=20more=20types=20from=20fast-unt?= =?UTF-8?q?sake=20=F0=9F=A4=A6=E2=80=8D=E2=99=82=EF=B8=8F=20(#12353)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * export more types from fast-untsake 🤦‍♂️ * make non-test * fmt --- 
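The practical effect of the re-export is that runtime and node crates can now name these types by path. A minimal, hedged sketch — the `Runtime` type and whatever trait bounds `PreventStakingOpsIfUnbonding` requires are assumptions about the consuming runtime, not shown in this patch:

	use pallet_fast_unstake::types::PreventStakingOpsIfUnbonding;

	// `new()` is no longer gated behind #[cfg(test)], so this now compiles
	// outside the pallet's own tests.
	let _filter = PreventStakingOpsIfUnbonding::<Runtime>::new();
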
frame/fast-unstake/src/lib.rs | 3 ++- frame/fast-unstake/src/types.rs | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 9bfb29f8457fa..7fbac8560ea6c 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -60,7 +60,7 @@ mod tests; // NOTE: enable benchmarking in tests as well. #[cfg(feature = "runtime-benchmarks")] mod benchmarking; -mod types; +pub mod types; pub mod weights; pub const LOG_TARGET: &'static str = "runtime::fast-unstake"; @@ -90,6 +90,7 @@ pub mod pallet { }; use sp_staking::EraIndex; use sp_std::{prelude::*, vec::Vec}; + pub use types::PreventStakingOpsIfUnbonding; pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index e8d538dce4802..2ddb8dca27e9e 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -47,7 +47,6 @@ pub struct UnstakeRequest(sp_std::marker::PhantomData); -#[cfg(test)] impl PreventStakingOpsIfUnbonding { pub fn new() -> Self { Self(Default::default()) From 1763ff2273c4649fa969167503951371141a0272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 27 Sep 2022 11:37:45 +0200 Subject: [PATCH 178/348] Fix compilation on 1.66 nightly (#12363) --- primitives/state-machine/src/trie_backend_essence.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index dda7b51ab08c6..cd2a71163e2ee 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -178,7 +178,10 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss ) -> R { let storage_root = storage_root.unwrap_or_else(|| self.root); let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); - let recorder = recorder.as_mut().map(|r| r as _); + let recorder = match recorder.as_mut() { + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + None => None, + }; let mut cache = self .trie_node_cache @@ -216,7 +219,10 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss ) -> (Option, R), ) -> R { let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); - let recorder = recorder.as_mut().map(|r| r as _); + let recorder = match recorder.as_mut() { + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + None => None, + }; let result = if let Some(local_cache) = self.trie_node_cache.as_ref() { let mut cache = local_cache.as_local_trie_cache().as_trie_db_mut_cache(); From edca89177318580878bf11c0d49586ceea23909b Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Tue, 27 Sep 2022 13:16:30 +0200 Subject: [PATCH 179/348] Relax Slots-based engines from Epochs (#12360) Remove Epochs reference from slots subsystem --- client/consensus/aura/src/lib.rs | 14 ++++++------- client/consensus/babe/src/lib.rs | 12 ++++------- client/consensus/slots/src/lib.rs | 35 +++++++++++++++---------------- 3 files changed, 28 insertions(+), 33 deletions(-) diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index c538200bb315c..a0eed6e35310e 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -257,7 +257,7 @@ pub fn build_aura_worker( SyncOracle = SO, JustificationSyncLink = L, Claim = P::Public, - EpochData = Vec>, + AuxData = Vec>, > 
where B: BlockT, @@ -330,7 +330,7 @@ where Pin> + Send + 'static>>; type Proposer = E::Proposer; type Claim = P::Public; - type EpochData = Vec>; + type AuxData = Vec>; fn logging_target(&self) -> &'static str { "aura" @@ -340,15 +340,15 @@ where &mut self.block_import } - fn epoch_data( + fn aux_data( &self, header: &B::Header, _slot: Slot, - ) -> Result { + ) -> Result { authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) } - fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option { + fn authorities_len(&self, epoch_data: &Self::AuxData) -> Option { Some(epoch_data.len()) } @@ -356,7 +356,7 @@ where &self, _header: &B::Header, slot: Slot, - epoch_data: &Self::EpochData, + epoch_data: &Self::AuxData, ) -> Option { let expected_author = slot_author::
pub type TransactionFor<P> = <<P as TransactionPool>::Block as BlockT>::Extrinsic;
/// Type of transactions event stream for a pool.
pub type TransactionStatusStreamFor<P> = TransactionStatusStream<TxHash<P>, BlockHash<P>>;
/// Transaction type for a local pool.
pub type LocalTransactionFor<P> = <<P as LocalTransactionPool>

::Block as BlockT>::Extrinsic; +/// Transaction's index within the block in which it was included. +pub type TxIndex = usize; /// Typical future type used in transaction pool api. pub type PoolFuture = std::pin::Pin> + Send>>; @@ -362,3 +367,52 @@ impl OffchainSubmitTransaction for TP }) } } + +/// Wrapper functions to keep the API backwards compatible over the wire for the old RPC spec. +mod v1_compatible { + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + pub fn serialize(data: &(H, usize), serializer: S) -> Result + where + S: Serializer, + H: Serialize, + { + let (hash, _) = data; + serde::Serialize::serialize(&hash, serializer) + } + + pub fn deserialize<'de, D, H>(deserializer: D) -> Result<(H, usize), D::Error> + where + D: Deserializer<'de>, + H: Deserialize<'de>, + { + let hash: H = serde::Deserialize::deserialize(deserializer)?; + Ok((hash, 0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn tx_status_compatibility() { + let event: TransactionStatus = TransactionStatus::InBlock((1, 2)); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"inBlock":1}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionStatus = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, TransactionStatus::InBlock((1, 0))); + + let event: TransactionStatus = TransactionStatus::Finalized((1, 2)); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"finalized":1}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionStatus = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, TransactionStatus::Finalized((1, 0))); + } +} diff --git a/client/transaction-pool/src/graph/listener.rs b/client/transaction-pool/src/graph/listener.rs index d4f42b32fdbb8..776749abf2d5d 100644 --- a/client/transaction-pool/src/graph/listener.rs +++ b/client/transaction-pool/src/graph/listener.rs @@ -104,13 +104,18 @@ impl Listener { /// Transaction was pruned from the pool. pub fn pruned(&mut self, block_hash: BlockHash, tx: &H) { debug!(target: "txpool", "[{:?}] Pruned at {:?}", tx, block_hash); - self.fire(tx, |s| s.in_block(block_hash)); - self.finality_watchers.entry(block_hash).or_insert(vec![]).push(tx.clone()); + // Get the transactions included in the given block hash. + let txs = self.finality_watchers.entry(block_hash).or_insert(vec![]); + txs.push(tx.clone()); + // Current transaction is the last one included. 
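+ // For example, if two transactions were already recorded for this block, the one
+ // being pruned here gets `tx_index == 2` and its watcher will later receive
+ // `InBlock((block_hash, 2))`.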
+ let tx_index = txs.len() - 1; + + self.fire(tx, |watcher| watcher.in_block(block_hash, tx_index)); while self.finality_watchers.len() > MAX_FINALITY_WATCHERS { if let Some((hash, txs)) = self.finality_watchers.pop_front() { for tx in txs { - self.fire(&tx, |s| s.finality_timeout(hash)); + self.fire(&tx, |watcher| watcher.finality_timeout(hash)); } } } @@ -120,7 +125,7 @@ impl Listener { pub fn retracted(&mut self, block_hash: BlockHash) { if let Some(hashes) = self.finality_watchers.remove(&block_hash) { for hash in hashes { - self.fire(&hash, |s| s.retracted(block_hash)) + self.fire(&hash, |watcher| watcher.retracted(block_hash)) } } } @@ -128,9 +133,9 @@ impl Listener { /// Notify all watchers that transactions have been finalized pub fn finalized(&mut self, block_hash: BlockHash) { if let Some(hashes) = self.finality_watchers.remove(&block_hash) { - for hash in hashes { + for (tx_index, hash) in hashes.into_iter().enumerate() { log::debug!(target: "txpool", "[{:?}] Sent finalization event (block {:?})", hash, block_hash); - self.fire(&hash, |s| s.finalized(block_hash)) + self.fire(&hash, |watcher| watcher.finalized(block_hash, tx_index)) } } } diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 19acbddbe7843..108ae791e37b3 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -770,7 +770,7 @@ mod tests { assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!( stream.next(), - Some(TransactionStatus::InBlock(H256::from_low_u64_be(2).into())), + Some(TransactionStatus::InBlock((H256::from_low_u64_be(2).into(), 0))), ); } @@ -803,7 +803,7 @@ mod tests { assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!( stream.next(), - Some(TransactionStatus::InBlock(H256::from_low_u64_be(2).into())), + Some(TransactionStatus::InBlock((H256::from_low_u64_be(2).into(), 0))), ); } diff --git a/client/transaction-pool/src/graph/watcher.rs b/client/transaction-pool/src/graph/watcher.rs index 8cd78cfc78240..0613300c8684b 100644 --- a/client/transaction-pool/src/graph/watcher.rs +++ b/client/transaction-pool/src/graph/watcher.rs @@ -84,13 +84,13 @@ impl Sender { } /// Extrinsic has been included in block with given hash. - pub fn in_block(&mut self, hash: BH) { - self.send(TransactionStatus::InBlock(hash)); + pub fn in_block(&mut self, hash: BH, index: usize) { + self.send(TransactionStatus::InBlock((hash, index))); } /// Extrinsic has been finalized by a finality gadget. 
- pub fn finalized(&mut self, hash: BH) { - self.send(TransactionStatus::Finalized(hash)); + pub fn finalized(&mut self, hash: BH, index: usize) { + self.send(TransactionStatus::Finalized((hash, index))); self.is_finalized = true; } diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index f04a27cf81e1d..be75523c1230f 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -328,7 +328,7 @@ fn should_revalidate_across_many_blocks() { block_on( watcher1 - .take_while(|s| future::ready(*s != TransactionStatus::InBlock(block_hash))) + .take_while(|s| future::ready(*s != TransactionStatus::InBlock((block_hash, 0)))) .collect::>(), ); @@ -398,24 +398,24 @@ fn should_push_watchers_during_maintenance() { futures::executor::block_on_stream(watcher0).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash), - TransactionStatus::Finalized(header_hash) + TransactionStatus::InBlock((header_hash, 0)), + TransactionStatus::Finalized((header_hash, 0)) ], ); assert_eq!( futures::executor::block_on_stream(watcher1).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash), - TransactionStatus::Finalized(header_hash) + TransactionStatus::InBlock((header_hash, 1)), + TransactionStatus::Finalized((header_hash, 1)) ], ); assert_eq!( futures::executor::block_on_stream(watcher2).collect::>(), vec![ TransactionStatus::Ready, - TransactionStatus::InBlock(header_hash), - TransactionStatus::Finalized(header_hash) + TransactionStatus::InBlock((header_hash, 2)), + TransactionStatus::Finalized((header_hash, 2)) ], ); } @@ -450,8 +450,8 @@ fn finalization() { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((header.hash(), 0)))); assert_eq!(stream.next(), None); } @@ -573,30 +573,31 @@ fn fork_aware_finalization() { for (canon_watcher, h) in canon_watchers { let mut stream = futures::executor::block_on_stream(canon_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(h))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(h))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((h, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((h, 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_dave_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c2))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c2, 0)))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(c2))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((e1, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((e1, 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); assert_eq!(stream.next(), 
Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d2))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((d2, 0)))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(d2))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1))); + // In block e1 we submitted: [dave, bob] xts in this order. + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((e1, 1)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((e1, 1)))); assert_eq!(stream.next(), None); } } @@ -646,10 +647,10 @@ fn prune_and_retract_tx_at_same_time() { { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1, 0)))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2, 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2, 0)))); assert_eq!(stream.next(), None); } } From 60be7b8d03b8dc4a15080bddcefb1a9b20598d6a Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 11 Oct 2022 15:56:18 +0200 Subject: [PATCH 228/348] Remove the unused light client requests (#12470) * Remove the unused light client requests * Add comment about new ids --- .../src/light_client_requests/handler.rs | 4 -- .../network/light/src/schema/light.v1.proto | 51 +------------------ 2 files changed, 2 insertions(+), 53 deletions(-) diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 5efdc3ff6a18b..9dc02eb9ff291 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -151,12 +151,8 @@ where self.on_remote_call_request(&peer, r)?, Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => self.on_remote_read_request(&peer, r)?, - Some(schema::v1::light::request::Request::RemoteHeaderRequest(_r)) => - return Err(HandleRequestError::BadRequest("Not supported.")), Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => self.on_remote_read_child_request(&peer, r)?, - Some(schema::v1::light::request::Request::RemoteChangesRequest(_r)) => - return Err(HandleRequestError::BadRequest("Not supported.")), None => return Err(HandleRequestError::BadRequest("Remote request without request data.")), }; diff --git a/client/network/light/src/schema/light.v1.proto b/client/network/light/src/schema/light.v1.proto index 9b5d47719dc28..1df5466e21988 100644 --- a/client/network/light/src/schema/light.v1.proto +++ b/client/network/light/src/schema/light.v1.proto @@ -17,9 +17,8 @@ message Request { oneof request { RemoteCallRequest remote_call_request = 1; RemoteReadRequest remote_read_request = 2; - RemoteHeaderRequest remote_header_request = 3; RemoteReadChildRequest remote_read_child_request = 4; - RemoteChangesRequest remote_changes_request = 5; + // Note: ids 3 and 5 were used in the past. It would be preferable to not re-use them. 
} } @@ -28,8 +27,7 @@ message Response { oneof response { RemoteCallResponse remote_call_response = 1; RemoteReadResponse remote_read_response = 2; - RemoteHeaderResponse remote_header_response = 3; - RemoteChangesResponse remote_changes_response = 4; + // Note: ids 3 and 4 were used in the past. It would be preferable to not re-use them. } } @@ -73,48 +71,3 @@ message RemoteReadChildRequest { // Storage keys. repeated bytes keys = 6; } - -// Remote header request. -message RemoteHeaderRequest { - // Block number to request header for. - bytes block = 2; -} - -// Remote header response. -message RemoteHeaderResponse { - // Header. None if proof generation has failed (e.g. header is unknown). - bytes header = 2; // optional - // Header proof. - bytes proof = 3; -} - -/// Remote changes request. -message RemoteChangesRequest { - // Hash of the first block of the range (including first) where changes are requested. - bytes first = 2; - // Hash of the last block of the range (including last) where changes are requested. - bytes last = 3; - // Hash of the first block for which the requester has the changes trie root. All other - // affected roots must be proved. - bytes min = 4; - // Hash of the last block that we can use when querying changes. - bytes max = 5; - // Storage child node key which changes are requested. - bytes storage_key = 6; // optional - // Storage key which changes are requested. - bytes key = 7; -} - -// Remote changes response. -message RemoteChangesResponse { - // Proof has been generated using block with this number as a max block. Should be - // less than or equal to the RemoteChangesRequest::max block number. - bytes max = 2; - // Changes proof. - repeated bytes proof = 3; - // Changes tries roots missing on the requester' node. - repeated Pair roots = 4; - // Missing changes tries roots proof. - bytes roots_proof = 5; -} - From 6072b90096102767076b24da840d1e1ccf1baabc Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Tue, 11 Oct 2022 18:25:12 +0300 Subject: [PATCH 229/348] Fix flaky service test (#12472) Sometimes `NotificationStreamOpenened` would be received for the other protocol before `SyncConnected` was received so when the connection was closed, an incorrect event was read from the event stream. --- client/network/common/src/config.rs | 4 ++++ client/network/src/service/tests.rs | 26 ++++++++++++++------------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index e4a7f04c8d6e8..96c7c11ec2696 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -244,6 +244,10 @@ pub struct NonDefaultSetConfig { /// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback` pub fallback_names: Vec, /// Handshake of the protocol + /// + /// NOTE: Currently custom handshakes are not fully supported. See issue #5685 for more + /// details. This field is temporarily used to allow moving the hardcoded block announcement + /// protocol out of `protocol.rs`. pub handshake: Option, /// Maximum allowed size of single notifications. 
pub max_notification_size: u64, diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 7c651c675b83e..b656d7a7c0ddc 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -637,19 +637,21 @@ async fn disconnect_sync_peer_using_block_announcement_protocol_name() { ..config::NetworkConfiguration::new_local() }); - loop { - match events_stream1.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => break, - _ => {}, - }; + async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { + let mut notif_received = false; + let mut sync_received = false; + + while !notif_received || !sync_received { + match stream.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => notif_received = true, + Event::SyncConnected { .. } => sync_received = true, + _ => {}, + }; + } } - loop { - match events_stream2.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => break, - _ => {}, - }; - } + wait_for_events(&mut events_stream1).await; + wait_for_events(&mut events_stream2).await; // disconnect peer using `PROTOCOL_NAME`, verify `NotificationStreamClosed` event is emitted node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into()); @@ -659,7 +661,7 @@ async fn disconnect_sync_peer_using_block_announcement_protocol_name() { )); let _ = events_stream2.next().await; // ignore the reopen event - // now disconnect using the block announcement protocol, verify that `SyncDisconnected` is + // now disconnect using `BLOCK_ANNOUNCE_PROTO_NAME`, verify that `SyncDisconnected` is // emitted node2.disconnect_peer(node1.local_peer_id(), BLOCK_ANNOUNCE_PROTO_NAME.into()); assert!(std::matches!(events_stream2.next().await, Some(Event::SyncDisconnected { .. }))); From 1bf2e6db6ccc0f9d872cddf9e3254227d100f2bf Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Wed, 12 Oct 2022 00:35:54 +0800 Subject: [PATCH 230/348] Rename `from_components` to `from_parts` (#12473) * Rename `from_components` to `from_parts` * Fixes * Spelling --- frame/contracts/src/gas.rs | 2 +- frame/election-provider-multi-phase/src/mock.rs | 2 +- frame/system/src/limits.rs | 2 +- primitives/weights/src/weight_v2.rs | 8 ++++---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index d0076652dd6d4..c0cc2db2aa3eb 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -109,7 +109,7 @@ where pub fn nested(&mut self, amount: Weight) -> Result { // NOTE that it is ok to allocate all available gas since it still ensured // by `charge` that it doesn't reach zero. - let amount = Weight::from_components( + let amount = Weight::from_parts( if amount.ref_time().is_zero() { self.gas_left().ref_time() } else { diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 2615d863c91e0..6a638c8d2a2e1 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -239,7 +239,7 @@ parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights ::with_sensible_defaults( - Weight::from_components(2u64 * constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + Weight::from_parts(2u64 * constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), NORMAL_DISPATCH_RATIO, ); } diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index 07ad240afe159..eb95b699eba32 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -208,7 +208,7 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { Self::with_sensible_defaults( - Weight::from_components(constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + Weight::from_parts(constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), DEFAULT_NORMAL_RATIO, ) } diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index 8596a782c1fa7..2933d80099dd7 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -112,8 +112,8 @@ impl Weight { Self { ref_time: 0, proof_size } } - /// Construct [`Weight`] with weight components, namely reference time and storage size weights. - pub const fn from_components(ref_time: u64, proof_size: u64) -> Self { + /// Construct [`Weight`] from weight parts, namely reference time and proof size weights. + pub const fn from_parts(ref_time: u64, proof_size: u64) -> Self { Self { ref_time, proof_size } } @@ -455,8 +455,8 @@ mod tests { #[test] fn is_zero_works() { assert!(Weight::zero().is_zero()); - assert!(!Weight::from_components(1, 0).is_zero()); - assert!(!Weight::from_components(0, 1).is_zero()); + assert!(!Weight::from_parts(1, 0).is_zero()); + assert!(!Weight::from_parts(0, 1).is_zero()); assert!(!Weight::MAX.is_zero()); } } From ccc8f6cc3debf7276cbdff78c7a906be6221ca35 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Tue, 11 Oct 2022 14:41:43 -0400 Subject: [PATCH 231/348] Avoid Unstable Sort (#12455) * dont use unstable sort * remove comment * add clippy rule --- .cargo/config.toml | 5 +++-- bin/node/bench/src/core.rs | 2 +- client/rpc-servers/src/lib.rs | 2 +- frame/benchmarking/src/analysis.rs | 4 ++-- frame/contracts/src/wasm/runtime.rs | 6 +----- frame/election-provider-multi-phase/src/mock.rs | 2 +- frame/examples/basic/src/benchmarking.rs | 2 +- primitives/npos-elections/fuzzer/src/common.rs | 6 +++--- utils/frame/benchmarking-cli/src/shared/stats.rs | 2 +- 9 files changed, 14 insertions(+), 17 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 5355758f7a4fa..66b28b3485d86 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -18,14 +18,15 @@ rustflags = [ "-Aclippy::borrowed-box", # Reasonable to fix this one "-Aclippy::too-many-arguments", # (Turning this on would lead to) "-Aclippy::unnecessary_cast", # Types may change - "-Aclippy::identity-op", # One case where we do 0 + + "-Aclippy::identity-op", # One case where we do 0 + "-Aclippy::useless_conversion", # Types may change "-Aclippy::unit_arg", # styalistic. "-Aclippy::option-map-unit-fn", # styalistic - "-Aclippy::bind_instead_of_map", # styalistic + "-Aclippy::bind_instead_of_map", # styalistic "-Aclippy::erasing_op", # E.g. 0 * DOLLARS "-Aclippy::eq_op", # In tests we test equality. 
"-Aclippy::while_immutable_condition", # false positives "-Aclippy::needless_option_as_deref", # false positives "-Aclippy::derivable_impls", # false positives + "-Aclippy::stable_sort_primitive", # prefer stable sort ] diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs index 3b3060a888349..b6ad3ecd80068 100644 --- a/bin/node/bench/src/core.rs +++ b/bin/node/bench/src/core.rs @@ -132,7 +132,7 @@ pub fn run_benchmark(benchmark: Box, mode: Mode) -> Be durations.push(duration.as_nanos()); } - durations.sort_unstable(); + durations.sort(); let raw_average = (durations.iter().sum::() / (durations.len() as u128)) as u64; let average = (durations.iter().skip(10).take(30).sum::() / 30) as u64; diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 0e2aca0dcc829..7eb825e169bfa 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -198,7 +198,7 @@ fn format_allowed_hosts(addrs: &[SocketAddr]) -> Vec { fn build_rpc_api(mut rpc_api: RpcModule) -> RpcModule { let mut available_methods = rpc_api.method_names().collect::>(); - available_methods.sort_unstable(); + available_methods.sort(); rpc_api .register_method("rpc_methods", move |_, _| { diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index 9ba2ea657bab0..a736cdc203182 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -181,7 +181,7 @@ impl Analysis { }) .collect(); - values.sort_unstable(); + values.sort(); let mid = values.len() / 2; Some(Self { @@ -311,7 +311,7 @@ impl Analysis { } for (_, rs) in results.iter_mut() { - rs.sort_unstable(); + rs.sort(); let ql = rs.len() / 4; *rs = rs[ql..rs.len() - ql].to_vec(); } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 3296492994071..6d7e6bcf69e5f 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -1976,11 +1976,7 @@ pub mod env { data_len: u32, ) -> Result<(), TrapReason> { fn has_duplicates(items: &mut Vec) -> bool { - // # Warning - // - // Unstable sorts are non-deterministic across architectures. The usage here is OK - // because we are rejecting duplicates which removes the non determinism. - items.sort_unstable(); + items.sort(); // Find any two consecutive equal elements. items.windows(2).any(|w| match &w { &[a, b] => a == b, diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 6a638c8d2a2e1..e04e0bf474caf 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -155,7 +155,7 @@ pub fn trim_helpers() -> TrimHelpers { seq_phragmen(desired_targets as usize, targets.clone(), voters.clone(), None).unwrap(); // sort by decreasing order of stake - assignments.sort_unstable_by_key(|assignment| { + assignments.sort_by_key(|assignment| { std::cmp::Reverse(stakes.get(&assignment.who).cloned().unwrap_or_default()) }); diff --git a/frame/examples/basic/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs index 4d1659af46460..87d65a0bfa5b6 100644 --- a/frame/examples/basic/src/benchmarking.rs +++ b/frame/examples/basic/src/benchmarking.rs @@ -63,7 +63,7 @@ benchmarks! 
{ } }: { // The benchmark execution phase could also be a closure with custom code - m.sort_unstable(); + m.sort(); } // This line generates test cases for benchmarking, and could be run by: diff --git a/primitives/npos-elections/fuzzer/src/common.rs b/primitives/npos-elections/fuzzer/src/common.rs index e5853f28c4929..ad9bd43f9bce0 100644 --- a/primitives/npos-elections/fuzzer/src/common.rs +++ b/primitives/npos-elections/fuzzer/src/common.rs @@ -80,7 +80,7 @@ pub fn generate_random_npos_inputs( } candidates.push(id); } - candidates.sort_unstable(); + candidates.sort(); candidates.dedup(); assert_eq!(candidates.len(), candidate_count); @@ -99,11 +99,11 @@ pub fn generate_random_npos_inputs( let mut chosen_candidates = Vec::with_capacity(n_candidates_chosen); chosen_candidates.extend(candidates.choose_multiple(&mut rng, n_candidates_chosen)); - chosen_candidates.sort_unstable(); + chosen_candidates.sort(); voters.push((id, vote_weight, chosen_candidates)); } - voters.sort_unstable(); + voters.sort(); voters.dedup_by_key(|(id, _weight, _chosen_candidates)| *id); assert_eq!(voters.len(), voter_count); diff --git a/utils/frame/benchmarking-cli/src/shared/stats.rs b/utils/frame/benchmarking-cli/src/shared/stats.rs index 3234d5f2f94f7..ffae4a17724f8 100644 --- a/utils/frame/benchmarking-cli/src/shared/stats.rs +++ b/utils/frame/benchmarking-cli/src/shared/stats.rs @@ -112,7 +112,7 @@ impl Stats { /// Returns the specified percentile for the given data. /// This is best effort since it ignores the interpolation case. fn percentile(mut xs: Vec, p: f64) -> u64 { - xs.sort_unstable(); + xs.sort(); let index = (xs.len() as f64 * p).ceil() as usize - 1; xs[index.clamp(0, xs.len() - 1)] } From 94b731c4f7059662232fa7b5bdc5a64b17d69252 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 11 Oct 2022 22:20:13 +0200 Subject: [PATCH 232/348] Finalized block event triggers tx maintenance (#12305) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * finalized block event triggers tx maintenance * tx-pool: enactment helper introduced * tx-pool: ChainApi: added tree_route method * enactment logic implemented + tests Signed-off-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> * Some additional tests * minor improvements * trigger CI job * fix compilation errors ChainApi::tree_route return type changed to Result>, as some implementations (tests) are not required to provide this tree route.
* formatting * trait removed * implementation slightly simplified (thanks to @koute) * get rid of Arc<> in EnactmentState return value * minor improvement * Apply suggestions from code review Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Apply suggestions from code review * comment updated + formatting * Apply suggestions from code review Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Davide Galassi * formatting * finalization notification bug fix + new test case + log::warn message when finalized block is being retracted by new event * added error message on tree_route failure * Apply suggestions from code review Co-authored-by: Bastian Köcher * use provided tree_route in Finalized event * Option removed from ChainApi::tree_route * doc added, test and logs improved * handle_enactment aligned with original implementation * use async-await * Apply suggestions from code review Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Bastian Köcher * formatting + warn->debug * compilation error fix * enactment_state initializers added * enactment_state: Option removed * manual-seal: compilation & tests fix * manual-seal: tests fixed * tests cleanup * another compilation error fixed * TreeRoute::new added * get rid of pub hack * one more test added * formatting * TreeRoute::new doc added + formatting * Apply suggestions from code review Co-authored-by: Davide Galassi * (bool,Option) simplified to Option * log message improved * yet another review suggestions applied * get rid of hash in handle_enactment * Apply suggestions from code review Co-authored-by: Bastian Köcher * Update client/transaction-pool/src/lib.rs Co-authored-by: Bastian Köcher * minor corrections * EnactmentState moved to new file * File header corrected * error formatting aligned with codebase * Apply suggestions from code review Co-authored-by: Bastian Köcher * remove commented code * small nits Signed-off-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Davide Galassi Co-authored-by: Bastian Köcher Co-authored-by: André Silva --- Cargo.lock | 2 + client/consensus/manual-seal/src/lib.rs | 28 +- client/transaction-pool/Cargo.toml | 1 + client/transaction-pool/api/Cargo.toml | 1 + client/transaction-pool/api/src/lib.rs | 4 +- client/transaction-pool/benches/basics.rs | 8 + client/transaction-pool/src/api.rs | 30 +- .../transaction-pool/src/enactment_state.rs | 579 ++++++++++++++++ client/transaction-pool/src/graph/pool.rs | 8 + client/transaction-pool/src/lib.rs | 346 +++++----- client/transaction-pool/src/tests.rs | 9 + client/transaction-pool/tests/pool.rs | 624 +++++++++++++++++- primitives/blockchain/src/header_metadata.rs | 7 + .../runtime/transaction-pool/src/lib.rs | 10 +- 14 files changed, 1466 insertions(+), 191 deletions(-) create mode 100644 client/transaction-pool/src/enactment_state.rs diff --git a/Cargo.lock b/Cargo.lock index 04b90dfffba1e..d29023f330ce1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9044,6 +9044,7 @@ version = "4.0.0-dev" dependencies = [ "array-bytes", "assert_matches", + "async-trait", "criterion", "futures", "futures-timer", @@ -9075,6 +9076,7 @@ dependencies = [ name = "sc-transaction-pool-api" version = "4.0.0-dev" dependencies = [ + "async-trait", "futures", "log", "serde", diff --git a/client/consensus/manual-seal/src/lib.rs 
b/client/consensus/manual-seal/src/lib.rs index 4672e7275a56b..09ab139b91c73 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -305,9 +305,8 @@ pub async fn run_instant_seal_and_finalize( mod tests { use super::*; use sc_basic_authorship::ProposerFactory; - use sc_client_api::BlockBackend; use sc_consensus::ImportedAux; - use sc_transaction_pool::{BasicPool, Options, RevalidationType}; + use sc_transaction_pool::{BasicPool, FullChainApi, Options, RevalidationType}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; use sp_inherents::InherentData; use sp_runtime::generic::{BlockId, Digest, DigestItem}; @@ -359,6 +358,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); + let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -367,6 +367,8 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, + genesis_hash, + genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as transactions are imported into the @@ -429,6 +431,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); + let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -437,6 +440,8 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, + genesis_hash, + genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as an engine command is sent over the @@ -505,8 +510,13 @@ mod tests { let builder = TestClientBuilder::new(); let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); - let pool_api = api(); + let pool_api = Arc::new(FullChainApi::new( + client.clone(), + None, + &sp_core::testing::TaskExecutor::new(), + )); let spawner = sp_core::testing::TaskExecutor::new(); + let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -515,6 +525,8 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, + genesis_hash, + genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as an engine command is sent over the @@ -550,7 +562,6 @@ mod tests { .await .unwrap(); let created_block = rx.await.unwrap().unwrap(); - pool_api.increment_nonce(Alice.into()); // assert that the background task returns ok assert_eq!( @@ -566,8 +577,7 @@ mod tests { } } ); - let block = client.block(&BlockId::Number(1)).unwrap().unwrap().block; - pool_api.add_block(block, true); + assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok()); let header = client.header(&BlockId::Number(1)).expect("db error").expect("imported above"); @@ -588,9 +598,6 @@ mod tests { .await .is_ok()); assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); - let block = 
client.block(&BlockId::Number(2)).unwrap().unwrap().block; - pool_api.add_block(block, true); - pool_api.increment_nonce(Alice.into()); assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Bob, 0)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); @@ -614,6 +621,7 @@ mod tests { let (client, select_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); + let genesis_hash = client.header(&BlockId::Number(0)).unwrap().unwrap().hash(); let pool = Arc::new(BasicPool::with_revalidation_type( Options::default(), true.into(), @@ -622,6 +630,8 @@ mod tests { RevalidationType::Full, spawner.clone(), 0, + genesis_hash, + genesis_hash, )); let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 5e005f5523ae8..0bdfb623e6c14 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -13,6 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" futures-timer = "3.0.2" diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml index 1ab0f32bc8bad..366d0eb99b945 100644 --- a/client/transaction-pool/api/Cargo.toml +++ b/client/transaction-pool/api/Cargo.toml @@ -9,6 +9,7 @@ repository = "https://github.com/paritytech/substrate/" description = "Transaction pool client facing API." [dependencies] +async-trait = "0.1.57" futures = "0.3.21" log = "0.4.17" serde = { version = "1.0.136", features = ["derive"] } diff --git a/client/transaction-pool/api/src/lib.rs b/client/transaction-pool/api/src/lib.rs index c0a94516ffc97..c1e49ad07d7b1 100644 --- a/client/transaction-pool/api/src/lib.rs +++ b/client/transaction-pool/api/src/lib.rs @@ -21,6 +21,7 @@ pub mod error; +use async_trait::async_trait; use futures::{Future, Stream}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sp_runtime::{ @@ -303,9 +304,10 @@ pub enum ChainEvent { } /// Trait for transaction pool maintenance. 
+#[async_trait] pub trait MaintainedTransactionPool: TransactionPool { /// Perform maintenance - fn maintain(&self, event: ChainEvent) -> Pin + Send>>; + async fn maintain(&self, event: ChainEvent); } /// Transaction pool interface for submitting local transactions that exposes a diff --git a/client/transaction-pool/benches/basics.rs b/client/transaction-pool/benches/basics.rs index a7991269439ce..2632f8fb6aab5 100644 --- a/client/transaction-pool/benches/basics.rs +++ b/client/transaction-pool/benches/basics.rs @@ -121,6 +121,14 @@ impl ChainApi for TestApi { ) -> Result::Header>, Self::Error> { Ok(None) } + + fn tree_route( + &self, + _from: ::Hash, + _to: ::Hash, + ) -> Result, Self::Error> { + unimplemented!() + } } fn uxt(transfer: Transfer) -> Extrinsic { diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 4710c96b003cd..110647b8cb3b0 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -30,6 +30,7 @@ use std::{marker::PhantomData, pin::Pin, sync::Arc}; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_client_api::{blockchain::HeaderBackend, BlockBackend}; use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::{HeaderMetadata, TreeRoute}; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ generic::BlockId, @@ -111,8 +112,11 @@ impl FullChainApi { impl graph::ChainApi for FullChainApi where Block: BlockT, - Client: - ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, + Client: ProvideRuntimeApi + + BlockBackend + + BlockIdTo + + HeaderBackend + + HeaderMetadata, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { @@ -190,6 +194,14 @@ where ) -> Result::Header>, Self::Error> { self.client.header(*at).map_err(Into::into) } + + fn tree_route( + &self, + from: ::Hash, + to: ::Hash, + ) -> Result, Self::Error> { + sp_blockchain::tree_route::(&*self.client, from, to).map_err(Into::into) + } } /// Helper function to validate a transaction using a full chain API. @@ -202,8 +214,11 @@ fn validate_transaction_blocking( ) -> error::Result where Block: BlockT, - Client: - ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, + Client: ProvideRuntimeApi + + BlockBackend + + BlockIdTo + + HeaderBackend + + HeaderMetadata, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { @@ -264,8 +279,11 @@ where impl FullChainApi where Block: BlockT, - Client: - ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, + Client: ProvideRuntimeApi + + BlockBackend + + BlockIdTo + + HeaderBackend + + HeaderMetadata, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { diff --git a/client/transaction-pool/src/enactment_state.rs b/client/transaction-pool/src/enactment_state.rs new file mode 100644 index 0000000000000..242b557dfbbbd --- /dev/null +++ b/client/transaction-pool/src/enactment_state.rs @@ -0,0 +1,579 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate transaction pool implementation. + +use sc_transaction_pool_api::ChainEvent; +use sp_blockchain::TreeRoute; +use sp_runtime::traits::Block as BlockT; + +/// Helper struct for keeping track of the current state of processed new best +/// block and finalized events. The main purpose of keeping track of this state +/// is to figure out if a transaction pool enactment is needed or not. +/// +/// Given the following chain: +/// +/// B1-C1-D1-E1 +/// / +/// A +/// \ +/// B2-C2-D2-E2 +/// +/// Some scenarios and expected behavior for sequence of `NewBestBlock` (`nbb`) and `Finalized` +/// (`f`) events: +/// +/// - `nbb(C1)`, `f(C1)` -> false (enactment was already performed in `nbb(C1))` +/// - `f(C1)`, `nbb(C1)` -> false (enactment was already performed in `f(C1))` +/// - `f(C1)`, `nbb(D2)` -> false (enactment was already performed in `f(C1)`, +/// we should not retract finalized block) +/// - `f(C1)`, `f(C2)`, `nbb(C1)` -> false +/// - `nbb(C1)`, `nbb(C2)` -> true (switching fork is OK) +/// - `nbb(B1)`, `nbb(B2)` -> true +/// - `nbb(B1)`, `nbb(C1)`, `f(C1)` -> false (enactment was already performed in `nbb(B1)`) +/// - `nbb(C1)`, `f(B1)` -> false (enactment was already performed in `nbb(B2)`) +pub struct EnactmentState +where + Block: BlockT, +{ + recent_best_block: Block::Hash, + recent_finalized_block: Block::Hash, +} + +impl EnactmentState +where + Block: BlockT, +{ + /// Returns a new `EnactmentState` initialized with the given parameters. + pub fn new(recent_best_block: Block::Hash, recent_finalized_block: Block::Hash) -> Self { + EnactmentState { recent_best_block, recent_finalized_block } + } + + /// Returns the recently finalized block. + pub fn recent_finalized_block(&self) -> Block::Hash { + self.recent_finalized_block + } + + /// Updates the state according to the given `ChainEvent`, returning + /// `Some(tree_route)` with a tree route including the blocks that need to + /// be enacted/retracted. If no enactment is needed then `None` is returned. + pub fn update( + &mut self, + event: &ChainEvent, + tree_route: &F, + ) -> Result>, String> + where + F: Fn(Block::Hash, Block::Hash) -> Result, String>, + { + let (new_hash, finalized) = match event { + ChainEvent::NewBestBlock { hash, .. } => (*hash, false), + ChainEvent::Finalized { hash, .. } => (*hash, true), + }; + + // block was already finalized + if self.recent_finalized_block == new_hash { + log::debug!(target: "txpool", "handle_enactment: block already finalized"); + return Ok(None) + } + + // compute actual tree route from best_block to notified block, and use + // it instead of tree_route provided with event + let tree_route = tree_route(self.recent_best_block, new_hash)?; + + log::debug!( + target: "txpool", + "resolve hash:{:?} finalized:{:?} tree_route:{:?} best_block:{:?} finalized_block:{:?}", + new_hash, finalized, tree_route, self.recent_best_block, self.recent_finalized_block + ); + + // check if recently finalized block is on retracted path. this could be + // happening if we first received a finalization event and then a new + // best event for some old stale best head. 
+ if tree_route.retracted().iter().any(|x| x.hash == self.recent_finalized_block) { + log::debug!( + target: "txpool", + "Recently finalized block {} would be retracted by ChainEvent {}, skipping", + self.recent_finalized_block, new_hash + ); + return Ok(None) + } + + if finalized { + self.recent_finalized_block = new_hash; + + // if there are no enacted blocks in best_block -> hash tree_route, + // it means that block being finalized was already enacted (this + // case also covers best_block == new_hash), recent_best_block + // remains valid. + if tree_route.enacted().is_empty() { + log::trace!( + target: "txpool", + "handle_enactment: no newly enacted blocks since recent best block" + ); + return Ok(None) + } + + // otherwise enacted finalized block becomes best block... + } + + self.recent_best_block = new_hash; + + Ok(Some(tree_route)) + } +} + +#[cfg(test)] +mod enactment_state_tests { + use super::EnactmentState; + use sc_transaction_pool_api::ChainEvent; + use sp_blockchain::{HashAndNumber, TreeRoute}; + use std::sync::Arc; + use substrate_test_runtime_client::runtime::{Block, Hash}; + + // some helpers for convenient blocks' hash naming + fn a() -> HashAndNumber { + HashAndNumber { number: 1, hash: Hash::from([0xAA; 32]) } + } + fn b1() -> HashAndNumber { + HashAndNumber { number: 2, hash: Hash::from([0xB1; 32]) } + } + fn c1() -> HashAndNumber { + HashAndNumber { number: 3, hash: Hash::from([0xC1; 32]) } + } + fn d1() -> HashAndNumber { + HashAndNumber { number: 4, hash: Hash::from([0xD1; 32]) } + } + fn e1() -> HashAndNumber { + HashAndNumber { number: 5, hash: Hash::from([0xE1; 32]) } + } + fn b2() -> HashAndNumber { + HashAndNumber { number: 2, hash: Hash::from([0xB2; 32]) } + } + fn c2() -> HashAndNumber { + HashAndNumber { number: 3, hash: Hash::from([0xC2; 32]) } + } + fn d2() -> HashAndNumber { + HashAndNumber { number: 4, hash: Hash::from([0xD2; 32]) } + } + fn e2() -> HashAndNumber { + HashAndNumber { number: 5, hash: Hash::from([0xE2; 32]) } + } + + /// mock tree_route computing function for simple two-forks chain + fn tree_route(from: Hash, to: Hash) -> Result, String> { + let chain = vec![e1(), d1(), c1(), b1(), a(), b2(), c2(), d2(), e2()]; + let pivot = 4_usize; + + let from = chain + .iter() + .position(|bn| bn.hash == from) + .ok_or("existing block should be given")?; + let to = chain + .iter() + .position(|bn| bn.hash == to) + .ok_or("existing block should be given")?; + + // B1-C1-D1-E1 + // / + // A + // \ + // B2-C2-D2-E2 + // + // [E1 D1 C1 B1 A B2 C2 D2 E2] + + let vec: Vec> = if from < to { + chain.into_iter().skip(from).take(to - from + 1).collect() + } else { + chain.into_iter().skip(to).take(from - to + 1).rev().collect() + }; + + let pivot = if from <= pivot && to <= pivot { + if from < to { + to - from + } else { + 0 + } + } else if from >= pivot && to >= pivot { + if from < to { + 0 + } else { + from - to + } + } else { + if from < to { + pivot - from + } else { + from - pivot + } + }; + + Ok(TreeRoute::new(vec, pivot)) + } + + mod mock_tree_route_tests { + use super::*; + + /// asserts that tree routes are equal + fn assert_treeroute_eq(expected: TreeRoute, result: TreeRoute) { + assert_eq!(result.common_block().hash, expected.common_block().hash); + assert_eq!(result.enacted().len(), expected.enacted().len()); + assert_eq!(result.retracted().len(), expected.retracted().len()); + assert!(result + .enacted() + .iter() + .zip(expected.enacted().iter()) + .all(|(a, b)| a.hash == b.hash)); + assert!(result + .retracted() + .iter() + 
.zip(expected.retracted().iter()) + .all(|(a, b)| a.hash == b.hash)); + } + + // some tests for mock tree_route function + #[test] + fn tree_route_mock_test_01() { + let result = tree_route(b1().hash, a().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![b1(), a()], 1); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_02() { + let result = tree_route(a().hash, b1().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![a(), b1()], 0); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_03() { + let result = tree_route(a().hash, c2().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![a(), b2(), c2()], 0); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_04() { + let result = tree_route(e2().hash, a().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![e2(), d2(), c2(), b2(), a()], 4); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_05() { + let result = tree_route(d1().hash, b1().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![d1(), c1(), b1()], 2); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_06() { + let result = tree_route(d2().hash, b2().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![d2(), c2(), b2()], 2); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_07() { + let result = tree_route(b1().hash, d1().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![b1(), c1(), d1()], 0); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_08() { + let result = tree_route(b2().hash, d2().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![b2(), c2(), d2()], 0); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_09() { + let result = tree_route(e2().hash, e1().hash).expect("tree route exists"); + let expected = + TreeRoute::new(vec![e2(), d2(), c2(), b2(), a(), b1(), c1(), d1(), e1()], 4); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_10() { + let result = tree_route(e1().hash, e2().hash).expect("tree route exists"); + let expected = + TreeRoute::new(vec![e1(), d1(), c1(), b1(), a(), b2(), c2(), d2(), e2()], 4); + assert_treeroute_eq(result, expected); + } + #[test] + fn tree_route_mock_test_11() { + let result = tree_route(b1().hash, c2().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![b1(), a(), b2(), c2()], 1); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_12() { + let result = tree_route(d2().hash, b1().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![d2(), c2(), b2(), a(), b1()], 3); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_13() { + let result = tree_route(c2().hash, e1().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![c2(), b2(), a(), b1(), c1(), d1(), e1()], 2); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_14() { + let result = tree_route(b1().hash, b1().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![b1()], 0); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_15() { + let result = tree_route(b2().hash, b2().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![b2()], 
0); + assert_treeroute_eq(result, expected); + } + + #[test] + fn tree_route_mock_test_16() { + let result = tree_route(a().hash, a().hash).expect("tree route exists"); + let expected = TreeRoute::new(vec![a()], 0); + assert_treeroute_eq(result, expected); + } + } + + fn trigger_new_best_block( + state: &mut EnactmentState, + from: HashAndNumber, + acted_on: HashAndNumber, + ) -> bool { + let (from, acted_on) = (from.hash, acted_on.hash); + + let event_tree_route = tree_route(from, acted_on).expect("Tree route exists"); + + state + .update( + &ChainEvent::NewBestBlock { + hash: acted_on, + tree_route: Some(Arc::new(event_tree_route)), + }, + &tree_route, + ) + .unwrap() + .is_some() + } + + fn trigger_finalized( + state: &mut EnactmentState, + from: HashAndNumber, + acted_on: HashAndNumber, + ) -> bool { + let (from, acted_on) = (from.hash, acted_on.hash); + + let v = tree_route(from, acted_on) + .expect("Tree route exists") + .enacted() + .iter() + .map(|h| h.hash) + .collect::>(); + + state + .update(&ChainEvent::Finalized { hash: acted_on, tree_route: v.into() }, &tree_route) + .unwrap() + .is_some() + } + + fn assert_es_eq( + es: &EnactmentState, + expected_best_block: HashAndNumber, + expected_finalized_block: HashAndNumber, + ) { + assert_eq!(es.recent_best_block, expected_best_block.hash); + assert_eq!(es.recent_finalized_block, expected_finalized_block.hash); + } + + #[test] + fn test_enactment_helper() { + sp_tracing::try_init_simple(); + let mut es = EnactmentState::new(a().hash, a().hash); + + // B1-C1-D1-E1 + // / + // A + // \ + // B2-C2-D2-E2 + + let result = trigger_new_best_block(&mut es, a(), d1()); + assert!(result); + assert_es_eq(&es, d1(), a()); + + let result = trigger_new_best_block(&mut es, d1(), e1()); + assert!(result); + assert_es_eq(&es, e1(), a()); + + let result = trigger_finalized(&mut es, a(), d2()); + assert!(result); + assert_es_eq(&es, d2(), d2()); + + let result = trigger_new_best_block(&mut es, d2(), e1()); + assert_eq!(result, false); + assert_es_eq(&es, d2(), d2()); + + let result = trigger_finalized(&mut es, a(), b2()); + assert_eq!(result, false); + assert_es_eq(&es, d2(), d2()); + + let result = trigger_finalized(&mut es, a(), b1()); + assert_eq!(result, false); + assert_es_eq(&es, d2(), d2()); + + let result = trigger_new_best_block(&mut es, a(), d2()); + assert_eq!(result, false); + assert_es_eq(&es, d2(), d2()); + + let result = trigger_finalized(&mut es, a(), d2()); + assert_eq!(result, false); + assert_es_eq(&es, d2(), d2()); + + let result = trigger_new_best_block(&mut es, a(), c2()); + assert_eq!(result, false); + assert_es_eq(&es, d2(), d2()); + + let result = trigger_new_best_block(&mut es, a(), c1()); + assert_eq!(result, false); + assert_es_eq(&es, d2(), d2()); + + let result = trigger_new_best_block(&mut es, d2(), e2()); + assert!(result); + assert_es_eq(&es, e2(), d2()); + + let result = trigger_finalized(&mut es, d2(), e2()); + assert_eq!(result, false); + assert_es_eq(&es, e2(), e2()); + } + + #[test] + fn test_enactment_helper_2() { + sp_tracing::try_init_simple(); + let mut es = EnactmentState::new(a().hash, a().hash); + + // A-B1-C1-D1-E1 + + let result = trigger_new_best_block(&mut es, a(), b1()); + assert!(result); + assert_es_eq(&es, b1(), a()); + + let result = trigger_new_best_block(&mut es, b1(), c1()); + assert!(result); + assert_es_eq(&es, c1(), a()); + + let result = trigger_new_best_block(&mut es, c1(), d1()); + assert!(result); + assert_es_eq(&es, d1(), a()); + + let result = trigger_new_best_block(&mut es, d1(), e1()); 
+ assert!(result); + assert_es_eq(&es, e1(), a()); + + let result = trigger_finalized(&mut es, a(), c1()); + assert_eq!(result, false); + assert_es_eq(&es, e1(), c1()); + + let result = trigger_finalized(&mut es, c1(), e1()); + assert_eq!(result, false); + assert_es_eq(&es, e1(), e1()); + } + + #[test] + fn test_enactment_helper_3() { + sp_tracing::try_init_simple(); + let mut es = EnactmentState::new(a().hash, a().hash); + + // A-B1-C1-D1-E1 + + let result = trigger_new_best_block(&mut es, a(), e1()); + assert!(result); + assert_es_eq(&es, e1(), a()); + + let result = trigger_finalized(&mut es, a(), b1()); + assert_eq!(result, false); + assert_es_eq(&es, e1(), b1()); + } + + #[test] + fn test_enactment_helper_4() { + sp_tracing::try_init_simple(); + let mut es = EnactmentState::new(a().hash, a().hash); + + // A-B1-C1-D1-E1 + + let result = trigger_finalized(&mut es, a(), e1()); + assert!(result); + assert_es_eq(&es, e1(), e1()); + + let result = trigger_finalized(&mut es, e1(), b1()); + assert_eq!(result, false); + assert_es_eq(&es, e1(), e1()); + } + + #[test] + fn test_enactment_helper_5() { + sp_tracing::try_init_simple(); + let mut es = EnactmentState::new(a().hash, a().hash); + + // B1-C1-D1-E1 + // / + // A + // \ + // B2-C2-D2-E2 + + let result = trigger_finalized(&mut es, a(), e1()); + assert!(result); + assert_es_eq(&es, e1(), e1()); + + let result = trigger_finalized(&mut es, e1(), e2()); + assert_eq!(result, false); + assert_es_eq(&es, e1(), e1()); + } + + #[test] + fn test_enactment_helper_6() { + sp_tracing::try_init_simple(); + let mut es = EnactmentState::new(a().hash, a().hash); + + // A-B1-C1-D1-E1 + + let result = trigger_new_best_block(&mut es, a(), b1()); + assert!(result); + assert_es_eq(&es, b1(), a()); + + let result = trigger_finalized(&mut es, a(), d1()); + assert!(result); + assert_es_eq(&es, d1(), d1()); + + let result = trigger_new_best_block(&mut es, a(), e1()); + assert!(result); + assert_es_eq(&es, e1(), d1()); + + let result = trigger_new_best_block(&mut es, a(), c1()); + assert_eq!(result, false); + assert_es_eq(&es, e1(), d1()); + } +} diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 108ae791e37b3..9afceffe8dddf 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -20,6 +20,7 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; use futures::{channel::mpsc::Receiver, Future}; use sc_transaction_pool_api::error; +use sp_blockchain::TreeRoute; use sp_runtime::{ generic::BlockId, traits::{self, Block as BlockT, SaturatedConversion}, @@ -97,6 +98,13 @@ pub trait ChainApi: Send + Sync { &self, at: &BlockId, ) -> Result::Header>, Self::Error>; + + /// Compute a tree-route between two blocks. See [`TreeRoute`] for more details. + fn tree_route( + &self, + from: ::Hash, + to: ::Hash, + ) -> Result, Self::Error>; } /// Pool configuration options. 
diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 7b9ce9d6047c0..410ab662e3601 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -23,6 +23,7 @@ #![warn(unused_extern_crates)] mod api; +mod enactment_state; pub mod error; mod graph; mod metrics; @@ -31,6 +32,8 @@ mod revalidation; mod tests; pub use crate::api::FullChainApi; +use async_trait::async_trait; +use enactment_state::EnactmentState; use futures::{ channel::oneshot, future::{self, ready}, @@ -62,6 +65,8 @@ use std::time::Instant; use crate::metrics::MetricsLink as PrometheusMetrics; use prometheus_endpoint::Registry as PrometheusRegistry; +use sp_blockchain::{HashAndNumber, TreeRoute}; + type BoxedReadyIterator = Box>> + Send>; @@ -85,6 +90,7 @@ where revalidation_queue: Arc>, ready_poll: Arc, Block>>>, metrics: PrometheusMetrics, + enactment_state: Arc>>, } struct ReadyPoll { @@ -163,7 +169,11 @@ where PoolApi: graph::ChainApi + 'static, { /// Create new basic transaction pool with provided api, for tests. - pub fn new_test(pool_api: Arc) -> (Self, Pin + Send>>) { + pub fn new_test( + pool_api: Arc, + best_block_hash: Block::Hash, + finalized_hash: Block::Hash, + ) -> (Self, Pin + Send>>) { let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone())); let (revalidation_queue, background_task) = revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); @@ -175,6 +185,10 @@ where revalidation_strategy: Arc::new(Mutex::new(RevalidationStrategy::Always)), ready_poll: Default::default(), metrics: Default::default(), + enactment_state: Arc::new(Mutex::new(EnactmentState::new( + best_block_hash, + finalized_hash, + ))), }, background_task, ) @@ -190,6 +204,8 @@ where revalidation_type: RevalidationType, spawner: impl SpawnEssentialNamed, best_block_number: NumberFor, + best_block_hash: Block::Hash, + finalized_hash: Block::Hash, ) -> Self { let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { @@ -217,6 +233,10 @@ where })), ready_poll: Arc::new(Mutex::new(ReadyPoll::new(best_block_number))), metrics: PrometheusMetrics::new(prometheus), + enactment_state: Arc::new(Mutex::new(EnactmentState::new( + best_block_hash, + finalized_hash, + ))), } } @@ -358,6 +378,7 @@ where + sp_runtime::traits::BlockIdTo + sc_client_api::ExecutorProvider + sc_client_api::UsageProvider + + sp_blockchain::HeaderMetadata + Send + Sync + 'static, @@ -380,6 +401,8 @@ where RevalidationType::Full, spawner, client.usage_info().chain.best_number, + client.usage_info().chain.best_hash, + client.usage_info().chain.finalized_hash, )); // make transaction pool available for off-chain runtime calls. 
@@ -396,7 +419,8 @@ where Client: sp_api::ProvideRuntimeApi + sc_client_api::BlockBackend + sc_client_api::blockchain::HeaderBackend - + sp_runtime::traits::BlockIdTo, + + sp_runtime::traits::BlockIdTo + + sp_blockchain::HeaderMetadata, Client: Send + Sync + 'static, Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, { @@ -563,166 +587,190 @@ async fn prune_known_txs_for_block MaintainedTransactionPool for BasicPool +impl BasicPool where Block: BlockT, PoolApi: 'static + graph::ChainApi, { - fn maintain(&self, event: ChainEvent) -> Pin + Send>> { - match event { - ChainEvent::NewBestBlock { hash, tree_route } => { - let pool = self.pool.clone(); - let api = self.api.clone(); - - let id = BlockId::hash(hash); - let block_number = match api.block_id_to_number(&id) { - Ok(Some(number)) => number, - _ => { - log::trace!( + /// Handles enactment and retraction of blocks, prunes stale transactions + /// (that have already been enacted) and resubmits transactions that were + /// retracted. + async fn handle_enactment(&self, tree_route: TreeRoute) { + log::trace!(target: "txpool", "handle_enactment tree_route: {tree_route:?}"); + let pool = self.pool.clone(); + let api = self.api.clone(); + + let (hash, block_number) = match tree_route.last() { + Some(HashAndNumber { hash, number }) => (hash, number), + None => { + log::warn!( + target: "txpool", + "Skipping ChainEvent - no last block in tree route {:?}", + tree_route, + ); + return + }, + }; + + let next_action = self.revalidation_strategy.lock().next( + *block_number, + Some(std::time::Duration::from_secs(60)), + Some(20u32.into()), + ); + + // We keep track of everything we prune so that later we won't add + // transactions with those hashes from the retracted blocks. + let mut pruned_log = HashSet::>::new(); + + // If there is a tree route, we use this to prune known tx based on the enacted + // blocks. Before pruning enacted transactions, we inform the listeners about + // retracted blocks and their transactions. This order is important, because + // if we enact and retract the same transaction at the same time, we want to + // send first the retract and than the prune event. 
+ for retracted in tree_route.retracted() { + // notify txs awaiting finality that it has been retracted + pool.validated_pool().on_block_retracted(retracted.hash); + } + + future::join_all( + tree_route + .enacted() + .iter() + .map(|h| prune_known_txs_for_block(BlockId::Hash(h.hash), &*api, &*pool)), + ) + .await + .into_iter() + .for_each(|enacted_log| { + pruned_log.extend(enacted_log); + }); + + self.metrics + .report(|metrics| metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64)); + + if next_action.resubmit { + let mut resubmit_transactions = Vec::new(); + + for retracted in tree_route.retracted() { + let hash = retracted.hash; + + let block_transactions = api + .block_body(&BlockId::hash(hash)) + .await + .unwrap_or_else(|e| { + log::warn!("Failed to fetch block body: {}", e); + None + }) + .unwrap_or_default() + .into_iter() + .filter(|tx| tx.is_signed().unwrap_or(true)); + + let mut resubmitted_to_report = 0; + + resubmit_transactions.extend(block_transactions.into_iter().filter(|tx| { + let tx_hash = pool.hash_of(tx); + let contains = pruned_log.contains(&tx_hash); + + // need to count all transactions, not just filtered, here + resubmitted_to_report += 1; + + if !contains { + log::debug!( target: "txpool", - "Skipping chain event - no number for that block {:?}", - id, + "[{:?}]: Resubmitting from retracted block {:?}", + tx_hash, + hash, ); - return Box::pin(ready(())) - }, - }; - - let next_action = self.revalidation_strategy.lock().next( - block_number, - Some(std::time::Duration::from_secs(60)), - Some(20u32.into()), - ); - let revalidation_strategy = self.revalidation_strategy.clone(); - let revalidation_queue = self.revalidation_queue.clone(); - let ready_poll = self.ready_poll.clone(); - let metrics = self.metrics.clone(); - - async move { - // We keep track of everything we prune so that later we won't add - // transactions with those hashes from the retracted blocks. - let mut pruned_log = HashSet::>::new(); - - // If there is a tree route, we use this to prune known tx based on the enacted - // blocks. Before pruning enacted transactions, we inform the listeners about - // retracted blocks and their transactions. This order is important, because - // if we enact and retract the same transaction at the same time, we want to - // send first the retract and than the prune event. 
- if let Some(ref tree_route) = tree_route { - for retracted in tree_route.retracted() { - // notify txs awaiting finality that it has been retracted - pool.validated_pool().on_block_retracted(retracted.hash); - } - - future::join_all(tree_route.enacted().iter().map(|h| { - prune_known_txs_for_block(BlockId::Hash(h.hash), &*api, &*pool) - })) - .await - .into_iter() - .for_each(|enacted_log| { - pruned_log.extend(enacted_log); - }) } + !contains + })); - pruned_log.extend(prune_known_txs_for_block(id, &*api, &*pool).await); - - metrics.report(|metrics| { - metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64) - }); - - if let (true, Some(tree_route)) = (next_action.resubmit, tree_route) { - let mut resubmit_transactions = Vec::new(); - - for retracted in tree_route.retracted() { - let hash = retracted.hash; - - let block_transactions = api - .block_body(&BlockId::hash(hash)) - .await - .unwrap_or_else(|e| { - log::warn!("Failed to fetch block body: {}", e); - None - }) - .unwrap_or_default() - .into_iter() - .filter(|tx| tx.is_signed().unwrap_or(true)); - - let mut resubmitted_to_report = 0; - - resubmit_transactions.extend(block_transactions.into_iter().filter( - |tx| { - let tx_hash = pool.hash_of(tx); - let contains = pruned_log.contains(&tx_hash); - - // need to count all transactions, not just filtered, here - resubmitted_to_report += 1; - - if !contains { - log::debug!( - target: "txpool", - "[{:?}]: Resubmitting from retracted block {:?}", - tx_hash, - hash, - ); - } - !contains - }, - )); - - metrics.report(|metrics| { - metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) - }); - } - - if let Err(e) = pool - .resubmit_at( - &id, - // These transactions are coming from retracted blocks, we should - // simply consider them external. - TransactionSource::External, - resubmit_transactions, - ) - .await - { - log::debug!( - target: "txpool", - "[{:?}] Error re-submitting transactions: {}", - id, - e, - ) - } - } + self.metrics.report(|metrics| { + metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) + }); + } - let extra_pool = pool.clone(); - // After #5200 lands, this arguably might be moved to the - // handler of "all blocks notification". - ready_poll.lock().trigger(block_number, move || { - Box::new(extra_pool.validated_pool().ready()) - }); + if let Err(e) = pool + .resubmit_at( + &BlockId::Hash(*hash), + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TransactionSource::External, + resubmit_transactions, + ) + .await + { + log::debug!( + target: "txpool", + "[{:?}] Error re-submitting transactions: {}", + hash, + e, + ) + } + } - if next_action.revalidate { - let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect(); - revalidation_queue.revalidate_later(block_number, hashes).await; + let extra_pool = pool.clone(); + // After #5200 lands, this arguably might be moved to the + // handler of "all blocks notification". 
+ self.ready_poll + .lock() + .trigger(*block_number, move || Box::new(extra_pool.validated_pool().ready())); - revalidation_strategy.lock().clear(); - } - } - .boxed() + if next_action.revalidate { + let hashes = pool.validated_pool().ready().map(|tx| tx.hash).collect(); + self.revalidation_queue.revalidate_later(*block_number, hashes).await; + + self.revalidation_strategy.lock().clear(); + } + } +} + +#[async_trait] +impl MaintainedTransactionPool for BasicPool +where + Block: BlockT, + PoolApi: 'static + graph::ChainApi, +{ + async fn maintain(&self, event: ChainEvent) { + let prev_finalized_block = self.enactment_state.lock().recent_finalized_block(); + let compute_tree_route = |from, to| -> Result, String> { + match self.api.tree_route(from, to) { + Ok(tree_route) => Ok(tree_route), + Err(e) => + return Err(format!( + "Error occurred while computing tree_route from {from:?} to {to:?}: {e}" + )), + } + }; + + let result = self.enactment_state.lock().update(&event, &compute_tree_route); + + match result { + Err(msg) => { + log::warn!(target: "txpool", "{msg}"); + return }, - ChainEvent::Finalized { hash, tree_route } => { - let pool = self.pool.clone(); - async move { - for hash in tree_route.iter().chain(&[hash]) { - if let Err(e) = pool.validated_pool().on_block_finalized(*hash).await { - log::warn!( - target: "txpool", - "Error [{}] occurred while attempting to notify watchers of finalization {}", - e, hash - ) - } - } - } - .boxed() + Ok(None) => {}, + Ok(Some(tree_route)) => { + self.handle_enactment(tree_route).await; }, + }; + + if let ChainEvent::Finalized { hash, tree_route } = event { + log::trace!( + target: "txpool", + "on-finalized enacted: {tree_route:?}, previously finalized: \ + {prev_finalized_block:?}", + ); + + for hash in tree_route.iter().chain(std::iter::once(&hash)) { + if let Err(e) = self.pool.validated_pool().on_block_finalized(*hash).await { + log::warn!( + target: "txpool", + "Error occurred while attempting to notify watchers about finalization {}: {}", + hash, e + ) + } + } } } } diff --git a/client/transaction-pool/src/tests.rs b/client/transaction-pool/src/tests.rs index 79142e16a1b36..ce2c7872e32bb 100644 --- a/client/transaction-pool/src/tests.rs +++ b/client/transaction-pool/src/tests.rs @@ -22,6 +22,7 @@ use crate::graph::{BlockHash, ChainApi, ExtrinsicFor, NumberFor, Pool}; use codec::Encode; use parking_lot::Mutex; use sc_transaction_pool_api::error; +use sp_blockchain::TreeRoute; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Hash}, @@ -173,6 +174,14 @@ impl ChainApi for TestApi { ) -> Result::Header>, Self::Error> { Ok(None) } + + fn tree_route( + &self, + _from: ::Hash, + _to: ::Hash, + ) -> Result, Self::Error> { + unimplemented!() + } } pub(crate) fn uxt(transfer: Transfer) -> Extrinsic { diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index be75523c1230f..5590051768e9a 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -30,13 +30,14 @@ use sc_transaction_pool::*; use sc_transaction_pool_api::{ ChainEvent, MaintainedTransactionPool, TransactionPool, TransactionStatus, }; +use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_runtime::{ generic::BlockId, traits::Block as _, transaction_validity::{InvalidTransaction, TransactionSource, ValidTransaction}, }; -use std::{collections::BTreeSet, sync::Arc}; +use std::{collections::BTreeSet, pin::Pin, sync::Arc}; use substrate_test_runtime_client::{ runtime::{Block, Extrinsic, 
Hash, Header, Index, Transfer}, AccountKeyring::*, @@ -50,13 +51,32 @@ fn pool() -> Pool { fn maintained_pool() -> (BasicPool, Arc, futures::executor::ThreadPool) { let api = Arc::new(TestApi::with_alice_nonce(209)); - let (pool, background_task) = BasicPool::new_test(api.clone()); + let (pool, background_task) = create_basic_pool_with_genesis(api.clone()); let thread_pool = futures::executor::ThreadPool::new().unwrap(); thread_pool.spawn_ok(background_task); (pool, api, thread_pool) } +fn create_basic_pool_with_genesis( + test_api: Arc, +) -> (BasicPool, Pin + Send>>) { + let genesis_hash = { + test_api + .chain() + .read() + .block_by_number + .get(&0) + .map(|blocks| blocks[0].0.header.hash()) + .expect("there is block 0. qed") + }; + BasicPool::new_test(test_api, genesis_hash, genesis_hash) +} + +fn create_basic_pool(test_api: TestApi) -> BasicPool { + create_basic_pool_with_genesis(Arc::from(test_api)).0 +} + const SOURCE: TransactionSource = TransactionSource::External; #[test] @@ -436,7 +456,7 @@ fn finalization() { let xt = uxt(Alice, 209); let api = TestApi::with_alice_nonce(209); api.push_block(1, vec![], true); - let (pool, _background) = BasicPool::new_test(api.into()); + let pool = create_basic_pool(api); let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) .expect("1. Imported"); pool.api().push_block(2, vec![xt.clone()], true); @@ -459,9 +479,9 @@ fn finalization() { fn fork_aware_finalization() { let api = TestApi::empty(); // starting block A1 (last finalized.) - api.push_block(1, vec![], true); + let a_header = api.push_block(1, vec![], true); - let (pool, _background) = BasicPool::new_test(api.into()); + let pool = create_basic_pool(api); let mut canon_watchers = vec![]; let from_alice = uxt(Alice, 1); @@ -476,10 +496,13 @@ fn fork_aware_finalization() { let from_dave_watcher; let from_bob_watcher; let b1; + let c1; let d1; let c2; let d2; + block_on(pool.maintain(block_event(a_header))); + // block B1 { let watcher = @@ -489,6 +512,7 @@ fn fork_aware_finalization() { canon_watchers.push((watcher, header.hash())); assert_eq!(pool.status().ready, 1); + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; b1 = header.hash(); block_on(pool.maintain(event)); @@ -504,6 +528,7 @@ fn fork_aware_finalization() { block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone())) .expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); + log::trace!(target:"txpool", ">> C2: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; c2 = header.hash(); block_on(pool.maintain(event)); @@ -518,6 +543,7 @@ fn fork_aware_finalization() { assert_eq!(pool.status().ready, 1); let header = pool.api().push_block_with_parent(c2, vec![from_bob.clone()], true); + log::trace!(target:"txpool", ">> D2: {:?} {:?}", header.hash(), header); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; d2 = header.hash(); block_on(pool.maintain(event)); @@ -530,8 +556,9 @@ fn fork_aware_finalization() { block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) .expect("1.Imported"); assert_eq!(pool.status().ready, 1); - let header = pool.api().push_block(3, vec![from_charlie.clone()], true); - + let header = pool.api().push_block_with_parent(b1, vec![from_charlie.clone()], true); + log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); + c1 = header.hash(); canon_watchers.push((watcher, header.hash())); let event = block_event_with_retracted(header.clone(), d2, pool.api()); block_on(pool.maintain(event)); @@ -547,11 +574,12 @@ fn fork_aware_finalization() { let w = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) .expect("1. Imported"); assert_eq!(pool.status().ready, 3); - let header = pool.api().push_block(4, vec![xt.clone()], true); + let header = pool.api().push_block_with_parent(c1, vec![xt.clone()], true); + log::trace!(target:"txpool", ">> D1: {:?} {:?}", header.hash(), header); + d1 = header.hash(); canon_watchers.push((w, header.hash())); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; - d1 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); let event = ChainEvent::Finalized { hash: d1, tree_route: Arc::from(vec![]) }; @@ -560,9 +588,10 @@ fn fork_aware_finalization() { let e1; - // block e1 + // block E1 { - let header = pool.api().push_block(5, vec![from_dave, from_bob], true); + let header = pool.api().push_block_with_parent(d1, vec![from_dave, from_bob], true); + log::trace!(target:"txpool", ">> E1: {:?} {:?}", header.hash(), header); e1 = header.hash(); let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); @@ -610,7 +639,7 @@ fn prune_and_retract_tx_at_same_time() { // starting block A1 (last finalized.) api.push_block(1, vec![], true); - let (pool, _background) = BasicPool::new_test(api.into()); + let pool = create_basic_pool(api); let from_alice = uxt(Alice, 1); pool.api().increment_nonce(Alice.into()); @@ -676,7 +705,7 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { // starting block A1 (last finalized.) api.push_block(1, vec![], true); - let (pool, _background) = BasicPool::new_test(api.into()); + let pool = create_basic_pool(api); let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); @@ -721,7 +750,7 @@ fn resubmit_from_retracted_fork() { // starting block A1 (last finalized.) 
api.push_block(1, vec![], true); - let (pool, _background) = BasicPool::new_test(api.into()); + let pool = create_basic_pool(api); let tx0 = uxt(Alice, 1); let tx1 = uxt(Dave, 2); @@ -866,13 +895,14 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { #[test] fn should_not_accept_old_signatures() { let client = Arc::new(substrate_test_runtime_client::new()); - + let best_hash = client.info().best_hash; + let finalized_hash = client.info().finalized_hash; let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new( - client, - None, - &sp_core::testing::TaskExecutor::new(), - ))) + BasicPool::new_test( + Arc::new(FullChainApi::new(client, None, &sp_core::testing::TaskExecutor::new())), + best_hash, + finalized_hash, + ) .0, ); @@ -908,12 +938,19 @@ fn should_not_accept_old_signatures() { fn import_notification_to_pool_maintain_works() { let mut client = Arc::new(substrate_test_runtime_client::new()); + let best_hash = client.info().best_hash; + let finalized_hash = client.info().finalized_hash; + let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new( - client.clone(), - None, - &sp_core::testing::TaskExecutor::new(), - ))) + BasicPool::new_test( + Arc::new(FullChainApi::new( + client.clone(), + None, + &sp_core::testing::TaskExecutor::new(), + )), + best_hash, + finalized_hash, + ) .0, ); @@ -998,3 +1035,540 @@ fn stale_transactions_are_pruned() { assert_eq!(pool.status().future, 0); assert_eq!(pool.status().ready, 0); } + +#[test] +fn finalized_only_handled_correctly() { + sp_tracing::try_init_simple(); + let xt = uxt(Alice, 209); + + let (pool, api, _guard) = maintained_pool(); + + let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) + .expect("1. Imported"); + assert_eq!(pool.status().ready, 1); + + let header = api.push_block(1, vec![xt], false); + + let event = + ChainEvent::Finalized { hash: header.clone().hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + + assert_eq!(pool.status().ready, 0); + + { + let mut stream = futures::executor::block_on_stream(watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.clone().hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn best_block_after_finalized_handled_correctly() { + sp_tracing::try_init_simple(); + let xt = uxt(Alice, 209); + + let (pool, api, _guard) = maintained_pool(); + + let watcher = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, xt.clone())) + .expect("1. Imported"); + assert_eq!(pool.status().ready, 1); + + let header = api.push_block(1, vec![xt], true); + + let event = + ChainEvent::Finalized { hash: header.clone().hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + block_on(pool.maintain(block_event(header.clone()))); + + assert_eq!(pool.status().ready, 0); + + { + let mut stream = futures::executor::block_on_stream(watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.clone().hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn switching_fork_with_finalized_works() { + sp_tracing::try_init_simple(); + let api = TestApi::empty(); + // starting block A1 (last finalized.) 
+ let a_header = api.push_block(1, vec![], true); + + let pool = create_basic_pool(api); + + let from_alice = uxt(Alice, 1); + let from_bob = uxt(Bob, 2); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Bob.into()); + + let from_alice_watcher; + let from_bob_watcher; + let b1_header; + let b2_header; + + // block B1 + { + from_alice_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + assert_eq!(pool.status().ready, 1); + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); + b1_header = header; + } + + // block B2 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. Imported"); + let header = pool.api().push_block_with_parent( + a_header.hash(), + vec![from_alice.clone(), from_bob.clone()], + true, + ); + assert_eq!(pool.status().ready, 2); + + log::trace!(target:"txpool", ">> B2: {:?} {:?}", header.hash(), header); + b2_header = header; + } + + { + let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 1); + } + + { + let event = ChainEvent::Finalized { hash: b2_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + let mut stream = futures::executor::block_on_stream(from_alice_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_bob_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn switching_fork_multiple_times_works() { + sp_tracing::try_init_simple(); + let api = TestApi::empty(); + // starting block A1 (last finalized.) + let a_header = api.push_block(1, vec![], true); + + let pool = create_basic_pool(api); + + let from_alice = uxt(Alice, 1); + let from_bob = uxt(Bob, 2); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Bob.into()); + + let from_alice_watcher; + let from_bob_watcher; + let b1_header; + let b2_header; + + // block B1 + { + from_alice_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + assert_eq!(pool.status().ready, 1); + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); + b1_header = header; + } + + // block B2 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. 
Imported"); + let header = pool.api().push_block_with_parent( + a_header.hash(), + vec![from_alice.clone(), from_bob.clone()], + true, + ); + assert_eq!(pool.status().ready, 2); + + log::trace!(target:"txpool", ">> B2: {:?} {:?}", header.hash(), header); + b2_header = header; + } + + { + // phase-0 + let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 1); + } + + { + // phase-1 + let event = block_event_with_retracted(b2_header.clone(), b1_header.hash(), pool.api()); + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + } + + { + // phase-2 + let event = block_event_with_retracted(b1_header.clone(), b2_header.hash(), pool.api()); + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 1); + } + + { + // phase-3 + let event = ChainEvent::Finalized { hash: b2_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + let mut stream = futures::executor::block_on_stream(from_alice_watcher); + //phase-0 + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + //phase-1 + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + //phase-2 + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + //phase-3 + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_bob_watcher); + //phase-1 + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + //phase-2 + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + //phase-3 + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn two_blocks_delayed_finalization_works() { + sp_tracing::try_init_simple(); + let api = TestApi::empty(); + // starting block A1 (last finalized.) + let a_header = api.push_block(1, vec![], true); + + let pool = create_basic_pool(api); + + let from_alice = uxt(Alice, 1); + let from_bob = uxt(Bob, 2); + let from_charlie = uxt(Charlie, 3); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Bob.into()); + pool.api().increment_nonce(Charlie.into()); + + let from_alice_watcher; + let from_bob_watcher; + let from_charlie_watcher; + let b1_header; + let c1_header; + let d1_header; + + // block B1 + { + from_alice_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. 
Imported"); + let header = + pool.api() + .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + assert_eq!(pool.status().ready, 1); + + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); + b1_header = header; + } + + // block C1 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + assert_eq!(pool.status().ready, 2); + + log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); + c1_header = header; + } + + // block D1 + { + from_charlie_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(c1_header.hash(), vec![from_charlie.clone()], true); + assert_eq!(pool.status().ready, 3); + + log::trace!(target:"txpool", ">> D1: {:?} {:?}", header.hash(), header); + d1_header = header; + } + + { + let event = ChainEvent::Finalized { hash: a_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 3); + } + + { + let event = ChainEvent::NewBestBlock { hash: d1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + } + + { + let event = ChainEvent::Finalized { + hash: c1_header.hash(), + tree_route: Arc::from(vec![b1_header.hash()]), + }; + block_on(pool.maintain(event)); + } + + // this is to collect events from_charlie_watcher and make sure nothing was retracted + { + let event = ChainEvent::Finalized { hash: d1_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + let mut stream = futures::executor::block_on_stream(from_alice_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_bob_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_charlie_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(d1_header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn delayed_finalization_does_not_retract() { + sp_tracing::try_init_simple(); + let api = TestApi::empty(); + // starting block A1 (last finalized.) + let a_header = api.push_block(1, vec![], true); + + let pool = create_basic_pool(api); + + let from_alice = uxt(Alice, 1); + let from_bob = uxt(Bob, 2); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Bob.into()); + + let from_alice_watcher; + let from_bob_watcher; + let b1_header; + let c1_header; + + // block B1 + { + from_alice_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. 
Imported"); + let header = + pool.api() + .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + assert_eq!(pool.status().ready, 1); + + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); + b1_header = header; + } + + // block C1 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + assert_eq!(pool.status().ready, 2); + + log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); + c1_header = header; + } + + { + // phase-0 + let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 1); + } + + { + // phase-1 + let event = ChainEvent::NewBestBlock { hash: c1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + } + + { + // phase-2 + let event = ChainEvent::Finalized { hash: b1_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + // phase-3 + let event = ChainEvent::Finalized { hash: c1_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + let mut stream = futures::executor::block_on_stream(from_alice_watcher); + //phase-0 + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + //phase-2 + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_bob_watcher); + //phase-0 + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + //phase-1 + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); + //phase-3 + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), None); + } +} + +#[test] +fn best_block_after_finalization_does_not_retract() { + sp_tracing::try_init_simple(); + let api = TestApi::empty(); + // starting block A1 (last finalized.) + let a_header = api.push_block(1, vec![], true); + + let pool = create_basic_pool(api); + + let from_alice = uxt(Alice, 1); + let from_bob = uxt(Bob, 2); + pool.api().increment_nonce(Alice.into()); + pool.api().increment_nonce(Bob.into()); + + let from_alice_watcher; + let from_bob_watcher; + let b1_header; + let c1_header; + + // block B1 + { + from_alice_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. Imported"); + let header = + pool.api() + .push_block_with_parent(a_header.hash(), vec![from_alice.clone()], true); + assert_eq!(pool.status().ready, 1); + + log::trace!(target:"txpool", ">> B1: {:?} {:?}", header.hash(), header); + b1_header = header; + } + + // block C1 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. 
Imported"); + let header = + pool.api() + .push_block_with_parent(b1_header.hash(), vec![from_bob.clone()], true); + assert_eq!(pool.status().ready, 2); + + log::trace!(target:"txpool", ">> C1: {:?} {:?}", header.hash(), header); + c1_header = header; + } + + { + let event = ChainEvent::Finalized { hash: a_header.hash(), tree_route: Arc::from(vec![]) }; + block_on(pool.maintain(event)); + } + + { + let event = ChainEvent::Finalized { + hash: c1_header.hash(), + tree_route: Arc::from(vec![a_header.hash(), b1_header.hash()]), + }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + } + + { + let event = ChainEvent::NewBestBlock { hash: b1_header.hash(), tree_route: None }; + block_on(pool.maintain(event)); + } + + { + let mut stream = futures::executor::block_on_stream(from_alice_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_bob_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), None); + } +} diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs index 46477a75b8a1f..0ced264d5c786 100644 --- a/primitives/blockchain/src/header_metadata.rs +++ b/primitives/blockchain/src/header_metadata.rs @@ -176,6 +176,13 @@ pub struct TreeRoute { } impl TreeRoute { + /// Creates a new `TreeRoute`. + /// + /// It is required that `pivot >= route.len()`, otherwise it may panics. + pub fn new(route: Vec>, pivot: usize) -> Self { + TreeRoute { route, pivot } + } + /// Get a slice of all retracted blocks in reverse order (towards common ancestor). 
pub fn retracted(&self) -> &[HashAndNumber] { &self.route[..self.pivot] diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index 4008427623499..dc8be78aa7397 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -22,7 +22,7 @@ use codec::Encode; use futures::future::ready; use parking_lot::RwLock; -use sp_blockchain::CachedHeaderMetadata; +use sp_blockchain::{CachedHeaderMetadata, TreeRoute}; use sp_runtime::{ generic::{self, BlockId}, traits::{ @@ -335,6 +335,14 @@ impl sc_transaction_pool::ChainApi for TestApi { self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()), }) } + + fn tree_route( + &self, + from: ::Hash, + to: ::Hash, + ) -> Result, Self::Error> { + sp_blockchain::tree_route::(self, from, to).map_err(Into::into) + } } impl sp_blockchain::HeaderMetadata for TestApi { From 88db102213b06be4a25e17d195589c57cd4ed435 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 12 Oct 2022 10:06:22 +0200 Subject: [PATCH 233/348] tx-pool: failing tests fixed (#12481) --- client/transaction-pool/tests/pool.rs | 62 +++++++++++++-------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index 5590051768e9a..27891432753a4 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -1058,8 +1058,8 @@ fn finalized_only_handled_correctly() { { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.clone().hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((header.clone().hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((header.hash(), 0)))); assert_eq!(stream.next(), None); } } @@ -1087,8 +1087,8 @@ fn best_block_after_finalized_handled_correctly() { { let mut stream = futures::executor::block_on_stream(watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.clone().hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((header.clone().hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((header.hash(), 0)))); assert_eq!(stream.next(), None); } } @@ -1155,18 +1155,18 @@ fn switching_fork_with_finalized_works() { { let mut stream = futures::executor::block_on_stream(from_alice_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); 
assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 1)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 1)))); assert_eq!(stream.next(), None); } } @@ -1250,17 +1250,17 @@ fn switching_fork_multiple_times_works() { let mut stream = futures::executor::block_on_stream(from_alice_watcher); //phase-0 assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); //phase-1 assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 0)))); //phase-2 assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); //phase-3 assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 0)))); assert_eq!(stream.next(), None); } @@ -1268,13 +1268,13 @@ fn switching_fork_multiple_times_works() { let mut stream = futures::executor::block_on_stream(from_bob_watcher); //phase-1 assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 1)))); //phase-2 assert_eq!(stream.next(), Some(TransactionStatus::Retracted(b2_header.hash()))); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); //phase-3 - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b2_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b2_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b2_header.hash(), 1)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b2_header.hash(), 1)))); assert_eq!(stream.next(), None); } } @@ -1373,24 +1373,24 @@ fn two_blocks_delayed_finalization_works() { { let mut stream = futures::executor::block_on_stream(from_alice_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b1_header.hash(), 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); - assert_eq!(stream.next(), 
Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c1_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((c1_header.hash(), 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_charlie_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(d1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((d1_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((d1_header.hash(), 0)))); assert_eq!(stream.next(), None); } } @@ -1472,9 +1472,9 @@ fn delayed_finalization_does_not_retract() { let mut stream = futures::executor::block_on_stream(from_alice_watcher); //phase-0 assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); //phase-2 - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b1_header.hash(), 0)))); assert_eq!(stream.next(), None); } @@ -1483,9 +1483,9 @@ fn delayed_finalization_does_not_retract() { //phase-0 assert_eq!(stream.next(), Some(TransactionStatus::Ready)); //phase-1 - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c1_header.hash(), 0)))); //phase-3 - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((c1_header.hash(), 0)))); assert_eq!(stream.next(), None); } } @@ -1559,16 +1559,16 @@ fn best_block_after_finalization_does_not_retract() { { let mut stream = futures::executor::block_on_stream(from_alice_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(b1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(b1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((b1_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((b1_header.hash(), 0)))); assert_eq!(stream.next(), None); } { let mut stream = futures::executor::block_on_stream(from_bob_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c1_header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(c1_header.hash()))); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock((c1_header.hash(), 0)))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized((c1_header.hash(), 0)))); assert_eq!(stream.next(), None); } } From 06a9f0a5da9681287f8a1c7b53497921238ece81 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 12 Oct 2022 15:01:54 +0200 Subject: [PATCH 234/348] Clarify the "direction" field of block requests (#12438) --- client/network/sync/src/schema/api.v1.proto | 1 + 1 file changed, 1 insertion(+) diff --git a/client/network/sync/src/schema/api.v1.proto b/client/network/sync/src/schema/api.v1.proto index 203b157470a58..1490f61a41ddd 100644 --- a/client/network/sync/src/schema/api.v1.proto +++ b/client/network/sync/src/schema/api.v1.proto @@ -24,6 +24,7 @@ 
message BlockRequest { bytes number = 3; } // Sequence direction. + // If missing, should be interpreted as "Ascending". Direction direction = 5; // Maximum number of blocks to return. An implementation defined maximum is used when unspecified. uint32 max_blocks = 6; // optional From f8a0b7a9569f9554db157ec9cc6b6a08dadc6eb6 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 12 Oct 2022 18:10:31 +0200 Subject: [PATCH 235/348] BlockId::Number refactor: trivial changes to BlockId::Hash (#12471) * Trivial BlockId::Number => Hash * missed BlockId::Hash added --- client/consensus/aura/src/lib.rs | 2 +- client/db/benches/state_access.rs | 2 +- test-utils/runtime/src/lib.rs | 6 +++--- utils/frame/benchmarking-cli/src/storage/cmd.rs | 2 +- utils/frame/benchmarking-cli/src/storage/read.rs | 2 +- utils/frame/benchmarking-cli/src/storage/write.rs | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index a0eed6e35310e..734cecca9b30b 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -769,7 +769,7 @@ mod tests { assert_eq!(client.chain_info().best_number, 0); assert_eq!( - authorities(&client, &BlockId::Number(0)).unwrap(), + authorities(&client, &BlockId::Hash(client.chain_info().best_hash)).unwrap(), vec![ Keyring::Alice.public().into(), Keyring::Bob.public().into(), diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index 714dda82d61b7..4f4c10bcc8f53 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -84,7 +84,7 @@ fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 .map(|(k, v)| (k.clone(), Some(v.clone()))) .collect::>(); - let (state_root, tx) = db.state_at(BlockId::Number(number - 1)).unwrap().storage_root( + let (state_root, tx) = db.state_at(BlockId::Hash(parent_hash)).unwrap().storage_root( changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), StateVersion::V1, ); diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 3db0e5510057b..8bda4ea602428 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -1340,7 +1340,7 @@ mod tests { .set_execution_strategy(ExecutionStrategy::AlwaysWasm) .set_heap_pages(8) .build(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); // Try to allocate 1024k of memory on heap. This is going to fail since it is twice larger // than the heap. 
@@ -1369,7 +1369,7 @@ mod tests { let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); runtime_api.test_storage(&block_id).unwrap(); } @@ -1396,7 +1396,7 @@ mod tests { let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); runtime_api.test_witness(&block_id, proof, root).unwrap(); } diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index 1d91e8f0b0517..a4c4188dfe073 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -191,7 +191,7 @@ impl StorageCmd { B: BlockT + Debug, BA: ClientBackend, { - let block = BlockId::Number(client.usage_info().chain.best_number); + let block = BlockId::Hash(client.usage_info().chain.best_hash); let empty_prefix = StorageKey(Vec::new()); let mut keys = client.storage_keys(&block, &empty_prefix)?; let (mut rng, _) = new_rng(None); diff --git a/utils/frame/benchmarking-cli/src/storage/read.rs b/utils/frame/benchmarking-cli/src/storage/read.rs index cba318f87ea98..5e8a310ea5c5a 100644 --- a/utils/frame/benchmarking-cli/src/storage/read.rs +++ b/utils/frame/benchmarking-cli/src/storage/read.rs @@ -41,7 +41,7 @@ impl StorageCmd { <::Header as HeaderT>::Number: From, { let mut record = BenchRecord::default(); - let block = BlockId::Number(client.usage_info().chain.best_number); + let block = BlockId::Hash(client.usage_info().chain.best_hash); info!("Preparing keys from block {}", block); // Load all keys and randomly shuffle them. diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index 9a3821a7095f8..0ef2a2f9ae113 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -57,7 +57,7 @@ impl StorageCmd { // Store the time that it took to write each value. 
let mut record = BenchRecord::default(); - let block = BlockId::Number(client.usage_info().chain.best_number); + let block = BlockId::Hash(client.usage_info().chain.best_hash); let header = client.header(block)?.ok_or("Header not found")?; let original_root = *header.state_root(); let trie = DbStateBuilder::::new(storage.clone(), original_root).build(); From b324e511771b4e2d809d8319b98f3e36083c9ee2 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Wed, 12 Oct 2022 12:32:10 -0400 Subject: [PATCH 236/348] Dont use benchmark range on constant functions (#12456) * dont use benchmark range on constant function * update weights * fix * new weights * Update frame/examples/basic/src/benchmarking.rs Co-authored-by: parity-processbot <> --- frame/examples/basic/src/benchmarking.rs | 17 +++--- frame/examples/basic/src/lib.rs | 2 +- frame/examples/basic/src/weights.rs | 78 +++++++++++------------- 3 files changed, 46 insertions(+), 51 deletions(-) diff --git a/frame/examples/basic/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs index 87d65a0bfa5b6..13f069c23e27b 100644 --- a/frame/examples/basic/src/benchmarking.rs +++ b/frame/examples/basic/src/benchmarking.rs @@ -34,25 +34,26 @@ use frame_system::RawOrigin; // Details on using the benchmarks macro can be seen at: // https://paritytech.github.io/substrate/master/frame_benchmarking/trait.Benchmarking.html#tymethod.benchmarks benchmarks! { - // This will measure the execution time of `set_dummy` for b in [0..1000] range. + // This will measure the execution time of `set_dummy`. set_dummy_benchmark { - // This is the benchmark setup phase - let b in 0 .. 1000; - }: set_dummy(RawOrigin::Root, b.into()) // The execution phase is just running `set_dummy` extrinsic call + // This is the benchmark setup phase. + // `set_dummy` is a constant time function, hence we hard-code some random value here. + let value = 1000u32.into(); + }: set_dummy(RawOrigin::Root, value) // The execution phase is just running `set_dummy` extrinsic call verify { // This is the optional benchmark verification phase, asserting certain states. - assert_eq!(Pallet::::dummy(), Some(b.into())) + assert_eq!(Pallet::::dummy(), Some(value)) } - // This will measure the execution time of `accumulate_dummy` for b in [0..1000] range. + // This will measure the execution time of `accumulate_dummy`. // The benchmark execution phase is shorthanded. When the name of the benchmark case is the same // as the extrinsic call. `_(...)` is used to represent the extrinsic name. // The benchmark verification phase is omitted. accumulate_dummy { - let b in 0 .. 1000; + let value = 1000u32.into(); // The caller account is whitelisted for DB reads/write by the benchmarking macro. let caller: T::AccountId = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), b.into()) + }: _(RawOrigin::Signed(caller), value) // This will measure the execution time of sorting a vector. sort_vector { diff --git a/frame/examples/basic/src/lib.rs b/frame/examples/basic/src/lib.rs index f754348782cec..256529421caae 100644 --- a/frame/examples/basic/src/lib.rs +++ b/frame/examples/basic/src/lib.rs @@ -498,7 +498,7 @@ pub mod pallet { // The weight for this extrinsic we rely on the auto-generated `WeightInfo` from the // benchmark toolchain. 
#[pallet::weight( - ::WeightInfo::accumulate_dummy((*increase_by).saturated_into()) + ::WeightInfo::accumulate_dummy() )] pub fn accumulate_dummy(origin: OriginFor, increase_by: T::Balance) -> DispatchResult { // This is a public call, so we ensure that the origin is some signed account. diff --git a/frame/examples/basic/src/weights.rs b/frame/examples/basic/src/weights.rs index 986648b4302bc..a69f0824eac11 100644 --- a/frame/examples/basic/src/weights.rs +++ b/frame/examples/basic/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,34 +17,26 @@ //! Autogenerated weights for pallet_example_basic //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-03-15, STEPS: `[100, ]`, REPEAT: 10, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2022-10-09, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `Shawns-MacBook-Pro.local`, CPU: `` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: // ./target/release/substrate // benchmark -// --chain -// dev -// --execution -// wasm -// --wasm-execution -// compiled -// --pallet -// pallet_example_basic -// --extrinsic -// * -// --steps -// 100 -// --repeat -// 10 -// --raw -// --output -// ./ +// pallet +// --chain=dev +// --execution=wasm +// --wasm-execution=compiled +// --pallet=pallet_example_basic +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --output=./ // --template // ./.maintain/frame-weight-template.hbs - #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] @@ -54,48 +46,50 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_example_basic. pub trait WeightInfo { - fn set_dummy_benchmark(b: u32, ) -> Weight; - fn accumulate_dummy(b: u32, ) -> Weight; + fn set_dummy_benchmark() -> Weight; + fn accumulate_dummy() -> Weight; fn sort_vector(x: u32, ) -> Weight; } /// Weights for pallet_example_basic using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - fn set_dummy_benchmark(b: u32, ) -> Weight { - Weight::from_ref_time(5_834_000 as u64) - .saturating_add(Weight::from_ref_time(24_000 as u64).saturating_mul(b as u64)) + // Storage: BasicExample Dummy (r:0 w:1) + fn set_dummy_benchmark() -> Weight { + Weight::from_ref_time(19_000_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - fn accumulate_dummy(b: u32, ) -> Weight { - Weight::from_ref_time(51_353_000 as u64) - .saturating_add(Weight::from_ref_time(14_000 as u64).saturating_mul(b as u64)) + // Storage: BasicExample Dummy (r:1 w:1) + fn accumulate_dummy() -> Weight { + Weight::from_ref_time(18_000_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } + /// The range of component `x` is `[0, 10000]`. 
fn sort_vector(x: u32, ) -> Weight { - Weight::from_ref_time(2_569_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(x as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 2 + .saturating_add(Weight::from_ref_time(520 as u64).saturating_mul(x as u64)) } } // For backwards compatibility and tests impl WeightInfo for () { - fn set_dummy_benchmark(b: u32, ) -> Weight { - Weight::from_ref_time(5_834_000 as u64) - .saturating_add(Weight::from_ref_time(24_000 as u64).saturating_mul(b as u64)) + // Storage: BasicExample Dummy (r:0 w:1) + fn set_dummy_benchmark() -> Weight { + Weight::from_ref_time(19_000_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - fn accumulate_dummy(b: u32, ) -> Weight { - Weight::from_ref_time(51_353_000 as u64) - .saturating_add(Weight::from_ref_time(14_000 as u64).saturating_mul(b as u64)) + // Storage: BasicExample Dummy (r:1 w:1) + fn accumulate_dummy() -> Weight { + Weight::from_ref_time(18_000_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } + /// The range of component `x` is `[0, 10000]`. fn sort_vector(x: u32, ) -> Weight { - Weight::from_ref_time(2_569_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(x as u64)) + Weight::from_ref_time(0 as u64) + // Standard Error: 2 + .saturating_add(Weight::from_ref_time(520 as u64).saturating_mul(x as u64)) } } From 94941a892e9c625124afa8f3b28aedc5a6274059 Mon Sep 17 00:00:00 2001 From: Dmitrii Markin Date: Thu, 13 Oct 2022 12:24:31 +0300 Subject: [PATCH 237/348] Punish peers for duplicate GRANDPA neighbor messages (#12462) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Decrease peer reputation for duplicate GRANDPA neighbor messages. * Fix comparison * Fix update_peer_state() validity condition * Add negative test * Rework update_peer_state() validity condition, add tests * update_peer_state() validity condition: invert comparison * Split InvalidViewChange and DuplicateNeighborMessage misbehaviors * Enforce rate-limiting of duplicate GRANDPA neighbor packets * Update client/finality-grandpa/src/communication/gossip.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Make rolling clock back in a test safer Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> --- .../src/communication/gossip.rs | 151 +++++++++++++----- .../finality-grandpa/src/communication/mod.rs | 8 +- .../src/communication/periodic.rs | 17 +- 3 files changed, 124 insertions(+), 52 deletions(-) diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 1ba5e0da33c96..218b4b668c10f 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -35,7 +35,8 @@ //! impolite to send messages about r+1 or later. "future-round" messages can //! be dropped and ignored. //! -//! It is impolite to send a neighbor packet which moves backwards in protocol state. +//! It is impolite to send a neighbor packet which moves backwards or does not progress +//! protocol state. //! //! This is beneficial if it conveys some progress in the protocol state of the peer. //! 
@@ -97,7 +98,7 @@ use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnbound use sp_finality_grandpa::AuthorityId; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; -use super::{benefit, cost, Round, SetId}; +use super::{benefit, cost, Round, SetId, NEIGHBOR_REBROADCAST_PERIOD}; use crate::{environment, CatchUp, CompactCommit, SignedMessage}; use std::{ @@ -148,14 +149,15 @@ enum Consider { /// A view of protocol state. #[derive(Debug)] struct View { - round: Round, // the current round we are at. - set_id: SetId, // the current voter set id. - last_commit: Option, // commit-finalized block height, if any. + round: Round, // the current round we are at. + set_id: SetId, // the current voter set id. + last_commit: Option, // commit-finalized block height, if any. + last_update: Option, // last time we heard from peer, used for spamming detection. } impl Default for View { fn default() -> Self { - View { round: Round(1), set_id: SetId(0), last_commit: None } + View { round: Round(1), set_id: SetId(0), last_commit: None, last_update: None } } } @@ -225,7 +227,12 @@ impl LocalView { /// Converts the local view to a `View` discarding round and set id /// information about the last commit. fn as_view(&self) -> View<&N> { - View { round: self.round, set_id: self.set_id, last_commit: self.last_commit_height() } + View { + round: self.round, + set_id: self.set_id, + last_commit: self.last_commit_height(), + last_update: None, + } } /// Update the set ID. implies a reset to round 1. @@ -417,6 +424,8 @@ pub(super) struct FullCatchUpMessage { pub(super) enum Misbehavior { // invalid neighbor message, considering the last one. InvalidViewChange, + // duplicate neighbor message. + DuplicateNeighborMessage, // could not decode neighbor message. bytes-length of the packet. UndecodablePacket(i32), // Bad catch up message (invalid signatures). @@ -438,6 +447,7 @@ impl Misbehavior { match *self { InvalidViewChange => cost::INVALID_VIEW_CHANGE, + DuplicateNeighborMessage => cost::DUPLICATE_NEIGHBOR_MESSAGE, UndecodablePacket(bytes) => ReputationChange::new( bytes.saturating_mul(cost::PER_UNDECODABLE_BYTE), "Grandpa: Bad packet", @@ -488,20 +498,22 @@ struct Peers { second_stage_peers: HashSet, /// The randomly picked set of `LUCKY_PEERS` light clients we'll gossip commit messages to. lucky_light_peers: HashSet, + /// Neighbor packet rebroadcast period --- we reduce the reputation of peers sending duplicate + /// packets too often. 
+ neighbor_rebroadcast_period: Duration, } -impl Default for Peers { - fn default() -> Self { +impl Peers { + fn new(neighbor_rebroadcast_period: Duration) -> Self { Peers { inner: Default::default(), first_stage_peers: Default::default(), second_stage_peers: Default::default(), lucky_light_peers: Default::default(), + neighbor_rebroadcast_period, } } -} -impl Peers { fn new_peer(&mut self, who: PeerId, role: ObservedRole) { match role { ObservedRole::Authority if self.first_stage_peers.len() < LUCKY_PEERS => { @@ -547,10 +559,23 @@ impl Peers { return Err(Misbehavior::InvalidViewChange) } + let now = Instant::now(); + let duplicate_packet = (update.set_id, update.round, Some(&update.commit_finalized_height)) == + (peer.view.set_id, peer.view.round, peer.view.last_commit.as_ref()); + + if duplicate_packet { + if let Some(last_update) = peer.view.last_update { + if now < last_update + self.neighbor_rebroadcast_period / 2 { + return Err(Misbehavior::DuplicateNeighborMessage) + } + } + } + peer.view = View { round: update.round, set_id: update.set_id, last_commit: Some(update.commit_finalized_height), + last_update: Some(now), }; trace!(target: "afg", "Peer {} updated view. Now at {:?}, {:?}", @@ -748,7 +773,7 @@ impl Inner { Inner { local_view: None, - peers: Peers::default(), + peers: Peers::new(NEIGHBOR_REBROADCAST_PERIOD), live_topics: KeepTopics::new(), next_rebroadcast: Instant::now() + REBROADCAST_AFTER, authorities: Vec::new(), @@ -758,13 +783,16 @@ impl Inner { } } - /// Note a round in the current set has started. + /// Note a round in the current set has started. Does nothing if the last + /// call to the function was with the same `round`. fn note_round(&mut self, round: Round) -> MaybeMessage { { let local_view = match self.local_view { None => return None, Some(ref mut v) => if v.round == round { + // Do not send neighbor packets out if `round` has not changed --- + // such behavior is punishable. return None } else { v @@ -803,6 +831,8 @@ impl Inner { ); self.authorities = authorities; } + // Do not send neighbor packets out if the `set_id` has not changed --- + // such behavior is punishable. return None } else { v @@ -816,7 +846,9 @@ impl Inner { self.multicast_neighbor_packet() } - /// Note that we've imported a commit finalizing a given block. + /// Note that we've imported a commit finalizing a given block. Does nothing if the last + /// call to the function was with the same or higher `finalized` number. + /// `set_id` & `round` are the ones the commit message is from. fn note_commit_finalized( &mut self, round: Round, @@ -1357,6 +1389,8 @@ impl GossipValidator { } /// Note that we've imported a commit finalizing a given block. + /// `set_id` & `round` are the ones the commit message is from and not necessarily + /// the latest set ID & round started. 
pub(super) fn note_commit_finalized( &self, round: Round, @@ -1647,11 +1681,12 @@ pub(super) struct PeerReport { #[cfg(test)] mod tests { - use super::{environment::SharedVoterSetState, *}; + use super::{super::NEIGHBOR_REBROADCAST_PERIOD, environment::SharedVoterSetState, *}; use crate::communication; use sc_network::config::Role; use sc_network_gossip::Validator as GossipValidatorT; use sp_core::{crypto::UncheckedFrom, H256}; + use std::time::Instant; use substrate_test_runtime_client::runtime::{Block, Header}; // some random config (not really needed) @@ -1684,7 +1719,12 @@ mod tests { #[test] fn view_vote_rules() { - let view = View { round: Round(100), set_id: SetId(1), last_commit: Some(1000u64) }; + let view = View { + round: Round(100), + set_id: SetId(1), + last_commit: Some(1000u64), + last_update: None, + }; assert_eq!(view.consider_vote(Round(98), SetId(1)), Consider::RejectPast); assert_eq!(view.consider_vote(Round(1), SetId(0)), Consider::RejectPast); @@ -1701,7 +1741,12 @@ mod tests { #[test] fn view_global_message_rules() { - let view = View { round: Round(100), set_id: SetId(2), last_commit: Some(1000u64) }; + let view = View { + round: Round(100), + set_id: SetId(2), + last_commit: Some(1000u64), + last_update: None, + }; assert_eq!(view.consider_global(SetId(3), 1), Consider::RejectFuture); assert_eq!(view.consider_global(SetId(3), 1000), Consider::RejectFuture); @@ -1719,7 +1764,7 @@ mod tests { #[test] fn unknown_peer_cannot_be_updated() { - let mut peers = Peers::default(); + let mut peers = Peers::new(NEIGHBOR_REBROADCAST_PERIOD); let id = PeerId::random(); let update = @@ -1750,27 +1795,35 @@ mod tests { let update4 = NeighborPacket { round: Round(3), set_id: SetId(11), commit_finalized_height: 80 }; - let mut peers = Peers::default(); + // Use shorter rebroadcast period to safely roll the clock back in the last test + // and don't hit the system boot time on systems with unsigned time. + const SHORT_NEIGHBOR_REBROADCAST_PERIOD: Duration = Duration::from_secs(1); + let mut peers = Peers::new(SHORT_NEIGHBOR_REBROADCAST_PERIOD); let id = PeerId::random(); peers.new_peer(id, ObservedRole::Authority); - let mut check_update = move |update: NeighborPacket<_>| { + let check_update = |peers: &mut Peers<_>, update: NeighborPacket<_>| { let view = peers.update_peer_state(&id, update.clone()).unwrap().unwrap(); assert_eq!(view.round, update.round); assert_eq!(view.set_id, update.set_id); assert_eq!(view.last_commit, Some(update.commit_finalized_height)); }; - check_update(update1); - check_update(update2); - check_update(update3); - check_update(update4); + check_update(&mut peers, update1); + check_update(&mut peers, update2); + check_update(&mut peers, update3); + check_update(&mut peers, update4.clone()); + + // Allow duplicate neighbor packets if enough time has passed. 
+ peers.inner.get_mut(&id).unwrap().view.last_update = + Some(Instant::now() - SHORT_NEIGHBOR_REBROADCAST_PERIOD); + check_update(&mut peers, update4); } #[test] fn invalid_view_change() { - let mut peers = Peers::default(); + let mut peers = Peers::new(NEIGHBOR_REBROADCAST_PERIOD); let id = PeerId::random(); peers.new_peer(id, ObservedRole::Authority); @@ -1783,29 +1836,41 @@ mod tests { .unwrap() .unwrap(); - let mut check_update = move |update: NeighborPacket<_>| { + let mut check_update = move |update: NeighborPacket<_>, misbehavior| { let err = peers.update_peer_state(&id, update.clone()).unwrap_err(); - assert_eq!(err, Misbehavior::InvalidViewChange); + assert_eq!(err, misbehavior); }; // round moves backwards. - check_update(NeighborPacket { - round: Round(9), - set_id: SetId(10), - commit_finalized_height: 10, - }); - // commit finalized height moves backwards. - check_update(NeighborPacket { - round: Round(10), - set_id: SetId(10), - commit_finalized_height: 9, - }); + check_update( + NeighborPacket { round: Round(9), set_id: SetId(10), commit_finalized_height: 10 }, + Misbehavior::InvalidViewChange, + ); // set ID moves backwards. - check_update(NeighborPacket { - round: Round(10), - set_id: SetId(9), - commit_finalized_height: 10, - }); + check_update( + NeighborPacket { round: Round(10), set_id: SetId(9), commit_finalized_height: 10 }, + Misbehavior::InvalidViewChange, + ); + // commit finalized height moves backwards. + check_update( + NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 9 }, + Misbehavior::InvalidViewChange, + ); + // duplicate packet without grace period. + check_update( + NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 10 }, + Misbehavior::DuplicateNeighborMessage, + ); + // commit finalized height moves backwards while round moves forward. + check_update( + NeighborPacket { round: Round(11), set_id: SetId(10), commit_finalized_height: 9 }, + Misbehavior::InvalidViewChange, + ); + // commit finalized height moves backwards while set ID moves forward. + check_update( + NeighborPacket { round: Round(10), set_id: SetId(11), commit_finalized_height: 9 }, + Misbehavior::InvalidViewChange, + ); } #[test] diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 12cb2601f4c26..75a7697812c6c 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -37,6 +37,7 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, + time::Duration, }; use finality_grandpa::{ @@ -68,6 +69,9 @@ mod periodic; #[cfg(test)] pub(crate) mod tests; +// How often to rebroadcast neighbor packets, in cases where no new packets are created. 
+pub(crate) const NEIGHBOR_REBROADCAST_PERIOD: Duration = Duration::from_secs(2 * 60); + pub mod grandpa_protocol_name { use sc_chain_spec::ChainSpec; use sc_network_common::protocol::ProtocolName; @@ -103,6 +107,8 @@ mod cost { pub(super) const UNKNOWN_VOTER: Rep = Rep::new(-150, "Grandpa: Unknown voter"); pub(super) const INVALID_VIEW_CHANGE: Rep = Rep::new(-500, "Grandpa: Invalid view change"); + pub(super) const DUPLICATE_NEIGHBOR_MESSAGE: Rep = + Rep::new(-500, "Grandpa: Duplicate neighbor message without grace period"); pub(super) const PER_UNDECODABLE_BYTE: i32 = -5; pub(super) const PER_SIGNATURE_CHECKED: i32 = -25; pub(super) const PER_BLOCK_LOADED: i32 = -10; @@ -279,7 +285,7 @@ impl> NetworkBridge { } let (neighbor_packet_worker, neighbor_packet_sender) = - periodic::NeighborPacketWorker::new(); + periodic::NeighborPacketWorker::new(NEIGHBOR_REBROADCAST_PERIOD); NetworkBridge { service, diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index e6d63beafc362..c001796b5ca5d 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -32,9 +32,6 @@ use super::gossip::{GossipMessage, NeighborPacket}; use sc_network::PeerId; use sp_runtime::traits::{Block as BlockT, NumberFor}; -// How often to rebroadcast, in cases where no new packets are created. -const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); - /// A sender used to send neighbor packets to a background job. #[derive(Clone)] pub(super) struct NeighborPacketSender( @@ -60,6 +57,7 @@ impl NeighborPacketSender { /// implementation). Periodically it sends out the last packet in cases where no new ones arrive. pub(super) struct NeighborPacketWorker { last: Option<(Vec, NeighborPacket>)>, + rebroadcast_period: Duration, delay: Delay, rx: TracingUnboundedReceiver<(Vec, NeighborPacket>)>, } @@ -67,13 +65,16 @@ pub(super) struct NeighborPacketWorker { impl Unpin for NeighborPacketWorker {} impl NeighborPacketWorker { - pub(super) fn new() -> (Self, NeighborPacketSender) { + pub(super) fn new(rebroadcast_period: Duration) -> (Self, NeighborPacketSender) { let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)>( "mpsc_grandpa_neighbor_packet_worker", ); - let delay = Delay::new(REBROADCAST_AFTER); + let delay = Delay::new(rebroadcast_period); - (NeighborPacketWorker { last: None, delay, rx }, NeighborPacketSender(tx)) + ( + NeighborPacketWorker { last: None, rebroadcast_period, delay, rx }, + NeighborPacketSender(tx), + ) } } @@ -85,7 +86,7 @@ impl Stream for NeighborPacketWorker { match this.rx.poll_next_unpin(cx) { Poll::Ready(None) => return Poll::Ready(None), Poll::Ready(Some((to, packet))) => { - this.delay.reset(REBROADCAST_AFTER); + this.delay.reset(this.rebroadcast_period); this.last = Some((to.clone(), packet.clone())); return Poll::Ready(Some((to, GossipMessage::::from(packet)))) @@ -98,7 +99,7 @@ impl Stream for NeighborPacketWorker { // Getting this far here implies that the timer fired. - this.delay.reset(REBROADCAST_AFTER); + this.delay.reset(this.rebroadcast_period); // Make sure the underlying task is scheduled for wake-up. 
// From 983b6b0e5d93a3f7d99d8b3d3a8bb398af3ec045 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Thu, 13 Oct 2022 12:37:09 +0300 Subject: [PATCH 238/348] Introduce mockable `ChainSync` object for testing (#12480) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Introduce mockable `ChainSync` object for testing `mockall` allows to mock `ChainSync` and to verify that the calls made to `ChaiSync` are firstly executed at all, that they're executed in correct order and with correct parameters. This allows to verify, e.g., that delegating calls directly to `ChainSync` from `NetworkService` still calls the correct functions with correct arguments even if `Protocol` middleman is removed. * Add Cargo.lock * Fix tests * Update client/network/Cargo.toml Co-authored-by: Bastian Köcher * Update Cargo.lock * Fix clippy and documentation Co-authored-by: Bastian Köcher Co-authored-by: parity-processbot <> --- Cargo.lock | 58 +++ client/network/common/src/sync.rs | 14 +- client/network/src/protocol.rs | 2 +- client/network/src/service.rs | 2 + client/network/src/service/chainsync_tests.rs | 339 ++++++++++++++++++ client/network/sync/Cargo.toml | 1 + client/network/sync/src/lib.rs | 35 +- client/network/sync/src/mock.rs | 118 ++++++ 8 files changed, 546 insertions(+), 23 deletions(-) create mode 100644 client/network/src/service/chainsync_tests.rs create mode 100644 client/network/sync/src/mock.rs diff --git a/Cargo.lock b/Cargo.lock index d29023f330ce1..062195c70f8ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1717,6 +1717,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +[[package]] +name = "downcast" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" + [[package]] name = "downcast-rs" version = "1.2.0" @@ -2093,6 +2099,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "float-cmp" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" @@ -2116,6 +2131,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fragile" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85dcb89d2b10c5f6133de2efd8c11959ce9dbb46a2f7a4cab208c4eeda6ce1ab" + [[package]] name = "frame-benchmarking" version = "4.0.0-dev" @@ -4395,6 +4416,33 @@ dependencies = [ "windows-sys 0.36.1", ] +[[package]] +name = "mockall" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2be9a9090bc1cac2930688fa9478092a64c6a92ddc6ae0692d46b37d9cab709" +dependencies = [ + "cfg-if 1.0.0", + "downcast", + "fragile", + "lazy_static", + "mockall_derive", + "predicates", + "predicates-tree", +] + +[[package]] +name = "mockall_derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86d702a0530a0141cf4ed147cf5ec7be6f2c187d4e37fcbefc39cf34116bfe8f" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "more-asserts" version = "0.2.1" @@ -4970,6 +5018,12 @@ dependencies = [ "version_check", ] +[[package]] 
+name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "num-bigint" version = "0.2.6" @@ -6942,8 +6996,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c143348f141cc87aab5b950021bac6145d0e5ae754b0591de23244cee42c9308" dependencies = [ "difflib", + "float-cmp", "itertools", + "normalize-line-endings", "predicates-core", + "regex", ] [[package]] @@ -8586,6 +8643,7 @@ dependencies = [ "libp2p", "log", "lru", + "mockall", "parity-scale-codec", "prost 0.11.0", "prost-build 0.11.1", diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index 020b2c9efa4c7..d3603c6792c84 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -269,12 +269,14 @@ pub trait ChainSync: Send { ); /// Get an iterator over all scheduled justification requests. - fn justification_requests( - &mut self, - ) -> Box)> + '_>; + fn justification_requests<'a>( + &'a mut self, + ) -> Box)> + 'a>; /// Get an iterator over all block requests of all peers. - fn block_requests(&mut self) -> Box)> + '_>; + fn block_requests<'a>( + &'a mut self, + ) -> Box)> + 'a>; /// Get a state request, if any. fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)>; @@ -359,9 +361,9 @@ pub trait ChainSync: Send { /// /// If [`PollBlockAnnounceValidation::ImportHeader`] is returned, then the caller MUST try to /// import passed header (call `on_block_data`). The network request isn't sent in this case. - fn poll_block_announce_validation( + fn poll_block_announce_validation<'a>( &mut self, - cx: &mut std::task::Context, + cx: &mut std::task::Context<'a>, ) -> Poll>; /// Call when a peer has disconnected. diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 325e044527efa..c3def8adc6cfe 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1440,7 +1440,7 @@ where for (id, request) in self .chain_sync .block_requests() - .map(|(peer_id, request)| (*peer_id, request)) + .map(|(peer_id, request)| (peer_id, request)) .collect::>() { let event = diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 28e479b702779..25916041285a3 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -92,6 +92,8 @@ use std::{ pub use behaviour::{InboundFailure, OutboundFailure, ResponseFailure}; +#[cfg(test)] +mod chainsync_tests; mod metrics; mod out_events; #[cfg(test)] diff --git a/client/network/src/service/chainsync_tests.rs b/client/network/src/service/chainsync_tests.rs new file mode 100644 index 0000000000000..ca44c65d267f4 --- /dev/null +++ b/client/network/src/service/chainsync_tests.rs @@ -0,0 +1,339 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{config, NetworkWorker}; + +use futures::prelude::*; +use libp2p::PeerId; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{BlockBackend, HeaderBackend}; +use sc_consensus::JustificationSyncLink; +use sc_network_common::{ + config::{ + NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, + TransportConfig, + }, + protocol::role::Roles, + service::NetworkSyncForkRequest, + sync::{message::BlockAnnouncesHandshake, ChainSync as ChainSyncT, SyncState, SyncStatus}, +}; +use sc_network_light::light_client_requests::handler::LightClientRequestHandler; +use sc_network_sync::{ + block_request_handler::BlockRequestHandler, mock::MockChainSync, + state_request_handler::StateRequestHandler, +}; +use sp_core::H256; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as _, Zero}, +}; +use std::{iter, sync::Arc, task::Poll}; +use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; + +type TestNetworkWorker = NetworkWorker< + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::Hash, + substrate_test_runtime_client::TestClient, +>; + +const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; +const PROTOCOL_NAME: &str = "/foo"; + +fn make_network( + chain_sync: Box>, + client: Arc, +) -> (TestNetworkWorker, Arc) { + let network_config = config::NetworkConfiguration { + extra_sets: vec![NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME.into(), + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + handshake: None, + set_config: Default::default(), + }], + listen_addresses: vec![config::build_multiaddr![Memory(rand::random::())]], + transport: TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }; + + #[derive(Clone)] + struct PassThroughVerifier(bool); + + #[async_trait::async_trait] + impl sc_consensus::Verifier for PassThroughVerifier { + async fn verify( + &mut self, + mut block: sc_consensus::BlockImportParams, + ) -> Result< + ( + sc_consensus::BlockImportParams, + Option)>>, + ), + String, + > { + let maybe_keys = block + .header + .digest() + .log(|l| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus( + b"babe", + )) + }) + }) + .map(|blob| { + vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())] + }); + + block.finalized = self.0; + block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); + Ok((block, maybe_keys)) + } + } + + let import_queue = Box::new(sc_consensus::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + &sp_core::testing::TaskExecutor::new(), + None, + )); + + let protocol_id = ProtocolId::from("/test-protocol-name"); + + let fork_id = Some(String::from("test-fork-id")); + + let block_request_protocol_config = { + let (handler, protocol_config) = + BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let state_request_protocol_config = { + let (handler, protocol_config) = + StateRequestHandler::new(&protocol_id, None, client.clone(), 50); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let light_client_request_protocol_config = { + let (handler, protocol_config) = + 
LightClientRequestHandler::new(&protocol_id, None, client.clone());
+ async_std::task::spawn(handler.run().boxed());
+ protocol_config
+ };
+
+ let block_announce_config = NonDefaultSetConfig {
+ notifications_protocol: BLOCK_ANNOUNCE_PROTO_NAME.into(),
+ fallback_names: vec![],
+ max_notification_size: 1024 * 1024,
+ handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::<
+ substrate_test_runtime_client::runtime::Block,
+ >::build(
+ Roles::from(&config::Role::Full),
+ client.info().best_number,
+ client.info().best_hash,
+ client
+ .block_hash(Zero::zero())
+ .ok()
+ .flatten()
+ .expect("Genesis block exists; qed"),
+ ))),
+ set_config: SetConfig {
+ in_peers: 0,
+ out_peers: 0,
+ reserved_nodes: Vec::new(),
+ non_reserved_mode: NonReservedPeerMode::Deny,
+ },
+ };
+
+ let worker = NetworkWorker::new(config::Params {
+ block_announce_config,
+ role: config::Role::Full,
+ executor: None,
+ network_config,
+ chain: client.clone(),
+ protocol_id,
+ fork_id,
+ import_queue,
+ chain_sync,
+ metrics_registry: None,
+ block_request_protocol_config,
+ state_request_protocol_config,
+ light_client_request_protocol_config,
+ warp_sync_protocol_config: None,
+ request_response_protocol_configs: Vec::new(),
+ })
+ .unwrap();
+
+ (worker, client)
+}
+
+fn set_default_expecations_no_peers(
+ chain_sync: &mut MockChainSync,
+) {
+ chain_sync.expect_block_requests().returning(|| Box::new(iter::empty()));
+ chain_sync.expect_state_request().returning(|| None);
+ chain_sync.expect_justification_requests().returning(|| Box::new(iter::empty()));
+ chain_sync.expect_warp_sync_request().returning(|| None);
+ chain_sync.expect_poll_block_announce_validation().returning(|_| Poll::Pending);
+ chain_sync.expect_status().returning(|| SyncStatus {
+ state: SyncState::Idle,
+ best_seen_block: None,
+ num_peers: 0u32,
+ queued_blocks: 0u32,
+ state_sync: None,
+ warp_sync: None,
+ });
+}
+
+#[async_std::test]
+async fn normal_network_poll_no_peers() {
+ let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
+ let mut chain_sync =
+ Box::new(MockChainSync::::new());
+ set_default_expecations_no_peers(&mut chain_sync);
+
+ let (mut network, _) = make_network(chain_sync, client);
+
+ // poll the network once
+ futures::future::poll_fn(|cx| {
+ let _ = network.poll_unpin(cx);
+ Poll::Ready(())
+ })
+ .await;
+}
+
+#[async_std::test]
+async fn request_justification() {
+ let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
+ let mut chain_sync =
+ Box::new(MockChainSync::::new());
+
+ let hash = H256::random();
+ let number = 1337u64;
+
+ chain_sync
+ .expect_request_justification()
+ .withf(move |in_hash, in_number| &hash == in_hash && &number == in_number)
+ .once()
+ .returning(|_, _| ());
+
+ set_default_expecations_no_peers(&mut chain_sync);
+ let (mut network, _) = make_network(chain_sync, client);
+
+ // send "request justification" message and poll the network
+ network.service().request_justification(&hash, number);
+
+ futures::future::poll_fn(|cx| {
+ let _ = network.poll_unpin(cx);
+ Poll::Ready(())
+ })
+ .await;
+}
+
+#[async_std::test]
+async fn clear_justification_requests() {
+ let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0);
+ let mut chain_sync =
+ Box::new(MockChainSync::::new());
+
+ chain_sync.expect_clear_justification_requests().once().returning(|| ());
+
+ set_default_expecations_no_peers(&mut chain_sync);
+ let (mut network, _) =
make_network(chain_sync, client); + + // send "request justifiction" message and poll the network + network.service().clear_justification_requests(); + + futures::future::poll_fn(|cx| { + let _ = network.poll_unpin(cx); + Poll::Ready(()) + }) + .await; +} + +#[async_std::test] +async fn set_sync_fork_request() { + let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); + let mut chain_sync = + Box::new(MockChainSync::::new()); + + let hash = H256::random(); + let number = 1337u64; + let peers = (0..3).map(|_| PeerId::random()).collect::>(); + let copy_peers = peers.clone(); + + chain_sync + .expect_set_sync_fork_request() + .withf(move |in_peers, in_hash, in_number| { + &peers == in_peers && &hash == in_hash && &number == in_number + }) + .once() + .returning(|_, _, _| ()); + + set_default_expecations_no_peers(&mut chain_sync); + let (mut network, _) = make_network(chain_sync, client); + + // send "set sync fork request" message and poll the network + network.service().set_sync_fork_request(copy_peers, hash, number); + + futures::future::poll_fn(|cx| { + let _ = network.poll_unpin(cx); + Poll::Ready(()) + }) + .await; +} + +#[async_std::test] +async fn on_block_finalized() { + let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); + let mut chain_sync = + Box::new(MockChainSync::::new()); + + let at = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap().hash(); + let block = client + .new_block_at(&BlockId::Hash(at), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + let header = block.header.clone(); + let block_number = *header.number(); + let hash = block.hash(); + + chain_sync + .expect_on_block_finalized() + .withf(move |in_hash, in_number| &hash == in_hash && &block_number == in_number) + .once() + .returning(|_, _| ()); + + set_default_expecations_no_peers(&mut chain_sync); + let (mut network, _) = make_network(chain_sync, client); + + // send "set sync fork request" message and poll the network + network.on_block_finalized(hash, header); + + futures::future::poll_fn(|cx| { + let _ = network.poll_unpin(cx); + Poll::Ready(()) + }) + .await; +} diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 24d418f7233d7..9d032f5cca96c 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -25,6 +25,7 @@ futures = "0.3.21" libp2p = "0.46.1" log = "0.4.17" lru = "0.7.5" +mockall = "0.11.2" prost = "0.11" smallvec = "1.8.0" thiserror = "1.0" diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 280e530eca9a9..84998c747b3cc 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -30,6 +30,7 @@ pub mod block_request_handler; pub mod blocks; +pub mod mock; mod schema; pub mod state; pub mod state_request_handler; @@ -643,9 +644,9 @@ where .extend(peers); } - fn justification_requests( - &mut self, - ) -> Box)> + '_> { + fn justification_requests<'a>( + &'a mut self, + ) -> Box)> + 'a> { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); Box::new(std::iter::from_fn(move || { @@ -670,7 +671,9 @@ where })) } - fn block_requests(&mut self) -> Box)> + '_> { + fn block_requests<'a>( + &'a mut self, + ) -> Box)> + 'a> { if self.mode == SyncMode::Warp { return Box::new(std::iter::once(self.warp_target_block_request()).flatten()) } @@ -695,8 +698,8 @@ where let allowed_requests = self.allowed_requests.take(); let max_parallel = if major_sync { 
1 } else { self.max_parallel_downloads }; let gap_sync = &mut self.gap_sync; - let iter = self.peers.iter_mut().filter_map(move |(id, peer)| { - if !peer.state.is_available() || !allowed_requests.contains(id) { + let iter = self.peers.iter_mut().filter_map(move |(&id, peer)| { + if !peer.state.is_available() || !allowed_requests.contains(&id) { return None } @@ -725,7 +728,7 @@ where }; Some((id, ancestry_request::(current))) } else if let Some((range, req)) = peer_block_request( - id, + &id, peer, blocks, attrs, @@ -744,7 +747,7 @@ where ); Some((id, req)) } else if let Some((hash, req)) = - fork_sync_request(id, fork_targets, best_queued, last_finalized, attrs, |hash| { + fork_sync_request(&id, fork_targets, best_queued, last_finalized, attrs, |hash| { if queue.contains(hash) { BlockStatus::Queued } else { @@ -756,7 +759,7 @@ where Some((id, req)) } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| { peer_gap_block_request( - id, + &id, peer, &mut sync.blocks, attrs, @@ -2216,7 +2219,7 @@ where } /// Generate block request for downloading of the target block body during warp sync. - fn warp_target_block_request(&mut self) -> Option<(&PeerId, BlockRequest)> { + fn warp_target_block_request(&mut self) -> Option<(PeerId, BlockRequest)> { if let Some(sync) = &self.warp_sync { if self.allowed_requests.is_empty() || sync.is_complete() || @@ -2234,7 +2237,7 @@ where trace!(target: "sync", "New warp target block request for {}", id); peer.state = PeerSyncState::DownloadingWarpTargetBlock; self.allowed_requests.clear(); - return Some((id, request)) + return Some((*id, request)) } } } @@ -2482,7 +2485,7 @@ fn fork_sync_request( true }); for (hash, r) in targets { - if !r.peers.contains(id) { + if !r.peers.contains(&id) { continue } // Download the fork only if it is behind or not too far ahead our tip of the chain @@ -2740,7 +2743,7 @@ mod test { // we wil send block requests to these peers // for these blocks we don't know about - assert!(sync.block_requests().all(|(p, _)| { *p == peer_id1 || *p == peer_id2 })); + assert!(sync.block_requests().all(|(p, _)| { p == peer_id1 || p == peer_id2 })); // add a new peer at a known block sync.new_peer(peer_id3, b1_hash, b1_number).unwrap(); @@ -2835,7 +2838,7 @@ mod test { log::trace!(target: "sync", "Requests: {:?}", requests); assert_eq!(1, requests.len()); - assert_eq!(peer, requests[0].0); + assert_eq!(*peer, requests[0].0); let request = requests[0].1.clone(); @@ -3065,9 +3068,9 @@ mod test { send_block_announce(best_block.header().clone(), &peer_id2, &mut sync); let (peer1_req, peer2_req) = sync.block_requests().fold((None, None), |res, req| { - if req.0 == &peer_id1 { + if req.0 == peer_id1 { (Some(req.1), res.1) - } else if req.0 == &peer_id2 { + } else if req.0 == peer_id2 { (res.0, Some(req.1)) } else { panic!("Unexpected req: {:?}", req) diff --git a/client/network/sync/src/mock.rs b/client/network/sync/src/mock.rs new file mode 100644 index 0000000000000..2a3b059f735b2 --- /dev/null +++ b/client/network/sync/src/mock.rs @@ -0,0 +1,118 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Contains a mock implementation of `ChainSync` that can be used +//! for testing calls made to `ChainSync`. + +use futures::task::Poll; +use libp2p::PeerId; +use sc_consensus::{BlockImportError, BlockImportStatus}; +use sc_network_common::sync::{ + message::{BlockAnnounce, BlockData, BlockRequest, BlockResponse}, + warp::{EncodedProof, WarpProofRequest}, + BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, + OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, + PollBlockAnnounceValidation, SyncStatus, +}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +mockall::mock! { + pub ChainSync {} + + impl ChainSyncT for ChainSync { + fn peer_info(&self, who: &PeerId) -> Option>; + fn status(&self) -> SyncStatus; + fn num_sync_requests(&self) -> usize; + fn num_downloaded_blocks(&self) -> usize; + fn num_peers(&self) -> usize; + fn new_peer( + &mut self, + who: PeerId, + best_hash: Block::Hash, + best_number: NumberFor, + ) -> Result>, BadPeer>; + fn update_chain_info(&mut self, best_hash: &Block::Hash, best_number: NumberFor); + fn request_justification(&mut self, hash: &Block::Hash, number: NumberFor); + fn clear_justification_requests(&mut self); + fn set_sync_fork_request( + &mut self, + peers: Vec, + hash: &Block::Hash, + number: NumberFor, + ); + fn justification_requests<'a>( + &'a mut self, + ) -> Box)> + 'a>; + fn block_requests<'a>(&'a mut self) -> Box)> + 'a>; + fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)>; + fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)>; + fn on_block_data( + &mut self, + who: &PeerId, + request: Option>, + response: BlockResponse, + ) -> Result, BadPeer>; + fn on_state_data( + &mut self, + who: &PeerId, + response: OpaqueStateResponse, + ) -> Result, BadPeer>; + fn on_warp_sync_data(&mut self, who: &PeerId, response: EncodedProof) -> Result<(), BadPeer>; + fn on_block_justification( + &mut self, + who: PeerId, + response: BlockResponse, + ) -> Result, BadPeer>; + fn on_blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, Block::Hash)>, + ) -> Box), BadPeer>>>; + fn on_justification_import( + &mut self, + hash: Block::Hash, + number: NumberFor, + success: bool, + ); + fn on_block_finalized(&mut self, hash: &Block::Hash, number: NumberFor); + fn push_block_announce_validation( + &mut self, + who: PeerId, + hash: Block::Hash, + announce: BlockAnnounce, + is_best: bool, + ); + fn poll_block_announce_validation<'a>( + &mut self, + cx: &mut std::task::Context<'a>, + ) -> Poll>; + fn peer_disconnected(&mut self, who: &PeerId) -> Option>; + fn metrics(&self) -> Metrics; + fn create_opaque_block_request(&self, request: &BlockRequest) -> OpaqueBlockRequest; + fn encode_block_request(&self, request: &OpaqueBlockRequest) -> Result, String>; + fn decode_block_response(&self, response: &[u8]) -> Result; + fn block_response_into_blocks( + &self, + request: &BlockRequest, + response: OpaqueBlockResponse, + ) -> Result>, String>; + fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result, String>; + fn 
decode_state_response(&self, response: &[u8]) -> Result; + } +} From 1f39c9029eb83e9432d86877f0694f643b7dd968 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Thu, 13 Oct 2022 12:13:56 +0200 Subject: [PATCH 239/348] pallet-mmr: RPC API and Runtime API work with block numbers (#12345) * pallet-mmr: RPC API works with block_numbers * fixes * update rpc * fmt * final touches in the rpc * temporary fix * fix * fmt * docs * Update lib.rs * use NumberFor * validate input * update runtime * convert block_number to u64 * small edit * update runtime api * test fix * runtime fix * update test function * fmt * fix nits * remove block_num_to_leaf_index from runtime api * Update frame/merkle-mountain-range/src/lib.rs Co-authored-by: Robert Hambrock * fix tests * get the code to compile after merge * get the tests to compile * fix in tests? * fix test * Update frame/merkle-mountain-range/src/tests.rs Co-authored-by: Adrian Catangiu * Update frame/merkle-mountain-range/src/lib.rs Co-authored-by: Adrian Catangiu * Update primitives/merkle-mountain-range/src/lib.rs Co-authored-by: Adrian Catangiu * fix errors & nits * change block_num_to_leaf_index * don't make any assumptions * Update frame/merkle-mountain-range/src/tests.rs Co-authored-by: Adrian Catangiu * Update frame/merkle-mountain-range/src/tests.rs Co-authored-by: Adrian Catangiu * Update frame/merkle-mountain-range/src/tests.rs Co-authored-by: Adrian Catangiu * fix * small fix * use best_known_block_number * best_known_block_number instead of leaves_count * more readable? * remove warning * Update frame/merkle-mountain-range/src/lib.rs Co-authored-by: Robert Hambrock * simplify * update docs * nits * fmt & fix * merge fixes * fix * small fix * docs & nit fixes * Nit fixes * remove leaf_indices_to_block_numbers() * fmt Co-authored-by: Robert Hambrock Co-authored-by: Adrian Catangiu --- bin/node/rpc/src/lib.rs | 6 +- bin/node/runtime/src/lib.rs | 20 +++-- client/beefy/src/lib.rs | 4 +- client/beefy/src/tests.rs | 16 ++-- client/beefy/src/worker.rs | 2 +- frame/merkle-mountain-range/rpc/src/lib.rs | 59 +++++++-------- frame/merkle-mountain-range/src/lib.rs | 56 +++++++++++--- frame/merkle-mountain-range/src/tests.rs | 81 +++++++++++---------- primitives/beefy/src/mmr.rs | 6 +- primitives/merkle-mountain-range/src/lib.rs | 23 +++--- 10 files changed, 160 insertions(+), 113 deletions(-) diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 94e01619c6e63..8596fe23321ba 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -108,7 +108,11 @@ where + Send + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_mmr_rpc::MmrRuntimeApi::Hash>, + C::Api: pallet_mmr_rpc::MmrRuntimeApi< + Block, + ::Hash, + BlockNumber, + >, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, C::Api: BlockBuilder, diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 142173621036d..f137b36eff036 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2016,11 +2016,15 @@ impl_runtime_apis! 
{ } } - impl pallet_mmr::primitives::MmrApi for Runtime { - fn generate_proof(leaf_index: pallet_mmr::primitives::LeafIndex) + impl pallet_mmr::primitives::MmrApi< + Block, + mmr::Hash, + BlockNumber, + > for Runtime { + fn generate_proof(block_number: BlockNumber) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { - Mmr::generate_batch_proof(vec![leaf_index]).and_then(|(leaves, proof)| + Mmr::generate_batch_proof(vec![block_number]).and_then(|(leaves, proof)| Ok(( mmr::EncodableOpaqueLeaf::from_leaf(&leaves[0]), mmr::BatchProof::into_single_leaf_proof(proof)? @@ -2052,9 +2056,9 @@ impl_runtime_apis! { } fn generate_batch_proof( - leaf_indices: Vec, + block_numbers: Vec, ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { - Mmr::generate_batch_proof(leaf_indices).map(|(leaves, proof)| { + Mmr::generate_batch_proof(block_numbers).map(|(leaves, proof)| { ( leaves .into_iter() @@ -2066,10 +2070,10 @@ impl_runtime_apis! { } fn generate_historical_batch_proof( - leaf_indices: Vec, - leaves_count: pallet_mmr::primitives::LeafIndex, + block_numbers: Vec, + best_known_block_number: BlockNumber, ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { - Mmr::generate_historical_batch_proof(leaf_indices, leaves_count).map( + Mmr::generate_historical_batch_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( leaves diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 1c61cac072207..441f6e4248117 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -24,7 +24,7 @@ use sc_consensus::BlockImport; use sc_network::ProtocolName; use sc_network_common::service::NetworkRequest; use sc_network_gossip::Network as GossipNetwork; -use sp_api::ProvideRuntimeApi; +use sp_api::{NumberFor, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_keystore::SyncCryptoStorePtr; @@ -200,7 +200,7 @@ where C: Client + BlockBackend, P: PayloadProvider, R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi, + R::Api: BeefyApi + MmrApi>, N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, { let BeefyParams { diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 24cf89acd5734..89be1cac4f886 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -43,9 +43,7 @@ use beefy_primitives::{ KEY_TYPE as BeefyKeyType, }; use sc_network::{config::RequestResponseConfig, ProtocolName}; -use sp_mmr_primitives::{ - BatchProof, EncodableOpaqueLeaf, Error as MmrError, LeafIndex, MmrApi, Proof, -}; +use sp_mmr_primitives::{BatchProof, EncodableOpaqueLeaf, Error as MmrError, MmrApi, Proof}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_consensus::BlockOrigin; @@ -247,8 +245,8 @@ macro_rules! create_test_api { } } - impl MmrApi for RuntimeApi { - fn generate_proof(_leaf_index: LeafIndex) + impl MmrApi> for RuntimeApi { + fn generate_proof(_block_number: u64) -> Result<(EncodableOpaqueLeaf, Proof), MmrError> { unimplemented!() } @@ -270,13 +268,13 @@ macro_rules! 
create_test_api { Ok($mmr_root) } - fn generate_batch_proof(_leaf_indices: Vec) -> Result<(Vec, BatchProof), MmrError> { + fn generate_batch_proof(_block_numbers: Vec) -> Result<(Vec, BatchProof), MmrError> { unimplemented!() } fn generate_historical_batch_proof( - _leaf_indices: Vec, - _leaves_count: LeafIndex + _block_numbers: Vec, + _best_known_block_number: u64 ) -> Result<(Vec, BatchProof), MmrError> { unimplemented!() } @@ -349,7 +347,7 @@ fn initialize_beefy( ) -> impl Future where API: ProvideRuntimeApi + Default + Sync + Send, - API::Api: BeefyApi + MmrApi, + API::Api: BeefyApi + MmrApi>, { let tasks = FuturesUnordered::new(); diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index a21807c8ee875..4381081f74ebd 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -252,7 +252,7 @@ where C: Client, P: PayloadProvider, R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi, + R::Api: BeefyApi + MmrApi>, N: NetworkEventStream + NetworkRequest + SyncOracle + Send + Sync + Clone + 'static, { /// Return a new BEEFY worker instance. diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index e939ff8ae7cd0..ffc7ac2da56bf 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -30,10 +30,10 @@ use jsonrpsee::{ }; use serde::{Deserialize, Serialize}; -use sp_api::ProvideRuntimeApi; +use sp_api::{NumberFor, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::Bytes; -use sp_mmr_primitives::{BatchProof, Error as MmrError, LeafIndex, Proof}; +use sp_mmr_primitives::{BatchProof, Error as MmrError, Proof}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; pub use sp_mmr_primitives::MmrApi as MmrRuntimeApi; @@ -96,11 +96,11 @@ impl LeafBatchProof { /// MMR RPC methods. #[rpc(client, server)] -pub trait MmrApi { - /// Generate MMR proof for given leaf index. +pub trait MmrApi { + /// Generate MMR proof for given block number. /// /// This method calls into a runtime with MMR pallet included and attempts to generate - /// MMR proof for leaf at given `leaf_index`. + /// MMR proof for a block with a specified `block_number`. /// Optionally, a block hash at which the runtime should be queried can be specified. /// /// Returns the (full) leaf itself and a proof for this leaf (compact encoding, i.e. hash of @@ -108,49 +108,49 @@ pub trait MmrApi { #[method(name = "mmr_generateProof")] fn generate_proof( &self, - leaf_index: LeafIndex, + block_number: BlockNumber, at: Option, ) -> RpcResult>; - /// Generate MMR proof for the given leaf indices. + /// Generate MMR proof for the given block numbers. /// /// This method calls into a runtime with MMR pallet included and attempts to generate - /// MMR proof for a set of leaves at the given `leaf_indices`. + /// MMR proof for a set of blocks with the specific `block_numbers`. /// Optionally, a block hash at which the runtime should be queried can be specified. /// /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of /// the leaves). Both parameters are SCALE-encoded. 
/// The order of entries in the `leaves` field of the returned struct - /// is the same as the order of the entries in `leaf_indices` supplied + /// is the same as the order of the entries in `block_numbers` supplied #[method(name = "mmr_generateBatchProof")] fn generate_batch_proof( &self, - leaf_indices: Vec, + block_numbers: Vec, at: Option, ) -> RpcResult>; - /// Generate a MMR proof for the given `leaf_indices` of the MMR that had `leaves_count` leaves. + /// Generate a MMR proof for the given `block_numbers` given the `best_known_block_number`. /// /// This method calls into a runtime with MMR pallet included and attempts to generate - /// a MMR proof for the set of leaves at the given `leaf_indices` with MMR fixed to the state - /// with exactly `leaves_count` leaves. `leaves_count` must be larger than all `leaf_indices` - /// for the function to succeed. + /// a MMR proof for the set of blocks that have the given `block_numbers` with MMR given the + /// `best_known_block_number`. `best_known_block_number` must be larger than all the + /// `block_numbers` for the function to succeed. /// /// Optionally, a block hash at which the runtime should be queried can be specified. /// Note that specifying the block hash isn't super-useful here, unless you're generating /// proof using non-finalized blocks where there are several competing forks. That's because - /// MMR state will be fixed to the state with `leaves_count`, which already points to some - /// historical block. + /// MMR state will be fixed to the state with `best_known_block_number`, which already points to + /// some historical block. /// /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of /// the leaves). Both parameters are SCALE-encoded. /// The order of entries in the `leaves` field of the returned struct - /// is the same as the order of the entries in `leaf_indices` supplied + /// is the same as the order of the entries in `block_numbers` supplied #[method(name = "mmr_generateHistoricalBatchProof")] fn generate_historical_batch_proof( &self, - leaf_indices: Vec, - leaves_count: LeafIndex, + block_numbers: Vec, + best_known_block_number: BlockNumber, at: Option, ) -> RpcResult>; } @@ -169,16 +169,17 @@ impl Mmr { } #[async_trait] -impl MmrApiServer<::Hash> for Mmr +impl MmrApiServer<::Hash, NumberFor> + for Mmr where Block: BlockT, Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - Client::Api: MmrRuntimeApi, + Client::Api: MmrRuntimeApi>, MmrHash: Codec + Send + Sync + 'static, { fn generate_proof( &self, - leaf_index: LeafIndex, + block_number: NumberFor, at: Option<::Hash>, ) -> RpcResult> { let api = self.client.runtime_api(); @@ -188,7 +189,7 @@ where .generate_proof_with_context( &BlockId::hash(block_hash), sp_core::ExecutionContext::OffchainCall(None), - leaf_index, + block_number, ) .map_err(runtime_error_into_rpc_error)? .map_err(mmr_error_into_rpc_error)?; @@ -198,7 +199,7 @@ where fn generate_batch_proof( &self, - leaf_indices: Vec, + block_numbers: Vec>, at: Option<::Hash>, ) -> RpcResult::Hash>> { let api = self.client.runtime_api(); @@ -210,7 +211,7 @@ where .generate_batch_proof_with_context( &BlockId::hash(block_hash), sp_core::ExecutionContext::OffchainCall(None), - leaf_indices, + block_numbers, ) .map_err(runtime_error_into_rpc_error)? 
.map_err(mmr_error_into_rpc_error)?; @@ -220,8 +221,8 @@ where fn generate_historical_batch_proof( &self, - leaf_indices: Vec, - leaves_count: LeafIndex, + block_numbers: Vec>, + best_known_block_number: NumberFor, at: Option<::Hash>, ) -> RpcResult::Hash>> { let api = self.client.runtime_api(); @@ -233,8 +234,8 @@ where .generate_historical_batch_proof_with_context( &BlockId::hash(block_hash), sp_core::ExecutionContext::OffchainCall(None), - leaf_indices, - leaves_count, + block_numbers, + best_known_block_number, ) .map_err(runtime_error_into_rpc_error)? .map_err(mmr_error_into_rpc_error)?; diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 8b4f2b60bc198..ad3ce340496e8 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -59,7 +59,7 @@ use codec::Encode; use frame_support::weights::Weight; use sp_runtime::{ - traits::{self, One, Saturating}, + traits::{self, CheckedSub, One, Saturating}, SaturatedConversion, }; @@ -318,37 +318,73 @@ impl, I: 'static> Pallet { .saturating_add(leaf_index.saturated_into()) } - /// Generate a MMR proof for the given `leaf_indices`. + /// Convert a `block_num` into a leaf index. + fn block_num_to_leaf_index(block_num: T::BlockNumber) -> Result + where + T: frame_system::Config, + { + // leaf_idx = (leaves_count - 1) - (current_block_num - block_num); + let best_block_num = >::block_number(); + let blocks_diff = best_block_num.checked_sub(&block_num).ok_or_else(|| { + primitives::Error::BlockNumToLeafIndex + .log_debug("The provided block_number is greater than the best block number.") + })?; + let blocks_diff_as_leaf_idx = blocks_diff.try_into().map_err(|_| { + primitives::Error::BlockNumToLeafIndex + .log_debug("The `blocks_diff` couldn't be converted to `LeafIndex`.") + })?; + + let leaf_idx = Self::mmr_leaves() + .checked_sub(1) + .and_then(|last_leaf_idx| last_leaf_idx.checked_sub(blocks_diff_as_leaf_idx)) + .ok_or_else(|| { + primitives::Error::BlockNumToLeafIndex + .log_debug("There aren't enough leaves in the chain.") + })?; + Ok(leaf_idx) + } + + /// Generate a MMR proof for the given `block_numbers`. /// /// Note this method can only be used from an off-chain context /// (Offchain Worker or Runtime API call), since it requires /// all the leaves to be present. /// It may return an error or panic if used incorrectly. pub fn generate_batch_proof( - leaf_indices: Vec, + block_numbers: Vec, ) -> Result< (Vec>, primitives::BatchProof<>::Hash>), primitives::Error, > { - Self::generate_historical_batch_proof(leaf_indices, Self::mmr_leaves()) + Self::generate_historical_batch_proof( + block_numbers, + >::block_number(), + ) } - /// Generate a MMR proof for the given `leaf_indices` for the MMR of `leaves_count` size. + /// Generate a MMR proof for the given `block_numbers` given the `best_known_block_number`. /// /// Note this method can only be used from an off-chain context /// (Offchain Worker or Runtime API call), since it requires /// all the leaves to be present. /// It may return an error or panic if used incorrectly. 
pub fn generate_historical_batch_proof( - leaf_indices: Vec, - leaves_count: LeafIndex, + block_numbers: Vec, + best_known_block_number: T::BlockNumber, ) -> Result< (Vec>, primitives::BatchProof<>::Hash>), primitives::Error, > { - if leaves_count > Self::mmr_leaves() { - return Err(Error::InvalidLeavesCount) - } + let leaves_count = + Self::block_num_to_leaf_index(best_known_block_number)?.saturating_add(1); + + // we need to translate the block_numbers into leaf indices. + let leaf_indices = block_numbers + .iter() + .map(|block_num| -> Result { + Self::block_num_to_leaf_index(*block_num) + }) + .collect::, _>>()?; let mmr: ModuleMmr = mmr::Mmr::new(leaves_count); mmr.generate_batch_proof(leaf_indices) diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index bcb775ba02819..a63a433029295 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -235,22 +235,21 @@ fn should_generate_proofs_correctly() { // to retrieve full leaf data. register_offchain_ext(&mut ext); ext.execute_with(|| { - // when generate proofs for all leaves - let proofs = (0_u64..crate::NumberOfLeaves::::get()) + let best_block_number = frame_system::Pallet::::block_number(); + // when generate proofs for all leaves. + let proofs = (1_u64..=best_block_number) .into_iter() - .map(|leaf_index| { - crate::Pallet::::generate_batch_proof(vec![leaf_index]).unwrap() - }) + .map(|block_num| crate::Pallet::::generate_batch_proof(vec![block_num]).unwrap()) .collect::>(); // when generate historical proofs for all leaves - let historical_proofs = (0_u64..crate::NumberOfLeaves::::get()) + let historical_proofs = (1_u64..best_block_number) .into_iter() - .map(|leaf_index| { + .map(|block_num| { let mut proofs = vec![]; - for leaves_count in leaf_index + 1..=num_blocks { + for leaves_count in block_num..=num_blocks { proofs.push( crate::Pallet::::generate_historical_batch_proof( - vec![leaf_index], + vec![block_num], leaves_count, ) .unwrap(), @@ -321,7 +320,7 @@ fn should_generate_proofs_correctly() { leaf_count: 3, items: vec![hex( "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" - ),], + )], } ) ); @@ -352,6 +351,7 @@ fn should_generate_proofs_correctly() { assert_eq!( proofs[4], ( + // NOTE: the leaf index is equivalent to the block number(in this case 5) - 1 vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], BatchProof { leaf_indices: vec![4], @@ -393,7 +393,7 @@ fn should_generate_proofs_correctly() { } ) ); - assert_eq!(historical_proofs[6][0], proofs[6]); + assert_eq!(historical_proofs[5][1], proofs[5]); }); } @@ -410,11 +410,12 @@ fn should_generate_batch_proof_correctly() { register_offchain_ext(&mut ext); ext.execute_with(|| { // when generate proofs for a batch of leaves - let (.., proof) = crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap(); + let (.., proof) = crate::Pallet::::generate_batch_proof(vec![1, 5, 6]).unwrap(); // then assert_eq!( proof, BatchProof { + // the leaf indices are equivalent to the above specified block numbers - 1. 
leaf_indices: vec![0, 4, 5], leaf_count: 7, items: vec![ @@ -427,7 +428,7 @@ fn should_generate_batch_proof_correctly() { // when generate historical proofs for a batch of leaves let (.., historical_proof) = - crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 6).unwrap(); + crate::Pallet::::generate_historical_batch_proof(vec![1, 5, 6], 6).unwrap(); // then assert_eq!( historical_proof, @@ -443,7 +444,7 @@ fn should_generate_batch_proof_correctly() { // when generate historical proofs for a batch of leaves let (.., historical_proof) = - crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 7).unwrap(); + crate::Pallet::::generate_historical_batch_proof(vec![1, 5, 6], 7).unwrap(); // then assert_eq!(historical_proof, proof); }); @@ -500,15 +501,15 @@ fn should_verify() { fn should_verify_batch_proofs() { fn generate_and_verify_batch_proof( ext: &mut sp_io::TestExternalities, - leaf_indices: &Vec, + block_numbers: &Vec, blocks_to_add: usize, ) { let (leaves, proof) = ext.execute_with(|| { - crate::Pallet::::generate_batch_proof(leaf_indices.to_vec()).unwrap() + crate::Pallet::::generate_batch_proof(block_numbers.to_vec()).unwrap() }); let mmr_size = ext.execute_with(|| crate::Pallet::::mmr_leaves()); - let min_mmr_size = leaf_indices.iter().max().unwrap() + 1; + let min_mmr_size = block_numbers.iter().max().unwrap() + 1; // generate historical proofs for all possible mmr sizes, // lower bound being index of highest leaf to be proven @@ -516,7 +517,7 @@ fn should_verify_batch_proofs() { .map(|mmr_size| { ext.execute_with(|| { crate::Pallet::::generate_historical_batch_proof( - leaf_indices.to_vec(), + block_numbers.to_vec(), mmr_size, ) .unwrap() @@ -546,39 +547,41 @@ fn should_verify_batch_proofs() { // to retrieve full leaf data when generating proofs register_offchain_ext(&mut ext); - // verify that up to n=10, valid proofs are generated for all possible leaf combinations - for n in 0..10 { + // verify that up to n=10, valid proofs are generated for all possible block number + // combinations. + for n in 1..=10 { ext.execute_with(|| new_block()); ext.persist_offchain_overlay(); - // generate powerset (skipping empty set) of all possible leaf combinations for mmr size n - let leaves_set: Vec> = (0..=n).into_iter().powerset().skip(1).collect(); + // generate powerset (skipping empty set) of all possible block number combinations for mmr + // size n. + let blocks_set: Vec> = (1..=n).into_iter().powerset().skip(1).collect(); - leaves_set.iter().for_each(|leaves_subset| { - generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); + blocks_set.iter().for_each(|blocks_subset| { + generate_and_verify_batch_proof(&mut ext, &blocks_subset, 0); ext.persist_offchain_overlay(); }); } - // verify that up to n=15, valid proofs are generated for all possible 2-leaf combinations - for n in 10..15 { - // (MMR Leafs) + // verify that up to n=15, valid proofs are generated for all possible 2-block number + // combinations. + for n in 11..=15 { ext.execute_with(|| new_block()); ext.persist_offchain_overlay(); - // generate all possible 2-leaf combinations for mmr size n - let leaves_set: Vec> = (0..=n).into_iter().combinations(2).collect(); + // generate all possible 2-block number combinations for mmr size n. 
+ let blocks_set: Vec> = (1..=n).into_iter().combinations(2).collect(); - leaves_set.iter().for_each(|leaves_subset| { - generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); + blocks_set.iter().for_each(|blocks_subset| { + generate_and_verify_batch_proof(&mut ext, &blocks_subset, 0); ext.persist_offchain_overlay(); }); } - generate_and_verify_batch_proof(&mut ext, &vec![7, 11], 20); + generate_and_verify_batch_proof(&mut ext, &vec![8, 12], 20); ext.execute_with(|| add_blocks(1000)); ext.persist_offchain_overlay(); - generate_and_verify_batch_proof(&mut ext, &vec![7, 11, 100, 800], 100); + generate_and_verify_batch_proof(&mut ext, &vec![8, 12, 100, 800], 100); } #[test] @@ -650,11 +653,11 @@ fn should_verify_batch_proof_statelessly() { register_offchain_ext(&mut ext); let (leaves, proof) = ext.execute_with(|| { // when - crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() + crate::Pallet::::generate_batch_proof(vec![1, 4, 5]).unwrap() }); let (historical_leaves, historical_proof) = ext.execute_with(|| { // when - crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 6).unwrap() + crate::Pallet::::generate_historical_batch_proof(vec![1, 4, 5], 6).unwrap() }); // Verify proof without relying on any on-chain data. @@ -920,7 +923,7 @@ fn should_verify_canonicalized() { // Generate proofs for some blocks. let (leaves, proofs) = - ext.execute_with(|| crate::Pallet::::generate_batch_proof(vec![0, 4, 5, 7]).unwrap()); + ext.execute_with(|| crate::Pallet::::generate_batch_proof(vec![1, 4, 5, 7]).unwrap()); // Verify all previously generated proofs. ext.execute_with(|| { assert_eq!(crate::Pallet::::verify_leaves(leaves, proofs), Ok(())); @@ -953,19 +956,19 @@ fn does_not_panic_when_generating_historical_proofs() { // when leaf index is invalid assert_eq!( crate::Pallet::::generate_historical_batch_proof(vec![10], 7), - Err(Error::LeafNotFound), + Err(Error::BlockNumToLeafIndex), ); // when leaves count is invalid assert_eq!( crate::Pallet::::generate_historical_batch_proof(vec![3], 100), - Err(Error::InvalidLeavesCount), + Err(Error::BlockNumToLeafIndex), ); // when both leaf index and leaves count are invalid assert_eq!( crate::Pallet::::generate_historical_batch_proof(vec![10], 100), - Err(Error::InvalidLeavesCount), + Err(Error::BlockNumToLeafIndex), ); }); } diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs index b479d979f13f3..0edb8babd608e 100644 --- a/primitives/beefy/src/mmr.rs +++ b/primitives/beefy/src/mmr.rs @@ -142,7 +142,7 @@ pub use mmr_root_provider::MmrRootProvider; mod mmr_root_provider { use super::*; use crate::{known_payloads, payload::PayloadProvider, Payload}; - use sp_api::ProvideRuntimeApi; + use sp_api::{NumberFor, ProvideRuntimeApi}; use sp_mmr_primitives::MmrApi; use sp_runtime::generic::BlockId; use sp_std::{marker::PhantomData, sync::Arc}; @@ -159,7 +159,7 @@ mod mmr_root_provider { where B: Block, R: ProvideRuntimeApi, - R::Api: MmrApi, + R::Api: MmrApi>, { /// Create new BEEFY Payload provider with MMR Root as payload. 
pub fn new(runtime: Arc) -> Self { @@ -182,7 +182,7 @@ mod mmr_root_provider { where B: Block, R: ProvideRuntimeApi, - R::Api: MmrApi, + R::Api: MmrApi>, { fn payload(&self, header: &B::Header) -> Option { self.mmr_root_from_digest_or_runtime(header).map(|mmr_root| { diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 7a26cae839ea9..06bc1f4bffe84 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -387,6 +387,8 @@ impl Proof { /// Merkle Mountain Range operation error. #[derive(RuntimeDebug, codec::Encode, codec::Decode, PartialEq, Eq)] pub enum Error { + /// Error during translation of a block number into a leaf index. + BlockNumToLeafIndex, /// Error while pushing new node. Push, /// Error getting the new root. @@ -403,8 +405,8 @@ pub enum Error { PalletNotIncluded, /// Cannot find the requested leaf index InvalidLeafIndex, - /// The provided leaves count is larger than the actual leaves count. - InvalidLeavesCount, + /// The provided best know block number is invalid. + InvalidBestKnownBlock, } impl Error { @@ -434,9 +436,9 @@ impl Error { sp_api::decl_runtime_apis! { /// API to interact with MMR pallet. - pub trait MmrApi { - /// Generate MMR proof for a leaf under given index. - fn generate_proof(leaf_index: LeafIndex) -> Result<(EncodableOpaqueLeaf, Proof), Error>; + pub trait MmrApi { + /// Generate MMR proof for a block with a specified `block_number`. + fn generate_proof(block_number: BlockNumber) -> Result<(EncodableOpaqueLeaf, Proof), Error>; /// Verify MMR proof against on-chain MMR. /// @@ -457,14 +459,13 @@ sp_api::decl_runtime_apis! { /// Return the on-chain MMR root hash. fn mmr_root() -> Result; - /// Generate MMR proof for a series of leaves under given indices. - fn generate_batch_proof(leaf_indices: Vec) - -> Result<(Vec, BatchProof), Error>; + /// Generate MMR proof for a series of blocks with the specified block numbers. + fn generate_batch_proof(block_numbers: Vec) -> Result<(Vec, BatchProof), Error>; - /// Generate MMR proof for a series of leaves under given indices, using MMR at given `leaves_count` size. + /// Generate MMR proof for a series of `block_numbers`, given the `best_known_block_number`. fn generate_historical_batch_proof( - leaf_indices: Vec, - leaves_count: LeafIndex + block_numbers: Vec, + best_known_block_number: BlockNumber ) -> Result<(Vec, BatchProof), Error>; /// Verify MMR proof against on-chain MMR for a batch of leaves. 
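Editor's note (illustration only, not part of any patch): the MMR commit above moves the public
`MmrApi`/RPC surface from leaf indices to block numbers, and the whole translation hinges on the
pallet's new `block_num_to_leaf_index`. A minimal, self-contained sketch of that arithmetic
follows; the standalone free-function signature and plain `u64` types are assumptions made for
illustration, whereas the real pallet reads the best block number and the leaf count from storage
and returns `primitives::Error` values on failure.

    // Sketch of the block-number -> leaf-index translation, assuming one MMR leaf
    // is appended per block (hypothetical standalone helper, not the pallet code).
    fn block_num_to_leaf_index(best_block: u64, leaves_count: u64, block_num: u64) -> Option<u64> {
        // leaf_idx = (leaves_count - 1) - (best_block - block_num)
        let blocks_diff = best_block.checked_sub(block_num)?; // blocks ahead of the tip are rejected
        leaves_count.checked_sub(1)?.checked_sub(blocks_diff)
    }

    fn main() {
        // With 7 leaves at best block 7, block 1 maps to leaf 0 and block 5 to leaf 4,
        // matching the reworked `generate_batch_proof(vec![1, 5, 6])` test expectations above.
        assert_eq!(block_num_to_leaf_index(7, 7, 1), Some(0));
        assert_eq!(block_num_to_leaf_index(7, 7, 5), Some(4));
        assert_eq!(block_num_to_leaf_index(7, 7, 8), None); // block number beyond the best block
        assert_eq!(block_num_to_leaf_index(7, 0, 1), None); // empty MMR
    }
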
From fd00b14932d37bee06371325269c163bf80d1d16 Mon Sep 17 00:00:00 2001 From: Koute Date: Thu, 13 Oct 2022 19:31:00 +0900 Subject: [PATCH 240/348] Enable the `wasmtime`-based WASM executor by default (#12486) --- client/cli/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 37a8fd2e0b64d..66742e14789ef 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -51,6 +51,6 @@ sp-version = { version = "5.0.0", path = "../../primitives/version" } tempfile = "3.1.0" [features] -default = ["rocksdb"] +default = ["rocksdb", "wasmtime"] rocksdb = ["sc-client-db/rocksdb"] wasmtime = ["sc-service/wasmtime"] From 6e04e48f36e1839532d566a4dcad3c57b4be939d Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 13 Oct 2022 15:22:57 +0200 Subject: [PATCH 241/348] Trivial BlockId::Number => Hash (#12490) --- primitives/api/test/benches/bench.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/primitives/api/test/benches/bench.rs b/primitives/api/test/benches/bench.rs index 2682c91f94106..2445a5c07f09e 100644 --- a/primitives/api/test/benches/bench.rs +++ b/primitives/api/test/benches/bench.rs @@ -27,14 +27,14 @@ fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("add one with same runtime api", |b| { let client = substrate_test_runtime_client::new(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); b.iter(|| runtime_api.benchmark_add_one(&block_id, &1)) }); c.bench_function("add one with recreating runtime api", |b| { let client = substrate_test_runtime_client::new(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); b.iter(|| client.runtime_api().benchmark_add_one(&block_id, &1)) }); @@ -42,7 +42,7 @@ fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("vector add one with same runtime api", |b| { let client = substrate_test_runtime_client::new(); let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); let data = vec![0; 1000]; b.iter_with_large_drop(|| runtime_api.benchmark_vector_add_one(&block_id, &data)) @@ -50,7 +50,7 @@ fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("vector add one with recreating runtime api", |b| { let client = substrate_test_runtime_client::new(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); let data = vec![0; 1000]; b.iter_with_large_drop(|| client.runtime_api().benchmark_vector_add_one(&block_id, &data)) @@ -60,7 +60,7 @@ fn sp_api_benchmark(c: &mut Criterion) { let client = TestClientBuilder::new() .set_execution_strategy(ExecutionStrategy::AlwaysWasm) .build(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); b.iter(|| client.runtime_api().benchmark_indirect_call(&block_id).unwrap()) }); @@ -68,7 +68,7 @@ fn sp_api_benchmark(c: &mut Criterion) { let client = TestClientBuilder::new() .set_execution_strategy(ExecutionStrategy::AlwaysWasm) .build(); - let block_id = BlockId::Number(client.chain_info().best_number); + let block_id = BlockId::Hash(client.chain_info().best_hash); b.iter(|| 
client.runtime_api().benchmark_direct_call(&block_id).unwrap()) }); } From f3139874cb50f9028ecba9bdbd3004e7f3f228f5 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 14 Oct 2022 11:27:32 +0200 Subject: [PATCH 242/348] BlockId removal: refactor: Backend::state_at (#12488) * Minor naming improved * BlockId removal refactor: Backend::state_at * formatting --- bin/node/bench/src/import.rs | 9 +++- client/api/src/backend.rs | 4 +- client/api/src/in_mem.rs | 21 ++++---- .../basic-authorship/src/basic_authorship.rs | 2 +- client/block-builder/src/lib.rs | 5 +- client/db/benches/state_access.rs | 10 ++-- client/db/src/lib.rs | 48 ++++++------------ client/service/src/client/call_executor.rs | 21 ++++---- client/service/src/client/client.rs | 49 ++++++++++++------- client/service/test/src/client/mod.rs | 26 ++++++---- primitives/blockchain/src/backend.rs | 4 +- .../rpc/state-trie-migration-rpc/src/lib.rs | 6 +-- 12 files changed, 105 insertions(+), 100 deletions(-) diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index 47f630eb68700..26f9391800ceb 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -34,7 +34,7 @@ use std::borrow::Cow; use node_primitives::Block; use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; -use sc_client_api::backend::Backend; +use sc_client_api::{backend::Backend, HeaderBackend}; use sp_runtime::generic::BlockId; use sp_state_machine::InspectState; @@ -127,10 +127,15 @@ impl core::Benchmark for ImportBenchmark { context.import_block(self.block.clone()); let elapsed = start.elapsed(); + let hash = context + .client + .expect_block_hash_from_id(&BlockId::number(1)) + .expect("Block 1 was imported; qed"); + // Sanity checks. context .client - .state_at(&BlockId::number(1)) + .state_at(&hash) .expect("state_at failed for block#1") .inspect_state(|| { match self.block_type { diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index bcc7a9bff3b2d..0e94d28b75dd5 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -505,11 +505,11 @@ pub trait Backend: AuxStore + Send + Sync { /// Returns true if state for given block is available. fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor) -> bool { - self.state_at(BlockId::Hash(*hash)).is_ok() + self.state_at(hash).is_ok() } /// Returns state backend with post-state of given block. - fn state_at(&self, block: BlockId) -> sp_blockchain::Result; + fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result; /// Attempts to revert the chain by `n` blocks. 
If `revert_finalized` is set it will attempt to /// revert past any finalized block, this is unsafe and can potentially leave the node in an diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 9000f62aa6cc3..9cea1883bdcdc 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -686,7 +686,7 @@ where type OffchainStorage = OffchainStorage; fn begin_operation(&self) -> sp_blockchain::Result { - let old_state = self.state_at(BlockId::Hash(Default::default()))?; + let old_state = self.state_at(&Default::default())?; Ok(BlockImportOperation { pending_block: None, old_state, @@ -702,7 +702,8 @@ where operation: &mut Self::BlockImportOperation, block: BlockId, ) -> sp_blockchain::Result<()> { - operation.old_state = self.state_at(block)?; + let hash = self.blockchain.expect_block_hash_from_id(&block)?; + operation.old_state = self.state_at(&hash)?; Ok(()) } @@ -768,16 +769,16 @@ where None } - fn state_at(&self, block: BlockId) -> sp_blockchain::Result { - match block { - BlockId::Hash(h) if h == Default::default() => return Ok(Self::State::default()), - _ => {}, + fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result { + if *hash == Default::default() { + return Ok(Self::State::default()) } - self.blockchain - .id(block) - .and_then(|id| self.states.read().get(&id).cloned()) - .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", block))) + self.states + .read() + .get(hash) + .cloned() + .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash))) } fn revert( diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index f5ccd9023a3db..da98ccab9cb07 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -736,7 +736,7 @@ mod tests { let api = client.runtime_api(); api.execute_block(&block_id, proposal.block).unwrap(); - let state = backend.state_at(block_id).unwrap(); + let state = backend.state_at(&genesis_hash).unwrap(); let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap(); diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 803e9c1e8bf26..cd5e62e264200 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -258,12 +258,11 @@ where let proof = self.api.extract_proof(); - let state = self.backend.state_at(self.block_id)?; - let parent_hash = self.parent_hash; + let state = self.backend.state_at(&self.parent_hash)?; let storage_changes = self .api - .into_storage_changes(&state, parent_hash) + .into_storage_changes(&state, self.parent_hash) .map_err(sp_blockchain::Error::StorageChanges)?; Ok(BuiltBlock { diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index 4f4c10bcc8f53..912a9b028f638 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -84,7 +84,7 @@ fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 .map(|(k, v)| (k.clone(), Some(v.clone()))) .collect::>(); - let (state_root, tx) = db.state_at(BlockId::Hash(parent_hash)).unwrap().storage_root( + let (state_root, tx) = db.state_at(&parent_hash).unwrap().storage_root( changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), StateVersion::V1, ); @@ -176,7 +176,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + || backend.state_at(&block_hash).expect("Creates state"), 
|state| { for key in keys.iter().cycle().take(keys.len() * multiplier) { let _ = state.storage(&key).expect("Doesn't fail").unwrap(); @@ -214,7 +214,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + || backend.state_at(&block_hash).expect("Creates state"), |state| { for key in keys.iter().take(1).cycle().take(multiplier) { let _ = state.storage(&key).expect("Doesn't fail").unwrap(); @@ -252,7 +252,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + || backend.state_at(&block_hash).expect("Creates state"), |state| { for key in keys.iter().take(1).cycle().take(multiplier) { let _ = state.storage_hash(&key).expect("Doesn't fail").unwrap(); @@ -290,7 +290,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(BlockId::Hash(block_hash)).expect("Creates state"), + || backend.state_at(&block_hash).expect("Creates state"), |state| { let _ = state .storage_hash(sp_core::storage::well_known_keys::CODE) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 32c4c9ef85ed9..ae777ad154f6d 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1963,10 +1963,11 @@ impl sc_client_api::backend::Backend for Backend { operation: &mut Self::BlockImportOperation, block: BlockId, ) -> ClientResult<()> { + let hash = self.blockchain.expect_block_hash_from_id(&block)?; if block.is_pre_genesis() { operation.old_state = self.empty_state()?; } else { - operation.old_state = self.state_at(block)?; + operation.old_state = self.state_at(&hash)?; } operation.commit_state = true; @@ -2302,15 +2303,8 @@ impl sc_client_api::backend::Backend for Backend { &self.blockchain } - fn state_at(&self, block: BlockId) -> ClientResult { - use sc_client_api::blockchain::HeaderBackend as BcHeaderBackend; - - let is_genesis = match &block { - BlockId::Number(n) if n.is_zero() => true, - BlockId::Hash(h) if h == &self.blockchain.meta.read().genesis_hash => true, - _ => false, - }; - if is_genesis { + fn state_at(&self, hash: &Block::Hash) -> ClientResult { + if hash == &self.blockchain.meta.read().genesis_hash { if let Some(genesis_state) = &*self.genesis_state.read() { let root = genesis_state.root; let db_state = DbStateBuilder::::new(genesis_state.clone(), root) @@ -2322,14 +2316,7 @@ impl sc_client_api::backend::Backend for Backend { } } - let hash = match block { - BlockId::Hash(h) => h, - BlockId::Number(n) => self.blockchain.hash(n)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!("Unknown block number {}", n)) - })?, - }; - - match self.blockchain.header_metadata(hash) { + match self.blockchain.header_metadata(*hash) { Ok(ref hdr) => { let hint = || { sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref()) @@ -2337,7 +2324,7 @@ impl sc_client_api::backend::Backend for Backend { .is_some() }; if let Ok(()) = - self.storage.state_db.pin(&hash, hdr.number.saturated_into::(), hint) + self.storage.state_db.pin(hash, hdr.number.saturated_into::(), hint) { let root = hdr.state_root; let db_state = DbStateBuilder::::new(self.storage.clone(), root) @@ -2345,12 +2332,12 @@ impl sc_client_api::backend::Backend for Backend { self.shared_trie_cache.as_ref().map(|c| c.local_cache()), ) .build(); - let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash)); - 
Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone())) + let state = RefTrackingState::new(db_state, self.storage.clone(), Some(*hash)); + Ok(RecordStatsState::new(state, Some(*hash), self.state_usage.clone())) } else { Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", - block + hash ))) } }, @@ -2588,7 +2575,7 @@ pub(crate) mod tests { db.commit_operation(op).unwrap(); - let state = db.state_at(BlockId::Number(0)).unwrap(); + let state = db.state_at(&hash).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -2623,7 +2610,8 @@ pub(crate) mod tests { db.commit_operation(op).unwrap(); - let state = db.state_at(BlockId::Number(1)).unwrap(); + let hash = db.blockchain().expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); + let state = db.state_at(&hash).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -3139,11 +3127,7 @@ pub(crate) mod tests { hash }; - let block0_hash = backend - .state_at(BlockId::Hash(hash0)) - .unwrap() - .storage_hash(&b"test"[..]) - .unwrap(); + let block0_hash = backend.state_at(&hash0).unwrap().storage_hash(&b"test"[..]).unwrap(); let hash1 = { let mut op = backend.begin_operation().unwrap(); @@ -3182,11 +3166,7 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); } - let block1_hash = backend - .state_at(BlockId::Hash(hash1)) - .unwrap() - .storage_hash(&b"test"[..]) - .unwrap(); + let block1_hash = backend.state_at(&hash1).unwrap().storage_hash(&b"test"[..]).unwrap(); assert_ne!(block0_hash, block1_hash); } diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index e39436ec641d7..8ab332a24be78 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -147,17 +147,14 @@ where extensions: Option, ) -> sp_blockchain::Result> { let mut changes = OverlayedChanges::default(); - let state = self.backend.state_at(*at)?; + let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; + let state = self.backend.state_at(&at_hash)?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; let runtime_code = self.check_override(runtime_code, at)?; - let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) - })?; - let mut sm = StateMachine::new( &state, &mut changes, @@ -195,14 +192,11 @@ where { let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); - let state = self.backend.state_at(*at)?; + let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; + let state = self.backend.state_at(&at_hash)?; let changes = &mut *changes.borrow_mut(); - let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) - })?; - // It is important to extract the runtime code here before we create the proof // recorder to not record it. We also need to fetch the runtime code from `state` to // make sure we use the caching layers. 
@@ -255,7 +249,9 @@ where fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { let mut overlay = OverlayedChanges::default(); - let state = self.backend.state_at(*id)?; + + let at_hash = self.backend.blockchain().expect_block_hash_from_id(id)?; + let state = self.backend.state_at(&at_hash)?; let mut cache = StorageTransactionCache::::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &state, None); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); @@ -272,7 +268,8 @@ where method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { - let state = self.backend.state_at(*at)?; + let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; + let state = self.backend.state_at(&at_hash)?; let trie_backend = state.as_trie_backend(); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 27561046c3481..e2fd5cda1d2f0 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -414,8 +414,8 @@ where } /// Get a reference to the state at a given block. - pub fn state_at(&self, block: &BlockId) -> sp_blockchain::Result { - self.backend.state_at(*block) + pub fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result { + self.backend.state_at(hash) } /// Get the code at a given block. @@ -813,7 +813,7 @@ where Block::new(import_block.header.clone(), body.clone()), )?; - let state = self.backend.state_at(at)?; + let state = self.backend.state_at(parent_hash)?; let gen_storage_changes = runtime_api .into_storage_changes(&state, *parent_hash) .map_err(sp_blockchain::Error::Storage)?; @@ -1154,7 +1154,9 @@ where id: &BlockId, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { - self.state_at(id).and_then(|state| prove_read(state, keys).map_err(Into::into)) + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.state_at(&hash) + .and_then(|state| prove_read(state, keys).map_err(Into::into)) } fn read_child_proof( @@ -1163,7 +1165,8 @@ where child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { - self.state_at(id) + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.state_at(&hash) .and_then(|state| prove_child_read(state, child_info, keys).map_err(Into::into)) } @@ -1182,7 +1185,8 @@ where start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(CompactProof, u32)> { - let state = self.state_at(id)?; + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let state = self.state_at(&hash)?; // this is a read proof, using version V0 or V1 is equivalent. 
let root = state.storage_root(std::iter::empty(), StateVersion::V0).0; @@ -1204,7 +1208,8 @@ where if start_key.len() > MAX_NESTED_TRIE_DEPTH { return Err(Error::Backend("Invalid start key.".to_string())) } - let state = self.state_at(id)?; + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let state = self.state_at(&hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { @@ -1400,7 +1405,8 @@ where id: &BlockId, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let keys = self.state_at(&hash)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); Ok(keys) } @@ -1409,7 +1415,8 @@ where id: &BlockId, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let state = self.state_at(id)?; + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let state = self.state_at(&hash)?; let keys = state .keys(&key_prefix.0) .into_iter() @@ -1427,7 +1434,8 @@ where prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { - let state = self.state_at(id)?; + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let state = self.state_at(&hash)?; let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new(state, prefix, start_key)) } @@ -1439,7 +1447,8 @@ where prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { - let state = self.state_at(id)?; + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + let state = self.state_at(&hash)?; let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new_child(state, child_info, prefix, start_key)) } @@ -1449,8 +1458,9 @@ where id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result> { + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; Ok(self - .state_at(id)? + .state_at(&hash)? .storage(&key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) @@ -1461,7 +1471,8 @@ where id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result> { - self.state_at(id)? + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.state_at(&hash)? .storage_hash(&key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) } @@ -1472,8 +1483,9 @@ where child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; let keys = self - .state_at(id)? + .state_at(&hash)? .child_keys(child_info, &key_prefix.0) .into_iter() .map(StorageKey) @@ -1487,8 +1499,9 @@ where child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; Ok(self - .state_at(id)? + .state_at(&hash)? .child_storage(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) @@ -1500,7 +1513,8 @@ where child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { - self.state_at(id)? + let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.state_at(&hash)? 
.child_storage_hash(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) } @@ -1681,7 +1695,8 @@ where } fn state_at(&self, at: &BlockId) -> Result { - self.state_at(at).map_err(Into::into) + let hash = self.backend.blockchain().expect_block_hash_from_id(at)?; + self.state_at(&hash).map_err(Into::into) } } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index e0f47110d9046..c6ac1fc7d73d9 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -20,7 +20,7 @@ use futures::executor::block_on; use parity_scale_codec::{Decode, Encode, Joiner}; use sc_block_builder::BlockBuilderProvider; use sc_client_api::{ - in_mem, BlockBackend, BlockchainEvents, FinalityNotifications, StorageProvider, + in_mem, BlockBackend, BlockchainEvents, FinalityNotifications, HeaderBackend, StorageProvider, }; use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; use sc_consensus::{ @@ -338,11 +338,15 @@ fn block_builder_works_with_transactions() { let block = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); + let hash0 = client + .expect_block_hash_from_id(&BlockId::Number(0)) + .expect("block 0 was just imported. qed"); + let hash1 = client + .expect_block_hash_from_id(&BlockId::Number(1)) + .expect("block 1 was just imported. qed"); + assert_eq!(client.chain_info().best_number, 1); - assert_ne!( - client.state_at(&BlockId::Number(1)).unwrap().pairs(), - client.state_at(&BlockId::Number(0)).unwrap().pairs() - ); + assert_ne!(client.state_at(&hash1).unwrap().pairs(), client.state_at(&hash0).unwrap().pairs()); assert_eq!( client .runtime_api() @@ -392,11 +396,15 @@ fn block_builder_does_not_include_invalid() { let block = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); + let hash0 = client + .expect_block_hash_from_id(&BlockId::Number(0)) + .expect("block 0 was just imported. qed"); + let hash1 = client + .expect_block_hash_from_id(&BlockId::Number(1)) + .expect("block 1 was just imported. qed"); + assert_eq!(client.chain_info().best_number, 1); - assert_ne!( - client.state_at(&BlockId::Number(1)).unwrap().pairs(), - client.state_at(&BlockId::Number(0)).unwrap().pairs() - ); + assert_ne!(client.state_at(&hash1).unwrap().pairs(), client.state_at(&hash0).unwrap().pairs()); assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1) } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index f80c6d0269116..79c05aec8adca 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -78,8 +78,8 @@ pub trait HeaderBackend: Send + Sync { /// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is /// not found. 
fn expect_block_hash_from_id(&self, id: &BlockId) -> Result { - self.block_hash_from_id(id).and_then(|n| { - n.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id))) + self.block_hash_from_id(id).and_then(|h| { + h.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id))) }) } } diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index f9a57206ece4d..c3d3ec816f97e 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -24,7 +24,7 @@ use jsonrpsee::{ }; use sc_rpc_api::DenyUnsafe; use serde::{Deserialize, Serialize}; -use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use std::sync::Arc; use sp_core::{ @@ -144,8 +144,8 @@ where fn call(&self, at: Option<::Hash>) -> RpcResult { self.deny_unsafe.check_if_safe()?; - let block_id = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); - let state = self.backend.state_at(block_id).map_err(error_into_rpc_err)?; + let hash = at.unwrap_or_else(|| self.client.info().best_hash); + let state = self.backend.state_at(&hash).map_err(error_into_rpc_err)?; let (top, child) = migration_status(&state).map_err(error_into_rpc_err)?; Ok(MigrationStatusResult { From 0ee03277c33b6334ddba7434e014fa637dcb6107 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 14 Oct 2022 13:26:13 +0200 Subject: [PATCH 243/348] Try-runtime CLI fix weight parsing (#12491) Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- Cargo.lock | 1 + utils/frame/try-runtime/cli/Cargo.toml | 1 + .../try-runtime/cli/src/commands/follow_chain.rs | 5 +++-- .../cli/src/commands/on_runtime_upgrade.rs | 14 ++++++++------ 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 062195c70f8ab..032886b9945e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11356,6 +11356,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-version", + "sp-weights", "tokio", "zstd", ] diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 56ead30644d86..956eaff745335 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -31,6 +31,7 @@ sp-keystore = { version = "0.12.0", path = "../../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../../primitives/state-machine" } sp-version = { version = "5.0.0", path = "../../../../primitives/version" } +sp-weights = { version = "4.0.0", path = "../../../../primitives/weights" } frame-try-runtime = { path = "../../../../frame/try-runtime" } [dev-dependencies] diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 88866b538169b..fb5345827858a 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -33,6 +33,7 @@ use sc_service::Configuration; use serde::de::DeserializeOwned; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_weights::Weight; use std::{collections::VecDeque, fmt::Debug, str::FromStr}; const SUB: &str = "chain_subscribeFinalizedHeads"; @@ -294,8 +295,8 @@ where full_extensions(), )?; - let consumed_weight = ::decode(&mut &*encoded_result) - .map_err(|e| 
format!("failed to decode output: {:?}", e))?; + let consumed_weight = ::decode(&mut &*encoded_result) + .map_err(|e| format!("failed to decode weight: {:?}", e))?; let storage_changes = changes .drain_storage_changes( diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 5055e4fb34581..1d7d876a4aa92 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -21,6 +21,7 @@ use parity_scale_codec::Decode; use sc_executor::NativeExecutionDispatch; use sc_service::Configuration; use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_weights::Weight; use crate::{ build_executor, ensure_matching_spec, extract_code, local_spec, state_machine_call_with_proof, @@ -78,14 +79,15 @@ where Default::default(), // we don't really need any extensions here. )?; - let (weight, total_weight) = <(u64, u64) as Decode>::decode(&mut &*encoded_result) - .map_err(|e| format!("failed to decode output: {:?}", e))?; + let (weight, total_weight) = <(Weight, Weight) as Decode>::decode(&mut &*encoded_result) + .map_err(|e| format!("failed to decode weight: {:?}", e))?; log::info!( target: LOG_TARGET, - "TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = {}, total weight = {} ({})", - weight, - total_weight, - weight as f64 / total_weight.max(1) as f64 + "TryRuntime_on_runtime_upgrade executed without errors. Consumed weight = ({} ps, {} byte), total weight = ({} ps, {} byte) ({:.2} %, {:.2} %).", + weight.ref_time(), weight.proof_size(), + total_weight.ref_time(), total_weight.proof_size(), + (weight.ref_time() as f64 / total_weight.ref_time().max(1) as f64) * 100.0, + (weight.proof_size() as f64 / total_weight.proof_size().max(1) as f64) * 100.0, ); Ok(()) From 6f453b5c2b6d7479189da06fd03e403b95503bdd Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Mon, 17 Oct 2022 14:27:24 +0800 Subject: [PATCH 244/348] Export fn code_hash (#12479) Co-authored-by: parity-processbot <> --- frame/contracts/src/lib.rs | 5 +++++ frame/contracts/src/storage.rs | 1 - 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 794b172cc6282..d48a71b85e9fe 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -1031,6 +1031,11 @@ where T::AddressGenerator::generate_address(deploying_address, code_hash, salt) } + /// Returns the code hash of the contract specified by `account` ID. + pub fn code_hash(account: &AccountIdOf) -> Option> { + Storage::::code_hash(account) + } + /// Store code for benchmarks which does not check nor instrument the code. #[cfg(feature = "runtime-benchmarks")] fn store_code_raw( diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index cf10c3225c920..c7644e696196f 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -328,7 +328,6 @@ where } /// Returns the code hash of the contract specified by `account` ID. 
- #[cfg(test)] pub fn code_hash(account: &AccountIdOf) -> Option> { >::get(account).map(|i| i.code_hash) } From 30a7a5b4b71f23971742684889f9fdec5f5854a9 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Mon, 17 Oct 2022 10:25:25 +0300 Subject: [PATCH 245/348] Introduce `ChainSyncInterface` (#12489) * Introduce `ChainSyncInterface` `ChainSyncInterface` provides an asynchronous interface for other subsystems to submit calls to `ChainSync`. This allows `NetworkService` to delegate calls to `ChainSync` while still providing the same API for other subsystems (for now). This makes it possible to move the syncing code in piecemeal fashion out of `protocol.rs` as the calls are just forwarded to `ChainSync`. * Apply review comments * Fix tests --- Cargo.lock | 2 + client/network/common/src/sync.rs | 10 ++++ client/network/src/config.rs | 4 ++ client/network/src/lib.rs | 12 ++++ client/network/src/protocol.rs | 19 ++---- client/network/src/service.rs | 13 ++-- client/network/src/service/chainsync_tests.rs | 56 ++++++++++++++---- client/network/src/service/tests.rs | 3 +- client/network/sync/Cargo.toml | 2 + client/network/sync/src/lib.rs | 45 ++++++++++---- client/network/sync/src/mock.rs | 4 ++ client/network/sync/src/service/chain_sync.rs | 58 ++++++++++++++++++ client/network/sync/src/service/mock.rs | 31 ++++++++++ client/network/sync/src/service/mod.rs | 22 +++++++ client/network/sync/src/tests.rs | 59 +++++++++++++++++++ client/network/test/src/lib.rs | 3 +- client/service/src/builder.rs | 3 +- 17 files changed, 301 insertions(+), 45 deletions(-) create mode 100644 client/network/sync/src/service/chain_sync.rs create mode 100644 client/network/sync/src/service/mock.rs create mode 100644 client/network/sync/src/service/mod.rs create mode 100644 client/network/sync/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 032886b9945e2..7f49ff75561db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8638,6 +8638,7 @@ name = "sc-network-sync" version = "0.10.0-dev" dependencies = [ "array-bytes", + "async-std", "fork-tree", "futures", "libp2p", @@ -8653,6 +8654,7 @@ dependencies = [ "sc-consensus", "sc-network-common", "sc-peerset", + "sc-utils", "smallvec", "sp-arithmetic", "sp-blockchain", diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index d3603c6792c84..aa761e66ebf68 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -395,4 +395,14 @@ pub trait ChainSync: Send { /// Decode implementation-specific state response. fn decode_state_response(&self, response: &[u8]) -> Result; + + /// Advance the state of `ChainSync` + /// + /// Internally calls [`ChainSync::poll_block_announce_validation()`] and + /// this function should be polled until it returns [`Poll::Pending`] to + /// consume all pending events. + fn poll( + &mut self, + cx: &mut std::task::Context, + ) -> Poll>; } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index db3e8f0b98a33..dcd7c7d8ad6de 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -33,6 +33,7 @@ pub use sc_network_common::{ pub use libp2p::{build_multiaddr, core::PublicKey, identity}; +use crate::ChainSyncInterface; use core::{fmt, iter}; use libp2p::{ identity::{ed25519, Keypair}, @@ -91,6 +92,9 @@ where /// Instance of chain sync implementation. 
pub chain_sync: Box>, + /// Interface that can be used to delegate syncing-related function calls to `ChainSync` + pub chain_sync_service: Box>, + /// Registry for recording prometheus metrics to. pub metrics_registry: Option, diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 27f2a938154fe..f3faa44ee6dbd 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -279,6 +279,7 @@ pub use service::{ DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, NotificationSenderReady, OutboundFailure, PublicKey, }; +use sp_runtime::traits::{Block as BlockT, NumberFor}; pub use sc_peerset::ReputationChange; @@ -293,3 +294,14 @@ const MAX_CONNECTIONS_PER_PEER: usize = 2; /// The maximum number of concurrent established connections that were incoming. const MAX_CONNECTIONS_ESTABLISHED_INCOMING: u32 = 10_000; + +/// Abstraction over syncing-related services +pub trait ChainSyncInterface: + NetworkSyncForkRequest> + Send + Sync +{ +} + +impl ChainSyncInterface for T where + T: NetworkSyncForkRequest> + Send + Sync +{ +} diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c3def8adc6cfe..a04dba79f7fd2 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -947,18 +947,6 @@ where self.chain_sync.clear_justification_requests(); } - /// Request syncing for the given block from given set of peers. - /// Uses `protocol` to queue a new block download request and tries to dispatch all pending - /// requests. - pub fn set_sync_fork_request( - &mut self, - peers: Vec, - hash: &B::Hash, - number: NumberFor, - ) { - self.chain_sync.set_sync_fork_request(peers, hash, number) - } - /// A batch of blocks have been processed, with or without errors. /// Call this when a batch of blocks have been processed by the importqueue, with or without /// errors. @@ -1461,8 +1449,11 @@ where self.pending_messages.push_back(event); } - // Check if there is any block announcement validation finished. - while let Poll::Ready(result) = self.chain_sync.poll_block_announce_validation(cx) { + // Advance the state of `ChainSync` + // + // Process any received requests received from `NetworkService` and + // check if there is any block announcement validation finished. + while let Poll::Ready(result) = self.chain_sync.poll(cx) { match self.process_block_announce_validation_result(result) { CustomMessageOutcome::None => {}, outcome => self.pending_messages.push_back(outcome), diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 25916041285a3..d1f428cc292c3 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -35,7 +35,7 @@ use crate::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, protocol::{self, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready}, - transport, ReputationChange, + transport, ChainSyncInterface, ReputationChange, }; use futures::{channel::oneshot, prelude::*}; @@ -121,6 +121,8 @@ pub struct NetworkService { peerset: PeersetHandle, /// Channel that sends messages to the actual worker. to_worker: TracingUnboundedSender>, + /// Interface that can be used to delegate calls to `ChainSync` + chain_sync_service: Box>, /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Updated by the [`NetworkWorker`]. 
peers_notifications_sinks: Arc>>, @@ -433,6 +435,7 @@ where local_peer_id, local_identity, to_worker, + chain_sync_service: params.chain_sync_service, peers_notifications_sinks: peers_notifications_sinks.clone(), notifications_sizes_metric: metrics .as_ref() @@ -814,7 +817,7 @@ where /// a stale fork missing. /// Passing empty `peers` set effectively removes the sync request. fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + self.chain_sync_service.set_sync_fork_request(peers, hash, number); } } @@ -1219,7 +1222,6 @@ enum ServiceToWorkerMsg { RemoveSetReserved(ProtocolName, PeerId), AddToPeersSet(ProtocolName, PeerId), RemoveFromPeersSet(ProtocolName, PeerId), - SyncFork(Vec, B::Hash, NumberFor), EventStream(out_events::Sender), Request { target: PeerId, @@ -1380,11 +1382,6 @@ where .behaviour_mut() .user_protocol_mut() .remove_from_peers_set(protocol, peer_id), - ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => this - .network_service - .behaviour_mut() - .user_protocol_mut() - .set_sync_fork_request(peer_ids, &hash, number), ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender), ServiceToWorkerMsg::Request { target, diff --git a/client/network/src/service/chainsync_tests.rs b/client/network/src/service/chainsync_tests.rs index ca44c65d267f4..27c0588a67200 100644 --- a/client/network/src/service/chainsync_tests.rs +++ b/client/network/src/service/chainsync_tests.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{config, NetworkWorker}; +use crate::{config, ChainSyncInterface, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; @@ -35,7 +35,7 @@ use sc_network_common::{ use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, mock::MockChainSync, - state_request_handler::StateRequestHandler, + service::mock::MockChainSyncInterface, state_request_handler::StateRequestHandler, }; use sp_core::H256; use sp_runtime::{ @@ -56,6 +56,7 @@ const PROTOCOL_NAME: &str = "/foo"; fn make_network( chain_sync: Box>, + chain_sync_service: Box>, client: Arc, ) -> (TestNetworkWorker, Arc) { let network_config = config::NetworkConfiguration { @@ -174,6 +175,7 @@ fn make_network( fork_id, import_queue, chain_sync, + chain_sync_service, metrics_registry: None, block_request_protocol_config, state_request_protocol_config, @@ -193,7 +195,7 @@ fn set_default_expecations_no_peers( chain_sync.expect_state_request().returning(|| None); chain_sync.expect_justification_requests().returning(|| Box::new(iter::empty())); chain_sync.expect_warp_sync_request().returning(|| None); - chain_sync.expect_poll_block_announce_validation().returning(|_| Poll::Pending); + chain_sync.expect_poll().returning(|_| Poll::Pending); chain_sync.expect_status().returning(|| SyncStatus { state: SyncState::Idle, best_seen_block: None, @@ -207,11 +209,18 @@ fn set_default_expecations_no_peers( #[async_std::test] async fn normal_network_poll_no_peers() { let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); + + // build `ChainSync` and set default expectations for it let mut chain_sync = Box::new(MockChainSync::::new()); set_default_expecations_no_peers(&mut chain_sync); - let (mut network, _) = make_network(chain_sync, client); + // build `ChainSyncInterface` 
provider and set no expecations for it (i.e., it cannot be + // called) + let chain_sync_service = + Box::new(MockChainSyncInterface::::new()); + + let (mut network, _) = make_network(chain_sync, chain_sync_service, client); // poll the network once futures::future::poll_fn(|cx| { @@ -224,6 +233,13 @@ async fn normal_network_poll_no_peers() { #[async_std::test] async fn request_justification() { let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); + + // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be + // called) + let chain_sync_service = + Box::new(MockChainSyncInterface::::new()); + + // build `ChainSync` and verify that call to `request_justification()` is made let mut chain_sync = Box::new(MockChainSync::::new()); @@ -237,7 +253,7 @@ async fn request_justification() { .returning(|_, _| ()); set_default_expecations_no_peers(&mut chain_sync); - let (mut network, _) = make_network(chain_sync, client); + let (mut network, _) = make_network(chain_sync, chain_sync_service, client); // send "request justifiction" message and poll the network network.service().request_justification(&hash, number); @@ -252,13 +268,20 @@ async fn request_justification() { #[async_std::test] async fn clear_justification_requests(&mut self) { let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); + + // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be + // called) + let chain_sync_service = + Box::new(MockChainSyncInterface::::new()); + + // build `ChainSync` and verify that call to `clear_justification_requests()` is made let mut chain_sync = Box::new(MockChainSync::::new()); chain_sync.expect_clear_justification_requests().once().returning(|| ()); set_default_expecations_no_peers(&mut chain_sync); - let (mut network, _) = make_network(chain_sync, client); + let (mut network, _) = make_network(chain_sync, chain_sync_service, client); // send "request justifiction" message and poll the network network.service().clear_justification_requests(); @@ -273,15 +296,23 @@ async fn clear_justification_requests(&mut self) { #[async_std::test] async fn set_sync_fork_request() { let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); + + // build `ChainSync` and set default expectations for it let mut chain_sync = Box::new(MockChainSync::::new()); + set_default_expecations_no_peers(&mut chain_sync); + + // build `ChainSyncInterface` provider and verify that the `set_sync_fork_request()` + // call is delegated to `ChainSyncInterface` (which eventually forwards it to `ChainSync`) + let mut chain_sync_service = + MockChainSyncInterface::::new(); let hash = H256::random(); let number = 1337u64; let peers = (0..3).map(|_| PeerId::random()).collect::>(); let copy_peers = peers.clone(); - chain_sync + chain_sync_service .expect_set_sync_fork_request() .withf(move |in_peers, in_hash, in_number| { &peers == in_peers && &hash == in_hash && &number == in_number @@ -289,8 +320,7 @@ async fn set_sync_fork_request() { .once() .returning(|_, _, _| ()); - set_default_expecations_no_peers(&mut chain_sync); - let (mut network, _) = make_network(chain_sync, client); + let (mut network, _) = make_network(chain_sync, Box::new(chain_sync_service), client); // send "set sync fork request" message and poll the network network.service().set_sync_fork_request(copy_peers, hash, number); @@ -305,6 +335,12 @@ async fn set_sync_fork_request() { 
#[async_std::test] async fn on_block_finalized() { let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); + // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be + // called) + let chain_sync_service = + Box::new(MockChainSyncInterface::::new()); + + // build `ChainSync` and verify that call to `on_block_finalized()` is made let mut chain_sync = Box::new(MockChainSync::::new()); @@ -326,7 +362,7 @@ async fn on_block_finalized() { .returning(|_, _| ()); set_default_expecations_no_peers(&mut chain_sync); - let (mut network, _) = make_network(chain_sync, client); + let (mut network, _) = make_network(chain_sync, chain_sync_service, client); // send "set sync fork request" message and poll the network network.on_block_finalized(hash, header); diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index b656d7a7c0ddc..4a0ef4611da6e 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -124,7 +124,7 @@ fn build_test_full_node( protocol_config }; - let chain_sync = ChainSync::new( + let (chain_sync, chain_sync_service) = ChainSync::new( match network_config.sync_mode { config::SyncMode::Full => sc_network_common::sync::SyncMode::Full, config::SyncMode::Fast { skip_proofs, storage_chain_mode } => @@ -172,6 +172,7 @@ fn build_test_full_node( fork_id, import_queue, chain_sync: Box::new(chain_sync), + chain_sync_service, metrics_registry: None, block_request_protocol_config, state_request_protocol_config, diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 9d032f5cca96c..323b70e9c22a4 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -34,6 +34,7 @@ sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-network-common = { version = "0.10.0-dev", path = "../common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +sc-utils = { version = "4.0.0-dev", path = "../../utils" } sp-arithmetic = { version = "5.0.0", path = "../../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } @@ -42,6 +43,7 @@ sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/final sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } [dev-dependencies] +async-std = { version = "1.11.0", features = ["attributes"] } quickcheck = { version = "1.0.3", default-features = false } sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sp-test-primitives = { version = "2.0.0", path = "../../../primitives/test-primitives" } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 84998c747b3cc..64d9d4d668eb3 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -32,14 +32,18 @@ pub mod block_request_handler; pub mod blocks; pub mod mock; mod schema; +pub mod service; pub mod state; pub mod state_request_handler; +#[cfg(test)] +mod tests; pub mod warp; pub mod warp_request_handler; use crate::{ blocks::BlockCollection, schema::v1::{StateRequest, StateResponse}, + service::chain_sync::{ChainSyncInterfaceHandle, ToServiceCommand}, state::StateSync, warp::{WarpProofImportResult, WarpSync}, }; @@ -67,6 +71,7 @@ use sc_network_common::{ PollBlockAnnounceValidation, SyncMode, 
SyncState, SyncStatus, }, }; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_consensus::{ @@ -264,6 +269,8 @@ pub struct ChainSync { import_existing: bool, /// Gap download process. gap_sync: Option>, + /// Channel for receiving service commands + service_rx: TracingUnboundedReceiver>, } /// All the data we have about a Peer that we are trying to sync with @@ -1725,6 +1732,21 @@ where Ok(OpaqueStateResponse(Box::new(response))) } + + fn poll( + &mut self, + cx: &mut std::task::Context, + ) -> Poll> { + while let Poll::Ready(Some(event)) = self.service_rx.poll_next_unpin(cx) { + match event { + ToServiceCommand::SetSyncForkRequest(peers, hash, number) => { + self.set_sync_fork_request(peers, &hash, number); + }, + } + } + + self.poll_block_announce_validation(cx) + } } impl ChainSync @@ -1746,7 +1768,9 @@ where block_announce_validator: Box + Send>, max_parallel_downloads: u32, warp_sync_provider: Option>>, - ) -> Result { + ) -> Result<(Self, Box>), ClientError> { + let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync"); + let mut sync = Self { client, peers: HashMap::new(), @@ -1768,9 +1792,10 @@ where warp_sync_provider, import_existing: false, gap_sync: None, + service_rx, }; sync.reset_sync_start_point()?; - Ok(sync) + Ok((sync, Box::new(ChainSyncInterfaceHandle::new(tx)))) } /// Returns the best seen block number if we don't have that block yet, `None` otherwise. @@ -2664,7 +2689,7 @@ mod test { let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let peer_id = PeerId::random(); - let mut sync = + let (mut sync, _) = ChainSync::new(SyncMode::Full, client.clone(), block_announce_validator, 1, None) .unwrap(); @@ -2712,7 +2737,7 @@ mod test { #[test] fn restart_doesnt_affect_peers_downloading_finality_data() { let mut client = Arc::new(TestClientBuilder::new().build()); - let mut sync = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), @@ -2879,7 +2904,7 @@ mod test { let mut client = Arc::new(TestClientBuilder::new().build()); - let mut sync = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), @@ -2994,7 +3019,7 @@ mod test { let mut client = Arc::new(TestClientBuilder::new().build()); let info = client.info(); - let mut sync = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), @@ -3137,7 +3162,7 @@ mod test { let info = client.info(); - let mut sync = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), @@ -3268,7 +3293,7 @@ mod test { let info = client.info(); - let mut sync = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), @@ -3399,7 +3424,7 @@ mod test { let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); - let mut sync = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), @@ -3432,7 +3457,7 @@ mod test { let empty_client = Arc::new(TestClientBuilder::new().build()); - let mut sync = ChainSync::new( + let (mut sync, _) = ChainSync::new( SyncMode::Full, empty_client.clone(), 
Box::new(DefaultBlockAnnounceValidator), diff --git a/client/network/sync/src/mock.rs b/client/network/sync/src/mock.rs index 2a3b059f735b2..fbb54bd5e998d 100644 --- a/client/network/sync/src/mock.rs +++ b/client/network/sync/src/mock.rs @@ -114,5 +114,9 @@ mockall::mock! { ) -> Result>, String>; fn encode_state_request(&self, request: &OpaqueStateRequest) -> Result, String>; fn decode_state_response(&self, response: &[u8]) -> Result; + fn poll<'a>( + &mut self, + cx: &mut std::task::Context<'a>, + ) -> Poll>; } } diff --git a/client/network/sync/src/service/chain_sync.rs b/client/network/sync/src/service/chain_sync.rs new file mode 100644 index 0000000000000..cf07c65ee3109 --- /dev/null +++ b/client/network/sync/src/service/chain_sync.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use libp2p::PeerId; +use sc_network_common::service::NetworkSyncForkRequest; +use sc_utils::mpsc::TracingUnboundedSender; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +/// Commands send to `ChainSync` +#[derive(Debug)] +pub enum ToServiceCommand { + SetSyncForkRequest(Vec, B::Hash, NumberFor), +} + +/// Handle for communicating with `ChainSync` asynchronously +pub struct ChainSyncInterfaceHandle { + tx: TracingUnboundedSender>, +} + +impl ChainSyncInterfaceHandle { + /// Create new handle + pub fn new(tx: TracingUnboundedSender>) -> Self { + Self { tx } + } +} + +impl NetworkSyncForkRequest> + for ChainSyncInterfaceHandle +{ + /// Configure an explicit fork sync request. + /// + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// `set_sync_fork_request` should only be used if external code detects that there's + /// a stale fork missing. + /// + /// Passing empty `peers` set effectively removes the sync request. + fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + let _ = self + .tx + .unbounded_send(ToServiceCommand::SetSyncForkRequest(peers, hash, number)); + } +} diff --git a/client/network/sync/src/service/mock.rs b/client/network/sync/src/service/mock.rs new file mode 100644 index 0000000000000..e283907b392d1 --- /dev/null +++ b/client/network/sync/src/service/mock.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use libp2p::PeerId; +use sc_network_common::service::NetworkSyncForkRequest; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +mockall::mock! { + pub ChainSyncInterface {} + + impl NetworkSyncForkRequest> + for ChainSyncInterface + { + fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor); + } +} diff --git a/client/network/sync/src/service/mod.rs b/client/network/sync/src/service/mod.rs new file mode 100644 index 0000000000000..d64d9bbd1b01f --- /dev/null +++ b/client/network/sync/src/service/mod.rs @@ -0,0 +1,22 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! `ChainSync`-related service code + +pub mod chain_sync; +pub mod mock; diff --git a/client/network/sync/src/tests.rs b/client/network/sync/src/tests.rs new file mode 100644 index 0000000000000..47483c4ac440d --- /dev/null +++ b/client/network/sync/src/tests.rs @@ -0,0 +1,59 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use crate::{ChainSync, ForkTarget}; + +use libp2p::PeerId; +use sc_network_common::{service::NetworkSyncForkRequest, sync::ChainSync as ChainSyncT}; +use sp_consensus::block_validation::DefaultBlockAnnounceValidator; +use sp_core::H256; +use std::{sync::Arc, task::Poll}; +use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; + +// verify that the fork target map is empty, then submit a new sync fork request, +// poll `ChainSync` and verify that a new sync fork request has been registered +#[async_std::test] +async fn delegate_to_chainsync() { + let (mut chain_sync, chain_sync_service) = ChainSync::new( + sc_network_common::sync::SyncMode::Full, + Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0), + Box::new(DefaultBlockAnnounceValidator), + 1u32, + None, + ) + .unwrap(); + + let hash = H256::random(); + let in_number = 1337u64; + let peers = (0..3).map(|_| PeerId::random()).collect::>(); + + assert!(chain_sync.fork_targets.is_empty()); + chain_sync_service.set_sync_fork_request(peers, hash, in_number); + + futures::future::poll_fn(|cx| { + let _ = chain_sync.poll(cx); + Poll::Ready(()) + }) + .await; + + if let Some(ForkTarget { number, .. }) = chain_sync.fork_targets.get(&hash) { + assert_eq!(number, &in_number); + } else { + panic!("expected to contain `ForkTarget`"); + } +} diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index a7c58631dc0f7..d4bee77b54aff 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -864,7 +864,7 @@ where let block_announce_validator = config .block_announce_validator .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)); - let chain_sync = ChainSync::new( + let (chain_sync, chain_sync_service) = ChainSync::new( match network_config.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, SyncMode::Fast { skip_proofs, storage_chain_mode } => @@ -902,6 +902,7 @@ where fork_id, import_queue, chain_sync: Box::new(chain_sync), + chain_sync_service, metrics_registry: None, block_announce_config, block_request_protocol_config, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 987198d4b7f48..1a16268839054 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -844,7 +844,7 @@ where protocol_config }; - let chain_sync = ChainSync::new( + let (chain_sync, chain_sync_service) = ChainSync::new( match config.network.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, SyncMode::Fast { skip_proofs, storage_chain_mode } => @@ -889,6 +889,7 @@ where fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), + chain_sync_service, metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, block_request_protocol_config, From 951f0754543b4efbefff52b979c8e258064d9ef0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 17 Oct 2022 10:20:51 +0200 Subject: [PATCH 246/348] sp-api: Remove invalid unsafe trait bounds (#12502) The runtime api implementation contained invalid unsafe trait bounds. `Sync` was never correct there and `Send` should have not been "force implemented". 
--- client/beefy/src/import.rs | 2 +- .../api/proc-macro/src/decl_runtime_apis.rs | 2 +- .../api/proc-macro/src/impl_runtime_apis.rs | 15 ++------------- 3 files changed, 4 insertions(+), 15 deletions(-) diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs index 89a4517334189..0ed50d0ec8c98 100644 --- a/client/beefy/src/import.rs +++ b/client/beefy/src/import.rs @@ -78,7 +78,7 @@ where Block: BlockT, BE: Backend, Runtime: ProvideRuntimeApi, - Runtime::Api: BeefyApi + Send + Sync, + Runtime::Api: BeefyApi + Send, { fn decode_and_verify( &self, diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index aac4491720c34..8d46047dbda5a 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -535,7 +535,7 @@ impl<'a> Fold for ToClientSideDecl<'a> { if is_core_trait { // Add all the supertraits we want to have for `Core`. - input.supertraits = parse_quote!('static + Send + Sync); + input.supertraits = parse_quote!('static + Send); } else { // Add the `Core` runtime api as super trait. let crate_ = &self.crate_; diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 7a26ead0f586a..c3f4e36655d22 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -211,19 +211,6 @@ fn generate_runtime_api_base_structures() -> Result { recorder: std::option::Option<#crate_::ProofRecorder>, } - // `RuntimeApi` itself is not threadsafe. However, an instance is only available in a - // `ApiRef` object and `ApiRef` also has an associated lifetime. This lifetimes makes it - // impossible to move `RuntimeApi` into another thread. - #[cfg(any(feature = "std", test))] - unsafe impl> Send - for RuntimeApiImpl - {} - - #[cfg(any(feature = "std", test))] - unsafe impl> Sync - for RuntimeApiImpl - {} - #[cfg(any(feature = "std", test))] impl> #crate_::ApiExt for RuntimeApiImpl @@ -515,6 +502,8 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { #crate_::StateBackend<#crate_::HashFor<__SR_API_BLOCK__>> }); + where_clause.predicates.push(parse_quote! { &'static RuntimeApiImplCall: Send }); + // Require that all types used in the function signatures are unwind safe. extract_all_signature_types(&input.items).iter().for_each(|i| { where_clause.predicates.push(parse_quote! { From 273e264b27dbfdae4074b790e934d80097e7e0c0 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 17 Oct 2022 12:09:47 +0300 Subject: [PATCH 247/348] pallet-mmr: fix offchain db for sync from zero (#12498) * pallet-mmr: cosmetic improvements * pallet-mmr: fix offchain storage for initial sync * address review comments * pallet-mmr: change offchain fork-resistant key to `(prefix, pos, parent_hash)` Do this so that both canon and fork-resitant keys have the same `(prefix, pos).encode()` prefix. Might be useful in the future if we'd be able to to "get" offchain db entries using key prefixes as well. 
Signed-off-by: acatangiu Signed-off-by: acatangiu --- frame/beefy-mmr/src/tests.rs | 8 +- frame/merkle-mountain-range/src/lib.rs | 95 ++++++++++++------ .../merkle-mountain-range/src/mmr/storage.rs | 97 +++++++++++-------- frame/merkle-mountain-range/src/tests.rs | 71 +++++++------- 4 files changed, 163 insertions(+), 108 deletions(-) diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs index b9851d9eef6cb..1826331f59e53 100644 --- a/frame/beefy-mmr/src/tests.rs +++ b/frame/beefy-mmr/src/tests.rs @@ -100,8 +100,8 @@ fn should_contain_mmr_digest() { #[test] fn should_contain_valid_leaf_data() { - fn node_offchain_key(parent_hash: H256, pos: usize) -> Vec { - (::INDEXING_PREFIX, parent_hash, pos as u64).encode() + fn node_offchain_key(pos: usize, parent_hash: H256) -> Vec { + (::INDEXING_PREFIX, pos as u64, parent_hash).encode() } let mut ext = new_test_ext(vec![1, 2, 3, 4]); @@ -110,7 +110,7 @@ fn should_contain_valid_leaf_data() { >::parent_hash() }); - let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(parent_hash, 0)); + let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(0, parent_hash)); assert_eq!( mmr_leaf, MmrLeaf { @@ -135,7 +135,7 @@ fn should_contain_valid_leaf_data() { >::parent_hash() }); - let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(parent_hash, 1)); + let mmr_leaf = read_mmr_leaf(&mut ext, node_offchain_key(1, parent_hash)); assert_eq!( mmr_leaf, MmrLeaf { diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index ad3ce340496e8..92ab8702b65c0 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -57,9 +57,9 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::Encode; -use frame_support::weights::Weight; +use frame_support::{log, traits::Get, weights::Weight}; use sp_runtime::{ - traits::{self, CheckedSub, One, Saturating}, + traits::{self, CheckedSub, One, Saturating, UniqueSaturatedInto}, SaturatedConversion, }; @@ -103,6 +103,15 @@ pub trait WeightInfo { fn on_initialize(peaks: NodeIndex) -> Weight; } +/// A MMR specific to the pallet. +type ModuleMmr = mmr::Mmr>; + +/// Leaf data. +type LeafOf = <>::LeafData as primitives::LeafDataProvider>::LeafData; + +/// Hashing used for the pallet. +pub(crate) type HashingOf = >::Hashing; + #[frame_support::pallet] pub mod pallet { use super::*; @@ -166,7 +175,7 @@ pub mod pallet { /// Note that the leaf at each block MUST be unique. You may want to include a block hash or /// block number as an easiest way to ensure that. 
/// Also note that the leaf added by each block is expected to only reference data coming - /// from ancestor blocks (leaves are saved offchain using `(parent_hash, pos)` key to be + /// from ancestor blocks (leaves are saved offchain using `(pos, parent_hash)` key to be /// fork-resistant, as such conflicts could only happen on 1-block deep forks, which means /// two forks with identical line of ancestors compete to write the same offchain key, but /// that's fine as long as leaves only contain data coming from ancestors - conflicting @@ -212,12 +221,22 @@ pub mod pallet { let leaves = Self::mmr_leaves(); let peaks_before = mmr::utils::NodesUtils::new(leaves).number_of_peaks(); let data = T::LeafData::leaf_data(); + // append new leaf to MMR let mut mmr: ModuleMmr = mmr::Mmr::new(leaves); - mmr.push(data).expect("MMR push never fails."); - - // update the size - let (leaves, root) = mmr.finalize().expect("MMR finalize never fails."); + // MMR push never fails, but better safe than sorry. + if mmr.push(data).is_none() { + log::error!(target: "runtime::mmr", "MMR push failed"); + return T::WeightInfo::on_initialize(peaks_before) + } + // Update the size, `mmr.finalize()` should also never fail. + let (leaves, root) = match mmr.finalize() { + Ok((leaves, root)) => (leaves, root), + Err(e) => { + log::error!(target: "runtime::mmr", "MMR finalize failed: {:?}", e); + return T::WeightInfo::on_initialize(peaks_before) + }, + }; >::on_new_root(&root); >::put(leaves); @@ -230,21 +249,35 @@ pub mod pallet { fn offchain_worker(n: T::BlockNumber) { use mmr::storage::{OffchainStorage, Storage}; - // MMR pallet uses offchain storage to hold full MMR and leaves. - // The leaves are saved under fork-unique keys `(parent_hash, pos)`. - // MMR Runtime depends on `frame_system::block_hash(block_num)` mappings to find - // parent hashes for particular nodes or leaves. - // This MMR offchain worker function moves a rolling window of the same size - // as `frame_system::block_hash` map, where nodes/leaves added by blocks that are just + // The MMR nodes can be found in offchain db under either: + // - fork-unique keys `(prefix, pos, parent_hash)`, or, + // - "canonical" keys `(prefix, pos)`, + // depending on how many blocks in the past the node at position `pos` was + // added to the MMR. + // + // For the fork-unique keys, the MMR pallet depends on + // `frame_system::block_hash(parent_num)` mappings to find the relevant parent block + // hashes, so it is limited by `frame_system::BlockHashCount` in terms of how many + // historical forks it can track. Nodes added to MMR by block `N` can be found in + // offchain db at: + // - fork-unique keys `(prefix, pos, parent_hash)` when (`N` >= `latest_block` - + // `frame_system::BlockHashCount`); + // - "canonical" keys `(prefix, pos)` when (`N` < `latest_block` - + // `frame_system::BlockHashCount`); + // + // The offchain worker is responsible for maintaining the nodes' positions in + // offchain db as the chain progresses by moving a rolling window of the same size as + // `frame_system::block_hash` map, where nodes/leaves added by blocks that are just // about to exit the window are "canonicalized" so that their offchain key no longer - // depends on `parent_hash` therefore on access to `frame_system::block_hash`. + // depends on `parent_hash`. 
// // This approach works to eliminate fork-induced leaf collisions in offchain db, // under the assumption that no fork will be deeper than `frame_system::BlockHashCount` - // blocks (2400 blocks on Polkadot, Kusama, Rococo, etc): - // entries pertaining to block `N` where `N < current-2400` are moved to a key based - // solely on block number. The only way to have collisions is if two competing forks - // are deeper than 2400 blocks and they both "canonicalize" their view of block `N`. + // blocks: + // entries pertaining to block `N` where `N < current-BlockHashCount` are moved to a + // key based solely on block number. The only way to have collisions is if two + // competing forks are deeper than `frame_system::BlockHashCount` blocks and they + // both "canonicalize" their view of block `N` // Once a block is canonicalized, all MMR entries pertaining to sibling blocks from // other forks are pruned from offchain db. Storage::>::canonicalize_and_prune(n); @@ -252,15 +285,6 @@ pub mod pallet { } } -/// A MMR specific to the pallet. -type ModuleMmr = mmr::Mmr>; - -/// Leaf data. -type LeafOf = <>::LeafData as primitives::LeafDataProvider>::LeafData; - -/// Hashing used for the pallet. -pub(crate) type HashingOf = >::Hashing; - /// Stateless MMR proof verification for batch of leaves. /// /// This function can be used to verify received MMR [primitives::BatchProof] (`proof`) @@ -290,19 +314,32 @@ impl, I: 'static> Pallet { /// /// This combination makes the offchain (key,value) entry resilient to chain forks. fn node_offchain_key( - parent_hash: ::Hash, pos: NodeIndex, + parent_hash: ::Hash, ) -> sp_std::prelude::Vec { - (T::INDEXING_PREFIX, parent_hash, pos).encode() + (T::INDEXING_PREFIX, pos, parent_hash).encode() } /// Build canonical offchain key for node `pos` in MMR. /// /// Used for nodes added by now finalized blocks. + /// Never read keys using `node_canon_offchain_key` unless you sure that + /// there's no `node_offchain_key` key in the storage. fn node_canon_offchain_key(pos: NodeIndex) -> sp_std::prelude::Vec { (T::INDEXING_PREFIX, pos).encode() } + /// Return size of rolling window of leaves saved in offchain under fork-unique keys. + /// + /// Leaves outside this window are canonicalized. + /// Window size is `frame_system::BlockHashCount - 1` to make sure fork-unique keys + /// can be built using `frame_system::block_hash` map. + fn offchain_canonicalization_window() -> LeafIndex { + let window_size: LeafIndex = + ::BlockHashCount::get().unique_saturated_into(); + window_size.saturating_sub(1) + } + /// Provide the parent number for the block that added `leaf_index` to the MMR. fn leaf_index_to_parent_block_num( leaf_index: LeafIndex, diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index 8b623edf56957..870ce81226bd2 100644 --- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -18,11 +18,10 @@ //! A MMR storage implementations. use codec::Encode; -use frame_support::traits::Get; +use frame_support::log::{debug, error, trace}; use mmr_lib::helper; use sp_core::offchain::StorageKind; use sp_io::{offchain, offchain_index}; -use sp_runtime::traits::UniqueSaturatedInto; use sp_std::iter::Peekable; #[cfg(not(feature = "std"))] use sp_std::prelude::*; @@ -133,15 +132,14 @@ where // Effectively move a rolling window of fork-unique leaves. Once out of the window, leaves // are "canonicalized" in offchain by moving them under `Pallet::node_canon_offchain_key`. 
let leaves = NumberOfLeaves::::get(); - let window_size = - ::BlockHashCount::get().unique_saturated_into(); + let window_size = Pallet::::offchain_canonicalization_window(); if leaves >= window_size { // Move the rolling window towards the end of `block_num->hash` mappings available // in the runtime: we "canonicalize" the leaf at the end, let to_canon_leaf = leaves.saturating_sub(window_size); // and all the nodes added by that leaf. let to_canon_nodes = NodesUtils::right_branch_ending_in_leaf(to_canon_leaf); - frame_support::log::debug!( + debug!( target: "runtime::mmr::offchain", "Nodes to canon for leaf {}: {:?}", to_canon_leaf, to_canon_nodes ); @@ -149,7 +147,7 @@ where let to_canon_block_num = Pallet::::leaf_index_to_parent_block_num(to_canon_leaf, leaves); // Only entries under this hash (retrieved from state on current canon fork) are to be - // persisted. All other entries added by same block number will be cleared. + // persisted. All entries added by same block number on other forks will be cleared. let to_canon_hash = >::block_hash(to_canon_block_num); Self::canonicalize_nodes_for_hash(&to_canon_nodes, to_canon_hash); @@ -159,7 +157,7 @@ where Self::prune_nodes_for_forks(&to_canon_nodes, forks); }) .unwrap_or_else(|| { - frame_support::log::error!( + error!( target: "runtime::mmr::offchain", "Offchain: could not prune: no entry in pruning map for block {:?}", to_canon_block_num @@ -171,8 +169,8 @@ where fn prune_nodes_for_forks(nodes: &[NodeIndex], forks: Vec<::Hash>) { for hash in forks { for pos in nodes { - let key = Pallet::::node_offchain_key(hash, *pos); - frame_support::log::debug!( + let key = Pallet::::node_offchain_key(*pos, hash); + debug!( target: "runtime::mmr::offchain", "Clear elem at pos {} with key {:?}", pos, key @@ -187,19 +185,19 @@ where to_canon_hash: ::Hash, ) { for pos in to_canon_nodes { - let key = Pallet::::node_offchain_key(to_canon_hash, *pos); + let key = Pallet::::node_offchain_key(*pos, to_canon_hash); // Retrieve the element from Off-chain DB under fork-aware key. if let Some(elem) = offchain::local_storage_get(StorageKind::PERSISTENT, &key) { let canon_key = Pallet::::node_canon_offchain_key(*pos); // Add under new canon key. offchain::local_storage_set(StorageKind::PERSISTENT, &canon_key, &elem); - frame_support::log::debug!( + debug!( target: "runtime::mmr::offchain", "Moved elem at pos {} from key {:?} to canon key {:?}", pos, key, canon_key ); } else { - frame_support::log::error!( + error!( target: "runtime::mmr::offchain", "Could not canonicalize elem at pos {} using key {:?}", pos, key @@ -220,21 +218,18 @@ where // Find out which leaf added node `pos` in the MMR. let ancestor_leaf_idx = NodesUtils::leaf_index_that_added_node(pos); - let window_size = - ::BlockHashCount::get().unique_saturated_into(); + let window_size = Pallet::::offchain_canonicalization_window(); // Leaves older than this window should have been canonicalized. 
if leaves.saturating_sub(ancestor_leaf_idx) > window_size { let key = Pallet::::node_canon_offchain_key(pos); - frame_support::log::debug!( + debug!( target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, key {:?}", pos, ancestor_leaf_idx, key ); // Just for safety, to easily handle runtime upgrades where any of the window params // change and maybe we mess up storage migration, // return _if and only if_ node is found (in normal conditions it's always found), - if let Some(elem) = - sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) - { + if let Some(elem) = sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) { return Ok(codec::Decode::decode(&mut &*elem).ok()) } // BUT if we DID MESS UP, fall through to searching node using fork-specific key. @@ -244,20 +239,20 @@ where let ancestor_parent_block_num = Pallet::::leaf_index_to_parent_block_num(ancestor_leaf_idx, leaves); let ancestor_parent_hash = >::block_hash(ancestor_parent_block_num); - let key = Pallet::::node_offchain_key(ancestor_parent_hash, pos); - frame_support::log::debug!( + let key = Pallet::::node_offchain_key(pos, ancestor_parent_hash); + debug!( target: "runtime::mmr::offchain", "offchain db get {}: leaf idx {:?}, hash {:?}, key {:?}", pos, ancestor_leaf_idx, ancestor_parent_hash, key ); // Retrieve the element from Off-chain DB. - Ok(sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + Ok(sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) .or_else(|| { // Again, this is just us being extra paranoid. // We get here only if we mess up a storage migration for a runtime upgrades where // say the window is increased, and for a little while following the upgrade there's // leaves inside new 'window' that had been already canonicalized before upgrade. let key = Pallet::::node_canon_offchain_key(pos); - sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) }) .and_then(|v| codec::Decode::decode(&mut &*v).ok())) } @@ -282,9 +277,8 @@ where return Ok(()) } - frame_support::log::trace!( - target: "runtime::mmr", - "elems: {:?}", + trace!( + target: "runtime::mmr", "elems: {:?}", elems.iter().map(|elem| elem.hash()).collect::>() ); @@ -309,25 +303,12 @@ where // in offchain DB to avoid DB collisions and overwrites in case of forks. let parent_hash = >::parent_hash(); for elem in elems { - // For now we store this leaf offchain keyed by `(parent_hash, node_index)` - // to make it fork-resistant. - // Offchain worker task will "canonicalize" it `frame_system::BlockHashCount` blocks - // later when we are not worried about forks anymore (highly unlikely to have a fork - // in the chain that deep). - // "Canonicalization" in this case means moving this leaf under a new key based - // only on the leaf's `node_index`. - let key = Pallet::::node_offchain_key(parent_hash, node_index); - frame_support::log::debug!( - target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}", - node_index, parent_hash, key - ); - // Indexing API is used to store the full node content (both leaf and inner). - elem.using_encoded(|elem| offchain_index::set(&key, elem)); - // On-chain we are going to only store new peaks. if peaks_to_store.next_if_eq(&node_index).is_some() { >::insert(node_index, elem.hash()); } + // We are storing full node off-chain (using indexing API). 
+ Self::store_to_offchain(node_index, parent_hash, &elem); // Increase the indices. if let Node::Data(..) = elem { @@ -348,6 +329,38 @@ where } } +impl Storage +where + T: Config, + I: 'static, + L: primitives::FullLeaf, +{ + fn store_to_offchain( + pos: NodeIndex, + parent_hash: ::Hash, + node: &NodeOf, + ) { + let encoded_node = node.encode(); + // We store this leaf offchain keyed by `(parent_hash, node_index)` to make it + // fork-resistant. Offchain worker task will "canonicalize" it + // `frame_system::BlockHashCount` blocks later, when we are not worried about forks anymore + // (multi-era-deep forks should not happen). + let key = Pallet::::node_offchain_key(pos, parent_hash); + debug!( + target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}", + pos, parent_hash, key + ); + // Indexing API is used to store the full node content. + offchain_index::set(&key, &encoded_node); + // We also directly save the full node under the "canonical" key. + // This is superfluous for the normal case - this entry will possibly be overwritten + // by forks, and will also be overwritten by "offchain_worker canonicalization". + // But it is required for blocks imported during initial sync where none of the above apply + // (`offchain_worker` doesn't run for initial sync blocks). + offchain_index::set(&Pallet::::node_canon_offchain_key(pos), &encoded_node); + } +} + fn peaks_to_prune_and_store( old_size: NodeIndex, new_size: NodeIndex, @@ -356,8 +369,8 @@ fn peaks_to_prune_and_store( // both collections may share a common prefix. let peaks_before = if old_size == 0 { vec![] } else { helper::get_peaks(old_size) }; let peaks_after = helper::get_peaks(new_size); - frame_support::log::trace!(target: "runtime::mmr", "peaks_before: {:?}", peaks_before); - frame_support::log::trace!(target: "runtime::mmr", "peaks_after: {:?}", peaks_after); + trace!(target: "runtime::mmr", "peaks_before: {:?}", peaks_before); + trace!(target: "runtime::mmr", "peaks_after: {:?}", peaks_after); let mut peaks_before = peaks_before.into_iter().peekable(); let mut peaks_after = peaks_after.into_iter().peekable(); diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index a63a433029295..dbbdc12c8e1d5 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -169,25 +169,22 @@ fn should_append_to_mmr_when_on_initialize_is_called() { ext.persist_offchain_overlay(); let offchain_db = ext.offchain_db(); - assert_eq!( - offchain_db.get(&MMR::node_offchain_key(parent_b1, 0)).map(decode_node), - Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1),))) - ); - assert_eq!( - offchain_db.get(&MMR::node_offchain_key(parent_b2, 1)).map(decode_node), - Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2),))) - ); - assert_eq!( - offchain_db.get(&MMR::node_offchain_key(parent_b2, 2)).map(decode_node), - Some(mmr::Node::Hash(hex( - "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" - ))) - ); - assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_b2, 3)), None); - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(0)), None); - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(1)), None); - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(2)), None); + let expected = Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1)))); + assert_eq!(offchain_db.get(&MMR::node_offchain_key(0, parent_b1)).map(decode_node), expected); + 
assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(0)).map(decode_node), expected); + + let expected = Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2)))); + assert_eq!(offchain_db.get(&MMR::node_offchain_key(1, parent_b2)).map(decode_node), expected); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(1)).map(decode_node), expected); + + let expected = Some(mmr::Node::Hash(hex( + "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854", + ))); + assert_eq!(offchain_db.get(&MMR::node_offchain_key(2, parent_b2)).map(decode_node), expected); + assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(2)).map(decode_node), expected); + + assert_eq!(offchain_db.get(&MMR::node_offchain_key(3, parent_b2)), None); assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(3)), None); } @@ -815,16 +812,20 @@ fn should_canonicalize_offchain() { let parent_num: BlockNumber = (block_num - 1).into(); let leaf_index = u64::from(block_num - 1); let pos = helper::leaf_index_to_pos(leaf_index.into()); - // not canon, - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(pos)), None); let parent_hash = >::block_hash(parent_num); - // but available in fork-proof storage. + // Available in offchain db under both fork-proof key and canon key. + // We'll later check it is pruned from fork-proof key. + let expected = Some(mmr::Node::Data(( + (leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())), + LeafData::new(block_num.into()), + ))); assert_eq!( - offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)).map(decode_node), - Some(mmr::Node::Data(( - (leaf_index, H256::repeat_byte(u8::try_from(block_num).unwrap())), - LeafData::new(block_num.into()), - ))) + offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), + expected + ); + assert_eq!( + offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)).map(decode_node), + expected ); } @@ -835,12 +836,16 @@ fn should_canonicalize_offchain() { let verify = |pos: NodeIndex, leaf_index: LeafIndex, expected: H256| { let parent_num: BlockNumber = leaf_index.try_into().unwrap(); let parent_hash = >::block_hash(parent_num); - // not canon, - assert_eq!(offchain_db.get(&MMR::node_canon_offchain_key(pos)), None); - // but available in fork-proof storage. + // Available in offchain db under both fork-proof key and canon key. + // We'll later check it is pruned from fork-proof key. + let expected = Some(mmr::Node::Hash(expected)); assert_eq!( - offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)).map(decode_node), - Some(mmr::Node::Hash(expected)) + offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), + expected + ); + assert_eq!( + offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)).map(decode_node), + expected ); }; verify(2, 1, hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")); @@ -867,7 +872,7 @@ fn should_canonicalize_offchain() { let parent_num: BlockNumber = (block_num - 1).into(); let parent_hash = >::block_hash(parent_num); // no longer available in fork-proof storage (was pruned), - assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)), None); + assert_eq!(offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)), None); // but available using canon key. 
assert_eq!( offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), @@ -886,7 +891,7 @@ fn should_canonicalize_offchain() { let parent_num: BlockNumber = leaf_index.try_into().unwrap(); let parent_hash = >::block_hash(parent_num); // no longer available in fork-proof storage (was pruned), - assert_eq!(offchain_db.get(&MMR::node_offchain_key(parent_hash, pos)), None); + assert_eq!(offchain_db.get(&MMR::node_offchain_key(pos, parent_hash)), None); // but available using canon key. assert_eq!( offchain_db.get(&MMR::node_canon_offchain_key(pos)).map(decode_node), From acacb53f92c3da09ccad403a679f0668cc2c6e0c Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Mon, 17 Oct 2022 12:45:42 +0200 Subject: [PATCH 248/348] Fix: typo (#12505) Fix: typo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d7e6650290fba..02f8a7591acc5 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Then try out one of the [tutorials](https://docs.substrate.io/tutorials/). ## Community & Support Join the highly active and supportive community on the [Susbstrate Stack Exchange](https://substrate.stackexchange.com/) to ask questions about use and problems you run into using this software. -Please do report bugs and [isssues here](https://github.com/paritytech/substrate/issues) for anything you suspect requires action in the source. +Please do report bugs and [issues here](https://github.com/paritytech/substrate/issues) for anything you suspect requires action in the source. ## Contributions & Code of Conduct From 444cf9d87bba1071c3d1d82d21ac7419dd3953e4 Mon Sep 17 00:00:00 2001 From: Dmitrii Markin Date: Mon, 17 Oct 2022 14:02:37 +0300 Subject: [PATCH 249/348] Upgrade libp2p to 0.49.0 (#12256) * cargo upgrade libp2p * Get rid of `NetworkBehaviourEventProcess` in handling of `CustomMessageOutcome` * Get rid of `NetworkBehaviourEventProcess` in handling of `request_responses::Event` * Get rid of `NetworkBehaviourEventProcess` in handling of `peer_info::PeerInfoEvent` * Get rid of `NetworkBehaviourEventProcess` in handling of `DiscoveryOut` * Get rid of `poll()` method in `Bahaviour` * minor: comments * Upgrade libp2p to 0.49.0 (unreleased) * Support multiple Kad protocol names * Make borrow checker happy * minor: wording * Make substrate build with libp2p-0.49.0 * rustfmt * Get rid of MdnsWrapper * Resolve deprecation warnings * Fix documentation * Apply suggestions from code review: fix typos Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> * Apply suggestion: simplify kad protocol name matching Co-authored-by: Aaro Altonen <48052676+altonen@users.noreply.github.com> --- Cargo.lock | 651 +++++++----------------- client/authority-discovery/Cargo.toml | 2 +- client/authority-discovery/src/tests.rs | 5 +- client/cli/Cargo.toml | 2 +- client/consensus/common/Cargo.toml | 2 +- client/network-gossip/Cargo.toml | 2 +- client/network/Cargo.toml | 2 +- client/network/bitswap/Cargo.toml | 2 +- client/network/common/Cargo.toml | 2 +- client/network/light/Cargo.toml | 2 +- client/network/src/behaviour.rs | 352 +++++-------- client/network/src/config.rs | 7 +- client/network/src/discovery.rs | 151 +++--- client/network/src/peer_info.rs | 7 +- client/network/src/service.rs | 154 +++++- client/network/sync/Cargo.toml | 2 +- client/network/test/Cargo.toml | 2 +- client/network/transactions/Cargo.toml | 2 +- client/offchain/Cargo.toml | 2 +- client/peerset/Cargo.toml | 2 +- 
client/telemetry/Cargo.toml | 2 +- 21 files changed, 526 insertions(+), 829 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f49ff75561db..103436dcfca4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -29,30 +29,30 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aead" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3e798aa0c8239776f54415bc06f3d74b1850f3f830b45c35cfc80556973f70" +checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ "generic-array 0.14.4", ] [[package]] name = "aes" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "495ee669413bfbe9e8cace80f4d3d78e6d8c8d99579f97fb93bde351b185f2d4" +checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if 1.0.0", "cipher", - "cpufeatures 0.1.5", + "cpufeatures", "opaque-debug 0.3.0", ] [[package]] name = "aes-gcm" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a930fd487faaa92a30afa92cc9dd1526a5cff67124abbbb1c617ce070f4dcf" +checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" dependencies = [ "aead", "aes", @@ -307,9 +307,9 @@ dependencies = [ [[package]] name = "async-std-resolver" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2f8a4a203be3325981310ab243a28e6e4ea55b6519bffce05d41ab60e09ad8" +checksum = "6ba50e24d9ee0a8950d3d03fc6d0dd10aa14b5de3b101949b4e160f7fee7c723" dependencies = [ "async-std", "async-trait", @@ -545,12 +545,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "bimap" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50ae17cabbc8a38a1e3e4c1a6a664e9a09672dc14d0896fa8d865d3a5a446b07" - [[package]] name = "bincode" version = "1.3.3" @@ -657,7 +651,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding 0.1.5", + "block-padding", "byte-tools", "byteorder", "generic-array 0.12.4", @@ -669,7 +663,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding 0.2.1", "generic-array 0.14.4", ] @@ -691,12 +684,6 @@ dependencies = [ "byte-tools", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "blocking" version = "1.0.2" @@ -893,21 +880,21 @@ checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" [[package]] name = "chacha20" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b72a433d0cf2aef113ba70f62634c56fddb0f244e6377185c56a7cadbd8f91" +checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ "cfg-if 1.0.0", "cipher", - "cpufeatures 0.2.1", + "cpufeatures", "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.9.0" +version = "0.9.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b84ed6d1d5f7aa9bdde921a5090e0ca4d934d250ea3b402a5fab3a994e28a2a" +checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ "aead", "chacha20", @@ -1044,15 +1031,6 @@ dependencies = [ "os_str_bytes", ] -[[package]] -name = "cmake" -version = "0.1.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7b858541263efe664aead4a5209a4ae5c5d2811167d4ed4ee0944503f8d2089" -dependencies = [ - "cc", -] - [[package]] name = "comfy-table" version = "6.0.0" @@ -1120,15 +1098,6 @@ dependencies = [ "glob", ] -[[package]] -name = "cpufeatures" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef" -dependencies = [ - "libc", -] - [[package]] name = "cpufeatures" version = "0.2.1" @@ -1482,17 +1451,6 @@ dependencies = [ "cipher", ] -[[package]] -name = "cuckoofilter" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" -dependencies = [ - "byteorder", - "fnv", - "rand 0.7.3", -] - [[package]] name = "curve25519-dalek" version = "2.1.2" @@ -1863,9 +1821,9 @@ dependencies = [ [[package]] name = "enum-as-inner" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21cdad81446a7f7dc43f6a77409efeb9733d2fa65553efef6018ef257c959b73" +checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", "proc-macro2", @@ -2692,9 +2650,9 @@ dependencies = [ [[package]] name = "ghash" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b442c439366184de619215247d24e908912b175e824a530253845ac4c251a5c1" +checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" dependencies = [ "opaque-debug 0.3.0", "polyval", @@ -2871,12 +2829,6 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" -[[package]] -name = "hex_fmt" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" - [[package]] name = "hmac" version = "0.8.1" @@ -3038,9 +2990,9 @@ dependencies = [ [[package]] name = "if-watch" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae8f4a3c3d4c89351ca83e120c1c00b27df945d38e05695668c9d4b4f7bc52f3" +checksum = "065c008e570a43c00de6aed9714035e5ea6a498c255323db9091722af6ee67dd" dependencies = [ "async-io", "core-foundation", @@ -3547,9 +3499,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.46.1" +version = "0.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81327106887e42d004fbdab1fef93675be2e2e07c1b95fce45e2cc813485611d" +checksum = "ec878fda12ebec479186b3914ebc48ff180fa4c51847e11a1a68bf65249e02c1" dependencies = [ "bytes", "futures", @@ -3557,12 +3509,8 @@ dependencies = [ "getrandom 0.2.3", "instant", "lazy_static", - "libp2p-autonat", "libp2p-core", - "libp2p-deflate", "libp2p-dns", - "libp2p-floodsub", - "libp2p-gossipsub", 
"libp2p-identify", "libp2p-kad", "libp2p-mdns", @@ -3570,49 +3518,24 @@ dependencies = [ "libp2p-mplex", "libp2p-noise", "libp2p-ping", - "libp2p-plaintext", - "libp2p-pnet", - "libp2p-relay", - "libp2p-rendezvous", "libp2p-request-response", "libp2p-swarm", "libp2p-swarm-derive", "libp2p-tcp", - "libp2p-uds", "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", "multiaddr", "parking_lot 0.12.1", "pin-project", - "rand 0.7.3", "smallvec", ] -[[package]] -name = "libp2p-autonat" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4decc51f3573653a9f4ecacb31b1b922dd20c25a6322bb15318ec04287ec46f9" -dependencies = [ - "async-trait", - "futures", - "futures-timer", - "instant", - "libp2p-core", - "libp2p-request-response", - "libp2p-swarm", - "log", - "prost 0.10.3", - "prost-build 0.10.4", - "rand 0.8.5", -] - [[package]] name = "libp2p-core" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf9b94cefab7599b2d3dff2f93bee218c6621d68590b23ede4485813cbcece6" +checksum = "799676bb0807c788065e57551c6527d461ad572162b0519d1958946ff9e0539d" dependencies = [ "asn1_der", "bs58", @@ -3623,17 +3546,15 @@ dependencies = [ "futures-timer", "instant", "lazy_static", - "libsecp256k1", "log", "multiaddr", "multihash", "multistream-select", "parking_lot 0.12.1", "pin-project", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.8.5", - "ring", "rw-stream-sink", "sha2 0.10.2", "smallvec", @@ -3643,22 +3564,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "libp2p-deflate" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0183dc2a3da1fbbf85e5b6cf51217f55b14f5daea0c455a9536eef646bfec71" -dependencies = [ - "flate2", - "futures", - "libp2p-core", -] - [[package]] name = "libp2p-dns" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cbf54723250fa5d521383be789bf60efdabe6bacfb443f87da261019a49b4b5" +checksum = "2322c9fb40d99101def6a01612ee30500c89abbbecb6297b3cd252903a4c1720" dependencies = [ "async-std-resolver", "futures", @@ -3669,57 +3579,11 @@ dependencies = [ "trust-dns-resolver", ] -[[package]] -name = "libp2p-floodsub" -version = "0.37.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98a4b6ffd53e355775d24b76f583fdda54b3284806f678499b57913adb94f231" -dependencies = [ - "cuckoofilter", - "fnv", - "futures", - "libp2p-core", - "libp2p-swarm", - "log", - "prost 0.10.3", - "prost-build 0.10.4", - "rand 0.7.3", - "smallvec", -] - -[[package]] -name = "libp2p-gossipsub" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74b4b888cfbeb1f5551acd3aa1366e01bf88ede26cc3c4645d0d2d004d5ca7b0" -dependencies = [ - "asynchronous-codec", - "base64", - "byteorder", - "bytes", - "fnv", - "futures", - "hex_fmt", - "instant", - "libp2p-core", - "libp2p-swarm", - "log", - "prometheus-client", - "prost 0.10.3", - "prost-build 0.10.4", - "rand 0.7.3", - "regex", - "sha2 0.10.2", - "smallvec", - "unsigned-varint", - "wasm-timer", -] - [[package]] name = "libp2p-identify" -version = "0.37.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50b585518f8efd06f93ac2f976bd672e17cdac794644b3117edd078e96bda06" +checksum = "dcf9a121f699e8719bda2e6e9e9b6ddafc6cff4602471d6481c1067930ccb29b" 
dependencies = [ "asynchronous-codec", "futures", @@ -3727,9 +3591,9 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "lru", - "prost 0.10.3", - "prost-build 0.10.4", + "lru 0.8.1", + "prost", + "prost-build", "prost-codec", "smallvec", "thiserror", @@ -3738,9 +3602,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.38.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740862893bb5f06ac24acc9d49bdeadc3a5e52e51818a30a25c1f3519da2c851" +checksum = "6721c200e2021f6c3fab8b6cf0272ead8912d871610ee194ebd628cecf428f22" dependencies = [ "arrayvec 0.7.2", "asynchronous-codec", @@ -3753,9 +3617,9 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", - "rand 0.7.3", + "prost", + "prost-build", + "rand 0.8.5", "sha2 0.10.2", "smallvec", "thiserror", @@ -3766,16 +3630,15 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.38.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66e5e5919509603281033fd16306c61df7a4428ce274b67af5e14b07de5cdcb2" +checksum = "761704e727f7d68d58d7bc2231eafae5fc1b9814de24290f126df09d4bd37a15" dependencies = [ "async-io", "data-encoding", "dns-parser", "futures", "if-watch", - "lazy_static", "libp2p-core", "libp2p-swarm", "log", @@ -3787,25 +3650,23 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.7.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8aff4a1abef42328fbb30b17c853fff9be986dc39af17ee39f9c5f755c5e0c" +checksum = "9ee31b08e78b7b8bfd1c4204a9dd8a87b4fcdf6dafc57eb51701c1c264a81cb9" dependencies = [ "libp2p-core", - "libp2p-gossipsub", "libp2p-identify", "libp2p-kad", "libp2p-ping", - "libp2p-relay", "libp2p-swarm", "prometheus-client", ] [[package]] name = "libp2p-mplex" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61fd1b20638ec209c5075dfb2e8ce6a7ea4ec3cd3ad7b77f7a477c06d53322e2" +checksum = "692664acfd98652de739a8acbb0a0d670f1d67190a49be6b4395e22c37337d89" dependencies = [ "asynchronous-codec", "bytes", @@ -3814,16 +3675,16 @@ dependencies = [ "log", "nohash-hasher", "parking_lot 0.12.1", - "rand 0.7.3", + "rand 0.8.5", "smallvec", "unsigned-varint", ] [[package]] name = "libp2p-noise" -version = "0.37.0" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "762408cb5d84b49a600422d7f9a42c18012d8da6ebcd570f9a4a4290ba41fb6f" +checksum = "048155686bd81fe6cb5efdef0c6290f25ad32a0a42e8f4f72625cf6a505a206f" dependencies = [ "bytes", "curve25519-dalek 3.0.2", @@ -3831,8 +3692,8 @@ dependencies = [ "lazy_static", "libp2p-core", "log", - "prost 0.10.3", - "prost-build 0.10.4", + "prost", + "prost-build", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3843,105 +3704,25 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.37.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "100a6934ae1dbf8a693a4e7dd1d730fd60b774dafc45688ed63b554497c6c925" -dependencies = [ - "futures", - "futures-timer", - "instant", - "libp2p-core", - "libp2p-swarm", - "log", - "rand 0.7.3", - "void", -] - -[[package]] -name = "libp2p-plaintext" -version = "0.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be27bf0820a6238a4e06365b096d428271cce85a129cf16f2fe9eb1610c4df86" -dependencies = [ - "asynchronous-codec", 
- "bytes", - "futures", - "libp2p-core", - "log", - "prost 0.10.3", - "prost-build 0.10.4", - "unsigned-varint", - "void", -] - -[[package]] -name = "libp2p-pnet" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f1a458bbda880107b5b36fcb9b5a1ef0c329685da0e203ed692a8ebe64cc92c" -dependencies = [ - "futures", - "log", - "pin-project", - "rand 0.7.3", - "salsa20", - "sha3 0.9.1", -] - -[[package]] -name = "libp2p-relay" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4931547ee0cce03971ccc1733ff05bb0c4349fd89120a39e9861e2bbe18843c3" -dependencies = [ - "asynchronous-codec", - "bytes", - "either", - "futures", - "futures-timer", - "instant", - "libp2p-core", - "libp2p-swarm", - "log", - "pin-project", - "prost 0.10.3", - "prost-build 0.10.4", - "prost-codec", - "rand 0.8.5", - "smallvec", - "static_assertions", - "thiserror", - "void", -] - -[[package]] -name = "libp2p-rendezvous" -version = "0.7.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9511c9672ba33284838e349623319c8cad2d18cfad243ae46c6b7e8a2982ea4e" +checksum = "7228b9318d34689521349a86eb39a3c3a802c9efc99a0568062ffb80913e3f91" dependencies = [ - "asynchronous-codec", - "bimap", "futures", "futures-timer", "instant", "libp2p-core", "libp2p-swarm", "log", - "prost 0.10.3", - "prost-build 0.10.4", "rand 0.8.5", - "sha2 0.10.2", - "thiserror", - "unsigned-varint", "void", ] [[package]] name = "libp2p-request-response" -version = "0.19.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "508a189e2795d892c8f5c1fa1e9e0b1845d32d7b0b249dbf7b05b18811361843" +checksum = "8827af16a017b65311a410bb626205a9ad92ec0473967618425039fa5231adc1" dependencies = [ "async-trait", "bytes", @@ -3950,16 +3731,16 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "rand 0.7.3", + "rand 0.8.5", "smallvec", "unsigned-varint", ] [[package]] name = "libp2p-swarm" -version = "0.37.0" +version = "0.40.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95ac5be6c2de2d1ff3f7693fda6faf8a827b1f3e808202277783fea9f527d114" +checksum = "46d13df7c37807965d82930c0e4b04a659efcb6cca237373b206043db5398ecf" dependencies = [ "either", "fnv", @@ -3969,7 +3750,7 @@ dependencies = [ "libp2p-core", "log", "pin-project", - "rand 0.7.3", + "rand 0.8.5", "smallvec", "thiserror", "void", @@ -3977,48 +3758,36 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.28.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f54a64b6957249e0ce782f8abf41d97f69330d02bf229f0672d864f0650cc76" +checksum = "a0eddc4497a8b5a506013c40e8189864f9c3a00db2b25671f428ae9007f3ba32" dependencies = [ + "heck", "quote", "syn", ] [[package]] name = "libp2p-tcp" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a6771dc19aa3c65d6af9a8c65222bfc8fcd446630ddca487acd161fa6096f3b" +checksum = "9839d96761491c6d3e238e70554b856956fca0ab60feb9de2cd08eed4473fa92" dependencies = [ "async-io", "futures", "futures-timer", "if-watch", - "ipnet", "libc", "libp2p-core", "log", "socket2", ] -[[package]] -name = "libp2p-uds" -version = "0.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d125e3e5f0d58f3c6ac21815b20cf4b6a88b8db9dc26368ea821838f4161fd4d" -dependencies = [ 
- "async-std", - "futures", - "libp2p-core", - "log", -] - [[package]] name = "libp2p-wasm-ext" -version = "0.34.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec894790eec3c1608f8d1a8a0bdf0dbeb79ed4de2dce964222011c2896dfa05a" +checksum = "a17b5b8e7a73e379e47b1b77f8a82c4721e97eca01abcd18e9cd91a23ca6ce97" dependencies = [ "futures", "js-sys", @@ -4030,9 +3799,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.36.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9808e57e81be76ff841c106b4c5974fb4d41a233a7bdd2afbf1687ac6def3818" +checksum = "3758ae6f89b2531a24b6d9f5776bda6a626b60a57600d7185d43dfa75ca5ecc4" dependencies = [ "either", "futures", @@ -4049,12 +3818,13 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.38.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6dea686217a06072033dc025631932810e2f6ad784e4fafa42e27d311c7a81c" +checksum = "30f079097a21ad017fc8139460630286f02488c8c13b26affb46623aa20d8845" dependencies = [ "futures", "libp2p-core", + "log", "parking_lot 0.12.1", "thiserror", "yamux", @@ -4233,6 +4003,15 @@ dependencies = [ "hashbrown 0.11.2", ] +[[package]] +name = "lru" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6e8aaa3f231bb4bd57b84b2d5dc3ae7f350265df8aa96492e0bc394a1571909" +dependencies = [ + "hashbrown 0.12.3", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -4491,7 +4270,7 @@ dependencies = [ "digest 0.10.3", "multihash-derive", "sha2 0.10.2", - "sha3 0.10.0", + "sha3", "unsigned-varint", ] @@ -4517,9 +4296,9 @@ checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" [[package]] name = "multistream-select" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "363a84be6453a70e63513660f4894ef815daf88e3356bffcda9ca27d810ce83b" +checksum = "9bc41247ec209813e2fd414d6e16b9d94297dacf3cd613fa6ef09cd4d9755c10" dependencies = [ "bytes", "futures", @@ -4581,9 +4360,9 @@ dependencies = [ [[package]] name = "netlink-packet-route" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733ea73609acfd7fa7ddadfb7bf709b0471668c456ad9513685af543a06342b2" +checksum = "d9ea4302b9759a7a88242299225ea3688e63c85ea136371bb6cf94fd674efaab" dependencies = [ "anyhow", "bitflags", @@ -4607,23 +4386,24 @@ dependencies = [ [[package]] name = "netlink-proto" -version = "0.9.2" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8785b8141e8432aa45fceb922a7e876d7da3fad37fa7e7ec702ace3aa0826b" +checksum = "65b4b14489ab424703c092062176d52ba55485a89c076b4f9db05092b7223aa6" dependencies = [ "bytes", "futures", "log", "netlink-packet-core", "netlink-sys", + "thiserror", "tokio", ] [[package]] name = "netlink-sys" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e4c9f9547a08241bee7b6558b9b98e1f290d187de8b7cfca2bbb4937bcaa8f8" +checksum = "92b654097027250401127914afb37cb1f311df6610a9891ff07a757e94199027" dependencies = [ "async-io", "bytes", @@ -4634,9 +4414,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.22.3" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e4916f159ed8e5de0082076562152a76b7a1f64a01fd9d1e0fea002c37624faf" +checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ "bitflags", "cc", @@ -4647,15 +4427,13 @@ dependencies = [ [[package]] name = "nix" -version = "0.23.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" +checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" dependencies = [ "bitflags", - "cc", "cfg-if 1.0.0", "libc", - "memoffset", ] [[package]] @@ -5197,15 +4975,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "owning_ref" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" -dependencies = [ - "stable_deref_trait", -] - [[package]] name = "pallet-alliance" version = "4.0.0-dev" @@ -6962,23 +6731,23 @@ dependencies = [ [[package]] name = "poly1305" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fcffab1f78ebbdf4b93b68c1ffebc24037eedf271edaca795732b24e5e4e349" +checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" dependencies = [ - "cpufeatures 0.1.5", + "cpufeatures", "opaque-debug 0.3.0", "universal-hash", ] [[package]] name = "polyval" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6ba6a405ef63530d6cb12802014b22f9c5751bd17cdcddbe9e46d5c8ae83287" +checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" dependencies = [ "cfg-if 1.0.0", - "cpufeatures 0.1.5", + "cpufeatures", "opaque-debug 0.3.0", "universal-hash", ] @@ -7109,37 +6878,27 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" +checksum = "3c473049631c233933d6286c88bbb7be30e62ec534cf99a9ae0079211f7fa603" dependencies = [ "dtoa", "itoa 1.0.1", - "owning_ref", + "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] [[package]] name = "prometheus-client-derive-text-encode" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e12d01b9d66ad9eb4529c57666b6263fc1993cb30261d83ead658fdd932652" +checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "prost" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc03e116981ff7d8da8e5c220e374587b98d294af7ba7dd7fda761158f00086f" -dependencies = [ - "bytes", - "prost-derive 0.10.1", -] - [[package]] name = "prost" version = "0.11.0" @@ -7147,29 +6906,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" dependencies = [ "bytes", - "prost-derive 0.11.0", -] - -[[package]] -name = "prost-build" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" -dependencies = [ - "bytes", - "cfg-if 1.0.0", - "cmake", - "heck", - "itertools", - "lazy_static", - "log", - "multimap", - "petgraph", 
- "prost 0.10.3", - "prost-types 0.10.1", - "regex", - "tempfile", - "which", + "prost-derive", ] [[package]] @@ -7185,8 +6922,8 @@ dependencies = [ "log", "multimap", "petgraph", - "prost 0.11.0", - "prost-types 0.11.1", + "prost", + "prost-types", "regex", "tempfile", "which", @@ -7194,30 +6931,17 @@ dependencies = [ [[package]] name = "prost-codec" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" +checksum = "011ae9ff8359df7915f97302d591cdd9e0e27fbd5a4ddc5bd13b71079bb20987" dependencies = [ "asynchronous-codec", "bytes", - "prost 0.10.3", + "prost", "thiserror", "unsigned-varint", ] -[[package]] -name = "prost-derive" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "prost-derive" version = "0.11.0" @@ -7231,16 +6955,6 @@ dependencies = [ "syn", ] -[[package]] -name = "prost-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" -dependencies = [ - "bytes", - "prost 0.10.3", -] - [[package]] name = "prost-types" version = "0.11.1" @@ -7248,7 +6962,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" dependencies = [ "bytes", - "prost 0.11.0", + "prost", ] [[package]] @@ -7679,16 +7393,16 @@ dependencies = [ [[package]] name = "rtnetlink" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f54290e54521dac3de4149d83ddf9f62a359b3cc93bcb494a794a41e6f4744b" +checksum = "322c53fd76a18698f1c27381d58091de3a043d356aa5bd0d510608b565f469a0" dependencies = [ "async-global-executor", "futures", "log", "netlink-packet-route", "netlink-proto", - "nix 0.22.3", + "nix 0.24.2", "thiserror", ] @@ -7818,15 +7532,6 @@ dependencies = [ "rustc_version 0.2.3", ] -[[package]] -name = "salsa20" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c0fbb5f676da676c260ba276a8f43a8dc67cf02d1438423aeb1c677a7212686" -dependencies = [ - "cipher", -] - [[package]] name = "same-file" version = "1.0.6" @@ -7857,8 +7562,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "quickcheck", "rand 0.7.3", "sc-client-api", @@ -8296,7 +8001,7 @@ dependencies = [ "criterion", "env_logger", "lazy_static", - "lru", + "lru 0.7.5", "num_cpus", "parity-scale-codec", "parking_lot 0.12.1", @@ -8509,11 +8214,11 @@ dependencies = [ "linked-hash-map", "linked_hash_set", "log", - "lru", + "lru 0.7.5", "parity-scale-codec", "parking_lot 0.12.1", "pin-project", - "prost 0.11.0", + "prost", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8550,8 +8255,8 @@ dependencies = [ "futures", "libp2p", "log", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -8580,7 +8285,7 @@ dependencies = [ "libp2p", "linked_hash_set", "parity-scale-codec", - "prost-build 0.11.1", + "prost-build", "sc-consensus", "sc-peerset", "serde", @@ -8603,7 +8308,7 @@ dependencies = [ "futures-timer", 
"libp2p", "log", - "lru", + "lru 0.7.5", "quickcheck", "sc-network-common", "sc-peerset", @@ -8622,8 +8327,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "sc-client-api", "sc-network-common", "sc-peerset", @@ -8643,11 +8348,11 @@ dependencies = [ "futures", "libp2p", "log", - "lru", + "lru 0.7.5", "mockall", "parity-scale-codec", - "prost 0.11.0", - "prost-build 0.11.1", + "prost", + "prost-build", "quickcheck", "sc-block-builder", "sc-client-api", @@ -9435,7 +9140,7 @@ checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", - "cpufeatures 0.2.1", + "cpufeatures", "digest 0.9.0", "opaque-debug 0.3.0", ] @@ -9447,22 +9152,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" dependencies = [ "cfg-if 1.0.0", - "cpufeatures 0.2.1", + "cpufeatures", "digest 0.10.3", ] -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", -] - [[package]] name = "sha3" version = "0.10.0" @@ -9738,7 +9431,7 @@ version = "4.0.0-dev" dependencies = [ "futures", "log", - "lru", + "lru 0.7.5", "parity-scale-codec", "parking_lot 0.12.1", "sp-api", @@ -9900,7 +9593,7 @@ dependencies = [ "byteorder", "digest 0.10.3", "sha2 0.10.2", - "sha3 0.10.0", + "sha3", "sp-std", "twox-hash", ] @@ -10379,7 +10072,7 @@ dependencies = [ "hash-db", "hashbrown 0.12.3", "lazy_static", - "lru", + "lru 0.7.5", "memory-db", "nohash-hasher", "parity-scale-codec", @@ -11289,9 +10982,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c31f240f59877c3d4bb3b3ea0ec5a6a0cff07323580ff8c7a605cd7d08b255d" +checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", "cfg-if 1.0.0", @@ -11303,30 +10996,30 @@ dependencies = [ "idna", "ipnet", "lazy_static", - "log", "rand 0.8.5", "smallvec", "thiserror", "tinyvec", + "tracing", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.21.2" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ba72c2ea84515690c9fcef4c6c660bb9df3036ed1051686de84605b74fd558" +checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ "cfg-if 1.0.0", "futures-util", "ipconfig", "lazy_static", - "log", "lru-cache", "parking_lot 0.12.1", "resolv-conf", "smallvec", "thiserror", + "tracing", "trust-dns-proto", ] @@ -11468,9 +11161,9 @@ checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] name = "universal-hash" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" +checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ "generic-array 0.14.4", "subtle", @@ -12237,15 +11930,15 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.29.0" +version = "0.34.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "aac7fef12f4b59cd0a29339406cc9203ab44e440ddff6b3f5a41455349fa9cf3" +checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" dependencies = [ - "windows_aarch64_msvc 0.29.0", - "windows_i686_gnu 0.29.0", - "windows_i686_msvc 0.29.0", - "windows_x86_64_gnu 0.29.0", - "windows_x86_64_msvc 0.29.0", + "windows_aarch64_msvc 0.34.0", + "windows_i686_gnu 0.34.0", + "windows_i686_msvc 0.34.0", + "windows_x86_64_gnu 0.34.0", + "windows_x86_64_msvc 0.34.0", ] [[package]] @@ -12276,15 +11969,15 @@ dependencies = [ [[package]] name = "windows_aarch64_msvc" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d027175d00b01e0cbeb97d6ab6ebe03b12330a35786cbaca5252b1c4bf5d9b" +checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" [[package]] name = "windows_aarch64_msvc" -version = "0.32.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" +checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" [[package]] name = "windows_aarch64_msvc" @@ -12294,15 +11987,15 @@ checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] name = "windows_i686_gnu" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8793f59f7b8e8b01eda1a652b2697d87b93097198ae85f823b969ca5b89bba58" +checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" [[package]] name = "windows_i686_gnu" -version = "0.32.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" +checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" [[package]] name = "windows_i686_gnu" @@ -12312,15 +12005,15 @@ checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] name = "windows_i686_msvc" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8602f6c418b67024be2996c512f5f995de3ba417f4c75af68401ab8756796ae4" +checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" [[package]] name = "windows_i686_msvc" -version = "0.32.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" +checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" [[package]] name = "windows_i686_msvc" @@ -12330,15 +12023,15 @@ checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] name = "windows_x86_64_gnu" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d615f419543e0bd7d2b3323af0d86ff19cbc4f816e6453f36a2c2ce889c354" +checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" [[package]] name = "windows_x86_64_gnu" -version = "0.32.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" +checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" [[package]] name = 
"windows_x86_64_gnu" @@ -12348,15 +12041,15 @@ checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] name = "windows_x86_64_msvc" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d95421d9ed3672c280884da53201a5c46b7b2765ca6faf34b0d71cf34a3561" +checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" [[package]] name = "windows_x86_64_msvc" -version = "0.32.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" +checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" [[package]] name = "windows_x86_64_msvc" @@ -12384,9 +12077,9 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088" +checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f" dependencies = [ "curve25519-dalek 3.0.2", "rand_core 0.5.1", @@ -12395,9 +12088,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c0608f53c1dc0bad505d03a34bbd49fbf2ad7b51eb036123e896365532745a1" +checksum = "e5d9ba232399af1783a58d8eb26f6b5006fbefe2dc9ef36bd283324792d03ea5" dependencies = [ "futures", "log", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 37377cdc6dde3..91c977d90a660 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = futures = "0.3.21" futures-timer = "3.0.1" ip_network = "0.4.1" -libp2p = { version = "0.46.1", default-features = false, features = ["kad"] } +libp2p = { version = "0.49.0", default-features = false, features = ["kad"] } log = "0.4.17" prost = "0.11" rand = "0.7.2" diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 334b2638ca58c..208440b7ab1ea 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -91,10 +91,7 @@ fn cryptos_are_compatible() { let libp2p_public = libp2p_secret.public(); let sp_core_secret = { - let libp2p_ed_secret = match libp2p_secret.clone() { - libp2p::identity::Keypair::Ed25519(x) => x, - _ => panic!("generate_ed25519 should have generated an Ed25519 key ¯\\_(ツ)_/¯"), - }; + let libp2p::identity::Keypair::Ed25519(libp2p_ed_secret) = libp2p_secret.clone(); sp_core::ed25519::Pair::from_seed_slice(&libp2p_ed_secret.secret().as_ref()).unwrap() }; let sp_core_public = sp_core_secret.public(); diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 66742e14789ef..88568509c8709 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -18,7 +18,7 @@ chrono = "0.4.10" clap = { version = "3.1.18", features = ["derive"] } fdlimit = "0.2.1" futures = "0.3.21" -libp2p = "0.46.1" +libp2p = "0.49.0" log = "0.4.17" names = { version = "0.13.0", default-features = false } parity-scale-codec = "3.0.0" diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 180ad7c38008d..d5745665a79fd 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -16,7 +16,7 
@@ targets = ["x86_64-unknown-linux-gnu"] async-trait = "0.1.57" futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" -libp2p = { version = "0.46.1", default-features = false } +libp2p = { version = "0.49.0", default-features = false } log = "0.4.17" parking_lot = "0.12.1" serde = { version = "1.0", features = ["derive"] } diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 8ecf2b9cec787..e84013fbfe436 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] ahash = "0.7.6" futures = "0.3.21" futures-timer = "3.0.1" -libp2p = { version = "0.46.1", default-features = false } +libp2p = { version = "0.49.0", default-features = false } log = "0.4.17" lru = "0.7.5" tracing = "0.1.29" diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 99e6c9e708e7f..9c2833ec00908 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -26,7 +26,7 @@ fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" ip_network = "0.4.1" -libp2p = "0.46.1" +libp2p = { version = "0.49.0", features = ["async-std", "dns", "identify", "kad", "mdns-async-io", "mplex", "noise", "ping", "tcp", "yamux"] } linked_hash_set = "0.1.3" linked-hash-map = "0.5.4" log = "0.4.17" diff --git a/client/network/bitswap/Cargo.toml b/client/network/bitswap/Cargo.toml index 9fa34aab17dfb..f60e21b4429fb 100644 --- a/client/network/bitswap/Cargo.toml +++ b/client/network/bitswap/Cargo.toml @@ -19,7 +19,7 @@ prost-build = "0.11" [dependencies] cid = "0.8.6" futures = "0.3.21" -libp2p = "0.46.1" +libp2p = "0.49.0" log = "0.4.17" prost = "0.11" thiserror = "1.0" diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 0e9801ec79e63..48d83a59c742b 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -25,7 +25,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ ] } futures = "0.3.21" futures-timer = "3.0.2" -libp2p = "0.46.1" +libp2p = { version = "0.49.0", features = [ "request-response", "kad" ] } linked_hash_set = "0.1.3" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } smallvec = "1.8.0" diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml index a1a5dcf85eb5d..cd3be390d48c8 100644 --- a/client/network/light/Cargo.toml +++ b/client/network/light/Cargo.toml @@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" -libp2p = "0.46.1" +libp2p = "0.49.0" log = "0.4.16" prost = "0.11" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index b31f36eb46692..fa130fb4baacd 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -24,18 +24,13 @@ use crate::{ }; use bytes::Bytes; -use codec::Encode; use futures::channel::oneshot; use libp2p::{ core::{Multiaddr, PeerId, PublicKey}, - identify::IdentifyInfo, + identify::Info as IdentifyInfo, kad::record, - swarm::{ - NetworkBehaviour, NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters, - }, NetworkBehaviour, }; -use log::debug; use sc_consensus::import_queue::{IncomingBlock, RuntimeOrigin}; use sc_network_common::{ @@ -46,26 +41,22 @@ use sc_network_common::{ ProtocolName, }, request_responses::{IfDisconnected, 
ProtocolConfig, RequestFailure}, + sync::{warp::WarpProofRequest, OpaqueBlockRequest, OpaqueStateRequest}, }; -use sc_peerset::PeersetHandle; +use sc_peerset::{PeersetHandle, ReputationChange}; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_runtime::{ traits::{Block as BlockT, NumberFor}, Justifications, }; -use std::{ - collections::{HashSet, VecDeque}, - iter, - task::{Context, Poll}, - time::Duration, -}; +use std::{collections::HashSet, time::Duration}; pub use crate::request_responses::{InboundFailure, OutboundFailure, RequestId, ResponseFailure}; /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourOut", poll_method = "poll", event_process = true)] +#[behaviour(out_event = "BehaviourOut")] pub struct Behaviour where B: BlockT, @@ -80,25 +71,6 @@ where discovery: DiscoveryBehaviour, /// Generic request-response protocols. request_responses: request_responses::RequestResponsesBehaviour, - - /// Queue of events to produce for the outside. - #[behaviour(ignore)] - events: VecDeque>, - - /// Protocol name used to send out block requests via - /// [`request_responses::RequestResponsesBehaviour`]. - #[behaviour(ignore)] - block_request_protocol_name: String, - - /// Protocol name used to send out state requests via - /// [`request_responses::RequestResponsesBehaviour`]. - #[behaviour(ignore)] - state_request_protocol_name: String, - - /// Protocol name used to send out warp sync requests via - /// [`request_responses::RequestResponsesBehaviour`]. - #[behaviour(ignore)] - warp_sync_protocol_name: Option, } /// Event generated by `Behaviour`. @@ -107,7 +79,7 @@ pub enum BehaviourOut { JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), /// Started a random iterative Kademlia discovery query. - RandomKademliaStarted(ProtocolId), + RandomKademliaStarted(Vec), /// We have received a request from a peer and answered it. /// @@ -136,6 +108,12 @@ pub enum BehaviourOut { result: Result<(), RequestFailure>, }, + /// A request protocol handler issued reputation changes for the given peer. + ReputationChanges { + peer: PeerId, + changes: Vec, + }, + /// Opened a substream with the given node with the given notifications protocol. /// /// The protocol is always one of the notification protocols that have been registered. @@ -186,15 +164,60 @@ pub enum BehaviourOut { messages: Vec<(ProtocolName, Bytes)>, }, + /// A new block request must be emitted. + BlockRequest { + /// Node we send the request to. + target: PeerId, + /// Opaque implementation-specific block request. + request: OpaqueBlockRequest, + /// One-shot channel to receive the response. + pending_response: oneshot::Sender, RequestFailure>>, + }, + + /// A new state request must be emitted. + StateRequest { + /// Node we send the request to. + target: PeerId, + /// Opaque implementation-specific state request. + request: OpaqueStateRequest, + /// One-shot channel to receive the response. + pending_response: oneshot::Sender, RequestFailure>>, + }, + + /// A new warp sync request must be emitted. + WarpSyncRequest { + /// Node we send the request to. + target: PeerId, + /// Warp sync request. + request: WarpProofRequest, + /// One-shot channel to receive the response. + pending_response: oneshot::Sender, RequestFailure>>, + }, + /// Now connected to a new peer for syncing purposes. SyncConnected(PeerId), /// No longer connected to a peer for syncing purposes. 
SyncDisconnected(PeerId), + /// We have obtained identity information from a peer, including the addresses it is listening + /// on. + PeerIdentify { + /// Id of the peer that has been identified. + peer_id: PeerId, + /// Information about the peer. + info: IdentifyInfo, + }, + + /// We have learned about the existence of a node on the default set. + Discovered(PeerId), + /// Events generated by a DHT as a response to get_value or put_value requests as well as the /// request duration. Dht(DhtEvent, Duration), + + /// Ignored event generated by lower layers. + None, } impl Behaviour @@ -216,17 +239,9 @@ where mut request_response_protocols: Vec, peerset: PeersetHandle, ) -> Result { - // Extract protocol name and add to `request_response_protocols`. - let block_request_protocol_name = block_request_protocol_config.name.to_string(); - let state_request_protocol_name = state_request_protocol_config.name.to_string(); - let warp_sync_protocol_name = match warp_sync_protocol_config { - Some(config) => { - let name = config.name.to_string(); - request_response_protocols.push(config); - Some(name) - }, - None => None, - }; + if let Some(config) = warp_sync_protocol_config { + request_response_protocols.push(config); + } request_response_protocols.push(block_request_protocol_config); request_response_protocols.push(state_request_protocol_config); request_response_protocols.push(light_client_request_protocol_config); @@ -239,10 +254,6 @@ where request_response_protocols.into_iter(), peerset, )?, - events: VecDeque::new(), - block_request_protocol_name, - state_request_protocol_name, - warp_sync_protocol_name, }) } @@ -310,6 +321,17 @@ where &mut self.substrate } + /// Add a self-reported address of a remote peer to the k-buckets of the supported + /// DHTs (`supported_protocols`). + pub fn add_self_reported_address_to_dht( + &mut self, + peer_id: &PeerId, + supported_protocols: &[impl AsRef<[u8]>], + addr: Multiaddr, + ) { + self.discovery.add_self_reported_address(peer_id, supported_protocols, addr); + } + /// Start querying a record from the DHT. Will later produce either a `ValueFound` or a /// `ValueNotFound` event. 
pub fn get_value(&mut self, key: record::Key) { @@ -333,221 +355,91 @@ fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { } } -impl NetworkBehaviourEventProcess> for Behaviour -where - B: BlockT, - Client: HeaderBackend + 'static, -{ - fn inject_event(&mut self, event: CustomMessageOutcome) { +impl From> for BehaviourOut { + fn from(event: CustomMessageOutcome) -> Self { match event { CustomMessageOutcome::BlockImport(origin, blocks) => - self.events.push_back(BehaviourOut::BlockImport(origin, blocks)), - CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => self - .events - .push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), - CustomMessageOutcome::BlockRequest { target, request, pending_response } => { - match self.substrate.encode_block_request(&request) { - Ok(data) => { - self.request_responses.send_request( - &target, - &self.block_request_protocol_name, - data, - pending_response, - IfDisconnected::ImmediateError, - ); - }, - Err(err) => { - log::warn!( - target: "sync", - "Failed to encode block request {:?}: {:?}", - request, err - ); - }, - } - }, - CustomMessageOutcome::StateRequest { target, request, pending_response } => { - match self.substrate.encode_state_request(&request) { - Ok(data) => { - self.request_responses.send_request( - &target, - &self.state_request_protocol_name, - data, - pending_response, - IfDisconnected::ImmediateError, - ); - }, - Err(err) => { - log::warn!( - target: "sync", - "Failed to encode state request {:?}: {:?}", - request, err - ); - }, - } - }, + BehaviourOut::BlockImport(origin, blocks), + CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => + BehaviourOut::JustificationImport(origin, hash, nb, justification), + CustomMessageOutcome::BlockRequest { target, request, pending_response } => + BehaviourOut::BlockRequest { target, request, pending_response }, + CustomMessageOutcome::StateRequest { target, request, pending_response } => + BehaviourOut::StateRequest { target, request, pending_response }, CustomMessageOutcome::WarpSyncRequest { target, request, pending_response } => - match &self.warp_sync_protocol_name { - Some(name) => self.request_responses.send_request( - &target, - name, - request.encode(), - pending_response, - IfDisconnected::ImmediateError, - ), - None => { - log::warn!( - target: "sync", - "Trying to send warp sync request when no protocol is configured {:?}", - request, - ); - }, - }, + BehaviourOut::WarpSyncRequest { target, request, pending_response }, CustomMessageOutcome::NotificationStreamOpened { remote, protocol, negotiated_fallback, roles, notifications_sink, - } => { - self.events.push_back(BehaviourOut::NotificationStreamOpened { - remote, - protocol, - negotiated_fallback, - role: reported_roles_to_observed_role(roles), - notifications_sink, - }); - }, - CustomMessageOutcome::NotificationStreamReplaced { + } => BehaviourOut::NotificationStreamOpened { remote, protocol, + negotiated_fallback, + role: reported_roles_to_observed_role(roles), notifications_sink, - } => self.events.push_back(BehaviourOut::NotificationStreamReplaced { + }, + CustomMessageOutcome::NotificationStreamReplaced { remote, protocol, notifications_sink, - }), - CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => self - .events - .push_back(BehaviourOut::NotificationStreamClosed { remote, protocol }), - CustomMessageOutcome::NotificationsReceived { remote, messages } => { - self.events.push_back(BehaviourOut::NotificationsReceived { 
remote, messages }); - }, - CustomMessageOutcome::PeerNewBest(_peer_id, _number) => {}, - CustomMessageOutcome::SyncConnected(peer_id) => - self.events.push_back(BehaviourOut::SyncConnected(peer_id)), + } => BehaviourOut::NotificationStreamReplaced { remote, protocol, notifications_sink }, + CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => + BehaviourOut::NotificationStreamClosed { remote, protocol }, + CustomMessageOutcome::NotificationsReceived { remote, messages } => + BehaviourOut::NotificationsReceived { remote, messages }, + CustomMessageOutcome::PeerNewBest(_peer_id, _number) => BehaviourOut::None, + CustomMessageOutcome::SyncConnected(peer_id) => BehaviourOut::SyncConnected(peer_id), CustomMessageOutcome::SyncDisconnected(peer_id) => - self.events.push_back(BehaviourOut::SyncDisconnected(peer_id)), - CustomMessageOutcome::None => {}, + BehaviourOut::SyncDisconnected(peer_id), + CustomMessageOutcome::None => BehaviourOut::None, } } } -impl NetworkBehaviourEventProcess for Behaviour -where - B: BlockT, - Client: HeaderBackend + 'static, -{ - fn inject_event(&mut self, event: request_responses::Event) { +impl From for BehaviourOut { + fn from(event: request_responses::Event) -> Self { match event { - request_responses::Event::InboundRequest { peer, protocol, result } => { - self.events.push_back(BehaviourOut::InboundRequest { peer, protocol, result }); - }, - request_responses::Event::RequestFinished { peer, protocol, duration, result } => { - self.events.push_back(BehaviourOut::RequestFinished { - peer, - protocol, - duration, - result, - }); - }, + request_responses::Event::InboundRequest { peer, protocol, result } => + BehaviourOut::InboundRequest { peer, protocol, result }, + request_responses::Event::RequestFinished { peer, protocol, duration, result } => + BehaviourOut::RequestFinished { peer, protocol, duration, result }, request_responses::Event::ReputationChanges { peer, changes } => - for change in changes { - self.substrate.report_peer(peer, change); - }, + BehaviourOut::ReputationChanges { peer, changes }, } } } -impl NetworkBehaviourEventProcess for Behaviour -where - B: BlockT, - Client: HeaderBackend + 'static, -{ - fn inject_event(&mut self, event: peer_info::PeerInfoEvent) { - let peer_info::PeerInfoEvent::Identified { - peer_id, - info: IdentifyInfo { protocol_version, agent_version, mut listen_addrs, protocols, .. }, - } = event; - - if listen_addrs.len() > 30 { - debug!( - target: "sub-libp2p", - "Node {:?} has reported more than 30 addresses; it is identified by {:?} and {:?}", - peer_id, protocol_version, agent_version - ); - listen_addrs.truncate(30); - } - - for addr in listen_addrs { - self.discovery.add_self_reported_address(&peer_id, protocols.iter(), addr); - } - self.substrate.add_default_set_discovered_nodes(iter::once(peer_id)); +impl From for BehaviourOut { + fn from(event: peer_info::PeerInfoEvent) -> Self { + let peer_info::PeerInfoEvent::Identified { peer_id, info } = event; + BehaviourOut::PeerIdentify { peer_id, info } } } -impl NetworkBehaviourEventProcess for Behaviour -where - B: BlockT, - Client: HeaderBackend + 'static, -{ - fn inject_event(&mut self, out: DiscoveryOut) { - match out { +impl From for BehaviourOut { + fn from(event: DiscoveryOut) -> Self { + match event { DiscoveryOut::UnroutablePeer(_peer_id) => { // Obtaining and reporting listen addresses for unroutable peers back // to Kademlia is handled by the `Identify` protocol, part of the - // `PeerInfoBehaviour`. 
See the `NetworkBehaviourEventProcess` - // implementation for `PeerInfoEvent`. - }, - DiscoveryOut::Discovered(peer_id) => { - self.substrate.add_default_set_discovered_nodes(iter::once(peer_id)); - }, - DiscoveryOut::ValueFound(results, duration) => { - self.events - .push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results), duration)); - }, - DiscoveryOut::ValueNotFound(key, duration) => { - self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration)); - }, - DiscoveryOut::ValuePut(key, duration) => { - self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePut(key), duration)); - }, - DiscoveryOut::ValuePutFailed(key, duration) => { - self.events - .push_back(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration)); + // `PeerInfoBehaviour`. See the `From` + // implementation. + BehaviourOut::None }, + DiscoveryOut::Discovered(peer_id) => BehaviourOut::Discovered(peer_id), + DiscoveryOut::ValueFound(results, duration) => + BehaviourOut::Dht(DhtEvent::ValueFound(results), duration), + DiscoveryOut::ValueNotFound(key, duration) => + BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration), + DiscoveryOut::ValuePut(key, duration) => + BehaviourOut::Dht(DhtEvent::ValuePut(key), duration), + DiscoveryOut::ValuePutFailed(key, duration) => + BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration), DiscoveryOut::RandomKademliaStarted(protocols) => - for protocol in protocols { - self.events.push_back(BehaviourOut::RandomKademliaStarted(protocol)); - }, - } - } -} - -impl Behaviour -where - B: BlockT, - Client: HeaderBackend + 'static, -{ - fn poll( - &mut self, - _cx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll, ::ConnectionHandler>> - { - if let Some(event) = self.events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) + BehaviourOut::RandomKademliaStarted(protocols), } - - Poll::Pending } } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index dcd7c7d8ad6de..14f7e8ffbf76a 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -455,11 +455,8 @@ mod tests { } fn secret_bytes(kp: &Keypair) -> Vec { - match kp { - Keypair::Ed25519(p) => p.secret().as_ref().iter().cloned().collect(), - Keypair::Secp256k1(p) => p.secret().to_bytes().to_vec(), - _ => panic!("Unexpected keypair."), - } + let Keypair::Ed25519(p) = kp; + p.secret().as_ref().iter().cloned().collect() } #[test] diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 8422e34485125..da4dec70b29ab 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -198,7 +198,7 @@ impl DiscoveryConfig { let proto_name = protocol_name_from_protocol_id(&protocol_id); let mut config = KademliaConfig::default(); - config.set_protocol_name(proto_name); + config.set_protocol_names(std::iter::once(proto_name.into()).collect()); // By default Kademlia attempts to insert all peers into its routing table once a // dialing attempt succeeds. In order to control which peer is added, disable the // auto-insertion and instead add peers manually. 
@@ -232,9 +232,15 @@ impl DiscoveryConfig { allow_private_ipv4, discovery_only_if_under_num, mdns: if enable_mdns { - MdnsWrapper::Instantiating(Mdns::new(MdnsConfig::default()).boxed()) + match Mdns::new(MdnsConfig::default()) { + Ok(mdns) => Some(mdns), + Err(err) => { + warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); + None + }, + } } else { - MdnsWrapper::Disabled + None }, allow_non_globals_in_dht, known_external_addresses: LruHashSet::new( @@ -256,7 +262,7 @@ pub struct DiscoveryBehaviour { /// Kademlia requests and answers. kademlias: HashMap>, /// Discovers nodes on the local network. - mdns: MdnsWrapper, + mdns: Option, /// Stream that fires when we need to perform the next random Kademlia query. `None` if /// random walking is disabled. next_kad_random_query: Option, @@ -320,7 +326,7 @@ impl DiscoveryBehaviour { pub fn add_self_reported_address( &mut self, peer_id: &PeerId, - supported_protocols: impl Iterator>, + supported_protocols: &[impl AsRef<[u8]>], addr: Multiaddr, ) { if !self.allow_non_globals_in_dht && !self.can_add_to_dht(&addr) { @@ -329,17 +335,18 @@ impl DiscoveryBehaviour { } let mut added = false; - for protocol in supported_protocols { - for kademlia in self.kademlias.values_mut() { - if protocol.as_ref() == kademlia.protocol_name() { - trace!( - target: "sub-libp2p", - "Adding self-reported address {} from {} to Kademlia DHT {}.", - addr, peer_id, String::from_utf8_lossy(kademlia.protocol_name()), - ); - kademlia.add_address(peer_id, addr.clone()); - added = true; - } + for kademlia in self.kademlias.values_mut() { + if let Some(matching_protocol) = supported_protocols + .iter() + .find(|p| kademlia.protocol_names().iter().any(|k| k.as_ref() == p.as_ref())) + { + trace!( + target: "sub-libp2p", + "Adding self-reported address {} from {} to Kademlia DHT {}.", + addr, peer_id, String::from_utf8_lossy(matching_protocol.as_ref()), + ); + kademlia.add_address(peer_id, addr.clone()); + added = true; } } @@ -532,7 +539,9 @@ impl NetworkBehaviour for DiscoveryBehaviour { list_to_filter.extend(k.addresses_of_peer(peer_id)) } - list_to_filter.extend(self.mdns.addresses_of_peer(peer_id)); + if let Some(ref mut mdns) = self.mdns { + list_to_filter.extend(mdns.addresses_of_peer(peer_id)); + } if !self.allow_private_ipv4 { list_to_filter.retain(|addr| match addr.iter().next() { @@ -913,36 +922,38 @@ impl NetworkBehaviour for DiscoveryBehaviour { } // Poll mDNS. - while let Poll::Ready(ev) = self.mdns.poll(cx, params) { - match ev { - NetworkBehaviourAction::GenerateEvent(event) => match event { - MdnsEvent::Discovered(list) => { - if self.num_connections >= self.discovery_only_if_under_num { - continue - } - - self.pending_events - .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - } + if let Some(ref mut mdns) = self.mdns { + while let Poll::Ready(ev) = mdns.poll(cx, params) { + match ev { + NetworkBehaviourAction::GenerateEvent(event) => match event { + MdnsEvent::Discovered(list) => { + if self.num_connections >= self.discovery_only_if_under_num { + continue + } + + self.pending_events + .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + } + }, + MdnsEvent::Expired(_) => {}, + }, + NetworkBehaviourAction::Dial { .. 
} => { + unreachable!("mDNS never dials!"); }, - MdnsEvent::Expired(_) => {}, - }, - NetworkBehaviourAction::Dial { .. } => { - unreachable!("mDNS never dials!"); - }, - NetworkBehaviourAction::NotifyHandler { event, .. } => match event {}, /* `event` is an enum with no variant */ - NetworkBehaviourAction::ReportObservedAddr { address, score } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { - address, - score, - }), - NetworkBehaviourAction::CloseConnection { peer_id, connection } => - return Poll::Ready(NetworkBehaviourAction::CloseConnection { - peer_id, - connection, - }), + NetworkBehaviourAction::NotifyHandler { event, .. } => match event {}, /* `event` is an enum with no variant */ + NetworkBehaviourAction::ReportObservedAddr { address, score } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), + NetworkBehaviourAction::CloseConnection { peer_id, connection } => + return Poll::Ready(NetworkBehaviourAction::CloseConnection { + peer_id, + connection, + }), + } } } @@ -959,45 +970,6 @@ fn protocol_name_from_protocol_id(id: &ProtocolId) -> Vec { v } -/// [`Mdns::new`] returns a future. Instead of forcing [`DiscoveryConfig::finish`] and all its -/// callers to be async, lazily instantiate [`Mdns`]. -enum MdnsWrapper { - Instantiating(futures::future::BoxFuture<'static, std::io::Result>), - Ready(Mdns), - Disabled, -} - -impl MdnsWrapper { - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - match self { - Self::Instantiating(_) => Vec::new(), - Self::Ready(mdns) => mdns.addresses_of_peer(peer_id), - Self::Disabled => Vec::new(), - } - } - - fn poll( - &mut self, - cx: &mut Context<'_>, - params: &mut impl PollParameters, - ) -> Poll::ConnectionHandler>> { - loop { - match self { - Self::Instantiating(fut) => - *self = match futures::ready!(fut.as_mut().poll(cx)) { - Ok(mdns) => Self::Ready(mdns), - Err(err) => { - warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); - Self::Disabled - }, - }, - Self::Ready(mdns) => return mdns.poll(cx, params), - Self::Disabled => return Poll::Pending, - } - } - } -} - #[cfg(test)] mod tests { use super::{protocol_name_from_protocol_id, DiscoveryConfig, DiscoveryOut}; @@ -1100,8 +1072,7 @@ mod tests { .behaviour_mut() .add_self_reported_address( &other, - [protocol_name_from_protocol_id(&protocol_id)] - .iter(), + &[protocol_name_from_protocol_id(&protocol_id)], addr, ); @@ -1156,7 +1127,7 @@ mod tests { // Add remote peer with unsupported protocol. discovery.add_self_reported_address( &remote_peer_id, - [protocol_name_from_protocol_id(&unsupported_protocol_id)].iter(), + &[protocol_name_from_protocol_id(&unsupported_protocol_id)], remote_addr.clone(), ); @@ -1173,7 +1144,7 @@ mod tests { // Add remote peer with supported protocol. discovery.add_self_reported_address( &remote_peer_id, - [protocol_name_from_protocol_id(&supported_protocol_id)].iter(), + &[protocol_name_from_protocol_id(&supported_protocol_id)], remote_addr.clone(), ); @@ -1212,7 +1183,7 @@ mod tests { // Add remote peer with `protocol_a` only. 
discovery.add_self_reported_address( &remote_peer_id, - [protocol_name_from_protocol_id(&protocol_a)].iter(), + &[protocol_name_from_protocol_id(&protocol_a)], remote_addr.clone(), ); diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index c62c2ea1c5d98..e04d006f50501 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -23,8 +23,11 @@ use libp2p::{ connection::ConnectionId, either::EitherOutput, transport::ListenerId, ConnectedPoint, PeerId, PublicKey, }, - identify::{Identify, IdentifyConfig, IdentifyEvent, IdentifyInfo}, - ping::{Ping, PingConfig, PingEvent, PingSuccess}, + identify::{ + Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent, + Info as IdentifyInfo, + }, + ping::{Behaviour as Ping, Config as PingConfig, Event as PingEvent, Success as PingSuccess}, swarm::{ ConnectionHandler, IntoConnectionHandler, IntoConnectionHandlerSelect, NetworkBehaviour, NetworkBehaviourAction, PollParameters, diff --git a/client/network/src/service.rs b/client/network/src/service.rs index d1f428cc292c3..f95e67df142b0 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -38,9 +38,11 @@ use crate::{ transport, ChainSyncInterface, ReputationChange, }; +use codec::Encode; use futures::{channel::oneshot, prelude::*}; use libp2p::{ core::{either::EitherError, upgrade, ConnectedPoint, Executor}, + identify::Info as IdentifyInfo, kad::record::Key as KademliaKey, multiaddr, ping::Failure as PingFailure, @@ -264,6 +266,11 @@ where let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); + let block_request_protocol_name = params.block_request_protocol_config.name.clone(); + let state_request_protocol_name = params.state_request_protocol_config.name.clone(); + let warp_sync_protocol_name = + params.warp_sync_protocol_config.as_ref().map(|c| c.name.clone()); + // Build the swarm. let (mut swarm, bandwidth): (Swarm>, _) = { let user_agent = format!( @@ -455,6 +462,9 @@ where peers_notifications_sinks, metrics, boot_node_ids, + block_request_protocol_name, + state_request_protocol_name, + warp_sync_protocol_name, _marker: Default::default(), }) } @@ -1273,6 +1283,15 @@ where /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. peers_notifications_sinks: Arc>>, + /// Protocol name used to send out block requests via + /// [`crate::request_responses::RequestResponsesBehaviour`]. + block_request_protocol_name: ProtocolName, + /// Protocol name used to send out state requests via + /// [`crate::request_responses::RequestResponsesBehaviour`]. + state_request_protocol_name: ProtocolName, + /// Protocol name used to send out warp sync requests via + /// [`crate::request_responses::RequestResponsesBehaviour`]. + warp_sync_protocol_name: Option, /// Marker to pin the `H` generic. Serves no purpose except to not break backwards /// compatibility. 
_marker: PhantomData, @@ -1451,6 +1470,84 @@ where } this.import_queue.import_justifications(origin, hash, nb, justifications); }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::BlockRequest { + target, + request, + pending_response, + })) => { + match this + .network_service + .behaviour() + .user_protocol() + .encode_block_request(&request) + { + Ok(data) => { + this.network_service.behaviour_mut().send_request( + &target, + &this.block_request_protocol_name, + data, + pending_response, + IfDisconnected::ImmediateError, + ); + }, + Err(err) => { + log::warn!( + target: "sync", + "Failed to encode block request {:?}: {:?}", + request, err + ); + }, + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::StateRequest { + target, + request, + pending_response, + })) => { + match this + .network_service + .behaviour() + .user_protocol() + .encode_state_request(&request) + { + Ok(data) => { + this.network_service.behaviour_mut().send_request( + &target, + &this.state_request_protocol_name, + data, + pending_response, + IfDisconnected::ImmediateError, + ); + }, + Err(err) => { + log::warn!( + target: "sync", + "Failed to encode state request {:?}: {:?}", + request, err + ); + }, + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::WarpSyncRequest { + target, + request, + pending_response, + })) => match &this.warp_sync_protocol_name { + Some(name) => this.network_service.behaviour_mut().send_request( + &target, + &name, + request.encode(), + pending_response, + IfDisconnected::ImmediateError, + ), + None => { + log::warn!( + target: "sync", + "Trying to send warp sync request when no protocol is configured {:?}", + request, + ); + }, + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, @@ -1526,14 +1623,58 @@ where }, } }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::ReputationChanges { + peer, + changes, + })) => + for change in changes { + this.network_service.behaviour().user_protocol().report_peer(peer, change); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::PeerIdentify { + peer_id, + info: + IdentifyInfo { + protocol_version, + agent_version, + mut listen_addrs, + protocols, + .. + }, + })) => { + if listen_addrs.len() > 30 { + debug!( + target: "sub-libp2p", + "Node {:?} has reported more than 30 addresses; it is identified by {:?} and {:?}", + peer_id, protocol_version, agent_version + ); + listen_addrs.truncate(30); + } + for addr in listen_addrs { + this.network_service + .behaviour_mut() + .add_self_reported_address_to_dht(&peer_id, &protocols, addr); + } + this.network_service + .behaviour_mut() + .user_protocol_mut() + .add_default_set_discovered_nodes(iter::once(peer_id)); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Discovered(peer_id))) => { + this.network_service + .behaviour_mut() + .user_protocol_mut() + .add_default_set_discovered_nodes(iter::once(peer_id)); + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted( - protocol, + protocols, ))) => if let Some(metrics) = this.metrics.as_ref() { - metrics - .kademlia_random_queries_total - .with_label_values(&[protocol.as_ref()]) - .inc(); + for protocol in protocols { + metrics + .kademlia_random_queries_total + .with_label_values(&[protocol.as_ref()]) + .inc(); + } }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, @@ -1654,6 +1795,9 @@ where this.event_streams.send(Event::Dht(event)); }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::None)) => { + // Ignored event from lower layers. 
+ }, Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 323b70e9c22a4..441b02fb22b8a 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -22,7 +22,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } futures = "0.3.21" -libp2p = "0.46.1" +libp2p = "0.49.0" log = "0.4.17" lru = "0.7.5" mockall = "0.11.2" diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 990129229a40f..30a57bc1b5171 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -17,7 +17,7 @@ async-std = "1.11.0" async-trait = "0.1.57" futures = "0.3.21" futures-timer = "3.0.1" -libp2p = { version = "0.46.1", default-features = false } +libp2p = { version = "0.49.0", default-features = false } log = "0.4.17" parking_lot = "0.12.1" rand = "0.7.2" diff --git a/client/network/transactions/Cargo.toml b/client/network/transactions/Cargo.toml index 3b60497d42b9b..d92c07cd461a8 100644 --- a/client/network/transactions/Cargo.toml +++ b/client/network/transactions/Cargo.toml @@ -18,7 +18,7 @@ array-bytes = "4.1" codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.21" hex = "0.4.0" -libp2p = "0.46.1" +libp2p = "0.49.0" log = "0.4.17" pin-project = "1.0.12" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 5ebb21406efbf..1e8c802496453 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -21,7 +21,7 @@ futures = "0.3.21" futures-timer = "3.0.2" hyper = { version = "0.14.16", features = ["stream", "http2"] } hyper-rustls = { version = "0.23.0", features = ["http2"] } -libp2p = { version = "0.46.1", default-features = false } +libp2p = { version = "0.49.0", default-features = false } num_cpus = "1.13" once_cell = "1.8" parking_lot = "0.12.1" diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index d3d6e267f1768..ade2bc3d78d03 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.21" -libp2p = { version = "0.46.1", default-features = false } +libp2p = { version = "0.49.0", default-features = false } log = "0.4.17" serde_json = "1.0.85" wasm-timer = "0.2" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 0be1268e13d43..f8c6f281546db 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] chrono = "0.4.19" futures = "0.3.21" -libp2p = { version = "0.46.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } +libp2p = { version = "0.49.0", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.17" parking_lot = "0.12.1" pin-project = "1.0.12" From ed2466e376af406eb4e56906cdd843f791b1fd98 Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Mon, 17 Oct 2022 18:44:42 +0200 Subject: [PATCH 250/348] Execute try-state at end of each test to ensure pallet data integrity (#12453) * execute try-state at end of tests * run post condition only with try runtime * Revert "run post condition only with try runtime" This reverts commit 
7db0ecf7eaa2ee5afa5a995487b73d023ba3bcd9. * voterlist contains validators as well * fmt * simplify * fmt Co-authored-by: parity-processbot <> --- frame/staking/src/mock.rs | 108 ++---------------------------- frame/staking/src/pallet/impls.rs | 5 +- 2 files changed, 7 insertions(+), 106 deletions(-) diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 3a9351ef4a271..24e5aa97160ee 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -18,9 +18,7 @@ //! Test utilities use crate::{self as pallet_staking, *}; -use frame_election_provider_support::{ - onchain, SequentialPhragmen, SortedListProvider, VoteWeight, -}; +use frame_election_provider_support::{onchain, SequentialPhragmen, VoteWeight}; use frame_support::{ assert_ok, parameter_types, traits::{ @@ -548,108 +546,10 @@ impl ExtBuilder { sp_tracing::try_init_simple(); let mut ext = self.build(); ext.execute_with(test); - ext.execute_with(post_conditions); - } -} - -fn post_conditions() { - check_nominators(); - check_exposures(); - check_ledgers(); - check_count(); -} - -fn check_count() { - let nominator_count = Nominators::::iter_keys().count() as u32; - let validator_count = Validators::::iter().count() as u32; - assert_eq!(nominator_count, Nominators::::count()); - assert_eq!(validator_count, Validators::::count()); - - // the voters that the `VoterList` list is storing for us. - let external_voters = ::VoterList::count(); - assert_eq!(external_voters, nominator_count + validator_count); -} - -fn check_ledgers() { - // check the ledger of all stakers. - Bonded::::iter().for_each(|(_, ctrl)| assert_ledger_consistent(ctrl)) -} - -fn check_exposures() { - // a check per validator to ensure the exposure struct is always sane. - let era = active_era(); - ErasStakers::::iter_prefix_values(era).for_each(|expo| { - assert_eq!( - expo.total as u128, - expo.own as u128 + expo.others.iter().map(|e| e.value as u128).sum::(), - "wrong total exposure.", - ); - }) -} - -fn check_nominators() { - // a check per nominator to ensure their entire stake is correctly distributed. Will only kick- - // in if the nomination was submitted before the current era. - let era = active_era(); - >::iter() - .filter_map( - |(nominator, nomination)| { - if nomination.submitted_in > era { - Some(nominator) - } else { - None - } - }, - ) - .for_each(|nominator| { - // must be bonded. - assert_is_stash(nominator); - let mut sum = 0; - Session::validators() - .iter() - .map(|v| Staking::eras_stakers(era, v)) - .for_each(|e| { - let individual = - e.others.iter().filter(|e| e.who == nominator).collect::>(); - let len = individual.len(); - match len { - 0 => { /* not supporting this validator at all. */ }, - 1 => sum += individual[0].value, - _ => panic!("nominator cannot back a validator more than once."), - }; - }); - - let nominator_stake = Staking::slashable_balance_of(&nominator); - // a nominator cannot over-spend. - assert!( - nominator_stake >= sum, - "failed: Nominator({}) stake({}) >= sum divided({})", - nominator, - nominator_stake, - sum, - ); - - let diff = nominator_stake - sum; - assert!(diff < 100); + ext.execute_with(|| { + Staking::do_try_state(System::block_number()).unwrap(); }); -} - -fn assert_is_stash(acc: AccountId) { - assert!(Staking::bonded(&acc).is_some(), "Not a stash."); -} - -fn assert_ledger_consistent(ctrl: AccountId) { - // ensures ledger.total == ledger.active + sum(ledger.unlocking). 
- let ledger = Staking::ledger(ctrl).expect("Not a controller."); - let real_total: Balance = ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); - assert_eq!(real_total, ledger.total); - assert!( - ledger.active >= Balances::minimum_balance() || ledger.active == 0, - "{}: active ledger amount ({}) must be greater than ED {}", - ctrl, - ledger.active, - Balances::minimum_balance() - ); + } } pub(crate) fn active_era() -> EraIndex { diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 6da27da362b53..4ef064229693c 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -1552,11 +1552,12 @@ impl StakingInterface for Pallet { } } -#[cfg(feature = "try-runtime")] +#[cfg(any(test, feature = "try-runtime"))] impl Pallet { pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), &'static str> { ensure!( - T::VoterList::iter().all(|x| >::contains_key(&x)), + T::VoterList::iter() + .all(|x| >::contains_key(&x) || >::contains_key(&x)), "VoterList contains non-nominators" ); T::VoterList::try_state()?; From e8a4408c53ae2a1a0ef6cb2f7b603887a231c16f Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 18 Oct 2022 08:52:46 +0200 Subject: [PATCH 251/348] Bump clap to 4.0.x and adjust to best practices (#12381) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Bump clap to 3.2.22 * Replace `from_os_str` with `value_parser` * Replace `from_str` and `try_from_str` with `value_parser` * Move possible_values to the new format * Remove unwanted print * Add missing match branch * Update clap to 4.0.9 and make it compile * Replace deprecated `clap` macro with `command` and `value` * Move remaining `clap` attributes to `arg` * Remove no-op value_parsers * Adjust value_parser for state_version * Remove "deprecated" feature flag and bump to 4.0.11 * Improve range Co-authored-by: Bastian Köcher * Apply suggestions * Trigger CI * Fix unused error warning * Fix doc errors * Fix ArgGroup naming conflict * Change default_value to default_value_t * Use 1.. instead of 0.. 
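For context, a minimal before/after sketch of the derive-attribute migration listed above (hypothetical `Opt` struct, not taken from this diff; assumes clap 4 with the "derive" feature enabled):

  use clap::Parser;

  // clap 3 style (before):
  //   #[derive(Parser)]
  //   #[clap(name = "example")]
  //   struct Opt {
  //       #[clap(long, parse(from_os_str))]
  //       path: std::path::PathBuf,
  //       #[clap(long, arg_enum, default_value = "regular")]
  //       mode: Mode,
  //   }

  // clap 4 style (after): struct-level attributes move to `command`,
  // field-level attributes move to `arg`, typed parsing goes through the
  // built-in value parsers instead of `parse(...)`, and `ArgEnum` becomes
  // `ValueEnum`.
  #[derive(Parser)]
  #[command(name = "example")]
  struct Opt {
      /// `PathBuf` no longer needs `parse(from_os_str)`.
      #[arg(long)]
      path: std::path::PathBuf,

      /// Enum values are parsed via `ValueEnum`; a string default still works.
      #[arg(long, value_enum, default_value = "regular")]
      mode: Mode,
  }

  #[derive(Clone, clap::ValueEnum)]
  enum Mode {
      Regular,
      Profile,
  }

  fn main() {
      let opt = Opt::parse();
      let _ = (opt.path, opt.mode);
  }
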
Co-authored-by: Bastian Köcher --- Cargo.lock | 60 ++++----- bin/node-template/node/Cargo.toml | 2 +- bin/node-template/node/src/cli.rs | 6 +- bin/node/bench/Cargo.toml | 2 +- bin/node/bench/src/main.rs | 10 +- bin/node/cli/Cargo.toml | 6 +- bin/node/cli/build.rs | 2 +- bin/node/cli/src/cli.rs | 10 +- bin/node/inspect/Cargo.toml | 2 +- bin/node/inspect/src/cli.rs | 4 +- bin/utils/chain-spec-builder/Cargo.toml | 2 +- bin/utils/chain-spec-builder/src/main.rs | 22 ++-- bin/utils/subkey/Cargo.toml | 2 +- bin/utils/subkey/src/lib.rs | 2 +- client/cli/Cargo.toml | 2 +- client/cli/src/arg_enums.rs | 120 +++++++++--------- client/cli/src/commands/build_spec_cmd.rs | 4 +- client/cli/src/commands/check_block_cmd.rs | 4 +- client/cli/src/commands/export_blocks_cmd.rs | 8 +- client/cli/src/commands/export_state_cmd.rs | 2 +- client/cli/src/commands/generate.rs | 4 +- client/cli/src/commands/generate_node_key.rs | 6 +- client/cli/src/commands/import_blocks_cmd.rs | 6 +- client/cli/src/commands/insert_key.rs | 8 +- client/cli/src/commands/inspect_key.rs | 6 +- client/cli/src/commands/inspect_node_key.rs | 8 +- client/cli/src/commands/purge_chain_cmd.rs | 2 +- client/cli/src/commands/revert_cmd.rs | 2 +- client/cli/src/commands/run_cmd.rs | 80 ++++++------ client/cli/src/commands/sign.rs | 8 +- client/cli/src/commands/vanity.rs | 4 +- client/cli/src/commands/verify.rs | 6 +- client/cli/src/lib.rs | 18 +-- client/cli/src/params/database_params.rs | 10 +- client/cli/src/params/import_params.rs | 40 +++--- client/cli/src/params/keystore_params.rs | 17 ++- client/cli/src/params/mod.rs | 22 +++- client/cli/src/params/network_params.rs | 38 +++--- client/cli/src/params/node_key_params.rs | 8 +- .../cli/src/params/offchain_worker_params.rs | 8 +- client/cli/src/params/pruning_params.rs | 4 +- client/cli/src/params/shared_params.rs | 18 +-- .../cli/src/params/transaction_pool_params.rs | 6 +- .../solution-type/fuzzer/Cargo.toml | 2 +- primitives/npos-elections/fuzzer/Cargo.toml | 4 +- .../npos-elections/fuzzer/src/phragmen_pjr.rs | 8 +- primitives/storage/src/lib.rs | 11 ++ scripts/ci/node-template-release/Cargo.toml | 2 +- scripts/ci/node-template-release/src/main.rs | 4 +- utils/frame/benchmarking-cli/Cargo.toml | 2 +- .../frame/benchmarking-cli/src/block/bench.rs | 6 +- utils/frame/benchmarking-cli/src/block/cmd.rs | 2 +- .../benchmarking-cli/src/extrinsic/bench.rs | 6 +- .../benchmarking-cli/src/extrinsic/cmd.rs | 8 +- .../frame/benchmarking-cli/src/machine/mod.rs | 12 +- .../benchmarking-cli/src/overhead/cmd.rs | 4 +- .../frame/benchmarking-cli/src/pallet/mod.rs | 62 ++++----- .../frame/benchmarking-cli/src/shared/mod.rs | 8 +- .../src/shared/weight_params.rs | 8 +- .../frame/benchmarking-cli/src/storage/cmd.rs | 22 ++-- utils/frame/frame-utilities-cli/Cargo.toml | 2 +- .../frame-utilities-cli/src/pallet_id.rs | 13 +- .../generate-bags/node-runtime/Cargo.toml | 2 +- .../generate-bags/node-runtime/src/main.rs | 6 +- utils/frame/try-runtime/cli/Cargo.toml | 2 +- .../cli/src/commands/execute_block.rs | 18 ++- .../cli/src/commands/follow_chain.rs | 8 +- .../cli/src/commands/offchain_worker.rs | 14 +- .../cli/src/commands/on_runtime_upgrade.rs | 2 +- utils/frame/try-runtime/cli/src/lib.rs | 38 +++--- 70 files changed, 434 insertions(+), 443 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 103436dcfca4e..2c1b427f0a2e2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -908,7 +908,7 @@ name = "chain-spec-builder" version = "2.0.0" dependencies = [ "ansi_term", - "clap 3.1.18", + "clap 4.0.11", "node-cli", "rand 
0.8.5", "sc-chain-spec", @@ -979,41 +979,39 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" dependencies = [ "bitflags", - "textwrap 0.11.0", + "textwrap", "unicode-width", ] [[package]] name = "clap" -version = "3.1.18" +version = "4.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b" +checksum = "4ed45cc2c62a3eff523e718d8576ba762c83a3146151093283ac62ae11933a73" dependencies = [ "atty", "bitflags", "clap_derive", "clap_lex", - "indexmap", - "lazy_static", + "once_cell", "strsim", "termcolor", - "textwrap 0.15.0", ] [[package]] name = "clap_complete" -version = "3.0.2" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a394f7ec0715b42a4e52b294984c27c9a61f77c8d82f7774c5198350be143f19" +checksum = "11cba7abac9b56dfe2f035098cdb3a43946f276e6db83b72c4e692343f9aab9a" dependencies = [ - "clap 3.1.18", + "clap 4.0.11", ] [[package]] name = "clap_derive" -version = "3.1.18" +version = "4.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c" +checksum = "db342ce9fda24fb191e2ed4e102055a4d381c1086a06630174cd8da8d5d917ce" dependencies = [ "heck", "proc-macro-error", @@ -1024,9 +1022,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213" +checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" dependencies = [ "os_str_bytes", ] @@ -2127,7 +2125,7 @@ dependencies = [ "Inflector", "array-bytes", "chrono", - "clap 3.1.18", + "clap 4.0.11", "comfy-table", "frame-benchmarking", "frame-support", @@ -2208,7 +2206,7 @@ dependencies = [ name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 3.1.18", + "clap 4.0.11", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-support", @@ -4441,7 +4439,7 @@ name = "node-bench" version = "0.9.0-dev" dependencies = [ "array-bytes", - "clap 3.1.18", + "clap 4.0.11", "derive_more", "fs_extra", "futures", @@ -4480,7 +4478,7 @@ dependencies = [ "array-bytes", "assert_cmd", "async-std", - "clap 3.1.18", + "clap 4.0.11", "clap_complete", "criterion", "frame-benchmarking-cli", @@ -4597,7 +4595,7 @@ dependencies = [ name = "node-inspect" version = "0.9.0-dev" dependencies = [ - "clap 3.1.18", + "clap 4.0.11", "parity-scale-codec", "sc-cli", "sc-client-api", @@ -4656,7 +4654,7 @@ dependencies = [ name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ - "clap 3.1.18", + "clap 4.0.11", "generate-bags", "kitchensink-runtime", ] @@ -4665,7 +4663,7 @@ dependencies = [ name = "node-template" version = "4.0.0-dev" dependencies = [ - "clap 3.1.18", + "clap 4.0.11", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", @@ -6855,9 +6853,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.39" +version = "1.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" +checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" 
dependencies = [ "unicode-ident", ] @@ -7653,7 +7651,7 @@ version = "0.10.0-dev" dependencies = [ "array-bytes", "chrono", - "clap 3.1.18", + "clap 4.0.11", "fdlimit", "futures", "libp2p", @@ -9762,7 +9760,7 @@ dependencies = [ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 3.1.18", + "clap 4.0.11", "honggfuzz", "parity-scale-codec", "rand 0.8.5", @@ -10259,7 +10257,7 @@ dependencies = [ name = "subkey" version = "2.0.2" dependencies = [ - "clap 3.1.18", + "clap 4.0.11", "sc-cli", ] @@ -10287,7 +10285,7 @@ dependencies = [ name = "substrate-frame-cli" version = "4.0.0-dev" dependencies = [ - "clap 3.1.18", + "clap 4.0.11", "frame-support", "frame-system", "sc-cli", @@ -10617,12 +10615,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "textwrap" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" - [[package]] name = "thiserror" version = "1.0.30" @@ -11033,7 +11025,7 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" name = "try-runtime-cli" version = "0.10.0-dev" dependencies = [ - "clap 3.1.18", + "clap 4.0.11", "frame-try-runtime", "jsonrpsee", "log", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index dc1d3caa8b75d..c60018e14969c 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] name = "node-template" [dependencies] -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", features = ["wasmtime"] } sp-core = { version = "6.0.0", path = "../../../primitives/core" } diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index 4ab4d34210c98..dd610477ac469 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -2,7 +2,7 @@ use sc_cli::RunCmd; #[derive(Debug, clap::Parser)] pub struct Cli { - #[clap(subcommand)] + #[command(subcommand)] pub subcommand: Option, #[clap(flatten)] @@ -12,7 +12,7 @@ pub struct Cli { #[derive(Debug, clap::Subcommand)] pub enum Subcommand { /// Key management cli utilities - #[clap(subcommand)] + #[command(subcommand)] Key(sc_cli::KeySubcommand), /// Build a chain specification. @@ -37,7 +37,7 @@ pub enum Subcommand { Revert(sc_cli::RevertCmd), /// Sub-commands concerned with benchmarking. - #[clap(subcommand)] + #[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), /// Try some command against runtime state. 
diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 42953da837100..54a1d4900c60b 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" [dependencies] array-bytes = "4.1" -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } log = "0.4.17" node-primitives = { version = "2.0.0", path = "../primitives" } node-testing = { version = "3.0.0-dev", path = "../testing" } diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index d97c7af26535b..8a5d99640eb1b 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -43,18 +43,18 @@ use crate::{ }; #[derive(Debug, Parser)] -#[clap(name = "node-bench", about = "Node integration benchmarks")] +#[command(name = "node-bench", about = "Node integration benchmarks")] struct Opt { /// Show list of all available benchmarks. /// /// Will output ("name", "path"). Benchmarks can then be filtered by path. - #[clap(short, long)] + #[arg(short, long)] list: bool, /// Machine readable json output. /// /// This also suppresses all regular output (except to stderr) - #[clap(short, long)] + #[arg(short, long)] json: bool, /// Filter benchmarks. @@ -63,7 +63,7 @@ struct Opt { filter: Option, /// Number of transactions for block import with `custom` size. - #[clap(long)] + #[arg(long)] transactions: Option, /// Mode @@ -72,7 +72,7 @@ struct Opt { /// /// "profile" mode adds pauses between measurable runs, /// so that actual interval can be selected in the profiler of choice. - #[clap(short, long, default_value = "regular")] + #[arg(short, long, default_value = "regular")] mode: BenchmarkMode, } diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index edb434e3c04e3..25e7ef095ed3e 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -35,7 +35,7 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies array-bytes = "4.1" -clap = { version = "3.1.18", features = ["derive"], optional = true } +clap = { version = "4.0.9", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.0.0" } serde = { version = "1.0.136", features = ["derive"] } jsonrpsee = { version = "0.15.1", features = ["server"] } @@ -136,8 +136,8 @@ remote-externalities = { path = "../../../utils/frame/remote-externalities" } pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" } [build-dependencies] -clap = { version = "3.1.18", optional = true } -clap_complete = { version = "3.0", optional = true } +clap = { version = "4.0.9", optional = true } +clap_complete = { version = "4.0.2", optional = true } node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } frame-benchmarking-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/benchmarking-cli" } substrate-build-script-utils = { version = "3.0.0", optional = true, path = "../../../utils/build-script-utils" } diff --git a/bin/node/cli/build.rs b/bin/node/cli/build.rs index 6a3d13dda6a00..e8142b297f1b2 100644 --- a/bin/node/cli/build.rs +++ b/bin/node/cli/build.rs @@ -25,7 +25,7 @@ fn main() { mod cli { include!("src/cli.rs"); - use clap::{ArgEnum, CommandFactory}; + use clap::{CommandFactory, ValueEnum}; use clap_complete::{generate_to, Shell}; use std::{env, fs, path::Path}; use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; diff --git a/bin/node/cli/src/cli.rs 
b/bin/node/cli/src/cli.rs index 5b2977599bab0..bb7f8a4c60aa9 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -20,7 +20,7 @@ #[derive(Debug, clap::Parser)] pub struct Cli { /// Possible subcommand with parameters. - #[clap(subcommand)] + #[command(subcommand)] pub subcommand: Option, #[allow(missing_docs)] @@ -34,7 +34,7 @@ pub struct Cli { /// /// The results are then printed out in the logs, and also sent as part of /// telemetry, if telemetry is enabled. - #[clap(long)] + #[arg(long)] pub no_hardware_benchmarks: bool, } @@ -42,7 +42,7 @@ pub struct Cli { #[derive(Debug, clap::Subcommand)] pub enum Subcommand { /// The custom inspect subcommmand for decoding blocks and extrinsics. - #[clap( + #[command( name = "inspect", about = "Decode given block or extrinsic using current native runtime." )] @@ -50,7 +50,7 @@ pub enum Subcommand { /// Sub-commands concerned with benchmarking. /// The pallet benchmarking moved to the `pallet` sub-command. - #[clap(subcommand)] + #[command(subcommand)] Benchmark(frame_benchmarking_cli::BenchmarkCmd), /// Try some command against runtime state. @@ -62,7 +62,7 @@ pub enum Subcommand { TryRuntime, /// Key management cli utilities - #[clap(subcommand)] + #[command(subcommand)] Key(sc_cli::KeySubcommand), /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index ede96a76206ce..b7eccf9c36bd2 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "3.1.6", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } thiserror = "1.0" sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } diff --git a/bin/node/inspect/src/cli.rs b/bin/node/inspect/src/cli.rs index cc1f232e1fe0f..fa6344ac346e9 100644 --- a/bin/node/inspect/src/cli.rs +++ b/bin/node/inspect/src/cli.rs @@ -46,7 +46,7 @@ pub enum InspectSubCmd { /// Can be either a block hash (no 0x prefix) or a number to retrieve existing block, /// or a 0x-prefixed bytes hex string, representing SCALE encoding of /// a block. - #[clap(value_name = "HASH or NUMBER or BYTES")] + #[arg(value_name = "HASH or NUMBER or BYTES")] input: String, }, /// Decode extrinsic with native version of runtime and print out the details. @@ -56,7 +56,7 @@ pub enum InspectSubCmd { /// Can be either a block hash (no 0x prefix) or number and the index, in the form /// of `{block}:{index}` or a 0x-prefixed bytes hex string, /// representing SCALE encoding of an extrinsic. 
- #[clap(value_name = "BLOCK:INDEX or BYTES")] + #[arg(value_name = "BLOCK:INDEX or BYTES")] input: String, }, } diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index e3deb54ea8ac9..dc53dc08ec6af 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } rand = "0.8" node-cli = { version = "3.0.0-dev", path = "../../node/cli" } sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } diff --git a/bin/utils/chain-spec-builder/src/main.rs b/bin/utils/chain-spec-builder/src/main.rs index 641416154115b..7dfcaed773f40 100644 --- a/bin/utils/chain-spec-builder/src/main.rs +++ b/bin/utils/chain-spec-builder/src/main.rs @@ -37,51 +37,51 @@ use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; /// A utility to easily create a testnet chain spec definition with a given set /// of authorities and endowed accounts and/or generate random accounts. #[derive(Parser)] -#[clap(rename_all = "kebab-case")] +#[command(rename_all = "kebab-case")] enum ChainSpecBuilder { /// Create a new chain spec with the given authorities, endowed and sudo /// accounts. New { /// Authority key seed. - #[clap(long, short, required = true)] + #[arg(long, short, required = true)] authority_seeds: Vec, /// Active nominators (SS58 format), each backing a random subset of the aforementioned /// authorities. - #[clap(long, short, default_value = "0")] + #[arg(long, short, default_value = "0")] nominator_accounts: Vec, /// Endowed account address (SS58 format). - #[clap(long, short)] + #[arg(long, short)] endowed_accounts: Vec, /// Sudo account address (SS58 format). - #[clap(long, short)] + #[arg(long, short)] sudo_account: String, /// The path where the chain spec should be saved. - #[clap(long, short, default_value = "./chain_spec.json")] + #[arg(long, short, default_value = "./chain_spec.json")] chain_spec_path: PathBuf, }, /// Create a new chain spec with the given number of authorities and endowed /// accounts. Random keys will be generated as required. Generate { /// The number of authorities. - #[clap(long, short)] + #[arg(long, short)] authorities: usize, /// The number of nominators backing the aforementioned authorities. /// /// Will nominate a random subset of `authorities`. - #[clap(long, short, default_value = "0")] + #[arg(long, short, default_value_t = 0)] nominators: usize, /// The number of endowed accounts. - #[clap(long, short, default_value = "0")] + #[arg(long, short, default_value_t = 0)] endowed: usize, /// The path where the chain spec should be saved. - #[clap(long, short, default_value = "./chain_spec.json")] + #[arg(long, short, default_value = "./chain_spec.json")] chain_spec_path: PathBuf, /// Path to use when saving generated keystores for each authority. /// /// At this path, a new folder will be created for each authority's /// keystore named `auth-$i` where `i` is the authority index, i.e. /// `auth-0`, `auth-1`, etc. 
- #[clap(long, short)] + #[arg(long, short)] keystore_path: Option, }, } diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 4c4e47e702be6..77c323821a508 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -17,5 +17,5 @@ path = "src/main.rs" name = "subkey" [dependencies] -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } diff --git a/bin/utils/subkey/src/lib.rs b/bin/utils/subkey/src/lib.rs index 6773ba822340f..280b90848fbf5 100644 --- a/bin/utils/subkey/src/lib.rs +++ b/bin/utils/subkey/src/lib.rs @@ -23,7 +23,7 @@ use sc_cli::{ }; #[derive(Debug, Parser)] -#[clap( +#[command( name = "subkey", author = "Parity Team ", about = "Utility for generating and restoring with Substrate keys", diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 88568509c8709..04824859ce8ab 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "4.1" chrono = "0.4.10" -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive", "string"] } fdlimit = "0.2.1" futures = "0.3.21" libp2p = "0.49.0" diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index 283fef985dfb9..d761c854a6f0d 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -16,13 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Definitions of [`ArgEnum`] types. +//! Definitions of [`ValueEnum`] types. -use clap::ArgEnum; +use clap::{builder::PossibleValue, ValueEnum}; /// The instantiation strategy to use in compiled mode. -#[derive(Debug, Clone, Copy, ArgEnum)] -#[clap(rename_all = "kebab-case")] +#[derive(Debug, Clone, Copy, ValueEnum)] +#[value(rename_all = "kebab-case")] pub enum WasmtimeInstantiationStrategy { /// Pool the instances to avoid initializing everything from scratch /// on each instantiation. Use copy-on-write memory when possible. @@ -59,20 +59,22 @@ pub enum WasmExecutionMethod { Compiled, } -impl std::fmt::Display for WasmExecutionMethod { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Interpreted => write!(f, "Interpreted"), - Self::Compiled => write!(f, "Compiled"), +const INTERPRETED_NAME: &str = "interpreted-i-know-what-i-do"; + +impl clap::ValueEnum for WasmExecutionMethod { + /// All possible argument values, in display order. + fn value_variants<'a>() -> &'a [Self] { + let variants = &[Self::Interpreted, Self::Compiled]; + if cfg!(feature = "wasmtime") { + variants + } else { + &variants[..1] } } -} - -impl std::str::FromStr for WasmExecutionMethod { - type Err = String; - fn from_str(s: &str) -> Result { - if s.eq_ignore_ascii_case("interpreted-i-know-what-i-do") { + /// Parse an argument into `Self`. + fn from_str(s: &str, _: bool) -> Result { + if s.eq_ignore_ascii_case(INTERPRETED_NAME) { Ok(Self::Interpreted) } else if s.eq_ignore_ascii_case("compiled") { #[cfg(feature = "wasmtime")] @@ -84,19 +86,29 @@ impl std::str::FromStr for WasmExecutionMethod { Err("`Compiled` variant requires the `wasmtime` feature to be enabled".into()) } } else { - Err(format!("Unknown variant `{}`, known variants: {:?}", s, Self::variants())) + Err(format!("Unknown variant `{}`", s)) + } + } + + /// The canonical argument value. 
+ /// + /// The value is `None` for skipped variants. + fn to_possible_value(&self) -> Option { + match self { + #[cfg(feature = "wasmtime")] + WasmExecutionMethod::Compiled => Some(PossibleValue::new("compiled")), + #[cfg(not(feature = "wasmtime"))] + WasmExecutionMethod::Compiled => None, + WasmExecutionMethod::Interpreted => Some(PossibleValue::new(INTERPRETED_NAME)), } } } -impl WasmExecutionMethod { - /// Returns all the variants of this enum to be shown in the cli. - pub fn variants() -> &'static [&'static str] { - let variants = &["interpreted-i-know-what-i-do", "compiled"]; - if cfg!(feature = "wasmtime") { - variants - } else { - &variants[..1] +impl std::fmt::Display for WasmExecutionMethod { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Interpreted => write!(f, "Interpreted"), + Self::Compiled => write!(f, "Compiled"), } } } @@ -133,15 +145,15 @@ pub fn execution_method_from_cli( /// The default [`WasmExecutionMethod`]. #[cfg(feature = "wasmtime")] -pub const DEFAULT_WASM_EXECUTION_METHOD: &str = "compiled"; +pub const DEFAULT_WASM_EXECUTION_METHOD: WasmExecutionMethod = WasmExecutionMethod::Compiled; /// The default [`WasmExecutionMethod`]. #[cfg(not(feature = "wasmtime"))] -pub const DEFAULT_WASM_EXECUTION_METHOD: &str = "interpreted-i-know-what-i-do"; +pub const DEFAULT_WASM_EXECUTION_METHOD: WasmExecutionMethod = WasmExecutionMethod::Interpreted; #[allow(missing_docs)] -#[derive(Debug, Copy, Clone, PartialEq, Eq, ArgEnum)] -#[clap(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, Eq, ValueEnum)] +#[value(rename_all = "kebab-case")] pub enum TracingReceiver { /// Output the tracing records using the log. Log, @@ -156,16 +168,16 @@ impl Into for TracingReceiver { } /// The type of the node key. -#[derive(Debug, Copy, Clone, PartialEq, Eq, ArgEnum)] -#[clap(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, Eq, ValueEnum)] +#[value(rename_all = "kebab-case")] pub enum NodeKeyType { /// Use ed25519. Ed25519, } /// The crypto scheme to use. -#[derive(Debug, Copy, Clone, PartialEq, Eq, ArgEnum)] -#[clap(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, Eq, ValueEnum)] +#[value(rename_all = "kebab-case")] pub enum CryptoScheme { /// Use ed25519. Ed25519, @@ -176,8 +188,8 @@ pub enum CryptoScheme { } /// The type of the output format. -#[derive(Debug, Copy, Clone, PartialEq, Eq, ArgEnum)] -#[clap(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, Eq, ValueEnum)] +#[value(rename_all = "kebab-case")] pub enum OutputType { /// Output as json. Json, @@ -186,8 +198,8 @@ pub enum OutputType { } /// How to execute blocks -#[derive(Debug, Copy, Clone, PartialEq, Eq, ArgEnum)] -#[clap(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, Eq, ValueEnum)] +#[value(rename_all = "kebab-case")] pub enum ExecutionStrategy { /// Execute with native build (if available, WebAssembly otherwise). Native, @@ -212,8 +224,8 @@ impl Into for ExecutionStrategy { /// Available RPC methods. #[allow(missing_docs)] -#[derive(Debug, Copy, Clone, PartialEq, ArgEnum)] -#[clap(rename_all = "kebab-case")] +#[derive(Debug, Copy, Clone, PartialEq, ValueEnum)] +#[value(rename_all = "kebab-case")] pub enum RpcMethods { /// Expose every RPC method only when RPC is listening on `localhost`, /// otherwise serve only safe RPC methods. 
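
When a variant has to be hidden or renamed depending on a feature flag, as with `WasmExecutionMethod` above, `ValueEnum` is implemented by hand instead of derived. A minimal standalone sketch of that trait surface (an assumption for illustration; `ExampleMethod` is hypothetical):

use clap::{builder::PossibleValue, ValueEnum};

#[derive(Debug, Clone, Copy)]
enum ExampleMethod {
    Fast,
    Slow,
}

impl ValueEnum for ExampleMethod {
    /// All variants the CLI should accept, in display order.
    fn value_variants<'a>() -> &'a [Self] {
        &[Self::Fast, Self::Slow]
    }

    /// The canonical spelling for each variant; returning `None` hides it.
    fn to_possible_value(&self) -> Option<PossibleValue> {
        match self {
            Self::Fast => Some(PossibleValue::new("fast")),
            Self::Slow => Some(PossibleValue::new("slow")),
        }
    }

    // `from_str` has a default implementation driven by `to_possible_value`,
    // so it only needs overriding for aliases or feature-gated parsing.
}
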
@@ -235,7 +247,8 @@ impl Into for RpcMethods { } /// Database backend -#[derive(Debug, Clone, PartialEq, Copy)] +#[derive(Debug, Clone, PartialEq, Copy, clap::ValueEnum)] +#[value(rename_all = "lower")] pub enum Database { /// Facebooks RocksDB #[cfg(feature = "rocksdb")] @@ -246,29 +259,10 @@ pub enum Database { /// instance of ParityDb Auto, /// ParityDb. + #[value(name = "paritydb-experimental")] ParityDbDeprecated, } -impl std::str::FromStr for Database { - type Err = String; - - fn from_str(s: &str) -> Result { - #[cfg(feature = "rocksdb")] - if s.eq_ignore_ascii_case("rocksdb") { - return Ok(Self::RocksDb) - } - if s.eq_ignore_ascii_case("paritydb-experimental") { - return Ok(Self::ParityDbDeprecated) - } else if s.eq_ignore_ascii_case("paritydb") { - return Ok(Self::ParityDb) - } else if s.eq_ignore_ascii_case("auto") { - Ok(Self::Auto) - } else { - Err(format!("Unknown variant `{}`, known variants: {:?}", s, Self::variants())) - } - } -} - impl Database { /// Returns all the variants of this enum to be shown in the cli. pub const fn variants() -> &'static [&'static str] { @@ -284,8 +278,8 @@ impl Database { /// Whether off-chain workers are enabled. #[allow(missing_docs)] -#[derive(Debug, Clone, ArgEnum)] -#[clap(rename_all = "kebab-case")] +#[derive(Debug, Clone, ValueEnum)] +#[value(rename_all = "kebab-case")] pub enum OffchainWorkerEnabled { /// Always have offchain worker enabled. Always, @@ -296,8 +290,8 @@ pub enum OffchainWorkerEnabled { } /// Syncing mode. -#[derive(Debug, Clone, Copy, ArgEnum, PartialEq)] -#[clap(rename_all = "kebab-case")] +#[derive(Debug, Clone, Copy, ValueEnum, PartialEq)] +#[value(rename_all = "kebab-case")] pub enum SyncMode { /// Full sync. Download end verify all blocks. Full, diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs index 3196a3e7b915f..5ab3ce9e88a09 100644 --- a/client/cli/src/commands/build_spec_cmd.rs +++ b/client/cli/src/commands/build_spec_cmd.rs @@ -34,14 +34,14 @@ use std::io::Write; #[derive(Debug, Clone, Parser)] pub struct BuildSpecCmd { /// Force raw genesis storage output. - #[clap(long)] + #[arg(long)] pub raw: bool, /// Disable adding the default bootnode to the specification. /// /// By default the `/ip4/127.0.0.1/tcp/30333/p2p/NODE_PEER_ID` bootnode is added to the /// specification when no bootnode exists. - #[clap(long)] + #[arg(long)] pub disable_default_bootnode: bool, #[allow(missing_docs)] diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index b7e69b1360a0a..3e25eab2c4350 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -30,13 +30,13 @@ use std::{fmt::Debug, str::FromStr, sync::Arc}; #[derive(Debug, Clone, Parser)] pub struct CheckBlockCmd { /// Block hash or number - #[clap(value_name = "HASH or NUMBER")] + #[arg(value_name = "HASH or NUMBER")] pub input: BlockNumberOrHash, /// The default number of 64KB pages to ever allocate for Wasm execution. /// /// Don't alter this unless you know what you're doing. 
- #[clap(long, value_name = "COUNT")] + #[arg(long, value_name = "COUNT")] pub default_heap_pages: Option, #[allow(missing_docs)] diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index ff35b5a51fcad..e2f83200e511c 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -32,23 +32,23 @@ use std::{fmt::Debug, fs, io, path::PathBuf, str::FromStr, sync::Arc}; #[derive(Debug, Clone, Parser)] pub struct ExportBlocksCmd { /// Output file name or stdout if unspecified. - #[clap(parse(from_os_str))] + #[arg()] pub output: Option, /// Specify starting block number. /// /// Default is 1. - #[clap(long, value_name = "BLOCK")] + #[arg(long, value_name = "BLOCK")] pub from: Option, /// Specify last block number. /// /// Default is best block. - #[clap(long, value_name = "BLOCK")] + #[arg(long, value_name = "BLOCK")] pub to: Option, /// Use binary output rather than JSON. - #[clap(long)] + #[arg(long)] pub binary: bool, #[allow(missing_docs)] diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs index b76724caf0fef..63aa12213b3da 100644 --- a/client/cli/src/commands/export_state_cmd.rs +++ b/client/cli/src/commands/export_state_cmd.rs @@ -32,7 +32,7 @@ use std::{fmt::Debug, io::Write, str::FromStr, sync::Arc}; #[derive(Debug, Clone, Parser)] pub struct ExportStateCmd { /// Block hash or number. - #[clap(value_name = "HASH or NUMBER")] + #[arg(value_name = "HASH or NUMBER")] pub input: Option, #[allow(missing_docs)] diff --git a/client/cli/src/commands/generate.rs b/client/cli/src/commands/generate.rs index 5b1b708f8669c..461cb98bc2e51 100644 --- a/client/cli/src/commands/generate.rs +++ b/client/cli/src/commands/generate.rs @@ -25,10 +25,10 @@ use clap::Parser; /// The `generate` command #[derive(Debug, Clone, Parser)] -#[clap(name = "generate", about = "Generate a random account")] +#[command(name = "generate", about = "Generate a random account")] pub struct GenerateCmd { /// The number of words in the phrase to generate. One of 12 (default), 15, 18, 21 and 24. - #[clap(short = 'w', long, value_name = "WORDS")] + #[arg(short = 'w', long, value_name = "WORDS")] words: Option, #[allow(missing_docs)] diff --git a/client/cli/src/commands/generate_node_key.rs b/client/cli/src/commands/generate_node_key.rs index 3afd99d60a123..e84b4a71d6d72 100644 --- a/client/cli/src/commands/generate_node_key.rs +++ b/client/cli/src/commands/generate_node_key.rs @@ -28,7 +28,7 @@ use std::{ /// The `generate-node-key` command #[derive(Debug, Parser)] -#[clap( +#[command( name = "generate-node-key", about = "Generate a random node key, write it to a file or stdout \ and write the corresponding peer-id to stderr" @@ -37,13 +37,13 @@ pub struct GenerateNodeKeyCmd { /// Name of file to save secret key to. /// /// If not given, the secret key is printed to stdout. - #[clap(long)] + #[arg(long)] file: Option, /// The output is in raw binary format. /// /// If not given, the output is written as an hex encoded string. - #[clap(long)] + #[arg(long)] bin: bool, } diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs index 749824834bf7b..debc697242ddd 100644 --- a/client/cli/src/commands/import_blocks_cmd.rs +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -37,17 +37,17 @@ use std::{ #[derive(Debug, Parser)] pub struct ImportBlocksCmd { /// Input file or stdin if unspecified. 
- #[clap(parse(from_os_str))] + #[arg()] pub input: Option, /// The default number of 64KB pages to ever allocate for Wasm execution. /// /// Don't alter this unless you know what you're doing. - #[clap(long, value_name = "COUNT")] + #[arg(long, value_name = "COUNT")] pub default_heap_pages: Option, /// Try importing blocks from binary format rather than JSON. - #[clap(long)] + #[arg(long)] pub binary: bool, #[allow(missing_docs)] diff --git a/client/cli/src/commands/insert_key.rs b/client/cli/src/commands/insert_key.rs index 5029ecd29248c..7d66a680df8c0 100644 --- a/client/cli/src/commands/insert_key.rs +++ b/client/cli/src/commands/insert_key.rs @@ -29,16 +29,16 @@ use std::sync::Arc; /// The `insert` command #[derive(Debug, Clone, Parser)] -#[clap(name = "insert", about = "Insert a key to the keystore of a node.")] +#[command(name = "insert", about = "Insert a key to the keystore of a node.")] pub struct InsertKeyCmd { /// The secret key URI. /// If the value is a file, the file content is used as URI. /// If not given, you will be prompted for the URI. - #[clap(long)] + #[arg(long)] suri: Option, /// Key type, examples: "gran", or "imon" - #[clap(long)] + #[arg(long)] key_type: String, #[allow(missing_docs)] @@ -50,7 +50,7 @@ pub struct InsertKeyCmd { pub keystore_params: KeystoreParams, /// The cryptography scheme that should be used to generate the key out of the given URI. - #[clap(long, value_name = "SCHEME", arg_enum, ignore_case = true)] + #[arg(long, value_name = "SCHEME", value_enum, ignore_case = true)] pub scheme: CryptoScheme, } diff --git a/client/cli/src/commands/inspect_key.rs b/client/cli/src/commands/inspect_key.rs index 59f105dc52a5c..369fd10926dce 100644 --- a/client/cli/src/commands/inspect_key.rs +++ b/client/cli/src/commands/inspect_key.rs @@ -27,7 +27,7 @@ use std::str::FromStr; /// The `inspect` command #[derive(Debug, Parser)] -#[clap( +#[command( name = "inspect", about = "Gets a public key and a SS58 address from the provided Secret URI" )] @@ -44,7 +44,7 @@ pub struct InspectKeyCmd { uri: Option, /// Is the given `uri` a hex encoded public key? - #[clap(long)] + #[arg(long)] public: bool, #[allow(missing_docs)] @@ -72,7 +72,7 @@ pub struct InspectKeyCmd { /// /// If there is no derivation in `--uri`, the public key will be checked against the public key /// of `--uri` directly. - #[clap(long, conflicts_with = "public")] + #[arg(long, conflicts_with = "public")] pub expect_public: Option, } diff --git a/client/cli/src/commands/inspect_node_key.rs b/client/cli/src/commands/inspect_node_key.rs index a9dff9c760d33..9300007cb6bf2 100644 --- a/client/cli/src/commands/inspect_node_key.rs +++ b/client/cli/src/commands/inspect_node_key.rs @@ -28,7 +28,7 @@ use std::{ /// The `inspect-node-key` command #[derive(Debug, Parser)] -#[clap( +#[command( name = "inspect-node-key", about = "Load a node key from a file or stdin and print the corresponding peer-id." )] @@ -36,18 +36,18 @@ pub struct InspectNodeKeyCmd { /// Name of file to read the secret key from. /// /// If not given, the secret key is read from stdin (up to EOF). - #[clap(long)] + #[arg(long)] file: Option, /// The input is in raw binary format. /// /// If not given, the input is read as an hex encoded string. - #[clap(long)] + #[arg(long)] bin: bool, /// This argument is deprecated and has no effect for this command. 
#[deprecated(note = "Network identifier is not used for node-key inspection")] - #[clap(short = 'n', long = "network", value_name = "NETWORK", ignore_case = true)] + #[arg(short = 'n', long = "network", value_name = "NETWORK", ignore_case = true)] pub network_scheme: Option, } diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index b89487a18f779..9a3aeee50e944 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -33,7 +33,7 @@ use std::{ #[derive(Debug, Clone, Parser)] pub struct PurgeChainCmd { /// Skip interactive prompt by answering yes automatically. - #[clap(short = 'y')] + #[arg(short = 'y')] pub yes: bool, #[allow(missing_docs)] diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index f65e348b37b89..8477630cf9404 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -31,7 +31,7 @@ use std::{fmt::Debug, str::FromStr, sync::Arc}; #[derive(Debug, Parser)] pub struct RevertCmd { /// Number of blocks to revert. - #[clap(default_value = "256")] + #[arg(default_value = "256")] pub num: GenericNumber, #[allow(missing_docs)] diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 11cfb9d0ff651..35181d83f805f 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -42,12 +42,12 @@ pub struct RunCmd { /// The node will be started with the authority role and actively /// participate in any consensus task that it can (e.g. depending on /// availability of local keys). - #[clap(long)] + #[arg(long)] pub validator: bool, /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA /// observer. - #[clap(long)] + #[arg(long)] pub no_grandpa: bool, /// Listen to all RPC interfaces. @@ -56,13 +56,13 @@ pub struct RunCmd { /// proxy server to filter out dangerous methods. More details: /// . /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. - #[clap(long)] + #[arg(long)] pub rpc_external: bool, /// Listen to all RPC interfaces. /// /// Same as `--rpc-external`. - #[clap(long)] + #[arg(long)] pub unsafe_rpc_external: bool, /// RPC methods to expose. @@ -71,12 +71,12 @@ pub struct RunCmd { /// - `safe`: Exposes only a safe subset of RPC methods, denying unsafe RPC methods. /// - `auto`: Acts as `safe` if RPC is served externally, e.g. when `--{rpc,ws}-external` is /// passed, otherwise acts as `unsafe`. - #[clap( + #[arg( long, value_name = "METHOD SET", - arg_enum, + value_enum, ignore_case = true, - default_value = "auto", + default_value_t = RpcMethods::Auto, verbatim_doc_comment )] pub rpc_methods: RpcMethods, @@ -87,59 +87,59 @@ pub struct RunCmd { /// proxy server to filter out dangerous methods. More details: /// . /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. - #[clap(long)] + #[arg(long)] pub ws_external: bool, /// Listen to all Websocket interfaces. /// /// Same as `--ws-external` but doesn't warn you about it. - #[clap(long)] + #[arg(long)] pub unsafe_ws_external: bool, /// DEPRECATED, this has no affect anymore. Use `rpc_max_request_size` or /// `rpc_max_response_size` instead. - #[clap(long)] + #[arg(long)] pub rpc_max_payload: Option, /// Set the the maximum RPC request payload size for both HTTP and WS in megabytes. /// Default is 15MiB. 
- #[clap(long)] + #[arg(long)] pub rpc_max_request_size: Option, /// Set the the maximum RPC response payload size for both HTTP and WS in megabytes. /// Default is 15MiB. - #[clap(long)] + #[arg(long)] pub rpc_max_response_size: Option, /// Set the the maximum concurrent subscriptions per connection. /// Default is 1024. - #[clap(long)] + #[arg(long)] pub rpc_max_subscriptions_per_connection: Option, /// Expose Prometheus exporter on all interfaces. /// /// Default is local. - #[clap(long)] + #[arg(long)] pub prometheus_external: bool, /// DEPRECATED, IPC support has been removed. - #[clap(long, value_name = "PATH")] + #[arg(long, value_name = "PATH")] pub ipc_path: Option, /// Specify HTTP RPC server TCP port. - #[clap(long, value_name = "PORT")] + #[arg(long, value_name = "PORT")] pub rpc_port: Option, /// Specify WebSockets RPC server TCP port. - #[clap(long, value_name = "PORT")] + #[arg(long, value_name = "PORT")] pub ws_port: Option, /// Maximum number of WS RPC server connections. - #[clap(long, value_name = "COUNT")] + #[arg(long, value_name = "COUNT")] pub ws_max_connections: Option, /// DEPRECATED, this has no affect anymore. Use `rpc_max_response_size` instead. - #[clap(long)] + #[arg(long)] pub ws_max_out_buffer_capacity: Option, /// Specify browser Origins allowed to access the HTTP & WS RPC servers. @@ -148,29 +148,29 @@ pub struct RunCmd { /// value). Value of `all` will disable origin validation. Default is to /// allow localhost and origins. When running in /// --dev mode the default is to allow all origins. - #[clap(long, value_name = "ORIGINS", parse(from_str = parse_cors))] + #[arg(long, value_name = "ORIGINS", value_parser = parse_cors)] pub rpc_cors: Option, /// Specify Prometheus exporter TCP Port. - #[clap(long, value_name = "PORT")] + #[arg(long, value_name = "PORT")] pub prometheus_port: Option, /// Do not expose a Prometheus exporter endpoint. /// /// Prometheus metric endpoint is enabled by default. - #[clap(long)] + #[arg(long)] pub no_prometheus: bool, /// The human-readable name for this node. /// /// The node name will be reported to the telemetry server, if enabled. - #[clap(long, value_name = "NAME")] + #[arg(long, value_name = "NAME")] pub name: Option, /// Disable connecting to the Substrate telemetry server. /// /// Telemetry is on by default on global chains. - #[clap(long)] + #[arg(long)] pub no_telemetry: bool, /// The URL of the telemetry server to connect to. @@ -179,7 +179,7 @@ pub struct RunCmd { /// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting /// the least verbosity. /// Expected format is 'URL VERBOSITY', e.g. `--telemetry-url 'wss://foo/bar 0'`. - #[clap(long = "telemetry-url", value_name = "URL VERBOSITY", parse(try_from_str = parse_telemetry_endpoints))] + #[arg(long = "telemetry-url", value_name = "URL VERBOSITY", value_parser = parse_telemetry_endpoints)] pub telemetry_endpoints: Vec<(String, u8)>, #[allow(missing_docs)] @@ -203,40 +203,40 @@ pub struct RunCmd { pub pool_config: TransactionPoolParams, /// Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. - #[clap(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] + #[arg(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub alice: bool, /// Shortcut for `--name Bob --validator` with session keys for `Bob` added to keystore. 
- #[clap(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] + #[arg(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] pub bob: bool, /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to /// keystore. - #[clap(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] + #[arg(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] pub charlie: bool, /// Shortcut for `--name Dave --validator` with session keys for `Dave` added to keystore. - #[clap(long, conflicts_with_all = &["alice", "bob", "charlie", "eve", "ferdie", "one", "two"])] + #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "eve", "ferdie", "one", "two"])] pub dave: bool, /// Shortcut for `--name Eve --validator` with session keys for `Eve` added to keystore. - #[clap(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "ferdie", "one", "two"])] + #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "ferdie", "one", "two"])] pub eve: bool, /// Shortcut for `--name Ferdie --validator` with session keys for `Ferdie` added to keystore. - #[clap(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "one", "two"])] + #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "one", "two"])] pub ferdie: bool, /// Shortcut for `--name One --validator` with session keys for `One` added to keystore. - #[clap(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "two"])] + #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "two"])] pub one: bool, /// Shortcut for `--name Two --validator` with session keys for `Two` added to keystore. - #[clap(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "one"])] + #[arg(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "one"])] pub two: bool, /// Enable authoring even when offline. - #[clap(long)] + #[arg(long)] pub force_authoring: bool, #[allow(missing_docs)] @@ -246,11 +246,11 @@ pub struct RunCmd { /// The size of the instances cache for each runtime. /// /// The default value is 8 and the values higher than 256 are ignored. - #[clap(long)] + #[arg(long)] pub max_runtime_instances: Option, /// Maximum number of different runtimes that can be cached. - #[clap(long, default_value = "2")] + #[arg(long, default_value_t = 2)] pub runtime_cache_size: u8, /// Run a temporary node. @@ -262,7 +262,7 @@ pub struct RunCmd { /// which includes: database, node key and keystore. /// /// When `--dev` is given and no explicit `--base-path`, this option is implied. - #[clap(long, conflicts_with = "base-path")] + #[arg(long, conflicts_with = "base_path")] pub tmp: bool, } @@ -597,7 +597,7 @@ impl From for Option> { } /// Parse cors origins. 
-fn parse_cors(s: &str) -> Cors { +fn parse_cors(s: &str) -> Result { let mut is_all = false; let mut origins = Vec::new(); for part in s.split(',') { @@ -611,9 +611,9 @@ fn parse_cors(s: &str) -> Cors { } if is_all { - Cors::All + Ok(Cors::All) } else { - Cors::List(origins) + Ok(Cors::List(origins)) } } diff --git a/client/cli/src/commands/sign.rs b/client/cli/src/commands/sign.rs index 7d93390326717..2c3ff3a1575fd 100644 --- a/client/cli/src/commands/sign.rs +++ b/client/cli/src/commands/sign.rs @@ -23,21 +23,21 @@ use sp_core::crypto::SecretString; /// The `sign` command #[derive(Debug, Clone, Parser)] -#[clap(name = "sign", about = "Sign a message, with a given (secret) key")] +#[command(name = "sign", about = "Sign a message, with a given (secret) key")] pub struct SignCmd { /// The secret key URI. /// If the value is a file, the file content is used as URI. /// If not given, you will be prompted for the URI. - #[clap(long)] + #[arg(long)] suri: Option, /// Message to sign, if not provided you will be prompted to /// pass the message via STDIN - #[clap(long)] + #[arg(long)] message: Option, /// The message on STDIN is hex-encoded data - #[clap(long)] + #[arg(long)] hex: bool, #[allow(missing_docs)] diff --git a/client/cli/src/commands/vanity.rs b/client/cli/src/commands/vanity.rs index 289dc1705c3a3..ae0007ac7964d 100644 --- a/client/cli/src/commands/vanity.rs +++ b/client/cli/src/commands/vanity.rs @@ -29,10 +29,10 @@ use utils::print_from_uri; /// The `vanity` command #[derive(Debug, Clone, Parser)] -#[clap(name = "vanity", about = "Generate a seed that provides a vanity address")] +#[command(name = "vanity", about = "Generate a seed that provides a vanity address")] pub struct VanityCmd { /// Desired pattern - #[clap(long, parse(try_from_str = assert_non_empty_string))] + #[arg(long, value_parser = assert_non_empty_string)] pattern: String, #[allow(missing_docs)] diff --git a/client/cli/src/commands/verify.rs b/client/cli/src/commands/verify.rs index 6389cdbde2c17..82554fbf268fa 100644 --- a/client/cli/src/commands/verify.rs +++ b/client/cli/src/commands/verify.rs @@ -24,7 +24,7 @@ use sp_core::crypto::{ByteArray, Ss58Codec}; /// The `verify` command #[derive(Debug, Clone, Parser)] -#[clap( +#[command( name = "verify", about = "Verify a signature for a message, provided on STDIN, with a given (public or secret) key" )] @@ -39,11 +39,11 @@ pub struct VerifyCmd { /// Message to verify, if not provided you will be prompted to /// pass the message via STDIN - #[clap(long)] + #[arg(long)] message: Option, /// The message on STDIN is hex-encoded data - #[clap(long)] + #[arg(long)] hex: bool, #[allow(missing_docs)] diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index e01befbef41a2..b6d0d47e52c1f 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -129,9 +129,9 @@ pub trait SubstrateCli: Sized { let about = Self::description(); let app = app .name(name) - .author(author.as_str()) - .about(about.as_str()) - .version(full_version.as_str()) + .author(author) + .about(about) + .version(full_version) .propagate_version(true) .args_conflicts_with_subcommands(true) .subcommand_negates_reqs(true); @@ -153,9 +153,9 @@ pub trait SubstrateCli: Sized { /// /// **NOTE:** This method WILL NOT exit when `--help` or `--version` (or short versions) are /// used. It will return a [`clap::Error`], where the [`clap::Error::kind`] is a - /// [`clap::ErrorKind::DisplayHelp`] or [`clap::ErrorKind::DisplayVersion`] respectively. 
- /// You must call [`clap::Error::exit`] or perform a [`std::process::exit`]. - fn try_from_iter(iter: I) -> clap::Result + /// [`clap::error::ErrorKind::DisplayHelp`] or [`clap::error::ErrorKind::DisplayVersion`] + /// respectively. You must call [`clap::Error::exit`] or perform a [`std::process::exit`]. + fn try_from_iter(iter: I) -> clap::error::Result where Self: Parser + Sized, I: IntoIterator, @@ -169,11 +169,7 @@ pub trait SubstrateCli: Sized { let name = Self::executable_name(); let author = Self::author(); let about = Self::description(); - let app = app - .name(name) - .author(author.as_str()) - .about(about.as_str()) - .version(full_version.as_str()); + let app = app.name(name).author(author).about(about).version(full_version); let matches = app.try_get_matches_from(iter)?; diff --git a/client/cli/src/params/database_params.rs b/client/cli/src/params/database_params.rs index e954b8cc3bc20..3c9ae09e9ab42 100644 --- a/client/cli/src/params/database_params.rs +++ b/client/cli/src/params/database_params.rs @@ -23,17 +23,11 @@ use clap::Args; #[derive(Debug, Clone, PartialEq, Args)] pub struct DatabaseParams { /// Select database backend to use. - #[clap( - long, - alias = "db", - value_name = "DB", - ignore_case = true, - possible_values = Database::variants(), - )] + #[arg(long, alias = "db", value_name = "DB", value_enum)] pub database: Option, /// Limit the memory the database cache can use. - #[clap(long = "db-cache", value_name = "MiB")] + #[arg(long = "db-cache", value_name = "MiB")] pub database_cache_size: Option, } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index 3cd9fd83bd31b..b7ccbf1c8ed55 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -42,12 +42,12 @@ pub struct ImportParams { pub database_params: DatabaseParams, /// Method for executing Wasm runtime code. - #[clap( + #[arg( long = "wasm-execution", value_name = "METHOD", - possible_values = WasmExecutionMethod::variants(), + value_enum, ignore_case = true, - default_value = DEFAULT_WASM_EXECUTION_METHOD, + default_value_t = DEFAULT_WASM_EXECUTION_METHOD, )] pub wasm_method: WasmExecutionMethod, @@ -64,18 +64,18 @@ pub struct ImportParams { /// The `legacy-instance-reuse` strategy is deprecated and will /// be removed in the future. It should only be used in case of /// issues with the default instantiation strategy. - #[clap( + #[arg( long, value_name = "STRATEGY", default_value_t = DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, - arg_enum, + value_enum, )] pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, /// Specify the path where local WASM runtimes are stored. /// /// These runtimes will override on-chain runtimes when the version matches. - #[clap(long, value_name = "PATH", parse(from_os_str))] + #[arg(long, value_name = "PATH")] pub wasm_runtime_overrides: Option, #[allow(missing_docs)] @@ -85,13 +85,13 @@ pub struct ImportParams { /// Specify the state cache size. /// /// Providing `0` will disable the cache. - #[clap(long, value_name = "Bytes", default_value = "67108864")] + #[arg(long, value_name = "Bytes", default_value_t = 67108864)] pub trie_cache_size: usize, /// DEPRECATED /// /// Switch to `--trie-cache-size`. - #[clap(long)] + #[arg(long)] state_cache_size: Option, } @@ -156,39 +156,39 @@ impl ImportParams { pub struct ExecutionStrategiesParams { /// The means of execution used when calling into the runtime for importing blocks as /// part of an initial sync. 
- #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] + #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_syncing: Option, /// The means of execution used when calling into the runtime for general block import /// (including locally authored blocks). - #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] + #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_import_block: Option, /// The means of execution used when calling into the runtime while constructing blocks. - #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] + #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_block_construction: Option, /// The means of execution used when calling into the runtime while using an off-chain worker. - #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] + #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_offchain_worker: Option, /// The means of execution used when calling into the runtime while not syncing, importing or /// constructing blocks. - #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] + #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution_other: Option, /// The execution strategy that should be used by all execution contexts. - #[clap( + #[arg( long, value_name = "STRATEGY", - arg_enum, + value_enum, ignore_case = true, conflicts_with_all = &[ - "execution-other", - "execution-offchain-worker", - "execution-block-construction", - "execution-import-block", - "execution-syncing", + "execution_other", + "execution_offchain_worker", + "execution_block_construction", + "execution_import_block", + "execution_syncing", ] )] pub execution: Option, diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index 386d1791dd805..d6c3c35d82418 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -32,32 +32,31 @@ const DEFAULT_KEYSTORE_CONFIG_PATH: &str = "keystore"; #[derive(Debug, Clone, Args)] pub struct KeystoreParams { /// Specify custom URIs to connect to for keystore-services - #[clap(long)] + #[arg(long)] pub keystore_uri: Option, /// Specify custom keystore path. - #[clap(long, value_name = "PATH", parse(from_os_str))] + #[arg(long, value_name = "PATH")] pub keystore_path: Option, /// Use interactive shell for entering the password used by the keystore. - #[clap(long, conflicts_with_all = &["password", "password-filename"])] + #[arg(long, conflicts_with_all = &["password", "password_filename"])] pub password_interactive: bool, /// Password used by the keystore. This allows appending an extra user-defined secret to the /// seed. - #[clap( + #[arg( long, - parse(try_from_str = secret_string_from_str), - conflicts_with_all = &["password-interactive", "password-filename"] + value_parser = secret_string_from_str, + conflicts_with_all = &["password_interactive", "password_filename"] )] pub password: Option, /// File that contains the password used by the keystore. 
- #[clap( + #[arg( long, value_name = "PATH", - parse(from_os_str), - conflicts_with_all = &["password-interactive", "password"] + conflicts_with_all = &["password_interactive", "password"] )] pub password_filename: Option, } diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index 9fccce606b4e4..3197deb101bcc 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -27,7 +27,7 @@ mod transaction_pool_params; use crate::arg_enums::{CryptoScheme, OutputType}; use clap::Args; -use sp_core::crypto::Ss58AddressFormat; +use sp_core::crypto::{Ss58AddressFormat, Ss58AddressFormatRegistry}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor}, @@ -40,6 +40,17 @@ pub use crate::params::{ transaction_pool_params::*, }; +/// Parse Ss58AddressFormat +pub fn parse_ss58_address_format(x: &str) -> Result { + match Ss58AddressFormatRegistry::try_from(x) { + Ok(format_registry) => Ok(format_registry.into()), + Err(_) => Err(format!( + "Unable to parse variant. Known variants: {:?}", + Ss58AddressFormat::all_names() + )), + } +} + /// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a /// decimal. #[derive(Debug, Clone)] @@ -118,7 +129,7 @@ impl BlockNumberOrHash { #[derive(Debug, Clone, Args)] pub struct CryptoSchemeFlag { /// cryptography scheme - #[clap(long, value_name = "SCHEME", arg_enum, ignore_case = true, default_value = "sr25519")] + #[arg(long, value_name = "SCHEME", value_enum, ignore_case = true, default_value_t = CryptoScheme::Sr25519)] pub scheme: CryptoScheme, } @@ -126,7 +137,7 @@ pub struct CryptoSchemeFlag { #[derive(Debug, Clone, Args)] pub struct OutputTypeFlag { /// output format - #[clap(long, value_name = "FORMAT", arg_enum, ignore_case = true, default_value = "text")] + #[arg(long, value_name = "FORMAT", value_enum, ignore_case = true, default_value_t = OutputType::Text)] pub output_type: OutputType, } @@ -134,13 +145,12 @@ pub struct OutputTypeFlag { #[derive(Debug, Clone, Args)] pub struct NetworkSchemeFlag { /// network address format - #[clap( + #[arg( short = 'n', long, value_name = "NETWORK", - possible_values = &Ss58AddressFormat::all_names()[..], ignore_case = true, - parse(try_from_str = Ss58AddressFormat::try_from), + value_parser = parse_ss58_address_format, )] pub network: Option, } diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 0450b5f0e2566..39f98b3573933 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -33,11 +33,11 @@ use std::{borrow::Cow, path::PathBuf}; #[derive(Debug, Clone, Args)] pub struct NetworkParams { /// Specify a list of bootnodes. - #[clap(long, value_name = "ADDR", multiple_values(true))] + #[arg(long, value_name = "ADDR", num_args = 1..)] pub bootnodes: Vec, /// Specify a list of reserved node addresses. - #[clap(long, value_name = "ADDR", multiple_values(true))] + #[arg(long, value_name = "ADDR", num_args = 1..)] pub reserved_nodes: Vec, /// Whether to only synchronize the chain with reserved nodes. @@ -48,12 +48,12 @@ pub struct NetworkParams { /// In particular, if you are a validator your node might still connect to other /// validator nodes and collator nodes regardless of whether they are defined as /// reserved nodes. - #[clap(long)] + #[arg(long)] pub reserved_only: bool, /// The public address that other nodes will use to connect to it. /// This can be used if there's a proxy in front of this node. 
- #[clap(long, value_name = "PUBLIC_ADDR", multiple_values(true))] + #[arg(long, value_name = "PUBLIC_ADDR", num_args = 1..)] pub public_addr: Vec, /// Listen on this multiaddress. @@ -61,49 +61,49 @@ pub struct NetworkParams { /// By default: /// If `--validator` is passed: `/ip4/0.0.0.0/tcp/` and `/ip6/[::]/tcp/`. /// Otherwise: `/ip4/0.0.0.0/tcp//ws` and `/ip6/[::]/tcp//ws`. - #[clap(long, value_name = "LISTEN_ADDR", multiple_values(true))] + #[arg(long, value_name = "LISTEN_ADDR", num_args = 1..)] pub listen_addr: Vec, /// Specify p2p protocol TCP port. - #[clap(long, value_name = "PORT", conflicts_with_all = &[ "listen-addr" ])] + #[arg(long, value_name = "PORT", conflicts_with_all = &[ "listen_addr" ])] pub port: Option, /// Always forbid connecting to private IPv4 addresses (as specified in /// [RFC1918](https://tools.ietf.org/html/rfc1918)), unless the address was passed with /// `--reserved-nodes` or `--bootnodes`. Enabled by default for chains marked as "live" in /// their chain specifications. - #[clap(long, conflicts_with_all = &["allow-private-ipv4"])] + #[arg(long, conflicts_with_all = &["allow_private_ipv4"])] pub no_private_ipv4: bool, /// Always accept connecting to private IPv4 addresses (as specified in /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Enabled by default for chains marked as /// "local" in their chain specifications, or when `--dev` is passed. - #[clap(long, conflicts_with_all = &["no-private-ipv4"])] + #[arg(long, conflicts_with_all = &["no_private_ipv4"])] pub allow_private_ipv4: bool, /// Specify the number of outgoing connections we're trying to maintain. - #[clap(long, value_name = "COUNT", default_value = "25")] + #[arg(long, value_name = "COUNT", default_value_t = 25)] pub out_peers: u32, /// Maximum number of inbound full nodes peers. - #[clap(long, value_name = "COUNT", default_value = "25")] + #[arg(long, value_name = "COUNT", default_value_t = 25)] pub in_peers: u32, /// Maximum number of inbound light nodes peers. - #[clap(long, value_name = "COUNT", default_value = "100")] + #[arg(long, value_name = "COUNT", default_value_t = 100)] pub in_peers_light: u32, /// Disable mDNS discovery. /// /// By default, the network will use mDNS to discover other nodes on the /// local network. This disables it. Automatically implied when using --dev. - #[clap(long)] + #[arg(long)] pub no_mdns: bool, /// Maximum number of peers from which to ask for the same blocks in parallel. /// /// This allows downloading announced blocks from multiple peers. Decrease to save /// traffic and risk increased latency. - #[clap(long, value_name = "COUNT", default_value = "5")] + #[arg(long, value_name = "COUNT", default_value_t = 5)] pub max_parallel_downloads: u32, #[allow(missing_docs)] @@ -114,7 +114,7 @@ pub struct NetworkParams { /// /// By default this option is `true` for `--dev` or when the chain type is /// `Local`/`Development` and false otherwise. - #[clap(long)] + #[arg(long)] pub discover_local: bool, /// Require iterative Kademlia DHT queries to use disjoint paths for increased resiliency in @@ -122,11 +122,11 @@ pub struct NetworkParams { /// /// See the S/Kademlia paper for more information on the high level design as well as its /// security improvements. - #[clap(long)] + #[arg(long)] pub kademlia_disjoint_query_paths: bool, /// Join the IPFS network and serve transactions over bitswap protocol. - #[clap(long)] + #[arg(long)] pub ipfs_server: bool, /// Blockchain syncing mode. 
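
The network parameter hunks use `num_args = 1..` in place of `multiple_values(true)` and typed defaults via `default_value_t`; earlier hunks pass plain fallible functions as `value_parser` (which is why `parse_cors` above now returns a `Result`). A minimal sketch combining these clap 4 idioms (`ExampleNetworkArgs` and `parse_endpoint` are hypothetical):

use clap::Args;

/// A custom value parser is just a function returning `Result`.
fn parse_endpoint(s: &str) -> Result<String, String> {
    if s.is_empty() {
        Err("endpoint must not be empty".into())
    } else {
        Ok(s.to_string())
    }
}

#[derive(Debug, Clone, Args)]
struct ExampleNetworkArgs {
    /// One or more endpoints; each value goes through `parse_endpoint`.
    #[arg(long, value_name = "ADDR", num_args = 1.., value_parser = parse_endpoint)]
    endpoints: Vec<String>,

    /// Typed default instead of a stringly-typed `default_value`.
    #[arg(long, value_name = "COUNT", default_value_t = 25)]
    out_peers: u32,
}
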
@@ -135,11 +135,11 @@ pub struct NetworkParams { /// - `fast`: Download blocks and the latest state only. /// - `fast-unsafe`: Same as `fast`, but skip downloading state proofs. /// - `warp`: Download the latest state and proof. - #[clap( + #[arg( long, - arg_enum, + value_enum, value_name = "SYNC_MODE", - default_value = "full", + default_value_t = SyncMode::Full, ignore_case = true, verbatim_doc_comment )] diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index 00ce9e8027aab..2346455c26a37 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -46,7 +46,7 @@ pub struct NodeKeyParams { /// WARNING: Secrets provided as command-line arguments are easily exposed. /// Use of this option should be limited to development and testing. To use /// an externally managed secret key, use `--node-key-file` instead. - #[clap(long, value_name = "KEY")] + #[arg(long, value_name = "KEY")] pub node_key: Option, /// The type of secret key to use for libp2p networking. @@ -66,7 +66,7 @@ pub struct NodeKeyParams { /// /// The node's secret key determines the corresponding public key and hence the /// node's peer ID in the context of libp2p. - #[clap(long, value_name = "TYPE", arg_enum, ignore_case = true, default_value = "ed25519")] + #[arg(long, value_name = "TYPE", value_enum, ignore_case = true, default_value_t = NodeKeyType::Ed25519)] pub node_key_type: NodeKeyType, /// The file from which to read the node's secret key to use for libp2p networking. @@ -79,7 +79,7 @@ pub struct NodeKeyParams { /// /// If the file does not exist, it is created with a newly generated secret key of /// the chosen type. - #[clap(long, value_name = "FILE")] + #[arg(long, value_name = "FILE")] pub node_key_file: Option, } @@ -122,7 +122,7 @@ fn parse_ed25519_secret(hex: &str) -> error::Result, /// Specify the blocks pruning mode, a number of blocks to keep or 'archive'. /// @@ -38,7 +38,7 @@ pub struct PruningParams { /// or for the last N blocks (i.e a number). /// /// NOTE: only finalized blocks are subject for removal! - #[clap(alias = "keep-blocks", long, value_name = "COUNT")] + #[arg(alias = "keep-blocks", long, value_name = "COUNT")] pub blocks_pruning: Option, } diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index 4a8fc4bcfffdd..6c03ac2c4ec23 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -28,25 +28,25 @@ pub struct SharedParams { /// /// It can be one of the predefined ones (dev, local, or staging) or it can be a path to a file /// with the chainspec (such as one exported by the `build-spec` subcommand). - #[clap(long, value_name = "CHAIN_SPEC")] + #[arg(long, value_name = "CHAIN_SPEC")] pub chain: Option, /// Specify the development chain. /// /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, /// `--alice`, and `--tmp` flags, unless explicitly overridden. - #[clap(long, conflicts_with_all = &["chain"])] + #[arg(long, conflicts_with_all = &["chain"])] pub dev: bool, /// Specify custom base path. - #[clap(long, short = 'd', value_name = "PATH", parse(from_os_str))] + #[arg(long, short = 'd', value_name = "PATH")] pub base_path: Option, /// Sets a custom logging filter. Syntax is =, e.g. -lsync=debug. /// /// Log levels (least to most verbose) are error, warn, info, debug, and trace. /// By default, all targets log `info`. The global log level can be set with -l. 
- #[clap(short = 'l', long, value_name = "LOG_PATTERN", multiple_values(true))] + #[arg(short = 'l', long, value_name = "LOG_PATTERN", num_args = 1..)] pub log: Vec, /// Enable detailed log output. @@ -54,11 +54,11 @@ pub struct SharedParams { /// This includes displaying the log target, log level and thread name. /// /// This is automatically enabled when something is logged with any higher level than `info`. - #[clap(long)] + #[arg(long)] pub detailed_log_output: bool, /// Disable log color output. - #[clap(long)] + #[arg(long)] pub disable_log_color: bool, /// Enable feature to dynamically update and reload the log filter. @@ -68,15 +68,15 @@ pub struct SharedParams { /// /// The `system_addLogFilter` and `system_resetLogFilter` RPCs will have no effect with this /// option not being set. - #[clap(long)] + #[arg(long)] pub enable_log_reloading: bool, /// Sets a custom profiling filter. Syntax is the same as for logging: = - #[clap(long, value_name = "TARGETS")] + #[arg(long, value_name = "TARGETS")] pub tracing_targets: Option, /// Receiver to process tracing messages. - #[clap(long, value_name = "RECEIVER", arg_enum, ignore_case = true, default_value = "log")] + #[arg(long, value_name = "RECEIVER", value_enum, ignore_case = true, default_value_t = TracingReceiver::Log)] pub tracing_receiver: TracingReceiver, } diff --git a/client/cli/src/params/transaction_pool_params.rs b/client/cli/src/params/transaction_pool_params.rs index 6429dfec3f908..6b3a2d8a97a01 100644 --- a/client/cli/src/params/transaction_pool_params.rs +++ b/client/cli/src/params/transaction_pool_params.rs @@ -23,15 +23,15 @@ use sc_service::config::TransactionPoolOptions; #[derive(Debug, Clone, Args)] pub struct TransactionPoolParams { /// Maximum number of transactions in the transaction pool. - #[clap(long, value_name = "COUNT", default_value = "8192")] + #[arg(long, value_name = "COUNT", default_value_t = 8192)] pub pool_limit: usize, /// Maximum number of kilobytes of all transactions stored in the pool. - #[clap(long, value_name = "COUNT", default_value = "20480")] + #[arg(long, value_name = "COUNT", default_value_t = 20480)] pub pool_kbytes: usize, /// How long a transaction is banned for, if it is considered invalid. Defaults to 1800s. 
- #[clap(long, value_name = "SECONDS")] + #[arg(long, value_name = "SECONDS")] pub tx_ban_seconds: Option, } diff --git a/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index 10f82cd316851..2cc620452586d 100644 --- a/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -13,7 +13,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index a200d5c41ee35..293a17624820b 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -14,7 +14,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } @@ -36,4 +36,4 @@ path = "src/phragmms_balancing.rs" [[bin]] name = "phragmen_pjr" -path = "src/phragmen_pjr.rs" \ No newline at end of file +path = "src/phragmen_pjr.rs" diff --git a/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs b/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs index 2396fdfa3b40e..0401249a3df1d 100644 --- a/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs +++ b/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs @@ -68,18 +68,18 @@ fn main() { #[cfg(not(fuzzing))] #[derive(Debug, Parser)] -#[clap(author, version, about)] +#[command(author, version, about)] struct Opt { /// How many candidates participate in this election - #[clap(short, long)] + #[arg(short, long)] candidates: Option, /// How many voters participate in this election - #[clap(short, long)] + #[arg(short, long)] voters: Option, /// Random seed to use in this election - #[clap(long)] + #[arg(long)] seed: Option, } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 9c046bf9d2dd1..79c1012196bde 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +use core::fmt::Display; + #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; use sp_debug_derive::RuntimeDebug; @@ -412,6 +414,15 @@ pub enum StateVersion { V1 = 1, } +impl Display for StateVersion { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + StateVersion::V0 => f.write_str("0"), + StateVersion::V1 => f.write_str("1"), + } + } +} + impl Default for StateVersion { fn default() -> Self { StateVersion::V1 diff --git a/scripts/ci/node-template-release/Cargo.toml b/scripts/ci/node-template-release/Cargo.toml index 8871a04e19b16..0800b17536453 100644 --- a/scripts/ci/node-template-release/Cargo.toml +++ b/scripts/ci/node-template-release/Cargo.toml @@ -10,7 +10,7 @@ homepage = "https://substrate.io" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "3.0", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } flate2 = "1.0" fs_extra = "1" git2 = "0.8" diff --git a/scripts/ci/node-template-release/src/main.rs 
b/scripts/ci/node-template-release/src/main.rs index 62e9b66715768..91a7e865458cf 100644 --- a/scripts/ci/node-template-release/src/main.rs +++ b/scripts/ci/node-template-release/src/main.rs @@ -29,10 +29,10 @@ type CargoToml = HashMap; #[derive(Parser)] struct Options { /// The path to the `node-template` source. - #[clap(parse(from_os_str))] + #[arg()] node_template: PathBuf, /// The path where to output the generated `tar.gz` file. - #[clap(parse(from_os_str))] + #[arg()] output: PathBuf, } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 8eedcb870a3d0..ea908bf0336bf 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "4.1" chrono = "0.4" -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } comfy-table = { version = "6.0.0", default-features = false } handlebars = "4.2.2" diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index 32282b314b804..318c58de8a553 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -43,15 +43,15 @@ const LOG_TARGET: &'static str = "benchmark::block::weight"; #[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] pub struct BenchmarkParams { /// Number of the first block to consider. - #[clap(long)] + #[arg(long)] pub from: u32, /// Last block number to consider. - #[clap(long)] + #[arg(long)] pub to: u32, /// Number of times that the benchmark should be repeated for each block. - #[clap(long, default_value = "10")] + #[arg(long, default_value_t = 10)] pub repeat: u32, } diff --git a/utils/frame/benchmarking-cli/src/block/cmd.rs b/utils/frame/benchmarking-cli/src/block/cmd.rs index 14cbb1b438686..f5f7e15a316b3 100644 --- a/utils/frame/benchmarking-cli/src/block/cmd.rs +++ b/utils/frame/benchmarking-cli/src/block/cmd.rs @@ -71,7 +71,7 @@ pub struct BlockCmd { /// Enable the Trie cache. /// /// This should only be used for performance analysis and not for final results. - #[clap(long)] + #[arg(long)] pub enable_trie_cache: bool, } diff --git a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs index 5dcf05956d42c..2a86c10e7ad27 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/bench.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -43,17 +43,17 @@ use crate::shared::{StatSelect, Stats}; #[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] pub struct BenchmarkParams { /// Rounds of warmups before measuring. - #[clap(long, default_value = "10")] + #[arg(long, default_value_t = 10)] pub warmup: u32, /// How many times the benchmark should be repeated. - #[clap(long, default_value = "100")] + #[arg(long, default_value_t = 100)] pub repeat: u32, /// Maximal number of extrinsics that should be put into a block. /// /// Only useful for debugging. 
- #[clap(long)] + #[arg(long)] pub max_ext_per_block: Option, } diff --git a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs index 339d6126bcd09..b95cd6b5c2e42 100644 --- a/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs +++ b/utils/frame/benchmarking-cli/src/extrinsic/cmd.rs @@ -62,21 +62,21 @@ pub struct ExtrinsicParams { /// List all available pallets and extrinsics. /// /// The format is CSV with header `pallet, extrinsic`. - #[clap(long)] + #[arg(long)] pub list: bool, /// Pallet name of the extrinsic to benchmark. - #[clap(long, value_name = "PALLET", required_unless_present = "list")] + #[arg(long, value_name = "PALLET", required_unless_present = "list")] pub pallet: Option, /// Extrinsic to benchmark. - #[clap(long, value_name = "EXTRINSIC", required_unless_present = "list")] + #[arg(long, value_name = "EXTRINSIC", required_unless_present = "list")] pub extrinsic: Option, /// Enable the Trie cache. /// /// This should only be used for performance analysis and not for final results. - #[clap(long)] + #[arg(long)] pub enable_trie_cache: bool, } diff --git a/utils/frame/benchmarking-cli/src/machine/mod.rs b/utils/frame/benchmarking-cli/src/machine/mod.rs index 5f27c71983905..a19db671f3cd1 100644 --- a/utils/frame/benchmarking-cli/src/machine/mod.rs +++ b/utils/frame/benchmarking-cli/src/machine/mod.rs @@ -53,30 +53,30 @@ pub struct MachineCmd { /// Do not return an error if any check fails. /// /// Should only be used for debugging. - #[clap(long)] + #[arg(long)] pub allow_fail: bool, /// Set a fault tolerance for passing a requirement. /// /// 10% means that the test would pass even when only 90% score was archived. /// Can be used to mitigate outliers of the benchmarks. - #[clap(long, default_value = "10.0", value_name = "PERCENT")] + #[arg(long, default_value_t = 10.0, value_name = "PERCENT")] pub tolerance: f64, /// Time limit for the verification benchmark. - #[clap(long, default_value = "5.0", value_name = "SECONDS")] + #[arg(long, default_value_t = 5.0, value_name = "SECONDS")] pub verify_duration: f32, /// Time limit for the hash function benchmark. - #[clap(long, default_value = "5.0", value_name = "SECONDS")] + #[arg(long, default_value_t = 5.0, value_name = "SECONDS")] pub hash_duration: f32, /// Time limit for the memory benchmark. - #[clap(long, default_value = "5.0", value_name = "SECONDS")] + #[arg(long, default_value_t = 5.0, value_name = "SECONDS")] pub memory_duration: f32, /// Time limit for each disk benchmark. - #[clap(long, default_value = "5.0", value_name = "SECONDS")] + #[arg(long, default_value_t = 5.0, value_name = "SECONDS")] pub disk_duration: f32, } diff --git a/utils/frame/benchmarking-cli/src/overhead/cmd.rs b/utils/frame/benchmarking-cli/src/overhead/cmd.rs index 93b0e7c472647..74c1e7efc24d4 100644 --- a/utils/frame/benchmarking-cli/src/overhead/cmd.rs +++ b/utils/frame/benchmarking-cli/src/overhead/cmd.rs @@ -73,13 +73,13 @@ pub struct OverheadParams { /// Add a header to the generated weight output file. /// /// Good for adding LICENSE headers. - #[clap(long, value_name = "PATH")] + #[arg(long, value_name = "PATH")] pub header: Option, /// Enable the Trie cache. /// /// This should only be used for performance analysis and not for final results. 
- #[clap(long)] + #[arg(long)] pub enable_trie_cache: bool, } diff --git a/utils/frame/benchmarking-cli/src/pallet/mod.rs b/utils/frame/benchmarking-cli/src/pallet/mod.rs index 0e698c4e73910..b10f531bc0aed 100644 --- a/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -27,69 +27,69 @@ use std::{fmt::Debug, path::PathBuf}; // Add a more relaxed parsing for pallet names by allowing pallet directory names with `-` to be // used like crate names with `_` -fn parse_pallet_name(pallet: &str) -> String { - pallet.replace("-", "_") +fn parse_pallet_name(pallet: &str) -> std::result::Result { + Ok(pallet.replace("-", "_")) } /// Benchmark the extrinsic weight of FRAME Pallets. #[derive(Debug, clap::Parser)] pub struct PalletCmd { /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[clap(short, long, parse(from_str = parse_pallet_name), required_unless_present_any = ["list", "json-input"])] + #[arg(short, long, value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input"])] pub pallet: Option, /// Select an extrinsic inside the pallet to benchmark, or `*` for all. - #[clap(short, long, required_unless_present_any = ["list", "json-input"])] + #[arg(short, long, required_unless_present_any = ["list", "json_input"])] pub extrinsic: Option, /// Select how many samples we should take across the variable components. - #[clap(short, long, default_value = "2")] + #[arg(short, long, default_value_t = 2)] pub steps: u32, /// Indicates lowest values for each of the component ranges. - #[clap(long = "low", use_value_delimiter = true)] + #[arg(long = "low", value_delimiter = ',')] pub lowest_range_values: Vec, /// Indicates highest values for each of the component ranges. - #[clap(long = "high", use_value_delimiter = true)] + #[arg(long = "high", value_delimiter = ',')] pub highest_range_values: Vec, /// Select how many repetitions of this benchmark should run from within the wasm. - #[clap(short, long, default_value = "1")] + #[arg(short, long, default_value_t = 1)] pub repeat: u32, /// Select how many repetitions of this benchmark should run from the client. /// /// NOTE: Using this alone may give slower results, but will afford you maximum Wasm memory. - #[clap(long, default_value = "1")] + #[arg(long, default_value_t = 1)] pub external_repeat: u32, /// Print the raw results in JSON format. - #[clap(long = "json")] + #[arg(long = "json")] pub json_output: bool, /// Write the raw results in JSON format into the given file. - #[clap(long, conflicts_with = "json-output")] + #[arg(long, conflicts_with = "json_output")] pub json_file: Option, /// Don't print the median-slopes linear regression analysis. - #[clap(long)] + #[arg(long)] pub no_median_slopes: bool, /// Don't print the min-squares linear regression analysis. - #[clap(long)] + #[arg(long)] pub no_min_squares: bool, /// Output the benchmarks to a Rust file at the given path. - #[clap(long)] + #[arg(long)] pub output: Option, /// Add a header file to your outputted benchmarks. - #[clap(long)] + #[arg(long)] pub header: Option, /// Path to Handlebars template file used for outputting benchmark results. 
(Optional) - #[clap(long)] + #[arg(long)] pub template: Option, #[allow(missing_docs)] @@ -100,25 +100,25 @@ pub struct PalletCmd { /// * min-squares (default) /// * median-slopes /// * max (max of min squares and median slopes for each value) - #[clap(long)] + #[arg(long)] pub output_analysis: Option, /// Set the heap pages while running benchmarks. If not set, the default value from the client /// is used. - #[clap(long)] + #[arg(long)] pub heap_pages: Option, /// Disable verification logic when running benchmarks. - #[clap(long)] + #[arg(long)] pub no_verify: bool, /// Display and run extra benchmarks that would otherwise not be needed for weight /// construction. - #[clap(long)] + #[arg(long)] pub extra: bool, /// Estimate PoV size. - #[clap(long)] + #[arg(long)] pub record_proof: bool, #[allow(missing_docs)] @@ -126,50 +126,50 @@ pub struct PalletCmd { pub shared_params: sc_cli::SharedParams, /// The execution strategy that should be used for benchmarks. - #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true)] + #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true)] pub execution: Option, /// Method for executing Wasm runtime code. - #[clap( + #[arg( long = "wasm-execution", value_name = "METHOD", - possible_values = WasmExecutionMethod::variants(), + value_enum, ignore_case = true, - default_value = DEFAULT_WASM_EXECUTION_METHOD, + default_value_t = DEFAULT_WASM_EXECUTION_METHOD, )] pub wasm_method: WasmExecutionMethod, /// The WASM instantiation method to use. /// /// Only has an effect when `wasm-execution` is set to `compiled`. - #[clap( + #[arg( long = "wasm-instantiation-strategy", value_name = "STRATEGY", default_value_t = DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, - arg_enum, + value_enum, )] pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, /// Limit the memory the database cache can use. - #[clap(long = "db-cache", value_name = "MiB", default_value = "1024")] + #[arg(long = "db-cache", value_name = "MiB", default_value_t = 1024)] pub database_cache_size: u32, /// List the benchmarks that match your query rather than running them. /// /// When nothing is provided, we list all benchmarks. - #[clap(long)] + #[arg(long)] pub list: bool, /// If enabled, the storage info is not displayed in the output next to the analysis. /// /// This is independent of the storage info appearing in the *output file*. Use a Handlebar /// template for that purpose. - #[clap(long)] + #[arg(long)] pub no_storage_info: bool, /// A path to a `.json` file with existing benchmark results generated with `--json` or /// `--json-file`. When specified the benchmarks are not actually executed, and the data for /// the analysis is read from this file. - #[clap(long)] + #[arg(long)] pub json_input: Option, } diff --git a/utils/frame/benchmarking-cli/src/shared/mod.rs b/utils/frame/benchmarking-cli/src/shared/mod.rs index 33189792c4008..ea5415f33f020 100644 --- a/utils/frame/benchmarking-cli/src/shared/mod.rs +++ b/utils/frame/benchmarking-cli/src/shared/mod.rs @@ -95,22 +95,22 @@ pub fn check_build_profile() -> Result<(), String> { /// Parameters to configure how the host info will be determined. #[derive(Debug, Default, Serialize, Clone, PartialEq, Args)] -#[clap(rename_all = "kebab-case")] +#[command(rename_all = "kebab-case")] pub struct HostInfoParams { /// Manually override the hostname to use. - #[clap(long)] + #[arg(long)] pub hostname_override: Option, /// Specify a fallback hostname if no-one could be detected automatically. 
/// /// Note: This only exists to make the `hostname` function infallible. - #[clap(long, default_value = "")] + #[arg(long, default_value = "")] pub hostname_fallback: String, /// Specify a fallback CPU name if no-one could be detected automatically. /// /// Note: This only exists to make the `cpuname` function infallible. - #[clap(long, default_value = "")] + #[arg(long, default_value = "")] pub cpuname_fallback: String, } diff --git a/utils/frame/benchmarking-cli/src/shared/weight_params.rs b/utils/frame/benchmarking-cli/src/shared/weight_params.rs index 4dd80cd41ff3d..030bbfa00d468 100644 --- a/utils/frame/benchmarking-cli/src/shared/weight_params.rs +++ b/utils/frame/benchmarking-cli/src/shared/weight_params.rs @@ -31,23 +31,23 @@ pub struct WeightParams { /// File or directory to write the *weight* files to. /// /// For Substrate this should be `frame/support/src/weights`. - #[clap(long)] + #[arg(long)] pub weight_path: Option, /// Select a specific metric to calculate the final weight output. - #[clap(long = "metric", default_value = "average")] + #[arg(long = "metric", default_value = "average")] pub weight_metric: StatSelect, /// Multiply the resulting weight with the given factor. Must be positive. /// /// Is applied before `weight_add`. - #[clap(long = "mul", default_value = "1")] + #[arg(long = "mul", default_value_t = 1.0)] pub weight_mul: f64, /// Add the given offset to the resulting weight. /// /// Is applied after `weight_mul`. - #[clap(long = "add", default_value = "0")] + #[arg(long = "add", default_value_t = 0)] pub weight_add: u64, } diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index a4c4188dfe073..86041d9517de6 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -68,54 +68,54 @@ pub struct StorageParams { pub hostinfo: HostInfoParams, /// Skip the `read` benchmark. - #[clap(long)] + #[arg(long)] pub skip_read: bool, /// Skip the `write` benchmark. - #[clap(long)] + #[arg(long)] pub skip_write: bool, /// Specify the Handlebars template to use for outputting benchmark results. - #[clap(long)] + #[arg(long)] pub template_path: Option, /// Add a header to the generated weight output file. /// /// Good for adding LICENSE headers. - #[clap(long, value_name = "PATH")] + #[arg(long, value_name = "PATH")] pub header: Option, /// Path to write the raw 'read' results in JSON format to. Can be a file or directory. - #[clap(long)] + #[arg(long)] pub json_read_path: Option, /// Path to write the raw 'write' results in JSON format to. Can be a file or directory. - #[clap(long)] + #[arg(long)] pub json_write_path: Option, /// Rounds of warmups before measuring. - #[clap(long, default_value = "1")] + #[arg(long, default_value_t = 1)] pub warmups: u32, /// The `StateVersion` to use. Substrate `--dev` should use `V1` and Polkadot `V0`. /// Selecting the wrong version can corrupt the DB. - #[clap(long, possible_values = ["0", "1"])] + #[arg(long, value_parser = clap::value_parser!(u8).range(0..=1))] pub state_version: u8, /// Trie cache size in bytes. /// /// Providing `0` will disable the cache. - #[clap(long, value_name = "Bytes", default_value = "67108864")] + #[arg(long, value_name = "Bytes", default_value_t = 67108864)] pub trie_cache_size: usize, /// Enable the Trie cache. /// /// This should only be used for performance analysis and not for final results. - #[clap(long)] + #[arg(long)] pub enable_trie_cache: bool, /// Include child trees in benchmark. 
- #[clap(long)] + #[arg(long)] pub include_child_trees: bool, } diff --git a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml index f01e2f1a5d51f..89e9ee79db214 100644 --- a/utils/frame/frame-utilities-cli/Cargo.toml +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -11,7 +11,7 @@ documentation = "https://docs.rs/substrate-frame-cli" readme = "README.md" [dependencies] -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } frame-support = { version = "4.0.0-dev", path = "../../../frame/support" } frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli" } diff --git a/utils/frame/frame-utilities-cli/src/pallet_id.rs b/utils/frame/frame-utilities-cli/src/pallet_id.rs index 4ad82a01c2433..2a80e3a3d312d 100644 --- a/utils/frame/frame-utilities-cli/src/pallet_id.rs +++ b/utils/frame/frame-utilities-cli/src/pallet_id.rs @@ -28,31 +28,30 @@ use sp_runtime::traits::AccountIdConversion; /// The `palletid` command #[derive(Debug, Parser)] -#[clap(name = "palletid", about = "Inspect a module ID address")] +#[command(name = "palletid", about = "Inspect a module ID address")] pub struct PalletIdCmd { /// The module ID used to derive the account id: String, /// network address format - #[clap( + #[arg( long, value_name = "NETWORK", - possible_values = &Ss58AddressFormat::all_names()[..], - parse(try_from_str = Ss58AddressFormat::try_from), + value_parser = sc_cli::parse_ss58_address_format, ignore_case = true, )] pub network: Option, #[allow(missing_docs)] - #[clap(flatten)] + #[command(flatten)] pub output_scheme: OutputTypeFlag, #[allow(missing_docs)] - #[clap(flatten)] + #[command(flatten)] pub crypto_scheme: CryptoSchemeFlag, #[allow(missing_docs)] - #[clap(flatten)] + #[command(flatten)] pub keystore_params: KeystoreParams, } diff --git a/utils/frame/generate-bags/node-runtime/Cargo.toml b/utils/frame/generate-bags/node-runtime/Cargo.toml index 5af7dd78a08e8..6cc14a0595501 100644 --- a/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -14,4 +14,4 @@ kitchensink-runtime = { version = "3.0.0-dev", path = "../../../../bin/node/runt generate-bags = { version = "4.0.0-dev", path = "../" } # third-party -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } diff --git a/utils/frame/generate-bags/node-runtime/src/main.rs b/utils/frame/generate-bags/node-runtime/src/main.rs index 5ea1262d95d34..27e51b205f8ce 100644 --- a/utils/frame/generate-bags/node-runtime/src/main.rs +++ b/utils/frame/generate-bags/node-runtime/src/main.rs @@ -25,19 +25,19 @@ use std::path::PathBuf; // #[clap(author, version, about)] struct Opt { /// How many bags to generate. - #[clap(long, default_value = "200")] + #[arg(long, default_value_t = 200)] n_bags: usize, /// Where to write the output. output: PathBuf, /// The total issuance of the currency used to create `VoteWeight`. - #[clap(short, long)] + #[arg(short, long)] total_issuance: u128, /// The minimum account balance (i.e. existential deposit) for the currency used to create /// `VoteWeight`. 
- #[clap(short, long)] + #[arg(short, long)] minimum_balance: u128, } diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 956eaff745335..8a9d5a5f8979a 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "3.1.18", features = ["derive"] } +clap = { version = "4.0.9", features = ["derive"] } log = "0.4.17" parity-scale-codec = "3.0.0" serde = "1.0.136" diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 0c8b968d1b4b1..df8c51b5a92d4 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -33,11 +33,11 @@ use std::{fmt::Debug, str::FromStr}; #[derive(Debug, Clone, clap::Parser)] pub struct ExecuteBlockCmd { /// Overwrite the wasm code in state or not. - #[clap(long)] + #[arg(long)] overwrite_wasm_code: bool, /// If set the state root check is disabled. - #[clap(long)] + #[arg(long)] no_state_root_check: bool, /// Which try-state targets to execute when running this command. @@ -49,17 +49,16 @@ pub struct ExecuteBlockCmd { /// `Staking, System`). /// - `rr-[x]` where `[x]` is a number. Then, the given number of pallets are checked in a /// round-robin fashion. - #[clap(long, default_value = "none")] + #[arg(long, default_value = "none")] try_state: frame_try_runtime::TryStateSelect, /// The block hash at which to fetch the block. /// /// If the `live` state type is being used, then this can be omitted, and is equal to whatever /// the `state::at` is. Only use this (with care) when combined with a snapshot. - #[clap( + #[arg( long, - multiple_values = false, - parse(try_from_str = crate::parse::hash) + value_parser = crate::parse::hash )] block_at: Option, @@ -67,10 +66,9 @@ pub struct ExecuteBlockCmd { /// /// If the `live` state type is being used, then this can be omitted, and is equal to whatever /// the `state::uri` is. Only use this (with care) when combined with a snapshot. - #[clap( + #[arg( long, - multiple_values = false, - parse(try_from_str = crate::parse::url) + value_parser = crate::parse::url )] block_ws_uri: Option, @@ -79,7 +77,7 @@ pub struct ExecuteBlockCmd { /// For this command only, if the `live` is used, then state of the parent block is fetched. /// /// If `block_at` is provided, then the [`State::Live::at`] is being ignored. - #[clap(subcommand)] + #[command(subcommand)] state: State, } diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index fb5345827858a..df6d40b2b8ab1 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -43,11 +43,11 @@ const UN_SUB: &str = "chain_unsubscribeFinalizedHeads"; #[derive(Debug, Clone, clap::Parser)] pub struct FollowChainCmd { /// The url to connect to. - #[clap(short, long, parse(try_from_str = parse::url))] + #[arg(short, long, value_parser = parse::url)] uri: String, /// If set, then the state root check is enabled. - #[clap(long)] + #[arg(long)] state_root_check: bool, /// Which try-state targets to execute when running this command. @@ -59,11 +59,11 @@ pub struct FollowChainCmd { /// `Staking, System`). /// - `rr-[x]` where `[x]` is a number. 
Then, the given number of pallets are checked in a /// round-robin fashion. - #[clap(long, default_value = "none")] + #[arg(long, default_value = "none")] try_state: frame_try_runtime::TryStateSelect, /// If present, a single connection to a node will be kept and reused for fetching blocks. - #[clap(long)] + #[arg(long)] keep_connection: bool, } diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index b0404fff18135..6f48d1f8f68a7 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -31,17 +31,16 @@ use std::{fmt::Debug, str::FromStr}; #[derive(Debug, Clone, clap::Parser)] pub struct OffchainWorkerCmd { /// Overwrite the wasm code in state or not. - #[clap(long)] + #[arg(long)] overwrite_wasm_code: bool, /// The block hash at which to fetch the header. /// /// If the `live` state type is being used, then this can be omitted, and is equal to whatever /// the `state::at` is. Only use this (with care) when combined with a snapshot. - #[clap( + #[arg( long, - multiple_values = false, - parse(try_from_str = parse::hash) + value_parser = parse::hash )] header_at: Option, @@ -49,15 +48,14 @@ pub struct OffchainWorkerCmd { /// /// If the `live` state type is being used, then this can be omitted, and is equal to whatever /// the `state::uri` is. Only use this (with care) when combined with a snapshot. - #[clap( + #[arg( long, - multiple_values = false, - parse(try_from_str = parse::url) + value_parser = parse::url )] header_ws_uri: Option, /// The state type to use. - #[clap(subcommand)] + #[command(subcommand)] pub state: State, } diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 1d7d876a4aa92..5e11123526532 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -32,7 +32,7 @@ use crate::{ #[derive(Debug, Clone, clap::Parser)] pub struct OnRuntimeUpgradeCmd { /// The state type to use. - #[clap(subcommand)] + #[command(subcommand)] pub state: State, } diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index b8a2779d57e19..9034f356a9f67 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -385,6 +385,7 @@ pub enum Command { /// Shared parameters of the `try-runtime` commands #[derive(Debug, Clone, clap::Parser)] +#[group(skip)] pub struct SharedParams { /// Shared parameters of substrate cli. #[allow(missing_docs)] @@ -392,41 +393,41 @@ pub struct SharedParams { pub shared_params: sc_cli::SharedParams, /// The execution strategy that should be used. - #[clap(long, value_name = "STRATEGY", arg_enum, ignore_case = true, default_value = "wasm")] + #[arg(long, value_name = "STRATEGY", value_enum, ignore_case = true, default_value_t = ExecutionStrategy::Wasm)] pub execution: ExecutionStrategy, /// Type of wasm execution used. - #[clap( + #[arg( long = "wasm-execution", value_name = "METHOD", - possible_values = WasmExecutionMethod::variants(), + value_enum, ignore_case = true, - default_value = DEFAULT_WASM_EXECUTION_METHOD, + default_value_t = DEFAULT_WASM_EXECUTION_METHOD, )] pub wasm_method: WasmExecutionMethod, /// The WASM instantiation method to use. /// /// Only has an effect when `wasm-execution` is set to `compiled`. 
- #[clap( + #[arg( long = "wasm-instantiation-strategy", value_name = "STRATEGY", default_value_t = DEFAULT_WASMTIME_INSTANTIATION_STRATEGY, - arg_enum, + value_enum, )] pub wasmtime_instantiation_strategy: WasmtimeInstantiationStrategy, /// The number of 64KB pages to allocate for Wasm execution. Defaults to /// [`sc_service::Configuration.default_heap_pages`]. - #[clap(long)] + #[arg(long)] pub heap_pages: Option, /// When enabled, the spec check will not panic, and instead only show a warning. - #[clap(long)] + #[arg(long)] pub no_spec_check_panic: bool, /// State version that is used by the chain. - #[clap(long, default_value = "1", parse(try_from_str = parse::state_version))] + #[arg(long, default_value_t = StateVersion::V1, value_parser = parse::state_version)] pub state_version: StateVersion, } @@ -438,7 +439,7 @@ pub struct TryRuntimeCmd { #[clap(flatten)] pub shared: SharedParams, - #[clap(subcommand)] + #[command(subcommand)] pub command: Command, } @@ -449,17 +450,17 @@ pub enum State { /// /// This can be crated by passing a value to [`State::Live::snapshot_path`]. Snap { - #[clap(short, long)] + #[arg(short, long)] snapshot_path: PathBuf, }, /// Use a live chain as the source of runtime state. Live { /// The url to connect to. - #[clap( + #[arg( short, long, - parse(try_from_str = parse::url), + value_parser = parse::url, )] uri: String, @@ -467,21 +468,20 @@ pub enum State { /// /// If non provided, then the latest finalized head is used. This is particularly useful /// for [`Command::OnRuntimeUpgrade`]. - #[clap( + #[arg( short, long, - multiple_values = false, - parse(try_from_str = parse::hash), + value_parser = parse::hash, )] at: Option, /// An optional state snapshot file to WRITE to. Not written if set to `None`. - #[clap(short, long)] + #[arg(short, long)] snapshot_path: Option, /// A pallet to scrape. Can be provided multiple times. If empty, entire chain state will /// be scraped. - #[clap(short, long, multiple_values = true)] + #[arg(short, long, num_args = 1..)] pallet: Vec, /// Fetch the child-keys as well. @@ -489,7 +489,7 @@ pub enum State { /// Default is `false`, if specific `--pallets` are specified, `true` otherwise. In other /// words, if you scrape the whole state the child tree data is included out of the box. /// Otherwise, it must be enabled explicitly using this flag. 
- #[clap(long)] + #[arg(long)] child_tree: bool, }, } From b9a555d6331bf74e0880ba075814d8d90ae9e1e3 Mon Sep 17 00:00:00 2001 From: dharjeezy Date: Tue, 18 Oct 2022 08:28:07 +0100 Subject: [PATCH 252/348] implement storage decode length for BTreeSet (#12503) * implement storage decode length for BTreeSet * Orderly moving of things around * include test for append and decode_len * fix cargo clippy issue --- frame/support/src/storage/mod.rs | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index b70436785af12..333f4382557b1 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -27,7 +27,7 @@ use crate::{ use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; use sp_core::storage::ChildInfo; use sp_runtime::generic::{Digest, DigestItem}; -use sp_std::{marker::PhantomData, prelude::*}; +use sp_std::{collections::btree_set::BTreeSet, marker::PhantomData, prelude::*}; pub use self::{ transactional::{ @@ -1303,6 +1303,7 @@ mod private { impl Sealed for WeakBoundedVec {} impl Sealed for bounded_btree_map::BoundedBTreeMap {} impl Sealed for bounded_btree_set::BoundedBTreeSet {} + impl Sealed for BTreeSet {} macro_rules! impl_sealed_for_tuple { ($($elem:ident),+) => { @@ -1335,6 +1336,9 @@ mod private { impl StorageAppend for Vec {} impl StorageDecodeLength for Vec {} +impl StorageAppend for BTreeSet {} +impl StorageDecodeLength for BTreeSet {} + /// We abuse the fact that SCALE does not put any marker into the encoding, i.e. we only encode the /// internal vec and we can append to this vec. We have a test that ensures that if the `Digest` /// format ever changes, we need to remove this here. @@ -1832,4 +1836,22 @@ mod test { ); }); } + + #[crate::storage_alias] + type FooSet = StorageValue>; + + #[test] + fn btree_set_append_and_decode_len_works() { + TestExternalities::default().execute_with(|| { + let btree = BTreeSet::from([1, 2, 3]); + FooSet::put(btree); + + FooSet::append(4); + FooSet::append(5); + FooSet::append(6); + FooSet::append(7); + + assert_eq!(FooSet::decode_len().unwrap(), 7); + }); + } } From 415648d87848dfa02955e3bea73f55503b852bc1 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Tue, 18 Oct 2022 10:46:25 +0200 Subject: [PATCH 253/348] Decrease default --out-peers from 25 to 15 (#12434) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- client/cli/src/params/network_params.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 39f98b3573933..5580dea45bde6 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -82,12 +82,13 @@ pub struct NetworkParams { pub allow_private_ipv4: bool, /// Specify the number of outgoing connections we're trying to maintain. - #[arg(long, value_name = "COUNT", default_value_t = 25)] + #[arg(long, value_name = "COUNT", default_value_t = 15)] pub out_peers: u32, /// Maximum number of inbound full nodes peers. #[arg(long, value_name = "COUNT", default_value_t = 25)] pub in_peers: u32, + /// Maximum number of inbound light nodes peers. 
#[arg(long, value_name = "COUNT", default_value_t = 100)] pub in_peers_light: u32, From 4c45673d92678c85732b06a7cd47da5c056484c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 18 Oct 2022 12:07:02 +0200 Subject: [PATCH 254/348] Prepare for latest clippy (nightly 09-10-2022) (#12466) --- client/api/src/in_mem.rs | 2 +- client/consensus/slots/src/lib.rs | 2 +- client/db/src/bench.rs | 2 +- client/db/src/lib.rs | 2 +- client/network/sync/src/lib.rs | 3 +-- client/state-db/src/pruning.rs | 2 +- client/transaction-pool/src/graph/pool.rs | 6 +++--- frame/transaction-payment/src/lib.rs | 8 ++++---- primitives/runtime/src/generic/era.rs | 4 ++-- primitives/runtime/src/offchain/storage_lock.rs | 14 ++++++-------- primitives/state-machine/src/in_memory_backend.rs | 4 ++-- .../state-machine/src/overlayed_changes/mod.rs | 2 +- primitives/trie/src/recorder.rs | 2 +- test-utils/runtime/client/src/lib.rs | 2 +- utils/frame/benchmarking-cli/src/pallet/command.rs | 5 ++--- 15 files changed, 28 insertions(+), 32 deletions(-) diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 9cea1883bdcdc..99efdd3bf1d22 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -516,7 +516,7 @@ where ) -> sp_blockchain::Result { check_genesis_storage(&storage)?; - let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { + let child_delta = storage.children_default.values().map(|child_content| { ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 6225bbbda1745..90bfef6c1609c 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -1118,7 +1118,7 @@ mod test { // But lets assert all distances, which we expect to grow linearly until `max_interval + 1` let expected_intervals: Vec<_> = - (0..497).map(|i| (i / 2).max(1).min(expected_distance)).collect(); + (0..497).map(|i| (i / 2).clamp(1, expected_distance)).collect(); assert_eq!(intervals, expected_intervals); } diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index b1f4e3b6c0af5..13d91fff0b555 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -116,7 +116,7 @@ impl BenchmarkingState { state.add_whitelist_to_tracker(); state.reopen()?; - let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| { + let child_delta = genesis.children_default.values().map(|child_content| { ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index ae777ad154f6d..1a80c16c4e59d 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -798,7 +798,7 @@ impl BlockImportOperation { return Err(sp_blockchain::Error::InvalidState) } - let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { + let child_delta = storage.children_default.values().map(|child_content| { ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 64d9d4d668eb3..e9c2b24b2ba95 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -1444,8 +1444,7 @@ where if let SyncMode::LightState { skip_proofs, .. 
} = &self.mode { if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { // Finalized a recent block. - let mut heads: Vec<_> = - self.peers.iter().map(|(_, peer)| peer.best_number).collect(); + let mut heads: Vec<_> = self.peers.values().map(|peer| peer.best_number).collect(); heads.sort(); let median = heads[heads.len() / 2]; if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 2c23110910495..50a46def59541 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -117,7 +117,7 @@ impl DeathRowQueue { window_size: u32, ) -> Result, Error> { // limit the cache capacity from 1 to `DEFAULT_MAX_BLOCK_CONSTRAINT` - let cache_capacity = window_size.max(1).min(DEFAULT_MAX_BLOCK_CONSTRAINT) as usize; + let cache_capacity = window_size.clamp(1, DEFAULT_MAX_BLOCK_CONSTRAINT) as usize; let mut cache = VecDeque::with_capacity(cache_capacity); trace!(target: "state-db", "Reading pruning journal for the database-backed queue. Pending #{}", base); // Load block from db diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 9afceffe8dddf..d311e0d0c8f2f 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -168,7 +168,7 @@ impl Pool { ) -> Result, B::Error>>, B::Error> { let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await?; - Ok(self.validated_pool.submit(validated_transactions.into_iter().map(|(_, tx)| tx))) + Ok(self.validated_pool.submit(validated_transactions.into_values())) } /// Resubmit the given extrinsics to the pool. @@ -182,7 +182,7 @@ impl Pool { ) -> Result, B::Error>>, B::Error> { let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await?; - Ok(self.validated_pool.submit(validated_transactions.into_iter().map(|(_, tx)| tx))) + Ok(self.validated_pool.submit(validated_transactions.into_values())) } /// Imports one unverified extrinsic to the pool @@ -349,7 +349,7 @@ impl Pool { at, known_imported_hashes, pruned_hashes, - reverified_transactions.into_iter().map(|(_, xt)| xt).collect(), + reverified_transactions.into_values().collect(), ) } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index ce85bf93a90a7..5027db41ec098 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -228,11 +228,11 @@ where if positive { let excess = first_term.saturating_add(second_term).saturating_mul(previous); - previous.saturating_add(excess).max(min_multiplier).min(max_multiplier) + previous.saturating_add(excess).clamp(min_multiplier, max_multiplier) } else { // Defensive-only: first_term > second_term. Safe subtraction. 
let negative = first_term.saturating_sub(second_term).saturating_mul(previous); - previous.saturating_sub(negative).max(min_multiplier).min(max_multiplier) + previous.saturating_sub(negative).clamp(min_multiplier, max_multiplier) } } } @@ -713,8 +713,8 @@ where let max_block_weight = max_block_weight.ref_time(); let info_weight = info.weight.ref_time(); - let bounded_weight = info_weight.max(1).min(max_block_weight); - let bounded_length = (len as u64).max(1).min(max_block_length); + let bounded_weight = info_weight.clamp(1, max_block_weight); + let bounded_length = (len as u64).clamp(1, max_block_length); let max_tx_per_block_weight = max_block_weight / bounded_weight; let max_tx_per_block_length = max_block_length / bounded_length; diff --git a/primitives/runtime/src/generic/era.rs b/primitives/runtime/src/generic/era.rs index 2ca50b12b2e1f..b26545fb8404e 100644 --- a/primitives/runtime/src/generic/era.rs +++ b/primitives/runtime/src/generic/era.rs @@ -63,7 +63,7 @@ impl Era { /// does not exceed `BlockHashCount` parameter passed to `system` module, since that /// prunes old blocks and renders transactions immediately invalid. pub fn mortal(period: u64, current: u64) -> Self { - let period = period.checked_next_power_of_two().unwrap_or(1 << 16).max(4).min(1 << 16); + let period = period.checked_next_power_of_two().unwrap_or(1 << 16).clamp(4, 1 << 16); let phase = current % period; let quantize_factor = (period >> 12).max(1); let quantized_phase = phase / quantize_factor * quantize_factor; @@ -105,7 +105,7 @@ impl Encode for Era { Self::Immortal => output.push_byte(0), Self::Mortal(period, phase) => { let quantize_factor = (*period as u64 >> 12).max(1); - let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 | + let encoded = (period.trailing_zeros() - 1).clamp(1, 15) as u16 | ((phase / quantize_factor) << 4) as u16; encoded.encode_to(output); }, diff --git a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index 4ea9030745296..47325743bd2f3 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -77,8 +77,8 @@ const STORAGE_LOCK_DEFAULT_EXPIRY_DURATION: Duration = Duration::from_millis(20_ const STORAGE_LOCK_DEFAULT_EXPIRY_BLOCKS: u32 = 4; /// Time between checks if the lock is still being held in milliseconds. -const STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MIN: Duration = Duration::from_millis(100); -const STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX: Duration = Duration::from_millis(10); +const STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MIN: Duration = Duration::from_millis(10); +const STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX: Duration = Duration::from_millis(100); /// Lockable item for use with a persisted storage lock. 
/// @@ -137,10 +137,9 @@ impl Lockable for Time { let remainder: Duration = now.diff(deadline); // do not snooze the full duration, but instead snooze max 100ms // it might get unlocked in another thread - use core::cmp::{max, min}; - let snooze = max( - min(remainder, STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX), + let snooze = remainder.clamp( STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MIN, + STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX, ); sp_io::offchain::sleep_until(now.add(snooze)); } @@ -239,10 +238,9 @@ impl Lockable for BlockAndTime { fn snooze(deadline: &Self::Deadline) { let now = offchain::timestamp(); let remainder: Duration = now.diff(&(deadline.timestamp)); - use core::cmp::{max, min}; - let snooze = max( - min(remainder, STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX), + let snooze = remainder.clamp( STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MIN, + STORAGE_LOCK_PER_CHECK_ITERATION_SNOOZE_MAX, ); sp_io::offchain::sleep_until(now.add(snooze)); } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index e205e83e85572..06714fb41405a 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -157,8 +157,8 @@ where fn from((inners, state_version): (Storage, StateVersion)) -> Self { let mut inner: HashMap, BTreeMap> = inners .children_default - .into_iter() - .map(|(_k, c)| (Some(c.child_info), c.data)) + .into_values() + .map(|c| (Some(c.child_info), c.data)) .collect(); inner.insert(None, inners.top); (inner, state_version).into() diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index ef5c02d2ec1fa..9eb26d52f79f8 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -474,7 +474,7 @@ impl OverlayedChanges { pub fn children( &self, ) -> impl Iterator, &ChildInfo)> { - self.children.iter().map(|(_, v)| (v.0.changes(), &v.1)) + self.children.values().map(|v| (v.0.changes(), &v.1)) } /// Get an iterator over all top changes as been by the current transaction. diff --git a/primitives/trie/src/recorder.rs b/primitives/trie/src/recorder.rs index 5599ad1c36904..6fbf698592a31 100644 --- a/primitives/trie/src/recorder.rs +++ b/primitives/trie/src/recorder.rs @@ -109,7 +109,7 @@ impl Recorder { /// Returns the [`StorageProof`]. pub fn to_storage_proof(&self) -> StorageProof { let recorder = self.inner.lock(); - StorageProof::new(recorder.accessed_nodes.iter().map(|(_, v)| v.clone())) + StorageProof::new(recorder.accessed_nodes.values().cloned()) } /// Returns the estimated encoded size of the proof. 
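An illustrative aside, not part of the patch: the clippy-driven rewrites in this commit are mechanical substitutions of two patterns, `value.max(lo).min(hi)` → `value.clamp(lo, hi)` and `map.iter().map(|(_, v)| ...)` → `map.values()`. The standalone Rust sketch below (all names in it are invented for the example) shows why both substitutions preserve behaviour. Note that `Ord::clamp` asserts that the lower bound is not greater than the upper bound, which is why the previously swapped snooze constants in `primitives/runtime/src/offchain/storage_lock.rs` are reordered in the same commit rather than left as they were.

use std::collections::BTreeMap;

fn main() {
    // 1) `x.max(lo).min(hi)` and `x.clamp(lo, hi)` agree whenever lo <= hi;
    //    `clamp` additionally panics when lo > hi.
    for x in [0u32, 5, 15, 25] {
        assert_eq!(x.max(4).min(16), x.clamp(4, 16));
    }

    // 2) `iter().map(|(_, v)| ...)` and `values()` yield the same values,
    //    without the ignored-key closure that clippy flags.
    let peers: BTreeMap<u32, u64> = BTreeMap::from([(1, 10), (2, 20), (3, 30)]);
    let a: Vec<u64> = peers.iter().map(|(_, v)| *v).collect();
    let b: Vec<u64> = peers.values().copied().collect();
    assert_eq!(a, b);
}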
diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index fe0fef3516671..99d4e1163e272 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -134,7 +134,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { .insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.clone()); } - let child_roots = storage.children_default.iter().map(|(_sk, child_content)| { + let child_roots = storage.children_default.values().map(|child_content| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect(), diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index daf4aa74e1394..6e413bdf7e2c5 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -268,9 +268,8 @@ impl PalletCmd { for s in 0..self.steps { // This is the value we will be testing for component `name` - let component_value = ((lowest as f32 + step_size * s as f32) as u32) - .min(highest) - .max(lowest); + let component_value = + ((lowest as f32 + step_size * s as f32) as u32).clamp(lowest, highest); // Select the max value for all the other components. let c: Vec<(BenchmarkParameter, u32)> = components From 281bcd44fca0ff2471bddba3195a88801874dfdb Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 18 Oct 2022 13:15:40 +0300 Subject: [PATCH 255/348] derive Serialize/Deserialize for BeefyAuthoritySet (#12516) --- Cargo.lock | 1 + primitives/beefy/Cargo.toml | 2 ++ primitives/beefy/src/mmr.rs | 1 + 3 files changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 2c1b427f0a2e2..7ea4e3e5c048e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -535,6 +535,7 @@ dependencies = [ "array-bytes", "parity-scale-codec", "scale-info", + "serde", "sp-api", "sp-application-crypto", "sp-core", diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml index f85e2edf4f442..22e41b5130abb 100644 --- a/primitives/beefy/Cargo.toml +++ b/primitives/beefy/Cargo.toml @@ -14,6 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.136", optional = true, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../application-crypto" } @@ -32,6 +33,7 @@ default = ["std"] std = [ "codec/std", "scale-info/std", + "serde", "sp-api/std", "sp-application-crypto/std", "sp-core/std", diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs index 0edb8babd608e..78ceef0b6b396 100644 --- a/primitives/beefy/src/mmr.rs +++ b/primitives/beefy/src/mmr.rs @@ -101,6 +101,7 @@ impl MmrLeafVersion { /// Details of a BEEFY authority set. #[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct BeefyAuthoritySet { /// Id of the set. 
/// From e34b840b8818856a5bb8dbfe91d5b8a918e44fb8 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 18 Oct 2022 14:52:04 +0200 Subject: [PATCH 256/348] BlockId removal: refactor: StorageProvider (#12510) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * BlockId removal: refactor: StorageProvider It changes the arguments of `Backend::StorageProvider` trait from: block: `BlockId` to: hash: `&Block::Hash` This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) * Apply suggestions from code review Co-authored-by: Bastian Köcher * Update client/api/src/backend.rs Co-authored-by: Dmitrii Markin * GrandpaBlockImport::current_set_id reworked * ExportStateCmd reworked * trigger CI job * trigger CI job Co-authored-by: Bastian Köcher Co-authored-by: Dmitrii Markin --- client/api/src/backend.rs | 44 ++++++++--------- client/cli/src/commands/export_state_cmd.rs | 10 ++-- client/finality-grandpa/src/import.rs | 12 +++-- client/rpc/src/state/state_full.rs | 36 ++++++-------- .../service/src/chain_ops/export_raw_state.rs | 15 ++---- client/service/src/client/client.rs | 48 ++++++++----------- client/service/test/src/client/mod.rs | 20 ++++---- .../frame/benchmarking-cli/src/block/bench.rs | 10 ++-- utils/frame/benchmarking-cli/src/block/cmd.rs | 4 +- .../frame/benchmarking-cli/src/storage/cmd.rs | 6 +-- .../benchmarking-cli/src/storage/read.rs | 17 +++---- .../benchmarking-cli/src/storage/write.rs | 10 ++-- 12 files changed, 112 insertions(+), 120 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 0e94d28b75dd5..f358385acd708 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -357,77 +357,77 @@ where /// Provides acess to storage primitives pub trait StorageProvider> { - /// Given a `BlockId` and a key, return the value under the key in that block. + /// Given a block's `Hash` and a key, return the value under the key in that block. fn storage( &self, - id: &BlockId, + hash: &Block::Hash, key: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId` and a key prefix, return the matching storage keys in that block. + /// Given a block's `Hash` and a key prefix, return the matching storage keys in that block. fn storage_keys( &self, - id: &BlockId, + hash: &Block::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId` and a key, return the value under the hash in that block. + /// Given a block's `Hash` and a key, return the value under the hash in that block. fn storage_hash( &self, - id: &BlockId, + hash: &Block::Hash, key: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId` and a key prefix, return the matching child storage keys and values in - /// that block. + /// Given a block's `Hash` and a key prefix, return the matching child storage keys and values + /// in that block. fn storage_pairs( &self, - id: &BlockId, + hash: &Block::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in - /// that block. + /// Given a block's `Hash` and a key prefix, return a `KeyIterator` iterates matching storage + /// keys in that block. 
fn storage_keys_iter<'a>( &self, - id: &BlockId, + hash: &Block::Hash, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result>; - /// Given a `BlockId`, a key and a child storage key, return the value under the key in that - /// block. + /// Given a block's `Hash`, a key and a child storage key, return the value under the key in + /// that block. fn child_storage( &self, - id: &BlockId, + hash: &Block::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage - /// keys. + /// Given a block's `Hash`, a key prefix, and a child storage key, return the matching child + /// storage keys. fn child_storage_keys( &self, - id: &BlockId, + hash: &Block::Hash, child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId` and a key `prefix` and a child storage key, + /// Given a block's `Hash` and a key `prefix` and a child storage key, /// return a `KeyIterator` that iterates matching storage keys in that block. fn child_storage_keys_iter<'a>( &self, - id: &BlockId, + hash: &Block::Hash, child_info: ChildInfo, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result>; - /// Given a `BlockId`, a key and a child storage key, return the hash under the key in that + /// Given a block's `Hash`, a key and a child storage key, return the hash under the key in that /// block. fn child_storage_hash( &self, - id: &BlockId, + hash: &Block::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result>; diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs index 63aa12213b3da..1bcf21f388a62 100644 --- a/client/cli/src/commands/export_state_cmd.rs +++ b/client/cli/src/commands/export_state_cmd.rs @@ -23,7 +23,7 @@ use crate::{ }; use clap::Parser; use log::info; -use sc_client_api::{StorageProvider, UsageProvider}; +use sc_client_api::{HeaderBackend, StorageProvider, UsageProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::{fmt::Debug, io::Write, str::FromStr, sync::Arc}; @@ -57,7 +57,7 @@ impl ExportStateCmd { ) -> error::Result<()> where B: BlockT, - C: UsageProvider + StorageProvider, + C: UsageProvider + StorageProvider + HeaderBackend, BA: sc_client_api::backend::Backend, B::Hash: FromStr, ::Err: Debug, @@ -65,7 +65,11 @@ impl ExportStateCmd { { info!("Exporting raw state..."); let block_id = self.input.as_ref().map(|b| b.parse()).transpose()?; - let raw_state = sc_service::chain_ops::export_raw_state(client, block_id)?; + let hash = match block_id { + Some(id) => client.expect_block_hash_from_id(&id)?, + None => client.usage_info().chain.best_hash, + }; + let raw_state = sc_service::chain_ops::export_raw_state(client, &hash)?; input_spec.set_storage(raw_state); info!("Generating new chain spec..."); diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index b5a0d7be70f19..d0a66888ec072 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -424,13 +424,15 @@ where } /// Read current set id form a given state. - fn current_set_id(&self, id: &BlockId) -> Result { + fn current_set_id(&self, hash: &Block::Hash) -> Result { + let id = &BlockId::hash(*hash); let runtime_version = self.inner.runtime_api().version(id).map_err(|e| { ConsensusError::ClientImport(format!( "Unable to retrieve current runtime version. 
{}", e )) })?; + if runtime_version .api_version(&>::ID) .map_or(false, |v| v < 3) @@ -439,7 +441,8 @@ where // This code may be removed once warp sync to an old runtime is no longer needed. for prefix in ["GrandpaFinality", "Grandpa"] { let k = [twox_128(prefix.as_bytes()), twox_128(b"CurrentSetId")].concat(); - if let Ok(Some(id)) = self.inner.storage(id, &sc_client_api::StorageKey(k.to_vec())) + if let Ok(Some(id)) = + self.inner.storage(hash, &sc_client_api::StorageKey(k.to_vec())) { if let Ok(id) = SetId::decode(&mut id.0.as_ref()) { return Ok(id) @@ -472,13 +475,12 @@ where // finality proofs and that the state is correct and final. // So we can read the authority list and set id from the state. self.authority_set_hard_forks.clear(); - let block_id = BlockId::hash(hash); let authorities = self .inner .runtime_api() - .grandpa_authorities(&block_id) + .grandpa_authorities(&BlockId::hash(hash)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - let set_id = self.current_set_id(&block_id)?; + let set_id = self.current_set_id(&hash)?; let authority_set = AuthoritySet::new( authorities.clone(), set_id, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 42ba70b0af7e7..d6ab93f7680b0 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -145,10 +145,9 @@ where ) -> Result<()> { for block_hash in &range.hashes { let mut block_changes = StorageChangeSet { block: *block_hash, changes: Vec::new() }; - let id = BlockId::hash(*block_hash); for key in keys { let (has_changed, data) = { - let curr_data = self.client.storage(&id, key).map_err(client_err)?; + let curr_data = self.client.storage(block_hash, key).map_err(client_err)?; match last_values.get(key) { Some(prev_data) => (curr_data != *prev_data, curr_data), None => (true, curr_data), @@ -214,7 +213,7 @@ where prefix: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_keys(&BlockId::Hash(block), &prefix)) + .and_then(|block| self.client.storage_keys(&block, &prefix)) .map_err(client_err) } @@ -224,7 +223,7 @@ where prefix: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_pairs(&BlockId::Hash(block), &prefix)) + .and_then(|block| self.client.storage_pairs(&block, &prefix)) .map_err(client_err) } @@ -237,11 +236,7 @@ where ) -> std::result::Result, Error> { self.block_or_best(block) .and_then(|block| { - self.client.storage_keys_iter( - &BlockId::Hash(block), - prefix.as_ref(), - start_key.as_ref(), - ) + self.client.storage_keys_iter(&block, prefix.as_ref(), start_key.as_ref()) }) .map(|iter| iter.take(count as usize).collect()) .map_err(client_err) @@ -253,7 +248,7 @@ where key: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage(&BlockId::Hash(block), &key)) + .and_then(|block| self.client.storage(&block, &key)) .map_err(client_err) } @@ -267,14 +262,14 @@ where Err(e) => return Err(client_err(e)), }; - match self.client.storage(&BlockId::Hash(block), &key) { + match self.client.storage(&block, &key) { Ok(Some(d)) => return Ok(Some(d.0.len() as u64)), Err(e) => return Err(client_err(e)), Ok(None) => {}, } self.client - .storage_pairs(&BlockId::Hash(block), &key) + .storage_pairs(&block, &key) .map(|kv| { let item_sum = kv.iter().map(|(_, v)| v.0.len() as u64).sum::(); if item_sum > 0 { @@ -292,7 +287,7 @@ where key: StorageKey, ) -> std::result::Result, Error> { 
self.block_or_best(block) - .and_then(|block| self.client.storage_hash(&BlockId::Hash(block), &key)) + .and_then(|block| self.client.storage_hash(&block, &key)) .map_err(client_err) } @@ -418,7 +413,7 @@ where let changes = keys .into_iter() .map(|key| { - let v = self.client.storage(&BlockId::Hash(block), &key).ok().flatten(); + let v = self.client.storage(&block, &key).ok().flatten(); (key, v) }) .collect(); @@ -522,7 +517,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage_keys(&BlockId::Hash(block), &child_info, &prefix) + self.client.child_storage_keys(&block, &child_info, &prefix) }) .map_err(client_err) } @@ -543,7 +538,7 @@ where None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys_iter( - &BlockId::Hash(block), + &block, child_info, prefix.as_ref(), start_key.as_ref(), @@ -566,7 +561,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage(&BlockId::Hash(block), &child_info, &key) + self.client.child_storage(&block, &child_info, &key) }) .map_err(client_err) } @@ -589,10 +584,7 @@ where keys.into_iter() .map(move |key| { - client - .clone() - .child_storage(&BlockId::Hash(block), &child_info, &key) - .map_err(client_err) + client.clone().child_storage(&block, &child_info, &key).map_err(client_err) }) .collect() } @@ -610,7 +602,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage_hash(&BlockId::Hash(block), &child_info, &key) + self.client.child_storage_hash(&block, &child_info, &key) }) .map_err(client_err) } diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index ffe91d0d7355e..04dba387de908 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -19,25 +19,20 @@ use crate::error::Error; use sc_client_api::{StorageProvider, UsageProvider}; use sp_core::storage::{well_known_keys, ChildInfo, Storage, StorageChild, StorageKey, StorageMap}; -use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use std::{collections::HashMap, sync::Arc}; /// Export the raw state at the given `block`. If `block` is `None`, the /// best block will be used. -pub fn export_raw_state( - client: Arc, - block: Option>, -) -> Result +pub fn export_raw_state(client: Arc, hash: &B::Hash) -> Result where C: UsageProvider + StorageProvider, B: BlockT, BA: sc_client_api::backend::Backend, { - let block = block.unwrap_or_else(|| BlockId::Hash(client.usage_info().chain.best_hash)); - let empty_key = StorageKey(Vec::new()); - let mut top_storage = client.storage_pairs(&block, &empty_key)?; + let mut top_storage = client.storage_pairs(hash, &empty_key)?; let mut children_default = HashMap::new(); // Remove all default child storage roots from the top storage and collect the child storage @@ -52,10 +47,10 @@ where StorageKey(key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec()); let child_info = ChildInfo::new_default(&key.0); - let keys = client.child_storage_keys(&block, &child_info, &empty_key)?; + let keys = client.child_storage_keys(hash, &child_info, &empty_key)?; let mut pairs = StorageMap::new(); keys.into_iter().try_for_each(|k| { - if let Some(value) = client.child_storage(&block, &child_info, &k)? 
{ + if let Some(value) = client.child_storage(hash, &child_info, &k)? { pairs.insert(k.0, value.0); } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index e2fd5cda1d2f0..a7851af446b95 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -420,7 +420,8 @@ where /// Get the code at a given block. pub fn code_at(&self, id: &BlockId) -> sp_blockchain::Result> { - Ok(StorageProvider::storage(self, id, &StorageKey(well_known_keys::CODE.to_vec()))? + let hash = self.backend.blockchain().expect_block_hash_from_id(id)?; + Ok(StorageProvider::storage(self, &hash, &StorageKey(well_known_keys::CODE.to_vec()))? .expect( "None is returned if there's no value stored for the given key;\ ':code' key is always defined; qed", @@ -1402,21 +1403,19 @@ where { fn storage_keys( &self, - id: &BlockId, + hash: &Block::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - let keys = self.state_at(&hash)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); + let keys = self.state_at(hash)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); Ok(keys) } fn storage_pairs( &self, - id: &BlockId, + hash: &::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - let state = self.state_at(&hash)?; + let state = self.state_at(hash)?; let keys = state .keys(&key_prefix.0) .into_iter() @@ -1430,37 +1429,34 @@ where fn storage_keys_iter<'a>( &self, - id: &BlockId, + hash: &::Hash, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - let state = self.state_at(&hash)?; + let state = self.state_at(hash)?; let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new(state, prefix, start_key)) } fn child_storage_keys_iter<'a>( &self, - id: &BlockId, + hash: &::Hash, child_info: ChildInfo, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - let state = self.state_at(&hash)?; + let state = self.state_at(hash)?; let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new_child(state, child_info, prefix, start_key)) } fn storage( &self, - id: &BlockId, + hash: &Block::Hash, key: &StorageKey, ) -> sp_blockchain::Result> { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; Ok(self - .state_at(&hash)? + .state_at(hash)? .storage(&key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) @@ -1468,24 +1464,22 @@ where fn storage_hash( &self, - id: &BlockId, + hash: &::Hash, key: &StorageKey, ) -> sp_blockchain::Result> { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - self.state_at(&hash)? + self.state_at(hash)? .storage_hash(&key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) } fn child_storage_keys( &self, - id: &BlockId, + hash: &::Hash, child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; let keys = self - .state_at(&hash)? + .state_at(hash)? 
.child_keys(child_info, &key_prefix.0) .into_iter() .map(StorageKey) @@ -1495,13 +1489,12 @@ where fn child_storage( &self, - id: &BlockId, + hash: &::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; Ok(self - .state_at(&hash)? + .state_at(hash)? .child_storage(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) @@ -1509,12 +1502,11 @@ where fn child_storage_hash( &self, - id: &BlockId, + hash: &::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - self.state_at(&hash)? + self.state_at(hash)? .child_storage_hash(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e))) } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index c6ac1fc7d73d9..4a5b56bd14006 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1602,12 +1602,14 @@ fn storage_keys_iter_prefix_and_start_key_works() { .add_extra_child_storage(&child_info, b"third".to_vec(), vec![0u8; 32]) .build(); + let block_hash = client.info().best_hash; + let child_root = b":child_storage:default:child".to_vec(); let prefix = StorageKey(array_bytes::hex2bytes_unchecked("3a")); let child_prefix = StorageKey(b"sec".to_vec()); let res: Vec<_> = client - .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + .storage_keys_iter(&block_hash, Some(&prefix), None) .unwrap() .map(|x| x.0) .collect(); @@ -1622,7 +1624,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let res: Vec<_> = client .storage_keys_iter( - &BlockId::Number(0), + &block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), ) @@ -1633,7 +1635,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let res: Vec<_> = client .storage_keys_iter( - &BlockId::Number(0), + &block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a686561707061676573"))), ) @@ -1643,7 +1645,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { assert_eq!(res, Vec::>::new()); let res: Vec<_> = client - .child_storage_keys_iter(&BlockId::Number(0), child_info.clone(), Some(&child_prefix), None) + .child_storage_keys_iter(&block_hash, child_info.clone(), Some(&child_prefix), None) .unwrap() .map(|x| x.0) .collect(); @@ -1651,7 +1653,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let res: Vec<_> = client .child_storage_keys_iter( - &BlockId::Number(0), + &block_hash, child_info, None, Some(&StorageKey(b"second".to_vec())), @@ -1666,10 +1668,12 @@ fn storage_keys_iter_prefix_and_start_key_works() { fn storage_keys_iter_works() { let client = substrate_test_runtime_client::new(); + let block_hash = client.info().best_hash; + let prefix = StorageKey(array_bytes::hex2bytes_unchecked("")); let res: Vec<_> = client - .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + .storage_keys_iter(&block_hash, Some(&prefix), None) .unwrap() .take(9) .map(|x| array_bytes::bytes2hex("", &x.0)) @@ -1691,7 +1695,7 @@ fn storage_keys_iter_works() { let res: Vec<_> = client .storage_keys_iter( - &BlockId::Number(0), + &block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), ) @@ -1714,7 +1718,7 @@ fn storage_keys_iter_works() { let res: Vec<_> = client .storage_keys_iter( - &BlockId::Number(0), + &block_hash, Some(&prefix), 
Some(&StorageKey(array_bytes::hex2bytes_unchecked( "7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa", diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index 318c58de8a553..47cd047e158d0 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -22,7 +22,9 @@ use frame_support::weights::constants::WEIGHT_PER_NANOS; use frame_system::ConsumedWeight; use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sc_cli::{Error, Result}; -use sc_client_api::{Backend as ClientBackend, BlockBackend, StorageProvider, UsageProvider}; +use sc_client_api::{ + Backend as ClientBackend, BlockBackend, HeaderBackend, StorageProvider, UsageProvider, +}; use sp_api::{ApiExt, Core, HeaderT, ProvideRuntimeApi}; use sp_blockchain::Error::RuntimeApiError; use sp_runtime::{generic::BlockId, traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; @@ -73,7 +75,8 @@ where + ProvideRuntimeApi + StorageProvider + UsageProvider - + BlockBackend, + + BlockBackend + + HeaderBackend, C::Api: ApiExt + BlockBuilderApi, { /// Returns a new [`Self`] from the arguments. @@ -136,9 +139,10 @@ where )?; let key = StorageKey(hash); + let block_hash = self.client.expect_block_hash_from_id(block)?; let mut raw_weight = &self .client - .storage(&block, &key)? + .storage(&block_hash, &key)? .ok_or(format!("Could not find System::BlockWeight for block: {}", block))? .0[..]; diff --git a/utils/frame/benchmarking-cli/src/block/cmd.rs b/utils/frame/benchmarking-cli/src/block/cmd.rs index f5f7e15a316b3..8bac04110f7ab 100644 --- a/utils/frame/benchmarking-cli/src/block/cmd.rs +++ b/utils/frame/benchmarking-cli/src/block/cmd.rs @@ -22,6 +22,7 @@ use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; use sc_client_api::{Backend as ClientBackend, BlockBackend, StorageProvider, UsageProvider}; use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::HeaderBackend; use sp_runtime::{traits::Block as BlockT, OpaqueExtrinsic}; use clap::Parser; @@ -87,7 +88,8 @@ impl BlockCmd { + BlockBackend + ProvideRuntimeApi + StorageProvider - + UsageProvider, + + UsageProvider + + HeaderBackend, C::Api: ApiExt + BlockBuilderApi, { // Put everything in the benchmark type to have the generic types handy. 
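Illustrative sketch, not part of the patch: call sites that still hold a `BlockId` now resolve it to a hash through `HeaderBackend` before using the hash-based `StorageProvider` methods, mirroring the benchmarking change above. The helper name `code_at_id` and its exact bounds are assumptions made for the example only.

    use sc_client_api::{backend::Backend, StorageProvider};
    use sp_blockchain::HeaderBackend;
    use sp_core::storage::{well_known_keys, StorageData, StorageKey};
    use sp_runtime::{generic::BlockId, traits::Block as BlockT};

    fn code_at_id<B, BA, C>(client: &C, id: &BlockId<B>) -> sp_blockchain::Result<Option<StorageData>>
    where
        B: BlockT,
        BA: Backend<B>,
        C: HeaderBackend<B> + StorageProvider<B, BA>,
    {
        // Resolve the `BlockId` to a concrete block hash first ...
        let hash = client.expect_block_hash_from_id(id)?;
        // ... then query storage with the new hash-based API.
        client.storage(&hash, &StorageKey(well_known_keys::CODE.to_vec()))
    }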
diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index 86041d9517de6..32fd5da7f95f0 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -191,9 +191,9 @@ impl StorageCmd { B: BlockT + Debug, BA: ClientBackend, { - let block = BlockId::Hash(client.usage_info().chain.best_hash); + let hash = client.usage_info().chain.best_hash; let empty_prefix = StorageKey(Vec::new()); - let mut keys = client.storage_keys(&block, &empty_prefix)?; + let mut keys = client.storage_keys(&hash, &empty_prefix)?; let (mut rng, _) = new_rng(None); keys.shuffle(&mut rng); @@ -201,7 +201,7 @@ impl StorageCmd { info!("Warmup round {}/{}", i + 1, self.params.warmups); for key in keys.as_slice() { let _ = client - .storage(&block, &key) + .storage(&hash, &key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty"); } diff --git a/utils/frame/benchmarking-cli/src/storage/read.rs b/utils/frame/benchmarking-cli/src/storage/read.rs index 5e8a310ea5c5a..2df7e697039e8 100644 --- a/utils/frame/benchmarking-cli/src/storage/read.rs +++ b/utils/frame/benchmarking-cli/src/storage/read.rs @@ -18,10 +18,7 @@ use sc_cli::Result; use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider}; use sp_core::storage::StorageKey; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, -}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use log::info; use rand::prelude::*; @@ -41,12 +38,12 @@ impl StorageCmd { <::Header as HeaderT>::Number: From, { let mut record = BenchRecord::default(); - let block = BlockId::Hash(client.usage_info().chain.best_hash); + let best_hash = client.usage_info().chain.best_hash; - info!("Preparing keys from block {}", block); + info!("Preparing keys from block {}", best_hash); // Load all keys and randomly shuffle them. let empty_prefix = StorageKey(Vec::new()); - let mut keys = client.storage_keys(&block, &empty_prefix)?; + let mut keys = client.storage_keys(&best_hash, &empty_prefix)?; let (mut rng, _) = new_rng(None); keys.shuffle(&mut rng); @@ -58,7 +55,7 @@ impl StorageCmd { match (self.params.include_child_trees, self.is_child_key(key.clone().0)) { (true, Some(info)) => { // child tree key - let child_keys = client.child_storage_keys(&block, &info, &empty_prefix)?; + let child_keys = client.child_storage_keys(&best_hash, &info, &empty_prefix)?; for ck in child_keys { child_nodes.push((ck.clone(), info.clone())); } @@ -67,7 +64,7 @@ impl StorageCmd { // regular key let start = Instant::now(); let v = client - .storage(&block, &key) + .storage(&best_hash, &key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty")?; record.append(v.0.len(), start.elapsed())?; @@ -82,7 +79,7 @@ impl StorageCmd { for (key, info) in child_nodes.as_slice() { let start = Instant::now(); let v = client - .child_storage(&block, info, key) + .child_storage(&best_hash, info, key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty")?; record.append(v.0.len(), start.elapsed())?; diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index 0ef2a2f9ae113..2ee37a5619136 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -57,12 +57,12 @@ impl StorageCmd { // Store the time that it took to write each value. 
let mut record = BenchRecord::default(); - let block = BlockId::Hash(client.usage_info().chain.best_hash); - let header = client.header(block)?.ok_or("Header not found")?; + let best_hash = client.usage_info().chain.best_hash; + let header = client.header(BlockId::Hash(best_hash))?.ok_or("Header not found")?; let original_root = *header.state_root(); let trie = DbStateBuilder::::new(storage.clone(), original_root).build(); - info!("Preparing keys from block {}", block); + info!("Preparing keys from block {}", best_hash); // Load all KV pairs and randomly shuffle them. let mut kvs = trie.pairs(); let (mut rng, _) = new_rng(None); @@ -77,7 +77,7 @@ impl StorageCmd { match (self.params.include_child_trees, self.is_child_key(k.to_vec())) { (true, Some(info)) => { let child_keys = - client.child_storage_keys_iter(&block, info.clone(), None, None)?; + client.child_storage_keys_iter(&best_hash, info.clone(), None, None)?; for ck in child_keys { child_nodes.push((ck.clone(), info.clone())); } @@ -124,7 +124,7 @@ impl StorageCmd { for (key, info) in child_nodes { if let Some(original_v) = client - .child_storage(&block, &info.clone(), &key) + .child_storage(&best_hash, &info.clone(), &key) .expect("Checked above to exist") { let mut new_v = vec![0; original_v.0.len()]; From 487ac5cf3958380a15b5edc599c13ddbb62e76b1 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 18 Oct 2022 17:41:19 +0200 Subject: [PATCH 257/348] Use `ArgAction::Set` for enable-offchain-indexing flag (#12521) * Use ArgAction::Set for enable-offchain-indexing * Provide default value for `enable-offchain-indexing` --- client/cli/src/params/offchain_worker_params.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/cli/src/params/offchain_worker_params.rs b/client/cli/src/params/offchain_worker_params.rs index 8cff48fad600c..a660320fde255 100644 --- a/client/cli/src/params/offchain_worker_params.rs +++ b/client/cli/src/params/offchain_worker_params.rs @@ -23,7 +23,7 @@ //! targeted at handling input parameter parsing providing //! a reasonable abstraction. -use clap::Args; +use clap::{ArgAction, Args}; use sc_network::config::Role; use sc_service::config::OffchainWorkerConfig; @@ -48,7 +48,7 @@ pub struct OffchainWorkerParams { /// /// Enables a runtime to write directly to a offchain workers /// DB during block import. 
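	// Illustrative note, not part of this patch: with `ArgAction::Set` plus an explicit
	// default, the flag now takes a boolean value, e.g. `--enable-offchain-indexing true`,
	// rather than acting as a bare presence switch.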
- #[arg(long = "enable-offchain-indexing", value_name = "ENABLE_OFFCHAIN_INDEXING")] + #[arg(long = "enable-offchain-indexing", value_name = "ENABLE_OFFCHAIN_INDEXING", default_value_t = false, action = ArgAction::Set)] pub indexing_enabled: bool, } From 49734dd1d72a00b9d3b87ba397661a63e0e17af3 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Tue, 18 Oct 2022 18:39:19 +0200 Subject: [PATCH 258/348] frame/utils: introduce `substrate-rpc-client` crate for RPC utils (#12212) * hack together a PoC * Update utils/frame/rpc-utils/Cargo.toml Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update utils/frame/rpc-utils/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * rpc_utils -> substrate_rpc_client * try runtime: remove keep connection * make CI happy * cargo fmt * fix ci * update lock file * fix * fix Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: kianenigma --- Cargo.lock | 20 +- Cargo.toml | 1 + bin/node/cli/Cargo.toml | 2 +- bin/node/cli/tests/common.rs | 9 +- frame/bags-list/remote-tests/src/migration.rs | 11 +- frame/bags-list/remote-tests/src/snapshot.rs | 12 +- frame/bags-list/remote-tests/src/try_state.rs | 11 +- frame/state-trie-migration/src/lib.rs | 11 +- utils/frame/remote-externalities/Cargo.toml | 4 +- utils/frame/remote-externalities/src/lib.rs | 137 ++++----- .../frame/remote-externalities/src/rpc_api.rs | 149 ---------- utils/frame/rpc/client/Cargo.toml | 25 ++ utils/frame/rpc/client/src/lib.rs | 265 ++++++++++++++++++ utils/frame/try-runtime/cli/Cargo.toml | 2 +- .../cli/src/commands/execute_block.rs | 20 +- .../cli/src/commands/follow_chain.rs | 236 ++-------------- .../cli/src/commands/offchain_worker.rs | 9 +- .../cli/src/commands/on_runtime_upgrade.rs | 1 + utils/frame/try-runtime/cli/src/lib.rs | 9 +- 19 files changed, 437 insertions(+), 497 deletions(-) delete mode 100644 utils/frame/remote-externalities/src/rpc_api.rs create mode 100644 utils/frame/rpc/client/Cargo.toml create mode 100644 utils/frame/rpc/client/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 7ea4e3e5c048e..397846693e907 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4503,7 +4503,6 @@ dependencies = [ "platforms", "rand 0.8.5", "regex", - "remote-externalities", "sc-authority-discovery", "sc-basic-authorship", "sc-block-builder", @@ -4551,6 +4550,7 @@ dependencies = [ "sp-trie", "substrate-build-script-utils", "substrate-frame-cli", + "substrate-rpc-client", "tempfile", "tokio", "try-runtime-cli", @@ -7278,7 +7278,6 @@ version = "0.10.0-dev" dependencies = [ "env_logger", "frame-support", - "jsonrpsee", "log", "pallet-elections-phragmen", "parity-scale-codec", @@ -7288,6 +7287,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-version", + "substrate-rpc-client", "tokio", ] @@ -10349,6 +10349,20 @@ dependencies = [ "tokio", ] +[[package]] +name = "substrate-rpc-client" +version = "0.10.0-dev" +dependencies = [ + "async-trait", + "jsonrpsee", + "log", + "sc-rpc-api", + "serde", + "sp-core", + "sp-runtime", + "tokio", +] + [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" @@ -11028,7 +11042,6 @@ version = "0.10.0-dev" dependencies = [ "clap 4.0.11", "frame-try-runtime", - "jsonrpsee", "log", "parity-scale-codec", "remote-externalities", @@ -11045,6 +11058,7 @@ dependencies = [ "sp-state-machine", "sp-version", "sp-weights", + "substrate-rpc-client", "tokio", "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index e203cbbee7e0d..d3c801fc2c7be 100644 --- a/Cargo.toml 
+++ b/Cargo.toml @@ -232,6 +232,7 @@ members = [ "utils/frame/rpc/system", "utils/frame/generate-bags", "utils/frame/generate-bags/node-runtime", + "utils/frame/rpc/client", "utils/prometheus", "utils/wasm-builder", ] diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 25e7ef095ed3e..4bf991f49320c 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -132,7 +132,7 @@ soketto = "0.7.1" criterion = { version = "0.3.5", features = ["async_tokio"] } tokio = { version = "1.17.0", features = ["macros", "time", "parking_lot"] } wait-timeout = "0.2" -remote-externalities = { path = "../../../utils/frame/remote-externalities" } +substrate-rpc-client = { path = "../../../utils/frame/rpc/client" } pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" } [build-dependencies] diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 3b83f4339611d..358c09779d59a 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -23,8 +23,7 @@ use nix::{ sys::signal::{kill, Signal::SIGINT}, unistd::Pid, }; -use node_primitives::Block; -use remote_externalities::rpc_api::RpcService; +use node_primitives::{Hash, Header}; use std::{ io::{BufRead, BufReader, Read}, ops::{Deref, DerefMut}, @@ -69,12 +68,14 @@ pub async fn wait_n_finalized_blocks( /// Wait for at least n blocks to be finalized from a specified node pub async fn wait_n_finalized_blocks_from(n: usize, url: &str) { + use substrate_rpc_client::{ws_client, ChainApi}; + let mut built_blocks = std::collections::HashSet::new(); let mut interval = tokio::time::interval(Duration::from_secs(2)); - let rpc_service = RpcService::new(url, false).await.unwrap(); + let rpc = ws_client(url).await.unwrap(); loop { - if let Ok(block) = rpc_service.get_finalized_head::().await { + if let Ok(block) = ChainApi::<(), Hash, Header, ()>::finalized_head(&rpc).await { built_blocks.insert(block); if built_blocks.len() > n { break diff --git a/frame/bags-list/remote-tests/src/migration.rs b/frame/bags-list/remote-tests/src/migration.rs index 675dfbe072670..b013472b4c90e 100644 --- a/frame/bags-list/remote-tests/src/migration.rs +++ b/frame/bags-list/remote-tests/src/migration.rs @@ -24,14 +24,15 @@ use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; /// Test voter bags migration. `currency_unit` is the number of planks per the the runtimes `UNITS` /// (i.e. number of decimal places per DOT, KSM etc) -pub async fn execute< - Runtime: RuntimeT, - Block: BlockT + DeserializeOwned, ->( +pub async fn execute( currency_unit: u64, currency_name: &'static str, ws_url: String, -) { +) where + Runtime: RuntimeT, + Block: BlockT, + Block::Header: DeserializeOwned, +{ let mut ext = Builder::::new() .mode(Mode::Online(OnlineConfig { transport: ws_url.to_string().into(), diff --git a/frame/bags-list/remote-tests/src/snapshot.rs b/frame/bags-list/remote-tests/src/snapshot.rs index 655de10c4af9b..cfe065924bd92 100644 --- a/frame/bags-list/remote-tests/src/snapshot.rs +++ b/frame/bags-list/remote-tests/src/snapshot.rs @@ -22,14 +22,12 @@ use remote_externalities::{Builder, Mode, OnlineConfig}; use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; /// Execute create a snapshot from pallet-staking. 
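// Illustrative note, not part of this patch: the remote-test helpers now only require
// `Block::Header: DeserializeOwned` in a `where` clause, since it is the header (fetched and
// decoded over RPC by remote-externalities) that must be deserializable, not the whole block.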
-pub async fn execute< +pub async fn execute(voter_limit: Option, currency_unit: u64, ws_url: String) +where Runtime: crate::RuntimeT, - Block: BlockT + DeserializeOwned, ->( - voter_limit: Option, - currency_unit: u64, - ws_url: String, -) { + Block: BlockT, + Block::Header: DeserializeOwned, +{ use frame_support::storage::generator::StorageMap; let mut ext = Builder::::new() diff --git a/frame/bags-list/remote-tests/src/try_state.rs b/frame/bags-list/remote-tests/src/try_state.rs index 9817ef4ceb9e4..d3fb63f045a64 100644 --- a/frame/bags-list/remote-tests/src/try_state.rs +++ b/frame/bags-list/remote-tests/src/try_state.rs @@ -25,14 +25,15 @@ use remote_externalities::{Builder, Mode, OnlineConfig}; use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; /// Execute the sanity check of the bags-list. -pub async fn execute< - Runtime: crate::RuntimeT, - Block: BlockT + DeserializeOwned, ->( +pub async fn execute( currency_unit: u64, currency_name: &'static str, ws_url: String, -) { +) where + Runtime: crate::RuntimeT, + Block: BlockT, + Block::Header: DeserializeOwned, +{ let mut ext = Builder::::new() .mode(Mode::Online(OnlineConfig { transport: ws_url.to_string().into(), diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 3f81d3c211a95..5255d4f6f3800 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1640,13 +1640,12 @@ pub(crate) mod remote_tests { /// /// This will print some very useful statistics, make sure [`crate::LOG_TARGET`] is enabled. #[allow(dead_code)] - pub(crate) async fn run_with_limits< + pub(crate) async fn run_with_limits(limits: MigrationLimits, mode: Mode) + where Runtime: crate::Config, - Block: BlockT + serde::de::DeserializeOwned, - >( - limits: MigrationLimits, - mode: Mode, - ) { + Block: BlockT, + Block::Header: serde::de::DeserializeOwned, + { let mut ext = remote_externalities::Builder::::new() .mode(mode) .state_version(sp_core::storage::StateVersion::V0) diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 3121157df68d8..3d7471bf4d680 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -6,7 +6,7 @@ edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" -description = "An externalities provided environemnt that can load itself from remote nodes or cache files" +description = "An externalities provided environment that can load itself from remote nodes or cached files" readme = "README.md" [package.metadata.docs.rs] @@ -15,7 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } env_logger = "0.9" -jsonrpsee = { version = "0.15.1", features = ["ws-client", "macros"] } log = "0.4.17" serde = "1.0.136" serde_json = "1.0" @@ -24,6 +23,7 @@ sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-io = { version = "6.0.0", path = "../../../primitives/io" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-version = { version = "5.0.0", path = "../../../primitives/version" } +substrate-rpc-client = { path = "../rpc/client" } [dev-dependencies] tokio = { version = "1.17.0", features = ["macros", "rt-multi-thread"] } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 83481e745f5ee..86cfc767bf3b5 100644 --- 
a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -22,13 +22,6 @@ use codec::{Decode, Encode}; -use jsonrpsee::{ - core::{client::ClientT, Error as RpcError}, - proc_macros::rpc, - rpc_params, - ws_client::{WsClient, WsClientBuilder}, -}; - use log::*; use serde::de::DeserializeOwned; use sp_core::{ @@ -46,8 +39,7 @@ use std::{ path::{Path, PathBuf}, sync::Arc, }; - -pub mod rpc_api; +use substrate_rpc_client::{rpc_params, ws_client, ChainApi, ClientT, StateApi, WsClient}; type KeyValue = (StorageKey, StorageData); type TopKeyValues = Vec; @@ -58,40 +50,6 @@ const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io:443"; const BATCH_SIZE: usize = 1000; const PAGE: u32 = 1000; -#[rpc(client)] -pub trait RpcApi { - #[method(name = "childstate_getKeys")] - fn child_get_keys( - &self, - child_key: PrefixedStorageKey, - prefix: StorageKey, - hash: Option, - ) -> Result, RpcError>; - - #[method(name = "childstate_getStorage")] - fn child_get_storage( - &self, - child_key: PrefixedStorageKey, - prefix: StorageKey, - hash: Option, - ) -> Result; - - #[method(name = "state_getStorage")] - fn get_storage(&self, prefix: StorageKey, hash: Option) -> Result; - - #[method(name = "state_getKeysPaged")] - fn get_keys_paged( - &self, - prefix: Option, - count: u32, - start_key: Option, - hash: Option, - ) -> Result, RpcError>; - - #[method(name = "chain_getFinalizedHead")] - fn finalized_head(&self) -> Result; -} - /// The execution mode. #[derive(Clone)] pub enum Mode { @@ -140,14 +98,10 @@ impl Transport { if let Self::Uri(uri) = self { log::debug!(target: LOG_TARGET, "initializing remote client to {:?}", uri); - let ws_client = WsClientBuilder::default() - .max_request_body_size(u32::MAX) - .build(&uri) - .await - .map_err(|e| { - log::error!(target: LOG_TARGET, "error: {:?}", e); - "failed to build ws client" - })?; + let ws_client = ws_client(uri).await.map_err(|e| { + log::error!(target: LOG_TARGET, "error: {:?}", e); + "failed to build ws client" + })?; *self = Self::RemoteClient(Arc::new(ws_client)) } @@ -258,7 +212,7 @@ pub struct Builder { // NOTE: ideally we would use `DefaultNoBound` here, but not worth bringing in frame-support for // that. -impl Default for Builder { +impl Default for Builder { fn default() -> Self { Self { mode: Default::default(), @@ -272,7 +226,7 @@ impl Default for Builder { } // Mode methods -impl Builder { +impl Builder { fn as_online(&self) -> &OnlineConfig { match &self.mode { Mode::Online(config) => config, @@ -291,26 +245,38 @@ impl Builder { } // RPC methods -impl Builder { +impl Builder +where + B::Hash: DeserializeOwned, + B::Header: DeserializeOwned, +{ async fn rpc_get_storage( &self, key: StorageKey, maybe_at: Option, ) -> Result { trace!(target: LOG_TARGET, "rpc: get_storage"); - self.as_online().rpc_client().get_storage(key, maybe_at).await.map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); - "rpc get_storage failed." - }) + match self.as_online().rpc_client().storage(key, maybe_at).await { + Ok(Some(res)) => Ok(res), + Ok(None) => Err("get_storage not found"), + Err(e) => { + error!(target: LOG_TARGET, "Error = {:?}", e); + Err("rpc get_storage failed.") + }, + } } /// Get the latest finalized head. async fn rpc_get_head(&self) -> Result { trace!(target: LOG_TARGET, "rpc: finalized_head"); - self.as_online().rpc_client().finalized_head().await.map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); - "rpc finalized_head failed." - }) + + // sadly this pretty much unreadable... 
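        // Added for clarity, not in the patch: `ChainApi` is the `ChainApiClient` from
        // `sc-rpc-api`, re-exported by `substrate-rpc-client` and generic over
        // `(Number, Hash, Header, SignedBlock)`; `finalized_head` only needs the hash and
        // header types, so the unused slots are filled with `()`.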
+ ChainApi::<(), _, B::Header, ()>::finalized_head(self.as_online().rpc_client()) + .await + .map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc finalized_head failed." + }) } /// Get all the keys at `prefix` at `hash` using the paged, safe RPC methods. @@ -325,7 +291,7 @@ impl Builder { let page = self .as_online() .rpc_client() - .get_keys_paged(Some(prefix.clone()), PAGE, last_key.clone(), Some(at)) + .storage_keys_paged(Some(prefix.clone()), PAGE, last_key.clone(), Some(at)) .await .map_err(|e| { error!(target: LOG_TARGET, "Error = {:?}", e); @@ -471,19 +437,19 @@ impl Builder { child_prefix: StorageKey, at: B::Hash, ) -> Result, &'static str> { - let child_keys = self - .as_online() - .rpc_client() - .child_get_keys( - PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), - child_prefix, - Some(at), - ) - .await - .map_err(|e| { - error!(target: LOG_TARGET, "Error = {:?}", e); - "rpc child_get_keys failed." - })?; + // This is deprecated and will generate a warning which causes the CI to fail. + #[allow(warnings)] + let child_keys = substrate_rpc_client::ChildStateApi::storage_keys( + self.as_online().rpc_client(), + PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), + child_prefix, + Some(at), + ) + .await + .map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc child_get_keys failed." + })?; debug!( target: LOG_TARGET, @@ -497,7 +463,11 @@ impl Builder { } // Internal methods -impl Builder { +impl Builder +where + B::Hash: DeserializeOwned, + B::Header: DeserializeOwned, +{ /// Save the given data to the top keys snapshot. fn save_top_snapshot(&self, data: &[KeyValue], path: &PathBuf) -> Result<(), &'static str> { let mut path = path.clone(); @@ -726,12 +696,13 @@ impl Builder { let child_kv = match self.mode.clone() { Mode::Online(_) => self.load_child_remote_and_maybe_save(&top_kv).await?, - Mode::OfflineOrElseOnline(offline_config, _) => + Mode::OfflineOrElseOnline(offline_config, _) => { if let Ok(kv) = self.load_child_snapshot(&offline_config.state_snapshot.path) { kv } else { self.load_child_remote_and_maybe_save(&top_kv).await? - }, + } + }, Mode::Offline(ref config) => self .load_child_snapshot(&config.state_snapshot.path) .map_err(|why| { @@ -749,7 +720,7 @@ impl Builder { } // Public methods -impl Builder { +impl Builder { /// Create a new builder. pub fn new() -> Self { Default::default() @@ -824,7 +795,13 @@ impl Builder { } self } +} +// Public methods +impl Builder +where + B::Header: DeserializeOwned, +{ /// Build the test externalities. pub async fn build(self) -> Result { let state_version = self.state_version; diff --git a/utils/frame/remote-externalities/src/rpc_api.rs b/utils/frame/remote-externalities/src/rpc_api.rs deleted file mode 100644 index 3ea30a30221a2..0000000000000 --- a/utils/frame/remote-externalities/src/rpc_api.rs +++ /dev/null @@ -1,149 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2021-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! WS RPC API for one off RPC calls to a substrate node. -// TODO: Consolidate one off RPC calls https://github.com/paritytech/substrate/issues/8988 - -use jsonrpsee::{ - core::client::{Client, ClientT}, - rpc_params, - types::ParamsSer, - ws_client::{WsClient, WsClientBuilder}, -}; -use serde::de::DeserializeOwned; -use sp_runtime::{generic::SignedBlock, traits::Block as BlockT}; -use std::sync::Arc; - -enum RpcCall { - GetHeader, - GetFinalizedHead, - GetBlock, - GetRuntimeVersion, -} - -impl RpcCall { - fn as_str(&self) -> &'static str { - match self { - RpcCall::GetHeader => "chain_getHeader", - RpcCall::GetFinalizedHead => "chain_getFinalizedHead", - RpcCall::GetBlock => "chain_getBlock", - RpcCall::GetRuntimeVersion => "state_getRuntimeVersion", - } - } -} - -/// General purpose method for making RPC calls. -async fn make_request<'a, T: DeserializeOwned>( - client: &Arc, - call: RpcCall, - params: Option>, -) -> Result { - client - .request::(call.as_str(), params) - .await - .map_err(|e| format!("{} request failed: {:?}", call.as_str(), e)) -} - -enum ConnectionPolicy { - Reuse(Arc), - Reconnect, -} - -/// Simple RPC service that is capable of keeping the connection. -/// -/// Service will connect to `uri` for the first time already during initialization. -/// -/// Be careful with reusing the connection in a multithreaded environment. -pub struct RpcService { - uri: String, - policy: ConnectionPolicy, -} - -impl RpcService { - /// Creates a new RPC service. If `keep_connection`, then connects to `uri` right away. - pub async fn new>(uri: S, keep_connection: bool) -> Result { - let policy = if keep_connection { - ConnectionPolicy::Reuse(Arc::new(Self::build_client(uri.as_ref()).await?)) - } else { - ConnectionPolicy::Reconnect - }; - Ok(Self { uri: uri.as_ref().to_string(), policy }) - } - - /// Returns the address at which requests are sent. - pub fn uri(&self) -> String { - self.uri.clone() - } - - /// Build a websocket client that connects to `self.uri`. - async fn build_client>(uri: S) -> Result { - WsClientBuilder::default() - .max_request_body_size(u32::MAX) - .build(uri) - .await - .map_err(|e| format!("`WsClientBuilder` failed to build: {:?}", e)) - } - - /// Generic method for making RPC requests. - async fn make_request<'a, T: DeserializeOwned>( - &self, - call: RpcCall, - params: Option>, - ) -> Result { - match self.policy { - // `self.keep_connection` must have been `true`. - ConnectionPolicy::Reuse(ref client) => make_request(client, call, params).await, - ConnectionPolicy::Reconnect => { - let client = Arc::new(Self::build_client(&self.uri).await?); - make_request(&client, call, params).await - }, - } - } - - /// Get the header of the block identified by `at`. - pub async fn get_header(&self, at: Block::Hash) -> Result - where - Block: BlockT, - Block::Header: DeserializeOwned, - { - self.make_request(RpcCall::GetHeader, rpc_params!(at)).await - } - - /// Get the finalized head. - pub async fn get_finalized_head(&self) -> Result { - self.make_request(RpcCall::GetFinalizedHead, None).await - } - - /// Get the signed block identified by `at`. - pub async fn get_block( - &self, - at: Block::Hash, - ) -> Result { - Ok(self - .make_request::>(RpcCall::GetBlock, rpc_params!(at)) - .await? - .block) - } - - /// Get the runtime version of a given chain. 
- pub async fn get_runtime_version( - &self, - at: Option, - ) -> Result { - self.make_request(RpcCall::GetRuntimeVersion, rpc_params!(at)).await - } -} diff --git a/utils/frame/rpc/client/Cargo.toml b/utils/frame/rpc/client/Cargo.toml new file mode 100644 index 0000000000000..80aa60f199f1f --- /dev/null +++ b/utils/frame/rpc/client/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "substrate-rpc-client" +version = "0.10.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Shared JSON-RPC client" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +jsonrpsee = { version = "0.15.1", features = ["ws-client"] } +sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } +async-trait = "0.1.57" +serde = "1" +sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } +log = "0.4" + +[dev-dependencies] +tokio = { version = "1.17.0", features = ["macros", "rt-multi-thread", "sync"] } +sp-core = { path = "../../../../primitives/core" } \ No newline at end of file diff --git a/utils/frame/rpc/client/src/lib.rs b/utils/frame/rpc/client/src/lib.rs new file mode 100644 index 0000000000000..254cc193c0e67 --- /dev/null +++ b/utils/frame/rpc/client/src/lib.rs @@ -0,0 +1,265 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Shared JSON-RPC client related code and abstractions. +//! +//! It exposes a `WebSocket JSON-RPC` client that implements the RPC interface in [`sc-rpc-api`] +//! along with some abstractions. +//! +//! ## Usage +//! +//! ```no_run +//! # use substrate_rpc_client::{ws_client, StateApi}; +//! # use sp_core::{H256, storage::StorageKey}; +//! +//! #[tokio::main] +//! async fn main() { +//! +//! let client = ws_client("ws://127.0.0.1:9944").await.unwrap(); +//! client.storage(StorageKey(vec![]), Some(H256::zero())).await.unwrap(); +//! +//! // if all type params are not known you need to provide type params +//! StateApi::::storage(&client, StorageKey(vec![]), None).await.unwrap(); +//! } +//! ``` + +use async_trait::async_trait; +use serde::de::DeserializeOwned; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use std::collections::VecDeque; + +pub use jsonrpsee::{ + core::client::{ClientT, Subscription, SubscriptionClientT}, + rpc_params, + ws_client::{WsClient, WsClientBuilder}, +}; +pub use sc_rpc_api::{ + author::AuthorApiClient as AuthorApi, chain::ChainApiClient as ChainApi, + child_state::ChildStateApiClient as ChildStateApi, dev::DevApiClient as DevApi, + offchain::OffchainApiClient as OffchainApi, state::StateApiClient as StateApi, + system::SystemApiClient as SystemApi, +}; + +/// Create a new `WebSocket` connection with shared settings. 
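// Added for clarity, not in the patch: these shared settings favour heavy consumers such as
// remote-externalities, which sends very large requests and receives large responses; hence
// the effectively unlimited body size and the generous request and connection timeouts.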
+pub async fn ws_client(uri: impl AsRef) -> Result { + WsClientBuilder::default() + .max_request_body_size(u32::MAX) + .request_timeout(std::time::Duration::from_secs(60 * 10)) + .connection_timeout(std::time::Duration::from_secs(60)) + .max_notifs_per_subscription(1024) + .build(uri) + .await + .map_err(|e| format!("`WsClientBuilder` failed to build: {:?}", e)) +} + +/// Abstraction over RPC calling for headers. +#[async_trait] +pub trait HeaderProvider +where + Block::Header: HeaderT, +{ + /// Awaits for the header of the block with hash `hash`. + async fn get_header(&self, hash: Block::Hash) -> Block::Header; +} + +#[async_trait] +impl HeaderProvider for WsClient +where + Block::Header: DeserializeOwned, +{ + async fn get_header(&self, hash: Block::Hash) -> Block::Header { + ChainApi::<(), Block::Hash, Block::Header, ()>::header(self, Some(hash)) + .await + .unwrap() + .unwrap() + } +} + +/// Abstraction over RPC subscription for finalized headers. +#[async_trait] +pub trait HeaderSubscription +where + Block::Header: HeaderT, +{ + /// Await for the next finalized header from the subscription. + /// + /// Returns `None` if either the subscription has been closed or there was an error when reading + /// an object from the client. + async fn next_header(&mut self) -> Option; +} + +#[async_trait] +impl HeaderSubscription for Subscription +where + Block::Header: DeserializeOwned, +{ + async fn next_header(&mut self) -> Option { + match self.next().await { + Some(Ok(header)) => Some(header), + None => { + log::warn!("subscription closed"); + None + }, + Some(Err(why)) => { + log::warn!("subscription returned error: {:?}. Probably decoding has failed.", why); + None + }, + } + } +} + +/// Stream of all finalized headers. +/// +/// Returned headers are guaranteed to be ordered. There are no missing headers (even if some of +/// them lack justification). +pub struct FinalizedHeaders< + 'a, + Block: BlockT, + HP: HeaderProvider, + HS: HeaderSubscription, +> { + header_provider: &'a HP, + subscription: HS, + fetched_headers: VecDeque, + last_returned: Option<::Hash>, +} + +impl<'a, Block: BlockT, HP: HeaderProvider, HS: HeaderSubscription> + FinalizedHeaders<'a, Block, HP, HS> +where + ::Header: DeserializeOwned, +{ + pub fn new(header_provider: &'a HP, subscription: HS) -> Self { + Self { + header_provider, + subscription, + fetched_headers: VecDeque::new(), + last_returned: None, + } + } + + /// Reads next finalized header from the subscription. If some headers (without justification) + /// have been skipped, fetches them as well. Returns number of headers that have been fetched. + /// + /// All fetched headers are stored in `self.fetched_headers`. + async fn fetch(&mut self) -> usize { + let last_finalized = match self.subscription.next_header().await { + Some(header) => header, + None => return 0, + }; + + self.fetched_headers.push_front(last_finalized.clone()); + + let mut last_finalized_parent = *last_finalized.parent_hash(); + let last_returned = self.last_returned.unwrap_or(last_finalized_parent); + + while last_finalized_parent != last_returned { + let parent_header = self.header_provider.get_header(last_finalized_parent).await; + self.fetched_headers.push_front(parent_header.clone()); + last_finalized_parent = *parent_header.parent_hash(); + } + + self.fetched_headers.len() + } + + /// Get the next finalized header. 
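	// Added for clarity, not in the patch: because `fetch` walks parent hashes from the newest
	// finalized header back to the last returned one, `next` yields every finalized header in
	// order, even those the subscription skipped.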
+ pub async fn next(&mut self) -> Option { + if self.fetched_headers.is_empty() { + self.fetch().await; + } + + if let Some(header) = self.fetched_headers.pop_front() { + self.last_returned = Some(header.hash()); + Some(header) + } else { + None + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_runtime::testing::{Block as TBlock, ExtrinsicWrapper, Header, H256}; + use std::sync::Arc; + use tokio::sync::Mutex; + + type Block = TBlock>; + type BlockNumber = u64; + type Hash = H256; + + struct MockHeaderProvider(pub Arc>>); + + fn headers() -> Vec
<Header>
{ + let mut headers = vec![Header::new_from_number(0)]; + for n in 1..11 { + headers.push(Header { + parent_hash: headers.last().unwrap().hash(), + ..Header::new_from_number(n) + }) + } + headers + } + + #[async_trait] + impl HeaderProvider for MockHeaderProvider { + async fn get_header(&self, _hash: Hash) -> Header { + let height = self.0.lock().await.pop_front().unwrap(); + headers()[height as usize].clone() + } + } + + struct MockHeaderSubscription(pub VecDeque); + + #[async_trait] + impl HeaderSubscription for MockHeaderSubscription { + async fn next_header(&mut self) -> Option
{ + self.0.pop_front().map(|h| headers()[h as usize].clone()) + } + } + + #[tokio::test] + async fn finalized_headers_works_when_every_block_comes_from_subscription() { + let heights = vec![4, 5, 6, 7]; + + let provider = MockHeaderProvider(Default::default()); + let subscription = MockHeaderSubscription(heights.clone().into()); + let mut headers = FinalizedHeaders::new(&provider, subscription); + + for h in heights { + assert_eq!(h, headers.next().await.unwrap().number); + } + assert_eq!(None, headers.next().await); + } + + #[tokio::test] + async fn finalized_headers_come_from_subscription_and_provider_if_in_need() { + let all_heights = 3..11; + let heights_in_subscription = vec![3, 4, 6, 10]; + // Consecutive headers will be requested in the reversed order. + let heights_not_in_subscription = vec![5, 9, 8, 7]; + + let provider = MockHeaderProvider(Arc::new(Mutex::new(heights_not_in_subscription.into()))); + let subscription = MockHeaderSubscription(heights_in_subscription.into()); + let mut headers = FinalizedHeaders::new(&provider, subscription); + + for h in all_heights { + assert_eq!(h, headers.next().await.unwrap().number); + } + assert_eq!(None, headers.next().await); + } +} diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 8a9d5a5f8979a..24d64d4aa4373 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -19,7 +19,6 @@ parity-scale-codec = "3.0.0" serde = "1.0.136" zstd = { version = "0.11.2", default-features = false } remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities" } -jsonrpsee = { version = "0.15.1", default-features = false, features = ["ws-client"] } sc-chain-spec = { version = "4.0.0-dev", path = "../../../../client/chain-spec" } sc-cli = { version = "0.10.0-dev", path = "../../../../client/cli" } sc-executor = { version = "0.10.0-dev", path = "../../../../client/executor" } @@ -33,6 +32,7 @@ sp-state-machine = { version = "0.12.0", path = "../../../../primitives/state-ma sp-version = { version = "5.0.0", path = "../../../../primitives/version" } sp-weights = { version = "4.0.0", path = "../../../../primitives/weights" } frame-try-runtime = { path = "../../../../frame/try-runtime" } +substrate-rpc-client = { path = "../../rpc/client" } [dev-dependencies] tokio = "1.17.0" diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index df8c51b5a92d4..56d88b9cb8919 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -20,11 +20,11 @@ use crate::{ state_machine_call_with_proof, SharedParams, State, LOG_TARGET, }; use parity_scale_codec::Encode; -use remote_externalities::rpc_api; use sc_service::{Configuration, NativeExecutionDispatch}; use sp_core::storage::well_known_keys; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::{fmt::Debug, str::FromStr}; +use substrate_rpc_client::{ws_client, ChainApi}; /// Configurations of the [`Command::ExecuteBlock`]. 
/// @@ -84,10 +84,11 @@ pub struct ExecuteBlockCmd { impl ExecuteBlockCmd { async fn block_at(&self, ws_uri: String) -> sc_cli::Result where - Block::Hash: FromStr, + Block::Hash: FromStr + serde::de::DeserializeOwned, ::Err: Debug, + Block::Header: serde::de::DeserializeOwned, { - let rpc_service = rpc_api::RpcService::new(ws_uri, false).await?; + let rpc = ws_client(&ws_uri).await?; match (&self.block_at, &self.state) { (Some(block_at), State::Snap { .. }) => hash_of::(block_at), @@ -100,7 +101,9 @@ impl ExecuteBlockCmd { target: LOG_TARGET, "No --block-at or --at provided, using the latest finalized block instead" ); - rpc_service.get_finalized_head::().await.map_err(Into::into) + ChainApi::<(), Block::Hash, Block::Header, ()>::finalized_head(&rpc) + .await + .map_err(|e| e.to_string().into()) }, (None, State::Live { at: Some(at), .. }) => hash_of::(at), _ => { @@ -137,6 +140,8 @@ where Block: BlockT + serde::de::DeserializeOwned, Block::Hash: FromStr, ::Err: Debug, + Block::Hash: serde::de::DeserializeOwned, + Block::Header: serde::de::DeserializeOwned, NumberFor: FromStr, as FromStr>::Err: Debug, ExecDispatch: NativeExecutionDispatch + 'static, @@ -146,8 +151,11 @@ where let block_ws_uri = command.block_ws_uri::(); let block_at = command.block_at::(block_ws_uri.clone()).await?; - let rpc_service = rpc_api::RpcService::new(block_ws_uri.clone(), false).await?; - let block: Block = rpc_service.get_block::(block_at).await?; + let rpc = ws_client(&block_ws_uri).await?; + let block: Block = ChainApi::<(), Block::Hash, Block::Header, _>::block(&rpc, Some(block_at)) + .await + .unwrap() + .unwrap(); let parent_hash = block.header().parent_hash(); log::info!( target: LOG_TARGET, diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index df6d40b2b8ab1..1cc371c8f22fd 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -19,22 +19,15 @@ use crate::{ build_executor, ensure_matching_spec, extract_code, full_extensions, local_spec, parse, state_machine_call_with_proof, SharedParams, LOG_TARGET, }; -use jsonrpsee::{ - core::{ - async_trait, - client::{Client, Subscription, SubscriptionClientT}, - }, - ws_client::WsClientBuilder, -}; use parity_scale_codec::{Decode, Encode}; -use remote_externalities::{rpc_api::RpcService, Builder, Mode, OnlineConfig}; +use remote_externalities::{Builder, Mode, OnlineConfig}; use sc_executor::NativeExecutionDispatch; use sc_service::Configuration; -use serde::de::DeserializeOwned; +use serde::{de::DeserializeOwned, Serialize}; use sp_core::H256; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_weights::Weight; -use std::{collections::VecDeque, fmt::Debug, str::FromStr}; +use std::{fmt::Debug, str::FromStr}; +use substrate_rpc_client::{ws_client, ChainApi, FinalizedHeaders, Subscription, WsClient}; const SUB: &str = "chain_subscribeFinalizedHeads"; const UN_SUB: &str = "chain_unsubscribeFinalizedHeads"; @@ -71,144 +64,19 @@ pub struct FollowChainCmd { /// /// Returns a pair `(client, subscription)` - `subscription` alone will be useless, because it /// relies on the related alive `client`. -async fn start_subscribing( +async fn start_subscribing( url: &str, -) -> sc_cli::Result<(Client, Subscription
)> { - let client = WsClientBuilder::default() - .connection_timeout(std::time::Duration::new(20, 0)) - .max_notifs_per_subscription(1024) - .max_request_body_size(u32::MAX) - .build(url) - .await - .map_err(|e| sc_cli::Error::Application(e.into()))?; +) -> sc_cli::Result<(WsClient, Subscription
)> { + let client = ws_client(url).await.map_err(|e| sc_cli::Error::Application(e.into()))?; log::info!(target: LOG_TARGET, "subscribing to {:?} / {:?}", SUB, UN_SUB); - let sub = client - .subscribe(SUB, None, UN_SUB) + let sub = ChainApi::<(), (), Header, ()>::subscribe_finalized_heads(&client) .await .map_err(|e| sc_cli::Error::Application(e.into()))?; Ok((client, sub)) } -/// Abstraction over RPC calling for headers. -#[async_trait] -trait HeaderProvider -where - Block::Header: HeaderT, -{ - /// Awaits for the header of the block with hash `hash`. - async fn get_header(&self, hash: Block::Hash) -> Block::Header; -} - -#[async_trait] -impl HeaderProvider for RpcService -where - Block::Header: DeserializeOwned, -{ - async fn get_header(&self, hash: Block::Hash) -> Block::Header { - self.get_header::(hash).await.unwrap() - } -} - -/// Abstraction over RPC subscription for finalized headers. -#[async_trait] -trait HeaderSubscription -where - Block::Header: HeaderT, -{ - /// Await for the next finalized header from the subscription. - /// - /// Returns `None` if either the subscription has been closed or there was an error when reading - /// an object from the client. - async fn next_header(&mut self) -> Option; -} - -#[async_trait] -impl HeaderSubscription for Subscription -where - Block::Header: DeserializeOwned, -{ - async fn next_header(&mut self) -> Option { - match self.next().await { - Some(Ok(header)) => Some(header), - None => { - log::warn!("subscription closed"); - None - }, - Some(Err(why)) => { - log::warn!("subscription returned error: {:?}. Probably decoding has failed.", why); - None - }, - } - } -} - -/// Stream of all finalized headers. -/// -/// Returned headers are guaranteed to be ordered. There are no missing headers (even if some of -/// them lack justification). -struct FinalizedHeaders<'a, Block: BlockT, HP: HeaderProvider, HS: HeaderSubscription> -{ - header_provider: &'a HP, - subscription: HS, - fetched_headers: VecDeque, - last_returned: Option<::Hash>, -} - -impl<'a, Block: BlockT, HP: HeaderProvider, HS: HeaderSubscription> - FinalizedHeaders<'a, Block, HP, HS> -where - ::Header: DeserializeOwned, -{ - pub fn new(header_provider: &'a HP, subscription: HS) -> Self { - Self { - header_provider, - subscription, - fetched_headers: VecDeque::new(), - last_returned: None, - } - } - - /// Reads next finalized header from the subscription. If some headers (without justification) - /// have been skipped, fetches them as well. Returns number of headers that have been fetched. - /// - /// All fetched headers are stored in `self.fetched_headers`. - async fn fetch(&mut self) -> usize { - let last_finalized = match self.subscription.next_header().await { - Some(header) => header, - None => return 0, - }; - - self.fetched_headers.push_front(last_finalized.clone()); - - let mut last_finalized_parent = *last_finalized.parent_hash(); - let last_returned = self.last_returned.unwrap_or(last_finalized_parent); - - while last_finalized_parent != last_returned { - let parent_header = self.header_provider.get_header(last_finalized_parent).await; - self.fetched_headers.push_front(parent_header.clone()); - last_finalized_parent = *parent_header.parent_hash(); - } - - self.fetched_headers.len() - } - - /// Get the next finalized header. 
- pub async fn next(&mut self) -> Option { - if self.fetched_headers.is_empty() { - self.fetch().await; - } - - if let Some(header) = self.fetched_headers.pop_front() { - self.last_returned = Some(header.hash()); - Some(header) - } else { - None - } - } -} - pub(crate) async fn follow_chain( shared: SharedParams, command: FollowChainCmd, @@ -224,22 +92,23 @@ where ExecDispatch: NativeExecutionDispatch + 'static, { let mut maybe_state_ext = None; - let (_client, subscription) = start_subscribing::(&command.uri).await?; + let (rpc, subscription) = start_subscribing::(&command.uri).await?; let (code_key, code) = extract_code(&config.chain_spec)?; let executor = build_executor::(&shared, &config); let execution = shared.execution; - let rpc_service = RpcService::new(&command.uri, command.keep_connection).await?; - - let mut finalized_headers: FinalizedHeaders> = - FinalizedHeaders::new(&rpc_service, subscription); + let mut finalized_headers: FinalizedHeaders = + FinalizedHeaders::new(&rpc, subscription); while let Some(header) = finalized_headers.next().await { let hash = header.hash(); let number = header.number(); - let block = rpc_service.get_block::(hash).await.unwrap(); + let block: Block = ChainApi::<(), Block::Hash, Block::Header, _>::block(&rpc, Some(hash)) + .await + .unwrap() + .unwrap(); log::debug!( target: LOG_TARGET, @@ -295,7 +164,7 @@ where full_extensions(), )?; - let consumed_weight = ::decode(&mut &*encoded_result) + let consumed_weight = ::decode(&mut &*encoded_result) .map_err(|e| format!("failed to decode weight: {:?}", e))?; let storage_changes = changes @@ -326,76 +195,3 @@ where log::error!(target: LOG_TARGET, "ws subscription must have terminated."); Ok(()) } - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::testing::{Block as TBlock, ExtrinsicWrapper, Header}; - use std::sync::Arc; - use tokio::sync::Mutex; - - type Block = TBlock>; - type BlockNumber = u64; - type Hash = H256; - - struct MockHeaderProvider(pub Arc>>); - - fn headers() -> Vec
{ - let mut headers = vec![Header::new_from_number(0)]; - for n in 1..11 { - headers.push(Header { - parent_hash: headers.last().unwrap().hash(), - ..Header::new_from_number(n) - }) - } - headers - } - - #[async_trait] - impl HeaderProvider for MockHeaderProvider { - async fn get_header(&self, _hash: Hash) -> Header { - let height = self.0.lock().await.pop_front().unwrap(); - headers()[height as usize].clone() - } - } - - struct MockHeaderSubscription(pub VecDeque); - - #[async_trait] - impl HeaderSubscription for MockHeaderSubscription { - async fn next_header(&mut self) -> Option
{ - self.0.pop_front().map(|h| headers()[h as usize].clone()) - } - } - - #[tokio::test] - async fn finalized_headers_works_when_every_block_comes_from_subscription() { - let heights = vec![4, 5, 6, 7]; - - let provider = MockHeaderProvider(Default::default()); - let subscription = MockHeaderSubscription(heights.clone().into()); - let mut headers = FinalizedHeaders::new(&provider, subscription); - - for h in heights { - assert_eq!(h, headers.next().await.unwrap().number); - } - assert_eq!(None, headers.next().await); - } - - #[tokio::test] - async fn finalized_headers_come_from_subscription_and_provider_if_in_need() { - let all_heights = 3..11; - let heights_in_subscription = vec![3, 4, 6, 10]; - // Consecutive headers will be requested in the reversed order. - let heights_not_in_subscription = vec![5, 9, 8, 7]; - - let provider = MockHeaderProvider(Arc::new(Mutex::new(heights_not_in_subscription.into()))); - let subscription = MockHeaderSubscription(heights_in_subscription.into()); - let mut headers = FinalizedHeaders::new(&provider, subscription); - - for h in all_heights { - assert_eq!(h, headers.next().await.unwrap().number); - } - assert_eq!(None, headers.next().await); - } -} diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index 6f48d1f8f68a7..8d2585372b4a8 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -20,12 +20,12 @@ use crate::{ parse, state_machine_call, SharedParams, State, LOG_TARGET, }; use parity_scale_codec::Encode; -use remote_externalities::rpc_api; use sc_executor::NativeExecutionDispatch; use sc_service::Configuration; use sp_core::storage::well_known_keys; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use std::{fmt::Debug, str::FromStr}; +use substrate_rpc_client::{ws_client, ChainApi}; /// Configurations of the [`Command::OffchainWorker`]. 
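Since `FinalizedHeaders` and the `HeaderProvider`/`HeaderSubscription` abstractions now live in `substrate-rpc-client`, the consuming side is no longer visible in this file. Below is a rough sketch of the loop `follow_chain` is built around; the function is hypothetical and the generic layout of `FinalizedHeaders` is assumed to match the in-file version removed above:

    use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
    use substrate_rpc_client::{ws_client, ChainApi, FinalizedHeaders};

    /// Hypothetical driver loop: subscribe to finalized heads and walk every finalized
    /// header in order, fetching any headers skipped by the subscription over the
    /// same client.
    async fn walk_finalized_headers<Block>(uri: &str) -> sc_cli::Result<()>
    where
        Block: BlockT + serde::de::DeserializeOwned,
        Block::Hash: serde::de::DeserializeOwned,
        Block::Header: serde::de::DeserializeOwned,
    {
        let rpc = ws_client(uri).await?;
        let subscription =
            ChainApi::<(), Block::Hash, Block::Header, ()>::subscribe_finalized_heads(&rpc)
                .await
                .map_err(|e| sc_cli::Error::Application(e.into()))?;

        // Ordered and gap-free, like the in-file implementation this patch removes.
        let mut finalized_headers = FinalizedHeaders::<Block, _, _>::new(&rpc, subscription);

        while let Some(header) = finalized_headers.next().await {
            log::info!("finalized header #{:?} ({:?})", header.number(), header.hash());
        }
        Ok(())
    }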
#[derive(Debug, Clone, clap::Parser)] @@ -117,8 +117,11 @@ where let header_at = command.header_at::()?; let header_ws_uri = command.header_ws_uri::(); - let rpc_service = rpc_api::RpcService::new(header_ws_uri.clone(), false).await?; - let header = rpc_service.get_header::(header_at).await?; + let rpc = ws_client(&header_ws_uri).await?; + let header = ChainApi::<(), Block::Hash, Block::Header, ()>::header(&rpc, Some(header_at)) + .await + .unwrap() + .unwrap(); log::info!( target: LOG_TARGET, "fetched header from {:?}, block number: {:?}", diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 5e11123526532..fba34ddfb5060 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -45,6 +45,7 @@ where Block: BlockT + serde::de::DeserializeOwned, Block::Hash: FromStr, ::Err: Debug, + Block::Header: serde::de::DeserializeOwned, NumberFor: FromStr, as FromStr>::Err: Debug, ExecDispatch: NativeExecutionDispatch + 'static, diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index 9034f356a9f67..0732c8618fc15 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -267,8 +267,7 @@ use parity_scale_codec::Decode; use remote_externalities::{ - rpc_api::RpcService, Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, - TestExternalities, + Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, TestExternalities, }; use sc_chain_spec::ChainSpec; use sc_cli::{ @@ -297,6 +296,7 @@ use sp_runtime::{ use sp_state_machine::{OverlayedChanges, StateMachine, TrieBackendBuilder}; use sp_version::StateVersion; use std::{fmt::Debug, path::PathBuf, str::FromStr}; +use substrate_rpc_client::{ws_client, StateApi}; mod commands; pub(crate) mod parse; @@ -633,9 +633,8 @@ pub(crate) async fn ensure_matching_spec( expected_spec_version: u32, relaxed: bool, ) { - let rpc_service = RpcService::new(uri.clone(), false).await.unwrap(); - match rpc_service - .get_runtime_version::(None) + let rpc = ws_client(&uri).await.unwrap(); + match StateApi::::runtime_version(&rpc, None) .await .map(|version| (String::from(version.spec_name.clone()), version.spec_version)) .map(|(spec_name, spec_version)| (spec_name.to_lowercase(), spec_version)) From e791929a7b95b10f80ae0340bebd21de1e6bcf08 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 19 Oct 2022 12:13:57 +0200 Subject: [PATCH 259/348] BlockId removal: refactor: ProofProvider (#12519) * BlockId removal: refactor: ProofProvider It changes the arguments of methods of `ProofProvider` trait from: block: `BlockId` to: hash: `&Block::Hash` This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) * LightClientRequestHandler: excessive BlockIdTo bound removed * imports cleanup * formatting * args tyeps cleanup --- client/api/src/proof_provider.rs | 16 ++-- .../src/light_client_requests/handler.rs | 76 +++++++++---------- .../network/sync/src/state_request_handler.rs | 6 +- client/rpc/src/state/state_full.rs | 4 +- client/service/src/client/client.rs | 22 +++--- 5 files changed, 57 insertions(+), 67 deletions(-) diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 3aad4af1befb5..4ddbf883b83f2 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -18,7 +18,7 
@@ //! Proof utilities use crate::{CompactProof, StorageProof}; -use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use sp_state_machine::{KeyValueStates, KeyValueStorageLevel}; use sp_storage::ChildInfo; @@ -27,7 +27,7 @@ pub trait ProofProvider { /// Reads storage value at a given block + key, returning read proof. fn read_proof( &self, - id: &BlockId, + hash: &Block::Hash, keys: &mut dyn Iterator, ) -> sp_blockchain::Result; @@ -35,7 +35,7 @@ pub trait ProofProvider { /// read proof. fn read_child_proof( &self, - id: &BlockId, + hash: &Block::Hash, child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result; @@ -46,12 +46,12 @@ pub trait ProofProvider { /// No changes are made. fn execution_proof( &self, - id: &BlockId, + hash: &Block::Hash, method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)>; - /// Given a `BlockId` iterate over all storage values starting at `start_keys`. + /// Given a `Hash` iterate over all storage values starting at `start_keys`. /// Last `start_keys` element contains last accessed key value. /// With multiple `start_keys`, first `start_keys` element is /// the current storage key of of the last accessed child trie. @@ -61,12 +61,12 @@ pub trait ProofProvider { /// Returns combined proof and the numbers of collected keys. fn read_proof_collection( &self, - id: &BlockId, + hash: &Block::Hash, start_keys: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(CompactProof, u32)>; - /// Given a `BlockId` iterate over all storage values starting at `start_key`. + /// Given a `Hash` iterate over all storage values starting at `start_key`. /// Returns collected keys and values. /// Returns the collected keys values content of the top trie followed by the /// collected keys values of child tries. @@ -76,7 +76,7 @@ pub trait ProofProvider { /// end. 
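For callers the change is mechanical: wherever a `BlockId` was constructed only to name a block for `ProofProvider`, the hash is now passed directly, and the `expect_block_hash_from_id` lookup on the implementation side disappears. A condensed caller-side sketch (the wrapper function is hypothetical; the trait call is the one changed in this patch):

    use sc_client_api::{ProofProvider, StorageProof};
    use sp_runtime::traits::Block as BlockT;

    /// Hypothetical caller: the block is now identified by its hash directly, so no
    /// `BlockId::Hash(..)` wrapping (and no hash lookup inside the implementation)
    /// is needed any more.
    fn storage_read_proof<Block, Client>(
        client: &Client,
        hash: Block::Hash,
        keys: &[Vec<u8>],
    ) -> sp_blockchain::Result<StorageProof>
    where
        Block: BlockT,
        Client: ProofProvider<Block>,
    {
        // Before this change: client.read_proof(&BlockId::Hash(hash), ...)
        client.read_proof(&hash, &mut keys.iter().map(AsRef::as_ref))
    }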
fn storage_collection( &self, - id: &BlockId, + hash: &Block::Hash, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result>; diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 9dc02eb9ff291..7156545fbd9aa 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -38,7 +38,7 @@ use sp_core::{ hexdisplay::HexDisplay, storage::{ChildInfo, ChildType, PrefixedStorageKey}, }; -use sp_runtime::{generic::BlockId, traits::Block}; +use sp_runtime::traits::Block; use std::{marker::PhantomData, sync::Arc}; const LOG_TARGET: &str = "light-client-request-handler"; @@ -172,26 +172,22 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let response = - match self - .client - .execution_proof(&BlockId::Hash(block), &request.method, &request.data) - { - Ok((_, proof)) => { - let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; - Some(schema::v1::light::response::Response::RemoteCallResponse(r)) - }, - Err(e) => { - trace!( - "remote call request from {} ({} at {:?}) failed with: {}", - peer, - request.method, - request.block, - e, - ); - None - }, - }; + let response = match self.client.execution_proof(&block, &request.method, &request.data) { + Ok((_, proof)) => { + let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; + Some(schema::v1::light::response::Response::RemoteCallResponse(r)) + }, + Err(e) => { + trace!( + "remote call request from {} ({} at {:?}) failed with: {}", + peer, + request.method, + request.block, + e, + ); + None + }, + }; Ok(schema::v1::light::Response { response }) } @@ -215,25 +211,23 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let response = match self - .client - .read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) - { - Ok(proof) => { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - Some(schema::v1::light::response::Response::RemoteReadResponse(r)) - }, - Err(error) => { - trace!( - "remote read request from {} ({} at {:?}) failed with: {}", - peer, - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error, - ); - None - }, - }; + let response = + match self.client.read_proof(&block, &mut request.keys.iter().map(AsRef::as_ref)) { + Ok(proof) => { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + Some(schema::v1::light::response::Response::RemoteReadResponse(r)) + }, + Err(error) => { + trace!( + "remote read request from {} ({} at {:?}) failed with: {}", + peer, + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error, + ); + None + }, + }; Ok(schema::v1::light::Response { response }) } @@ -265,7 +259,7 @@ where }; let response = match child_info.and_then(|child_info| { self.client.read_child_proof( - &BlockId::Hash(block), + &block, &child_info, &mut request.keys.iter().map(AsRef::as_ref), ) diff --git a/client/network/sync/src/state_request_handler.rs b/client/network/sync/src/state_request_handler.rs index abbbcad2e378f..0a369c998dbd7 100644 --- a/client/network/sync/src/state_request_handler.rs +++ b/client/network/sync/src/state_request_handler.rs @@ -32,7 +32,7 @@ use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, }; -use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use std::{ 
hash::{Hash, Hasher}, sync::Arc, @@ -205,14 +205,14 @@ where if !request.no_proof { let (proof, _count) = self.client.read_proof_collection( - &BlockId::hash(block), + &block, request.start.as_slice(), MAX_RESPONSE_BYTES, )?; response.proof = proof.encode(); } else { let entries = self.client.storage_collection( - &BlockId::hash(block), + &block, request.start.as_slice(), MAX_RESPONSE_BYTES, )?; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index d6ab93f7680b0..44e7d03bc1a0e 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -345,7 +345,7 @@ where self.block_or_best(block) .and_then(|block| { self.client - .read_proof(&BlockId::Hash(block), &mut keys.iter().map(|key| key.0.as_ref())) + .read_proof(&block, &mut keys.iter().map(|key| key.0.as_ref())) .map(|proof| proof.iter_nodes().map(|node| node.into()).collect()) .map(|proof| ReadProof { at: block, proof }) }) @@ -494,7 +494,7 @@ where }; self.client .read_child_proof( - &BlockId::Hash(block), + &block, &child_info, &mut keys.iter().map(|key| key.0.as_ref()), ) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index a7851af446b95..a12b7177db47c 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1152,42 +1152,39 @@ where { fn read_proof( &self, - id: &BlockId, + hash: &Block::Hash, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - self.state_at(&hash) + self.state_at(hash) .and_then(|state| prove_read(state, keys).map_err(Into::into)) } fn read_child_proof( &self, - id: &BlockId, + hash: &Block::Hash, child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - self.state_at(&hash) + self.state_at(hash) .and_then(|state| prove_child_read(state, child_info, keys).map_err(Into::into)) } fn execution_proof( &self, - id: &BlockId, + hash: &Block::Hash, method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { - self.executor.prove_execution(id, method, call_data) + self.executor.prove_execution(&BlockId::Hash(*hash), method, call_data) } fn read_proof_collection( &self, - id: &BlockId, + hash: &Block::Hash, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(CompactProof, u32)> { - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - let state = self.state_at(&hash)?; + let state = self.state_at(hash)?; // this is a read proof, using version V0 or V1 is equivalent. 
let root = state.storage_root(std::iter::empty(), StateVersion::V0).0; @@ -1202,14 +1199,13 @@ where fn storage_collection( &self, - id: &BlockId, + hash: &Block::Hash, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { return Err(Error::Backend("Invalid start key.".to_string())) } - let hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; let state = self.state_at(&hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); From 07e1b6d72747f3cde49e85e4ff68ff158daebe16 Mon Sep 17 00:00:00 2001 From: dharjeezy Date: Wed, 19 Oct 2022 14:29:20 +0100 Subject: [PATCH 260/348] registrar: Avoid freebies in provide_judgement (#12465) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * evaluate repatriate reserved error in pallet identity * fix benchmarks * add repatriate reserved error test * benchmark fix * undo lock * include balance to use for benchmarks * rename test * Update frame/identity/src/benchmarking.rs * Update frame/identity/src/benchmarking.rs Co-authored-by: Bastian Köcher --- frame/identity/src/benchmarking.rs | 19 ++++++++++++++++--- frame/identity/src/lib.rs | 7 +++++-- frame/identity/src/tests.rs | 25 +++++++++++++++++++++++++ 3 files changed, 46 insertions(+), 5 deletions(-) diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index c628387a4d22e..3584eb954b399 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -144,9 +144,14 @@ benchmarks! { // User requests judgement from all the registrars, and they approve for i in 0..r { + let registrar: T::AccountId = account("registrar", i, SEED); + let registrar_lookup = T::Lookup::unlookup(registrar.clone()); + let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); + let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); + Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; Identity::::provide_judgement( - RawOrigin::Signed(account("registrar", i, SEED)).into(), + RawOrigin::Signed(registrar).into(), i, caller_lookup.clone(), Judgement::Reasonable, @@ -213,9 +218,13 @@ benchmarks! { // User requests judgement from all the registrars, and they approve for i in 0..r { + let registrar: T::AccountId = account("registrar", i, SEED); + let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); + let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); + Identity::::request_judgement(caller_origin.clone(), i, 10u32.into())?; Identity::::provide_judgement( - RawOrigin::Signed(account("registrar", i, SEED)).into(), + RawOrigin::Signed(registrar).into(), i, caller_lookup.clone(), Judgement::Reasonable, @@ -362,9 +371,13 @@ benchmarks! 
{ // User requests judgement from all the registrars, and they approve for i in 0..r { + let registrar: T::AccountId = account("registrar", i, SEED); + let balance_to_use = T::Currency::minimum_balance() * 10u32.into(); + let _ = T::Currency::make_free_balance_be(®istrar, balance_to_use); + Identity::::request_judgement(target_origin.clone(), i, 10u32.into())?; Identity::::provide_judgement( - RawOrigin::Signed(account("registrar", i, SEED)).into(), + RawOrigin::Signed(registrar).into(), i, target_lookup.clone(), Judgement::Reasonable, diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 8b22ecedaec19..95f5a84d8abb7 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -238,6 +238,8 @@ pub mod pallet { NotOwned, /// The provided judgement was for a different identity. JudgementForDifferentIdentity, + /// Error that occurs when there is an issue paying for judgement. + JudgementPaymentFailed, } #[pallet::event] @@ -788,12 +790,13 @@ pub mod pallet { match id.judgements.binary_search_by_key(®_index, |x| x.0) { Ok(position) => { if let Judgement::FeePaid(fee) = id.judgements[position].1 { - let _ = T::Currency::repatriate_reserved( + T::Currency::repatriate_reserved( &target, &sender, fee, BalanceStatus::Free, - ); + ) + .map_err(|_| Error::::JudgementPaymentFailed)?; } id.judgements[position] = item }, diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 20628da50937a..6b0006971f0e6 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -540,6 +540,31 @@ fn requesting_judgement_should_work() { }); } +#[test] +fn provide_judgement_should_return_judgement_payment_failed_error() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(RuntimeOrigin::signed(1), 3)); + assert_ok!(Identity::set_fee(RuntimeOrigin::signed(3), 0, 10)); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); + assert_ok!(Identity::request_judgement(RuntimeOrigin::signed(10), 0, 10)); + // 10 for the judgement request, 10 for the identity. 
+ assert_eq!(Balances::free_balance(10), 80); + + // This forces judgement payment failed error + Balances::make_free_balance_be(&3, 0); + assert_noop!( + Identity::provide_judgement( + RuntimeOrigin::signed(3), + 0, + 10, + Judgement::Erroneous, + BlakeTwo256::hash_of(&ten()) + ), + Error::::JudgementPaymentFailed + ); + }); +} + #[test] fn field_deposit_should_work() { new_test_ext().execute_with(|| { From 076be6d28cbe3acea5a67d668f1cf545d5c13064 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 19 Oct 2022 21:05:27 +0100 Subject: [PATCH 261/348] EPM: allow duplicate submissions (#12237) * allow for duplicate signed submissions * Fix a bunch of things, seems all good now * fmt * Fix * Update frame/election-provider-multi-phase/src/signed.rs Co-authored-by: Niklas Adolfsson * Update frame/election-provider-multi-phase/src/signed.rs Co-authored-by: Niklas Adolfsson * add migratin * fmt * comment typo * some review comments * fix bench Co-authored-by: Niklas Adolfsson Co-authored-by: Ross Bulat --- .../src/benchmarking.rs | 17 +- .../election-provider-multi-phase/src/lib.rs | 12 +- .../src/migrations.rs | 78 +++++ .../election-provider-multi-phase/src/mock.rs | 6 + .../src/signed.rs | 284 ++++++++++++------ 5 files changed, 293 insertions(+), 104 deletions(-) create mode 100644 frame/election-provider-multi-phase/src/migrations.rs diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index a8195df7305ff..a0fd0a9a0512f 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -320,21 +320,14 @@ frame_benchmarking::benchmarks! { } submit { - // the solution will be worse than all of them meaning the score need to be checked against - // ~ log2(c) - let solution = RawSolution { - score: ElectionScore { minimal_stake: 10_000_000u128 - 1, ..Default::default() }, - ..Default::default() - }; - + // the queue is full and the solution is only better than the worse. >::create_snapshot().map_err(<&str>::from)?; MultiPhase::::on_initialize_open_signed(); >::put(1); let mut signed_submissions = SignedSubmissions::::get(); - // Insert `max - 1` submissions because the call to `submit` will insert another - // submission and the score is worse then the previous scores. + // Insert `max` submissions for i in 0..(T::SignedMaxSubmissions::get() - 1) { let raw_solution = RawSolution { score: ElectionScore { minimal_stake: 10_000_000u128 + (i as u128), ..Default::default() }, @@ -350,6 +343,12 @@ frame_benchmarking::benchmarks! { } signed_submissions.put(); + // this score will eject the weakest one. 
+ let solution = RawSolution { + score: ElectionScore { minimal_stake: 10_000_000u128 + 1, ..Default::default() }, + ..Default::default() + }; + let caller = frame_benchmarking::whitelisted_caller(); let deposit = MultiPhase::::deposit_for( &solution, diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 3dc6161bb202a..3e5a6d12f575c 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -267,6 +267,7 @@ pub mod helpers; const LOG_TARGET: &str = "runtime::election-provider"; +pub mod migrations; pub mod signed; pub mod unsigned; pub mod weights; @@ -1265,8 +1266,8 @@ pub mod pallet { #[pallet::storage] pub type SignedSubmissionNextIndex = StorageValue<_, u32, ValueQuery>; - /// A sorted, bounded set of `(score, index)`, where each `index` points to a value in - /// `SignedSubmissions`. + /// A sorted, bounded vector of `(score, block_number, index)`, where each `index` points to a + /// value in `SignedSubmissions`. /// /// We never need to process more than a single signed submission at a time. Signed submissions /// can be quite large, so we're willing to pay the cost of multiple database accesses to access @@ -1296,9 +1297,14 @@ pub mod pallet { #[pallet::getter(fn minimum_untrusted_score)] pub type MinimumUntrustedScore = StorageValue<_, ElectionScore>; + /// The current storage version. + /// + /// v1: https://github.com/paritytech/substrate/pull/12237/ + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(PhantomData); } diff --git a/frame/election-provider-multi-phase/src/migrations.rs b/frame/election-provider-multi-phase/src/migrations.rs new file mode 100644 index 0000000000000..77efe0d0c5e92 --- /dev/null +++ b/frame/election-provider-multi-phase/src/migrations.rs @@ -0,0 +1,78 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod v1 { + use frame_support::{ + storage::unhashed, + traits::{Defensive, GetStorageVersion, OnRuntimeUpgrade}, + BoundedVec, + }; + use sp_std::collections::btree_map::BTreeMap; + + use crate::*; + pub struct MigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV1 { + fn on_runtime_upgrade() -> Weight { + let current = Pallet::::current_storage_version(); + let onchain = Pallet::::on_chain_storage_version(); + + log!( + info, + "Running migration with current storage version {:?} / onchain {:?}", + current, + onchain + ); + + if current == 1 && onchain == 0 { + if SignedSubmissionIndices::::exists() { + // This needs to be tested at a both a block height where this value exists, and + // when it doesn't. 
+ let now = frame_system::Pallet::::block_number(); + let map = unhashed::get::>( + &SignedSubmissionIndices::::hashed_key(), + ) + .defensive_unwrap_or_default(); + let vector = map + .into_iter() + .map(|(score, index)| (score, now, index)) + .collect::>(); + + log!( + debug, + "{:?} SignedSubmissionIndices read from storage (max: {:?})", + vector.len(), + T::SignedMaxSubmissions::get() + ); + + // defensive-only, assuming a constant `SignedMaxSubmissions`. + let bounded = BoundedVec::<_, _>::truncate_from(vector); + SignedSubmissionIndices::::put(bounded); + + log!(info, "SignedSubmissionIndices existed and got migrated"); + } else { + log!(info, "SignedSubmissionIndices did NOT exist."); + } + + current.put::>(); + T::DbWeight::get().reads_writes(2, 1) + } else { + log!(info, "Migration did not execute. This probably should be removed"); + T::DbWeight::get().reads(1) + } + } + } +} diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index e04e0bf474caf..f645886d78899 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -571,6 +571,12 @@ impl ExtBuilder { balances: vec![ // bunch of account for submitting stuff only. (99, 100), + (100, 100), + (101, 100), + (102, 100), + (103, 100), + (104, 100), + (105, 100), (999, 100), (9999, 100), ], diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 175c92757f35e..cda87dd6c839a 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -24,11 +24,11 @@ use crate::{ }; use codec::{Decode, Encode, HasCompact}; use frame_election_provider_support::NposSolution; -use frame_support::{ - storage::bounded_btree_map::BoundedBTreeMap, - traits::{defensive_prelude::*, Currency, Get, OnUnbalanced, ReservableCurrency}, +use frame_support::traits::{ + defensive_prelude::*, Currency, Get, OnUnbalanced, ReservableCurrency, }; use sp_arithmetic::traits::SaturatedConversion; +use sp_core::bounded::BoundedVec; use sp_npos_elections::ElectionScore; use sp_runtime::{ traits::{Saturating, Zero}, @@ -37,7 +37,6 @@ use sp_runtime::{ use sp_std::{ cmp::Ordering, collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - ops::Deref, vec::Vec, }; @@ -99,8 +98,12 @@ pub type SignedSubmissionOf = SignedSubmission< <::MinerConfig as MinerConfig>::Solution, >; -pub type SubmissionIndicesOf = - BoundedBTreeMap::SignedMaxSubmissions>; +/// Always sorted vector of a score, submitted at the given block number, which can be found at the +/// given index (`u32`) of the `SignedSubmissionsMap`. +pub type SubmissionIndicesOf = BoundedVec< + (ElectionScore, ::BlockNumber, u32), + ::SignedMaxSubmissions, +>; /// Outcome of [`SignedSubmissions::insert`]. pub enum InsertResult { @@ -126,6 +129,16 @@ pub struct SignedSubmissions { } impl SignedSubmissions { + /// `true` if the structure is empty. + pub fn is_empty(&self) -> bool { + self.indices.is_empty() + } + + /// Get the length of submitted solutions. + pub fn len(&self) -> usize { + self.indices.len() + } + /// Get the signed submissions from storage. 
pub fn get() -> Self { let submissions = SignedSubmissions { @@ -134,10 +147,12 @@ impl SignedSubmissions { insertion_overlay: BTreeMap::new(), deletion_overlay: BTreeSet::new(), }; + // validate that the stored state is sane debug_assert!(submissions .indices - .values() + .iter() + .map(|(_, _, index)| index) .copied() .max() .map_or(true, |max_idx| submissions.next_idx > max_idx,)); @@ -155,7 +170,8 @@ impl SignedSubmissions { .map_or(true, |max_idx| self.next_idx > max_idx,)); debug_assert!(self .indices - .values() + .iter() + .map(|(_, _, index)| index) .copied() .max() .map_or(true, |max_idx| self.next_idx > max_idx,)); @@ -174,9 +190,9 @@ impl SignedSubmissions { /// Get the submission at a particular index. fn get_submission(&self, index: u32) -> Option> { if self.deletion_overlay.contains(&index) { - // Note: can't actually remove the item from the insertion overlay (if present) - // because we don't want to use `&mut self` here. There may be some kind of - // `RefCell` optimization possible here in the future. + // Note: can't actually remove the item from the insertion overlay (if present) because + // we don't want to use `&mut self` here. There may be some kind of `RefCell` + // optimization possible here in the future. None } else { self.insertion_overlay @@ -188,27 +204,30 @@ impl SignedSubmissions { /// Perform three operations: /// - /// - Remove a submission (identified by score) - /// - Insert a new submission (identified by score and insertion index) - /// - Return the submission which was removed. - /// - /// Note: in the case that `weakest_score` is not present in `self.indices`, this will return - /// `None` without inserting the new submission and without further notice. + /// - Remove the solution at the given position of `self.indices`. + /// - Insert a new submission (identified by score and insertion index), if provided. + /// - Return the submission which was removed, if any. /// - /// Note: this does not enforce any ordering relation between the submission removed and that - /// inserted. + /// The call site must ensure that `remove_pos` is a valid index. If otherwise, `None` is + /// silently returned. /// /// Note: this doesn't insert into `insertion_overlay`, the optional new insertion must be - /// inserted into `insertion_overlay` to keep the variable `self` in a valid state. + /// inserted into `insertion_overlay` to keep the variable `self` in a valid state. fn swap_out_submission( &mut self, - remove_score: ElectionScore, - insert: Option<(ElectionScore, u32)>, + remove_pos: usize, + insert: Option<(ElectionScore, T::BlockNumber, u32)>, ) -> Option> { - let remove_index = self.indices.remove(&remove_score)?; - if let Some((insert_score, insert_idx)) = insert { + if remove_pos >= self.indices.len() { + return None + } + + // safe: index was just checked in the line above. + let (_, _, remove_index) = self.indices.remove(remove_pos); + + if let Some((insert_score, block_number, insert_idx)) = insert { self.indices - .try_insert(insert_score, insert_idx) + .try_push((insert_score, block_number, insert_idx)) .expect("just removed an item, we must be under capacity; qed"); } @@ -222,20 +241,17 @@ impl SignedSubmissions { }) } + /// Remove the signed submission with the highest score from the set. + pub fn pop_last(&mut self) -> Option> { + let best_index = self.indices.len().checked_sub(1)?; + self.swap_out_submission(best_index, None) + } + /// Iterate through the set of signed submissions in order of increasing score. 
pub fn iter(&self) -> impl '_ + Iterator> { - self.indices.iter().filter_map(move |(_score, &idx)| { - let maybe_submission = self.get_submission(idx); - if maybe_submission.is_none() { - log!( - error, - "SignedSubmissions internal state is invalid (idx {}); \ - there is a logic error in code handling signed solution submissions", - idx, - ) - } - maybe_submission - }) + self.indices + .iter() + .filter_map(move |(_score, _bn, idx)| self.get_submission(*idx).defensive()) } /// Empty the set of signed submissions, returning an iterator of signed submissions in @@ -283,68 +299,54 @@ impl SignedSubmissions { /// to `is_score_better`, we do not change anything. pub fn insert(&mut self, submission: SignedSubmissionOf) -> InsertResult { // verify the expectation that we never reuse an index - debug_assert!(!self.indices.values().any(|&idx| idx == self.next_idx)); - - let weakest = match self.indices.try_insert(submission.raw_solution.score, self.next_idx) { - Ok(Some(prev_idx)) => { - // a submission of equal score was already present in the set; - // no point editing the actual backing map as we know that the newer solution can't - // be better than the old. However, we do need to put the old value back. - self.indices - .try_insert(submission.raw_solution.score, prev_idx) - .expect("didn't change the map size; qed"); - return InsertResult::NotInserted - }, - Ok(None) => { - // successfully inserted into the set; no need to take out weakest member - None - }, - Err((insert_score, insert_idx)) => { - // could not insert into the set because it is full. - // note that we short-circuit return here in case the iteration produces `None`. - // If there wasn't a weakest entry to remove, then there must be a capacity of 0, - // which means that we can't meaningfully proceed. - let weakest_score = match self.indices.iter().next() { + debug_assert!(!self.indices.iter().map(|(_, _, x)| x).any(|&idx| idx == self.next_idx)); + let block_number = frame_system::Pallet::::block_number(); + + let maybe_weakest = match self.indices.try_push(( + submission.raw_solution.score, + block_number, + self.next_idx, + )) { + Ok(_) => None, + Err(_) => { + // the queue is full -- if this is better, insert it. + let weakest_score = match self.indices.iter().next().defensive() { None => return InsertResult::NotInserted, - Some((score, _)) => *score, + Some((score, _, _)) => *score, }; let threshold = T::BetterSignedThreshold::get(); // if we haven't improved on the weakest score, don't change anything. - if !insert_score.strict_threshold_better(weakest_score, threshold) { + if !submission.raw_solution.score.strict_threshold_better(weakest_score, threshold) + { return InsertResult::NotInserted } - self.swap_out_submission(weakest_score, Some((insert_score, insert_idx))) + self.swap_out_submission( + 0, // swap out the worse one, which is always index 0. + Some((submission.raw_solution.score, block_number, self.next_idx)), + ) }, }; + // this is the ONLY place that we insert, and we sort post insertion. If scores are the + // same, we sort based on reverse of submission block number. 
+ self.indices + .sort_by(|(score1, bn1, _), (score2, bn2, _)| match score1.cmp(score2) { + Ordering::Equal => bn1.cmp(&bn2).reverse(), + x => x, + }); + // we've taken out the weakest, so update the storage map and the next index debug_assert!(!self.insertion_overlay.contains_key(&self.next_idx)); self.insertion_overlay.insert(self.next_idx, submission); debug_assert!(!self.deletion_overlay.contains(&self.next_idx)); self.next_idx += 1; - match weakest { + match maybe_weakest { Some(weakest) => InsertResult::InsertedEjecting(weakest), None => InsertResult::Inserted, } } - - /// Remove the signed submission with the highest score from the set. - pub fn pop_last(&mut self) -> Option> { - let (score, _) = self.indices.iter().rev().next()?; - // deref in advance to prevent mutable-immutable borrow conflict - let score = *score; - self.swap_out_submission(score, None) - } -} - -impl Deref for SignedSubmissions { - type Target = SubmissionIndicesOf; - - fn deref(&self) -> &Self::Target { - &self.indices - } } impl Pallet { @@ -379,6 +381,12 @@ impl Pallet { Self::snapshot_metadata().unwrap_or_default(); while let Some(best) = all_submissions.pop_last() { + log!( + debug, + "finalized_signed: trying to verify from {:?} score {:?}", + best.who, + best.raw_solution.score + ); let SignedSubmission { raw_solution, who, deposit, call_fee } = best; let active_voters = raw_solution.solution.voter_count() as u32; let feasibility_weight = { @@ -386,6 +394,7 @@ impl Pallet { let desired_targets = Self::desired_targets().defensive_unwrap_or_default(); T::WeightInfo::feasibility_check(voters, targets, active_voters, desired_targets) }; + // the feasibility check itself has some weight weight = weight.saturating_add(feasibility_weight); match Self::feasibility_check(raw_solution, ElectionCompute::Signed) { @@ -397,12 +406,14 @@ impl Pallet { call_fee, ); found_solution = true; + log!(debug, "finalized_signed: found a valid solution"); weight = weight .saturating_add(T::WeightInfo::finalize_signed_phase_accept_solution()); break }, Err(_) => { + log!(warn, "finalized_signed: invalid signed submission found, slashing."); Self::finalize_signed_phase_reject_solution(&who, deposit); weight = weight .saturating_add(T::WeightInfo::finalize_signed_phase_reject_solution()); @@ -526,14 +537,7 @@ impl Pallet { #[cfg(test)] mod tests { use super::*; - use crate::{ - mock::{ - balances, multi_phase_events, raw_solution, roll_to, roll_to_signed, Balances, - ExtBuilder, MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, SignedMaxRefunds, - SignedMaxSubmissions, SignedMaxWeight, - }, - Error, Event, Perbill, Phase, - }; + use crate::{mock::*, ElectionCompute, Error, Event, Perbill, Phase}; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; #[test] @@ -868,8 +872,8 @@ mod tests { } #[test] - fn replace_weakest_works() { - ExtBuilder::default().build_and_execute(|| { + fn replace_weakest_by_score_works() { + ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); @@ -893,7 +897,7 @@ mod tests { .iter() .map(|s| s.raw_solution.score.minimal_stake) .collect::>(), - vec![4, 6, 7, 8, 9], + vec![4, 6, 7], ); // better. 
@@ -909,7 +913,7 @@ mod tests { .iter() .map(|s| s.raw_solution.score.minimal_stake) .collect::>(), - vec![5, 6, 7, 8, 9], + vec![5, 6, 7], ); }) } @@ -946,7 +950,8 @@ mod tests { } #[test] - fn equally_good_solution_is_not_accepted() { + fn equally_good_solution_is_not_accepted_when_queue_full() { + // because in ordering of solutions, an older solution has higher priority and should stay. ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); @@ -958,6 +963,7 @@ mod tests { }; assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); } + assert_eq!( MultiPhase::signed_submissions() .iter() @@ -978,6 +984,99 @@ mod tests { }) } + #[test] + fn equally_good_solution_is_accepted_when_queue_not_full() { + // because in ordering of solutions, an older solution has higher priority and should stay. + ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = RawSolution { + score: ElectionScore { minimal_stake: 5, ..Default::default() }, + ..Default::default() + }; + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| (s.who, s.raw_solution.score.minimal_stake,)) + .collect::>(), + vec![(99, 5)] + ); + + roll_to(16); + let solution = RawSolution { + score: ElectionScore { minimal_stake: 5, ..Default::default() }, + ..Default::default() + }; + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(999), Box::new(solution))); + + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| (s.who, s.raw_solution.score.minimal_stake,)) + .collect::>(), + vec![(999, 5), (99, 5)] + ); + + let solution = RawSolution { + score: ElectionScore { minimal_stake: 6, ..Default::default() }, + ..Default::default() + }; + assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(9999), Box::new(solution))); + + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| (s.who, s.raw_solution.score.minimal_stake,)) + .collect::>(), + vec![(999, 5), (99, 5), (9999, 6)] + ); + }) + } + + #[test] + fn all_equal_score() { + // because in ordering of solutions, an older solution has higher priority and should stay. + ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + for i in 0..SignedMaxSubmissions::get() { + roll_to((15 + i).into()); + let solution = raw_solution(); + assert_ok!(MultiPhase::submit( + RuntimeOrigin::signed(100 + i as AccountId), + Box::new(solution) + )); + } + + assert_eq!( + MultiPhase::signed_submissions() + .iter() + .map(|s| (s.who, s.raw_solution.score.minimal_stake)) + .collect::>(), + vec![(102, 40), (101, 40), (100, 40)] + ); + + roll_to(25); + + // The first one that will actually get verified is the last one. 
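The tie-breaking rule these tests rely on is subtle: submissions are kept sorted ascending by score, and among equal scores the earliest-submitted entry sorts last, which is the slot `pop_last` drains first. A self-contained plain-Rust illustration, with bare integers standing in for the score, block number and storage index:

    use std::cmp::Ordering;

    fn main() {
        // (minimal stake, submission block number, storage index): plain integers
        // standing in for the pallet's `(ElectionScore, T::BlockNumber, u32)` entries.
        let mut indices: Vec<(u128, u64, u32)> =
            vec![(40, 15, 0), (40, 16, 1), (40, 17, 2), (50, 18, 3)];

        // The same comparator as `SignedSubmissions::insert`: ascending by score, and
        // for equal scores the *older* submission (smaller block number) sorts towards
        // the end, i.e. towards the slot that `pop_last` takes first.
        indices.sort_by(|(s1, b1, _), (s2, b2, _)| match s1.cmp(s2) {
            Ordering::Equal => b1.cmp(b2).reverse(),
            x => x,
        });

        assert_eq!(indices, vec![(40, 17, 2), (40, 16, 1), (40, 15, 0), (50, 18, 3)]);

        // The best submission is always the last element: the highest score wins, and
        // among equal scores the earliest submission wins.
        assert_eq!(indices.last().copied(), Some((50, 18, 3)));
    }

With all scores equal, the submission made at the lowest block number is therefore verified and rewarded first, which is what the event assertion below checks for account 100.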
+ assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 100, value: 7 }, + Event::UnsignedPhaseStarted { round: 1 } + ] + ); + }) + } + #[test] fn all_in_one_signed_submission_scenario() { // a combination of: @@ -991,6 +1090,7 @@ mod tests { assert_eq!(balances(&99), (100, 0)); assert_eq!(balances(&999), (100, 0)); assert_eq!(balances(&9999), (100, 0)); + let solution = raw_solution(); // submit a correct one. From c11b2621e6e0e20263a230b783d68820370f9ee1 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 19 Oct 2022 22:55:20 +0200 Subject: [PATCH 262/348] CI check against Rust feature bleed (#12341) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * CI check for rust feature bleed Signed-off-by: Oliver Tale-Yazdi * Cargo not available Signed-off-by: Oliver Tale-Yazdi * Handle missing programs Signed-off-by: Oliver Tale-Yazdi * Check for deps Signed-off-by: Oliver Tale-Yazdi * Add doc Signed-off-by: Oliver Tale-Yazdi * Use correct CI image Signed-off-by: Oliver Tale-Yazdi * Remove --offline Signed-off-by: Oliver Tale-Yazdi * Install cargo-workspaces Signed-off-by: Oliver Tale-Yazdi * Remove cargo-workspaces dep Signed-off-by: Oliver Tale-Yazdi * Fix try-runtime feature Signed-off-by: Oliver Tale-Yazdi * Fix features Signed-off-by: Oliver Tale-Yazdi * Fix features Signed-off-by: Oliver Tale-Yazdi * Fix more features... Signed-off-by: Oliver Tale-Yazdi * Use pipeline-script Signed-off-by: Oliver Tale-Yazdi * 🤡 Signed-off-by: Oliver Tale-Yazdi * Make stupid change to test the CI Signed-off-by: Oliver Tale-Yazdi * This reverts commit ad2746aa117fa7cb473521113a9bec89aaf26484. * Use correct branch Signed-off-by: Oliver Tale-Yazdi * Allow failure Signed-off-by: Oliver Tale-Yazdi * Make stupid change to test the CI Signed-off-by: Oliver Tale-Yazdi * Revert "Make stupid change to test the CI" This reverts commit 16ec00e1675c7ec57c988315549ff71e832a3093. Signed-off-by: Oliver Tale-Yazdi --- bin/node-template/node/Cargo.toml | 2 +- bin/node-template/runtime/Cargo.toml | 2 +- bin/node/cli/Cargo.toml | 2 +- bin/node/runtime/Cargo.toml | 2 +- frame/executive/Cargo.toml | 2 +- .../procedural/src/pallet/expand/genesis_build.rs | 2 +- frame/try-runtime/Cargo.toml | 5 ++++- frame/try-runtime/src/lib.rs | 1 + scripts/ci/gitlab/pipeline/check.yml | 14 +++++++++++++- utils/frame/try-runtime/cli/Cargo.toml | 7 ++++++- utils/frame/try-runtime/cli/src/lib.rs | 2 ++ 11 files changed, 32 insertions(+), 9 deletions(-) diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index c60018e14969c..d94955f722605 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -75,4 +75,4 @@ runtime-benchmarks = [ ] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. 
-try-runtime = ["node-template-runtime/try-runtime", "try-runtime-cli"] +try-runtime = ["node-template-runtime/try-runtime", "try-runtime-cli/try-runtime"] diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 45ab4939e311c..139264657f89d 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -99,7 +99,7 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] try-runtime = [ - "frame-try-runtime", + "frame-try-runtime/try-runtime", "frame-executive/try-runtime", "frame-system/try-runtime", "frame-support/try-runtime", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 4bf991f49320c..07208abdd089b 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -165,7 +165,7 @@ runtime-benchmarks = [ ] # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. -try-runtime = ["kitchensink-runtime/try-runtime", "try-runtime-cli"] +try-runtime = ["kitchensink-runtime/try-runtime", "try-runtime-cli/try-runtime"] [[bench]] name = "transaction_pool" diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 6940e968e28e7..39364961d57e2 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -254,7 +254,7 @@ runtime-benchmarks = [ "frame-system-benchmarking/runtime-benchmarks", ] try-runtime = [ - "frame-try-runtime", + "frame-try-runtime/try-runtime", "frame-executive/try-runtime", "frame-system/try-runtime", "frame-support/try-runtime", diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index cd5c793ee2ee0..f6f5175d63bb9 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -49,4 +49,4 @@ std = [ "sp-std/std", "sp-tracing/std", ] -try-runtime = ["frame-support/try-runtime", "frame-try-runtime" ] +try-runtime = ["frame-support/try-runtime", "frame-try-runtime/try-runtime" ] diff --git a/frame/support/procedural/src/pallet/expand/genesis_build.rs b/frame/support/procedural/src/pallet/expand/genesis_build.rs index 53d0695e5f971..d19476779011b 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_build.rs +++ b/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -19,7 +19,7 @@ use crate::pallet::Def; /// /// * implement the trait `sp_runtime::BuildModuleGenesisStorage` -/// * add #[cfg(features = "std")] to GenesisBuild implementation. +/// * add #[cfg(feature = "std")] to GenesisBuild implementation. 
pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { let genesis_config = if let Some(genesis_config) = &def.genesis_config { genesis_config diff --git a/frame/try-runtime/Cargo.toml b/frame/try-runtime/Cargo.toml index dd77c0438d71f..51b6f91784594 100644 --- a/frame/try-runtime/Cargo.toml +++ b/frame/try-runtime/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"]} -frame-support = { version = "4.0.0-dev", default-features = false, features = [ "try-runtime" ], path = "../support" } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } @@ -28,3 +28,6 @@ std = [ "sp-runtime/std", "sp-std/std", ] +try-runtime = [ + "frame-support/try-runtime", +] diff --git a/frame/try-runtime/src/lib.rs b/frame/try-runtime/src/lib.rs index 7fec92712cd19..ed1247bd8e6f2 100644 --- a/frame/try-runtime/src/lib.rs +++ b/frame/try-runtime/src/lib.rs @@ -18,6 +18,7 @@ //! Supporting types for try-runtime, testing and dry-running commands. #![cfg_attr(not(feature = "std"), no_std)] +#![cfg(feature = "try-runtime")] pub use frame_support::traits::TryStateSelect; use frame_support::weights::Weight; diff --git a/scripts/ci/gitlab/pipeline/check.yml b/scripts/ci/gitlab/pipeline/check.yml index 3166e13313f2a..878c46f32e850 100644 --- a/scripts/ci/gitlab/pipeline/check.yml +++ b/scripts/ci/gitlab/pipeline/check.yml @@ -35,6 +35,19 @@ test-dependency-rules: script: - ./scripts/ci/gitlab/ensure-deps.sh +test-rust-features: + stage: check + extends: + - .kubernetes-env + - .test-refs-no-trigger-prs-only + allow_failure: true + script: + - git clone + --depth=1 + --branch="$PIPELINE_SCRIPTS_TAG" + https://github.com/paritytech/pipeline-scripts + - bash ./pipeline-scripts/rust-features.sh . 
+ test-prometheus-alerting-rules: stage: check extends: .kubernetes-env @@ -51,4 +64,3 @@ test-prometheus-alerting-rules: - promtool check rules ./scripts/ci/monitoring/alerting-rules/alerting-rules.yaml - cat ./scripts/ci/monitoring/alerting-rules/alerting-rules.yaml | promtool test rules ./scripts/ci/monitoring/alerting-rules/alerting-rule-tests.yaml - diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 24d64d4aa4373..c7191b7eb7f5f 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -31,8 +31,13 @@ sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../../primitives/state-machine" } sp-version = { version = "5.0.0", path = "../../../../primitives/version" } sp-weights = { version = "4.0.0", path = "../../../../primitives/weights" } -frame-try-runtime = { path = "../../../../frame/try-runtime" } +frame-try-runtime = { optional = true, path = "../../../../frame/try-runtime" } substrate-rpc-client = { path = "../../rpc/client" } [dev-dependencies] tokio = "1.17.0" + +[features] +try-runtime = [ + "frame-try-runtime/try-runtime", +] diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index 0732c8618fc15..f54354342bf28 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -265,6 +265,8 @@ //! -s snap \ //! ``` +#![cfg(feature = "try-runtime")] + use parity_scale_codec::Decode; use remote_externalities::{ Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, TestExternalities, From 2eb8ad273eeabea380fce00c10893ac788b1cbde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 20 Oct 2022 00:34:03 +0200 Subject: [PATCH 263/348] contracts: Decrease the interation count on slow benchmarks (#12526) * Decrease amount of benchmark iterations for long slow ones * ".git/.scripts/bench-bot.sh" pallet dev pallet_contracts Co-authored-by: command-bot <> --- frame/contracts/src/benchmarking/mod.rs | 12 +- frame/contracts/src/weights.rs | 1525 ++++++++++++----------- 2 files changed, 772 insertions(+), 765 deletions(-) diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 186ac6f63503e..86c7d2df674c7 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -1890,7 +1890,7 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. seal_hash_sha2_256 { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. 1; let instance = Contract::::new(WasmModule::hasher( "seal_hash_sha2_256", r * API_BENCHMARK_BATCH_SIZE, 0, ), vec![])?; @@ -1908,7 +1908,7 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. seal_hash_keccak_256 { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. 1; let instance = Contract::::new(WasmModule::hasher( "seal_hash_keccak_256", r * API_BENCHMARK_BATCH_SIZE, 0, ), vec![])?; @@ -1926,7 +1926,7 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. seal_hash_blake2_256 { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. 1; let instance = Contract::::new(WasmModule::hasher( "seal_hash_blake2_256", r * API_BENCHMARK_BATCH_SIZE, 0, ), vec![])?; @@ -1944,7 +1944,7 @@ benchmarks! { // Only the overhead of calling the function itself with minimal arguments. seal_hash_blake2_128 { - let r in 0 .. 
API_BENCHMARK_BATCHES; + let r in 0 .. 1; let instance = Contract::::new(WasmModule::hasher( "seal_hash_blake2_128", r * API_BENCHMARK_BATCH_SIZE, 0, ), vec![])?; @@ -1963,7 +1963,7 @@ benchmarks! { // Only calling the function itself with valid arguments. // It generates different private keys and signatures for the message "Hello world". seal_ecdsa_recover { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. 1; let message_hash = sp_io::hashing::blake2_256("Hello world".as_bytes()); let key_type = sp_core::crypto::KeyTypeId(*b"code"); @@ -2011,7 +2011,7 @@ benchmarks! { // Only calling the function itself for the list of // generated different ECDSA keys. seal_ecdsa_to_eth_address { - let r in 0 .. API_BENCHMARK_BATCHES; + let r in 0 .. 1; let key_type = sp_core::crypto::KeyTypeId(*b"code"); let pub_keys_bytes = (0..r * API_BENCHMARK_BATCH_SIZE) .map(|_| { diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 4e4a6b6e6a2b7..46c86b670a7fe 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-08, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-10-19, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -32,6 +32,7 @@ // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json // --pallet=pallet_contracts // --chain=dev // --output=./frame/contracts/src/weights.rs @@ -166,15 +167,15 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(3_089_000 as u64) + Weight::from_ref_time(3_108_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(13_917_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(901_000 as u64).saturating_mul(k as u64)) + Weight::from_ref_time(15_377_000 as u64) + // Standard Error: 414 + .saturating_add(Weight::from_ref_time(892_761 as u64).saturating_mul(k as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) @@ -182,19 +183,18 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(14_172_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_301_000 as u64).saturating_mul(q as u64)) + Weight::from_ref_time(3_127_000 as u64) + // Standard Error: 3_813 + .saturating_add(Weight::from_ref_time(1_379_402 as u64).saturating_mul(q as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. 
fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(21_644_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(22_849_000 as u64) + // Standard Error: 26 + .saturating_add(Weight::from_ref_time(45_513 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -205,9 +205,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(234_349_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(258_275_000 as u64) + // Standard Error: 30 + .saturating_add(Weight::from_ref_time(45_885 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -222,11 +222,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(294_077_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(c as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(2_115_889_000 as u64) + // Standard Error: 323 + .saturating_add(Weight::from_ref_time(89_541 as u64).saturating_mul(c as u64)) + // Standard Error: 19 + .saturating_add(Weight::from_ref_time(664 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().writes(9 as u64)) } @@ -239,9 +239,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(199_028_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(211_349_000 as u64) + // Standard Error: 1 + .saturating_add(Weight::from_ref_time(1_532 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().writes(7 as u64)) } @@ -251,7 +251,7 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - Weight::from_ref_time(176_247_000 as u64) + Weight::from_ref_time(181_771_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -261,9 +261,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. 
fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(54_917_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(55_917_000 as u64) + // Standard Error: 25 + .saturating_add(Weight::from_ref_time(46_148 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -272,7 +272,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(37_611_000 as u64) + Weight::from_ref_time(37_966_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -280,7 +280,7 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:2 w:2) // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - Weight::from_ref_time(40_121_000 as u64) + Weight::from_ref_time(40_963_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -291,9 +291,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(241_129_000 as u64) - // Standard Error: 54_000 - .saturating_add(Weight::from_ref_time(35_413_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_526_000 as u64) + // Standard Error: 29_505 + .saturating_add(Weight::from_ref_time(35_107_467 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -304,9 +304,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(189_418_000 as u64) - // Standard Error: 419_000 - .saturating_add(Weight::from_ref_time(207_107_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(253_782_000 as u64) + // Standard Error: 256_004 + .saturating_add(Weight::from_ref_time(201_621_188 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -318,9 +318,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(203_928_000 as u64) - // Standard Error: 439_000 - .saturating_add(Weight::from_ref_time(268_983_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(249_892_000 as u64) + // Standard Error: 234_750 + .saturating_add(Weight::from_ref_time(258_209_168 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -332,9 +332,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(243_800_000 as u64) - // Standard Error: 40_000 - .saturating_add(Weight::from_ref_time(38_797_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(248_674_000 as u64) + // Standard Error: 23_962 + .saturating_add(Weight::from_ref_time(38_319_695 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -345,9 +345,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(239_667_000 as u64) - // Standard Error: 27_000 - .saturating_add(Weight::from_ref_time(15_826_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_345_000 as u64) + // Standard Error: 15_429 + .saturating_add(Weight::from_ref_time(14_751_125 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -358,9 +358,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(241_116_000 as u64) - // Standard Error: 41_000 - .saturating_add(Weight::from_ref_time(35_402_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_045_000 as u64) + // Standard Error: 25_949 + .saturating_add(Weight::from_ref_time(35_001_004 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -371,9 +371,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(240_515_000 as u64) - // Standard Error: 50_000 - .saturating_add(Weight::from_ref_time(35_144_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_176_000 as u64) + // Standard Error: 24_500 + .saturating_add(Weight::from_ref_time(34_447_506 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -384,10 +384,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(244_087_000 as u64) - // Standard Error: 87_000 - .saturating_add(Weight::from_ref_time(110_236_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) + Weight::from_ref_time(247_278_000 as u64) + // Standard Error: 48_613 + .saturating_add(Weight::from_ref_time(108_381_432 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -397,9 +397,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(241_774_000 as u64) - // Standard Error: 50_000 - .saturating_add(Weight::from_ref_time(35_216_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(246_996_000 as u64) + // Standard Error: 24_585 + .saturating_add(Weight::from_ref_time(34_615_777 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -410,9 +410,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(241_146_000 as u64) - // Standard Error: 54_000 - .saturating_add(Weight::from_ref_time(35_101_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_207_000 as u64) + // Standard Error: 30_691 + .saturating_add(Weight::from_ref_time(34_626_552 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -423,9 +423,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(244_096_000 as u64) - // Standard Error: 55_000 - .saturating_add(Weight::from_ref_time(34_612_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_538_000 as u64) + // Standard Error: 27_128 + .saturating_add(Weight::from_ref_time(34_277_292 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -436,9 +436,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(242_978_000 as u64) - // Standard Error: 53_000 - .saturating_add(Weight::from_ref_time(34_780_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_398_000 as u64) + // Standard Error: 28_375 + .saturating_add(Weight::from_ref_time(34_573_437 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -450,10 +450,10 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(246_175_000 as u64) - // Standard Error: 86_000 - .saturating_add(Weight::from_ref_time(99_827_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) + Weight::from_ref_time(247_732_000 as u64) + // Standard Error: 42_671 + .saturating_add(Weight::from_ref_time(123_110_886 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -463,9 +463,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(168_655_000 as u64) - // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(15_635_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(170_547_000 as u64) + // Standard Error: 9_163 + .saturating_add(Weight::from_ref_time(15_589_088 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -476,9 +476,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(239_729_000 as u64) - // Standard Error: 52_000 - .saturating_add(Weight::from_ref_time(33_477_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_264_000 as u64) + // Standard Error: 23_913 + .saturating_add(Weight::from_ref_time(32_753_431 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -489,9 +489,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(296_718_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(9_616_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(282_721_000 as u64) + // Standard Error: 1_978 + .saturating_add(Weight::from_ref_time(9_625_699 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -502,9 +502,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { - Weight::from_ref_time(237_666_000 as u64) - // Standard Error: 497_000 - .saturating_add(Weight::from_ref_time(2_090_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_356_000 as u64) + // Standard Error: 184_002 + .saturating_add(Weight::from_ref_time(2_423_400 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -515,9 +515,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(239_842_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(184_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(246_157_000 as u64) + // Standard Error: 324 + .saturating_add(Weight::from_ref_time(188_687 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -530,9 +530,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. 
fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(240_563_000 as u64) - // Standard Error: 519_000 - .saturating_add(Weight::from_ref_time(52_855_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_356_000 as u64) + // Standard Error: 232_552 + .saturating_add(Weight::from_ref_time(55_027_099 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -546,10 +546,10 @@ impl WeightInfo for SubstrateWeight { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(248_136_000 as u64) - // Standard Error: 94_000 - .saturating_add(Weight::from_ref_time(137_406_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) + Weight::from_ref_time(247_069_000 as u64) + // Standard Error: 59_538 + .saturating_add(Weight::from_ref_time(128_858_347 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -559,9 +559,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(253_433_000 as u64) - // Standard Error: 105_000 - .saturating_add(Weight::from_ref_time(242_337_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_659_000 as u64) + // Standard Error: 70_677 + .saturating_add(Weight::from_ref_time(239_930_533 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -573,11 +573,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(478_106_000 as u64) - // Standard Error: 557_000 - .saturating_add(Weight::from_ref_time(176_325_000 as u64).saturating_mul(t as u64)) - // Standard Error: 153_000 - .saturating_add(Weight::from_ref_time(67_413_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(1_198_350_000 as u64) + // Standard Error: 2_049_556 + .saturating_add(Weight::from_ref_time(70_072_141 as u64).saturating_mul(t as u64)) + // Standard Error: 496_378 + .saturating_add(Weight::from_ref_time(35_566_752 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -590,18 +590,18 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(172_751_000 as u64) - // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(26_536_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(177_240_000 as u64) + // Standard Error: 18_991 + .saturating_add(Weight::from_ref_time(26_377_453 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(196_276_000 as u64) - // Standard Error: 428_000 - .saturating_add(Weight::from_ref_time(416_783_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_795_000 as u64) + // Standard Error: 349_627 + .saturating_add(Weight::from_ref_time(411_421_066 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -610,53 +610,53 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(532_439_000 as u64) - // Standard Error: 1_323_000 - .saturating_add(Weight::from_ref_time(93_843_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(52 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(50 as u64)) - .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(405_147_000 as u64) + // Standard Error: 1_074_466 + .saturating_add(Weight::from_ref_time(120_331_835 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(6 as u64)) + .saturating_add(T::DbWeight::get().writes((15 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(511_358_000 as u64) - // Standard Error: 1_144_000 - .saturating_add(Weight::from_ref_time(68_754_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(52 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(50 as u64)) - .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(403_403_000 as u64) + // Standard Error: 890_083 + .saturating_add(Weight::from_ref_time(88_023_518 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(6 as u64)) + .saturating_add(T::DbWeight::get().writes((15 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(204_133_000 as u64) - // Standard Error: 498_000 - .saturating_add(Weight::from_ref_time(406_798_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) + Weight::from_ref_time(247_929_000 as u64) + // Standard Error: 311_780 + .saturating_add(Weight::from_ref_time(400_904_526 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(489_339_000 as u64) - // Standard Error: 1_269_000 - .saturating_add(Weight::from_ref_time(70_700_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(51 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(49 as u64)) - .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(372_378_000 as u64) + // Standard Error: 1_007_061 + .saturating_add(Weight::from_ref_time(92_326_546 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes((15 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(211_344_000 as u64) - // Standard Error: 399_000 - .saturating_add(Weight::from_ref_time(330_244_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(250_165_000 as u64) + // Standard Error: 300_205 + .saturating_add(Weight::from_ref_time(339_092_950 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -664,19 +664,19 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(449_353_000 as u64) - // Standard Error: 1_027_000 - .saturating_add(Weight::from_ref_time(153_022_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(51 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(352_873_000 as u64) + // Standard Error: 908_425 + .saturating_add(Weight::from_ref_time(176_951_688 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(216_197_000 as u64) - // Standard Error: 341_000 - .saturating_add(Weight::from_ref_time(305_401_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(248_980_000 as u64) + // Standard Error: 295_923 + .saturating_add(Weight::from_ref_time(306_145_709 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -684,34 +684,34 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(423_033_000 as u64) - // Standard Error: 878_000 - .saturating_add(Weight::from_ref_time(61_940_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(51 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(340_418_000 as u64) + // Standard Error: 749_537 + .saturating_add(Weight::from_ref_time(80_040_174 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) + .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(204_244_000 as u64) - // Standard Error: 448_000 - .saturating_add(Weight::from_ref_time(429_399_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) + Weight::from_ref_time(250_254_000 as u64) + // Standard Error: 328_900 + .saturating_add(Weight::from_ref_time(424_144_552 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(516_945_000 as u64) - // Standard Error: 1_412_000 - .saturating_add(Weight::from_ref_time(162_098_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(52 as u64)) - .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(49 as u64)) - .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(377_849_000 as u64) + // Standard Error: 1_130_582 + .saturating_add(Weight::from_ref_time(188_240_273 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) + .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(5 as u64)) + .saturating_add(T::DbWeight::get().writes((15 as u64).saturating_mul(n as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -720,12 +720,12 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(170_412_000 as u64) - // Standard Error: 761_000 - .saturating_add(Weight::from_ref_time(1_367_307_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) + Weight::from_ref_time(249_701_000 as u64) + // Standard Error: 449_896 + .saturating_add(Weight::from_ref_time(1_370_712_846 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(4 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) @@ -735,10 +735,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 9_269_000 - .saturating_add(Weight::from_ref_time(17_505_281_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) + Weight::from_ref_time(251_015_000 as u64) + // Standard Error: 7_081_378 + .saturating_add(Weight::from_ref_time(17_258_935_295 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((160 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((160 as u64).saturating_mul(r as u64))) @@ -750,11 +750,13 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 8_780_000 - .saturating_add(Weight::from_ref_time(17_368_867_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads((158 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes((79 as u64).saturating_mul(r as u64))) + Weight::from_ref_time(250_660_000 as u64) + // Standard Error: 6_402_989 + .saturating_add(Weight::from_ref_time(17_044_780_887 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().reads((150 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes((75 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -764,11 +766,11 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. 
fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(11_076_579_000 as u64) - // Standard Error: 6_568_000 - .saturating_add(Weight::from_ref_time(1_158_818_000 as u64).saturating_mul(t as u64)) - // Standard Error: 9_000 - .saturating_add(Weight::from_ref_time(9_731_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(11_920_718_000 as u64) + // Standard Error: 11_123_062 + .saturating_add(Weight::from_ref_time(642_326_761 as u64).saturating_mul(t as u64)) + // Standard Error: 9_490 + .saturating_add(Weight::from_ref_time(8_845_752 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(167 as u64)) .saturating_add(T::DbWeight::get().reads((81 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(163 as u64)) @@ -783,12 +785,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 24_125_000 - .saturating_add(Weight::from_ref_time(22_830_521_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(8 as u64)) + Weight::from_ref_time(253_804_000 as u64) + // Standard Error: 19_742_198 + .saturating_add(Weight::from_ref_time(22_250_412_835 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((400 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((400 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:81 w:81) @@ -801,9 +803,9 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(13_739_440_000 as u64) - // Standard Error: 79_000 - .saturating_add(Weight::from_ref_time(126_148_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(13_921_591_000 as u64) + // Standard Error: 22_601 + .saturating_add(Weight::from_ref_time(125_945_348 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(249 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(247 as u64)) @@ -814,11 +816,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(241_753_000 as u64) - // Standard Error: 60_000 - .saturating_add(Weight::from_ref_time(55_067_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_842_000 as u64) + // Standard Error: 219_853 + .saturating_add(Weight::from_ref_time(58_013_100 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -829,9 +831,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(320_367_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(305_607_000 as u64) + // Standard Error: 71_234 + .saturating_add(Weight::from_ref_time(323_093_184 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -840,11 +842,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(239_849_000 as u64) - // Standard Error: 80_000 - .saturating_add(Weight::from_ref_time(67_626_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_351_000 as u64) + // Standard Error: 271_656 + .saturating_add(Weight::from_ref_time(74_344_000 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -855,9 +857,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 106_000 - .saturating_add(Weight::from_ref_time(247_771_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(315_626_000 as u64) + // Standard Error: 56_955 + .saturating_add(Weight::from_ref_time(246_316_261 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -866,11 +868,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(242_162_000 as u64) - // Standard Error: 58_000 - .saturating_add(Weight::from_ref_time(45_169_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(246_887_000 as u64) + // Standard Error: 286_822 + .saturating_add(Weight::from_ref_time(52_242_599 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -881,9 +883,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(97_479_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(294_818_000 as u64) + // Standard Error: 52_394 + .saturating_add(Weight::from_ref_time(96_353_967 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -892,11 +894,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. 
fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(240_072_000 as u64) - // Standard Error: 53_000 - .saturating_add(Weight::from_ref_time(44_847_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(245_334_000 as u64) + // Standard Error: 303_979 + .saturating_add(Weight::from_ref_time(50_180_800 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -907,9 +909,9 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(97_432_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(288_995_000 as u64) + // Standard Error: 51_161 + .saturating_add(Weight::from_ref_time(96_331_905 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -918,11 +920,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(374_614_000 as u64) - // Standard Error: 634_000 - .saturating_add(Weight::from_ref_time(2_968_637_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(249_935_000 as u64) + // Standard Error: 662_188 + .saturating_add(Weight::from_ref_time(3_025_889_600 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -931,11 +933,11 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(249_022_000 as u64) - // Standard Error: 408_000 - .saturating_add(Weight::from_ref_time(2_062_013_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(248_094_000 as u64) + // Standard Error: 535_446 + .saturating_add(Weight::from_ref_time(2_086_979_500 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -947,317 +949,319 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. 
fn seal_set_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 1_536_000 - .saturating_add(Weight::from_ref_time(1_099_219_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads((158 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes((158 as u64).saturating_mul(r as u64))) + Weight::from_ref_time(248_254_000 as u64) + // Standard Error: 1_340_955 + .saturating_add(Weight::from_ref_time(1_065_939_603 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(6 as u64)) + .saturating_add(T::DbWeight::get().reads((150 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes((150 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(70_276_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(933_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_705_000 as u64) + // Standard Error: 7_175 + .saturating_add(Weight::from_ref_time(983_586 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(70_309_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(2_977_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_659_000 as u64) + // Standard Error: 1_140 + .saturating_add(Weight::from_ref_time(3_004_307 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(71_165_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_686_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_621_000 as u64) + // Standard Error: 2_871 + .saturating_add(Weight::from_ref_time(2_715_451 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(69_872_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_374_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_459_000 as u64) + // Standard Error: 3_284 + .saturating_add(Weight::from_ref_time(2_581_283 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_891_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_629_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_558_000 as u64) + // Standard Error: 2_509 + .saturating_add(Weight::from_ref_time(2_906_689 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(69_747_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_639_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_407_000 as u64) + // Standard Error: 1_697 + .saturating_add(Weight::from_ref_time(1_702_534 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_262_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_142_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_477_000 as u64) + // Standard Error: 1_733 + .saturating_add(Weight::from_ref_time(2_192_641 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(68_808_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_342_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_424_000 as u64) + // Standard Error: 2_096 + .saturating_add(Weight::from_ref_time(2_435_708 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(73_245_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(e as u64)) + Weight::from_ref_time(72_883_000 as u64) + // Standard Error: 209 + .saturating_add(Weight::from_ref_time(6_874 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(71_308_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(7_333_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_422_000 as u64) + // Standard Error: 5_030 + .saturating_add(Weight::from_ref_time(7_638_087 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(83_967_000 as u64) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(9_205_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(83_136_000 as u64) + // Standard Error: 7_751 + .saturating_add(Weight::from_ref_time(9_543_944 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(93_600_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(546_000 as u64).saturating_mul(p as u64)) + Weight::from_ref_time(93_281_000 as u64) + // Standard Error: 1_967 + .saturating_add(Weight::from_ref_time(596_591 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(70_449_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_052_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_702_000 as u64) + // Standard Error: 1_112 + .saturating_add(Weight::from_ref_time(1_094_685 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(70_326_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(998_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_720_000 as u64) + // Standard Error: 1_768 + .saturating_add(Weight::from_ref_time(1_045_163 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(70_525_000 as u64) + Weight::from_ref_time(69_716_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_467_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_541_622 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(73_703_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_495_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_315_000 as u64) + // Standard Error: 1_081 + .saturating_add(Weight::from_ref_time(1_646_437 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(73_578_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_546_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_256_000 as u64) + // Standard Error: 1_749 + .saturating_add(Weight::from_ref_time(1_664_891 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(70_379_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(934_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_616_000 as u64) + // Standard Error: 1_303 + .saturating_add(Weight::from_ref_time(1_017_254 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(71_069_000 as u64) - // Standard Error: 114_000 - .saturating_add(Weight::from_ref_time(182_540_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_943_000 as u64) + // Standard Error: 120_139 + .saturating_add(Weight::from_ref_time(183_262_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(70_188_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_953_000 as u64) + // Standard Error: 2_591 + .saturating_add(Weight::from_ref_time(1_458_584 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(69_970_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_366_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_439_000 as u64) + // Standard Error: 1_822 + .saturating_add(Weight::from_ref_time(1_486_431 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(70_352_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_356_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_366_000 as u64) + // Standard Error: 2_674 + .saturating_add(Weight::from_ref_time(1_506_755 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(70_229_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_354_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_386_000 as u64) + // Standard Error: 3_040 + .saturating_add(Weight::from_ref_time(1_509_928 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(70_202_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_355_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_432_000 as u64) + // Standard Error: 1_875 + .saturating_add(Weight::from_ref_time(1_439_336 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(70_065_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_488_000 as u64) + // Standard Error: 1_654 + .saturating_add(Weight::from_ref_time(1_429_318 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(70_252_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_356_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_414_000 as u64) + // Standard Error: 2_773 + .saturating_add(Weight::from_ref_time(1_499_116 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(70_049_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_823_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_427_000 as u64) + // Standard Error: 2_404 + .saturating_add(Weight::from_ref_time(1_969_697 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(70_519_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_815_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_793_000 as u64) + // Standard Error: 1_765 + .saturating_add(Weight::from_ref_time(1_924_169 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(69_953_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_834_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_551_000 as u64) + // Standard Error: 3_955 + .saturating_add(Weight::from_ref_time(1_988_825 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(70_299_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_818_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_510_000 as u64) + // Standard Error: 3_797 + .saturating_add(Weight::from_ref_time(1_976_317 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(70_141_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_825_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_399_000 as u64) + // Standard Error: 3_255 + .saturating_add(Weight::from_ref_time(1_975_054 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(70_209_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_827_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_425_000 as u64) + // Standard Error: 1_738 + .saturating_add(Weight::from_ref_time(1_959_864 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(69_980_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_831_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_514_000 as u64) + // Standard Error: 9_227 + .saturating_add(Weight::from_ref_time(1_984_750 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(70_022_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_829_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_415_000 as u64) + // Standard Error: 2_052 + .saturating_add(Weight::from_ref_time(1_967_146 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(70_030_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_826_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_348_000 as u64) + // Standard Error: 3_647 + .saturating_add(Weight::from_ref_time(1_996_141 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(70_170_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_833_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_448_000 as u64) + // Standard Error: 1_960 + .saturating_add(Weight::from_ref_time(1_956_889 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(69_895_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_826_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_443_000 as u64) + // Standard Error: 20_301 + .saturating_add(Weight::from_ref_time(2_072_285 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(69_932_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_830_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_458_000 as u64) + // Standard Error: 3_210 + .saturating_add(Weight::from_ref_time(1_970_367 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(70_091_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_825_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_429_000 as u64) + // Standard Error: 3_475 + .saturating_add(Weight::from_ref_time(1_977_800 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(70_025_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_556_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_414_000 as u64) + // Standard Error: 2_659 + .saturating_add(Weight::from_ref_time(2_797_503 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(71_910_000 as u64) - // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(2_290_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_460_000 as u64) + // Standard Error: 2_399 + .saturating_add(Weight::from_ref_time(2_646_574 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(70_268_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_550_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_441_000 as u64) + // Standard Error: 2_179 + .saturating_add(Weight::from_ref_time(2_860_283 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(70_126_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_340_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_401_000 as u64) + // Standard Error: 2_639 + .saturating_add(Weight::from_ref_time(2_582_137 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(70_381_000 as u64) - // Standard Error: 9_000 - .saturating_add(Weight::from_ref_time(1_844_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_426_000 as u64) + // Standard Error: 2_379 + .saturating_add(Weight::from_ref_time(1_986_578 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(70_095_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_844_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_599_000 as u64) + // Standard Error: 2_918 + .saturating_add(Weight::from_ref_time(1_991_834 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(70_471_000 as u64) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(1_836_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_142_000 as u64) + // Standard Error: 2_659 + .saturating_add(Weight::from_ref_time(1_948_956 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(70_302_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_407_000 as u64) + // Standard Error: 4_100 + .saturating_add(Weight::from_ref_time(2_030_715 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(70_097_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_534_000 as u64) + // Standard Error: 2_305 + .saturating_add(Weight::from_ref_time(1_989_346 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(70_166_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_845_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_432_000 as u64) + // Standard Error: 2_747 + .saturating_add(Weight::from_ref_time(2_002_548 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(69_630_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_879_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_465_000 as u64) + // Standard Error: 9_644 + .saturating_add(Weight::from_ref_time(2_045_830 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(70_101_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_861_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_435_000 as u64) + // Standard Error: 2_271 + .saturating_add(Weight::from_ref_time(2_001_986 as u64).saturating_mul(r as u64)) } } @@ -1265,15 +1269,15 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(3_089_000 as u64) + Weight::from_ref_time(3_108_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(13_917_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(901_000 as u64).saturating_mul(k as u64)) + Weight::from_ref_time(15_377_000 as u64) + // Standard Error: 414 + .saturating_add(Weight::from_ref_time(892_761 as u64).saturating_mul(k as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) @@ -1281,19 +1285,18 @@ impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(14_172_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_301_000 as u64).saturating_mul(q as u64)) + Weight::from_ref_time(3_127_000 as u64) + // Standard Error: 3_813 + .saturating_add(Weight::from_ref_time(1_379_402 as u64).saturating_mul(q as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(21_644_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(22_849_000 as u64) + // Standard Error: 26 + .saturating_add(Weight::from_ref_time(45_513 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -1304,9 +1307,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(234_349_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(258_275_000 as u64) + // Standard Error: 30 + .saturating_add(Weight::from_ref_time(45_885 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1321,11 +1324,11 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. 
fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(294_077_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(c as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(2_115_889_000 as u64) + // Standard Error: 323 + .saturating_add(Weight::from_ref_time(89_541 as u64).saturating_mul(c as u64)) + // Standard Error: 19 + .saturating_add(Weight::from_ref_time(664 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().writes(9 as u64)) } @@ -1338,9 +1341,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(199_028_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(211_349_000 as u64) + // Standard Error: 1 + .saturating_add(Weight::from_ref_time(1_532 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().writes(7 as u64)) } @@ -1350,7 +1353,7 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - Weight::from_ref_time(176_247_000 as u64) + Weight::from_ref_time(181_771_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1360,9 +1363,9 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(54_917_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(46_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(55_917_000 as u64) + // Standard Error: 25 + .saturating_add(Weight::from_ref_time(46_148 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1371,7 +1374,7 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(37_611_000 as u64) + Weight::from_ref_time(37_966_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1379,7 +1382,7 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:2 w:2) // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - Weight::from_ref_time(40_121_000 as u64) + Weight::from_ref_time(40_963_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -1390,9 +1393,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(241_129_000 as u64) - // Standard Error: 54_000 - .saturating_add(Weight::from_ref_time(35_413_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_526_000 as u64) + // Standard Error: 29_505 + .saturating_add(Weight::from_ref_time(35_107_467 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1403,9 +1406,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(189_418_000 as u64) - // Standard Error: 419_000 - .saturating_add(Weight::from_ref_time(207_107_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(253_782_000 as u64) + // Standard Error: 256_004 + .saturating_add(Weight::from_ref_time(201_621_188 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1417,9 +1420,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(203_928_000 as u64) - // Standard Error: 439_000 - .saturating_add(Weight::from_ref_time(268_983_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(249_892_000 as u64) + // Standard Error: 234_750 + .saturating_add(Weight::from_ref_time(258_209_168 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1431,9 +1434,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(243_800_000 as u64) - // Standard Error: 40_000 - .saturating_add(Weight::from_ref_time(38_797_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(248_674_000 as u64) + // Standard Error: 23_962 + .saturating_add(Weight::from_ref_time(38_319_695 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1444,9 +1447,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(239_667_000 as u64) - // Standard Error: 27_000 - .saturating_add(Weight::from_ref_time(15_826_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_345_000 as u64) + // Standard Error: 15_429 + .saturating_add(Weight::from_ref_time(14_751_125 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1457,9 +1460,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(241_116_000 as u64) - // Standard Error: 41_000 - .saturating_add(Weight::from_ref_time(35_402_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_045_000 as u64) + // Standard Error: 25_949 + .saturating_add(Weight::from_ref_time(35_001_004 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1470,9 +1473,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(240_515_000 as u64) - // Standard Error: 50_000 - .saturating_add(Weight::from_ref_time(35_144_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_176_000 as u64) + // Standard Error: 24_500 + .saturating_add(Weight::from_ref_time(34_447_506 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1483,10 +1486,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(244_087_000 as u64) - // Standard Error: 87_000 - .saturating_add(Weight::from_ref_time(110_236_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) + Weight::from_ref_time(247_278_000 as u64) + // Standard Error: 48_613 + .saturating_add(Weight::from_ref_time(108_381_432 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -1496,9 +1499,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(241_774_000 as u64) - // Standard Error: 50_000 - .saturating_add(Weight::from_ref_time(35_216_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(246_996_000 as u64) + // Standard Error: 24_585 + .saturating_add(Weight::from_ref_time(34_615_777 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1509,9 +1512,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(241_146_000 as u64) - // Standard Error: 54_000 - .saturating_add(Weight::from_ref_time(35_101_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_207_000 as u64) + // Standard Error: 30_691 + .saturating_add(Weight::from_ref_time(34_626_552 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1522,9 +1525,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(244_096_000 as u64) - // Standard Error: 55_000 - .saturating_add(Weight::from_ref_time(34_612_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_538_000 as u64) + // Standard Error: 27_128 + .saturating_add(Weight::from_ref_time(34_277_292 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1535,9 +1538,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(242_978_000 as u64) - // Standard Error: 53_000 - .saturating_add(Weight::from_ref_time(34_780_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_398_000 as u64) + // Standard Error: 28_375 + .saturating_add(Weight::from_ref_time(34_573_437 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1549,10 +1552,10 @@ impl WeightInfo for () { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(246_175_000 as u64) - // Standard Error: 86_000 - .saturating_add(Weight::from_ref_time(99_827_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) + Weight::from_ref_time(247_732_000 as u64) + // Standard Error: 42_671 + .saturating_add(Weight::from_ref_time(123_110_886 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -1562,9 +1565,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(168_655_000 as u64) - // Standard Error: 16_000 - .saturating_add(Weight::from_ref_time(15_635_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(170_547_000 as u64) + // Standard Error: 9_163 + .saturating_add(Weight::from_ref_time(15_589_088 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1575,9 +1578,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(239_729_000 as u64) - // Standard Error: 52_000 - .saturating_add(Weight::from_ref_time(33_477_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_264_000 as u64) + // Standard Error: 23_913 + .saturating_add(Weight::from_ref_time(32_753_431 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1588,9 +1591,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. 
fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(296_718_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(9_616_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(282_721_000 as u64) + // Standard Error: 1_978 + .saturating_add(Weight::from_ref_time(9_625_699 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1601,9 +1604,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { - Weight::from_ref_time(237_666_000 as u64) - // Standard Error: 497_000 - .saturating_add(Weight::from_ref_time(2_090_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_356_000 as u64) + // Standard Error: 184_002 + .saturating_add(Weight::from_ref_time(2_423_400 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1614,9 +1617,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(239_842_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(184_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(246_157_000 as u64) + // Standard Error: 324 + .saturating_add(Weight::from_ref_time(188_687 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1629,9 +1632,9 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(240_563_000 as u64) - // Standard Error: 519_000 - .saturating_add(Weight::from_ref_time(52_855_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_356_000 as u64) + // Standard Error: 232_552 + .saturating_add(Weight::from_ref_time(55_027_099 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1645,10 +1648,10 @@ impl WeightInfo for () { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(248_136_000 as u64) - // Standard Error: 94_000 - .saturating_add(Weight::from_ref_time(137_406_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) + Weight::from_ref_time(247_069_000 as u64) + // Standard Error: 59_538 + .saturating_add(Weight::from_ref_time(128_858_347 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -1658,9 +1661,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(253_433_000 as u64) - // Standard Error: 105_000 - .saturating_add(Weight::from_ref_time(242_337_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(244_659_000 as u64) + // Standard Error: 70_677 + .saturating_add(Weight::from_ref_time(239_930_533 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1672,11 +1675,11 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(478_106_000 as u64) - // Standard Error: 557_000 - .saturating_add(Weight::from_ref_time(176_325_000 as u64).saturating_mul(t as u64)) - // Standard Error: 153_000 - .saturating_add(Weight::from_ref_time(67_413_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(1_198_350_000 as u64) + // Standard Error: 2_049_556 + .saturating_add(Weight::from_ref_time(70_072_141 as u64).saturating_mul(t as u64)) + // Standard Error: 496_378 + .saturating_add(Weight::from_ref_time(35_566_752 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1689,18 +1692,18 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(172_751_000 as u64) - // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(26_536_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(177_240_000 as u64) + // Standard Error: 18_991 + .saturating_add(Weight::from_ref_time(26_377_453 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(196_276_000 as u64) - // Standard Error: 428_000 - .saturating_add(Weight::from_ref_time(416_783_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_795_000 as u64) + // Standard Error: 349_627 + .saturating_add(Weight::from_ref_time(411_421_066 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1709,53 +1712,53 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(532_439_000 as u64) - // Standard Error: 1_323_000 - .saturating_add(Weight::from_ref_time(93_843_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(52 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(50 as u64)) - .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(405_147_000 as u64) + // Standard Error: 1_074_466 + .saturating_add(Weight::from_ref_time(120_331_835 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) + .saturating_add(RocksDbWeight::get().writes((15 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(511_358_000 as u64) - // Standard Error: 1_144_000 - .saturating_add(Weight::from_ref_time(68_754_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(52 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(50 as u64)) - .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(403_403_000 as u64) + // Standard Error: 890_083 + .saturating_add(Weight::from_ref_time(88_023_518 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) + .saturating_add(RocksDbWeight::get().writes((15 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(204_133_000 as u64) - // Standard Error: 498_000 - .saturating_add(Weight::from_ref_time(406_798_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) + Weight::from_ref_time(247_929_000 as u64) + // Standard Error: 311_780 + .saturating_add(Weight::from_ref_time(400_904_526 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(489_339_000 as u64) - // Standard Error: 1_269_000 - .saturating_add(Weight::from_ref_time(70_700_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(51 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(49 as u64)) - .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(372_378_000 as u64) + // Standard Error: 1_007_061 + .saturating_add(Weight::from_ref_time(92_326_546 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes((15 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(211_344_000 as u64) - // Standard Error: 399_000 - .saturating_add(Weight::from_ref_time(330_244_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(250_165_000 as u64) + // Standard Error: 300_205 + .saturating_add(Weight::from_ref_time(339_092_950 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1763,19 +1766,19 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(449_353_000 as u64) - // Standard Error: 1_027_000 - .saturating_add(Weight::from_ref_time(153_022_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(51 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(352_873_000 as u64) + // Standard Error: 908_425 + .saturating_add(Weight::from_ref_time(176_951_688 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(216_197_000 as u64) - // Standard Error: 341_000 - .saturating_add(Weight::from_ref_time(305_401_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(248_980_000 as u64) + // Standard Error: 295_923 + .saturating_add(Weight::from_ref_time(306_145_709 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1783,34 +1786,34 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(423_033_000 as u64) - // Standard Error: 878_000 - .saturating_add(Weight::from_ref_time(61_940_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(51 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(340_418_000 as u64) + // Standard Error: 749_537 + .saturating_add(Weight::from_ref_time(80_040_174 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) + .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(204_244_000 as u64) - // Standard Error: 448_000 - .saturating_add(Weight::from_ref_time(429_399_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) + Weight::from_ref_time(250_254_000 as u64) + // Standard Error: 328_900 + .saturating_add(Weight::from_ref_time(424_144_552 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(516_945_000 as u64) - // Standard Error: 1_412_000 - .saturating_add(Weight::from_ref_time(162_098_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(52 as u64)) - .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(49 as u64)) - .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) + Weight::from_ref_time(377_849_000 as u64) + // Standard Error: 1_130_582 + .saturating_add(Weight::from_ref_time(188_240_273 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) + .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) + .saturating_add(RocksDbWeight::get().writes((15 as u64).saturating_mul(n as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1819,12 +1822,12 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(170_412_000 as u64) - // Standard Error: 761_000 - .saturating_add(Weight::from_ref_time(1_367_307_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) + Weight::from_ref_time(249_701_000 as u64) + // Standard Error: 449_896 + .saturating_add(Weight::from_ref_time(1_370_712_846 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) @@ -1834,10 +1837,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 9_269_000 - .saturating_add(Weight::from_ref_time(17_505_281_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) + Weight::from_ref_time(251_015_000 as u64) + // Standard Error: 7_081_378 + .saturating_add(Weight::from_ref_time(17_258_935_295 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((160 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((160 as u64).saturating_mul(r as u64))) @@ -1849,11 +1852,13 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 8_780_000 - .saturating_add(Weight::from_ref_time(17_368_867_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads((158 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes((79 as u64).saturating_mul(r as u64))) + Weight::from_ref_time(250_660_000 as u64) + // Standard Error: 6_402_989 + .saturating_add(Weight::from_ref_time(17_044_780_887 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().reads((150 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes((75 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:81 w:81) @@ -1863,11 +1868,11 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. 
fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(11_076_579_000 as u64) - // Standard Error: 6_568_000 - .saturating_add(Weight::from_ref_time(1_158_818_000 as u64).saturating_mul(t as u64)) - // Standard Error: 9_000 - .saturating_add(Weight::from_ref_time(9_731_000 as u64).saturating_mul(c as u64)) + Weight::from_ref_time(11_920_718_000 as u64) + // Standard Error: 11_123_062 + .saturating_add(Weight::from_ref_time(642_326_761 as u64).saturating_mul(t as u64)) + // Standard Error: 9_490 + .saturating_add(Weight::from_ref_time(8_845_752 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(167 as u64)) .saturating_add(RocksDbWeight::get().reads((81 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(163 as u64)) @@ -1882,12 +1887,12 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 24_125_000 - .saturating_add(Weight::from_ref_time(22_830_521_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) + Weight::from_ref_time(253_804_000 as u64) + // Standard Error: 19_742_198 + .saturating_add(Weight::from_ref_time(22_250_412_835 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((400 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((400 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:81 w:81) @@ -1900,9 +1905,9 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(13_739_440_000 as u64) - // Standard Error: 79_000 - .saturating_add(Weight::from_ref_time(126_148_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(13_921_591_000 as u64) + // Standard Error: 22_601 + .saturating_add(Weight::from_ref_time(125_945_348 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(249 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(247 as u64)) @@ -1913,11 +1918,11 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(241_753_000 as u64) - // Standard Error: 60_000 - .saturating_add(Weight::from_ref_time(55_067_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_842_000 as u64) + // Standard Error: 219_853 + .saturating_add(Weight::from_ref_time(58_013_100 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1928,9 +1933,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(320_367_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(305_607_000 as u64) + // Standard Error: 71_234 + .saturating_add(Weight::from_ref_time(323_093_184 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1939,11 +1944,11 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(239_849_000 as u64) - // Standard Error: 80_000 - .saturating_add(Weight::from_ref_time(67_626_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(247_351_000 as u64) + // Standard Error: 271_656 + .saturating_add(Weight::from_ref_time(74_344_000 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1954,9 +1959,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 106_000 - .saturating_add(Weight::from_ref_time(247_771_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(315_626_000 as u64) + // Standard Error: 56_955 + .saturating_add(Weight::from_ref_time(246_316_261 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1965,11 +1970,11 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(242_162_000 as u64) - // Standard Error: 58_000 - .saturating_add(Weight::from_ref_time(45_169_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(246_887_000 as u64) + // Standard Error: 286_822 + .saturating_add(Weight::from_ref_time(52_242_599 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1980,9 +1985,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(97_479_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(294_818_000 as u64) + // Standard Error: 52_394 + .saturating_add(Weight::from_ref_time(96_353_967 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1991,11 +1996,11 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. 
fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(240_072_000 as u64) - // Standard Error: 53_000 - .saturating_add(Weight::from_ref_time(44_847_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(245_334_000 as u64) + // Standard Error: 303_979 + .saturating_add(Weight::from_ref_time(50_180_800 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2006,9 +2011,9 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 95_000 - .saturating_add(Weight::from_ref_time(97_432_000 as u64).saturating_mul(n as u64)) + Weight::from_ref_time(288_995_000 as u64) + // Standard Error: 51_161 + .saturating_add(Weight::from_ref_time(96_331_905 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2017,11 +2022,11 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(374_614_000 as u64) - // Standard Error: 634_000 - .saturating_add(Weight::from_ref_time(2_968_637_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(249_935_000 as u64) + // Standard Error: 662_188 + .saturating_add(Weight::from_ref_time(3_025_889_600 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2030,11 +2035,11 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:1 w:0) // Storage: Timestamp Now (r:1 w:0) // Storage: System EventTopics (r:2 w:2) - /// The range of component `r` is `[0, 20]`. + /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(249_022_000 as u64) - // Standard Error: 408_000 - .saturating_add(Weight::from_ref_time(2_062_013_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(248_094_000 as u64) + // Standard Error: 535_446 + .saturating_add(Weight::from_ref_time(2_086_979_500 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2046,316 +2051,318 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. 
fn seal_set_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 1_536_000 - .saturating_add(Weight::from_ref_time(1_099_219_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads((158 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes((158 as u64).saturating_mul(r as u64))) + Weight::from_ref_time(248_254_000 as u64) + // Standard Error: 1_340_955 + .saturating_add(Weight::from_ref_time(1_065_939_603 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(6 as u64)) + .saturating_add(RocksDbWeight::get().reads((150 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes((150 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(70_276_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(933_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_705_000 as u64) + // Standard Error: 7_175 + .saturating_add(Weight::from_ref_time(983_586 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(70_309_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(2_977_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_659_000 as u64) + // Standard Error: 1_140 + .saturating_add(Weight::from_ref_time(3_004_307 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(71_165_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_686_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_621_000 as u64) + // Standard Error: 2_871 + .saturating_add(Weight::from_ref_time(2_715_451 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(69_872_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_374_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_459_000 as u64) + // Standard Error: 3_284 + .saturating_add(Weight::from_ref_time(2_581_283 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_891_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_629_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_558_000 as u64) + // Standard Error: 2_509 + .saturating_add(Weight::from_ref_time(2_906_689 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(69_747_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_639_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_407_000 as u64) + // Standard Error: 1_697 + .saturating_add(Weight::from_ref_time(1_702_534 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_262_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_142_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_477_000 as u64) + // Standard Error: 1_733 + .saturating_add(Weight::from_ref_time(2_192_641 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(68_808_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_342_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_424_000 as u64) + // Standard Error: 2_096 + .saturating_add(Weight::from_ref_time(2_435_708 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(73_245_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(3_000 as u64).saturating_mul(e as u64)) + Weight::from_ref_time(72_883_000 as u64) + // Standard Error: 209 + .saturating_add(Weight::from_ref_time(6_874 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(71_308_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(7_333_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_422_000 as u64) + // Standard Error: 5_030 + .saturating_add(Weight::from_ref_time(7_638_087 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(83_967_000 as u64) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(9_205_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(83_136_000 as u64) + // Standard Error: 7_751 + .saturating_add(Weight::from_ref_time(9_543_944 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(93_600_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(546_000 as u64).saturating_mul(p as u64)) + Weight::from_ref_time(93_281_000 as u64) + // Standard Error: 1_967 + .saturating_add(Weight::from_ref_time(596_591 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(70_449_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_052_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_702_000 as u64) + // Standard Error: 1_112 + .saturating_add(Weight::from_ref_time(1_094_685 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(70_326_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(998_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_720_000 as u64) + // Standard Error: 1_768 + .saturating_add(Weight::from_ref_time(1_045_163 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(70_525_000 as u64) + Weight::from_ref_time(69_716_000 as u64) // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_467_000 as u64).saturating_mul(r as u64)) + .saturating_add(Weight::from_ref_time(1_541_622 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(73_703_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_495_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_315_000 as u64) + // Standard Error: 1_081 + .saturating_add(Weight::from_ref_time(1_646_437 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(73_578_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(1_546_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(72_256_000 as u64) + // Standard Error: 1_749 + .saturating_add(Weight::from_ref_time(1_664_891 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(70_379_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(934_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_616_000 as u64) + // Standard Error: 1_303 + .saturating_add(Weight::from_ref_time(1_017_254 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(71_069_000 as u64) - // Standard Error: 114_000 - .saturating_add(Weight::from_ref_time(182_540_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_943_000 as u64) + // Standard Error: 120_139 + .saturating_add(Weight::from_ref_time(183_262_000 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(70_188_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_953_000 as u64) + // Standard Error: 2_591 + .saturating_add(Weight::from_ref_time(1_458_584 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(69_970_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_366_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_439_000 as u64) + // Standard Error: 1_822 + .saturating_add(Weight::from_ref_time(1_486_431 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(70_352_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_356_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_366_000 as u64) + // Standard Error: 2_674 + .saturating_add(Weight::from_ref_time(1_506_755 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(70_229_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_354_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_386_000 as u64) + // Standard Error: 3_040 + .saturating_add(Weight::from_ref_time(1_509_928 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(70_202_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_355_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_432_000 as u64) + // Standard Error: 1_875 + .saturating_add(Weight::from_ref_time(1_439_336 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(70_065_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_358_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_488_000 as u64) + // Standard Error: 1_654 + .saturating_add(Weight::from_ref_time(1_429_318 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(70_252_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_356_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_414_000 as u64) + // Standard Error: 2_773 + .saturating_add(Weight::from_ref_time(1_499_116 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(70_049_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_823_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_427_000 as u64) + // Standard Error: 2_404 + .saturating_add(Weight::from_ref_time(1_969_697 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(70_519_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_815_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(70_793_000 as u64) + // Standard Error: 1_765 + .saturating_add(Weight::from_ref_time(1_924_169 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(69_953_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_834_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_551_000 as u64) + // Standard Error: 3_955 + .saturating_add(Weight::from_ref_time(1_988_825 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(70_299_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_818_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_510_000 as u64) + // Standard Error: 3_797 + .saturating_add(Weight::from_ref_time(1_976_317 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(70_141_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_825_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_399_000 as u64) + // Standard Error: 3_255 + .saturating_add(Weight::from_ref_time(1_975_054 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(70_209_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_827_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_425_000 as u64) + // Standard Error: 1_738 + .saturating_add(Weight::from_ref_time(1_959_864 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(69_980_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_831_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_514_000 as u64) + // Standard Error: 9_227 + .saturating_add(Weight::from_ref_time(1_984_750 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(70_022_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_829_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_415_000 as u64) + // Standard Error: 2_052 + .saturating_add(Weight::from_ref_time(1_967_146 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(70_030_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_826_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_348_000 as u64) + // Standard Error: 3_647 + .saturating_add(Weight::from_ref_time(1_996_141 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(70_170_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_833_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_448_000 as u64) + // Standard Error: 1_960 + .saturating_add(Weight::from_ref_time(1_956_889 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(69_895_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_826_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_443_000 as u64) + // Standard Error: 20_301 + .saturating_add(Weight::from_ref_time(2_072_285 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(69_932_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_830_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_458_000 as u64) + // Standard Error: 3_210 + .saturating_add(Weight::from_ref_time(1_970_367 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(70_091_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_825_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_429_000 as u64) + // Standard Error: 3_475 + .saturating_add(Weight::from_ref_time(1_977_800 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(70_025_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_556_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_414_000 as u64) + // Standard Error: 2_659 + .saturating_add(Weight::from_ref_time(2_797_503 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(71_910_000 as u64) - // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(2_290_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_460_000 as u64) + // Standard Error: 2_399 + .saturating_add(Weight::from_ref_time(2_646_574 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(70_268_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_550_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_441_000 as u64) + // Standard Error: 2_179 + .saturating_add(Weight::from_ref_time(2_860_283 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(70_126_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_340_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_401_000 as u64) + // Standard Error: 2_639 + .saturating_add(Weight::from_ref_time(2_582_137 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(70_381_000 as u64) - // Standard Error: 9_000 - .saturating_add(Weight::from_ref_time(1_844_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_426_000 as u64) + // Standard Error: 2_379 + .saturating_add(Weight::from_ref_time(1_986_578 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(70_095_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_844_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_599_000 as u64) + // Standard Error: 2_918 + .saturating_add(Weight::from_ref_time(1_991_834 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(70_471_000 as u64) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(1_836_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(71_142_000 as u64) + // Standard Error: 2_659 + .saturating_add(Weight::from_ref_time(1_948_956 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(70_302_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_841_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_407_000 as u64) + // Standard Error: 4_100 + .saturating_add(Weight::from_ref_time(2_030_715 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(70_097_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_850_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_534_000 as u64) + // Standard Error: 2_305 + .saturating_add(Weight::from_ref_time(1_989_346 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(70_166_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_845_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_432_000 as u64) + // Standard Error: 2_747 + .saturating_add(Weight::from_ref_time(2_002_548 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(69_630_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(1_879_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_465_000 as u64) + // Standard Error: 9_644 + .saturating_add(Weight::from_ref_time(2_045_830 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(70_101_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(1_861_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(69_435_000 as u64) + // Standard Error: 2_271 + .saturating_add(Weight::from_ref_time(2_001_986 as u64).saturating_mul(r as u64)) } } From f5ed51def33950ac24709eb11940790ca446421d Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 20 Oct 2022 08:44:04 +0200 Subject: [PATCH 264/348] BlockId removal: refactor: Finalizer (#12528) * BlockId removal: refactor: Finalizer It changes the arguments of methods of `Finalizer` trait from: block: `BlockId` to: hash: `&Block::Hash` This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) * minor corrections * failing test corrected * minor rework --- client/api/src/backend.rs | 4 +- client/beefy/src/tests.rs | 70 ++++++++----------- client/beefy/src/worker.rs | 8 +-- client/consensus/babe/src/tests.rs | 10 +-- .../manual-seal/src/finalize_block.rs | 4 +- client/finality-grandpa/src/environment.rs | 2 +- client/finality-grandpa/src/finality_proof.rs | 5 +- client/finality-grandpa/src/tests.rs | 14 +++- client/finality-grandpa/src/warp_proof.rs | 7 +- client/network/sync/src/lib.rs | 8 +-- client/network/test/src/lib.rs | 6 +- client/network/test/src/sync.rs | 42 ++++++----- client/rpc/src/chain/tests.rs | 8 ++- client/service/src/client/client.rs | 23 +++--- client/service/test/src/client/mod.rs | 12 ++-- test-utils/client/src/client_ext.rs | 8 +-- 16 files changed, 113 insertions(+), 118 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index f358385acd708..5b4acb0be8bda 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -252,7 +252,7 @@ pub trait Finalizer> { fn apply_finality( &self, operation: &mut ClientImportOperation, - id: BlockId, + block: &Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()>; @@ -272,7 +272,7 @@ pub trait Finalizer> { /// while performing major synchronization work. 
fn finalize_block( &self, - id: BlockId, + block: &Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()>; diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 89be1cac4f886..d280e4f08a531 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -509,14 +509,10 @@ fn finalize_block_and_wait_for_beefy( let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); for block in finalize_targets { - let finalize = BlockId::number(*block); peers.clone().for_each(|(index, _)| { - net.lock() - .peer(index) - .client() - .as_client() - .finalize_block(finalize, None) - .unwrap(); + let client = net.lock().peer(index).client().as_client(); + let finalize = client.expect_block_hash_from_id(&BlockId::number(*block)).unwrap(); + client.finalize_block(&finalize, None).unwrap(); }) } @@ -604,9 +600,15 @@ fn lagging_validators() { ); // Alice finalizes #25, Bob lags behind - let finalize = BlockId::number(25); + let finalize = net + .lock() + .peer(0) + .client() + .as_client() + .expect_block_hash_from_id(&BlockId::number(25)) + .unwrap(); let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); + net.lock().peer(0).client().as_client().finalize_block(&finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); @@ -614,7 +616,7 @@ fn lagging_validators() { // Bob catches up and also finalizes #25 let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(&finalize, None).unwrap(); // expected beefy finalizes block #17 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]); @@ -628,8 +630,14 @@ fn lagging_validators() { // Alice finalizes session-boundary mandatory block #60, Bob lags behind let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - let finalize = BlockId::number(60); - net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); + let finalize = net + .lock() + .peer(0) + .client() + .as_client() + .expect_block_hash_from_id(&BlockId::number(60)) + .unwrap(); + net.lock().peer(0).client().as_client().finalize_block(&finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); @@ -637,7 +645,7 @@ fn lagging_validators() { // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); - net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(&finalize, None).unwrap(); // verify beefy skips intermediary votes, and successfully finalizes mandatory block #60 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[60]); @@ -681,24 +689,16 @@ fn correct_beefy_payload() { 
get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); // now 2 good validators and 1 bad one are voting - net.lock() + let hashof11 = net + .lock() .peer(0) .client() .as_client() - .finalize_block(BlockId::number(11), None) - .unwrap(); - net.lock() - .peer(1) - .client() - .as_client() - .finalize_block(BlockId::number(11), None) - .unwrap(); - net.lock() - .peer(3) - .client() - .as_client() - .finalize_block(BlockId::number(11), None) + .expect_block_hash_from_id(&BlockId::number(11)) .unwrap(); + net.lock().peer(0).client().as_client().finalize_block(&hashof11, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(&hashof11, None).unwrap(); + net.lock().peer(3).client().as_client().finalize_block(&hashof11, None).unwrap(); // verify consensus is _not_ reached let timeout = Some(Duration::from_millis(250)); @@ -708,12 +708,7 @@ fn correct_beefy_payload() { // 3rd good validator catches up and votes as well let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); - net.lock() - .peer(2) - .client() - .as_client() - .finalize_block(BlockId::number(11), None) - .unwrap(); + net.lock().peer(2).client().as_client().finalize_block(&hashof11, None).unwrap(); // verify consensus is reached wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[11]); @@ -923,12 +918,9 @@ fn on_demand_beefy_justification_sync() { let (dave_best_blocks, _) = get_beefy_streams(&mut net.lock(), [(dave_index, BeefyKeyring::Dave)].into_iter()); - net.lock() - .peer(dave_index) - .client() - .as_client() - .finalize_block(BlockId::number(1), None) - .unwrap(); + let client = net.lock().peer(dave_index).client().as_client(); + let hashof1 = client.expect_block_hash_from_id(&BlockId::number(1)).unwrap(); + client.finalize_block(&hashof1, None).unwrap(); // Give Dave task some cpu cycles to process the finality notification, run_for(Duration::from_millis(100), &net, &mut runtime); // freshly spun up Dave now needs to listen for gossip to figure out the state of his peers. diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 4381081f74ebd..6efebd131d6da 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -1506,11 +1506,9 @@ pub(crate) mod tests { // push 15 blocks with `AuthorityChange` digests every 10 blocks net.generate_blocks_and_sync(15, 10, &validator_set, false); // finalize 13 without justifications - net.peer(0) - .client() - .as_client() - .finalize_block(BlockId::number(13), None) - .unwrap(); + let hashof13 = + backend.blockchain().expect_block_hash_from_id(&BlockId::Number(13)).unwrap(); + net.peer(0).client().as_client().finalize_block(&hashof13, None).unwrap(); // Test initialization at session boundary. 
{ diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 58f5e7b8eb6d4..24185dbce6795 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -820,7 +820,7 @@ fn revert_not_allowed_for_finalized() { let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 3); // Finalize best block - client.finalize_block(BlockId::Hash(canon[2]), None, false).unwrap(); + client.finalize_block(&canon[2], None, false).unwrap(); // Revert canon chain to last finalized block revert(client.clone(), backend, 100).expect("revert should work for baked test scenario"); @@ -882,7 +882,7 @@ fn importing_epoch_change_block_prunes_tree() { // We finalize block #13 from the canon chain, so on the next epoch // change the tree should be pruned, to not contain F (#7). - client.finalize_block(BlockId::Hash(canon_hashes[12]), None, false).unwrap(); + client.finalize_block(&canon_hashes[12], None, false).unwrap(); propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 7); // at this point no hashes from the first fork must exist on the tree @@ -909,7 +909,7 @@ fn importing_epoch_change_block_prunes_tree() { .any(|h| fork_3.contains(h)),); // finalizing block #25 from the canon chain should prune out the second fork - client.finalize_block(BlockId::Hash(canon_hashes[24]), None, false).unwrap(); + client.finalize_block(&canon_hashes[24], None, false).unwrap(); propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 8); // at this point no hashes from the second fork must exist on the tree @@ -1049,7 +1049,7 @@ fn obsolete_blocks_aux_data_cleanup() { assert!(aux_data_check(&fork3_hashes, true)); // Finalize A3 - client.finalize_block(BlockId::Number(3), None, true).unwrap(); + client.finalize_block(&fork1_hashes[2], None, true).unwrap(); // Wiped: A1, A2 assert!(aux_data_check(&fork1_hashes[..2], false)); @@ -1060,7 +1060,7 @@ fn obsolete_blocks_aux_data_cleanup() { // Present C4, C5 assert!(aux_data_check(&fork3_hashes, true)); - client.finalize_block(BlockId::Number(4), None, true).unwrap(); + client.finalize_block(&fork1_hashes[3], None, true).unwrap(); // Wiped: A3 assert!(aux_data_check(&fork1_hashes[2..3], false)); diff --git a/client/consensus/manual-seal/src/finalize_block.rs b/client/consensus/manual-seal/src/finalize_block.rs index d134ce7734571..e11353e2da611 100644 --- a/client/consensus/manual-seal/src/finalize_block.rs +++ b/client/consensus/manual-seal/src/finalize_block.rs @@ -20,7 +20,7 @@ use crate::rpc; use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; -use sp_runtime::{generic::BlockId, traits::Block as BlockT, Justification}; +use sp_runtime::{traits::Block as BlockT, Justification}; use std::{marker::PhantomData, sync::Arc}; /// params for block finalization. @@ -46,7 +46,7 @@ where { let FinalizeBlockParams { hash, mut sender, justification, finalizer, .. 
} = params; - match finalizer.finalize_block(BlockId::Hash(hash), justification, true) { + match finalizer.finalize_block(&hash, justification, true) { Err(e) => { log::warn!("Failed to finalize block {}", e); rpc::send_result(&mut sender, Err(e.into())) diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 3d708a95f41cb..60720494a9f9a 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -1352,7 +1352,7 @@ where // ideally some handle to a synchronization oracle would be used // to avoid unconditionally notifying. client - .apply_finality(import_op, BlockId::Hash(hash), persisted_justification, true) + .apply_finality(import_op, &hash, persisted_justification, true) .map_err(|e| { warn!(target: "afg", "Error applying finality to block {:?}: {}", (hash, number), e); e diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index e7578fa669463..a7042e26b1a71 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -309,7 +309,8 @@ mod tests { } for block in to_finalize { - client.finalize_block(BlockId::Number(*block), None).unwrap(); + let hash = blocks[*block as usize - 1].hash(); + client.finalize_block(&hash, None).unwrap(); } (client, backend, blocks) } @@ -489,7 +490,7 @@ mod tests { let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); client - .finalize_block(BlockId::Number(8), Some((ID, grandpa_just8.encode().clone()))) + .finalize_block(&block8.hash(), Some((ID, grandpa_just8.encode().clone()))) .unwrap(); // Authority set change at block 8, so the justification stored there will be used in the diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index b1e46be5cabde..c04754411af1a 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -1457,7 +1457,12 @@ fn grandpa_environment_respects_voting_rules() { ); // we finalize block 19 with block 21 being the best block - peer.client().finalize_block(BlockId::Number(19), None, false).unwrap(); + let hashof19 = peer + .client() + .as_client() + .expect_block_hash_from_id(&BlockId::Number(19)) + .unwrap(); + peer.client().finalize_block(&hashof19, None, false).unwrap(); // the 3/4 environment should propose block 21 for voting assert_eq!( @@ -1479,7 +1484,12 @@ fn grandpa_environment_respects_voting_rules() { ); // we finalize block 21 with block 21 being the best block - peer.client().finalize_block(BlockId::Number(21), None, false).unwrap(); + let hashof21 = peer + .client() + .as_client() + .expect_block_hash_from_id(&BlockId::Number(21)) + .unwrap(); + peer.client().finalize_block(&hashof21, None, false).unwrap(); // even though the default environment will always try to not vote on the // best block, there's a hard rule that we can't cast any votes lower than diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index a31a0a8b91908..10d02f790a0dc 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -327,7 +327,7 @@ mod tests { use sp_consensus::BlockOrigin; use sp_finality_grandpa::GRANDPA_ENGINE_ID; use sp_keyring::Ed25519Keyring; - use sp_runtime::{generic::BlockId, traits::Header as _}; + use sp_runtime::traits::Header as _; use std::sync::Arc; use substrate_test_runtime_client::{ ClientBlockImportExt, ClientExt, 
DefaultTestClientBuilderExt, TestClientBuilder, @@ -412,10 +412,7 @@ mod tests { let justification = GrandpaJustification::from_commit(&client, 42, commit).unwrap(); client - .finalize_block( - BlockId::Hash(target_hash), - Some((GRANDPA_ENGINE_ID, justification.encode())), - ) + .finalize_block(&target_hash, Some((GRANDPA_ENGINE_ID, justification.encode()))) .unwrap(); authority_set_changes.push((current_set_id, n)); diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index e9c2b24b2ba95..76d7d624be523 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -3172,9 +3172,7 @@ mod test { let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); - client - .finalize_block(BlockId::Hash(finalized_block.hash()), Some(just)) - .unwrap(); + client.finalize_block(&finalized_block.hash(), Some(just)).unwrap(); sync.update_chain_info(&info.best_hash, info.best_number); let peer_id1 = PeerId::random(); @@ -3303,9 +3301,7 @@ mod test { let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); - client - .finalize_block(BlockId::Hash(finalized_block.hash()), Some(just)) - .unwrap(); + client.finalize_block(&finalized_block.hash(), Some(just)).unwrap(); sync.update_chain_info(&info.best_hash, info.best_number); let peer_id1 = PeerId::random(); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index d4bee77b54aff..5460cc7d52461 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -190,11 +190,11 @@ impl PeersClient { pub fn finalize_block( &self, - id: BlockId, + hash: &::Hash, justification: Option, notify: bool, ) -> ClientResult<()> { - self.client.finalize_block(id, justification, notify) + self.client.finalize_block(hash, justification, notify) } } @@ -1113,7 +1113,7 @@ impl JustificationImport for ForceFinalized { justification: Justification, ) -> Result<(), Self::Error> { self.0 - .finalize_block(BlockId::Hash(hash), Some(justification), true) + .finalize_block(&hash, Some(justification), true) .map_err(|_| ConsensusError::InvalidJustification) } } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 6115e0e3b2c86..399062a88d269 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -251,18 +251,22 @@ fn sync_justifications() { assert_eq!(net.peer(1).client().justifications(&BlockId::Number(10)).unwrap(), None); // we finalize block #10, #15 and #20 for peer 0 with a justification + let backend = net.peer(0).client().as_backend(); + let hashof10 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); + let hashof15 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(15)).unwrap(); + let hashof20 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(20)).unwrap(); let just = (*b"FRNK", Vec::new()); net.peer(0) .client() - .finalize_block(BlockId::Number(10), Some(just.clone()), true) + .finalize_block(&hashof10, Some(just.clone()), true) .unwrap(); net.peer(0) .client() - .finalize_block(BlockId::Number(15), Some(just.clone()), true) + .finalize_block(&hashof15, Some(just.clone()), true) .unwrap(); net.peer(0) .client() - .finalize_block(BlockId::Number(20), Some(just.clone()), true) + .finalize_block(&hashof20, Some(just.clone()), true) .unwrap(); let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); @@ -309,10 +313,7 @@ fn 
sync_justifications_across_forks() { net.block_until_sync(); let just = (*b"FRNK", Vec::new()); - net.peer(0) - .client() - .finalize_block(BlockId::Hash(f1_best), Some(just), true) - .unwrap(); + net.peer(0).client().finalize_block(&f1_best, Some(just), true).unwrap(); net.peer(1).request_justification(&f1_best, 10); net.peer(1).request_justification(&f2_best, 11); @@ -655,14 +656,8 @@ fn can_sync_to_peers_with_wrong_common_block() { // both peers re-org to the same fork without notifying each other let just = Some((*b"FRNK", Vec::new())); - net.peer(0) - .client() - .finalize_block(BlockId::Hash(fork_hash), just.clone(), true) - .unwrap(); - net.peer(1) - .client() - .finalize_block(BlockId::Hash(fork_hash), just, true) - .unwrap(); + net.peer(0).client().finalize_block(&fork_hash, just.clone(), true).unwrap(); + net.peer(1).client().finalize_block(&fork_hash, just, true).unwrap(); let final_hash = net.peer(0).push_blocks(1, false); net.block_until_sync(); @@ -976,10 +971,17 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { assert_eq!(1, net.peer(0).num_peers()); } + let hashof10 = net + .peer(0) + .client() + .as_backend() + .blockchain() + .expect_block_hash_from_id(&BlockId::Number(10)) + .unwrap(); // Finalize the block and make the justification available. net.peer(0) .client() - .finalize_block(BlockId::Number(10), Some((*b"FRNK", Vec::new())), true) + .finalize_block(&hashof10, Some((*b"FRNK", Vec::new())), true) .unwrap(); block_on(futures::future::poll_fn::<(), _>(|cx| { @@ -1100,10 +1102,14 @@ fn syncs_state() { assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64))); let just = (*b"FRNK", Vec::new()); - net.peer(1) + let hashof60 = net + .peer(0) .client() - .finalize_block(BlockId::Number(60), Some(just), true) + .as_backend() + .blockchain() + .expect_block_hash_from_id(&BlockId::Number(60)) .unwrap(); + net.peer(1).client().finalize_block(&hashof60, Some(just), true).unwrap(); // Wait for state sync. 
block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index 7d12458511cfd..a41d8e41b8fa7 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -198,6 +198,7 @@ async fn should_return_finalized_hash() { // import new block let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_hash = block.hash(); client.import(BlockOrigin::Own, block).await.unwrap(); // no finalization yet @@ -205,9 +206,9 @@ async fn should_return_finalized_hash() { assert_eq!(res, client.genesis_hash()); // finalize - client.finalize_block(BlockId::number(1), None).unwrap(); + client.finalize_block(&block_hash, None).unwrap(); let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap(); - assert_eq!(res, client.block_hash(1).unwrap().unwrap()); + assert_eq!(res, block_hash); } #[tokio::test] @@ -232,8 +233,9 @@ async fn test_head_subscription(method: &str) { let api = new_full(client.clone(), test_executor()).into_rpc(); let sub = api.subscribe(method, EmptyParams::new()).await.unwrap(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + let block_hash = block.hash(); client.import(BlockOrigin::Own, block).await.unwrap(); - client.finalize_block(BlockId::number(1), None).unwrap(); + client.finalize_block(&block_hash, None).unwrap(); sub }; diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index a12b7177db47c..5d73ef4911dcb 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1845,29 +1845,22 @@ where fn apply_finality( &self, operation: &mut ClientImportOperation, - id: BlockId, + hash: &Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { let last_best = self.backend.blockchain().info().best_hash; - let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - self.apply_finality_with_block_hash( - operation, - to_finalize_hash, - justification, - last_best, - notify, - ) + self.apply_finality_with_block_hash(operation, *hash, justification, last_best, notify) } fn finalize_block( &self, - id: BlockId, + hash: &Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { self.lock_import_and_run(|operation| { - self.apply_finality(operation, id, justification, notify) + self.apply_finality(operation, hash, justification, notify) }) } } @@ -1881,20 +1874,20 @@ where fn apply_finality( &self, operation: &mut ClientImportOperation, - id: BlockId, + hash: &Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { - (**self).apply_finality(operation, id, justification, notify) + (**self).apply_finality(operation, hash, justification, notify) } fn finalize_block( &self, - id: BlockId, + hash: &Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { - (**self).finalize_block(id, justification, notify) + (**self).finalize_block(hash, justification, notify) } } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 4a5b56bd14006..9937c436a33ff 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -867,7 +867,7 @@ fn import_with_justification() { .unwrap() .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - client.finalize_block(BlockId::hash(a2.hash()), None).unwrap(); + client.finalize_block(&a2.hash(), 
None).unwrap(); // A2 -> A3 let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); @@ -996,7 +996,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { // we finalize block B1 which is on a different branch from current best // which should trigger a re-org. - ClientExt::finalize_block(&client, BlockId::Hash(b1.hash()), None).unwrap(); + ClientExt::finalize_block(&client, &b1.hash(), None).unwrap(); // B1 should now be the latest finalized assert_eq!(client.chain_info().finalized_hash, b1.hash()); @@ -1020,7 +1020,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { assert_eq!(client.chain_info().best_hash, b3.hash()); - ClientExt::finalize_block(&client, BlockId::Hash(b3.hash()), None).unwrap(); + ClientExt::finalize_block(&client, &b3.hash(), None).unwrap(); finality_notification_check(&mut finality_notifications, &[b1.hash()], &[]); finality_notification_check(&mut finality_notifications, &[b2.hash(), b3.hash()], &[a2.hash()]); @@ -1118,7 +1118,7 @@ fn finality_notifications_content() { // Postpone import to test behavior of import of finalized block. - ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); + ClientExt::finalize_block(&client, &a2.hash(), None).unwrap(); // Import and finalize D4 block_on(client.import_as_final(BlockOrigin::Own, d4.clone())).unwrap(); @@ -1274,7 +1274,7 @@ fn doesnt_import_blocks_that_revert_finality() { // we will finalize A2 which should make it impossible to import a new // B3 at the same height but that doesn't include it - ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); + ClientExt::finalize_block(&client, &a2.hash(), None).unwrap(); let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap(); let expected_err = @@ -1309,7 +1309,7 @@ fn doesnt_import_blocks_that_revert_finality() { .unwrap() .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); - ClientExt::finalize_block(&client, BlockId::Hash(a3.hash()), None).unwrap(); + ClientExt::finalize_block(&client, &a3.hash(), None).unwrap(); finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index f2b99a5b355f0..dd416b9102fc0 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -22,14 +22,14 @@ use sc_client_api::{backend::Finalizer, client::BlockBackend}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_service::client::Client; use sp_consensus::{BlockOrigin, Error as ConsensusError}; -use sp_runtime::{generic::BlockId, traits::Block as BlockT, Justification, Justifications}; +use sp_runtime::{traits::Block as BlockT, Justification, Justifications}; /// Extension trait for a test client. pub trait ClientExt: Sized { /// Finalize a block. 
fn finalize_block( &self, - id: BlockId, + hash: &Block::Hash, justification: Option, ) -> sp_blockchain::Result<()>; @@ -75,10 +75,10 @@ where { fn finalize_block( &self, - id: BlockId, + hash: &Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { - Finalizer::finalize_block(self, id, justification, true) + Finalizer::finalize_block(self, hash, justification, true) } fn genesis_hash(&self) -> ::Hash { From 967080b44e0709ef48cf1096abe47ce731d7e670 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 20 Oct 2022 17:50:59 +0200 Subject: [PATCH 265/348] BlockId removal: refactor: BlockImportOperation+Bknd::finalize_block (#12535) * BlockId removal: refactor: BlockImportOperation+Bknd::finalize_block It changes the arguments of methods of `BlockImportOperation` trait from: block: `BlockId` to: hash: `&Block::Hash` `Backend::finalize_block` was also changed. This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) * Review suggestion applied thx to @davxy * trigger CI job --- client/api/src/backend.rs | 8 +-- client/api/src/in_mem.rs | 35 +++++------ client/beefy/src/worker.rs | 6 +- client/db/src/lib.rs | 97 +++++++++++++++-------------- client/service/src/client/client.rs | 6 +- 5 files changed, 75 insertions(+), 77 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 5b4acb0be8bda..864a9af5685d8 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -216,13 +216,13 @@ pub trait BlockImportOperation { /// Mark a block as finalized. fn mark_finalized( &mut self, - id: BlockId, + hash: &Block::Hash, justification: Option, ) -> sp_blockchain::Result<()>; /// Mark a block as new head. If both block import and set head are specified, set head /// overrides block import's best block rule. - fn mark_head(&mut self, id: BlockId) -> sp_blockchain::Result<()>; + fn mark_head(&mut self, hash: &Block::Hash) -> sp_blockchain::Result<()>; /// Add a transaction index operation. fn update_transaction_index(&mut self, index: Vec) @@ -476,12 +476,12 @@ pub trait Backend: AuxStore + Send + Sync { transaction: Self::BlockImportOperation, ) -> sp_blockchain::Result<()>; - /// Finalize block with given Id. + /// Finalize block with given `hash`. /// /// This should only be called if the parent of the given block has been finalized. fn finalize_block( &self, - block: BlockId, + hash: &Block::Hash, justification: Option, ) -> sp_blockchain::Result<()>; diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 99efdd3bf1d22..1cb61ba1a0b0a 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -223,10 +223,10 @@ impl Blockchain { } /// Set an existing block as head. - pub fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> { + pub fn set_head(&self, hash: Block::Hash) -> sp_blockchain::Result<()> { let header = self - .header(id)? - .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", id)))?; + .header(BlockId::Hash(hash))? + .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash)))?; self.apply_head(&header) } @@ -271,21 +271,16 @@ impl Blockchain { fn finalize_header( &self, - id: BlockId, + block: &Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { - let hash = match self.header(id)? 
{ - Some(h) => h.hash(), - None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))), - }; - let mut storage = self.storage.write(); - storage.finalized_hash = hash; + storage.finalized_hash = *block; if justification.is_some() { let block = storage .blocks - .get_mut(&hash) + .get_mut(block) .expect("hash was fetched from a block in the db; qed"); let block_justifications = match block { @@ -500,8 +495,8 @@ pub struct BlockImportOperation { new_state: Option<> as StateBackend>>::Transaction>, aux: Vec<(Vec, Option>)>, - finalized_blocks: Vec<(BlockId, Option)>, - set_head: Option>, + finalized_blocks: Vec<(Block::Hash, Option)>, + set_head: Option, } impl BlockImportOperation @@ -605,16 +600,16 @@ where fn mark_finalized( &mut self, - block: BlockId, + hash: &Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { - self.finalized_blocks.push((block, justification)); + self.finalized_blocks.push((*hash, justification)); Ok(()) } - fn mark_head(&mut self, block: BlockId) -> sp_blockchain::Result<()> { + fn mark_head(&mut self, hash: &Block::Hash) -> sp_blockchain::Result<()> { assert!(self.pending_block.is_none(), "Only one set block per operation is allowed"); - self.set_head = Some(block); + self.set_head = Some(*hash); Ok(()) } @@ -710,7 +705,7 @@ where fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> { if !operation.finalized_blocks.is_empty() { for (block, justification) in operation.finalized_blocks { - self.blockchain.finalize_header(block, justification)?; + self.blockchain.finalize_header(&block, justification)?; } } @@ -743,10 +738,10 @@ where fn finalize_block( &self, - block: BlockId, + hash: &Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { - self.blockchain.finalize_header(block, justification) + self.blockchain.finalize_header(hash, justification) } fn append_justification( diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 6efebd131d6da..9b331b73ed093 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -1371,8 +1371,10 @@ pub(crate) mod tests { let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); net.peer(0).push_blocks(2, false); // finalize 1 and 2 without justifications - backend.finalize_block(BlockId::number(1), None).unwrap(); - backend.finalize_block(BlockId::number(2), None).unwrap(); + let hashof1 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); + let hashof2 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(2)).unwrap(); + backend.finalize_block(&hashof1, None).unwrap(); + backend.finalize_block(&hashof2, None).unwrap(); let justif = create_finality_proof(2); // create new session at block #2 diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 1a80c16c4e59d..14ebafa01bec1 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -756,8 +756,8 @@ pub struct BlockImportOperation { offchain_storage_updates: OffchainChangesCollection, pending_block: Option>, aux_ops: Vec<(Vec, Option>)>, - finalized_blocks: Vec<(BlockId, Option)>, - set_head: Option>, + finalized_blocks: Vec<(Block::Hash, Option)>, + set_head: Option, commit_state: bool, index_ops: Vec, } @@ -897,16 +897,16 @@ impl sc_client_api::backend::BlockImportOperation fn mark_finalized( &mut self, - block: BlockId, + block: &Block::Hash, justification: Option, ) -> ClientResult<()> { - self.finalized_blocks.push((block, justification)); + self.finalized_blocks.push((*block, 
justification)); Ok(()) } - fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { + fn mark_head(&mut self, hash: &Block::Hash) -> ClientResult<()> { assert!(self.set_head.is_none(), "Only one set head per operation is allowed"); - self.set_head = Some(block); + self.set_head = Some(*hash); Ok(()) } @@ -1351,8 +1351,7 @@ impl Backend { (meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap) }; - for (block, justification) in operation.finalized_blocks { - let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; + for (block_hash, justification) in operation.finalized_blocks { let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; meta_updates.push(self.finalize_block_with_transaction( &mut transaction, @@ -1624,9 +1623,10 @@ impl Backend { }; if let Some(set_head) = operation.set_head { - if let Some(header) = - sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)? - { + if let Some(header) = sc_client_api::blockchain::HeaderBackend::header( + &self.blockchain, + BlockId::Hash(set_head), + )? { let number = header.number(); let hash = header.hash(); @@ -1992,17 +1992,16 @@ impl sc_client_api::backend::Backend for Backend { fn finalize_block( &self, - block: BlockId, + hash: &Block::Hash, justification: Option, ) -> ClientResult<()> { let mut transaction = Transaction::new(); - let hash = self.blockchain.expect_block_hash_from_id(&block)?; - let header = self.blockchain.expect_header(block)?; + let header = self.blockchain.expect_header(BlockId::Hash(*hash))?; let mut displaced = None; let m = self.finalize_block_with_transaction( &mut transaction, - &hash, + hash, &header, None, justification, @@ -2605,13 +2604,12 @@ pub(crate) mod tests { header.state_root = root.into(); op.update_storage(storage, Vec::new()).unwrap(); - op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) .unwrap(); db.commit_operation(op).unwrap(); - let hash = db.blockchain().expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); - let state = db.state_at(&hash).unwrap(); + let state = db.state_at(&header.hash()).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -2665,7 +2663,7 @@ pub(crate) mod tests { hash }; - let hash = { + let hashof1 = { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); let mut header = Header { @@ -2702,12 +2700,12 @@ pub(crate) mod tests { hash }; - let hash = { + let hashof2 = { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap(); let mut header = Header { number: 2, - parent_hash: hash, + parent_hash: hashof1, state_root: Default::default(), digest: Default::default(), extrinsics_root: Default::default(), @@ -2736,12 +2734,12 @@ pub(crate) mod tests { hash }; - { + let hashof3 = { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Number(2)).unwrap(); let mut header = Header { number: 3, - parent_hash: hash, + parent_hash: hashof2, state_root: Default::default(), digest: Default::default(), extrinsics_root: Default::default(), @@ -2754,6 +2752,7 @@ pub(crate) mod tests { .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) .0 .into(); + let hash = header.hash(); op.set_block_data(header, Some(vec![]), None, None, 
NewBlockState::Best) .unwrap(); @@ -2764,11 +2763,12 @@ pub(crate) mod tests { .db .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) .is_none()); - } + hash + }; - backend.finalize_block(BlockId::Number(1), None).unwrap(); - backend.finalize_block(BlockId::Number(2), None).unwrap(); - backend.finalize_block(BlockId::Number(3), None).unwrap(); + backend.finalize_block(&hashof1, None).unwrap(); + backend.finalize_block(&hashof2, None).unwrap(); + backend.finalize_block(&hashof3, None).unwrap(); assert!(backend .storage .db @@ -2991,8 +2991,8 @@ pub(crate) mod tests { vec![block2_a, block2_b, block2_c, block1_c] ); - backend.finalize_block(BlockId::hash(block1_a), None).unwrap(); - backend.finalize_block(BlockId::hash(block2_a), None).unwrap(); + backend.finalize_block(&block1_a, None).unwrap(); + backend.finalize_block(&block2_a, None).unwrap(); // leaves at same height stay. Leaves at lower heights pruned. assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c]); @@ -3016,10 +3016,10 @@ pub(crate) mod tests { let backend = Backend::::new_test(10, 10); let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let _ = insert_header(&backend, 1, block0, None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3])); - backend.finalize_block(BlockId::Number(1), justification.clone()).unwrap(); + backend.finalize_block(&block1, justification.clone()).unwrap(); assert_eq!( backend.blockchain().justifications(BlockId::Number(1)).unwrap(), @@ -3034,10 +3034,10 @@ pub(crate) mod tests { let backend = Backend::::new_test(10, 10); let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let _ = insert_header(&backend, 1, block0, None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]); - backend.finalize_block(BlockId::Number(1), Some(just0.clone().into())).unwrap(); + backend.finalize_block(&block1, Some(just0.clone().into())).unwrap(); let just1 = (CONS1_ENGINE_ID, vec![4, 5]); backend.append_justification(BlockId::Number(1), just1.clone()).unwrap(); @@ -3071,15 +3071,15 @@ pub(crate) mod tests { { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); - op.mark_finalized(BlockId::Hash(block1), None).unwrap(); - op.mark_finalized(BlockId::Hash(block2), None).unwrap(); + op.mark_finalized(&block1, None).unwrap(); + op.mark_finalized(&block2, None).unwrap(); backend.commit_operation(op).unwrap(); } { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); - op.mark_finalized(BlockId::Hash(block3), None).unwrap(); - op.mark_finalized(BlockId::Hash(block4), None).unwrap(); + op.mark_finalized(&block3, None).unwrap(); + op.mark_finalized(&block4, None).unwrap(); backend.commit_operation(op).unwrap(); } } @@ -3181,7 +3181,7 @@ pub(crate) mod tests { { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); - op.mark_finalized(BlockId::Hash(block2), None).unwrap(); + op.mark_finalized(&block2, None).unwrap(); backend.commit_operation(op).unwrap_err(); } } @@ -3210,7 +3210,7 @@ pub(crate) mod tests { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, 
BlockId::Hash(blocks[4])).unwrap(); for i in 1..5 { - op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + op.mark_finalized(&blocks[i], None).unwrap(); } backend.commit_operation(op).unwrap(); } @@ -3245,7 +3245,7 @@ pub(crate) mod tests { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); for i in 1..3 { - op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + op.mark_finalized(&blocks[i], None).unwrap(); } backend.commit_operation(op).unwrap(); @@ -3301,7 +3301,7 @@ pub(crate) mod tests { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); - op.mark_head(BlockId::Hash(blocks[4])).unwrap(); + op.mark_head(&blocks[4]).unwrap(); backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); @@ -3310,7 +3310,7 @@ pub(crate) mod tests { for i in 1..5 { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(blocks[i])).unwrap(); - op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + op.mark_finalized(&blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); } @@ -3370,13 +3370,13 @@ pub(crate) mod tests { .unwrap(); let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); - op.mark_head(BlockId::Hash(blocks[4])).unwrap(); + op.mark_head(&blocks[4]).unwrap(); backend.commit_operation(op).unwrap(); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); - op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + op.mark_finalized(&blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); } @@ -3423,8 +3423,9 @@ pub(crate) mod tests { assert_eq!(bc.indexed_transaction(&x1_hash).unwrap().unwrap(), &x1[1..]); // Push one more blocks and make sure block is pruned and transaction index is cleared. - insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap(); - backend.finalize_block(BlockId::Number(1), None).unwrap(); + let block1 = + insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap(); + backend.finalize_block(&block1, None).unwrap(); assert_eq!(bc.body(BlockId::Number(0)).unwrap(), None); assert_eq!(bc.indexed_transaction(&x0_hash).unwrap(), None); assert_eq!(bc.indexed_transaction(&x1_hash).unwrap(), None); @@ -3501,7 +3502,7 @@ pub(crate) mod tests { for i in 1..10 { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); - op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); + op.mark_finalized(&blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); if i < 6 { @@ -3676,7 +3677,7 @@ pub(crate) mod tests { let block1_a = insert_header(&backend, 1, block0, None, Default::default()); let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); - backend.finalize_block(BlockId::hash(block1_a), None).unwrap(); + backend.finalize_block(&block1_a, None).unwrap(); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); // Insert a fork prior to finalization point. Leave should not be created. 
diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 5d73ef4911dcb..b18c6d226706b 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -878,17 +878,17 @@ where // plugable we cannot make a better choice here. usages that need // an accurate "best" block need to go through `SelectChain` // instead. - operation.op.mark_head(BlockId::Hash(block))?; + operation.op.mark_head(&block)?; } let enacted = route_from_finalized.enacted(); assert!(enacted.len() > 0); for finalize_new in &enacted[..enacted.len() - 1] { - operation.op.mark_finalized(BlockId::Hash(finalize_new.hash), None)?; + operation.op.mark_finalized(&finalize_new.hash, None)?; } assert_eq!(enacted.last().map(|e| e.hash), Some(block)); - operation.op.mark_finalized(BlockId::Hash(block), justification)?; + operation.op.mark_finalized(&block, justification)?; if notify { let finalized = From ab09d4471bae3873b949734998aca9f1aea3f835 Mon Sep 17 00:00:00 2001 From: Dmitrii Markin Date: Thu, 20 Oct 2022 19:30:01 +0300 Subject: [PATCH 266/348] Remove multiple DHTs support from `Discovery` (#12524) --- client/network/src/behaviour.rs | 21 +- client/network/src/discovery.rs | 682 ++++++++++---------------- client/network/src/service.rs | 32 +- client/network/src/service/metrics.rs | 35 +- 4 files changed, 297 insertions(+), 473 deletions(-) diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index fa130fb4baacd..2e646956e9d8c 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -34,7 +34,6 @@ use libp2p::{ use sc_consensus::import_queue::{IncomingBlock, RuntimeOrigin}; use sc_network_common::{ - config::ProtocolId, protocol::{ event::DhtEvent, role::{ObservedRole, Roles}, @@ -79,7 +78,7 @@ pub enum BehaviourOut { JustificationImport(RuntimeOrigin, B::Hash, NumberFor, Justifications), /// Started a random iterative Kademlia discovery query. - RandomKademliaStarted(Vec), + RandomKademliaStarted, /// We have received a request from a peer and answered it. /// @@ -267,25 +266,20 @@ where self.discovery.add_known_address(peer_id, addr) } - /// Returns the number of nodes in each Kademlia kbucket for each Kademlia instance. + /// Returns the number of nodes in each Kademlia kbucket. /// - /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm - /// of their lower bound. - pub fn num_entries_per_kbucket( - &mut self, - ) -> impl ExactSizeIterator)> { + /// Identifies kbuckets by the base 2 logarithm of their lower bound. + pub fn num_entries_per_kbucket(&mut self) -> Option> { self.discovery.num_entries_per_kbucket() } /// Returns the number of records in the Kademlia record stores. - pub fn num_kademlia_records(&mut self) -> impl ExactSizeIterator { + pub fn num_kademlia_records(&mut self) -> Option { self.discovery.num_kademlia_records() } /// Returns the total size in bytes of all the records in the Kademlia record stores. 
- pub fn kademlia_records_total_size( - &mut self, - ) -> impl ExactSizeIterator { + pub fn kademlia_records_total_size(&mut self) -> Option { self.discovery.kademlia_records_total_size() } @@ -438,8 +432,7 @@ impl From for BehaviourOut { BehaviourOut::Dht(DhtEvent::ValuePut(key), duration), DiscoveryOut::ValuePutFailed(key, duration) => BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration), - DiscoveryOut::RandomKademliaStarted(protocols) => - BehaviourOut::RandomKademliaStarted(protocols), + DiscoveryOut::RandomKademliaStarted => BehaviourOut::RandomKademliaStarted, } } } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index da4dec70b29ab..712c3af97f58e 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -66,11 +66,12 @@ use libp2p::{ mdns::{Mdns, MdnsConfig, MdnsEvent}, multiaddr::Protocol, swarm::{ - handler::multi::IntoMultiHandler, ConnectionHandler, DialError, IntoConnectionHandler, - NetworkBehaviour, NetworkBehaviourAction, PollParameters, + behaviour::toggle::{Toggle, ToggleIntoConnectionHandler}, + ConnectionHandler, DialError, IntoConnectionHandler, NetworkBehaviour, + NetworkBehaviourAction, PollParameters, }, }; -use log::{debug, error, info, trace, warn}; +use log::{debug, info, trace, warn}; use sc_network_common::{config::ProtocolId, utils::LruHashSet}; use sp_core::hexdisplay::HexDisplay; use std::{ @@ -89,8 +90,8 @@ const MAX_KNOWN_EXTERNAL_ADDRESSES: usize = 32; /// `DiscoveryBehaviour` configuration. /// -/// Note: In order to discover nodes or load and store values via Kademlia one has to add at least -/// one protocol via [`DiscoveryConfig::add_protocol`]. +/// Note: In order to discover nodes or load and store values via Kademlia one has to add +/// Kademlia protocol via [`DiscoveryConfig::with_kademlia`]. pub struct DiscoveryConfig { local_peer_id: PeerId, permanent_addresses: Vec<(PeerId, Multiaddr)>, @@ -100,7 +101,7 @@ pub struct DiscoveryConfig { discovery_only_if_under_num: u64, enable_mdns: bool, kademlia_disjoint_query_paths: bool, - protocol_ids: HashSet, + kademlia_protocol_id: Option, } impl DiscoveryConfig { @@ -115,7 +116,7 @@ impl DiscoveryConfig { discovery_only_if_under_num: std::u64::MAX, enable_mdns: false, kademlia_disjoint_query_paths: false, - protocol_ids: HashSet::new(), + kademlia_protocol_id: None, } } @@ -160,13 +161,8 @@ impl DiscoveryConfig { } /// Add discovery via Kademlia for the given protocol. - pub fn add_protocol(&mut self, id: ProtocolId) -> &mut Self { - if self.protocol_ids.contains(&id) { - warn!(target: "sub-libp2p", "Discovery already registered for protocol {:?}", id); - return self - } - - self.protocol_ids.insert(id); + pub fn with_kademlia(&mut self, id: ProtocolId) -> &mut Self { + self.kademlia_protocol_id = Some(id); self } @@ -189,37 +185,34 @@ impl DiscoveryConfig { discovery_only_if_under_num, enable_mdns, kademlia_disjoint_query_paths, - protocol_ids, + kademlia_protocol_id, } = self; - let kademlias = protocol_ids - .into_iter() - .map(|protocol_id| { - let proto_name = protocol_name_from_protocol_id(&protocol_id); + let kademlia = kademlia_protocol_id.map(|protocol_id| { + let proto_name = protocol_name_from_protocol_id(&protocol_id); - let mut config = KademliaConfig::default(); - config.set_protocol_names(std::iter::once(proto_name.into()).collect()); - // By default Kademlia attempts to insert all peers into its routing table once a - // dialing attempt succeeds. 
In order to control which peer is added, disable the - // auto-insertion and instead add peers manually. - config.set_kbucket_inserts(KademliaBucketInserts::Manual); - config.disjoint_query_paths(kademlia_disjoint_query_paths); + let mut config = KademliaConfig::default(); + config.set_protocol_names(std::iter::once(proto_name.into()).collect()); + // By default Kademlia attempts to insert all peers into its routing table once a + // dialing attempt succeeds. In order to control which peer is added, disable the + // auto-insertion and instead add peers manually. + config.set_kbucket_inserts(KademliaBucketInserts::Manual); + config.disjoint_query_paths(kademlia_disjoint_query_paths); - let store = MemoryStore::new(local_peer_id); - let mut kad = Kademlia::with_config(local_peer_id, store, config); + let store = MemoryStore::new(local_peer_id); + let mut kad = Kademlia::with_config(local_peer_id, store, config); - for (peer_id, addr) in &permanent_addresses { - kad.add_address(peer_id, addr.clone()); - } + for (peer_id, addr) in &permanent_addresses { + kad.add_address(peer_id, addr.clone()); + } - (protocol_id, kad) - }) - .collect(); + kad + }); DiscoveryBehaviour { permanent_addresses, ephemeral_addresses: HashMap::new(), - kademlias, + kademlia: Toggle::from(kademlia), next_kad_random_query: if dht_random_walk { Some(Delay::new(Duration::new(0, 0))) } else { @@ -259,8 +252,9 @@ pub struct DiscoveryBehaviour { /// Same as `permanent_addresses`, except that addresses that fail to reach a peer are /// removed. ephemeral_addresses: HashMap>, - /// Kademlia requests and answers. - kademlias: HashMap>, + /// Kademlia requests and answers. Even though it's wrapped in `Toggle`, currently + /// it's always enabled in `NetworkWorker::new()`. + kademlia: Toggle>, /// Discovers nodes on the local network. mdns: Option, /// Stream that fires when we need to perform the next random Kademlia query. `None` if @@ -289,7 +283,7 @@ impl DiscoveryBehaviour { /// Returns the list of nodes that we know exist in the network. pub fn known_peers(&mut self) -> HashSet { let mut peers = HashSet::new(); - for k in self.kademlias.values_mut() { + if let Some(k) = self.kademlia.as_mut() { for b in k.kbuckets() { for e in b.iter() { if !peers.contains(e.node.key.preimage()) { @@ -309,7 +303,7 @@ impl DiscoveryBehaviour { pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { let addrs_list = self.ephemeral_addresses.entry(peer_id).or_default(); if !addrs_list.iter().any(|a| *a == addr) { - for k in self.kademlias.values_mut() { + if let Some(k) = self.kademlia.as_mut() { k.add_address(&peer_id, addr.clone()); } @@ -318,8 +312,8 @@ impl DiscoveryBehaviour { } } - /// Add a self-reported address of a remote peer to the k-buckets of the supported - /// DHTs (`supported_protocols`). + /// Add a self-reported address of a remote peer to the k-buckets of the DHT + /// if it has compatible `supported_protocols`. /// /// **Note**: It is important that you call this method. The discovery mechanism will not /// automatically add connecting peers to the Kademlia k-buckets. 
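The doc comment above notes that a self-reported address is only inserted into the k-buckets when the remote advertises the local node's Kademlia protocol. As a minimal sketch of that matching rule (plain string protocol names are an assumption here; the real code compares the protocol names owned by the Kademlia instance):

```rust
/// Returns true when any protocol reported by the remote matches the
/// protocol name of the local Kademlia instance.
fn remote_supports_local_kademlia(
    remote_protocols: &[&str],
    local_kademlia_protocol: &str,
) -> bool {
    remote_protocols.iter().any(|p| *p == local_kademlia_protocol)
}

fn main() {
    let local = "/dot/kad";
    // Address would be added: the remote speaks the same Kademlia protocol.
    assert!(remote_supports_local_kademlia(&["/ipfs/id/1.0.0", "/dot/kad"], local));
    // Address would be ignored: no overlap with the local protocol name.
    assert!(!remote_supports_local_kademlia(&["/ipfs/kad/1.0.0"], local));
}
```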
@@ -329,13 +323,15 @@ impl DiscoveryBehaviour { supported_protocols: &[impl AsRef<[u8]>], addr: Multiaddr, ) { - if !self.allow_non_globals_in_dht && !self.can_add_to_dht(&addr) { - trace!(target: "sub-libp2p", "Ignoring self-reported non-global address {} from {}.", addr, peer_id); - return - } + if let Some(kademlia) = self.kademlia.as_mut() { + if !self.allow_non_globals_in_dht && !Self::can_add_to_dht(&addr) { + trace!( + target: "sub-libp2p", + "Ignoring self-reported non-global address {} from {}.", addr, peer_id + ); + return + } - let mut added = false; - for kademlia in self.kademlias.values_mut() { if let Some(matching_protocol) = supported_protocols .iter() .find(|p| kademlia.protocol_names().iter().any(|k| k.as_ref() == p.as_ref())) @@ -346,24 +342,21 @@ impl DiscoveryBehaviour { addr, peer_id, String::from_utf8_lossy(matching_protocol.as_ref()), ); kademlia.add_address(peer_id, addr.clone()); - added = true; + } else { + trace!( + target: "sub-libp2p", + "Ignoring self-reported address {} from {} as remote node is not part of the \ + Kademlia DHT supported by the local node.", addr, peer_id, + ); } } - - if !added { - trace!( - target: "sub-libp2p", - "Ignoring self-reported address {} from {} as remote node is not part of any \ - Kademlia DHTs supported by the local node.", addr, peer_id, - ); - } } /// Start fetching a record from the DHT. /// /// A corresponding `ValueFound` or `ValueNotFound` event will later be generated. pub fn get_value(&mut self, key: record::Key) { - for k in self.kademlias.values_mut() { + if let Some(k) = self.kademlia.as_mut() { k.get_record(key.clone(), Quorum::One); } } @@ -373,7 +366,7 @@ impl DiscoveryBehaviour { /// /// A corresponding `ValuePut` or `ValuePutFailed` event will later be generated. pub fn put_value(&mut self, key: record::Key, value: Vec) { - for k in self.kademlias.values_mut() { + if let Some(k) = self.kademlia.as_mut() { if let Err(e) = k.put_record(Record::new(key.clone(), value.clone()), Quorum::All) { warn!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); self.pending_events @@ -386,37 +379,27 @@ impl DiscoveryBehaviour { /// /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm /// of their lower bound. - pub fn num_entries_per_kbucket( - &mut self, - ) -> impl ExactSizeIterator)> { - self.kademlias.iter_mut().map(|(id, kad)| { - let buckets = kad - .kbuckets() + pub fn num_entries_per_kbucket(&mut self) -> Option> { + self.kademlia.as_mut().map(|kad| { + kad.kbuckets() .map(|bucket| (bucket.range().0.ilog2().unwrap_or(0), bucket.iter().count())) - .collect(); - (id, buckets) + .collect() }) } /// Returns the number of records in the Kademlia record stores. - pub fn num_kademlia_records(&mut self) -> impl ExactSizeIterator { + pub fn num_kademlia_records(&mut self) -> Option { // Note that this code is ok only because we use a `MemoryStore`. - self.kademlias.iter_mut().map(|(id, kad)| { - let num = kad.store_mut().records().count(); - (id, num) - }) + self.kademlia.as_mut().map(|kad| kad.store_mut().records().count()) } /// Returns the total size in bytes of all the records in the Kademlia record stores. - pub fn kademlia_records_total_size( - &mut self, - ) -> impl ExactSizeIterator { + pub fn kademlia_records_total_size(&mut self) -> Option { // Note that this code is ok only because we use a `MemoryStore`. If the records were // for example stored on disk, this would load every single one of them every single time. 
- self.kademlias.iter_mut().map(|(id, kad)| { - let size = kad.store_mut().records().fold(0, |tot, rec| tot + rec.value.len()); - (id, size) - }) + self.kademlia + .as_mut() + .map(|kad| kad.store_mut().records().fold(0, |tot, rec| tot + rec.value.len())) } /// Can the given `Multiaddr` be put into the DHT? @@ -425,7 +408,7 @@ impl DiscoveryBehaviour { // NB: Currently all DNS names are allowed and no check for TLD suffixes is done // because the set of valid domains is highly dynamic and would require frequent // updates, for example by utilising publicsuffix.org or IANA. - pub fn can_add_to_dht(&self, addr: &Multiaddr) -> bool { + pub fn can_add_to_dht(addr: &Multiaddr) -> bool { let ip = match addr.iter().next() { Some(Protocol::Ip4(ip)) => IpNetwork::from(ip), Some(Protocol::Ip6(ip)) => IpNetwork::from(ip), @@ -435,29 +418,6 @@ impl DiscoveryBehaviour { }; ip.is_global() } - - fn new_handler_with_replacement( - &mut self, - pid: ProtocolId, - handler: KademliaHandlerProto, - ) -> ::ConnectionHandler { - let mut handlers: HashMap<_, _> = self - .kademlias - .iter_mut() - .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))) - .collect(); - - if let Some(h) = handlers.get_mut(&pid) { - *h = handler - } - - IntoMultiHandler::try_from_iter(handlers).expect( - "There can be at most one handler per `ProtocolId` and protocol names contain the \ - `ProtocolId` so no two protocol names in `self.kademlias` can be equal which is the \ - only error `try_from_iter` can return, therefore this call is guaranteed to succeed; \ - qed", - ) - } } /// Event generated by the `DiscoveryBehaviour`. @@ -498,28 +458,18 @@ pub enum DiscoveryOut { /// Returning the corresponding key as well as the request duration. ValuePutFailed(record::Key, Duration), - /// Started a random Kademlia query for each DHT identified by the given `ProtocolId`s. + /// Started a random Kademlia query. /// /// Only happens if [`DiscoveryConfig::with_dht_random_walk`] has been configured to `true`. 
- RandomKademliaStarted(Vec), + RandomKademliaStarted, } impl NetworkBehaviour for DiscoveryBehaviour { - type ConnectionHandler = IntoMultiHandler>; + type ConnectionHandler = ToggleIntoConnectionHandler>; type OutEvent = DiscoveryOut; fn new_handler(&mut self) -> Self::ConnectionHandler { - let iter = self - .kademlias - .iter_mut() - .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))); - - IntoMultiHandler::try_from_iter(iter).expect( - "There can be at most one handler per `ProtocolId` and protocol names contain the \ - `ProtocolId` so no two protocol names in `self.kademlias` can be equal which is the \ - only error `try_from_iter` can return, therefore this call is guaranteed to succeed; \ - qed", - ) + self.kademlia.new_handler() } fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { @@ -534,10 +484,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { } { - let mut list_to_filter = Vec::new(); - for k in self.kademlias.values_mut() { - list_to_filter.extend(k.addresses_of_peer(peer_id)) - } + let mut list_to_filter = self.kademlia.addresses_of_peer(peer_id); if let Some(ref mut mdns) = self.mdns { list_to_filter.extend(mdns.addresses_of_peer(peer_id)); @@ -566,9 +513,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { old: &ConnectedPoint, new: &ConnectedPoint, ) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_address_change(k, peer_id, connection_id, old, new); - } + self.kademlia.inject_address_change(peer_id, connection_id, old, new) } fn inject_connection_established( @@ -580,16 +525,13 @@ impl NetworkBehaviour for DiscoveryBehaviour { other_established: usize, ) { self.num_connections += 1; - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_connection_established( - k, - peer_id, - conn, - endpoint, - failed_addresses, - other_established, - ) - } + self.kademlia.inject_connection_established( + peer_id, + conn, + endpoint, + failed_addresses, + other_established, + ) } fn inject_connection_closed( @@ -601,23 +543,19 @@ impl NetworkBehaviour for DiscoveryBehaviour { remaining_established: usize, ) { self.num_connections -= 1; - for (pid, event) in handler.into_iter() { - if let Some(kad) = self.kademlias.get_mut(&pid) { - kad.inject_connection_closed(peer_id, conn, endpoint, event, remaining_established) - } else { - error!( - target: "sub-libp2p", - "inject_connection_closed: no kademlia instance registered for protocol {:?}", - pid, - ) - } - } + self.kademlia.inject_connection_closed( + peer_id, + conn, + endpoint, + handler, + remaining_established, + ) } fn inject_dial_failure( &mut self, peer_id: Option, - _: Self::ConnectionHandler, + handler: Self::ConnectionHandler, error: &DialError, ) { if let Some(peer_id) = peer_id { @@ -630,32 +568,22 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - for k in self.kademlias.values_mut() { - let handler = k.new_handler(); - NetworkBehaviour::inject_dial_failure(k, peer_id, handler, error); - } + self.kademlia.inject_dial_failure(peer_id, handler, error) } fn inject_event( &mut self, peer_id: PeerId, connection: ConnectionId, - (pid, event): <::Handler as ConnectionHandler>::OutEvent, + event: <::Handler as ConnectionHandler>::OutEvent, ) { - if let Some(kad) = self.kademlias.get_mut(&pid) { - return kad.inject_event(peer_id, connection, event) - } - error!( - target: "sub-libp2p", - "inject_node_event: no kademlia instance registered for protocol {:?}", - pid, - ) + self.kademlia.inject_event(peer_id, connection, event) } fn inject_new_external_addr(&mut self, addr: 
&Multiaddr) { let new_addr = addr.clone().with(Protocol::P2p(self.local_peer_id.into())); - if self.can_add_to_dht(addr) { + if Self::can_add_to_dht(addr) { // NOTE: we might re-discover the same address multiple times // in which case we just want to refrain from logging. if self.known_external_addresses.insert(new_addr.clone()) { @@ -667,36 +595,26 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_new_external_addr(k, addr) - } + self.kademlia.inject_new_external_addr(addr) } fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { // We intentionally don't remove the element from `known_external_addresses` in order // to not print the log line again. - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_expired_external_addr(k, addr) - } + self.kademlia.inject_expired_external_addr(addr) } fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_expired_listen_addr(k, id, addr) - } + self.kademlia.inject_expired_listen_addr(id, addr) } fn inject_new_listener(&mut self, id: ListenerId) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_new_listener(k, id) - } + self.kademlia.inject_new_listener(id) } fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_new_listen_addr(k, id, addr) - } + self.kademlia.inject_new_listen_addr(id, addr) } fn inject_listen_failure(&mut self, _: &Multiaddr, _: &Multiaddr, _: Self::ConnectionHandler) { @@ -704,15 +622,11 @@ impl NetworkBehaviour for DiscoveryBehaviour { } fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_listener_error(k, id, err) - } + self.kademlia.inject_listener_error(id, err) } fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_listener_closed(k, id, reason) - } + self.kademlia.inject_listener_closed(id, reason) } fn poll( @@ -726,198 +640,189 @@ impl NetworkBehaviour for DiscoveryBehaviour { } // Poll the stream that fires when we need to start a random Kademlia query. - if let Some(next_kad_random_query) = self.next_kad_random_query.as_mut() { - while next_kad_random_query.poll_unpin(cx).is_ready() { - let actually_started = if self.num_connections < self.discovery_only_if_under_num { - let random_peer_id = PeerId::random(); - debug!( - target: "sub-libp2p", - "Libp2p <= Starting random Kademlia request for {:?}", - random_peer_id, - ); - for k in self.kademlias.values_mut() { - k.get_closest_peers(random_peer_id); + if let Some(kademlia) = self.kademlia.as_mut() { + if let Some(next_kad_random_query) = self.next_kad_random_query.as_mut() { + while next_kad_random_query.poll_unpin(cx).is_ready() { + let actually_started = + if self.num_connections < self.discovery_only_if_under_num { + let random_peer_id = PeerId::random(); + debug!( + target: "sub-libp2p", + "Libp2p <= Starting random Kademlia request for {:?}", + random_peer_id, + ); + kademlia.get_closest_peers(random_peer_id); + true + } else { + debug!( + target: "sub-libp2p", + "Kademlia paused due to high number of connections ({})", + self.num_connections + ); + false + }; + + // Schedule the next random query with exponentially increasing delay, + // capped at 60 seconds. 
+ *next_kad_random_query = Delay::new(self.duration_to_next_kad); + self.duration_to_next_kad = + cmp::min(self.duration_to_next_kad * 2, Duration::from_secs(60)); + + if actually_started { + let ev = DiscoveryOut::RandomKademliaStarted; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) } - true - } else { - debug!( - target: "sub-libp2p", - "Kademlia paused due to high number of connections ({})", - self.num_connections - ); - false - }; - - // Schedule the next random query with exponentially increasing delay, - // capped at 60 seconds. - *next_kad_random_query = Delay::new(self.duration_to_next_kad); - self.duration_to_next_kad = - cmp::min(self.duration_to_next_kad * 2, Duration::from_secs(60)); - - if actually_started { - let ev = DiscoveryOut::RandomKademliaStarted( - self.kademlias.keys().cloned().collect(), - ); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) } } } - // Poll Kademlias. - for (pid, kademlia) in &mut self.kademlias { - while let Poll::Ready(ev) = kademlia.poll(cx, params) { - match ev { - NetworkBehaviourAction::GenerateEvent(ev) => match ev { - KademliaEvent::RoutingUpdated { peer, .. } => { - let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - }, - KademliaEvent::UnroutablePeer { peer, .. } => { - let ev = DiscoveryOut::UnroutablePeer(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - }, - KademliaEvent::RoutablePeer { peer, .. } => { - let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - }, - KademliaEvent::PendingRoutablePeer { .. } | - KademliaEvent::InboundRequest { .. } => { - // We are not interested in this event at the moment. + while let Poll::Ready(ev) = self.kademlia.poll(cx, params) { + match ev { + NetworkBehaviourAction::GenerateEvent(ev) => match ev { + KademliaEvent::RoutingUpdated { peer, .. } => { + let ev = DiscoveryOut::Discovered(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::UnroutablePeer { peer, .. } => { + let ev = DiscoveryOut::UnroutablePeer(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::RoutablePeer { peer, .. } => { + let ev = DiscoveryOut::Discovered(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::PendingRoutablePeer { .. } | + KademliaEvent::InboundRequest { .. } => { + // We are not interested in this event at the moment. + }, + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::GetClosestPeers(res), + .. + } => match res { + Err(GetClosestPeersError::Timeout { key, peers }) => { + debug!( + target: "sub-libp2p", + "Libp2p => Query for {:?} timed out with {} results", + HexDisplay::from(&key), peers.len(), + ); }, - KademliaEvent::OutboundQueryCompleted { - result: QueryResult::GetClosestPeers(res), - .. - } => match res { - Err(GetClosestPeersError::Timeout { key, peers }) => { + Ok(ok) => { + trace!( + target: "sub-libp2p", + "Libp2p => Query for {:?} yielded {:?} results", + HexDisplay::from(&ok.key), ok.peers.len(), + ); + if ok.peers.is_empty() && self.num_connections != 0 { debug!( target: "sub-libp2p", - "Libp2p => Query for {:?} timed out with {} results", - HexDisplay::from(&key), peers.len(), + "Libp2p => Random Kademlia query has yielded empty results", ); - }, + } + }, + }, + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::GetRecord(res), + stats, + .. 
+ } => { + let ev = match res { Ok(ok) => { + let results = ok + .records + .into_iter() + .map(|r| (r.record.key, r.record.value)) + .collect(); + + DiscoveryOut::ValueFound( + results, + stats.duration().unwrap_or_default(), + ) + }, + Err(e @ libp2p::kad::GetRecordError::NotFound { .. }) => { trace!( target: "sub-libp2p", - "Libp2p => Query for {:?} yielded {:?} results", - HexDisplay::from(&ok.key), ok.peers.len(), + "Libp2p => Failed to get record: {:?}", + e, ); - if ok.peers.is_empty() && self.num_connections != 0 { - debug!( - target: "sub-libp2p", - "Libp2p => Random Kademlia query has yielded empty results", - ); - } + DiscoveryOut::ValueNotFound( + e.into_key(), + stats.duration().unwrap_or_default(), + ) }, - }, - KademliaEvent::OutboundQueryCompleted { - result: QueryResult::GetRecord(res), - stats, - .. - } => { - let ev = match res { - Ok(ok) => { - let results = ok - .records - .into_iter() - .map(|r| (r.record.key, r.record.value)) - .collect(); - - DiscoveryOut::ValueFound( - results, - stats.duration().unwrap_or_default(), - ) - }, - Err(e @ libp2p::kad::GetRecordError::NotFound { .. }) => { - trace!( - target: "sub-libp2p", - "Libp2p => Failed to get record: {:?}", - e, - ); - DiscoveryOut::ValueNotFound( - e.into_key(), - stats.duration().unwrap_or_default(), - ) - }, - Err(e) => { - debug!( - target: "sub-libp2p", - "Libp2p => Failed to get record: {:?}", - e, - ); - DiscoveryOut::ValueNotFound( - e.into_key(), - stats.duration().unwrap_or_default(), - ) - }, - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - }, - KademliaEvent::OutboundQueryCompleted { - result: QueryResult::PutRecord(res), - stats, - .. - } => { - let ev = match res { - Ok(ok) => DiscoveryOut::ValuePut( - ok.key, + Err(e) => { + debug!( + target: "sub-libp2p", + "Libp2p => Failed to get record: {:?}", + e, + ); + DiscoveryOut::ValueNotFound( + e.into_key(), stats.duration().unwrap_or_default(), - ), - Err(e) => { - debug!( - target: "sub-libp2p", - "Libp2p => Failed to put record: {:?}", - e, - ); - DiscoveryOut::ValuePutFailed( - e.into_key(), - stats.duration().unwrap_or_default(), - ) - }, - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) - }, - KademliaEvent::OutboundQueryCompleted { - result: QueryResult::RepublishRecord(res), - .. - } => match res { - Ok(ok) => debug!( - target: "sub-libp2p", - "Libp2p => Record republished: {:?}", - ok.key, - ), - Err(e) => debug!( - target: "sub-libp2p", - "Libp2p => Republishing of record {:?} failed with: {:?}", - e.key(), e, - ), - }, - // We never start any other type of query. - KademliaEvent::OutboundQueryCompleted { result: e, .. } => { - warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) - }, + ) + }, + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) }, - NetworkBehaviourAction::Dial { opts, handler } => { - let pid = pid.clone(); - let handler = self.new_handler_with_replacement(pid, handler); - return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }) + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::PutRecord(res), + stats, + .. 
+ } => { + let ev = match res { + Ok(ok) => + DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_default()), + Err(e) => { + debug!( + target: "sub-libp2p", + "Libp2p => Failed to put record: {:?}", + e, + ); + DiscoveryOut::ValuePutFailed( + e.into_key(), + stats.duration().unwrap_or_default(), + ) + }, + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) }, - NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event: (pid.clone(), event), - }), - NetworkBehaviourAction::ReportObservedAddr { address, score } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { - address, - score, - }), - NetworkBehaviourAction::CloseConnection { peer_id, connection } => - return Poll::Ready(NetworkBehaviourAction::CloseConnection { - peer_id, - connection, - }), - } + KademliaEvent::OutboundQueryCompleted { + result: QueryResult::RepublishRecord(res), + .. + } => match res { + Ok(ok) => debug!( + target: "sub-libp2p", + "Libp2p => Record republished: {:?}", + ok.key, + ), + Err(e) => debug!( + target: "sub-libp2p", + "Libp2p => Republishing of record {:?} failed with: {:?}", + e.key(), e, + ), + }, + // We never start any other type of query. + KademliaEvent::OutboundQueryCompleted { result: e, .. } => { + warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) + }, + }, + NetworkBehaviourAction::Dial { opts, handler } => + return Poll::Ready(NetworkBehaviourAction::Dial { opts, handler }), + NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + }), + NetworkBehaviourAction::ReportObservedAddr { address, score } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), + NetworkBehaviourAction::CloseConnection { peer_id, connection } => + return Poll::Ready(NetworkBehaviourAction::CloseConnection { + peer_id, + connection, + }), } } @@ -1014,7 +919,7 @@ mod tests { .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) - .add_protocol(protocol_id.clone()); + .with_kademlia(protocol_id.clone()); config.finish() }; @@ -1078,7 +983,7 @@ mod tests { to_discover[swarm_n].remove(&other); }, - DiscoveryOut::RandomKademliaStarted(_) => {}, + DiscoveryOut::RandomKademliaStarted => {}, e => { panic!("Unexpected event: {:?}", e) }, @@ -1117,7 +1022,7 @@ mod tests { .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) - .add_protocol(supported_protocol_id.clone()); + .with_kademlia(supported_protocol_id.clone()); config.finish() }; @@ -1131,7 +1036,8 @@ mod tests { remote_addr.clone(), ); - for kademlia in discovery.kademlias.values_mut() { + { + let kademlia = discovery.kademlia.as_mut().unwrap(); assert!( kademlia .kbucket(remote_peer_id) @@ -1148,66 +1054,14 @@ mod tests { remote_addr.clone(), ); - for kademlia in discovery.kademlias.values_mut() { - assert_eq!( - 1, - kademlia - .kbucket(remote_peer_id) - .expect("Remote peer id not to be equal to local peer id.") - .num_entries(), - "Expect peer with supported protocol to be added." 
- ); - } - } - - #[test] - fn discovery_adds_peer_to_kademlia_of_same_protocol_only() { - let protocol_a = ProtocolId::from("a"); - let protocol_b = ProtocolId::from("b"); - - let mut discovery = { - let keypair = Keypair::generate_ed25519(); - let mut config = DiscoveryConfig::new(keypair.public()); - config - .allow_private_ipv4(true) - .allow_non_globals_in_dht(true) - .discovery_limit(50) - .add_protocol(protocol_a.clone()) - .add_protocol(protocol_b.clone()); - config.finish() - }; - - let remote_peer_id = PeerId::random(); - let remote_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - - // Add remote peer with `protocol_a` only. - discovery.add_self_reported_address( - &remote_peer_id, - &[protocol_name_from_protocol_id(&protocol_a)], - remote_addr.clone(), - ); - + let kademlia = discovery.kademlia.as_mut().unwrap(); assert_eq!( 1, - discovery - .kademlias - .get_mut(&protocol_a) - .expect("Kademlia instance to exist.") + kademlia .kbucket(remote_peer_id) .expect("Remote peer id not to be equal to local peer id.") .num_entries(), - "Expected remote peer to be added to `protocol_a` Kademlia instance.", - ); - - assert!( - discovery - .kademlias - .get_mut(&protocol_b) - .expect("Kademlia instance to exist.") - .kbucket(remote_peer_id) - .expect("Remote peer id not to be equal to local peer id.") - .is_empty(), - "Expected remote peer not to be added to `protocol_b` Kademlia instance.", + "Expect peer with supported protocol to be added." ); } } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index f95e67df142b0..d24a1543c56fe 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -284,7 +284,7 @@ where config.discovery_limit( u64::from(params.network_config.default_peers_set.out_peers) + 15, ); - config.add_protocol(params.protocol_id.clone()); + config.with_kademlia(params.protocol_id.clone()); config.with_dht_random_walk(params.network_config.enable_dht_random_walk); config.allow_non_globals_in_dht(params.network_config.allow_non_globals_in_dht); config.use_kademlia_disjoint_query_paths( @@ -1665,16 +1665,9 @@ where .user_protocol_mut() .add_default_set_discovered_nodes(iter::once(peer_id)); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted( - protocols, - ))) => + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted)) => if let Some(metrics) = this.metrics.as_ref() { - for protocol in protocols { - metrics - .kademlia_random_queries_total - .with_label_values(&[protocol.as_ref()]) - .inc(); - } + metrics.kademlia_random_queries_total.inc(); }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, @@ -2015,28 +2008,21 @@ where this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); if let Some(metrics) = this.metrics.as_ref() { - for (proto, buckets) in this.network_service.behaviour_mut().num_entries_per_kbucket() { + if let Some(buckets) = this.network_service.behaviour_mut().num_entries_per_kbucket() { for (lower_ilog2_bucket_bound, num_entries) in buckets { metrics .kbuckets_num_nodes - .with_label_values(&[proto.as_ref(), &lower_ilog2_bucket_bound.to_string()]) + .with_label_values(&[&lower_ilog2_bucket_bound.to_string()]) .set(num_entries as u64); } } - for (proto, num_entries) in this.network_service.behaviour_mut().num_kademlia_records() - { - metrics - .kademlia_records_count - .with_label_values(&[proto.as_ref()]) - .set(num_entries as u64); + if let Some(num_entries) = 
this.network_service.behaviour_mut().num_kademlia_records() { + metrics.kademlia_records_count.set(num_entries as u64); } - for (proto, num_entries) in + if let Some(num_entries) = this.network_service.behaviour_mut().kademlia_records_total_size() { - metrics - .kademlia_records_sizes_total - .with_label_values(&[proto.as_ref()]) - .set(num_entries as u64); + metrics.kademlia_records_sizes_total.set(num_entries as u64); } metrics .peerset_num_discovered diff --git a/client/network/src/service/metrics.rs b/client/network/src/service/metrics.rs index 4b63df00b8d66..db1b6f7f6500d 100644 --- a/client/network/src/service/metrics.rs +++ b/client/network/src/service/metrics.rs @@ -59,9 +59,9 @@ pub struct Metrics { pub incoming_connections_total: Counter, pub issued_light_requests: Counter, pub kademlia_query_duration: HistogramVec, - pub kademlia_random_queries_total: CounterVec, - pub kademlia_records_count: GaugeVec, - pub kademlia_records_sizes_total: GaugeVec, + pub kademlia_random_queries_total: Counter, + pub kademlia_records_count: Gauge, + pub kademlia_records_sizes_total: Gauge, pub kbuckets_num_nodes: GaugeVec, pub listeners_local_addresses: Gauge, pub listeners_errors_total: Counter, @@ -138,33 +138,24 @@ impl Metrics { }, &["type"] )?, registry)?, - kademlia_random_queries_total: prometheus::register(CounterVec::new( - Opts::new( - "substrate_sub_libp2p_kademlia_random_queries_total", - "Number of random Kademlia queries started" - ), - &["protocol"] + kademlia_random_queries_total: prometheus::register(Counter::new( + "substrate_sub_libp2p_kademlia_random_queries_total", + "Number of random Kademlia queries started", )?, registry)?, - kademlia_records_count: prometheus::register(GaugeVec::new( - Opts::new( - "substrate_sub_libp2p_kademlia_records_count", - "Number of records in the Kademlia records store" - ), - &["protocol"] + kademlia_records_count: prometheus::register(Gauge::new( + "substrate_sub_libp2p_kademlia_records_count", + "Number of records in the Kademlia records store", )?, registry)?, - kademlia_records_sizes_total: prometheus::register(GaugeVec::new( - Opts::new( - "substrate_sub_libp2p_kademlia_records_sizes_total", - "Total size of all the records in the Kademlia records store" - ), - &["protocol"] + kademlia_records_sizes_total: prometheus::register(Gauge::new( + "substrate_sub_libp2p_kademlia_records_sizes_total", + "Total size of all the records in the Kademlia records store", )?, registry)?, kbuckets_num_nodes: prometheus::register(GaugeVec::new( Opts::new( "substrate_sub_libp2p_kbuckets_num_nodes", "Number of nodes per kbucket per Kademlia instance" ), - &["protocol", "lower_ilog2_bucket_bound"] + &["lower_ilog2_bucket_bound"] )?, registry)?, listeners_local_addresses: prometheus::register(Gauge::new( "substrate_sub_libp2p_listeners_local_addresses", From 460b64ba83473e218e39b266876f1693ae179231 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 20 Oct 2022 20:57:00 +0200 Subject: [PATCH 267/348] CI: Enable debug assertions in Wasmer sandbox test (#12540) * CI: Enable debug assertions in Wasmer sandbox test Signed-off-by: Oliver Tale-Yazdi * Fix feature dependant import Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- frame/support/test/tests/construct_runtime_ui.rs | 6 ++---- scripts/ci/gitlab/pipeline/test.yml | 4 ++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/frame/support/test/tests/construct_runtime_ui.rs b/frame/support/test/tests/construct_runtime_ui.rs index 38aa780766835..42fd87ca95c0e 100644 --- 
a/frame/support/test/tests/construct_runtime_ui.rs +++ b/frame/support/test/tests/construct_runtime_ui.rs @@ -15,19 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::env; - #[rustversion::attr(not(stable), ignore)] #[cfg(not(feature = "disable-ui-tests"))] #[test] fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. - if env::var("RUN_UI_TESTS").is_err() { + if std::env::var("RUN_UI_TESTS").is_err() { return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("SKIP_WASM_BUILD", "1"); + std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); t.compile_fail("tests/construct_runtime_ui/*.rs"); diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index e85ab85afe6ed..b405cfa1eb3a4 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -394,6 +394,10 @@ test-wasmer-sandbox: - .docker-env - .test-refs-wasmer-sandbox variables: + RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + RUST_BACKTRACE: 1 + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" CI_JOB_NAME: "test-wasmer-sandbox" parallel: 3 script: From 1125011be165fca564fb78c8a50124537a1f4d0e Mon Sep 17 00:00:00 2001 From: Koute Date: Fri, 21 Oct 2022 05:07:52 +0900 Subject: [PATCH 268/348] Force base weights to be the minimum only when the intercept is negative (#12482) * Force base weights to be the minimum only when the intercept is negative; emit minimum execution times * Add an `assert` making sure the intercept is zero when it's supposed to be zero * Fix template Signed-off-by: Oliver Tale-Yazdi * ".git/.scripts/bench-bot.sh" pallet dev pallet_assets * ".git/.scripts/bench-bot.sh" pallet dev pallet_uniques Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> --- .maintain/frame-weight-template.hbs | 2 + frame/assets/src/weights.rs | 213 +++++++++++------- frame/benchmarking/src/analysis.rs | 62 +++-- frame/uniques/src/weights.rs | 199 ++++++++++------ .../benchmarking-cli/src/pallet/template.hbs | 1 + .../benchmarking-cli/src/pallet/writer.rs | 3 + 6 files changed, 314 insertions(+), 166 deletions(-) diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index b0244fbdab85b..96731770ff2ea 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -64,6 +64,7 @@ impl WeightInfo for SubstrateWeight { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { + // Minimum execution time: {{underscore benchmark.min_execution_time}} nanoseconds. Weight::from_ref_time({{underscore benchmark.base_weight}} as u64) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} @@ -99,6 +100,7 @@ impl WeightInfo for () { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { + // Minimum execution time: {{underscore benchmark.min_execution_time}} nanoseconds. Weight::from_ref_time({{underscore benchmark.base_weight}} as u64) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs index cba78f12c218c..c260da70e3480 100644 --- a/frame/assets/src/weights.rs +++ b/frame/assets/src/weights.rs @@ -18,22 +18,25 @@ //! 
Autogenerated weights for pallet_assets //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-10-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_assets // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_assets +// --chain=dev // --output=./frame/assets/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -74,13 +77,15 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - Weight::from_ref_time(27_167_000 as u64) + // Minimum execution time: 32_200 nanoseconds. + Weight::from_ref_time(32_739_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - Weight::from_ref_time(15_473_000 as u64) + // Minimum execution time: 19_192 nanoseconds. + Weight::from_ref_time(19_615_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -89,14 +94,16 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:5000 w:5000) // Storage: Assets Metadata (r:1 w:0) // Storage: Assets Approvals (r:501 w:500) + /// The range of component `c` is `[0, 5000]`. + /// The range of component `s` is `[0, 5000]`. + /// The range of component `a` is `[0, 500]`. fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(17_145_000 as u64).saturating_mul(c as u64)) - // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(19_333_000 as u64).saturating_mul(s as u64)) - // Standard Error: 375_000 - .saturating_add(Weight::from_ref_time(17_046_000 as u64).saturating_mul(a as u64)) + // Minimum execution time: 75_299_531 nanoseconds. + Weight::from_ref_time(75_567_795_000 as u64) + // Standard Error: 126_181 + .saturating_add(Weight::from_ref_time(8_378_363 as u64).saturating_mul(c as u64)) + // Standard Error: 126_181 + .saturating_add(Weight::from_ref_time(10_950_076 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(c as u64))) .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(s as u64))) @@ -109,14 +116,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - Weight::from_ref_time(30_819_000 as u64) + // Minimum execution time: 35_066 nanoseconds. 
+ Weight::from_ref_time(36_217_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - Weight::from_ref_time(35_212_000 as u64) + // Minimum execution time: 44_915 nanoseconds. + Weight::from_ref_time(45_807_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -124,7 +133,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(47_401_000 as u64) + // Minimum execution time: 57_245 nanoseconds. + Weight::from_ref_time(58_179_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -132,7 +142,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - Weight::from_ref_time(42_300_000 as u64) + // Minimum execution time: 47_028 nanoseconds. + Weight::from_ref_time(47_571_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -140,93 +151,108 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - Weight::from_ref_time(47_946_000 as u64) + // Minimum execution time: 57_069 nanoseconds. + Weight::from_ref_time(58_245_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - Weight::from_ref_time(21_670_000 as u64) + // Minimum execution time: 26_823 nanoseconds. + Weight::from_ref_time(27_689_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - Weight::from_ref_time(21_503_000 as u64) + // Minimum execution time: 26_984 nanoseconds. + Weight::from_ref_time(27_591_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - Weight::from_ref_time(18_158_000 as u64) + // Minimum execution time: 23_803 nanoseconds. + Weight::from_ref_time(24_269_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - Weight::from_ref_time(18_525_000 as u64) + // Minimum execution time: 22_977 nanoseconds. + Weight::from_ref_time(23_527_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - Weight::from_ref_time(19_858_000 as u64) + // Minimum execution time: 23_815 nanoseconds. + Weight::from_ref_time(24_597_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(18_045_000 as u64) + // Minimum execution time: 22_691 nanoseconds. 
+ Weight::from_ref_time(23_173_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) - fn set_metadata(n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_395_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(n as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn set_metadata(_n: u32, _s: u32, ) -> Weight { + // Minimum execution time: 40_675 nanoseconds. + Weight::from_ref_time(42_869_113 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(32_893_000 as u64) + // Minimum execution time: 42_361 nanoseconds. + Weight::from_ref_time(42_912_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(19_586_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(s as u64)) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn force_set_metadata(n: u32, s: u32, ) -> Weight { + // Minimum execution time: 22_998 nanoseconds. + Weight::from_ref_time(23_844_675 as u64) + // Standard Error: 506 + .saturating_add(Weight::from_ref_time(2_874 as u64).saturating_mul(n as u64)) + // Standard Error: 506 + .saturating_add(Weight::from_ref_time(3_817 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - Weight::from_ref_time(32_478_000 as u64) + // Minimum execution time: 42_305 nanoseconds. + Weight::from_ref_time(42_678_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - Weight::from_ref_time(17_143_000 as u64) + // Minimum execution time: 22_052 nanoseconds. + Weight::from_ref_time(22_610_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(36_389_000 as u64) + // Minimum execution time: 44_900 nanoseconds. + Weight::from_ref_time(46_032_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -235,21 +261,24 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - Weight::from_ref_time(61_854_000 as u64) + // Minimum execution time: 72_261 nanoseconds. 
+ Weight::from_ref_time(73_186_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(36_759_000 as u64) + // Minimum execution time: 47_268 nanoseconds. + Weight::from_ref_time(47_712_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - Weight::from_ref_time(37_753_000 as u64) + // Minimum execution time: 47_363 nanoseconds. + Weight::from_ref_time(48_696_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -259,13 +288,15 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - Weight::from_ref_time(27_167_000 as u64) + // Minimum execution time: 32_200 nanoseconds. + Weight::from_ref_time(32_739_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - Weight::from_ref_time(15_473_000 as u64) + // Minimum execution time: 19_192 nanoseconds. + Weight::from_ref_time(19_615_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -274,14 +305,16 @@ impl WeightInfo for () { // Storage: System Account (r:5000 w:5000) // Storage: Assets Metadata (r:1 w:0) // Storage: Assets Approvals (r:501 w:500) + /// The range of component `c` is `[0, 5000]`. + /// The range of component `s` is `[0, 5000]`. + /// The range of component `a` is `[0, 500]`. fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(17_145_000 as u64).saturating_mul(c as u64)) - // Standard Error: 37_000 - .saturating_add(Weight::from_ref_time(19_333_000 as u64).saturating_mul(s as u64)) - // Standard Error: 375_000 - .saturating_add(Weight::from_ref_time(17_046_000 as u64).saturating_mul(a as u64)) + // Minimum execution time: 75_299_531 nanoseconds. + Weight::from_ref_time(75_567_795_000 as u64) + // Standard Error: 126_181 + .saturating_add(Weight::from_ref_time(8_378_363 as u64).saturating_mul(c as u64)) + // Standard Error: 126_181 + .saturating_add(Weight::from_ref_time(10_950_076 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(c as u64))) .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(s as u64))) @@ -294,14 +327,16 @@ impl WeightInfo for () { // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - Weight::from_ref_time(30_819_000 as u64) + // Minimum execution time: 35_066 nanoseconds. + Weight::from_ref_time(36_217_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - Weight::from_ref_time(35_212_000 as u64) + // Minimum execution time: 44_915 nanoseconds. 
+ Weight::from_ref_time(45_807_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -309,7 +344,8 @@ impl WeightInfo for () { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(47_401_000 as u64) + // Minimum execution time: 57_245 nanoseconds. + Weight::from_ref_time(58_179_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -317,7 +353,8 @@ impl WeightInfo for () { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - Weight::from_ref_time(42_300_000 as u64) + // Minimum execution time: 47_028 nanoseconds. + Weight::from_ref_time(47_571_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -325,93 +362,108 @@ impl WeightInfo for () { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - Weight::from_ref_time(47_946_000 as u64) + // Minimum execution time: 57_069 nanoseconds. + Weight::from_ref_time(58_245_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - Weight::from_ref_time(21_670_000 as u64) + // Minimum execution time: 26_823 nanoseconds. + Weight::from_ref_time(27_689_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - Weight::from_ref_time(21_503_000 as u64) + // Minimum execution time: 26_984 nanoseconds. + Weight::from_ref_time(27_591_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - Weight::from_ref_time(18_158_000 as u64) + // Minimum execution time: 23_803 nanoseconds. + Weight::from_ref_time(24_269_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - Weight::from_ref_time(18_525_000 as u64) + // Minimum execution time: 22_977 nanoseconds. + Weight::from_ref_time(23_527_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - Weight::from_ref_time(19_858_000 as u64) + // Minimum execution time: 23_815 nanoseconds. + Weight::from_ref_time(24_597_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(18_045_000 as u64) + // Minimum execution time: 22_691 nanoseconds. 
+ Weight::from_ref_time(23_173_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) - fn set_metadata(n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_395_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(n as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn set_metadata(_n: u32, _s: u32, ) -> Weight { + // Minimum execution time: 40_675 nanoseconds. + Weight::from_ref_time(42_869_113 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(32_893_000 as u64) + // Minimum execution time: 42_361 nanoseconds. + Weight::from_ref_time(42_912_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - Weight::from_ref_time(19_586_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(s as u64)) + /// The range of component `n` is `[0, 50]`. + /// The range of component `s` is `[0, 50]`. + fn force_set_metadata(n: u32, s: u32, ) -> Weight { + // Minimum execution time: 22_998 nanoseconds. + Weight::from_ref_time(23_844_675 as u64) + // Standard Error: 506 + .saturating_add(Weight::from_ref_time(2_874 as u64).saturating_mul(n as u64)) + // Standard Error: 506 + .saturating_add(Weight::from_ref_time(3_817 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - Weight::from_ref_time(32_478_000 as u64) + // Minimum execution time: 42_305 nanoseconds. + Weight::from_ref_time(42_678_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - Weight::from_ref_time(17_143_000 as u64) + // Minimum execution time: 22_052 nanoseconds. + Weight::from_ref_time(22_610_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(36_389_000 as u64) + // Minimum execution time: 44_900 nanoseconds. + Weight::from_ref_time(46_032_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -420,21 +472,24 @@ impl WeightInfo for () { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - Weight::from_ref_time(61_854_000 as u64) + // Minimum execution time: 72_261 nanoseconds. 
+ Weight::from_ref_time(73_186_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(36_759_000 as u64) + // Minimum execution time: 47_268 nanoseconds. + Weight::from_ref_time(47_712_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - Weight::from_ref_time(37_753_000 as u64) + // Minimum execution time: 47_363 nanoseconds. + Weight::from_ref_time(48_696_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index a736cdc203182..db8ee599ef731 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -26,6 +26,7 @@ pub struct Analysis { pub names: Vec, pub value_dists: Option, u128, u128)>>, pub errors: Option>, + pub minimum: u128, selector: BenchmarkSelector, } @@ -49,7 +50,10 @@ fn mul_1000_into_u128(value: f64) -> u128 { impl BenchmarkSelector { fn scale_and_cast_weight(self, value: f64, round_up: bool) -> u128 { if let BenchmarkSelector::ExtrinsicTime = self { - mul_1000_into_u128(value) + // We add a very slight bias here to counteract the numerical imprecision of the linear + // regression where due to rounding issues it can emit a number like `2999999.999999998` + // which we most certainly always want to round up instead of truncating. + mul_1000_into_u128(value + 0.000_000_005) } else { if round_up { (value + 0.5) as u128 @@ -74,6 +78,24 @@ impl BenchmarkSelector { value } } + + fn get_value(self, result: &BenchmarkResult) -> u128 { + match self { + BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, + BenchmarkSelector::StorageRootTime => result.storage_root_time, + BenchmarkSelector::Reads => result.reads.into(), + BenchmarkSelector::Writes => result.writes.into(), + BenchmarkSelector::ProofSize => result.proof_size.into(), + } + } + + fn get_minimum(self, results: &[BenchmarkResult]) -> u128 { + results + .iter() + .map(|result| self.get_value(result)) + .min() + .expect("results cannot be empty") + } } #[derive(Debug)] @@ -109,8 +131,8 @@ impl TryFrom> for AnalysisChoice { } fn raw_linear_regression( - xs: Vec, - ys: Vec, + xs: &[f64], + ys: &[f64], x_vars: usize, with_intercept: bool, ) -> Option<(f64, Vec, Vec)> { @@ -147,6 +169,14 @@ fn linear_regression( mut ys: Vec, x_vars: usize, ) -> Option<(f64, Vec, Vec)> { + let (intercept, params, errors) = raw_linear_regression(&xs, &ys, x_vars, true)?; + if intercept > 0.0 { + return Some((intercept, params, errors[1..].to_vec())) + } + + // The intercept is negative. + // The weights must be always positive, so we can't have that. 
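+	// Instead, shift every observation down by the smallest observed value, redo the
+	// regression without an intercept, and report that smallest value as the base so the
+	// resulting weight can never be negative.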
+ let mut min = ys[0]; for &value in &ys { if value < min { @@ -158,8 +188,9 @@ fn linear_regression( *value -= min; } - let (intercept, params, errors) = raw_linear_regression(xs, ys, x_vars, false)?; - Some((intercept + min, params, errors[1..].to_vec())) + let (intercept, params, errors) = raw_linear_regression(&xs, &ys, x_vars, false)?; + assert!(intercept.abs() <= 0.0001); + Some((min, params, errors[1..].to_vec())) } impl Analysis { @@ -190,6 +221,7 @@ impl Analysis { names: Vec::new(), value_dists: None, errors: None, + minimum: selector.get_minimum(&r), selector, }) } @@ -289,6 +321,7 @@ impl Analysis { names: results.into_iter().map(|x| x.0).collect::>(), value_dists: None, errors: None, + minimum: selector.get_minimum(&r), selector, }) } @@ -361,6 +394,7 @@ impl Analysis { .map(|value| selector.scale_and_cast_weight(value, false)) .collect(), ), + minimum: selector.get_minimum(&r), selector, }) } @@ -392,8 +426,9 @@ impl Analysis { let names = median_slopes.names; let value_dists = min_squares.value_dists; let errors = min_squares.errors; + let minimum = selector.get_minimum(&r); - Some(Self { base, slopes, names, value_dists, errors, selector }) + Some(Self { base, slopes, names, value_dists, errors, selector, minimum }) } } @@ -533,8 +568,7 @@ mod tests { 16.0, 17.0, 18.0, 19.0, 20.0, ]; - let (intercept, params, errors) = - raw_linear_regression(xs.clone(), ys.clone(), 1, true).unwrap(); + let (intercept, params, errors) = raw_linear_regression(&xs, &ys, 1, true).unwrap(); assert_eq!(intercept as i64, -2712997); assert_eq!(params.len(), 1); assert_eq!(params[0] as i64, 34444926); @@ -688,15 +722,15 @@ mod tests { let extrinsic_time = Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); - assert_eq!(extrinsic_time.base, 11_500_000_000); - assert_eq!(extrinsic_time.slopes, vec![630635838, 23699421]); + assert_eq!(extrinsic_time.base, 10_000_000_000); + assert_eq!(extrinsic_time.slopes, vec![1000000000, 100000000]); let reads = Analysis::min_squares_iqr(&data, BenchmarkSelector::Reads).unwrap(); - assert_eq!(reads.base, 3); + assert_eq!(reads.base, 2); assert_eq!(reads.slopes, vec![1, 0]); let writes = Analysis::min_squares_iqr(&data, BenchmarkSelector::Writes).unwrap(); - assert_eq!(writes.base, 2); + assert_eq!(writes.base, 0); assert_eq!(writes.slopes, vec![0, 2]); } @@ -711,7 +745,7 @@ mod tests { let extrinsic_time = Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); - assert_eq!(extrinsic_time.base, 2_000_000_000); - assert_eq!(extrinsic_time.slopes, vec![4_000_000_000]); + assert_eq!(extrinsic_time.base, 3_000_000_000); + assert_eq!(extrinsic_time.slopes, vec![3_000_000_000]); } } diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index c7a8a2f6baa0f..a134b68d093b6 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -18,12 +18,12 @@ //! Autogenerated weights for pallet_uniques //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-07-13, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `test-bench-bot`, CPU: `Intel(R) Xeon(R) CPU @ 3.10GHz` +//! DATE: 2022-10-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet // --steps=50 @@ -32,6 +32,7 @@ // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json // --pallet=pallet_uniques // --chain=dev // --output=./frame/uniques/src/weights.rs @@ -80,14 +81,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(33_075_000 as u64) + // Minimum execution time: 32_901 nanoseconds. + Weight::from_ref_time(33_628_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(19_528_000 as u64) + // Minimum execution time: 21_633 nanoseconds. + Weight::from_ref_time(22_380_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -103,13 +106,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(13_639_000 as u64).saturating_mul(n as u64)) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_393_000 as u64).saturating_mul(m as u64)) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_217_000 as u64).saturating_mul(a as u64)) + // Minimum execution time: 2_429_508 nanoseconds. + Weight::from_ref_time(2_446_263_000 as u64) + // Standard Error: 28_236 + .saturating_add(Weight::from_ref_time(8_477_090 as u64).saturating_mul(n as u64)) + // Standard Error: 28_236 + .saturating_add(Weight::from_ref_time(314_268 as u64).saturating_mul(m as u64)) + // Standard Error: 28_236 + .saturating_add(Weight::from_ref_time(249_624 as u64).saturating_mul(a as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -122,7 +126,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(42_146_000 as u64) + // Minimum execution time: 41_938 nanoseconds. + Weight::from_ref_time(42_814_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -131,7 +136,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(42_960_000 as u64) + // Minimum execution time: 43_593 nanoseconds. + Weight::from_ref_time(44_420_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -140,17 +146,19 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(33_025_000 as u64) + // Minimum execution time: 34_037 nanoseconds. 
+ Weight::from_ref_time(34_848_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques Asset (r:100 w:100) + // Storage: Uniques Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 24_000 - .saturating_add(Weight::from_ref_time(15_540_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 23_295 nanoseconds. + Weight::from_ref_time(23_482_000 as u64) + // Standard Error: 9_490 + .saturating_add(Weight::from_ref_time(10_899_320 as u64).saturating_mul(i as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) @@ -159,26 +167,30 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - Weight::from_ref_time(25_194_000 as u64) + // Minimum execution time: 27_005 nanoseconds. + Weight::from_ref_time(27_344_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - Weight::from_ref_time(25_397_000 as u64) + // Minimum execution time: 26_815 nanoseconds. + Weight::from_ref_time(27_400_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - Weight::from_ref_time(19_278_000 as u64) + // Minimum execution time: 21_988 nanoseconds. + Weight::from_ref_time(22_596_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - Weight::from_ref_time(19_304_000 as u64) + // Minimum execution time: 21_886 nanoseconds. + Weight::from_ref_time(22_617_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -186,20 +198,23 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(28_615_000 as u64) + // Minimum execution time: 30_241 nanoseconds. + Weight::from_ref_time(30_677_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(19_943_000 as u64) + // Minimum execution time: 23_011 nanoseconds. + Weight::from_ref_time(23_796_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - Weight::from_ref_time(22_583_000 as u64) + // Minimum execution time: 25_739 nanoseconds. 
+ Weight::from_ref_time(26_572_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -207,7 +222,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(47_520_000 as u64) + // Minimum execution time: 48_486 nanoseconds. + Weight::from_ref_time(49_029_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -215,69 +231,79 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(45_316_000 as u64) + // Minimum execution time: 47_408 nanoseconds. + Weight::from_ref_time(48_753_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(38_391_000 as u64) + // Minimum execution time: 40_085 nanoseconds. + Weight::from_ref_time(40_625_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(38_023_000 as u64) + // Minimum execution time: 40_546 nanoseconds. + Weight::from_ref_time(41_113_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(37_398_000 as u64) + // Minimum execution time: 39_902 nanoseconds. + Weight::from_ref_time(40_599_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(35_621_000 as u64) + // Minimum execution time: 37_597 nanoseconds. + Weight::from_ref_time(37_964_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(25_856_000 as u64) + // Minimum execution time: 28_719 nanoseconds. + Weight::from_ref_time(29_213_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(26_098_000 as u64) + // Minimum execution time: 27_608 nanoseconds. + Weight::from_ref_time(28_096_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(24_076_000 as u64) + // Minimum execution time: 25_568 nanoseconds. 
+ Weight::from_ref_time(26_098_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(22_035_000 as u64) + // Minimum execution time: 24_521 nanoseconds. + Weight::from_ref_time(25_062_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(22_534_000 as u64) + // Minimum execution time: 25_337 nanoseconds. + Weight::from_ref_time(25_902_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -286,7 +312,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - Weight::from_ref_time(45_272_000 as u64) + // Minimum execution time: 46_736 nanoseconds. + Weight::from_ref_time(47_264_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -297,14 +324,16 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(33_075_000 as u64) + // Minimum execution time: 32_901 nanoseconds. + Weight::from_ref_time(33_628_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - Weight::from_ref_time(19_528_000 as u64) + // Minimum execution time: 21_633 nanoseconds. + Weight::from_ref_time(22_380_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -320,13 +349,14 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(13_639_000 as u64).saturating_mul(n as u64)) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_393_000 as u64).saturating_mul(m as u64)) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(2_217_000 as u64).saturating_mul(a as u64)) + // Minimum execution time: 2_429_508 nanoseconds. + Weight::from_ref_time(2_446_263_000 as u64) + // Standard Error: 28_236 + .saturating_add(Weight::from_ref_time(8_477_090 as u64).saturating_mul(n as u64)) + // Standard Error: 28_236 + .saturating_add(Weight::from_ref_time(314_268 as u64).saturating_mul(m as u64)) + // Standard Error: 28_236 + .saturating_add(Weight::from_ref_time(249_624 as u64).saturating_mul(a as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -339,7 +369,8 @@ impl WeightInfo for () { // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - Weight::from_ref_time(42_146_000 as u64) + // Minimum execution time: 41_938 nanoseconds. 
+ Weight::from_ref_time(42_814_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -348,7 +379,8 @@ impl WeightInfo for () { // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - Weight::from_ref_time(42_960_000 as u64) + // Minimum execution time: 43_593 nanoseconds. + Weight::from_ref_time(44_420_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -357,17 +389,19 @@ impl WeightInfo for () { // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - Weight::from_ref_time(33_025_000 as u64) + // Minimum execution time: 34_037 nanoseconds. + Weight::from_ref_time(34_848_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) - // Storage: Uniques Asset (r:100 w:100) + // Storage: Uniques Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 24_000 - .saturating_add(Weight::from_ref_time(15_540_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 23_295 nanoseconds. + Weight::from_ref_time(23_482_000 as u64) + // Standard Error: 9_490 + .saturating_add(Weight::from_ref_time(10_899_320 as u64).saturating_mul(i as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) @@ -376,26 +410,30 @@ impl WeightInfo for () { // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - Weight::from_ref_time(25_194_000 as u64) + // Minimum execution time: 27_005 nanoseconds. + Weight::from_ref_time(27_344_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - Weight::from_ref_time(25_397_000 as u64) + // Minimum execution time: 26_815 nanoseconds. + Weight::from_ref_time(27_400_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - Weight::from_ref_time(19_278_000 as u64) + // Minimum execution time: 21_988 nanoseconds. + Weight::from_ref_time(22_596_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - Weight::from_ref_time(19_304_000 as u64) + // Minimum execution time: 21_886 nanoseconds. + Weight::from_ref_time(22_617_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -403,20 +441,23 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - Weight::from_ref_time(28_615_000 as u64) + // Minimum execution time: 30_241 nanoseconds. 
+ Weight::from_ref_time(30_677_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - Weight::from_ref_time(19_943_000 as u64) + // Minimum execution time: 23_011 nanoseconds. + Weight::from_ref_time(23_796_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - Weight::from_ref_time(22_583_000 as u64) + // Minimum execution time: 25_739 nanoseconds. + Weight::from_ref_time(26_572_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -424,7 +465,8 @@ impl WeightInfo for () { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - Weight::from_ref_time(47_520_000 as u64) + // Minimum execution time: 48_486 nanoseconds. + Weight::from_ref_time(49_029_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -432,69 +474,79 @@ impl WeightInfo for () { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - Weight::from_ref_time(45_316_000 as u64) + // Minimum execution time: 47_408 nanoseconds. + Weight::from_ref_time(48_753_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - Weight::from_ref_time(38_391_000 as u64) + // Minimum execution time: 40_085 nanoseconds. + Weight::from_ref_time(40_625_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - Weight::from_ref_time(38_023_000 as u64) + // Minimum execution time: 40_546 nanoseconds. + Weight::from_ref_time(41_113_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - Weight::from_ref_time(37_398_000 as u64) + // Minimum execution time: 39_902 nanoseconds. + Weight::from_ref_time(40_599_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - Weight::from_ref_time(35_621_000 as u64) + // Minimum execution time: 37_597 nanoseconds. + Weight::from_ref_time(37_964_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - Weight::from_ref_time(25_856_000 as u64) + // Minimum execution time: 28_719 nanoseconds. 
+ Weight::from_ref_time(29_213_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - Weight::from_ref_time(26_098_000 as u64) + // Minimum execution time: 27_608 nanoseconds. + Weight::from_ref_time(28_096_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - Weight::from_ref_time(24_076_000 as u64) + // Minimum execution time: 25_568 nanoseconds. + Weight::from_ref_time(26_098_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - Weight::from_ref_time(22_035_000 as u64) + // Minimum execution time: 24_521 nanoseconds. + Weight::from_ref_time(25_062_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - Weight::from_ref_time(22_534_000 as u64) + // Minimum execution time: 25_337 nanoseconds. + Weight::from_ref_time(25_902_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -503,7 +555,8 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - Weight::from_ref_time(45_272_000 as u64) + // Minimum execution time: 46_736 nanoseconds. + Weight::from_ref_time(47_264_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } diff --git a/utils/frame/benchmarking-cli/src/pallet/template.hbs b/utils/frame/benchmarking-cli/src/pallet/template.hbs index fbe435edf8fc3..7e2e0688d654f 100644 --- a/utils/frame/benchmarking-cli/src/pallet/template.hbs +++ b/utils/frame/benchmarking-cli/src/pallet/template.hbs @@ -33,6 +33,7 @@ impl {{pallet}}::WeightInfo for WeightInfo { {{~#each benchmark.components as |c| ~}} {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} ) -> Weight { + // Minimum execution time: {{underscore benchmark.min_execution_time}} nanoseconds. 
Weight::from_ref_time({{underscore benchmark.base_weight}} as u64) {{#each benchmark.component_weight as |cw|}} // Standard Error: {{underscore cw.error}} diff --git a/utils/frame/benchmarking-cli/src/pallet/writer.rs b/utils/frame/benchmarking-cli/src/pallet/writer.rs index cd52ffc329d1c..a52bbcd229cb1 100644 --- a/utils/frame/benchmarking-cli/src/pallet/writer.rs +++ b/utils/frame/benchmarking-cli/src/pallet/writer.rs @@ -69,6 +69,8 @@ struct BenchmarkData { component_writes: Vec, component_ranges: Vec, comments: Vec, + #[serde(serialize_with = "string_serialize")] + min_execution_time: u128, } // This forwards some specific metadata from the `PalletCmd` @@ -257,6 +259,7 @@ fn get_benchmark_data( component_writes: used_writes, component_ranges, comments, + min_execution_time: extrinsic_time.minimum, } } From a8a8394b855fd988fc879ee0dfea9cdab98f5f86 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 20 Oct 2022 23:06:12 +0200 Subject: [PATCH 269/348] Add `DefensiveTruncateFrom` (#12515) * Add DefensiveTruncateFrom Signed-off-by: Oliver Tale-Yazdi * Add tests Signed-off-by: Oliver Tale-Yazdi * Fix tests Signed-off-by: Oliver Tale-Yazdi * Map_err in preimage Signed-off-by: Oliver Tale-Yazdi * Map_err in beefy Signed-off-by: Oliver Tale-Yazdi * Make test dependant in debug-assertions Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi Co-authored-by: parity-processbot <> --- frame/beefy/src/lib.rs | 3 +- frame/preimage/src/lib.rs | 1 + frame/support/src/traits.rs | 9 ++- frame/support/src/traits/misc.rs | 89 ++++++++++++++++++++++ primitives/core/src/bounded/bounded_vec.rs | 73 +++++++++++++++++- 5 files changed, 168 insertions(+), 7 deletions(-) diff --git a/frame/beefy/src/lib.rs b/frame/beefy/src/lib.rs index fce531d3f5dd7..305b158124b67 100644 --- a/frame/beefy/src/lib.rs +++ b/frame/beefy/src/lib.rs @@ -159,7 +159,8 @@ impl Pallet { } let bounded_authorities = - BoundedSlice::::try_from(authorities.as_slice())?; + BoundedSlice::::try_from(authorities.as_slice()) + .map_err(|_| ())?; let id = 0; >::put(bounded_authorities); diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index e899d3643dbbf..6549832c11f5d 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -342,6 +342,7 @@ impl Pallet { fn insert(hash: &T::Hash, preimage: Cow<[u8]>) -> Result<(), ()> { BoundedSlice::>::try_from(preimage.as_ref()) + .map_err(|_| ()) .map(|s| PreimageFor::::insert((hash, s.len() as u32), s)) } diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 302d3354dae5e..2b6c5efee10bb 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -56,10 +56,11 @@ mod misc; pub use misc::{ defensive_prelude::{self, *}, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, - ConstU32, ConstU64, ConstU8, DefensiveSaturating, EnsureInherentsAreFirst, EqualPrivilegeOnly, - EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, - IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, - SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, WrapperKeepOpaque, WrapperOpaque, + ConstU32, ConstU64, ConstU8, DefensiveSaturating, DefensiveTruncateFrom, + EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, + GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, Len, OffchainWorker, + OnKilledAccount, OnNewAccount, PrivilegeCmp, SameOrOther, Time, TryCollect, TryDrop, 
TypedGet, + UnixTime, WrapperKeepOpaque, WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 5a976478fa7c4..1297956b2f64e 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -22,6 +22,7 @@ use codec::{CompactLen, Decode, DecodeLimit, Encode, EncodeLike, Input, MaxEncod use impl_trait_for_tuples::impl_for_tuples; use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_arithmetic::traits::{CheckedAdd, CheckedMul, CheckedSub, Saturating}; +use sp_core::bounded::bounded_vec::TruncateFrom; #[doc(hidden)] pub use sp_runtime::traits::{ ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, @@ -369,6 +370,43 @@ impl DefensiveSaturating f } } +/// Construct an object by defensively truncating an input if the `TryFrom` conversion fails. +pub trait DefensiveTruncateFrom { + /// Use `TryFrom` first and defensively fall back to truncating otherwise. + /// + /// # Example + /// + /// ``` + /// use frame_support::{BoundedVec, traits::DefensiveTruncateFrom}; + /// use sp_runtime::traits::ConstU32; + /// + /// let unbound = vec![1, 2]; + /// let bound = BoundedVec::>::defensive_truncate_from(unbound); + /// + /// assert_eq!(bound, vec![1, 2]); + /// ``` + fn defensive_truncate_from(unbound: T) -> Self; +} + +impl DefensiveTruncateFrom for T +where + // NOTE: We use the fact that `BoundedVec` and + // `BoundedSlice` use `Self` as error type. We could also + // require a `Clone` bound and use `unbound.clone()` in the + // error case. + T: TruncateFrom + TryFrom, +{ + fn defensive_truncate_from(unbound: U) -> Self { + unbound.try_into().map_or_else( + |err| { + defensive!("DefensiveTruncateFrom truncating"); + T::truncate_from(err) + }, + |bound| bound, + ) + } +} + /// Anything that can have a `::len()` method. pub trait Len { /// Return the length of data type. 
@@ -950,8 +988,59 @@ impl PreimageRecipient for () { #[cfg(test)] mod test { use super::*; + use sp_core::bounded::{BoundedSlice, BoundedVec}; use sp_std::marker::PhantomData; + #[test] + #[cfg(not(debug_assertions))] + fn defensive_truncating_from_vec_defensive_works() { + let unbound = vec![1u32, 2]; + let bound = BoundedVec::>::defensive_truncate_from(unbound); + assert_eq!(bound, vec![1u32]); + } + + #[test] + #[cfg(not(debug_assertions))] + fn defensive_truncating_from_slice_defensive_works() { + let unbound = &[1u32, 2]; + let bound = BoundedSlice::>::defensive_truncate_from(unbound); + assert_eq!(bound, &[1u32][..]); + } + + #[test] + #[cfg(debug_assertions)] + #[should_panic( + expected = "Defensive failure has been triggered!: \"DefensiveTruncateFrom truncating\"" + )] + fn defensive_truncating_from_vec_defensive_panics() { + let unbound = vec![1u32, 2]; + let _ = BoundedVec::>::defensive_truncate_from(unbound); + } + + #[test] + #[cfg(debug_assertions)] + #[should_panic( + expected = "Defensive failure has been triggered!: \"DefensiveTruncateFrom truncating\"" + )] + fn defensive_truncating_from_slice_defensive_panics() { + let unbound = &[1u32, 2]; + let _ = BoundedSlice::>::defensive_truncate_from(unbound); + } + + #[test] + fn defensive_truncate_from_vec_works() { + let unbound = vec![1u32, 2, 3]; + let bound = BoundedVec::>::defensive_truncate_from(unbound.clone()); + assert_eq!(bound, unbound); + } + + #[test] + fn defensive_truncate_from_slice_works() { + let unbound = [1u32, 2, 3]; + let bound = BoundedSlice::>::defensive_truncate_from(&unbound); + assert_eq!(bound, &unbound[..]); + } + #[derive(Encode, Decode)] enum NestedType { Nested(Box), diff --git a/primitives/core/src/bounded/bounded_vec.rs b/primitives/core/src/bounded/bounded_vec.rs index 1832e43e8646c..2f39f3340ce50 100644 --- a/primitives/core/src/bounded/bounded_vec.rs +++ b/primitives/core/src/bounded/bounded_vec.rs @@ -47,6 +47,12 @@ pub struct BoundedVec( #[cfg_attr(feature = "std", serde(skip_serializing))] PhantomData, ); +/// Create an object through truncation. +pub trait TruncateFrom { + /// Create an object through truncation. + fn truncate_from(unbound: T) -> Self; +} + #[cfg(feature = "std")] impl<'de, T, S: Get> Deserialize<'de> for BoundedVec where @@ -234,12 +240,12 @@ impl<'a, T: Ord, Bound: Get> Ord for BoundedSlice<'a, T, Bound> { } impl<'a, T, S: Get> TryFrom<&'a [T]> for BoundedSlice<'a, T, S> { - type Error = (); + type Error = &'a [T]; fn try_from(t: &'a [T]) -> Result { if t.len() <= S::get() as usize { Ok(BoundedSlice(t, PhantomData)) } else { - Err(()) + Err(t) } } } @@ -250,12 +256,28 @@ impl<'a, T, S> From> for &'a [T] { } } +impl<'a, T, S: Get> TruncateFrom<&'a [T]> for BoundedSlice<'a, T, S> { + fn truncate_from(unbound: &'a [T]) -> Self { + BoundedSlice::::truncate_from(unbound) + } +} + impl<'a, T, S> Clone for BoundedSlice<'a, T, S> { fn clone(&self) -> Self { BoundedSlice(self.0, PhantomData) } } +impl<'a, T, S> sp_std::fmt::Debug for BoundedSlice<'a, T, S> +where + &'a [T]: sp_std::fmt::Debug, + S: Get, +{ + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + f.debug_tuple("BoundedSlice").field(&self.0).field(&S::get()).finish() + } +} + // Since a reference `&T` is always `Copy`, so is `BoundedSlice<'a, T, S>`. 
impl<'a, T, S> Copy for BoundedSlice<'a, T, S> {} @@ -692,6 +714,12 @@ impl> TryFrom> for BoundedVec { } } +impl> TruncateFrom> for BoundedVec { + fn truncate_from(unbound: Vec) -> Self { + BoundedVec::::truncate_from(unbound) + } +} + // It is okay to give a non-mutable reference of the inner vec to anyone. impl AsRef> for BoundedVec { fn as_ref(&self) -> &Vec { @@ -809,6 +837,12 @@ where } } +impl<'a, T: PartialEq, S: Get> PartialEq<&'a [T]> for BoundedSlice<'a, T, S> { + fn eq(&self, other: &&'a [T]) -> bool { + &self.0 == other + } +} + impl> PartialEq> for BoundedVec { fn eq(&self, other: &Vec) -> bool { &self.0 == other @@ -1219,6 +1253,18 @@ pub mod test { assert!(b2.is_err()); } + #[test] + fn bounded_vec_debug_works() { + let bound = BoundedVec::>::truncate_from(vec![1, 2, 3]); + assert_eq!(format!("{:?}", bound), "BoundedVec([1, 2, 3], 5)"); + } + + #[test] + fn bounded_slice_debug_works() { + let bound = BoundedSlice::>::truncate_from(&[1, 2, 3]); + assert_eq!(format!("{:?}", bound), "BoundedSlice([1, 2, 3], 5)"); + } + #[test] fn bounded_vec_sort_by_key_works() { let mut v: BoundedVec> = bounded_vec![-5, 4, 1, -3, 2]; @@ -1226,4 +1272,27 @@ pub mod test { v.sort_by_key(|k| k.abs()); assert_eq!(v, vec![1, 2, -3, 4, -5]); } + + #[test] + fn bounded_vec_truncate_from_works() { + let unbound = vec![1, 2, 3, 4, 5]; + let bound = BoundedVec::>::truncate_from(unbound.clone()); + assert_eq!(bound, vec![1, 2, 3]); + } + + #[test] + fn bounded_slice_truncate_from_works() { + let unbound = [1, 2, 3, 4, 5]; + let bound = BoundedSlice::>::truncate_from(&unbound); + assert_eq!(bound, &[1, 2, 3][..]); + } + + #[test] + fn bounded_slice_partialeq_slice_works() { + let unbound = [1, 2, 3]; + let bound = BoundedSlice::>::truncate_from(&unbound); + + assert_eq!(bound, &unbound[..]); + assert!(bound == &unbound[..]); + } } From ac176294e647f0eabd0970537567bf37ded00360 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Fri, 21 Oct 2022 09:39:23 +0300 Subject: [PATCH 270/348] Refactor service tests in `sc-network` (#12517) * Refactor service tests in `sc-network` Create a separate directory for the tests and move common network instantion related code to `mod.rs` from where it can be used by both service and chainsync tests. Use the builder pattern when creating the `TestNetwork` object to reduce code duplication between the test files. 
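For illustration, the reworked tests below end up constructing networks roughly like this (a sketch assembled from the `TestNetworkBuilder` introduced in this patch; any setter that is not called falls back to a default inside `build()`):

	let mut network = TestNetworkBuilder::new()
		.with_client(client)
		.with_chain_sync((chain_sync, chain_sync_service))
		.build();

	// or, for service-level tests, start the worker and subscribe to its events
	let (service, event_stream) = TestNetworkBuilder::new().build().start_network();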
* Update client/network/src/service/tests/mod.rs Co-authored-by: Dmitrii Markin Co-authored-by: Dmitrii Markin Co-authored-by: parity-processbot <> --- client/network/src/service.rs | 2 - .../chain_sync.rs} | 205 +------ client/network/src/service/tests/mod.rs | 297 ++++++++++ .../service/{tests.rs => tests/service.rs} | 511 ++++++------------ 4 files changed, 500 insertions(+), 515 deletions(-) rename client/network/src/service/{chainsync_tests.rs => tests/chain_sync.rs} (52%) create mode 100644 client/network/src/service/tests/mod.rs rename client/network/src/service/{tests.rs => tests/service.rs} (56%) diff --git a/client/network/src/service.rs b/client/network/src/service.rs index d24a1543c56fe..8dfd46f24a731 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -94,8 +94,6 @@ use std::{ pub use behaviour::{InboundFailure, OutboundFailure, ResponseFailure}; -#[cfg(test)] -mod chainsync_tests; mod metrics; mod out_events; #[cfg(test)] diff --git a/client/network/src/service/chainsync_tests.rs b/client/network/src/service/tests/chain_sync.rs similarity index 52% rename from client/network/src/service/chainsync_tests.rs rename to client/network/src/service/tests/chain_sync.rs index 27c0588a67200..7ff8c589d8550 100644 --- a/client/network/src/service/chainsync_tests.rs +++ b/client/network/src/service/tests/chain_sync.rs @@ -16,178 +16,26 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{config, ChainSyncInterface, NetworkWorker}; +use crate::service::tests::TestNetworkBuilder; use futures::prelude::*; use libp2p::PeerId; use sc_block_builder::BlockBuilderProvider; -use sc_client_api::{BlockBackend, HeaderBackend}; +use sc_client_api::HeaderBackend; use sc_consensus::JustificationSyncLink; use sc_network_common::{ - config::{ - NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, - TransportConfig, - }, - protocol::role::Roles, service::NetworkSyncForkRequest, - sync::{message::BlockAnnouncesHandshake, ChainSync as ChainSyncT, SyncState, SyncStatus}, -}; -use sc_network_light::light_client_requests::handler::LightClientRequestHandler; -use sc_network_sync::{ - block_request_handler::BlockRequestHandler, mock::MockChainSync, - service::mock::MockChainSyncInterface, state_request_handler::StateRequestHandler, + sync::{SyncState, SyncStatus}, }; +use sc_network_sync::{mock::MockChainSync, service::mock::MockChainSyncInterface}; use sp_core::H256; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Header as _, Zero}, + traits::{Block as BlockT, Header as _}, }; use std::{iter, sync::Arc, task::Poll}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; -type TestNetworkWorker = NetworkWorker< - substrate_test_runtime_client::runtime::Block, - substrate_test_runtime_client::runtime::Hash, - substrate_test_runtime_client::TestClient, ->; - -const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; -const PROTOCOL_NAME: &str = "/foo"; - -fn make_network( - chain_sync: Box>, - chain_sync_service: Box>, - client: Arc, -) -> (TestNetworkWorker, Arc) { - let network_config = config::NetworkConfiguration { - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: Default::default(), - }], - listen_addresses: vec![config::build_multiaddr![Memory(rand::random::())]], - transport: 
TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }; - - #[derive(Clone)] - struct PassThroughVerifier(bool); - - #[async_trait::async_trait] - impl sc_consensus::Verifier for PassThroughVerifier { - async fn verify( - &mut self, - mut block: sc_consensus::BlockImportParams, - ) -> Result< - ( - sc_consensus::BlockImportParams, - Option)>>, - ), - String, - > { - let maybe_keys = block - .header - .digest() - .log(|l| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) - .or_else(|| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus( - b"babe", - )) - }) - }) - .map(|blob| { - vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())] - }); - - block.finalized = self.0; - block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); - Ok((block, maybe_keys)) - } - } - - let import_queue = Box::new(sc_consensus::BasicQueue::new( - PassThroughVerifier(false), - Box::new(client.clone()), - None, - &sp_core::testing::TaskExecutor::new(), - None, - )); - - let protocol_id = ProtocolId::from("/test-protocol-name"); - - let fork_id = Some(String::from("test-fork-id")); - - let block_request_protocol_config = { - let (handler, protocol_config) = - BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let state_request_protocol_config = { - let (handler, protocol_config) = - StateRequestHandler::new(&protocol_id, None, client.clone(), 50); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let light_client_request_protocol_config = { - let (handler, protocol_config) = - LightClientRequestHandler::new(&protocol_id, None, client.clone()); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let block_announce_config = NonDefaultSetConfig { - notifications_protocol: BLOCK_ANNOUNCE_PROTO_NAME.into(), - fallback_names: vec![], - max_notification_size: 1024 * 1024, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::< - substrate_test_runtime_client::runtime::Block, - >::build( - Roles::from(&config::Role::Full), - client.info().best_number, - client.info().best_hash, - client - .block_hash(Zero::zero()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - ))), - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - }; - - let worker = NetworkWorker::new(config::Params { - block_announce_config, - role: config::Role::Full, - executor: None, - network_config, - chain: client.clone(), - protocol_id, - fork_id, - import_queue, - chain_sync, - chain_sync_service, - metrics_registry: None, - block_request_protocol_config, - state_request_protocol_config, - light_client_request_protocol_config, - warp_sync_protocol_config: None, - request_response_protocol_configs: Vec::new(), - }) - .unwrap(); - - (worker, client) -} - fn set_default_expecations_no_peers( chain_sync: &mut MockChainSync, ) { @@ -208,8 +56,6 @@ fn set_default_expecations_no_peers( #[async_std::test] async fn normal_network_poll_no_peers() { - let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); - // build `ChainSync` and set default expectations for it let mut chain_sync = Box::new(MockChainSync::::new()); @@ -220,11 +66,13 @@ async fn normal_network_poll_no_peers() { let chain_sync_service = Box::new(MockChainSyncInterface::::new()); - let (mut network, _) = 
make_network(chain_sync, chain_sync_service, client); + let mut network = TestNetworkBuilder::new() + .with_chain_sync((chain_sync, chain_sync_service)) + .build(); // poll the network once futures::future::poll_fn(|cx| { - let _ = network.poll_unpin(cx); + let _ = network.network().poll_unpin(cx); Poll::Ready(()) }) .await; @@ -232,8 +80,6 @@ async fn normal_network_poll_no_peers() { #[async_std::test] async fn request_justification() { - let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); - // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be // called) let chain_sync_service = @@ -253,22 +99,22 @@ async fn request_justification() { .returning(|_, _| ()); set_default_expecations_no_peers(&mut chain_sync); - let (mut network, _) = make_network(chain_sync, chain_sync_service, client); + let mut network = TestNetworkBuilder::new() + .with_chain_sync((chain_sync, chain_sync_service)) + .build(); // send "request justifiction" message and poll the network network.service().request_justification(&hash, number); futures::future::poll_fn(|cx| { - let _ = network.poll_unpin(cx); + let _ = network.network().poll_unpin(cx); Poll::Ready(()) }) .await; } #[async_std::test] -async fn clear_justification_requests(&mut self) { - let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); - +async fn clear_justification_requests() { // build `ChainSyncInterface` provider and set no expecations for it (i.e., it cannot be // called) let chain_sync_service = @@ -281,13 +127,15 @@ async fn clear_justification_requests(&mut self) { chain_sync.expect_clear_justification_requests().once().returning(|| ()); set_default_expecations_no_peers(&mut chain_sync); - let (mut network, _) = make_network(chain_sync, chain_sync_service, client); + let mut network = TestNetworkBuilder::new() + .with_chain_sync((chain_sync, chain_sync_service)) + .build(); // send "request justifiction" message and poll the network network.service().clear_justification_requests(); futures::future::poll_fn(|cx| { - let _ = network.poll_unpin(cx); + let _ = network.network().poll_unpin(cx); Poll::Ready(()) }) .await; @@ -295,8 +143,6 @@ async fn clear_justification_requests(&mut self) { #[async_std::test] async fn set_sync_fork_request() { - let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); - // build `ChainSync` and set default expectations for it let mut chain_sync = Box::new(MockChainSync::::new()); @@ -320,13 +166,15 @@ async fn set_sync_fork_request() { .once() .returning(|_, _, _| ()); - let (mut network, _) = make_network(chain_sync, Box::new(chain_sync_service), client); + let mut network = TestNetworkBuilder::new() + .with_chain_sync((chain_sync, Box::new(chain_sync_service))) + .build(); // send "set sync fork request" message and poll the network network.service().set_sync_fork_request(copy_peers, hash, number); futures::future::poll_fn(|cx| { - let _ = network.poll_unpin(cx); + let _ = network.network().poll_unpin(cx); Poll::Ready(()) }) .await; @@ -362,13 +210,16 @@ async fn on_block_finalized() { .returning(|_, _| ()); set_default_expecations_no_peers(&mut chain_sync); - let (mut network, _) = make_network(chain_sync, chain_sync_service, client); + let mut network = TestNetworkBuilder::new() + .with_client(client) + .with_chain_sync((chain_sync, chain_sync_service)) + .build(); // send "set sync fork request" message and poll the network - network.on_block_finalized(hash, 
header); + network.network().on_block_finalized(hash, header); futures::future::poll_fn(|cx| { - let _ = network.poll_unpin(cx); + let _ = network.network().poll_unpin(cx); Poll::Ready(()) }) .await; diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs new file mode 100644 index 0000000000000..f829d9d43090f --- /dev/null +++ b/client/network/src/service/tests/mod.rs @@ -0,0 +1,297 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{config, ChainSyncInterface, NetworkService, NetworkWorker}; + +use futures::prelude::*; +use libp2p::Multiaddr; +use sc_client_api::{BlockBackend, HeaderBackend}; +use sc_consensus::ImportQueue; +use sc_network_common::{ + config::{ + NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, + TransportConfig, + }, + protocol::{event::Event, role::Roles}, + service::NetworkEventStream, + sync::{message::BlockAnnouncesHandshake, ChainSync as ChainSyncT}, +}; +use sc_network_light::light_client_requests::handler::LightClientRequestHandler; +use sc_network_sync::{ + block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, + ChainSync, +}; +use sp_runtime::traits::{Block as BlockT, Header as _, Zero}; +use std::sync::Arc; +use substrate_test_runtime_client::{ + runtime::{Block as TestBlock, Hash as TestHash}, + TestClient, TestClientBuilder, TestClientBuilderExt as _, +}; + +#[cfg(test)] +mod chain_sync; +#[cfg(test)] +mod service; + +type TestNetworkWorker = NetworkWorker; +type TestNetworkService = NetworkService; + +const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; +const PROTOCOL_NAME: &str = "/foo"; + +struct TestNetwork { + network: TestNetworkWorker, +} + +impl TestNetwork { + pub fn new(network: TestNetworkWorker) -> Self { + Self { network } + } + + pub fn service(&self) -> &Arc { + &self.network.service() + } + + pub fn network(&mut self) -> &mut TestNetworkWorker { + &mut self.network + } + + pub fn start_network( + self, + ) -> (Arc, (impl Stream + std::marker::Unpin)) { + let worker = self.network; + let service = worker.service().clone(); + let event_stream = service.event_stream("test"); + + async_std::task::spawn(async move { + futures::pin_mut!(worker); + let _ = worker.await; + }); + + (service, event_stream) + } +} + +struct TestNetworkBuilder { + import_queue: Option>>, + client: Option>, + listen_addresses: Vec, + set_config: Option, + chain_sync: Option<(Box>, Box>)>, + config: Option, +} + +impl TestNetworkBuilder { + pub fn new() -> Self { + Self { + import_queue: None, + client: None, + listen_addresses: Vec::new(), + set_config: None, + chain_sync: None, + config: None, + } + } + + pub fn with_client(mut self, client: Arc) -> Self { + self.client = Some(client); + self + } + + pub fn 
with_config(mut self, config: config::NetworkConfiguration) -> Self { + self.config = Some(config); + self + } + + pub fn with_listen_addresses(mut self, addresses: Vec) -> Self { + self.listen_addresses = addresses; + self + } + + pub fn with_set_config(mut self, set_config: SetConfig) -> Self { + self.set_config = Some(set_config); + self + } + + pub fn with_chain_sync( + mut self, + chain_sync: (Box>, Box>), + ) -> Self { + self.chain_sync = Some(chain_sync); + self + } + + pub fn build(mut self) -> TestNetwork { + let client = self.client.as_mut().map_or( + Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0), + |v| v.clone(), + ); + + let network_config = self.config.unwrap_or(config::NetworkConfiguration { + extra_sets: vec![NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME.into(), + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + handshake: None, + set_config: self.set_config.unwrap_or_default(), + }], + listen_addresses: self.listen_addresses, + transport: TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); + + #[derive(Clone)] + struct PassThroughVerifier(bool); + + #[async_trait::async_trait] + impl sc_consensus::Verifier for PassThroughVerifier { + async fn verify( + &mut self, + mut block: sc_consensus::BlockImportParams, + ) -> Result< + ( + sc_consensus::BlockImportParams, + Option)>>, + ), + String, + > { + let maybe_keys = block + .header + .digest() + .log(|l| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus( + b"babe", + )) + }) + }) + .map(|blob| { + vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())] + }); + + block.finalized = self.0; + block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); + Ok((block, maybe_keys)) + } + } + + let import_queue = self.import_queue.unwrap_or(Box::new(sc_consensus::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + &sp_core::testing::TaskExecutor::new(), + None, + ))); + + let (chain_sync, chain_sync_service) = self.chain_sync.unwrap_or({ + let (chain_sync, chain_sync_service) = ChainSync::new( + match network_config.sync_mode { + config::SyncMode::Full => sc_network_common::sync::SyncMode::Full, + config::SyncMode::Fast { skip_proofs, storage_chain_mode } => + sc_network_common::sync::SyncMode::LightState { + skip_proofs, + storage_chain_mode, + }, + config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, + }, + client.clone(), + Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), + network_config.max_parallel_downloads, + None, + ) + .unwrap(); + + (Box::new(chain_sync), chain_sync_service) + }); + + let protocol_id = ProtocolId::from("test-protocol-name"); + let fork_id = Some(String::from("test-fork-id")); + + let block_request_protocol_config = { + let (handler, protocol_config) = + BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let state_request_protocol_config = { + let (handler, protocol_config) = + StateRequestHandler::new(&protocol_id, None, client.clone(), 50); + async_std::task::spawn(handler.run().boxed()); + protocol_config + }; + + let light_client_request_protocol_config = { + let (handler, protocol_config) = + LightClientRequestHandler::new(&protocol_id, None, client.clone()); + async_std::task::spawn(handler.run().boxed()); + 
protocol_config + }; + + let block_announce_config = NonDefaultSetConfig { + notifications_protocol: BLOCK_ANNOUNCE_PROTO_NAME.into(), + fallback_names: vec![], + max_notification_size: 1024 * 1024, + handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::< + substrate_test_runtime_client::runtime::Block, + >::build( + Roles::from(&config::Role::Full), + client.info().best_number, + client.info().best_hash, + client + .block_hash(Zero::zero()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + ))), + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + }; + + let worker = NetworkWorker::< + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::Hash, + substrate_test_runtime_client::TestClient, + >::new(config::Params { + block_announce_config, + role: config::Role::Full, + executor: None, + network_config, + chain: client.clone(), + protocol_id, + fork_id, + import_queue, + chain_sync, + chain_sync_service, + metrics_registry: None, + block_request_protocol_config, + state_request_protocol_config, + light_client_request_protocol_config, + warp_sync_protocol_config: None, + request_response_protocol_configs: Vec::new(), + }) + .unwrap(); + + TestNetwork::new(worker) + } +} diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests/service.rs similarity index 56% rename from client/network/src/service/tests.rs rename to client/network/src/service/tests/service.rs index 4a0ef4611da6e..90945fdcef2cf 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests/service.rs @@ -16,183 +16,22 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{config, NetworkService, NetworkWorker}; +use crate::{config, service::tests::TestNetworkBuilder, NetworkService}; use futures::prelude::*; use libp2p::PeerId; -use sc_client_api::{BlockBackend, HeaderBackend}; use sc_network_common::{ - config::{ - MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, - ProtocolId, SetConfig, TransportConfig, - }, - protocol::{event::Event, role::Roles}, - service::{NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo}, - sync::message::BlockAnnouncesHandshake, + config::{MultiaddrWithPeerId, NonDefaultSetConfig, SetConfig, TransportConfig}, + protocol::event::Event, + service::{NetworkNotification, NetworkPeers, NetworkStateInfo}, }; -use sc_network_light::light_client_requests::handler::LightClientRequestHandler; -use sc_network_sync::{ - block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, - ChainSync, -}; -use sp_consensus::block_validation::DefaultBlockAnnounceValidator; -use sp_runtime::traits::{Block as BlockT, Header as _, Zero}; use std::{sync::Arc, time::Duration}; -use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; type TestNetworkService = NetworkService< substrate_test_runtime_client::runtime::Block, substrate_test_runtime_client::runtime::Hash, >; -/// Builds a full node to be used for testing. Returns the node service and its associated events -/// stream. -/// -/// > **Note**: We return the events stream in order to not possibly lose events between the -/// > construction of the service and the moment the events stream is grabbed. 
-fn build_test_full_node( - network_config: config::NetworkConfiguration, -) -> (Arc, impl Stream) { - let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); - - #[derive(Clone)] - struct PassThroughVerifier(bool); - - #[async_trait::async_trait] - impl sc_consensus::Verifier for PassThroughVerifier { - async fn verify( - &mut self, - mut block: sc_consensus::BlockImportParams, - ) -> Result< - ( - sc_consensus::BlockImportParams, - Option)>>, - ), - String, - > { - let maybe_keys = block - .header - .digest() - .log(|l| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) - .or_else(|| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus( - b"babe", - )) - }) - }) - .map(|blob| { - vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())] - }); - - block.finalized = self.0; - block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); - Ok((block, maybe_keys)) - } - } - - let import_queue = Box::new(sc_consensus::BasicQueue::new( - PassThroughVerifier(false), - Box::new(client.clone()), - None, - &sp_core::testing::TaskExecutor::new(), - None, - )); - - let protocol_id = ProtocolId::from("/test-protocol-name"); - - let fork_id = Some(String::from("test-fork-id")); - - let block_request_protocol_config = { - let (handler, protocol_config) = - BlockRequestHandler::new(&protocol_id, None, client.clone(), 50); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let state_request_protocol_config = { - let (handler, protocol_config) = - StateRequestHandler::new(&protocol_id, None, client.clone(), 50); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let light_client_request_protocol_config = { - let (handler, protocol_config) = - LightClientRequestHandler::new(&protocol_id, None, client.clone()); - async_std::task::spawn(handler.run().boxed()); - protocol_config - }; - - let (chain_sync, chain_sync_service) = ChainSync::new( - match network_config.sync_mode { - config::SyncMode::Full => sc_network_common::sync::SyncMode::Full, - config::SyncMode::Fast { skip_proofs, storage_chain_mode } => - sc_network_common::sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, - config::SyncMode::Warp => sc_network_common::sync::SyncMode::Warp, - }, - client.clone(), - Box::new(DefaultBlockAnnounceValidator), - network_config.max_parallel_downloads, - None, - ) - .unwrap(); - - let block_announce_config = NonDefaultSetConfig { - notifications_protocol: BLOCK_ANNOUNCE_PROTO_NAME.into(), - fallback_names: vec![], - max_notification_size: 1024 * 1024, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::< - substrate_test_runtime_client::runtime::Block, - >::build( - Roles::from(&config::Role::Full), - client.info().best_number, - client.info().best_hash, - client - .block_hash(Zero::zero()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - ))), - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, - }, - }; - - let worker = NetworkWorker::new(config::Params { - block_announce_config, - role: config::Role::Full, - executor: None, - network_config, - chain: client.clone(), - protocol_id, - fork_id, - import_queue, - chain_sync: Box::new(chain_sync), - chain_sync_service, - metrics_registry: None, - block_request_protocol_config, - state_request_protocol_config, - light_client_request_protocol_config, - warp_sync_protocol_config: None, - 
request_response_protocol_configs: Vec::new(), - }) - .unwrap(); - - let service = worker.service().clone(); - let event_stream = service.event_stream("test"); - - async_std::task::spawn(async move { - futures::pin_mut!(worker); - let _ = worker.await; - }); - - (service, event_stream) -} - const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; const PROTOCOL_NAME: &str = "/foo"; @@ -206,37 +45,21 @@ fn build_nodes_one_proto() -> ( ) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: Default::default(), - }], - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }); + let (node1, events_stream1) = TestNetworkBuilder::new() + .with_listen_addresses(vec![listen_addr.clone()]) + .build() + .start_network(); - let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id(), - }], - ..Default::default() - }, - }], - listen_addresses: vec![], - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }); + let (node2, events_stream2) = TestNetworkBuilder::new() + .with_set_config(SetConfig { + reserved_nodes: vec![MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id(), + }], + ..Default::default() + }) + .build() + .start_network(); (node1, events_stream1, node2, events_stream2) } @@ -393,22 +216,15 @@ fn notifications_state_consistent() { }); } -#[test] -fn lots_of_incoming_peers_works() { +#[async_std::test] +async fn lots_of_incoming_peers_works() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (main_node, _) = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: SetConfig { in_peers: u32::MAX, ..Default::default() }, - }], - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }); + let (main_node, _) = TestNetworkBuilder::new() + .with_listen_addresses(vec![listen_addr.clone()]) + .with_set_config(SetConfig { in_peers: u32::MAX, ..Default::default() }) + .build() + .start_network(); let main_node_peer_id = main_node.local_peer_id(); @@ -417,24 +233,16 @@ fn lots_of_incoming_peers_works() { let mut background_tasks_to_wait = Vec::new(); for _ in 0..32 { - let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![], - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr.clone(), - peer_id: main_node_peer_id, - }], - ..Default::default() - }, - }], - transport: 
TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }); + let (_dialing_node, event_stream) = TestNetworkBuilder::new() + .with_set_config(SetConfig { + reserved_nodes: vec![MultiaddrWithPeerId { + multiaddr: listen_addr.clone(), + peer_id: main_node_peer_id, + }], + ..Default::default() + }) + .build() + .start_network(); background_tasks_to_wait.push(async_std::task::spawn(async move { // Create a dummy timer that will "never" fire, and that will be overwritten when we @@ -469,7 +277,7 @@ fn lots_of_incoming_peers_works() { })); } - futures::executor::block_on(async move { future::join_all(background_tasks_to_wait).await }); + future::join_all(background_tasks_to_wait).await; } #[test] @@ -531,42 +339,35 @@ fn notifications_back_pressure() { fn fallback_name_working() { // Node 1 supports the protocols "new" and "old". Node 2 only supports "old". Checks whether // they can connect. - const NEW_PROTOCOL_NAME: &str = "/new-shiny-protocol-that-isnt-PROTOCOL_NAME"; let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - - let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: NEW_PROTOCOL_NAME.into(), - fallback_names: vec![PROTOCOL_NAME.into()], - max_notification_size: 1024 * 1024, - handshake: None, - set_config: Default::default(), - }], - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }); - - let (_, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id(), - }], - ..Default::default() - }, - }], - listen_addresses: vec![], - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }); + let (node1, mut events_stream1) = TestNetworkBuilder::new() + .with_config(config::NetworkConfiguration { + extra_sets: vec![NonDefaultSetConfig { + notifications_protocol: NEW_PROTOCOL_NAME.into(), + fallback_names: vec![PROTOCOL_NAME.into()], + max_notification_size: 1024 * 1024, + handshake: None, + set_config: Default::default(), + }], + listen_addresses: vec![listen_addr.clone()], + transport: TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }) + .build() + .start_network(); + + let (_, mut events_stream2) = TestNetworkBuilder::new() + .with_set_config(SetConfig { + reserved_nodes: vec![MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id(), + }], + ..Default::default() + }) + .build() + .start_network(); let receiver = async_std::task::spawn(async move { // Wait for the `NotificationStreamOpened`. 
@@ -604,39 +405,7 @@ fn fallback_name_working() { // protocol name and verify that `SyncDisconnected` event is emitted #[async_std::test] async fn disconnect_sync_peer_using_block_announcement_protocol_name() { - let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - - let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: vec![], - max_notification_size: 1024 * 1024, - handshake: None, - set_config: Default::default(), - }], - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }); - - let (node2, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: SetConfig { - reserved_nodes: vec![MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id(), - }], - ..Default::default() - }, - }], - listen_addresses: vec![], - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new_local() - }); + let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { let mut notif_received = false; @@ -673,11 +442,19 @@ async fn disconnect_sync_peer_using_block_announcement_protocol_name() { fn ensure_listen_addresses_consistent_with_transport_memory() { let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; - let _ = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) - }); + let _ = TestNetworkBuilder::new() + .with_config(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ) + }) + .build() + .start_network(); } #[test] @@ -685,10 +462,18 @@ fn ensure_listen_addresses_consistent_with_transport_memory() { fn ensure_listen_addresses_consistent_with_transport_not_memory() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let _ = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) - }); + let _ = TestNetworkBuilder::new() + .with_config(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + ..config::NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ) + }) + .build() + .start_network(); } #[test] @@ -700,12 +485,20 @@ fn ensure_boot_node_addresses_consistent_with_transport_memory() { peer_id: PeerId::random(), }; - let _ = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - boot_nodes: vec![boot_node], - ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) - }); + let _ = TestNetworkBuilder::new() + .with_config(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: 
TransportConfig::MemoryOnly, + boot_nodes: vec![boot_node], + ..config::NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ) + }) + .build() + .start_network(); } #[test] @@ -717,11 +510,19 @@ fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { peer_id: PeerId::random(), }; - let _ = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - boot_nodes: vec![boot_node], - ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) - }); + let _ = TestNetworkBuilder::new() + .with_config(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + boot_nodes: vec![boot_node], + ..config::NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ) + }) + .build() + .start_network(); } #[test] @@ -733,12 +534,23 @@ fn ensure_reserved_node_addresses_consistent_with_transport_memory() { peer_id: PeerId::random(), }; - let _ = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - default_peers_set: SetConfig { reserved_nodes: vec![reserved_node], ..Default::default() }, - ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) - }); + let _ = TestNetworkBuilder::new() + .with_config(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: TransportConfig::MemoryOnly, + default_peers_set: SetConfig { + reserved_nodes: vec![reserved_node], + ..Default::default() + }, + ..config::NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ) + }) + .build() + .start_network(); } #[test] @@ -750,11 +562,22 @@ fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { peer_id: PeerId::random(), }; - let _ = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - default_peers_set: SetConfig { reserved_nodes: vec![reserved_node], ..Default::default() }, - ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) - }); + let _ = TestNetworkBuilder::new() + .with_config(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + default_peers_set: SetConfig { + reserved_nodes: vec![reserved_node], + ..Default::default() + }, + ..config::NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ) + }) + .build() + .start_network(); } #[test] @@ -763,12 +586,20 @@ fn ensure_public_addresses_consistent_with_transport_memory() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let public_address = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; - let _ = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - public_addresses: vec![public_address], - ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) - }); + let _ = TestNetworkBuilder::new() + .with_config(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: TransportConfig::MemoryOnly, + public_addresses: vec![public_address], + ..config::NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ) + }) + .build() + .start_network(); } #[test] @@ -777,9 +608,17 @@ fn 
ensure_public_addresses_consistent_with_transport_not_memory() { let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; let public_address = config::build_multiaddr![Memory(rand::random::())]; - let _ = build_test_full_node(config::NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - public_addresses: vec![public_address], - ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) - }); + let _ = TestNetworkBuilder::new() + .with_config(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + public_addresses: vec![public_address], + ..config::NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ) + }) + .build() + .start_network(); } From e45883548b8fafc9dcea41ee6585634412462072 Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Fri, 21 Oct 2022 16:38:53 +0300 Subject: [PATCH 271/348] Actually fix major sync detection (#12114) * Actually fix major sync detection * Introduce `SyncState::Importing` state * Add target to SyncState enum variants and add `is_major_syncing` method on it * Remove unnecessary duplicated `best_seen_block` from `SyncState` struct * Revert "Remove unnecessary duplicated `best_seen_block` from `SyncState` struct" This reverts commit bb8abd458c939881c049f69d59f3acba47c97c5c. * Add missing `websocket` feature to `libp2p` Co-authored-by: parity-processbot <> --- client/informant/src/display.rs | 63 +++++++++++++--------------- client/network/Cargo.toml | 2 +- client/network/common/src/service.rs | 2 +- client/network/common/src/sync.rs | 15 +++++-- client/network/src/service.rs | 14 ++++--- client/network/sync/src/lib.rs | 37 ++++++++-------- 6 files changed, 71 insertions(+), 62 deletions(-) diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 0441011b0ec42..3d585a9985134 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -93,42 +93,37 @@ impl InformantDisplay { (diff_bytes_inbound, diff_bytes_outbound) }; - let (level, status, target) = match ( - net_status.sync_state, - net_status.best_seen_block, - net_status.state_sync, - net_status.warp_sync, - ) { - ( - _, - _, - _, - Some(WarpSyncProgress { phase: WarpSyncPhase::DownloadingBlocks(n), .. }), - ) => ("⏩", "Block history".into(), format!(", #{}", n)), - (_, _, _, Some(warp)) => ( - "⏩", - "Warping".into(), - format!( - ", {}, {:.2} Mib", - warp.phase, - (warp.total_bytes as f32) / (1024f32 * 1024f32) + let (level, status, target) = + match (net_status.sync_state, net_status.state_sync, net_status.warp_sync) { + ( + _, + _, + Some(WarpSyncProgress { phase: WarpSyncPhase::DownloadingBlocks(n), .. 
}), + ) => ("⏩", "Block history".into(), format!(", #{}", n)), + (_, _, Some(warp)) => ( + "⏩", + "Warping".into(), + format!( + ", {}, {:.2} Mib", + warp.phase, + (warp.total_bytes as f32) / (1024f32 * 1024f32) + ), ), - ), - (_, _, Some(state), _) => ( - "⚙️ ", - "Downloading state".into(), - format!( - ", {}%, {:.2} Mib", - state.percentage, - (state.size as f32) / (1024f32 * 1024f32) + (_, Some(state), _) => ( + "⚙️ ", + "Downloading state".into(), + format!( + ", {}%, {:.2} Mib", + state.percentage, + (state.size as f32) / (1024f32 * 1024f32) + ), ), - ), - (SyncState::Idle, _, _, _) => ("💤", "Idle".into(), "".into()), - (SyncState::Downloading, None, _, _) => - ("⚙️ ", format!("Preparing{}", speed), "".into()), - (SyncState::Downloading, Some(n), None, _) => - ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{}", n)), - }; + (SyncState::Idle, _, _) => ("💤", "Idle".into(), "".into()), + (SyncState::Downloading { target }, _, _) => + ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{target}")), + (SyncState::Importing { target }, _, _) => + ("⚙️ ", format!("Preparing{}", speed), format!(", target=#{target}")), + }; if self.format.enable_color { info!( diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 9c2833ec00908..81b684af433d2 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -26,7 +26,7 @@ fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" ip_network = "0.4.1" -libp2p = { version = "0.49.0", features = ["async-std", "dns", "identify", "kad", "mdns-async-io", "mplex", "noise", "ping", "tcp", "yamux"] } +libp2p = { version = "0.49.0", features = ["async-std", "dns", "identify", "kad", "mdns-async-io", "mplex", "noise", "ping", "tcp", "yamux", "websocket"] } linked_hash_set = "0.1.3" linked-hash-map = "0.5.4" log = "0.4.17" diff --git a/client/network/common/src/service.rs b/client/network/common/src/service.rs index aa4967ba51700..54d254eac384f 100644 --- a/client/network/common/src/service.rs +++ b/client/network/common/src/service.rs @@ -98,7 +98,7 @@ where #[derive(Clone)] pub struct NetworkStatus { /// Current global sync state. - pub sync_state: SyncState, + pub sync_state: SyncState>, /// Target sync block number. pub best_seen_block: Option>, /// Number of peers participating in syncing. diff --git a/client/network/common/src/sync.rs b/client/network/common/src/sync.rs index aa761e66ebf68..dd216b2a5295a 100644 --- a/client/network/common/src/sync.rs +++ b/client/network/common/src/sync.rs @@ -44,11 +44,20 @@ pub struct PeerInfo { /// Reported sync state. #[derive(Clone, Eq, PartialEq, Debug)] -pub enum SyncState { +pub enum SyncState { /// Initial sync is complete, keep-up sync is active. Idle, /// Actively catching up with the chain. - Downloading, + Downloading { target: BlockNumber }, + /// All blocks are downloaded and are being imported. + Importing { target: BlockNumber }, +} + +impl SyncState { + /// Are we actively catching up with the chain? + pub fn is_major_syncing(&self) -> bool { + !matches!(self, SyncState::Idle) + } } /// Reported state download progress. @@ -64,7 +73,7 @@ pub struct StateDownloadProgress { #[derive(Clone)] pub struct SyncStatus { /// Current global sync state. - pub state: SyncState, + pub state: SyncState>, /// Target sync block number. pub best_seen_block: Option>, /// Number of peers participating in syncing. 
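
For illustration, a self-contained sketch of the sync-state classification this commit introduces, mirroring the `Downloading`/`Importing` split made in `ChainSync::status()`. Plain `u64` block numbers stand in for `NumberFor<B>`, and the `classify` helper and the threshold constant are assumptions made for the example, not items from the crate.

// Simplified mirror of the new enum: both active states carry the sync target.
#[derive(Clone, Eq, PartialEq, Debug)]
enum SyncState<BlockNumber> {
    Idle,
    Downloading { target: BlockNumber },
    Importing { target: BlockNumber },
}

impl<BlockNumber> SyncState<BlockNumber> {
    // Any state other than `Idle` counts as major syncing.
    fn is_major_syncing(&self) -> bool {
        !matches!(self, SyncState::Idle)
    }
}

// Illustrative threshold; the crate uses its own `MAJOR_SYNC_BLOCKS` constant.
const MAJOR_SYNC_BLOCKS: u64 = 5;

// Mirrors the decision in `status()`: downloading when the median best block seen
// among peers is far ahead of the local best and not yet queued, importing when the
// blocks are already queued but not yet imported, idle otherwise.
fn classify(median_seen: Option<u64>, best_block: u64, best_queued: u64) -> SyncState<u64> {
    match median_seen {
        Some(target) if target > best_block && target - best_block > MAJOR_SYNC_BLOCKS =>
            if target > best_queued {
                SyncState::Downloading { target }
            } else {
                SyncState::Importing { target }
            },
        _ => SyncState::Idle,
    }
}

fn main() {
    // Close to the tip: idle, so not major syncing.
    assert!(!classify(Some(10), 9, 9).is_major_syncing());
    // Far behind and nothing queued yet: downloading.
    assert!(matches!(classify(Some(100), 10, 10), SyncState::Downloading { target: 100 }));
    // Blocks queued up to the target but not imported: importing.
    assert!(matches!(classify(Some(100), 10, 100), SyncState::Importing { target: 100 }));
}
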
diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 8dfd46f24a731..89dc2ff4405b2 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -70,7 +70,7 @@ use sc_network_common::{ NotificationSender as NotificationSenderT, NotificationSenderError, NotificationSenderReady as NotificationSenderReadyT, Signature, SigningError, }, - sync::{SyncState, SyncStatus}, + sync::SyncStatus, ExHashT, }; use sc_peerset::PeersetHandle; @@ -1997,11 +1997,13 @@ where *this.external_addresses.lock() = external_addresses; } - let is_major_syncing = - match this.network_service.behaviour_mut().user_protocol_mut().sync_state().state { - SyncState::Idle => false, - SyncState::Downloading => true, - }; + let is_major_syncing = this + .network_service + .behaviour_mut() + .user_protocol_mut() + .sync_state() + .state + .is_major_syncing(); this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 76d7d624be523..63f63f7188cfe 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -410,13 +410,21 @@ where /// Returns the current sync status. fn status(&self) -> SyncStatus { - let best_seen = self.best_seen(); - let sync_state = if let Some(n) = best_seen { + let median_seen = self.median_seen(); + let best_seen_block = + median_seen.and_then(|median| (median > self.best_queued_number).then_some(median)); + let sync_state = if let Some(target) = median_seen { // A chain is classified as downloading if the provided best block is - // more than `MAJOR_SYNC_BLOCKS` behind the best block. + // more than `MAJOR_SYNC_BLOCKS` behind the best block or as importing + // if the same can be said about queued blocks. let best_block = self.client.info().best_number; - if n > best_block && n - best_block > MAJOR_SYNC_BLOCKS.into() { - SyncState::Downloading + if target > best_block && target - best_block > MAJOR_SYNC_BLOCKS.into() { + // If target is not queued, we're downloading, otherwise importing. + if target > self.best_queued_number { + SyncState::Downloading { target } + } else { + SyncState::Importing { target } + } } else { SyncState::Idle } @@ -437,7 +445,7 @@ where SyncStatus { state: sync_state, - best_seen_block: best_seen, + best_seen_block, num_peers: self.peers.len() as u32, queued_blocks: self.queue_blocks.len() as u32, state_sync: self.state_sync.as_ref().map(|s| s.progress()), @@ -693,7 +701,7 @@ where trace!(target: "sync", "Too many blocks in the queue."); return Box::new(std::iter::empty()) } - let major_sync = self.status().state == SyncState::Downloading; + let is_major_syncing = self.status().state.is_major_syncing(); let attrs = self.required_block_attributes(); let blocks = &mut self.blocks; let fork_targets = &mut self.fork_targets; @@ -703,7 +711,7 @@ where let client = &self.client; let queue = &self.queue_blocks; let allowed_requests = self.allowed_requests.take(); - let max_parallel = if major_sync { 1 } else { self.max_parallel_downloads }; + let max_parallel = if is_major_syncing { 1 } else { self.max_parallel_downloads }; let gap_sync = &mut self.gap_sync; let iter = self.peers.iter_mut().filter_map(move |(&id, peer)| { if !peer.state.is_available() || !allowed_requests.contains(&id) { @@ -1797,8 +1805,8 @@ where Ok((sync, Box::new(ChainSyncInterfaceHandle::new(tx)))) } - /// Returns the best seen block number if we don't have that block yet, `None` otherwise. 
- fn best_seen(&self) -> Option> { + /// Returns the median seen block number. + fn median_seen(&self) -> Option> { let mut best_seens = self.peers.values().map(|p| p.best_number).collect::>(); if best_seens.is_empty() { @@ -1807,12 +1815,7 @@ where let middle = best_seens.len() / 2; // Not the "perfect median" when we have an even number of peers. - let median = *best_seens.select_nth_unstable(middle).1; - if median > self.best_queued_number { - Some(median) - } else { - None - } + Some(*best_seens.select_nth_unstable(middle).1) } } @@ -1854,7 +1857,7 @@ where ); } - let origin = if !gap && self.status().state != SyncState::Downloading { + let origin = if !gap && !self.status().state.is_major_syncing() { BlockOrigin::NetworkBroadcast } else { BlockOrigin::NetworkInitialSync From 1802a115e8480fd7a4654d45c85b58c2189c508a Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 21 Oct 2022 17:05:51 +0200 Subject: [PATCH 272/348] BlockId removal: refactor: Backend::begin_state_operation (#12541) It changes the arguments of `Backend::begin_state_operation` from: block: `BlockId` to: hash: `&Block::Hash` This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) --- client/api/src/backend.rs | 2 +- client/api/src/in_mem.rs | 5 +- client/db/benches/state_access.rs | 3 +- client/db/src/lib.rs | 73 ++++++++++++----------------- client/service/src/client/client.rs | 3 +- 5 files changed, 35 insertions(+), 51 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 864a9af5685d8..77a9da3535655 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -467,7 +467,7 @@ pub trait Backend: AuxStore + Send + Sync { fn begin_state_operation( &self, operation: &mut Self::BlockImportOperation, - block: BlockId, + block: &Block::Hash, ) -> sp_blockchain::Result<()>; /// Commit block insertion. 
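
For illustration, a minimal sketch of what the call-site migration looks like after this signature change; every type below is a simplified stand-in for the sc-client-api traits, not the real API.

use std::collections::HashMap;

type Hash = [u8; 32];

#[derive(Default)]
struct Operation {
    old_state_at: Option<Hash>,
}

struct Backend {
    // number -> hash index, standing in for the blockchain backend
    by_number: HashMap<u64, Hash>,
}

impl Backend {
    fn hash(&self, number: u64) -> Option<Hash> {
        self.by_number.get(&number).copied()
    }

    // New-style signature: the operation is started from a hash; no block-id
    // lookup happens inside the backend anymore.
    fn begin_state_operation(&self, op: &mut Operation, block: &Hash) {
        op.old_state_at = Some(*block);
    }
}

fn main() {
    let backend = Backend { by_number: HashMap::from([(0u64, [0u8; 32])]) };
    let mut op = Operation::default();

    // Old call sites looked like `begin_state_operation(&mut op, BlockId::Number(0))`;
    // they now resolve the hash first and pass it by reference.
    let hash = backend.hash(0).expect("genesis hash exists");
    backend.begin_state_operation(&mut op, &hash);
    assert_eq!(op.old_state_at, Some(hash));
}
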
diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 1cb61ba1a0b0a..29f7eade9fa6f 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -695,10 +695,9 @@ where fn begin_state_operation( &self, operation: &mut Self::BlockImportOperation, - block: BlockId, + block: &Block::Hash, ) -> sp_blockchain::Result<()> { - let hash = self.blockchain.expect_block_hash_from_id(&block)?; - operation.old_state = self.state_at(&hash)?; + operation.old_state = self.state_at(block)?; Ok(()) } diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index 912a9b028f638..ccceae1f5b419 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -22,7 +22,6 @@ use sc_client_api::{Backend as _, BlockImportOperation, NewBlockState, StateBack use sc_client_db::{Backend, BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; use sp_core::H256; use sp_runtime::{ - generic::BlockId, testing::{Block as RawBlock, ExtrinsicWrapper, Header}, StateVersion, Storage, }; @@ -67,7 +66,7 @@ fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 for i in 0..10 { let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, BlockId::Hash(parent_hash)).unwrap(); + db.begin_state_operation(&mut op, &parent_hash).unwrap(); let mut header = Header { number, diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 14ebafa01bec1..e212b382716f1 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1961,13 +1961,12 @@ impl sc_client_api::backend::Backend for Backend { fn begin_state_operation( &self, operation: &mut Self::BlockImportOperation, - block: BlockId, + block: &Block::Hash, ) -> ClientResult<()> { - let hash = self.blockchain.expect_block_hash_from_id(&block)?; - if block.is_pre_genesis() { + if *block == Default::default() { operation.old_state = self.empty_state()?; } else { - operation.old_state = self.state_at(&hash)?; + operation.old_state = self.state_at(block)?; } operation.commit_state = true; @@ -2442,13 +2441,9 @@ pub(crate) mod tests { }; let header_hash = header.hash(); - let block_id = if number == 0 { - BlockId::Hash(Default::default()) - } else { - BlockId::Number(number - 1) - }; + let block_hash = if number == 0 { Default::default() } else { parent_hash }; let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_id).unwrap(); + backend.begin_state_operation(&mut op, &block_hash).unwrap(); op.set_block_data(header, Some(body), None, None, NewBlockState::Best).unwrap(); if let Some(index) = transaction_index { op.update_transaction_index(index).unwrap(); @@ -2489,21 +2484,17 @@ pub(crate) mod tests { assert!(db.blockchain().hash(i).unwrap().is_none()); { - let id = if i == 0 { - BlockId::Hash(Default::default()) + let hash = if i == 0 { + Default::default() } else { - BlockId::Number(i - 1) + db.blockchain.hash(i - 1).unwrap().unwrap() }; let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, id).unwrap(); + db.begin_state_operation(&mut op, &hash).unwrap(); let header = Header { number: i, - parent_hash: if i == 0 { - Default::default() - } else { - db.blockchain.hash(i - 1).unwrap().unwrap() - }, + parent_hash: hash, state_root: Default::default(), digest: Default::default(), extrinsics_root: Default::default(), @@ -2585,7 +2576,7 @@ pub(crate) mod tests { { let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); + db.begin_state_operation(&mut op, 
&hash).unwrap(); let mut header = Header { number: 1, parent_hash: hash, @@ -2626,9 +2617,7 @@ pub(crate) mod tests { let hash = { let mut op = backend.begin_operation().unwrap(); - backend - .begin_state_operation(&mut op, BlockId::Hash(Default::default())) - .unwrap(); + backend.begin_state_operation(&mut op, &Default::default()).unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -2665,7 +2654,7 @@ pub(crate) mod tests { let hashof1 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); + backend.begin_state_operation(&mut op, &hash).unwrap(); let mut header = Header { number: 1, parent_hash: hash, @@ -2702,7 +2691,7 @@ pub(crate) mod tests { let hashof2 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap(); + backend.begin_state_operation(&mut op, &hashof1).unwrap(); let mut header = Header { number: 2, parent_hash: hashof1, @@ -2736,7 +2725,7 @@ pub(crate) mod tests { let hashof3 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(2)).unwrap(); + backend.begin_state_operation(&mut op, &hashof2).unwrap(); let mut header = Header { number: 3, parent_hash: hashof2, @@ -3070,14 +3059,14 @@ pub(crate) mod tests { let block4 = insert_header(&backend, 4, block3, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); + backend.begin_state_operation(&mut op, &block0).unwrap(); op.mark_finalized(&block1, None).unwrap(); op.mark_finalized(&block2, None).unwrap(); backend.commit_operation(op).unwrap(); } { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + backend.begin_state_operation(&mut op, &block2).unwrap(); op.mark_finalized(&block3, None).unwrap(); op.mark_finalized(&block4, None).unwrap(); backend.commit_operation(op).unwrap(); @@ -3091,9 +3080,7 @@ pub(crate) mod tests { let hash0 = { let mut op = backend.begin_operation().unwrap(); - backend - .begin_state_operation(&mut op, BlockId::Hash(Default::default())) - .unwrap(); + backend.begin_state_operation(&mut op, &Default::default()).unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -3131,7 +3118,7 @@ pub(crate) mod tests { let hash1 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); + backend.begin_state_operation(&mut op, &hash0).unwrap(); let mut header = Header { number: 1, parent_hash: hash0, @@ -3180,7 +3167,7 @@ pub(crate) mod tests { let block2 = insert_header(&backend, 2, block1, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); + backend.begin_state_operation(&mut op, &block0).unwrap(); op.mark_finalized(&block2, None).unwrap(); backend.commit_operation(op).unwrap_err(); } @@ -3208,7 +3195,7 @@ pub(crate) mod tests { { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); for i in 1..5 { op.mark_finalized(&blocks[i], None).unwrap(); } @@ -3243,7 +3230,7 @@ pub(crate) mod tests { } let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, 
BlockId::Hash(blocks[4])).unwrap(); + backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); for i in 1..3 { op.mark_finalized(&blocks[i], None).unwrap(); } @@ -3300,7 +3287,7 @@ pub(crate) mod tests { .unwrap(); let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); op.mark_head(&blocks[4]).unwrap(); backend.commit_operation(op).unwrap(); @@ -3309,7 +3296,7 @@ pub(crate) mod tests { for i in 1..5 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(blocks[i])).unwrap(); + backend.begin_state_operation(&mut op, &blocks[i]).unwrap(); op.mark_finalized(&blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); } @@ -3369,13 +3356,13 @@ pub(crate) mod tests { ) .unwrap(); let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); op.mark_head(&blocks[4]).unwrap(); backend.commit_operation(op).unwrap(); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); op.mark_finalized(&blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); } @@ -3501,7 +3488,7 @@ pub(crate) mod tests { for i in 1..10 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); + backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); op.mark_finalized(&blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); @@ -3701,7 +3688,7 @@ pub(crate) mod tests { let block3 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap(); + backend.begin_state_operation(&mut op, &block1).unwrap(); let header = Header { number: 3, parent_hash: block2, @@ -3720,7 +3707,7 @@ pub(crate) mod tests { let block4 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + backend.begin_state_operation(&mut op, &block2).unwrap(); let header = Header { number: 4, parent_hash: block3, @@ -3739,7 +3726,7 @@ pub(crate) mod tests { let block3_fork = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + backend.begin_state_operation(&mut op, &block2).unwrap(); let header = Header { number: 3, parent_hash: block2, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index b18c6d226706b..c0414a3bebc42 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -586,8 +586,7 @@ where Some(storage_changes) => { let storage_changes = match storage_changes { sc_consensus::StorageChanges::Changes(storage_changes) => { - self.backend - .begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; + self.backend.begin_state_operation(&mut operation.op, &parent_hash)?; let (main_sc, child_sc, offchain_sc, tx, _, tx_index) = storage_changes.into_inner(); From c7a86c24b74ef19fda9805ad4f71c15d5016d402 Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Fri, 21 Oct 2022 17:46:25 -0400 Subject: [PATCH 273/348] use headers on templates (#12546) --- .maintain/frame-weight-template.hbs | 18 
+----------------- scripts/run_all_benchmarks.sh | 2 ++ 2 files changed, 3 insertions(+), 17 deletions(-) diff --git a/.maintain/frame-weight-template.hbs b/.maintain/frame-weight-template.hbs index 96731770ff2ea..360279129980f 100644 --- a/.maintain/frame-weight-template.hbs +++ b/.maintain/frame-weight-template.hbs @@ -1,20 +1,4 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - +{{header}} //! Autogenerated weights for {{pallet}} //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} diff --git a/scripts/run_all_benchmarks.sh b/scripts/run_all_benchmarks.sh index 9aac58be45029..dd5d2e182baf2 100755 --- a/scripts/run_all_benchmarks.sh +++ b/scripts/run_all_benchmarks.sh @@ -121,6 +121,7 @@ for PALLET in "${PALLETS[@]}"; do --wasm-execution=compiled \ --heap-pages=4096 \ --output="$WEIGHT_FILE" \ + --header="./HEADER-APACHE2" \ --template=./.maintain/frame-weight-template.hbs 2>&1 ) if [ $? -ne 0 ]; then @@ -137,6 +138,7 @@ OUTPUT=$( --execution=wasm \ --wasm-execution=compiled \ --weight-path="./frame/support/src/weights/" \ + --header="./HEADER-APACHE2" \ --warmup=10 \ --repeat=100 2>&1 ) From 4d29da2156d587a6b8e4efbe10e412efaedbbd4e Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Sat, 22 Oct 2022 15:36:26 +0300 Subject: [PATCH 274/348] Make `NetworkService` callable for `ChainSync` (#12542) Introduce a middleware called `NetworkServiceProvider` which the `ChainSync` can use to communicate with `NetworkService`. `ChainSync` is given a `NetworkServiceHandle` which it uses to call `NetworkServiceProvider` which then dispatches the calls to `NetworkService` on behalf of `ChainSync`. This change will allow `ChainSync` to disconnect and report peers and in the future it'll be possible to send requests and notifications through the `NetworkServiceProvider`. `NetworkServiceProvider` is needed only until the `ChainSync` object has been removed from `Protocol`. After that, a normal `NetworkService` handle can be passed onto `ChainSync` and these changes can be deprecated. 
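
For illustration, a minimal synchronous sketch of the handle/provider pattern described above: the handle only enqueues commands, and the provider drains them and dispatches to the network service. It uses std::sync::mpsc in place of the unbounded tracing channel, and the peer-id and command types are simplified stand-ins rather than the actual sc-network types.

use std::sync::mpsc::{channel, Receiver, Sender};

type PeerId = u64;

// Commands the sync code wants executed on its behalf.
enum ToServiceCommand {
    DisconnectPeer(PeerId),
    ReportPeer(PeerId, i32),
}

// Handle given to the sync code: it can only enqueue commands.
#[derive(Clone)]
struct NetworkServiceHandle {
    tx: Sender<ToServiceCommand>,
}

impl NetworkServiceHandle {
    fn disconnect_peer(&self, who: PeerId) {
        let _ = self.tx.send(ToServiceCommand::DisconnectPeer(who));
    }
    fn report_peer(&self, who: PeerId, change: i32) {
        let _ = self.tx.send(ToServiceCommand::ReportPeer(who, change));
    }
}

// Provider owning the receiving end; it dispatches commands to the real service.
struct NetworkServiceProvider {
    rx: Receiver<ToServiceCommand>,
}

impl NetworkServiceProvider {
    fn new() -> (Self, NetworkServiceHandle) {
        let (tx, rx) = channel();
        (Self { rx }, NetworkServiceHandle { tx })
    }

    fn run(self, service: impl Fn(ToServiceCommand)) {
        while let Ok(command) = self.rx.recv() {
            service(command);
        }
    }
}

fn main() {
    let (provider, handle) = NetworkServiceProvider::new();
    handle.disconnect_peer(7);
    handle.report_peer(7, -100);
    drop(handle); // close the channel so `run` terminates after draining

    provider.run(|command| match command {
        ToServiceCommand::DisconnectPeer(who) => println!("disconnect peer {}", who),
        ToServiceCommand::ReportPeer(who, change) => println!("report peer {}: {}", who, change),
    });
}
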
Co-authored-by: parity-processbot <> --- .../network/src/service/tests/chain_sync.rs | 182 +++++++++++++++++- client/network/src/service/tests/mod.rs | 28 ++- client/network/sync/src/lib.rs | 41 +++- client/network/sync/src/service/mock.rs | 48 ++++- client/network/sync/src/service/mod.rs | 1 + client/network/sync/src/service/network.rs | 128 ++++++++++++ client/network/sync/src/tests.rs | 4 +- client/network/test/src/lib.rs | 12 +- client/service/src/builder.rs | 12 +- 9 files changed, 443 insertions(+), 13 deletions(-) create mode 100644 client/network/sync/src/service/network.rs diff --git a/client/network/src/service/tests/chain_sync.rs b/client/network/src/service/tests/chain_sync.rs index 7ff8c589d8550..21149459413f4 100644 --- a/client/network/src/service/tests/chain_sync.rs +++ b/client/network/src/service/tests/chain_sync.rs @@ -16,7 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::service::tests::TestNetworkBuilder; +use crate::{ + config, + service::tests::{TestNetworkBuilder, BLOCK_ANNOUNCE_PROTO_NAME}, +}; use futures::prelude::*; use libp2p::PeerId; @@ -24,16 +27,23 @@ use sc_block_builder::BlockBuilderProvider; use sc_client_api::HeaderBackend; use sc_consensus::JustificationSyncLink; use sc_network_common::{ + config::{MultiaddrWithPeerId, SetConfig}, + protocol::event::Event, service::NetworkSyncForkRequest, sync::{SyncState, SyncStatus}, }; -use sc_network_sync::{mock::MockChainSync, service::mock::MockChainSyncInterface}; +use sc_network_sync::{mock::MockChainSync, service::mock::MockChainSyncInterface, ChainSync}; use sp_core::H256; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as _}, }; -use std::{iter, sync::Arc, task::Poll}; +use std::{ + iter, + sync::{Arc, RwLock}, + task::Poll, + time::Duration, +}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; fn set_default_expecations_no_peers( @@ -224,3 +234,169 @@ async fn on_block_finalized() { }) .await; } + +// report from mock import queue that importing a justification was not successful +// and verify that connection to the peer is closed +#[async_std::test] +async fn invalid_justification_imported() { + struct DummyImportQueue( + Arc< + RwLock< + Option<( + PeerId, + substrate_test_runtime_client::runtime::Hash, + sp_runtime::traits::NumberFor, + )>, + >, + >, + ); + + impl sc_consensus::ImportQueue for DummyImportQueue { + fn import_blocks( + &mut self, + _origin: sp_consensus::BlockOrigin, + _blocks: Vec< + sc_consensus::IncomingBlock, + >, + ) { + } + + fn import_justifications( + &mut self, + _who: sc_consensus::import_queue::RuntimeOrigin, + _hash: substrate_test_runtime_client::runtime::Hash, + _number: sp_runtime::traits::NumberFor, + _justifications: sp_runtime::Justifications, + ) { + } + + fn poll_actions( + &mut self, + _cx: &mut futures::task::Context, + link: &mut dyn sc_consensus::Link, + ) { + if let Some((peer, hash, number)) = *self.0.read().unwrap() { + link.justification_imported(peer, &hash, number, false); + } + } + } + + let justification_info = Arc::new(RwLock::new(None)); + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (service1, mut event_stream1) = TestNetworkBuilder::new() + .with_import_queue(Box::new(DummyImportQueue(justification_info.clone()))) + .with_listen_addresses(vec![listen_addr.clone()]) + .build() + .start_network(); + + let (service2, mut event_stream2) = TestNetworkBuilder::new() + 
.with_set_config(SetConfig { + reserved_nodes: vec![MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: service1.local_peer_id, + }], + ..Default::default() + }) + .build() + .start_network(); + + async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { + let mut notif_received = false; + let mut sync_received = false; + while !notif_received || !sync_received { + match stream.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => notif_received = true, + Event::SyncConnected { .. } => sync_received = true, + _ => {}, + }; + } + } + + wait_for_events(&mut event_stream1).await; + wait_for_events(&mut event_stream2).await; + + { + let mut info = justification_info.write().unwrap(); + *info = Some((service2.local_peer_id, H256::random(), 1337u64)); + } + + let wait_disconnection = async { + while !std::matches!(event_stream1.next().await, Some(Event::SyncDisconnected { .. })) {} + }; + + if async_std::future::timeout(Duration::from_secs(5), wait_disconnection) + .await + .is_err() + { + panic!("did not receive disconnection event in time"); + } +} + +#[async_std::test] +async fn disconnect_peer_using_chain_sync_handle() { + let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (chain_sync_network_provider, chain_sync_network_handle) = + sc_network_sync::service::network::NetworkServiceProvider::new(); + let handle_clone = chain_sync_network_handle.clone(); + + let (chain_sync, chain_sync_service) = ChainSync::new( + sc_network_common::sync::SyncMode::Full, + client.clone(), + Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), + 1u32, + None, + chain_sync_network_handle.clone(), + ) + .unwrap(); + + let (node1, mut event_stream1) = TestNetworkBuilder::new() + .with_listen_addresses(vec![listen_addr.clone()]) + .with_chain_sync((Box::new(chain_sync), chain_sync_service)) + .with_chain_sync_network((chain_sync_network_provider, chain_sync_network_handle)) + .with_client(client.clone()) + .build() + .start_network(); + + let (node2, mut event_stream2) = TestNetworkBuilder::new() + .with_set_config(SetConfig { + reserved_nodes: vec![MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id, + }], + ..Default::default() + }) + .with_client(client.clone()) + .build() + .start_network(); + + async fn wait_for_events(stream: &mut (impl Stream + std::marker::Unpin)) { + let mut notif_received = false; + let mut sync_received = false; + while !notif_received || !sync_received { + match stream.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => notif_received = true, + Event::SyncConnected { .. } => sync_received = true, + _ => {}, + }; + } + } + + wait_for_events(&mut event_stream1).await; + wait_for_events(&mut event_stream2).await; + + handle_clone.disconnect_peer(node2.local_peer_id, BLOCK_ANNOUNCE_PROTO_NAME.into()); + + let wait_disconnection = async { + while !std::matches!(event_stream1.next().await, Some(Event::SyncDisconnected { .. 
})) {} + }; + + if async_std::future::timeout(Duration::from_secs(5), wait_disconnection) + .await + .is_err() + { + panic!("did not receive disconnection event in time"); + } +} diff --git a/client/network/src/service/tests/mod.rs b/client/network/src/service/tests/mod.rs index f829d9d43090f..ef25616a07b0d 100644 --- a/client/network/src/service/tests/mod.rs +++ b/client/network/src/service/tests/mod.rs @@ -33,7 +33,9 @@ use sc_network_common::{ }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ - block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, + block_request_handler::BlockRequestHandler, + service::network::{NetworkServiceHandle, NetworkServiceProvider}, + state_request_handler::StateRequestHandler, ChainSync, }; use sp_runtime::traits::{Block as BlockT, Header as _, Zero}; @@ -93,6 +95,7 @@ struct TestNetworkBuilder { listen_addresses: Vec, set_config: Option, chain_sync: Option<(Box>, Box>)>, + chain_sync_network: Option<(NetworkServiceProvider, NetworkServiceHandle)>, config: Option, } @@ -104,6 +107,7 @@ impl TestNetworkBuilder { listen_addresses: Vec::new(), set_config: None, chain_sync: None, + chain_sync_network: None, config: None, } } @@ -136,6 +140,19 @@ impl TestNetworkBuilder { self } + pub fn with_chain_sync_network( + mut self, + chain_sync_network: (NetworkServiceProvider, NetworkServiceHandle), + ) -> Self { + self.chain_sync_network = Some(chain_sync_network); + self + } + + pub fn with_import_queue(mut self, import_queue: Box>) -> Self { + self.import_queue = Some(import_queue); + self + } + pub fn build(mut self) -> TestNetwork { let client = self.client.as_mut().map_or( Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0), @@ -199,6 +216,9 @@ impl TestNetworkBuilder { None, ))); + let (chain_sync_network_provider, chain_sync_network_handle) = + self.chain_sync_network.unwrap_or(NetworkServiceProvider::new()); + let (chain_sync, chain_sync_service) = self.chain_sync.unwrap_or({ let (chain_sync, chain_sync_service) = ChainSync::new( match network_config.sync_mode { @@ -214,6 +234,7 @@ impl TestNetworkBuilder { Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator), network_config.max_parallel_downloads, None, + chain_sync_network_handle, ) .unwrap(); @@ -292,6 +313,11 @@ impl TestNetworkBuilder { }) .unwrap(); + let service = worker.service().clone(); + async_std::task::spawn(async move { + let _ = chain_sync_network_provider.run(service).await; + }); + TestNetwork::new(worker) } } diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 63f63f7188cfe..f369bdb47e1c6 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -271,6 +271,8 @@ pub struct ChainSync { gap_sync: Option>, /// Channel for receiving service commands service_rx: TracingUnboundedReceiver>, + /// Handle for communicating with `NetworkService` + _network_service: service::network::NetworkServiceHandle, } /// All the data we have about a Peer that we are trying to sync with @@ -1775,6 +1777,7 @@ where block_announce_validator: Box + Send>, max_parallel_downloads: u32, warp_sync_provider: Option>>, + _network_service: service::network::NetworkServiceHandle, ) -> Result<(Self, Box>), ClientError> { let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync"); @@ -1800,6 +1803,7 @@ where import_existing: false, gap_sync: None, service_rx, + _network_service, }; sync.reset_sync_start_point()?; Ok((sync, 
Box::new(ChainSyncInterfaceHandle::new(tx)))) @@ -2670,6 +2674,7 @@ fn validate_blocks( #[cfg(test)] mod test { use super::*; + use crate::service::network::NetworkServiceProvider; use futures::{executor::block_on, future::poll_fn}; use sc_block_builder::BlockBuilderProvider; use sc_network_common::sync::message::{BlockData, BlockState, FromBlock}; @@ -2691,9 +2696,17 @@ mod test { let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let peer_id = PeerId::random(); - let (mut sync, _) = - ChainSync::new(SyncMode::Full, client.clone(), block_announce_validator, 1, None) - .unwrap(); + let (_chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); + let (mut sync, _) = ChainSync::new( + SyncMode::Full, + client.clone(), + block_announce_validator, + 1, + None, + chain_sync_network_handle, + ) + .unwrap(); let (a1_hash, a1_number) = { let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -2739,12 +2752,16 @@ mod test { #[test] fn restart_doesnt_affect_peers_downloading_finality_data() { let mut client = Arc::new(TestClientBuilder::new().build()); + let (_chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); + let (mut sync, _) = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), 1, None, + chain_sync_network_handle, ) .unwrap(); @@ -2905,6 +2922,8 @@ mod test { sp_tracing::try_init_simple(); let mut client = Arc::new(TestClientBuilder::new().build()); + let (_chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); let (mut sync, _) = ChainSync::new( SyncMode::Full, @@ -2912,6 +2931,7 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + chain_sync_network_handle, ) .unwrap(); @@ -3019,6 +3039,8 @@ mod test { }; let mut client = Arc::new(TestClientBuilder::new().build()); + let (_chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); let info = client.info(); let (mut sync, _) = ChainSync::new( @@ -3027,6 +3049,7 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + chain_sync_network_handle, ) .unwrap(); @@ -3140,6 +3163,8 @@ mod test { fn can_sync_huge_fork() { sp_tracing::try_init_simple(); + let (_chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) .map(|_| build_block(&mut client, None, false)) @@ -3170,6 +3195,7 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + chain_sync_network_handle, ) .unwrap(); @@ -3269,6 +3295,8 @@ mod test { fn syncs_fork_without_duplicate_requests() { sp_tracing::try_init_simple(); + let (_chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) .map(|_| build_block(&mut client, None, false)) @@ -3299,6 +3327,7 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 5, None, + chain_sync_network_handle, ) .unwrap(); @@ -3419,6 +3448,8 @@ mod test { #[test] fn removes_target_fork_on_disconnect() { sp_tracing::try_init_simple(); + let (_chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); let mut client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); @@ -3428,6 +3459,7 @@ mod test { 
Box::new(DefaultBlockAnnounceValidator), 1, None, + chain_sync_network_handle, ) .unwrap(); @@ -3450,6 +3482,8 @@ mod test { #[test] fn can_import_response_with_missing_blocks() { sp_tracing::try_init_simple(); + let (_chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); let mut client2 = Arc::new(TestClientBuilder::new().build()); let blocks = (0..4).map(|_| build_block(&mut client2, None, false)).collect::>(); @@ -3461,6 +3495,7 @@ mod test { Box::new(DefaultBlockAnnounceValidator), 1, None, + chain_sync_network_handle, ) .unwrap(); diff --git a/client/network/sync/src/service/mock.rs b/client/network/sync/src/service/mock.rs index e283907b392d1..c146e1ec07b48 100644 --- a/client/network/sync/src/service/mock.rs +++ b/client/network/sync/src/service/mock.rs @@ -16,10 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use libp2p::PeerId; -use sc_network_common::service::NetworkSyncForkRequest; +use sc_network_common::service::{NetworkPeers, NetworkSyncForkRequest}; use sp_runtime::traits::{Block as BlockT, NumberFor}; +pub use libp2p::{identity::error::SigningError, kad::record::Key as KademliaKey}; +use libp2p::{Multiaddr, PeerId}; +use sc_network_common::{config::MultiaddrWithPeerId, protocol::ProtocolName}; +use sc_peerset::ReputationChange; +use std::collections::HashSet; + mockall::mock! { pub ChainSyncInterface {} @@ -29,3 +34,42 @@ mockall::mock! { fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor); } } + +mockall::mock! { + pub NetworkServiceHandle {} +} + +// Mocked `Network` for `ChainSync`-related tests +mockall::mock! { + pub Network {} + + impl NetworkPeers for Network { + fn set_authorized_peers(&self, peers: HashSet); + fn set_authorized_only(&self, reserved_only: bool); + fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr); + fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange); + fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName); + fn accept_unreserved_peers(&self); + fn deny_unreserved_peers(&self); + fn add_reserved_peer(&self, peer: MultiaddrWithPeerId) -> Result<(), String>; + fn remove_reserved_peer(&self, peer_id: PeerId); + fn set_reserved_peers( + &self, + protocol: ProtocolName, + peers: HashSet, + ) -> Result<(), String>; + fn add_peers_to_reserved_set( + &self, + protocol: ProtocolName, + peers: HashSet, + ) -> Result<(), String>; + fn remove_peers_from_reserved_set(&self, protocol: ProtocolName, peers: Vec); + fn add_to_peers_set( + &self, + protocol: ProtocolName, + peers: HashSet, + ) -> Result<(), String>; + fn remove_from_peers_set(&self, protocol: ProtocolName, peers: Vec); + fn sync_num_connected(&self) -> usize; + } +} diff --git a/client/network/sync/src/service/mod.rs b/client/network/sync/src/service/mod.rs index d64d9bbd1b01f..692aa26985458 100644 --- a/client/network/sync/src/service/mod.rs +++ b/client/network/sync/src/service/mod.rs @@ -20,3 +20,4 @@ pub mod chain_sync; pub mod mock; +pub mod network; diff --git a/client/network/sync/src/service/network.rs b/client/network/sync/src/service/network.rs new file mode 100644 index 0000000000000..44ed177661264 --- /dev/null +++ b/client/network/sync/src/service/network.rs @@ -0,0 +1,128 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use futures::StreamExt; +use libp2p::PeerId; +use sc_network_common::{protocol::ProtocolName, service::NetworkPeers}; +use sc_peerset::ReputationChange; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::sync::Arc; + +/// Network-related services required by `sc-network-sync` +pub trait Network: NetworkPeers {} + +impl Network for T where T: NetworkPeers {} + +/// Network service provider for `ChainSync` +/// +/// It runs as an asynchronous task and listens to commands coming from `ChainSync` and +/// calls the `NetworkService` on its behalf. +pub struct NetworkServiceProvider { + rx: TracingUnboundedReceiver, +} + +/// Commands that `ChainSync` wishes to send to `NetworkService` +pub enum ToServiceCommand { + /// Call `NetworkPeers::disconnect_peer()` + DisconnectPeer(PeerId, ProtocolName), + + /// Call `NetworkPeers::report_peer()` + ReportPeer(PeerId, ReputationChange), +} + +/// Handle that is (temporarily) passed to `ChainSync` so it can +/// communicate with `NetworkService` through `SyncingEngine` +#[derive(Clone)] +pub struct NetworkServiceHandle { + tx: TracingUnboundedSender, +} + +impl NetworkServiceHandle { + /// Create new service handle + pub fn new(tx: TracingUnboundedSender) -> NetworkServiceHandle { + Self { tx } + } + + /// Report peer + pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + let _ = self.tx.unbounded_send(ToServiceCommand::ReportPeer(who, cost_benefit)); + } + + /// Disconnect peer + pub fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { + let _ = self.tx.unbounded_send(ToServiceCommand::DisconnectPeer(who, protocol)); + } +} + +impl NetworkServiceProvider { + /// Create new `NetworkServiceProvider` + pub fn new() -> (Self, NetworkServiceHandle) { + let (tx, rx) = tracing_unbounded("mpsc_network_service_provider"); + + (Self { rx }, NetworkServiceHandle::new(tx)) + } + + /// Run the `NetworkServiceProvider` + pub async fn run(mut self, service: Arc) { + while let Some(inner) = self.rx.next().await { + match inner { + ToServiceCommand::DisconnectPeer(peer, protocol_name) => + service.disconnect_peer(peer, protocol_name), + ToServiceCommand::ReportPeer(peer, reputation_change) => + service.report_peer(peer, reputation_change), + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::service::mock::MockNetwork; + + // typical pattern in `Protocol` code where peer is disconnected + // and then reported + #[async_std::test] + async fn disconnect_and_report_peer() { + let (provider, handle) = NetworkServiceProvider::new(); + + let peer = PeerId::random(); + let proto = ProtocolName::from("test-protocol"); + let proto_clone = proto.clone(); + let change = sc_peerset::ReputationChange::new_fatal("test-change"); + + let mut mock_network = MockNetwork::new(); + mock_network + 
.expect_disconnect_peer() + .withf(move |in_peer, in_proto| &peer == in_peer && &proto == in_proto) + .once() + .returning(|_, _| ()); + mock_network + .expect_report_peer() + .withf(move |in_peer, in_change| &peer == in_peer && &change == in_change) + .once() + .returning(|_, _| ()); + + async_std::task::spawn(async move { + provider.run(Arc::new(mock_network)).await; + }); + + handle.disconnect_peer(peer, proto_clone); + handle.report_peer(peer, change); + } +} diff --git a/client/network/sync/src/tests.rs b/client/network/sync/src/tests.rs index 47483c4ac440d..479c78bfdea97 100644 --- a/client/network/sync/src/tests.rs +++ b/client/network/sync/src/tests.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{ChainSync, ForkTarget}; +use crate::{service::network::NetworkServiceProvider, ChainSync, ForkTarget}; use libp2p::PeerId; use sc_network_common::{service::NetworkSyncForkRequest, sync::ChainSync as ChainSyncT}; @@ -29,12 +29,14 @@ use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _ // poll `ChainSync` and verify that a new sync fork request has been registered #[async_std::test] async fn delegate_to_chainsync() { + let (_chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let (mut chain_sync, chain_sync_service) = ChainSync::new( sc_network_common::sync::SyncMode::Full, Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0), Box::new(DefaultBlockAnnounceValidator), 1u32, None, + chain_sync_network_handle, ) .unwrap(); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 5460cc7d52461..c9b99fbc6af10 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -61,8 +61,8 @@ use sc_network_common::{ }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ - block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, - warp_request_handler, ChainSync, + block_request_handler::BlockRequestHandler, service::network::NetworkServiceProvider, + state_request_handler::StateRequestHandler, warp_request_handler, ChainSync, }; use sc_service::client::Client; use sp_blockchain::{ @@ -864,6 +864,8 @@ where let block_announce_validator = config .block_announce_validator .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)); + let (chain_sync_network_provider, chain_sync_network_handle) = + NetworkServiceProvider::new(); let (chain_sync, chain_sync_service) = ChainSync::new( match network_config.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, @@ -878,6 +880,7 @@ where block_announce_validator, network_config.max_parallel_downloads, Some(warp_sync), + chain_sync_network_handle, ) .unwrap(); let block_announce_config = chain_sync.get_block_announce_proto_config( @@ -915,6 +918,11 @@ where trace!(target: "test_network", "Peer identifier: {}", network.service().local_peer_id()); + let service = network.service().clone(); + async_std::task::spawn(async move { + chain_sync_network_provider.run(service).await; + }); + self.mut_peers(move |peers| { for peer in peers.iter_mut() { peer.network diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 1a16268839054..3cb064ec814c5 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -46,7 +46,8 @@ use sc_network_common::{ }; use 
sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ - block_request_handler::BlockRequestHandler, state_request_handler::StateRequestHandler, + block_request_handler::BlockRequestHandler, service::network::NetworkServiceProvider, + state_request_handler::StateRequestHandler, warp_request_handler::RequestHandler as WarpSyncRequestHandler, ChainSync, }; use sc_rpc::{ @@ -844,6 +845,7 @@ where protocol_config }; + let (chain_sync_network_provider, chain_sync_network_handle) = NetworkServiceProvider::new(); let (chain_sync, chain_sync_service) = ChainSync::new( match config.network.sync_mode { SyncMode::Full => sc_network_common::sync::SyncMode::Full, @@ -855,7 +857,9 @@ where block_announce_validator, config.network.max_parallel_downloads, warp_sync_provider, + chain_sync_network_handle, )?; + let block_announce_config = chain_sync.get_block_announce_proto_config( protocol_id.clone(), &config.chain_spec.fork_id().map(ToOwned::to_owned), @@ -926,7 +930,13 @@ where Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }), config.prometheus_config.as_ref().map(|config| &config.registry), )?; + spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run()); + spawn_handle.spawn( + "chain-sync-network-service-provider", + Some("networking"), + chain_sync_network_provider.run(network.clone()), + ); let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); From 3bbdba405d42d7023eeb297efb03a3ec4ccf9563 Mon Sep 17 00:00:00 2001 From: Dmitrii Markin Date: Mon, 24 Oct 2022 14:47:58 +0300 Subject: [PATCH 275/348] Base Kademlia protocol name on genesis hash and fork ID (#12545) --- client/network/src/discovery.rs | 132 +++++++++++++++++++++++--------- client/network/src/service.rs | 10 ++- 2 files changed, 104 insertions(+), 38 deletions(-) diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 712c3af97f58e..00fc78061293d 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -46,6 +46,7 @@ //! active mechanism that asks nodes for the addresses they are listening on. Whenever we learn //! of a node's address, you must call `add_self_reported_address`. +use array_bytes::bytes2hex; use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; @@ -101,7 +102,7 @@ pub struct DiscoveryConfig { discovery_only_if_under_num: u64, enable_mdns: bool, kademlia_disjoint_query_paths: bool, - kademlia_protocol_id: Option, + kademlia_protocols: Vec>, } impl DiscoveryConfig { @@ -116,7 +117,7 @@ impl DiscoveryConfig { discovery_only_if_under_num: std::u64::MAX, enable_mdns: false, kademlia_disjoint_query_paths: false, - kademlia_protocol_id: None, + kademlia_protocols: Vec::new(), } } @@ -161,9 +162,18 @@ impl DiscoveryConfig { } /// Add discovery via Kademlia for the given protocol. - pub fn with_kademlia(&mut self, id: ProtocolId) -> &mut Self { - self.kademlia_protocol_id = Some(id); - + /// + /// Currently accepts `protocol_id`. This should be removed once all the nodes + /// are upgraded to genesis hash- and fork ID-based Kademlia protocol name. 
+ pub fn with_kademlia>( + &mut self, + genesis_hash: Hash, + fork_id: Option<&str>, + protocol_id: &ProtocolId, + ) -> &mut Self { + self.kademlia_protocols = Vec::new(); + self.kademlia_protocols.push(kademlia_protocol_name(genesis_hash, fork_id)); + self.kademlia_protocols.push(legacy_kademlia_protocol_name(protocol_id)); self } @@ -185,14 +195,12 @@ impl DiscoveryConfig { discovery_only_if_under_num, enable_mdns, kademlia_disjoint_query_paths, - kademlia_protocol_id, + kademlia_protocols, } = self; - let kademlia = kademlia_protocol_id.map(|protocol_id| { - let proto_name = protocol_name_from_protocol_id(&protocol_id); - + let kademlia = if !kademlia_protocols.is_empty() { let mut config = KademliaConfig::default(); - config.set_protocol_names(std::iter::once(proto_name.into()).collect()); + config.set_protocol_names(kademlia_protocols.into_iter().map(Into::into).collect()); // By default Kademlia attempts to insert all peers into its routing table once a // dialing attempt succeeds. In order to control which peer is added, disable the // auto-insertion and instead add peers manually. @@ -206,8 +214,10 @@ impl DiscoveryConfig { kad.add_address(peer_id, addr.clone()); } - kad - }); + Some(kad) + } else { + None + }; DiscoveryBehaviour { permanent_addresses, @@ -866,35 +876,50 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } -// NB: If this protocol name derivation is changed, check if -// `DiscoveryBehaviour::new_handler` is still correct. -fn protocol_name_from_protocol_id(id: &ProtocolId) -> Vec { +/// Legacy (fallback) Kademlia protocol name based on `protocol_id`. +fn legacy_kademlia_protocol_name(id: &ProtocolId) -> Vec { let mut v = vec![b'/']; v.extend_from_slice(id.as_ref().as_bytes()); v.extend_from_slice(b"/kad"); v } +/// Kademlia protocol name based on `genesis_hash` and `fork_id`. +fn kademlia_protocol_name>(genesis_hash: Hash, fork_id: Option<&str>) -> Vec { + let genesis_hash_hex = bytes2hex("", genesis_hash.as_ref()); + if let Some(fork_id) = fork_id { + format!("/{}/{}/kad", genesis_hash_hex, fork_id).as_bytes().into() + } else { + format!("/{}/kad", genesis_hash_hex).as_bytes().into() + } +} + #[cfg(test)] mod tests { - use super::{protocol_name_from_protocol_id, DiscoveryConfig, DiscoveryOut}; + use super::{ + kademlia_protocol_name, legacy_kademlia_protocol_name, DiscoveryConfig, DiscoveryOut, + }; use futures::prelude::*; use libp2p::{ core::{ transport::{MemoryTransport, Transport}, upgrade, }, - identity::Keypair, + identity::{ed25519, Keypair}, noise, swarm::{Swarm, SwarmEvent}, - yamux, Multiaddr, PeerId, + yamux, Multiaddr, }; use sc_network_common::config::ProtocolId; + use sp_core::hash::H256; use std::{collections::HashSet, task::Poll}; #[test] fn discovery_working() { let mut first_swarm_peer_id_and_addr = None; + + let genesis_hash = H256::from_low_u64_be(1); + let fork_id = Some("test-fork-id"); let protocol_id = ProtocolId::from("dot"); // Build swarms whose behaviour is `DiscoveryBehaviour`, each aware of @@ -919,7 +944,7 @@ mod tests { .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) - .with_kademlia(protocol_id.clone()); + .with_kademlia(genesis_hash, fork_id, &protocol_id); config.finish() }; @@ -972,12 +997,19 @@ mod tests { } }) .unwrap(); + // Test both genesis hash-based and legacy + // protocol names. 
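			// For example, derived from the two helpers above: with this test's genesis hash of
			// `H256::from_low_u64_be(1)` and fork ID `"test-fork-id"`, `kademlia_protocol_name`
			// yields "/<genesis hash as 64 hex characters>/test-fork-id/kad", while
			// `legacy_kademlia_protocol_name(&ProtocolId::from("dot"))` yields "/dot/kad".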
+ let protocol_name = if swarm_n % 2 == 0 { + kademlia_protocol_name(genesis_hash, fork_id) + } else { + legacy_kademlia_protocol_name(&protocol_id) + }; swarms[swarm_n] .0 .behaviour_mut() .add_self_reported_address( &other, - &[protocol_name_from_protocol_id(&protocol_id)], + &[protocol_name], addr, ); @@ -1012,6 +1044,8 @@ mod tests { #[test] fn discovery_ignores_peers_with_unknown_protocols() { + let supported_genesis_hash = H256::from_low_u64_be(1); + let unsupported_genesis_hash = H256::from_low_u64_be(2); let supported_protocol_id = ProtocolId::from("a"); let unsupported_protocol_id = ProtocolId::from("b"); @@ -1022,19 +1056,34 @@ mod tests { .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) - .with_kademlia(supported_protocol_id.clone()); + .with_kademlia(supported_genesis_hash, None, &supported_protocol_id); config.finish() }; - let remote_peer_id = PeerId::random(); - let remote_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + let predictable_peer_id = |bytes: &[u8; 32]| { + Keypair::Ed25519(ed25519::Keypair::from( + ed25519::SecretKey::from_bytes(bytes.to_owned()).unwrap(), + )) + .public() + .to_peer_id() + }; + + let remote_peer_id = predictable_peer_id(b"00000000000000000000000000000001"); + let remote_addr: Multiaddr = "/memory/1".parse().unwrap(); + let another_peer_id = predictable_peer_id(b"00000000000000000000000000000002"); + let another_addr: Multiaddr = "/memory/2".parse().unwrap(); - // Add remote peer with unsupported protocol. + // Try adding remote peers with unsupported protocols. discovery.add_self_reported_address( &remote_peer_id, - &[protocol_name_from_protocol_id(&unsupported_protocol_id)], + &[kademlia_protocol_name(unsupported_genesis_hash, None)], remote_addr.clone(), ); + discovery.add_self_reported_address( + &another_peer_id, + &[legacy_kademlia_protocol_name(&unsupported_protocol_id)], + another_addr.clone(), + ); { let kademlia = discovery.kademlia.as_mut().unwrap(); @@ -1045,23 +1094,34 @@ mod tests { .is_empty(), "Expect peer with unsupported protocol not to be added." ); + assert!( + kademlia + .kbucket(another_peer_id) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect peer with unsupported protocol not to be added." + ); } - // Add remote peer with supported protocol. + // Add remote peers with supported protocols. discovery.add_self_reported_address( &remote_peer_id, - &[protocol_name_from_protocol_id(&supported_protocol_id)], + &[kademlia_protocol_name(supported_genesis_hash, None)], remote_addr.clone(), ); - - let kademlia = discovery.kademlia.as_mut().unwrap(); - assert_eq!( - 1, - kademlia - .kbucket(remote_peer_id) - .expect("Remote peer id not to be equal to local peer id.") - .num_entries(), - "Expect peer with supported protocol to be added." + discovery.add_self_reported_address( + &another_peer_id, + &[legacy_kademlia_protocol_name(&supported_protocol_id)], + another_addr.clone(), ); + + { + let kademlia = discovery.kademlia.as_mut().unwrap(); + assert_eq!( + 2, + kademlia.kbuckets().fold(0, |acc, bucket| acc + bucket.num_entries()), + "Expect peers with supported protocol to be added." 
+ ); + } } } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 89dc2ff4405b2..5ffd36007f530 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -76,7 +76,7 @@ use sc_network_common::{ use sc_peerset::PeersetHandle; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::HeaderBackend; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::{ cmp, collections::{HashMap, HashSet}, @@ -282,7 +282,13 @@ where config.discovery_limit( u64::from(params.network_config.default_peers_set.out_peers) + 15, ); - config.with_kademlia(params.protocol_id.clone()); + let genesis_hash = params + .chain + .hash(Zero::zero()) + .ok() + .flatten() + .expect("Genesis block exists; qed"); + config.with_kademlia(genesis_hash, params.fork_id.as_deref(), ¶ms.protocol_id); config.with_dht_random_walk(params.network_config.enable_dht_random_walk); config.allow_non_globals_in_dht(params.network_config.allow_non_globals_in_dht); config.use_kademlia_disjoint_query_paths( From 433a6f725c56403d09ecde838c5509059df92e3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Mon, 24 Oct 2022 19:48:04 +0200 Subject: [PATCH 276/348] contracts: Allow indeterministic instructions off-chain (#12469) * Allow indetermistic instructions off-chain * Apply suggestions from code review Co-authored-by: Sasha Gryaznov * fmt Co-authored-by: Sasha Gryaznov --- bin/node/runtime/src/lib.rs | 35 +- frame/contracts/README.md | 29 +- .../fixtures/delegate_call_simple.wat | 50 ++ .../contracts/fixtures/float_instruction.wat | 11 + frame/contracts/src/benchmarking/code.rs | 4 +- frame/contracts/src/benchmarking/mod.rs | 6 +- frame/contracts/src/exec.rs | 82 ++- frame/contracts/src/lib.rs | 39 +- frame/contracts/src/migration.rs | 197 ++++--- frame/contracts/src/schedule.rs | 23 +- frame/contracts/src/tests.rs | 545 ++++++++++++++++-- frame/contracts/src/wasm/code_cache.rs | 2 +- frame/contracts/src/wasm/mod.rs | 41 +- frame/contracts/src/wasm/prepare.rs | 30 +- frame/contracts/src/wasm/runtime.rs | 1 + 15 files changed, 926 insertions(+), 169 deletions(-) create mode 100644 frame/contracts/fixtures/delegate_call_simple.wat create mode 100644 frame/contracts/fixtures/float_instruction.wat diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index f137b36eff036..5ba89e6595e72 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1959,7 +1959,16 @@ impl_runtime_apis! { input_data: Vec, ) -> pallet_contracts_primitives::ContractExecResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); - Contracts::bare_call(origin, dest, value, gas_limit, storage_deposit_limit, input_data, true) + Contracts::bare_call( + origin, + dest, + value, + gas_limit, + storage_deposit_limit, + input_data, + true, + pallet_contracts::Determinism::Deterministic, + ) } fn instantiate( @@ -1973,23 +1982,41 @@ impl_runtime_apis! 
{
 			) -> pallet_contracts_primitives::ContractInstantiateResult<AccountId, Balance>
 		{
 			let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block);
-			Contracts::bare_instantiate(origin, value, gas_limit, storage_deposit_limit, code, data, salt, true)
+			Contracts::bare_instantiate(
+				origin,
+				value,
+				gas_limit,
+				storage_deposit_limit,
+				code,
+				data,
+				salt,
+				true
+			)
 		}

 		fn upload_code(
 			origin: AccountId,
 			code: Vec<u8>,
 			storage_deposit_limit: Option<Balance>,
+			determinism: pallet_contracts::Determinism,
 		) -> pallet_contracts_primitives::CodeUploadResult<Hash, Balance> {
-			Contracts::bare_upload_code(origin, code, storage_deposit_limit)
+			Contracts::bare_upload_code(
+				origin,
+				code,
+				storage_deposit_limit,
+				determinism,
+			)
 		}

 		fn get_storage(
 			address: AccountId,
 			key: Vec<u8>,
 		) -> pallet_contracts_primitives::GetStorageResult {
-			Contracts::get_storage(address, key)
+			Contracts::get_storage(
+				address,
+				key
+			)
 		}
 	}
diff --git a/frame/contracts/README.md b/frame/contracts/README.md
index bd5e58d89d1ce..18d16889a3fe8 100644
--- a/frame/contracts/README.md
+++ b/frame/contracts/README.md
@@ -37,12 +37,37 @@ changes still persist.

 One gas is equivalent to one [weight](https://docs.substrate.io/v3/runtime/weights-and-fees)
 which is defined as one picosecond of execution time on the runtime's reference machine.

-### Notable Scenarios
+### Revert Behaviour

-Contract call failures are not always cascading. When failures occur in a sub-call, they do not "bubble up",
+Contract call failures are not cascading. When failures occur in a sub-call, they do not "bubble up",
 and the call will only revert at the specific contract level. For example, if contract A calls contract B, and B
 fails, A can decide how to handle that failure, either proceeding or reverting A's changes.

+### Offchain Execution
+
+In general, a contract execution needs to be deterministic so that all nodes come to the same
+conclusion when executing it. To that end we disallow any instructions that could cause
+indeterminism, most notably floating point arithmetic. That said, sometimes contracts
+are executed off-chain and hence are not subject to consensus: code that is only executed by a
+single node and implicitly trusted by other actors is such a case, trusted execution environments
+being one example. To that end we allow the execution of indeterministic code for off-chain usages
+with the following constraints:
+
+1. No contract can ever be instantiated from indeterministic code. The only way to execute
+the code is to use a delegate call from a deterministic contract.
+2. The code that wants to use this feature needs to depend on `pallet-contracts` and use `bare_call`
+directly. This makes sure that by default `pallet-contracts` does not expose any indeterminism.
+
+## How to use
+
+When setting up the `Schedule` for your runtime, make sure to set `InstructionWeights::fallback`
+to a non-zero value. The default is `0` and prevents the upload of any non-deterministic code.
+
+Indeterministic code can be deployed on-chain by passing `Determinism::AllowIndeterminism`
+to `upload_code`. A deterministic contract can then delegate call into it, if and only if it
+is called via `bare_call` with `Determinism::AllowIndeterminism` passed to it, as in the sketch below.
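A minimal sketch of that off-chain flow, assuming a runtime type `Runtime` with `pallet-contracts`
configured, a deterministic proxy contract already instantiated at `proxy_addr` (for example one
built from the `delegate_call_simple.wat` fixture), a `caller` account, and `code_hash` pointing to
previously uploaded indeterministic code; the weight and balance values are placeholders:

    // Run off-chain only, e.g. from an RPC handler or an offchain worker, never from a transaction.
    use codec::Encode;
    use pallet_contracts::Determinism;

    let result = pallet_contracts::Pallet::<Runtime>::bare_call(
        caller,              // account on whose behalf the call is made (assumed to exist)
        proxy_addr,          // deterministic contract that performs the delegate call
        0u32.into(),         // no value transferred
        Weight::MAX,         // generous gas limit; execution is not metered by consensus here
        None,                // no storage deposit limit
        code_hash.encode(),  // input: hash of the indeterministic code to delegate call into
        false,               // do not collect debug messages
        Determinism::AllowIndeterminism,
    );
    assert!(result.result.is_ok());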
**Never use +this argument when the contract is called from an on-chain transaction.** + ## Interface ### Dispatchable functions diff --git a/frame/contracts/fixtures/delegate_call_simple.wat b/frame/contracts/fixtures/delegate_call_simple.wat new file mode 100644 index 0000000000000..24ae5a13e33e5 --- /dev/null +++ b/frame/contracts/fixtures/delegate_call_simple.wat @@ -0,0 +1,50 @@ +;; Just delegate call into the passed code hash and assert success. +(module + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_delegate_call" (func $seal_delegate_call (param i32 i32 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 3 3)) + + ;; [0, 32) buffer where input is copied + + ;; [32, 36) size of the input buffer + (data (i32.const 32) "\20") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call") + ;; Reading "callee" code_hash + (call $seal_input (i32.const 0) (i32.const 32)) + + ;; assert input size == 32 + (call $assert + (i32.eq + (i32.load (i32.const 32)) + (i32.const 32) + ) + ) + + ;; Delegate call into passed code hash + (call $assert + (i32.eq + (call $seal_delegate_call + (i32.const 0) ;; Set no call flags + (i32.const 0) ;; Pointer to "callee" code_hash. + (i32.const 0) ;; Input is ignored + (i32.const 0) ;; Length of the input + (i32.const 4294967295) ;; u32 max sentinel value: do not copy output + (i32.const 0) ;; Length is ignored in this case + ) + (i32.const 0) + ) + ) + ) + + (func (export "deploy")) +) diff --git a/frame/contracts/fixtures/float_instruction.wat b/frame/contracts/fixtures/float_instruction.wat new file mode 100644 index 0000000000000..c19b5c12cdcec --- /dev/null +++ b/frame/contracts/fixtures/float_instruction.wat @@ -0,0 +1,11 @@ +;; Module that contains a float instruction which is illegal in deterministic mode +(module + (func (export "call") + f32.const 1 + drop + ) + (func (export "deploy") + f32.const 2 + drop + ) +) diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index 32ee2dbf93914..b14b107f34c90 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -24,7 +24,7 @@ //! we define this simple definition of a contract that can be passed to `create_code` that //! compiles it down into a `WasmModule` that can be used as a contract's code. -use crate::Config; +use crate::{Config, Determinism}; use frame_support::traits::Get; use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::Hash; @@ -554,7 +554,7 @@ where fn inject_gas_metering(module: Module) -> Module { let schedule = T::Schedule::get(); - let gas_rules = schedule.rules(&module); + let gas_rules = schedule.rules(&module, Determinism::Deterministic); wasm_instrument::gas_metering::inject(module, &gas_rules, "seal0").unwrap() } diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index 86c7d2df674c7..a952eeb2cbd3a 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -371,7 +371,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. 
} = WasmModule::::sized(c, Location::Call); let origin = RawOrigin::Signed(caller.clone()); - }: _(origin, code, None) + }: _(origin, code, None, Determinism::Deterministic) verify { // uploading the code reserves some balance in the callers account assert!(T::Currency::reserved_balance(&caller) > 0u32.into()); @@ -386,7 +386,7 @@ benchmarks! { T::Currency::make_free_balance_be(&caller, caller_funding::()); let WasmModule { code, hash, .. } = WasmModule::::dummy(); let origin = RawOrigin::Signed(caller.clone()); - let uploaded = >::bare_upload_code(caller.clone(), code, None)?; + let uploaded = >::bare_upload_code(caller.clone(), code, None, Determinism::Deterministic)?; assert_eq!(uploaded.code_hash, hash); assert_eq!(uploaded.deposit, T::Currency::reserved_balance(&caller)); assert!(>::code_exists(&hash)); @@ -2894,6 +2894,7 @@ benchmarks! { None, data, false, + Determinism::Deterministic, ) .result?; } @@ -2941,6 +2942,7 @@ benchmarks! { None, data, false, + Determinism::Deterministic, ) .result?; } diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index bf35410d0bd4b..7955f076b84c4 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -18,7 +18,7 @@ use crate::{ gas::GasMeter, storage::{self, Storage, WriteOutcome}, - BalanceOf, CodeHash, Config, ContractInfo, ContractInfoOf, Error, Event, Nonce, + BalanceOf, CodeHash, Config, ContractInfo, ContractInfoOf, Determinism, Error, Event, Nonce, Pallet as Contracts, Schedule, }; use frame_support::{ @@ -355,6 +355,9 @@ pub trait Executable: Sized { /// Size of the instrumented code in bytes. fn code_len(&self) -> u32; + + /// The code does not contain any instructions which could lead to indeterminism. + fn is_deterministic(&self) -> bool; } /// The complete call stack of a contract execution. @@ -395,6 +398,8 @@ pub struct Stack<'a, T: Config, E> { /// All the bytes added to this field should be valid UTF-8. The buffer has no defined /// structure and is intended to be shown to users as-is for debugging purposes. debug_message: Option<&'a mut Vec>, + /// The determinism requirement of this call stack. + determinism: Determinism, /// No executable is held by the struct but influences its behaviour. 
_phantom: PhantomData, } @@ -601,6 +606,7 @@ where value: BalanceOf, input_data: Vec, debug_message: Option<&'a mut Vec>, + determinism: Determinism, ) -> Result { let (mut stack, executable) = Self::new( FrameArgs::Call { dest, cached_info: None, delegated_call: None }, @@ -610,6 +616,7 @@ where schedule, value, debug_message, + determinism, )?; stack.run(executable, input_data) } @@ -648,6 +655,7 @@ where schedule, value, debug_message, + Determinism::Deterministic, )?; let account_id = stack.top_frame().account_id.clone(); stack.run(executable, input_data).map(|ret| (account_id, ret)) @@ -662,9 +670,17 @@ where schedule: &'a Schedule, value: BalanceOf, debug_message: Option<&'a mut Vec>, + determinism: Determinism, ) -> Result<(Self, E), ExecError> { - let (first_frame, executable, nonce) = - Self::new_frame(args, value, gas_meter, storage_meter, Weight::zero(), schedule)?; + let (first_frame, executable, nonce) = Self::new_frame( + args, + value, + gas_meter, + storage_meter, + Weight::zero(), + schedule, + determinism, + )?; let stack = Self { origin, schedule, @@ -676,6 +692,7 @@ where first_frame, frames: Default::default(), debug_message, + determinism, _phantom: Default::default(), }; @@ -693,6 +710,7 @@ where storage_meter: &mut storage::meter::GenericMeter, gas_limit: Weight, schedule: &Schedule, + determinism: Determinism, ) -> Result<(Frame, E, Option), ExecError> { let (account_id, contract_info, executable, delegate_caller, entry_point, nonce) = match frame_args { @@ -729,6 +747,15 @@ where }, }; + // `AllowIndeterminism` will only be ever set in case of off-chain execution. + // Instantiations are never allowed even when executing off-chain. + if !(executable.is_deterministic() || + (matches!(determinism, Determinism::AllowIndeterminism) && + matches!(entry_point, ExportedFunction::Call))) + { + return Err(Error::::Indeterministic.into()) + } + let frame = Frame { delegate_caller, value_transferred, @@ -775,6 +802,7 @@ where nested_storage, gas_limit, self.schedule, + self.determinism, )?; self.frames.push(frame); Ok(executable) @@ -1328,15 +1356,18 @@ where } fn set_code_hash(&mut self, hash: CodeHash) -> Result<(), DispatchError> { + let frame = top_frame_mut!(self); + if !E::from_storage(hash, self.schedule, &mut frame.nested_gas)?.is_deterministic() { + return Err(>::Indeterministic.into()) + } E::add_user(hash)?; - let top_frame = self.top_frame_mut(); - let prev_hash = top_frame.contract_info().code_hash; + let prev_hash = frame.contract_info().code_hash; E::remove_user(prev_hash); - top_frame.contract_info().code_hash = hash; + frame.contract_info().code_hash = hash; Contracts::::deposit_event( - vec![T::Hashing::hash_of(&top_frame.account_id), hash, prev_hash], + vec![T::Hashing::hash_of(&frame.account_id), hash, prev_hash], Event::ContractCodeUpdated { - contract: top_frame.account_id.clone(), + contract: frame.account_id.clone(), new_code_hash: hash, old_code_hash: prev_hash, }, @@ -1513,6 +1544,10 @@ mod tests { fn code_len(&self) -> u32 { 0 } + + fn is_deterministic(&self) -> bool { + true + } } fn exec_success() -> ExecResult { @@ -1551,6 +1586,7 @@ mod tests { value, vec![], None, + Determinism::Deterministic, ), Ok(_) ); @@ -1604,6 +1640,7 @@ mod tests { value, vec![], None, + Determinism::Deterministic, ) .unwrap(); @@ -1645,6 +1682,7 @@ mod tests { value, vec![], None, + Determinism::Deterministic, ) .unwrap(); @@ -1680,6 +1718,7 @@ mod tests { 55, vec![], None, + Determinism::Deterministic, ) .unwrap(); @@ -1731,6 +1770,7 @@ mod tests { 0, vec![], None, + 
Determinism::Deterministic, ); let output = result.unwrap(); @@ -1763,6 +1803,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic, ); let output = result.unwrap(); @@ -1793,6 +1834,7 @@ mod tests { 0, vec![1, 2, 3, 4], None, + Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -1873,6 +1915,7 @@ mod tests { value, vec![], None, + Determinism::Deterministic, ); assert_matches!(result, Ok(_)); @@ -1918,6 +1961,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic, ); assert_matches!(result, Ok(_)); @@ -1951,6 +1995,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -1980,6 +2025,7 @@ mod tests { 0, vec![0], None, + Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -2007,6 +2053,7 @@ mod tests { 0, vec![0], None, + Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -2042,6 +2089,7 @@ mod tests { 0, vec![0], None, + Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -2077,6 +2125,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic, ); assert_matches!(result, Ok(_)); @@ -2235,6 +2284,7 @@ mod tests { min_balance * 10, vec![], None, + Determinism::Deterministic, ), Ok(_) ); @@ -2299,6 +2349,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic, ), Ok(_) ); @@ -2384,6 +2435,7 @@ mod tests { 0, vec![0], None, + Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); @@ -2450,6 +2502,7 @@ mod tests { 0, vec![], Some(&mut debug_buffer), + Determinism::Deterministic, ) .unwrap(); }); @@ -2483,6 +2536,7 @@ mod tests { 0, vec![], Some(&mut debug_buffer), + Determinism::Deterministic, ); assert!(result.is_err()); }); @@ -2516,6 +2570,7 @@ mod tests { 0, CHARLIE.encode(), None, + Determinism::Deterministic )); // Calling into oneself fails @@ -2529,6 +2584,7 @@ mod tests { 0, BOB.encode(), None, + Determinism::Deterministic ) .map_err(|e| e.error), >::ReentranceDenied, @@ -2567,6 +2623,7 @@ mod tests { 0, vec![0], None, + Determinism::Deterministic ) .map_err(|e| e.error), >::ReentranceDenied, @@ -2601,6 +2658,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic, ) .unwrap(); @@ -2683,6 +2741,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic, ) .unwrap(); @@ -2886,6 +2945,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic )); }); } @@ -3012,6 +3072,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic )); }); } @@ -3047,6 +3108,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic )); }); } @@ -3082,6 +3144,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic )); }); } @@ -3143,6 +3206,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic )); }); } @@ -3204,6 +3268,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic )); }); } @@ -3235,6 +3300,7 @@ mod tests { 0, vec![], None, + Determinism::Deterministic, ); assert_matches!(result, Ok(_)); }); diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index d48a71b85e9fe..b2eb0439d6ced 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -132,6 +132,7 @@ pub use crate::{ migration::Migration, pallet::*, schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, + wasm::Determinism, }; type CodeHash = ::Hash; @@ -206,7 +207,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; /// The current storage version. 
- const STORAGE_VERSION: StorageVersion = StorageVersion::new(8); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(9); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -456,6 +457,10 @@ pub mod pallet { /// the in storage version to the current /// [`InstructionWeights::version`](InstructionWeights). /// + /// - `determinism`: If this is set to any other value but [`Determinism::Deterministic`] + /// then the only way to use this code is to delegate call into it from an offchain + /// execution. Set to [`Determinism::Deterministic`] if in doubt. + /// /// # Note /// /// Anyone can instantiate a contract from any uploaded code and thus prevent its removal. @@ -467,9 +472,11 @@ pub mod pallet { origin: OriginFor, code: Vec, storage_deposit_limit: Option< as codec::HasCompact>::Type>, + determinism: Determinism, ) -> DispatchResult { let origin = ensure_signed(origin)?; - Self::bare_upload_code(origin, code, storage_deposit_limit.map(Into::into)).map(|_| ()) + Self::bare_upload_code(origin, code, storage_deposit_limit.map(Into::into), determinism) + .map(|_| ()) } /// Remove the code stored under `code_hash` and refund the deposit to its owner. @@ -562,6 +569,7 @@ pub mod pallet { storage_deposit_limit.map(Into::into), data, None, + Determinism::Deterministic, ); if let Ok(retval) = &output.result { if retval.did_revert() { @@ -825,6 +833,8 @@ pub mod pallet { /// A more detailed error can be found on the node console if debug messages are enabled /// or in the debug buffer which is returned to RPC clients. CodeRejected, + /// An indetermistic code was used in a context where this is not permitted. + Indeterministic, } /// A mapping from an original code hash to the original code, untouched by instrumentation. @@ -921,6 +931,7 @@ where storage_deposit_limit: Option>, data: Vec, debug: bool, + determinism: Determinism, ) -> ContractExecResult> { let mut debug_message = if debug { Some(Vec::new()) } else { None }; let output = Self::internal_call( @@ -931,6 +942,7 @@ where storage_deposit_limit, data, debug_message.as_mut(), + determinism, ); ContractExecResult { result: output.result.map_err(|r| r.error), @@ -994,10 +1006,11 @@ where origin: T::AccountId, code: Vec, storage_deposit_limit: Option>, + determinism: Determinism, ) -> CodeUploadResult, BalanceOf> { let schedule = T::Schedule::get(); - let module = - PrefabWasmModule::from_code(code, &schedule, origin).map_err(|(err, _)| err)?; + let module = PrefabWasmModule::from_code(code, &schedule, origin, determinism) + .map_err(|(err, _)| err)?; let deposit = module.open_deposit(); if let Some(storage_deposit_limit) = storage_deposit_limit { ensure!(storage_deposit_limit >= deposit, >::StorageDepositLimitExhausted); @@ -1067,6 +1080,7 @@ where storage_deposit_limit: Option>, data: Vec, debug_message: Option<&mut Vec>, + determinism: Determinism, ) -> InternalCallOutput { let mut gas_meter = GasMeter::new(gas_limit); let mut storage_meter = match StorageMeter::new(&origin, storage_deposit_limit, value) { @@ -1088,6 +1102,7 @@ where value, data, debug_message, + determinism, ); InternalCallOutput { result, @@ -1115,11 +1130,16 @@ where let schedule = T::Schedule::get(); let (extra_deposit, executable) = match code { Code::Upload(binary) => { - let executable = PrefabWasmModule::from_code(binary, &schedule, origin.clone()) - .map_err(|(err, msg)| { - debug_message.as_mut().map(|buffer| buffer.extend(msg.as_bytes())); - err - })?; + let executable = PrefabWasmModule::from_code( + binary, + &schedule, + origin.clone(), 
+ Determinism::Deterministic, + ) + .map_err(|(err, msg)| { + debug_message.as_mut().map(|buffer| buffer.extend(msg.as_bytes())); + err + })?; // The open deposit will be charged during execution when the // uploaded module does not already exist. This deposit is not part of the // storage meter because it is not transferred to the contract but @@ -1218,6 +1238,7 @@ sp_api::decl_runtime_apis! { origin: AccountId, code: Vec, storage_deposit_limit: Option, + determinism: Determinism, ) -> CodeUploadResult; /// Query a given storage key in a given contract. diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index 5ea821aac7682..aa04d8b9b1084 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -32,65 +32,54 @@ use sp_std::{marker::PhantomData, prelude::*}; pub struct Migration(PhantomData); impl OnRuntimeUpgrade for Migration { fn on_runtime_upgrade() -> Weight { - let version = StorageVersion::get::>(); + let version = >::on_chain_storage_version(); let mut weight = Weight::zero(); if version < 4 { - weight = weight.saturating_add(v4::migrate::()); - StorageVersion::new(4).put::>(); + v4::migrate::(&mut weight); } if version < 5 { - weight = weight.saturating_add(v5::migrate::()); - StorageVersion::new(5).put::>(); + v5::migrate::(&mut weight); } if version < 6 { - weight = weight.saturating_add(v6::migrate::()); - StorageVersion::new(6).put::>(); + v6::migrate::(&mut weight); } if version < 7 { - weight = weight.saturating_add(v7::migrate::()); - StorageVersion::new(7).put::>(); + v7::migrate::(&mut weight); } if version < 8 { - weight = weight.saturating_add(v8::migrate::()); - StorageVersion::new(8).put::>(); + v8::migrate::(&mut weight); } + if version < 9 { + v9::migrate::(&mut weight); + } + + StorageVersion::new(9).put::>(); + weight.saturating_accrue(T::DbWeight::get().writes(1)); + weight } #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, &'static str> { - let version = StorageVersion::get::>(); - - if version < 7 { - return Ok(vec![]) - } + let version = >::on_chain_storage_version(); - if version < 8 { + if version == 8 { v8::pre_upgrade::()?; } - Ok(vec![]) + Ok(version.encode()) } #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), &'static str> { - let version = StorageVersion::get::>(); - - if version < 7 { - return Ok(()) - } - - if version < 8 { - v8::post_upgrade::()?; - } - - Ok(()) + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + let version = Decode::decode(&mut state.as_ref()).map_err(|_| "Cannot decode version")?; + post_checks::post_upgrade::(version) } } @@ -98,10 +87,10 @@ impl OnRuntimeUpgrade for Migration { mod v4 { use super::*; - pub fn migrate() -> Weight { + pub fn migrate(weight: &mut Weight) { #[allow(deprecated)] migration::remove_storage_prefix(>::name().as_bytes(), b"CurrentSchedule", b""); - T::DbWeight::get().writes(1) + weight.saturating_accrue(T::DbWeight::get().writes(1)); } } @@ -169,11 +158,9 @@ mod v5 { #[storage_alias] type DeletionQueue = StorageValue, Vec>; - pub fn migrate() -> Weight { - let mut weight = Weight::zero(); - + pub fn migrate(weight: &mut Weight) { >::translate(|_key, old: OldContractInfo| { - weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); match old { OldContractInfo::Alive(old) => Some(ContractInfo:: { trie_id: old.trie_id, @@ -185,12 +172,10 @@ mod v5 { }); DeletionQueue::::translate(|old: Option>| { - weight = 
weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); old.map(|old| old.into_iter().map(|o| DeletedContract { trie_id: o.trie_id }).collect()) }) .ok(); - - weight } } @@ -214,14 +199,14 @@ mod v6 { } #[derive(Encode, Decode)] - struct PrefabWasmModule { + pub struct PrefabWasmModule { #[codec(compact)] - instruction_weights_version: u32, + pub instruction_weights_version: u32, #[codec(compact)] - initial: u32, + pub initial: u32, #[codec(compact)] - maximum: u32, - code: Vec, + pub maximum: u32, + pub code: Vec, } use v5::ContractInfo as OldContractInfo; @@ -258,11 +243,9 @@ mod v6 { #[storage_alias] type OwnerInfoOf = StorageMap, Identity, CodeHash, OwnerInfo>; - pub fn migrate() -> Weight { - let mut weight = Weight::zero(); - + pub fn migrate(weight: &mut Weight) { >::translate(|_key, old: OldContractInfo| { - weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); Some(ContractInfo:: { trie_id: old.trie_id, code_hash: old.code_hash, @@ -274,7 +257,7 @@ mod v6 { .expect("Infinite input; no dead input space; qed"); >::translate(|key, old: OldPrefabWasmModule| { - weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); >::insert( key, OwnerInfo { @@ -290,8 +273,6 @@ mod v6 { code: old.code, }) }); - - weight } } @@ -299,14 +280,14 @@ mod v6 { mod v7 { use super::*; - pub fn migrate() -> Weight { + pub fn migrate(weight: &mut Weight) { #[storage_alias] type AccountCounter = StorageValue, u64, ValueQuery>; #[storage_alias] type Nonce = StorageValue, u64, ValueQuery>; Nonce::::set(AccountCounter::::take()); - T::DbWeight::get().reads_writes(1, 2) + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)) } } @@ -317,23 +298,21 @@ mod v8 { use v6::ContractInfo as OldContractInfo; #[derive(Encode, Decode)] - struct ContractInfo { - trie_id: TrieId, - code_hash: CodeHash, - storage_bytes: u32, - storage_items: u32, - storage_byte_deposit: BalanceOf, - storage_item_deposit: BalanceOf, - storage_base_deposit: BalanceOf, + pub struct ContractInfo { + pub trie_id: TrieId, + pub code_hash: CodeHash, + pub storage_bytes: u32, + pub storage_items: u32, + pub storage_byte_deposit: BalanceOf, + pub storage_item_deposit: BalanceOf, + pub storage_base_deposit: BalanceOf, } #[storage_alias] type ContractInfoOf = StorageMap, Twox64Concat, ::AccountId, V>; - pub fn migrate() -> Weight { - let mut weight = Weight::zero(); - + pub fn migrate(weight: &mut Weight) { >>::translate_values(|old: OldContractInfo| { // Count storage items of this contract let mut storage_bytes = 0u32; @@ -359,8 +338,9 @@ mod v8 { // Reads: One read for each storage item plus the contract info itself. // Writes: Only the new contract info. - weight = weight - .saturating_add(T::DbWeight::get().reads_writes(u64::from(storage_items) + 1, 1)); + weight.saturating_accrue( + T::DbWeight::get().reads_writes(u64::from(storage_items) + 1, 1), + ); Some(ContractInfo { trie_id: old.trie_id, @@ -372,8 +352,6 @@ mod v8 { storage_base_deposit, }) }); - - weight } #[cfg(feature = "try-runtime")] @@ -385,9 +363,78 @@ mod v8 { } Ok(()) } +} - #[cfg(feature = "try-runtime")] - pub fn post_upgrade() -> Result<(), &'static str> { +/// Update `CodeStorage` with the new `determinism` field. 
+mod v9 { + use super::*; + use crate::Determinism; + use v6::PrefabWasmModule as OldPrefabWasmModule; + + #[derive(Encode, Decode)] + pub struct PrefabWasmModule { + #[codec(compact)] + pub instruction_weights_version: u32, + #[codec(compact)] + pub initial: u32, + #[codec(compact)] + pub maximum: u32, + pub code: Vec, + pub determinism: Determinism, + } + + #[storage_alias] + type CodeStorage = StorageMap, Identity, CodeHash, PrefabWasmModule>; + + pub fn migrate(weight: &mut Weight) { + >::translate_values(|old: OldPrefabWasmModule| { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + Some(PrefabWasmModule { + instruction_weights_version: old.instruction_weights_version, + initial: old.initial, + maximum: old.maximum, + code: old.code, + determinism: Determinism::Deterministic, + }) + }); + } +} + +// Post checks always need to be run against the latest storage version. This is why we +// do not scope them in the per version modules. They always need to be ported to the latest +// version. +#[cfg(feature = "try-runtime")] +mod post_checks { + use super::*; + use crate::Determinism; + use sp_io::default_child_storage as child; + use v8::ContractInfo; + use v9::PrefabWasmModule; + + #[storage_alias] + type CodeStorage = StorageMap, Identity, CodeHash, PrefabWasmModule>; + + #[storage_alias] + type ContractInfoOf = + StorageMap, Twox64Concat, ::AccountId, V>; + + pub fn post_upgrade(old_version: StorageVersion) -> Result<(), &'static str> { + if old_version < 7 { + return Ok(()) + } + + if old_version < 8 { + v8::()?; + } + + if old_version < 9 { + v9::()?; + } + + Ok(()) + } + + fn v8() -> Result<(), &'static str> { use frame_support::traits::ReservableCurrency; for (key, value) in ContractInfoOf::>::iter() { let reserved = T::Currency::reserved_balance(&key); @@ -413,4 +460,14 @@ mod v8 { } Ok(()) } + + fn v9() -> Result<(), &'static str> { + for value in CodeStorage::::iter_values() { + ensure!( + value.determinism == Determinism::Deterministic, + "All pre-existing codes need to be deterministic." + ); + } + Ok(()) + } } diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 790b74106860a..535517e756c61 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -18,7 +18,7 @@ //! This module contains the cost schedule and supporting code that constructs a //! sane default schedule from a `WeightInfo` implementation. -use crate::{weights::WeightInfo, Config}; +use crate::{wasm::Determinism, weights::WeightInfo, Config}; use codec::{Decode, Encode}; use frame_support::DefaultNoBound; @@ -193,6 +193,13 @@ pub struct InstructionWeights { /// Changes to other parts of the schedule should not increment the version in /// order to avoid unnecessary re-instrumentations. pub version: u32, + /// Weight to be used for instructions which don't have benchmarks assigned. + /// + /// This weight is used whenever a code is uploaded with [`Determinism::AllowIndeterminism`] + /// and an instruction (usually a float instruction) is encountered. This weight is **not** + /// used if a contract is uploaded with [`Determinism::Deterministic`]. If this field is set to + /// `0` (the default) only deterministic codes are allowed to be uploaded. 
+ pub fallback: u32, pub i64const: u32, pub i64load: u32, pub i64store: u32, @@ -526,6 +533,7 @@ impl Default for InstructionWeights { let max_pages = Limits::default().memory_pages; Self { version: 3, + fallback: 0, i64const: cost_instr!(instr_i64const, 1), i64load: cost_instr!(instr_i64load, 2), i64store: cost_instr!(instr_i64store, 2), @@ -659,10 +667,15 @@ impl Default for HostFnWeights { struct ScheduleRules<'a, T: Config> { schedule: &'a Schedule, params: Vec, + determinism: Determinism, } impl Schedule { - pub(crate) fn rules(&self, module: &elements::Module) -> impl gas_metering::Rules + '_ { + pub(crate) fn rules( + &self, + module: &elements::Module, + determinism: Determinism, + ) -> impl gas_metering::Rules + '_ { ScheduleRules { schedule: self, params: module @@ -674,6 +687,7 @@ impl Schedule { func.params().len() as u32 }) .collect(), + determinism, } } } @@ -756,7 +770,10 @@ impl<'a, T: Config> gas_metering::Rules for ScheduleRules<'a, T> { I32Rotr | I64Rotr => w.i64rotr, // Returning None makes the gas instrumentation fail which we intend for - // unsupported or unknown instructions. + // unsupported or unknown instructions. Offchain we might allow indeterminism and hence + // use the fallback weight for those instructions. + _ if matches!(self.determinism, Determinism::AllowIndeterminism) && w.fallback > 0 => + w.fallback, _ => return None, }; Some(weight) diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 6a2144840143a..bc2ee31681d7f 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -24,7 +24,7 @@ use crate::{ exec::{FixSizedKey, Frame}, storage::Storage, tests::test_utils::{get_contract, get_contract_checked}, - wasm::{PrefabWasmModule, ReturnCode as RuntimeReturnCode}, + wasm::{Determinism, PrefabWasmModule, ReturnCode as RuntimeReturnCode}, weights::WeightInfo, BalanceOf, Code, CodeStorage, Config, ContractInfoOf, DefaultAddressGenerator, DeletionQueue, Error, Pallet, Schedule, @@ -340,6 +340,7 @@ parameter_types! { // We want stack height to be always enabled for tests so that this // instrumentation path is always tested implicitly. schedule.limits.stack_height = Some(512); + schedule.instruction_weights.fallback = 1; schedule }; pub static DepositPerByte: BalanceOf = 1; @@ -522,7 +523,12 @@ fn instantiate_and_call_and_deposit_event() { // We determine the storage deposit limit after uploading because it depends on ALICEs free // balance which is changed by uploading a module. - assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, None)); + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm, + None, + Determinism::Deterministic + )); // Drop previous events initialize_block(2); @@ -690,7 +696,13 @@ fn instantiate_unique_trie_id() { ExtBuilder::default().existential_deposit(500).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, None).unwrap(); + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm, + None, + Determinism::Deterministic, + ) + .unwrap(); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Instantiate the contract and store its trie id for later comparison. @@ -940,6 +952,7 @@ fn delegate_call() { RuntimeOrigin::signed(ALICE), callee_wasm, Some(codec::Compact(100_000)), + Determinism::Deterministic, )); assert_ok!(Contracts::call( @@ -1376,10 +1389,18 @@ fn crypto_hashes() { // We offset data in the contract tables by 1. 
let mut params = vec![(n + 1) as u8]; params.extend_from_slice(input); - let result = - >::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, params, false) - .result - .unwrap(); + let result = >::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + params, + false, + Determinism::Deterministic, + ) + .result + .unwrap(); assert!(!result.did_revert()); let expected = hash_fn(input.as_ref()); assert_eq!(&result.data[..*expected_size], &*expected); @@ -1407,9 +1428,18 @@ fn transfer_return_code() { // Contract has only the minimal balance so any transfer will fail. Balances::make_free_balance_be(&addr, min_balance); - let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![], false) - .result - .unwrap(); + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + vec![], + false, + Determinism::Deterministic, + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough total balance in order to not go below the min balance @@ -1417,9 +1447,18 @@ fn transfer_return_code() { // the transfer still fails. Balances::make_free_balance_be(&addr, min_balance + 100); Balances::reserve(&addr, min_balance + 100).unwrap(); - let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, None, vec![], false) - .result - .unwrap(); + let result = Contracts::bare_call( + ALICE, + addr, + 0, + GAS_LIMIT, + None, + vec![], + false, + Determinism::Deterministic, + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); }); } @@ -1454,6 +1493,7 @@ fn call_return_code() { None, AsRef::<[u8]>::as_ref(&DJANGO).to_vec(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1484,6 +1524,7 @@ fn call_return_code() { .cloned() .collect(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1506,6 +1547,7 @@ fn call_return_code() { .cloned() .collect(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1525,6 +1567,7 @@ fn call_return_code() { .cloned() .collect(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1543,6 +1586,7 @@ fn call_return_code() { .cloned() .collect(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1591,6 +1635,7 @@ fn instantiate_return_code() { None, callee_hash.clone(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1609,6 +1654,7 @@ fn instantiate_return_code() { None, callee_hash.clone(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1616,10 +1662,18 @@ fn instantiate_return_code() { // Contract has enough balance but the passed code hash is invalid Balances::make_free_balance_be(&addr, min_balance + 10_000); - let result = - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![0; 33], false) - .result - .unwrap(); + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + vec![0; 33], + false, + Determinism::Deterministic, + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CodeNotFound); // Contract has enough balance but callee reverts because "1" is passed. 
@@ -1631,6 +1685,7 @@ fn instantiate_return_code() { None, callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1645,6 +1700,7 @@ fn instantiate_return_code() { None, callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1717,8 +1773,16 @@ fn chain_extension_works() { // 0 = read input buffer and pass it through as output let input: Vec = ExtensionInput { extension_id: 0, func_id: 0, extra: &[99] }.into(); - let result = - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + input.clone(), + false, + Determinism::Deterministic, + ); assert_eq!(TestExtension::last_seen_buffer(), input); assert_eq!(result.result.unwrap().data, input); @@ -1731,6 +1795,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 1, extra: &[] }.into(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1746,6 +1811,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 2, extra: &[0] }.into(), false, + Determinism::Deterministic, ); assert_ok!(result.result); let gas_consumed = result.gas_consumed; @@ -1757,6 +1823,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 2, extra: &[42] }.into(), false, + Determinism::Deterministic, ); assert_ok!(result.result); assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42); @@ -1768,6 +1835,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 2, extra: &[95] }.into(), false, + Determinism::Deterministic, ); assert_ok!(result.result); assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95); @@ -1781,6 +1849,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 0, func_id: 3, extra: &[] }.into(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1798,6 +1867,7 @@ fn chain_extension_works() { None, ExtensionInput { extension_id: 1, func_id: 0, extra: &[] }.into(), false, + Determinism::Deterministic, ) .result .unwrap(); @@ -1847,8 +1917,17 @@ fn chain_extension_temp_storage_works() { ); assert_ok!( - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false) - .result + Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + input.clone(), + false, + Determinism::Deterministic + ) + .result ); }) } @@ -2387,12 +2466,28 @@ fn reinstrument_does_charge() { // Call the contract two times without reinstrument - let result0 = - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, zero.clone(), false); + let result0 = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + zero.clone(), + false, + Determinism::Deterministic, + ); assert!(!result0.result.unwrap().did_revert()); - let result1 = - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, zero.clone(), false); + let result1 = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + zero.clone(), + false, + Determinism::Deterministic, + ); assert!(!result1.result.unwrap().did_revert()); // They should match because both where called with the same schedule. 
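Two mechanical consequences of the new field show up in the test expectations below. First, every `upload_code`/`bare_call` call site gains an explicit `Determinism::Deterministic` argument, as already seen above. Second, the expected `Reserved`/`Unreserved` amounts change from 240 to 241; a plausible reading (an inference from this diff, not stated in the commit message) is that the stored `PrefabWasmModule` now encodes one extra byte for the new `determinism` field, and the test mock charges `DepositPerByte = 1`:

// Back-of-envelope check only; assumes the code deposit scales with the encoded
// size of the stored module and uses the `DepositPerByte = 1` value from the
// test mock earlier in this patch.
fn main() {
    let deposit_per_byte: u64 = 1;
    let old_reserved: u64 = 240;
    let extra_encoded_bytes: u64 = 1; // the new one-byte `determinism` discriminant
    assert_eq!(old_reserved + extra_encoded_bytes * deposit_per_byte, 241);
}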
@@ -2405,8 +2500,16 @@ fn reinstrument_does_charge() { }); // This call should trigger reinstrumentation - let result2 = - Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, zero.clone(), false); + let result2 = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + zero.clone(), + false, + Determinism::Deterministic, + ); assert!(!result2.result.unwrap().did_revert()); assert!(result2.gas_consumed.ref_time() > result1.gas_consumed.ref_time()); assert_eq!( @@ -2433,7 +2536,16 @@ fn debug_message_works() { vec![], ),); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, None, vec![], true); + let result = Contracts::bare_call( + ALICE, + addr, + 0, + GAS_LIMIT, + None, + vec![], + true, + Determinism::Deterministic, + ); assert_matches!(result.result, Ok(_)); assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!"); @@ -2457,7 +2569,16 @@ fn debug_message_logging_disabled() { ),); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // disable logging by passing `false` - let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, vec![], false); + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + vec![], + false, + Determinism::Deterministic, + ); assert_matches!(result.result, Ok(_)); // the dispatchables always run without debugging assert_ok!(Contracts::call(RuntimeOrigin::signed(ALICE), addr, 0, GAS_LIMIT, None, vec![])); @@ -2481,7 +2602,16 @@ fn debug_message_invalid_utf8() { vec![], ),); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, None, vec![], true); + let result = Contracts::bare_call( + ALICE, + addr, + 0, + GAS_LIMIT, + None, + vec![], + true, + Determinism::Deterministic, + ); assert_err!(result.result, >::DebugMessageInvalidUTF8); }); } @@ -2532,6 +2662,7 @@ fn gas_estimation_nested_call_fixed_limit() { None, input.clone(), false, + Determinism::Deterministic, ); assert_ok!(&result.result); @@ -2548,6 +2679,7 @@ fn gas_estimation_nested_call_fixed_limit() { Some(result.storage_deposit.charge_or_zero()), input, false, + Determinism::Deterministic, ) .result ); @@ -2604,6 +2736,7 @@ fn gas_estimation_call_runtime() { None, call.encode(), false, + Determinism::Deterministic, ); // contract encodes the result of the dispatch runtime let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap(); @@ -2620,6 +2753,7 @@ fn gas_estimation_call_runtime() { None, call.encode(), false, + Determinism::Deterministic, ) .result ); @@ -2668,10 +2802,18 @@ fn ecdsa_recover() { params.extend_from_slice(&signature); params.extend_from_slice(&message_hash); assert!(params.len() == 65 + 32); - let result = - >::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, params, false) - .result - .unwrap(); + let result = >::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + params, + false, + Determinism::Deterministic, + ) + .result + .unwrap(); assert!(!result.did_revert()); assert_eq!(result.data, EXPECTED_COMPRESSED_PUBLIC_KEY); }) @@ -2691,7 +2833,8 @@ fn upload_code_works() { assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, - Some(codec::Compact(1_000)) + Some(codec::Compact(1_000)), + Determinism::Deterministic, )); assert!(>::contains_key(code_hash)); @@ -2702,7 +2845,7 @@ fn upload_code_works() { phase: Phase::Initialization, event: 
RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: ALICE, - amount: 240, + amount: 241, }), topics: vec![], }, @@ -2727,7 +2870,12 @@ fn upload_code_limit_too_low() { initialize_block(2); assert_noop!( - Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(100))), + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm, + Some(codec::Compact(100)), + Determinism::Deterministic + ), >::StorageDepositLimitExhausted, ); @@ -2746,7 +2894,12 @@ fn upload_code_not_enough_balance() { initialize_block(2); assert_noop!( - Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm, Some(codec::Compact(1_000))), + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm, + Some(codec::Compact(1_000)), + Determinism::Deterministic + ), >::StorageDepositNotEnoughFunds, ); @@ -2767,7 +2920,8 @@ fn remove_code_works() { assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, - Some(codec::Compact(1_000)) + Some(codec::Compact(1_000)), + Determinism::Deterministic, )); assert!(>::contains_key(code_hash)); @@ -2781,7 +2935,7 @@ fn remove_code_works() { phase: Phase::Initialization, event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: ALICE, - amount: 240, + amount: 241, }), topics: vec![], }, @@ -2794,7 +2948,7 @@ fn remove_code_works() { phase: Phase::Initialization, event: RuntimeEvent::Balances(pallet_balances::Event::Unreserved { who: ALICE, - amount: 240, + amount: 241, }), topics: vec![], }, @@ -2821,7 +2975,8 @@ fn remove_code_wrong_origin() { assert_ok!(Contracts::upload_code( RuntimeOrigin::signed(ALICE), wasm, - Some(codec::Compact(1_000)) + Some(codec::Compact(1_000)), + Determinism::Deterministic, )); assert_noop!( @@ -2836,7 +2991,7 @@ fn remove_code_wrong_origin() { phase: Phase::Initialization, event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: ALICE, - amount: 240, + amount: 241, }), topics: vec![], }, @@ -2969,7 +3124,7 @@ fn instantiate_with_zero_balance_works() { phase: Phase::Initialization, event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: ALICE, - amount: 240, + amount: 241, }), topics: vec![], }, @@ -3071,7 +3226,7 @@ fn instantiate_with_below_existential_deposit_works() { phase: Phase::Initialization, event: RuntimeEvent::Balances(pallet_balances::Event::Reserved { who: ALICE, - amount: 240, + amount: 241, }), topics: vec![], }, @@ -3261,7 +3416,12 @@ fn set_code_extrinsic() { )); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), new_wasm, None,)); + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + new_wasm, + None, + Determinism::Deterministic + )); // Drop previous events initialize_block(2); @@ -3457,7 +3617,12 @@ fn contract_reverted() { let input = (flags.bits(), buffer).encode(); // We just upload the code for later use - assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm.clone(), None)); + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + None, + Determinism::Deterministic + )); // Calling extrinsic: revert leads to an error assert_err_ignore_postinfo!( @@ -3536,9 +3701,18 @@ fn contract_reverted() { ); // Calling directly: revert leads to success but the flags indicate the error - let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input, false) - .result - .unwrap(); + let result = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + input, + false, + 
Determinism::Deterministic, + ) + .result + .unwrap(); assert_eq!(result.flags, flags); assert_eq!(result.data, buffer); }); @@ -3551,7 +3725,12 @@ fn code_rejected_error_works() { let _ = Balances::deposit_creating(&ALICE, 1_000_000); assert_noop!( - Contracts::upload_code(RuntimeOrigin::signed(ALICE), wasm.clone(), None), + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + None, + Determinism::Deterministic + ), >::CodeRejected, ); @@ -3594,7 +3773,12 @@ fn set_code_hash() { vec![], )); // upload new code - assert_ok!(Contracts::upload_code(RuntimeOrigin::signed(ALICE), new_wasm.clone(), None)); + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + new_wasm.clone(), + None, + Determinism::Deterministic + )); System::reset_events(); @@ -3607,16 +3791,25 @@ fn set_code_hash() { None, new_code_hash.as_ref().to_vec(), true, + Determinism::Deterministic, ) .result .unwrap(); assert_return_code!(result, 1); // Second calls new contract code that returns 2 - let result = - Contracts::bare_call(ALICE, contract_addr.clone(), 0, GAS_LIMIT, None, vec![], true) - .result - .unwrap(); + let result = Contracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + GAS_LIMIT, + None, + vec![], + true, + Determinism::Deterministic, + ) + .result + .unwrap(); assert_return_code!(result, 2); // Checking for the last event only @@ -3950,3 +4143,243 @@ fn deposit_limit_honors_min_leftover() { assert_eq!(Balances::free_balance(&BOB), 1_000); }); } + +#[test] +fn cannot_instantiate_indeterministic_code() { + let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); + let (caller_wasm, _) = compile_module::("instantiate_return_code").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + // Try to instantiate directly from code + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + wasm.clone(), + vec![], + vec![], + ), + >::CodeRejected, + ); + assert_err!( + Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(wasm.clone()), + vec![], + vec![], + false, + ) + .result, + >::CodeRejected, + ); + + // Try to upload a non deterministic code as deterministic + assert_err!( + Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm.clone(), + None, + Determinism::Deterministic + ), + >::CodeRejected, + ); + + // Try to instantiate from already stored indeterministic code hash + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm, + None, + Determinism::AllowIndeterminism, + )); + assert_err_ignore_postinfo!( + Contracts::instantiate( + RuntimeOrigin::signed(ALICE), + 0, + GAS_LIMIT, + None, + code_hash, + vec![], + vec![], + ), + >::Indeterministic, + ); + assert_err!( + Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Existing(code_hash), + vec![], + vec![], + false, + ) + .result, + >::Indeterministic, + ); + + // Deploy contract which instantiates another contract + let addr = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(caller_wasm), + vec![], + vec![], + false, + ) + .result + .unwrap() + .account_id; + + // Try to instantiate `code_hash` from another contract in deterministic mode + assert_err!( + >::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + code_hash.encode(), + false, + Determinism::Deterministic, + ) + .result, + >::Indeterministic, + ); + + // Instantiations are 
not allowed even in non determinism mode + assert_err!( + >::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + None, + code_hash.encode(), + false, + Determinism::AllowIndeterminism, + ) + .result, + >::Indeterministic, + ); + }); +} + +#[test] +fn cannot_set_code_indeterministic_code() { + let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); + let (caller_wasm, _) = compile_module::("set_code_hash").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + // Put the non deterministic contract on-chain + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm, + None, + Determinism::AllowIndeterminism, + )); + + // Create the contract that will call `seal_set_code_hash` + let caller_addr = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(caller_wasm), + vec![], + vec![], + false, + ) + .result + .unwrap() + .account_id; + + // We do not allow to set the code hash to a non determinstic wasm + assert_err!( + >::bare_call( + ALICE, + caller_addr.clone(), + 0, + GAS_LIMIT, + None, + code_hash.encode(), + false, + Determinism::AllowIndeterminism, + ) + .result, + >::Indeterministic, + ); + }); +} + +#[test] +fn delegate_call_indeterministic_code() { + let (wasm, code_hash) = compile_module::("float_instruction").unwrap(); + let (caller_wasm, _) = compile_module::("delegate_call_simple").unwrap(); + ExtBuilder::default().existential_deposit(200).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + // Put the non deterministic contract on-chain + assert_ok!(Contracts::upload_code( + RuntimeOrigin::signed(ALICE), + wasm, + None, + Determinism::AllowIndeterminism, + )); + + // Create the contract that will call `seal_delegate_call` + let caller_addr = Contracts::bare_instantiate( + ALICE, + 0, + GAS_LIMIT, + None, + Code::Upload(caller_wasm), + vec![], + vec![], + false, + ) + .result + .unwrap() + .account_id; + + // The delegate call will fail in deterministic mode + assert_err!( + >::bare_call( + ALICE, + caller_addr.clone(), + 0, + GAS_LIMIT, + None, + code_hash.encode(), + false, + Determinism::Deterministic, + ) + .result, + >::Indeterministic, + ); + + // The delegate call will work on non deterministic mode + assert_ok!( + >::bare_call( + ALICE, + caller_addr.clone(), + 0, + GAS_LIMIT, + None, + code_hash.encode(), + false, + Determinism::AllowIndeterminism, + ) + .result + ); + }); +} diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 09e51d981360b..3ede6db6db5a1 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -201,7 +201,7 @@ pub fn reinstrument( // as the contract is already deployed and every change in size would be the result // of changes in the instrumentation algorithm controlled by the chain authors. 
prefab_module.code = WeakBoundedVec::force_from( - prepare::reinstrument_contract::(&original_code, schedule) + prepare::reinstrument_contract::(&original_code, schedule, prefab_module.determinism) .map_err(|_| >::CodeRejected)?, Some("Contract exceeds limit after re-instrumentation."), ); diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index b341ae3bd155d..053729730e679 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -37,6 +37,7 @@ use crate::{ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::dispatch::{DispatchError, DispatchResult}; use sp_core::crypto::UncheckedFrom; +use sp_runtime::RuntimeDebug; use sp_sandbox::{SandboxEnvironmentBuilder, SandboxInstance, SandboxMemory}; use sp_std::prelude::*; #[cfg(test)] @@ -66,6 +67,10 @@ pub struct PrefabWasmModule { maximum: u32, /// Code instrumented with the latest schedule. code: RelaxedCodeVec, + /// A code that might contain non deterministic features and is therefore never allowed + /// to be run on chain. Specifically this code can never be instantiated into a contract + /// and can just be used through a delegate call. + determinism: Determinism, /// The uninstrumented, pristine version of the code. /// /// It is not stored because the pristine code has its own storage item. The value @@ -102,6 +107,27 @@ pub struct OwnerInfo { refcount: u64, } +/// Defines the required determinism level of a wasm blob when either running or uploading code. +#[derive( + Clone, Copy, Encode, Decode, scale_info::TypeInfo, MaxEncodedLen, RuntimeDebug, PartialEq, Eq, +)] +pub enum Determinism { + /// The execution should be deterministic and hence no indeterministic instructions are + /// allowed. + /// + /// Dispatchables always use this mode in order to make on-chain execution deterministic. + Deterministic, + /// Allow calling or uploading an indeterministic code. + /// + /// This is only possible when calling into `pallet-contracts` directly via + /// [`crate::Pallet::bare_call`]. + /// + /// # Note + /// + /// **Never** use this mode for on-chain execution. + AllowIndeterminism, +} + impl ExportedFunction { /// The wasm export name for the function. 
fn identifier(&self) -> &str { @@ -124,11 +150,13 @@ where original_code: Vec, schedule: &Schedule, owner: AccountIdOf, + determinism: Determinism, ) -> Result { let module = prepare::prepare_contract( original_code.try_into().map_err(|_| (>::CodeTooLarge.into(), ""))?, schedule, owner, + determinism, )?; Ok(module) } @@ -258,6 +286,10 @@ where fn code_len(&self) -> u32 { self.code.len() as u32 } + + fn is_deterministic(&self) -> bool { + matches!(self.determinism, Determinism::Deterministic) + } } #[cfg(test)] @@ -551,8 +583,13 @@ mod tests { fn execute>(wat: &str, input_data: Vec, mut ext: E) -> ExecResult { let wasm = wat::parse_str(wat).unwrap(); let schedule = crate::Schedule::default(); - let executable = - PrefabWasmModule::<::T>::from_code(wasm, &schedule, ALICE).unwrap(); + let executable = PrefabWasmModule::<::T>::from_code( + wasm, + &schedule, + ALICE, + Determinism::Deterministic, + ) + .unwrap(); executable.execute(ext.borrow_mut(), &ExportedFunction::Call, input_data) } diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index e8873f604c9c7..3e6b9eee96680 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -22,7 +22,7 @@ use crate::{ chain_extension::ChainExtension, storage::meter::Diff, - wasm::{env_def::ImportSatisfyCheck, OwnerInfo, PrefabWasmModule}, + wasm::{env_def::ImportSatisfyCheck, Determinism, OwnerInfo, PrefabWasmModule}, AccountIdOf, CodeVec, Config, Error, Schedule, }; use codec::{Encode, MaxEncodedLen}; @@ -182,8 +182,8 @@ impl<'a, T: Config> ContractModule<'a, T> { Ok(()) } - fn inject_gas_metering(self) -> Result { - let gas_rules = self.schedule.rules(&self.module); + fn inject_gas_metering(self, determinism: Determinism) -> Result { + let gas_rules = self.schedule.rules(&self.module, determinism); let contract_module = wasm_instrument::gas_metering::inject(self.module, &gas_rules, "seal0") .map_err(|_| "gas instrumentation failed")?; @@ -369,6 +369,7 @@ fn get_memory_limits( fn check_and_instrument( original_code: &[u8], schedule: &Schedule, + determinism: Determinism, ) -> Result<(Vec, (u32, u32)), &'static str> { let result = (|| { let contract_module = ContractModule::new(original_code, schedule)?; @@ -376,17 +377,20 @@ fn check_and_instrument( contract_module.ensure_no_internal_memory()?; contract_module.ensure_table_size_limit(schedule.limits.table_size)?; contract_module.ensure_global_variable_limit(schedule.limits.globals)?; - contract_module.ensure_no_floating_types()?; contract_module.ensure_parameter_limit(schedule.limits.parameters)?; contract_module.ensure_br_table_size_limit(schedule.limits.br_table_size)?; + if matches!(determinism, Determinism::Deterministic) { + contract_module.ensure_no_floating_types()?; + } + // We disallow importing `gas` function here since it is treated as implementation detail. let disallowed_imports = [b"gas".as_ref()]; let memory_limits = get_memory_limits(contract_module.scan_imports::(&disallowed_imports)?, schedule)?; let code = contract_module - .inject_gas_metering()? + .inject_gas_metering(determinism)? .inject_stack_height_metering()? 
.into_wasm_code()?; @@ -404,9 +408,11 @@ fn do_preparation( original_code: CodeVec, schedule: &Schedule, owner: AccountIdOf, + determinism: Determinism, ) -> Result, (DispatchError, &'static str)> { - let (code, (initial, maximum)) = check_and_instrument::(original_code.as_ref(), schedule) - .map_err(|msg| (>::CodeRejected.into(), msg))?; + let (code, (initial, maximum)) = + check_and_instrument::(original_code.as_ref(), schedule, determinism) + .map_err(|msg| (>::CodeRejected.into(), msg))?; let original_code_len = original_code.len(); let mut module = PrefabWasmModule { @@ -414,6 +420,7 @@ fn do_preparation( initial, maximum, code: code.try_into().map_err(|_| (>::CodeTooLarge.into(), ""))?, + determinism, code_hash: T::Hashing::hash(&original_code), original_code: Some(original_code), owner_info: None, @@ -449,8 +456,9 @@ pub fn prepare_contract( original_code: CodeVec, schedule: &Schedule, owner: AccountIdOf, + determinism: Determinism, ) -> Result, (DispatchError, &'static str)> { - do_preparation::(original_code, schedule, owner) + do_preparation::(original_code, schedule, owner, determinism) } /// The same as [`prepare_contract`] but without constructing a new [`PrefabWasmModule`] @@ -461,8 +469,9 @@ pub fn prepare_contract( pub fn reinstrument_contract( original_code: &[u8], schedule: &Schedule, + determinism: Determinism, ) -> Result, &'static str> { - Ok(check_and_instrument::(original_code, schedule)?.0) + Ok(check_and_instrument::(original_code, schedule, determinism)?.0) } /// Alternate (possibly unsafe) preparation functions used only for benchmarking. @@ -495,6 +504,7 @@ pub mod benchmarking { maximum: memory_limits.1, code_hash: T::Hashing::hash(&original_code), original_code: Some(original_code.try_into().map_err(|_| "Original code too large")?), + determinism: Determinism::Deterministic, code: contract_module .into_wasm_code()? .try_into() @@ -572,7 +582,7 @@ mod tests { }, .. Default::default() }; - let r = do_preparation::(wasm, &schedule, ALICE); + let r = do_preparation::(wasm, &schedule, ALICE, Determinism::Deterministic); assert_matches::assert_matches!(r.map_err(|(_, msg)| msg), $($expected)*); } }; diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 6d7e6bcf69e5f..281cc30c4f053 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -2384,6 +2384,7 @@ pub mod env { /// 2. Contracts using this API can't be assumed as having deterministic addresses. Said another /// way, when using this API you lose the guarantee that an address always identifies a specific /// code hash. + /// /// 3. If a contract calls into itself after changing its code the new call would use /// the new code. 
However, if the original caller panics after returning from the sub call it /// would revert the changes made by `seal_set_code_hash` and the next caller would use From 3d6043afca80941bf05ca5681c6e9bbdff1e80ad Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Mon, 24 Oct 2022 19:55:25 +0200 Subject: [PATCH 277/348] Normalize keystore type and its usage across tests (#12553) * Normalize keystore type and usage across tests * Extract peer index from array index --- Cargo.lock | 3 +- client/beefy/src/tests.rs | 4 +-- client/consensus/babe/Cargo.toml | 2 +- client/consensus/babe/src/tests.rs | 48 ++++++++++++++-------------- client/finality-grandpa/Cargo.toml | 6 +--- client/finality-grandpa/src/tests.rs | 38 +++++++--------------- 6 files changed, 41 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 397846693e907..6a5b562d0a791 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7846,6 +7846,7 @@ dependencies = [ "sp-core", "sp-inherents", "sp-io", + "sp-keyring", "sp-keystore", "sp-runtime", "sp-timestamp", @@ -7853,7 +7854,6 @@ dependencies = [ "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime-client", - "tempfile", "thiserror", ] @@ -8130,7 +8130,6 @@ dependencies = [ "sp-tracing", "substrate-prometheus-endpoint", "substrate-test-runtime-client", - "tempfile", "thiserror", "tokio", ] diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index d280e4f08a531..f0647d7b142e6 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -29,12 +29,12 @@ use sc_consensus::{ BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, ImportedAux, }; -use sc_keystore::LocalKeystore; use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, PassThroughVerifier, Peer, PeersClient, PeersFullClient, TestNetFactory, }; use sc_utils::notification::NotificationReceiver; +use sp_keystore::testing::KeyStore as TestKeystore; use beefy_primitives::{ crypto::{AuthorityId, Signature}, @@ -333,7 +333,7 @@ pub(crate) fn make_beefy_ids(keys: &[BeefyKeyring]) -> Vec { } pub(crate) fn create_beefy_keystore(authority: BeefyKeyring) -> SyncCryptoStorePtr { - let keystore = Arc::new(LocalKeystore::in_memory()); + let keystore = Arc::new(TestKeystore::new()); SyncCryptoStore::ecdsa_generate_new(&*keystore, BeefyKeyType, Some(&authority.to_seed())) .expect("Creates authority key"); keystore diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index e559ec165a793..54da2d6e49e39 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -54,8 +54,8 @@ sp-version = { version = "5.0.0", path = "../../../primitives/version" } [dev-dependencies] rand_chacha = "0.2.2" -tempfile = "3.1.0" sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } +sp-keyring = { version = "6.0.0", path = "../../../primitives/keyring" } sc-network = { version = "0.10.0-dev", path = "../../network" } sc-network-test = { version = "0.8.0", path = "../../network/test" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 24185dbce6795..abffcb0a2b4c3 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -28,7 +28,6 @@ use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer}; use sc_consensus::{BoxBlockImport, BoxJustificationImport}; use 
sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; -use sc_keystore::LocalKeystore; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::BABE; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; @@ -38,7 +37,11 @@ use sp_consensus_babe::{ }; use sp_consensus_slots::SlotDuration; use sp_core::crypto::Pair; -use sp_keystore::{vrf::make_transcript as transcript_from_data, SyncCryptoStore}; +use sp_keyring::Sr25519Keyring; +use sp_keystore::{ + testing::KeyStore as TestKeyStore, vrf::make_transcript as transcript_from_data, + SyncCryptoStore, +}; use sp_runtime::{ generic::{Digest, DigestItem}, traits::Block as BlockT, @@ -363,6 +366,13 @@ fn rejects_empty_block() { }) } +fn create_keystore(authority: Sr25519Keyring) -> SyncCryptoStorePtr { + let keystore = Arc::new(TestKeyStore::new()); + SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(&authority.to_seed())) + .expect("Generates authority key"); + keystore +} + fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static) { sp_tracing::try_init_simple(); let mutator = Arc::new(mutator) as Mutator; @@ -370,25 +380,19 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); let net = BabeTestNet::new(3); - let peers = &[(0, "//Alice"), (1, "//Bob"), (2, "//Charlie")]; + let peers = [Sr25519Keyring::Alice, Sr25519Keyring::Bob, Sr25519Keyring::Charlie]; let net = Arc::new(Mutex::new(net)); let mut import_notifications = Vec::new(); let mut babe_futures = Vec::new(); - let mut keystore_paths = Vec::new(); - for (peer_id, seed) in peers { + for (peer_id, auth_id) in peers.iter().enumerate() { let mut net = net.lock(); - let peer = net.peer(*peer_id); + let peer = net.peer(peer_id); let client = peer.client().as_client(); let select_chain = peer.select_chain().expect("Full client has select_chain"); - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = - Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); - SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(seed)) - .expect("Generates authority key"); - keystore_paths.push(keystore_path); + let keystore = create_keystore(*auth_id); let mut got_own = false; let mut got_other = false; @@ -536,16 +540,14 @@ fn sig_is_not_pre_digest() { #[test] fn can_author_block() { sp_tracing::try_init_simple(); - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = - Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); - let public = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) - .expect("Generates authority pair"); + + let authority = Sr25519Keyring::Alice; + let keystore = create_keystore(authority); let mut i = 0; let epoch = Epoch { start_slot: 0.into(), - authorities: vec![(public.into(), 1)], + authorities: vec![(authority.public().into(), 1)], randomness: [0; 32], epoch_index: 1, duration: 100, @@ -967,15 +969,13 @@ fn verify_slots_are_strictly_increasing() { #[test] fn babe_transcript_generation_match() { sp_tracing::try_init_simple(); - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = - Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); - let public = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, 
Some("//Alice")) - .expect("Generates authority pair"); + + let authority = Sr25519Keyring::Alice; + let _keystore = create_keystore(authority); let epoch = Epoch { start_slot: 0.into(), - authorities: vec![(public.into(), 1)], + authorities: vec![(authority.public().into(), 1)], randomness: [0; 32], epoch_index: 1, duration: 100, diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 83c6051946aff..288e579d8da29 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -51,12 +51,8 @@ sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] assert_matches = "1.3.0" -finality-grandpa = { version = "0.16.0", features = [ - "derive-codec", - "test-helpers", -] } +finality-grandpa = { version = "0.16.0", features = ["derive-codec", "test-helpers"] } serde = "1.0.136" -tempfile = "3.1.0" tokio = "1.17.0" sc-network = { version = "0.10.0-dev", path = "../network" } sc-network-test = { version = "0.8.0", path = "../network/test" } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index c04754411af1a..39379e69c9157 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -41,7 +41,7 @@ use sp_finality_grandpa::{ AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof, GRANDPA_ENGINE_ID, }; use sp_keyring::Ed25519Keyring; -use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_keystore::{testing::KeyStore as TestKeyStore, SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ codec::Encode, generic::{BlockId, DigestItem}, @@ -59,7 +59,6 @@ use authorities::AuthoritySet; use communication::grandpa_protocol_name; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_consensus::LongestChain; -use sc_keystore::LocalKeystore; use sp_application_crypto::key_types::GRANDPA; type TestLinkHalf = @@ -213,14 +212,11 @@ fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { keys.iter().map(|&key| key.public().into()).map(|id| (id, 1)).collect() } -fn create_keystore(authority: Ed25519Keyring) -> (SyncCryptoStorePtr, tempfile::TempDir) { - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = - Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); +fn create_keystore(authority: Ed25519Keyring) -> SyncCryptoStorePtr { + let keystore = Arc::new(TestKeyStore::new()); SyncCryptoStore::ed25519_generate_new(&*keystore, GRANDPA, Some(&authority.to_seed())) .expect("Creates authority key"); - - (keystore, keystore_path) + keystore } fn block_until_complete( @@ -243,7 +239,7 @@ fn initialize_grandpa( let voters = stream::FuturesUnordered::new(); for (peer_id, key) in peers.iter().enumerate() { - let (keystore, _) = create_keystore(*key); + let keystore = create_keystore(*key); let (net_service, link) = { // temporary needed for some reason @@ -480,11 +476,9 @@ fn transition_3_voters_twice_1_full_observer() { let mut runtime = Runtime::new().unwrap(); - let mut keystore_paths = Vec::new(); let mut voters = Vec::new(); for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() { - let (keystore, keystore_path) = create_keystore(local_key); - keystore_paths.push(keystore_path); + let keystore = create_keystore(local_key); let (net_service, link) = { let net = net.lock(); @@ -934,7 +928,6 @@ fn voter_persists_its_votes() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let mut keystore_paths = 
Vec::new(); // we have two authorities but we'll only be running the voter for alice // we are going to be listening for the prevotes it casts @@ -947,11 +940,7 @@ fn voter_persists_its_votes() { // create the communication layer for bob, but don't start any // voter. instead we'll listen for the prevote that alice casts // and cast our own manually - let bob_keystore = { - let (keystore, keystore_path) = create_keystore(peers[1]); - keystore_paths.push(keystore_path); - keystore - }; + let bob_keystore = create_keystore(peers[1]); let bob_network = { let config = Config { gossip_duration: TEST_GOSSIP_DURATION, @@ -984,7 +973,7 @@ fn voter_persists_its_votes() { // spawn two voters for alice. // half-way through the test, we stop one and start the other. let (alice_voter1, abort) = future::abortable({ - let (keystore, _) = create_keystore(peers[0]); + let keystore = create_keystore(peers[0]); let (net_service, link) = { // temporary needed for some reason @@ -1018,7 +1007,7 @@ fn voter_persists_its_votes() { peers: &[Ed25519Keyring], net: Arc>, ) -> impl Future + Send { - let (keystore, _) = create_keystore(peers[0]); + let keystore = create_keystore(peers[0]); let mut net = net.lock(); // we add a new peer to the test network and we'll use @@ -1266,8 +1255,6 @@ fn voter_catches_up_to_latest_round_when_behind() { Box::pin(run_grandpa_voter(grandpa_params).expect("all in order with client and network")) }; - let mut keystore_paths = Vec::new(); - // spawn authorities for (peer_id, key) in peers.iter().enumerate() { let (client, link) = { @@ -1284,8 +1271,7 @@ fn voter_catches_up_to_latest_round_when_behind() { .for_each(move |_| future::ready(())), ); - let (keystore, keystore_path) = create_keystore(*key); - keystore_paths.push(keystore_path); + let keystore = create_keystore(*key); let voter = voter(Some(keystore), peer_id, link, net.clone()); @@ -1515,7 +1501,7 @@ fn grandpa_environment_never_overwrites_round_voter_state() { let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); - let (keystore, _keystore_path) = create_keystore(peers[0]); + let keystore = create_keystore(peers[0]); let environment = test_environment(&link, Some(keystore), network_service.clone(), ()); let round_state = || finality_grandpa::round::State::genesis(Default::default()); @@ -1715,7 +1701,7 @@ fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { let peer = net.peer(0); let network_service = peer.network_service().clone(); let link = peer.data.lock().take().unwrap(); - let (keystore, _keystore_path) = create_keystore(alice); + let keystore = create_keystore(alice); test_environment(&link, Some(keystore), network_service.clone(), ()) }; From d6953869a5ab579d25551008eeb580c9efd649a9 Mon Sep 17 00:00:00 2001 From: Sacha Lansky Date: Mon, 24 Oct 2022 20:10:18 +0200 Subject: [PATCH 278/348] Update template pallet to latest enum syntax (#12552) --- bin/node-template/pallets/template/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 0b55d7ae86fcf..28d36ac2c6321 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -45,7 +45,7 @@ pub mod pallet { pub enum Event { /// Event documentation should end with an array that provides descriptive names for event /// parameters. 
[something, who] - SomethingStored(u32, T::AccountId), + SomethingStored { something: u32, who: T::AccountId }, } // Errors inform users that something went wrong. @@ -75,7 +75,7 @@ pub mod pallet { >::put(something); // Emit an event. - Self::deposit_event(Event::SomethingStored(something, who)); + Self::deposit_event(Event::SomethingStored { something, who }); // Return a successful DispatchResultWithPostInfo Ok(()) } From f2bc08a3071a91b71fec63cf2b22c707411cec0e Mon Sep 17 00:00:00 2001 From: yjh Date: Tue, 25 Oct 2022 15:04:32 +0800 Subject: [PATCH 279/348] feat: generalize some functions in sp-trie (#12376) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add to_memory_db to StorageProof * feat: add iter method and generalize iter_nodes * fmt * feat: generalize `encode_compact` like `decode_compact`, add to_compact_proof to StorageProof * fix to_compact_proof * improve by suggestions * improve by suggestions Co-authored-by: Bastian Köcher --- client/rpc/src/state/state_full.rs | 4 +- client/service/src/client/client.rs | 4 +- primitives/state-machine/src/lib.rs | 8 +-- primitives/state-machine/src/trie_backend.rs | 2 +- primitives/trie/src/lib.rs | 4 +- primitives/trie/src/storage_proof.rs | 63 ++++++++++++-------- primitives/trie/src/trie_codec.rs | 16 ++--- 7 files changed, 56 insertions(+), 45 deletions(-) diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 44e7d03bc1a0e..7fc7f840a9daf 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -346,7 +346,7 @@ where .and_then(|block| { self.client .read_proof(&block, &mut keys.iter().map(|key| key.0.as_ref())) - .map(|proof| proof.iter_nodes().map(|node| node.into()).collect()) + .map(|proof| proof.into_iter_nodes().map(|node| node.into()).collect()) .map(|proof| ReadProof { at: block, proof }) }) .map_err(client_err) @@ -498,7 +498,7 @@ where &child_info, &mut keys.iter().map(|key| key.0.as_ref()), ) - .map(|proof| proof.iter_nodes().map(|node| node.into()).collect()) + .map(|proof| proof.into_iter_nodes().map(|node| node.into()).collect()) .map(|proof| ReadProof { at: block, proof }) }) .map_err(client_err) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index c0414a3bebc42..e4909b89e3f16 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1190,8 +1190,8 @@ where let (proof, count) = prove_range_read_with_child_with_size::<_, HashFor>( state, size_limit, start_key, )?; - // This is read proof only, we can use either LayoutV0 or LayoutV1. - let proof = sp_trie::encode_compact::>>(proof, root) + let proof = proof + .into_compact_proof::>(root) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; Ok((proof, count)) } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 4126aea478d5b..1f106593ede34 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1939,13 +1939,13 @@ mod tests { let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); // Always contains at least some nodes. 
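The `StorageProof` additions in the hunks below are what enable the call-site changes above (`into_compact_proof` in the client, `to_memory_db` in the state-machine tests): node iteration now comes in consuming (`into_iter_nodes`) and borrowing (`iter_nodes`) flavours, `to_memory_db` builds a `MemoryDB` from a borrowed proof, and `into_compact_proof`/`to_compact_proof` wrap the now-generic `encode_compact`. A minimal usage sketch, assuming only a `hash_db::Hasher` implementation `H` and a matching state `root`:

use hash_db::Hasher;
use sp_trie::StorageProof;

fn inspect_and_compact<H: Hasher>(proof: &StorageProof, root: H::Out) {
    // Borrowing iteration: the proof stays usable afterwards.
    let node_count = proof.iter_nodes().count();
    // Borrowing conversion into a `MemoryDB`; the consuming `into_memory_db`
    // still exists for callers that are finished with the proof.
    let db = proof.to_memory_db::<H>();
    // Compact encoding without consuming the proof; errors are simply
    // discarded in this sketch.
    let compact = proof.to_compact_proof::<H>(root).ok();
    let _ = (node_count, db, compact);
}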
- assert_eq!(proof.into_memory_db::().drain().len(), 3); + assert_eq!(proof.to_memory_db::().drain().len(), 3); assert_eq!(count, 1); let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); - assert_eq!(proof.clone().into_memory_db::().drain().len(), 9); + assert_eq!(proof.to_memory_db::().drain().len(), 9); assert_eq!(count, 85); let (results, completed) = read_range_proof_check::( remote_root, @@ -1968,7 +1968,7 @@ mod tests { let remote_backend = trie_backend::tests::test_trie(state_version, None, None); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); - assert_eq!(proof.clone().into_memory_db::().drain().len(), 11); + assert_eq!(proof.to_memory_db::().drain().len(), 11); assert_eq!(count, 132); let (results, completed) = read_range_proof_check::(remote_root, proof, None, None, None, None) @@ -2053,7 +2053,7 @@ mod tests { ) .unwrap(); // Always contains at least some nodes. - assert!(proof.clone().into_memory_db::().drain().len() > 0); + assert!(proof.to_memory_db::().drain().len() > 0); assert!(count < 3); // when doing child we include parent and first child key. let (result, completed_depth) = read_range_proof_check_with_child::( diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 447c276a6049c..20bb2c592e925 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -980,7 +980,7 @@ pub mod tests { let proof = backend.extract_proof().unwrap(); let mut nodes = Vec::new(); - for node in proof.iter_nodes() { + for node in proof.into_iter_nodes() { let hash = BlakeTwo256::hash(&node); // Only insert the node/value that contains the important data. if hash != value_hash { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index c2ab12dbee14c..f40ed4d9b53a5 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -57,10 +57,10 @@ pub use trie_db::{ pub use trie_stream::TrieStream; /// substrate trie layout -pub struct LayoutV0(sp_std::marker::PhantomData); +pub struct LayoutV0(PhantomData); /// substrate trie layout, with external value nodes. -pub struct LayoutV1(sp_std::marker::PhantomData); +pub struct LayoutV1(PhantomData); impl TrieLayout for LayoutV0 where diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 8fdb04ee20ed0..5351e8de6fd82 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -18,7 +18,11 @@ use codec::{Decode, Encode}; use hash_db::{HashDB, Hasher}; use scale_info::TypeInfo; -use sp_std::{collections::btree_set::BTreeSet, iter::IntoIterator, vec::Vec}; +use sp_std::{ + collections::btree_set::BTreeSet, + iter::{DoubleEndedIterator, IntoIterator}, + vec::Vec, +}; // Note that `LayoutV1` usage here (proof compaction) is compatible // with `LayoutV0`. use crate::LayoutV1 as Layout; @@ -54,10 +58,16 @@ impl StorageProof { self.trie_nodes.is_empty() } + /// Convert into an iterator over encoded trie nodes in lexicographical order constructed + /// from the proof. + pub fn into_iter_nodes(self) -> impl Sized + DoubleEndedIterator> { + self.trie_nodes.into_iter() + } + /// Create an iterator over encoded trie nodes in lexicographical order constructed /// from the proof. 
- pub fn iter_nodes(self) -> StorageProofNodeIterator { - StorageProofNodeIterator::new(self) + pub fn iter_nodes(&self) -> impl Sized + DoubleEndedIterator> { + self.trie_nodes.iter() } /// Convert into plain node vector. @@ -70,14 +80,19 @@ impl StorageProof { self.into() } + /// Creates a [`MemoryDB`](crate::MemoryDB) from `Self` reference. + pub fn to_memory_db(&self) -> crate::MemoryDB { + self.into() + } + /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. The merged proof output may be smaller than the aggregate size of the /// input proofs due to deduplication of trie nodes. pub fn merge(proofs: impl IntoIterator) -> Self { let trie_nodes = proofs .into_iter() - .flat_map(|proof| proof.iter_nodes()) - .collect::>() + .flat_map(|proof| proof.into_iter_nodes()) + .collect::>() .into_iter() .collect(); @@ -89,7 +104,17 @@ impl StorageProof { self, root: H::Out, ) -> Result>> { - crate::encode_compact::>(self, root) + let db = self.into_memory_db(); + crate::encode_compact::, crate::MemoryDB>(&db, &root) + } + + /// Encode as a compact proof with default trie layout. + pub fn to_compact_proof( + &self, + root: H::Out, + ) -> Result>> { + let db = self.to_memory_db(); + crate::encode_compact::, crate::MemoryDB>(&db, &root) } /// Returns the estimated encoded size of the compact proof. @@ -106,6 +131,12 @@ impl StorageProof { impl From for crate::MemoryDB { fn from(proof: StorageProof) -> Self { + From::from(&proof) + } +} + +impl From<&StorageProof> for crate::MemoryDB { + fn from(proof: &StorageProof) -> Self { let mut db = crate::MemoryDB::default(); proof.iter_nodes().for_each(|n| { db.insert(crate::EMPTY_PREFIX, &n); @@ -169,23 +200,3 @@ impl CompactProof { Ok((db, root)) } } - -/// An iterator over trie nodes constructed from a storage proof. The nodes are not guaranteed to -/// be traversed in any particular order. -pub struct StorageProofNodeIterator { - inner: > as IntoIterator>::IntoIter, -} - -impl StorageProofNodeIterator { - fn new(proof: StorageProof) -> Self { - StorageProofNodeIterator { inner: proof.trie_nodes.into_iter() } - } -} - -impl Iterator for StorageProofNodeIterator { - type Item = Vec; - - fn next(&mut self) -> Option { - self.inner.next() - } -} diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index 949f9a6e284eb..d5ae9a43fb1eb 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -20,7 +20,7 @@ //! This uses compact proof from trie crate and extends //! it to substrate specific layout and child trie system. -use crate::{CompactProof, HashDBT, StorageProof, TrieConfiguration, TrieHash, EMPTY_PREFIX}; +use crate::{CompactProof, HashDBT, TrieConfiguration, TrieHash, EMPTY_PREFIX}; use sp_std::{boxed::Box, vec::Vec}; use trie_db::{CError, Trie}; @@ -149,17 +149,17 @@ where /// Then parse all child trie root and compress main trie content first /// then all child trie contents. /// Child trie are ordered by the order of their roots in the top trie. 
-pub fn encode_compact( - proof: StorageProof, - root: TrieHash, +pub fn encode_compact( + partial_db: &DB, + root: &TrieHash, ) -> Result, CError>> where L: TrieConfiguration, + DB: HashDBT + hash_db::HashDBRef, { let mut child_tries = Vec::new(); - let partial_db = proof.into_memory_db(); let mut compact_proof = { - let trie = crate::TrieDBBuilder::::new(&partial_db, &root).build(); + let trie = crate::TrieDBBuilder::::new(partial_db, root).build(); let mut iter = trie.iter()?; @@ -191,13 +191,13 @@ where }; for child_root in child_tries { - if !HashDBT::::contains(&partial_db, &child_root, EMPTY_PREFIX) { + if !HashDBT::::contains(partial_db, &child_root, EMPTY_PREFIX) { // child proof are allowed to be missing (unused root can be included // due to trie structure modification). continue } - let trie = crate::TrieDBBuilder::::new(&partial_db, &child_root).build(); + let trie = crate::TrieDBBuilder::::new(partial_db, &child_root).build(); let child_proof = trie_db::encode_compact::(&trie)?; compact_proof.extend(child_proof); From 65a870ab4a7fff46a307eacc0ae3a8f7f0f61f5e Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Tue, 25 Oct 2022 14:34:20 -0400 Subject: [PATCH 280/348] Make Multisig Pallet Bounded (#12457) * Bounded multisig * just use u32 Co-authored-by: parity-processbot <> --- bin/node/runtime/src/lib.rs | 2 +- frame/multisig/src/benchmarking.rs | 12 +++++------ frame/multisig/src/lib.rs | 32 ++++++++++++++++++++---------- frame/multisig/src/tests.rs | 4 ++-- frame/utility/src/tests.rs | 2 +- 5 files changed, 31 insertions(+), 21 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5ba89e6595e72..a23fe64c90628 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -249,7 +249,7 @@ impl pallet_multisig::Config for Runtime { type Currency = Balances; type DepositBase = DepositBase; type DepositFactor = DepositFactor; - type MaxSignatories = ConstU16<100>; + type MaxSignatories = ConstU32<100>; type WeightInfo = pallet_multisig::weights::SubstrateWeight; } diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index d949414e31cb3..d5faf9ae8ac1a 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -69,7 +69,7 @@ benchmarks! { as_multi_create { // Signatories, need at least 2 total people - let s in 2 .. T::MaxSignatories::get() as u32; + let s in 2 .. T::MaxSignatories::get(); // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; @@ -86,7 +86,7 @@ benchmarks! { as_multi_approve { // Signatories, need at least 3 people (so we don't complete the multisig) - let s in 3 .. T::MaxSignatories::get() as u32; + let s in 3 .. T::MaxSignatories::get(); // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; @@ -110,7 +110,7 @@ benchmarks! { as_multi_complete { // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; + let s in 2 .. T::MaxSignatories::get(); // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; @@ -141,7 +141,7 @@ benchmarks! { approve_as_multi_create { // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; + let s in 2 .. T::MaxSignatories::get(); // Transaction Length, not a component let z = 10_000; let (mut signatories, call) = setup_multi::(s, z)?; @@ -159,7 +159,7 @@ benchmarks! 
{ approve_as_multi_approve { // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; + let s in 2 .. T::MaxSignatories::get(); // Transaction Length, not a component let z = 10_000; let (mut signatories, call) = setup_multi::(s, z)?; @@ -190,7 +190,7 @@ benchmarks! { cancel_as_multi { // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; + let s in 2 .. T::MaxSignatories::get(); // Transaction Length, not a component let z = 10_000; let (mut signatories, call) = setup_multi::(s, z)?; diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index e3031cc830209..34d9124f9b69e 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -51,7 +51,7 @@ pub mod migrations; mod tests; pub mod weights; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{ DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, GetDispatchInfo, @@ -60,7 +60,7 @@ use frame_support::{ ensure, traits::{Currency, Get, ReservableCurrency}, weights::Weight, - RuntimeDebug, + BoundedVec, RuntimeDebug, }; use frame_system::{self as system, RawOrigin}; use scale_info::TypeInfo; @@ -94,7 +94,9 @@ type BalanceOf = /// A global extrinsic index, formed as the extrinsic index within a block, together with that /// block's height. This allows a transaction in which a multisig operation of a particular /// composite was created to be uniquely identified. -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo)] +#[derive( + Copy, Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen, +)] pub struct Timepoint { /// The height of the chain at the point in time. height: BlockNumber, @@ -103,8 +105,12 @@ pub struct Timepoint { } /// An open multisig operation. -#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo)] -pub struct Multisig { +#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug, TypeInfo, MaxEncodedLen)] +#[scale_info(skip_type_params(MaxApprovals))] +pub struct Multisig +where + MaxApprovals: Get, +{ /// The extrinsic when the multisig operation was opened. when: Timepoint, /// The amount held in reserve of the `depositor`, to be returned once the operation ends. @@ -112,7 +118,7 @@ pub struct Multisig { /// The account who opened it (i.e. the first to approve it). depositor: AccountId, /// The approvals achieved so far, including the depositor. Always sorted. - approvals: Vec, + approvals: BoundedVec, } type CallHash = [u8; 32]; @@ -159,7 +165,7 @@ pub mod pallet { /// The maximum amount of signatories allowed in the multisig. #[pallet::constant] - type MaxSignatories: Get; + type MaxSignatories: Get; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; @@ -170,7 +176,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); @@ -182,7 +187,7 @@ pub mod pallet { T::AccountId, Blake2_128Concat, [u8; 32], - Multisig, T::AccountId>, + Multisig, T::AccountId, T::MaxSignatories>, >; #[pallet::error] @@ -601,7 +606,9 @@ impl Pallet { if let Some(pos) = maybe_pos { // Record approval. 
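The change just below is the heart of making the pallet bounded: `approvals` is now a `BoundedVec` capped by `MaxSignatories`, so recording an approval uses `try_insert` and maps a full vector to the existing `TooManySignatories` error instead of growing without limit. A standalone sketch of that pattern with made-up values (not the pallet's code), mirroring the `MaxSignatories = ConstU32<3>` bound used in the tests:

use frame_support::{traits::ConstU32, BoundedVec};

fn record_approval() -> Result<(), &'static str> {
    // A sorted approval list with capacity 3.
    let unbounded: Vec<u64> = vec![1, 3];
    let mut approvals: BoundedVec<u64, ConstU32<3>> =
        unbounded.try_into().map_err(|_| "too many signatories")?;
    // `binary_search` tells us where the new approver belongs (Err = not present yet).
    let pos = approvals.binary_search(&2).err().ok_or("already approved")?;
    // `try_insert` fails instead of exceeding the bound.
    approvals.try_insert(pos, 2).map_err(|_| "too many signatories")?;
    assert_eq!(approvals.into_inner(), vec![1, 2, 3]);
    Ok(())
}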
- m.approvals.insert(pos, who.clone()); + m.approvals + .try_insert(pos, who.clone()) + .map_err(|_| Error::::TooManySignatories)?; >::insert(&id, call_hash, m); Self::deposit_event(Event::MultisigApproval { approving: who, @@ -629,6 +636,9 @@ impl Pallet { T::Currency::reserve(&who, deposit)?; + let initial_approvals = + vec![who.clone()].try_into().map_err(|_| Error::::TooManySignatories)?; + >::insert( &id, call_hash, @@ -636,7 +646,7 @@ impl Pallet { when: Self::timepoint(), deposit, depositor: who.clone(), - approvals: vec![who.clone()], + approvals: initial_approvals, }, ); Self::deposit_event(Event::NewMultisig { approving: who, multisig: id, call_hash }); diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index f753b6f386c56..206e566cf4cb6 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -24,7 +24,7 @@ use super::*; use crate as pallet_multisig; use frame_support::{ assert_noop, assert_ok, parameter_types, - traits::{ConstU16, ConstU32, ConstU64, Contains}, + traits::{ConstU32, ConstU64, Contains}, }; use sp_core::H256; use sp_runtime::{ @@ -107,7 +107,7 @@ impl Config for Test { type Currency = Balances; type DepositBase = ConstU64<1>; type DepositFactor = ConstU64<1>; - type MaxSignatories = ConstU16<3>; + type MaxSignatories = ConstU32<3>; type WeightInfo = (); } diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index ebd8dda81adbc..81cec1c295c30 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -143,7 +143,7 @@ impl pallet_balances::Config for Test { parameter_types! { pub const MultisigDepositBase: u64 = 1; pub const MultisigDepositFactor: u64 = 1; - pub const MaxSignatories: u16 = 3; + pub const MaxSignatories: u32 = 3; } impl example::Config for Test {} From 759148506b6460a0ae6d2422d7b56b6452f0b114 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=BB=A4=E7=8B=90=E4=B8=80=E5=86=B2?= <43949039+anonymousGiga@users.noreply.github.com> Date: Wed, 26 Oct 2022 16:43:48 +0800 Subject: [PATCH 281/348] Fix error during build: failed to run custom build command for sc-network-bitswap (#12494) --- client/network/bitswap/build.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/bitswap/build.rs b/client/network/bitswap/build.rs index bd6222d546851..671277230a774 100644 --- a/client/network/bitswap/build.rs +++ b/client/network/bitswap/build.rs @@ -1,4 +1,4 @@ -const PROTOS: &[&str] = &["bitswap.v1.2.0.proto"]; +const PROTOS: &[&str] = &["src/schema/bitswap.v1.2.0.proto"]; fn main() { prost_build::compile_protos(PROTOS, &["src/schema"]).unwrap(); From 696bdc67af5fd83aca318263dbd405593b043ba3 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 26 Oct 2022 23:08:30 +0200 Subject: [PATCH 282/348] Update `pallet-multisig` benches (#12558) * Typo Signed-off-by: Oliver Tale-Yazdi * ".git/.scripts/bench-bot.sh" pallet dev pallet_multisig * remove functions * ".git/.scripts/bench-bot.sh" pallet dev pallet_multisig Signed-off-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> Co-authored-by: Shawn Tabrizi --- frame/multisig/src/lib.rs | 2 - frame/multisig/src/weights.rs | 249 ++++++++++++--------------- frame/transaction-payment/src/lib.rs | 2 +- frame/utility/src/lib.rs | 2 +- 4 files changed, 110 insertions(+), 145 deletions(-) diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 34d9124f9b69e..ae4efb76335a0 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -370,7 +370,6 @@ pub mod pallet { let z = call.using_encoded(|d| 
d.len()) as u32; T::WeightInfo::as_multi_create(s, z) - .max(T::WeightInfo::as_multi_create_store(s, z)) .max(T::WeightInfo::as_multi_approve(s, z)) .max(T::WeightInfo::as_multi_complete(s, z)) .saturating_add(*max_weight) @@ -434,7 +433,6 @@ pub mod pallet { T::WeightInfo::approve_as_multi_create(s) .max(T::WeightInfo::approve_as_multi_approve(s)) - .max(T::WeightInfo::approve_as_multi_complete(s)) .saturating_add(*max_weight) })] pub fn approve_as_multi( diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index d8d576775408a..de403b4d249e4 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,22 +18,26 @@ //! Autogenerated weights for pallet_multisig //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-10-26, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_multisig // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_multisig +// --chain=dev +// --header=./HEADER-APACHE2 // --output=./frame/multisig/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -46,213 +50,176 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn as_multi_threshold_1(z: u32, ) -> Weight; fn as_multi_create(s: u32, z: u32, ) -> Weight; - fn as_multi_create_store(s: u32, z: u32, ) -> Weight; fn as_multi_approve(s: u32, z: u32, ) -> Weight; - fn as_multi_approve_store(s: u32, z: u32, ) -> Weight; fn as_multi_complete(s: u32, z: u32, ) -> Weight; fn approve_as_multi_create(s: u32, ) -> Weight; fn approve_as_multi_approve(s: u32, ) -> Weight; - fn approve_as_multi_complete(s: u32, ) -> Weight; fn cancel_as_multi(s: u32, ) -> Weight; } /// Weights for pallet_multisig using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - fn as_multi_threshold_1(_z: u32, ) -> Weight { - Weight::from_ref_time(17_537_000 as u64) + /// The range of component `z` is `[0, 10000]`. + fn as_multi_threshold_1(z: u32, ) -> Weight { + // Minimum execution time: 20_283 nanoseconds. + Weight::from_ref_time(20_861_089 as u64) + // Standard Error: 5 + .saturating_add(Weight::from_ref_time(583 as u64).saturating_mul(z as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. 
fn as_multi_create(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(36_535_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(99_000 as u64).saturating_mul(s as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(z as u64)) + // Minimum execution time: 52_699 nanoseconds. + Weight::from_ref_time(40_874_603 as u64) + // Standard Error: 546 + .saturating_add(Weight::from_ref_time(131_727 as u64).saturating_mul(s as u64)) + // Standard Error: 5 + .saturating_add(Weight::from_ref_time(1_537 as u64).saturating_mul(z as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:1) - // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) - fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(39_918_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(95_000 as u64).saturating_mul(s as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - } - // Storage: Multisig Multisigs (r:1 w:1) + /// The range of component `s` is `[3, 100]`. + /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(25_524_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(94_000 as u64).saturating_mul(s as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(z as u64)) + // Minimum execution time: 39_843 nanoseconds. + Weight::from_ref_time(28_912_325 as u64) + // Standard Error: 734 + .saturating_add(Weight::from_ref_time(125_761 as u64).saturating_mul(s as u64)) + // Standard Error: 7 + .saturating_add(Weight::from_ref_time(1_542 as u64).saturating_mul(z as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:1) - fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(39_923_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(91_000 as u64).saturating_mul(s as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - } - // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(45_877_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(146_000 as u64).saturating_mul(s as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + // Minimum execution time: 54_980 nanoseconds. 
+ Weight::from_ref_time(42_087_213 as u64) + // Standard Error: 786 + .saturating_add(Weight::from_ref_time(153_935 as u64).saturating_mul(s as u64)) + // Standard Error: 7 + .saturating_add(Weight::from_ref_time(1_545 as u64).saturating_mul(z as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { - Weight::from_ref_time(34_309_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 37_802 nanoseconds. + Weight::from_ref_time(39_933_623 as u64) + // Standard Error: 1_059 + .saturating_add(Weight::from_ref_time(121_891 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:0) + /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { - Weight::from_ref_time(22_848_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 25_738 nanoseconds. + Weight::from_ref_time(27_676_766 as u64) + // Standard Error: 710 + .saturating_add(Weight::from_ref_time(125_733 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:1) - // Storage: System Account (r:1 w:1) - fn approve_as_multi_complete(s: u32, ) -> Weight { - Weight::from_ref_time(63_239_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(161_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(3 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) - } - // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:1) + /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { - Weight::from_ref_time(51_254_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(118_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + // Minimum execution time: 36_591 nanoseconds. + Weight::from_ref_time(38_707_543 as u64) + // Standard Error: 881 + .saturating_add(Weight::from_ref_time(126_198 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } } // For backwards compatibility and tests impl WeightInfo for () { - fn as_multi_threshold_1(_z: u32, ) -> Weight { - Weight::from_ref_time(17_537_000 as u64) + /// The range of component `z` is `[0, 10000]`. + fn as_multi_threshold_1(z: u32, ) -> Weight { + // Minimum execution time: 20_283 nanoseconds. + Weight::from_ref_time(20_861_089 as u64) + // Standard Error: 5 + .saturating_add(Weight::from_ref_time(583 as u64).saturating_mul(z as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. 
fn as_multi_create(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(36_535_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(99_000 as u64).saturating_mul(s as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(z as u64)) + // Minimum execution time: 52_699 nanoseconds. + Weight::from_ref_time(40_874_603 as u64) + // Standard Error: 546 + .saturating_add(Weight::from_ref_time(131_727 as u64).saturating_mul(s as u64)) + // Standard Error: 5 + .saturating_add(Weight::from_ref_time(1_537 as u64).saturating_mul(z as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:1) - // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) - fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(39_918_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(95_000 as u64).saturating_mul(s as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - } - // Storage: Multisig Multisigs (r:1 w:1) + /// The range of component `s` is `[3, 100]`. + /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(25_524_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(94_000 as u64).saturating_mul(s as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(z as u64)) + // Minimum execution time: 39_843 nanoseconds. + Weight::from_ref_time(28_912_325 as u64) + // Standard Error: 734 + .saturating_add(Weight::from_ref_time(125_761 as u64).saturating_mul(s as u64)) + // Standard Error: 7 + .saturating_add(Weight::from_ref_time(1_542 as u64).saturating_mul(z as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:1) - fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(39_923_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(91_000 as u64).saturating_mul(s as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - } - // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { - Weight::from_ref_time(45_877_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(146_000 as u64).saturating_mul(s as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(z as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + // Minimum execution time: 54_980 nanoseconds. 
+ Weight::from_ref_time(42_087_213 as u64) + // Standard Error: 786 + .saturating_add(Weight::from_ref_time(153_935 as u64).saturating_mul(s as u64)) + // Standard Error: 7 + .saturating_add(Weight::from_ref_time(1_545 as u64).saturating_mul(z as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { - Weight::from_ref_time(34_309_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 37_802 nanoseconds. + Weight::from_ref_time(39_933_623 as u64) + // Standard Error: 1_059 + .saturating_add(Weight::from_ref_time(121_891 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:0) + /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { - Weight::from_ref_time(22_848_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 25_738 nanoseconds. + Weight::from_ref_time(27_676_766 as u64) + // Standard Error: 710 + .saturating_add(Weight::from_ref_time(125_733 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:1) - // Storage: System Account (r:1 w:1) - fn approve_as_multi_complete(s: u32, ) -> Weight { - Weight::from_ref_time(63_239_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(161_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(3 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) - } - // Storage: Multisig Multisigs (r:1 w:1) - // Storage: Multisig Calls (r:1 w:1) + /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { - Weight::from_ref_time(51_254_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(118_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + // Minimum execution time: 36_591 nanoseconds. + Weight::from_ref_time(38_707_543 as u64) + // Standard Error: 881 + .saturating_add(Weight::from_ref_time(126_198 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 5027db41ec098..ce747fa6bd85c 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -658,7 +658,7 @@ where Self(fee) } - /// Returns the tip as being choosen by the transaction sender. + /// Returns the tip as being chosen by the transaction sender. pub fn tip(&self) -> BalanceOf { self.0 } diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 9ae89097a9bc3..544add1da68d3 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -124,7 +124,7 @@ pub mod pallet { // Align the call size to 1KB. 
As we are currently compiling the runtime for native/wasm // the `size_of` of the `Call` can be different. To ensure that this don't leads to // mismatches between native/wasm or to different metadata for the same runtime, we - // algin the call size. The value is choosen big enough to hopefully never reach it. + // algin the call size. The value is chosen big enough to hopefully never reach it. const CALL_ALIGN: u32 = 1024; #[pallet::extra_constants] From 676e35fdfdd2526a2df29efd41e6642046f2ddee Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 27 Oct 2022 10:54:07 +0200 Subject: [PATCH 283/348] [ci] cargo-check-benches against different base branches (#12557) --- scripts/ci/gitlab/pipeline/test.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index b405cfa1eb3a4..8baebb39be6dc 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -68,10 +68,12 @@ cargo-check-benches: - !reference [.rusty-cachier, before_script] - !reference [.pipeline-stopper-vars, script] # merges in the master branch on PRs + - | + export BASE=$(curl -s -H "Authorization: Bearer ${GITHUB_PR_TOKEN}" https://api.github.com/repos/paritytech/substrate/pulls/${$CI_COMMIT_REF_NAME} | jq .base.ref) - if [ $CI_COMMIT_REF_NAME != "master" ]; then - git fetch origin +master:master; + git fetch origin +${BASE}:${BASE}; git fetch origin +$CI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME; - git checkout master; + git checkout ${BASE}; git config user.email "ci@gitlab.parity.io"; git merge $CI_COMMIT_REF_NAME --verbose --no-edit; fi From 6195ef41946017ec860d2d8ad58f9bc18b5797bf Mon Sep 17 00:00:00 2001 From: Malte Kliemann Date: Thu, 27 Oct 2022 17:23:04 +0200 Subject: [PATCH 284/348] Fix typo (#12571) --- frame/membership/README.md | 4 ++-- frame/membership/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/membership/README.md b/frame/membership/README.md index a769be497050e..3499a3f864e48 100644 --- a/frame/membership/README.md +++ b/frame/membership/README.md @@ -1,6 +1,6 @@ # Membership Module -Allows control of membership of a set of `AccountId`s, useful for managing membership of of a +Allows control of membership of a set of `AccountId`s, useful for managing membership of a collective. A prime member may be set. -License: Apache-2.0 \ No newline at end of file +License: Apache-2.0 diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index af2f2ddcf42f0..4191bbcc5d86e 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -17,7 +17,7 @@ //! # Membership Module //! -//! Allows control of membership of a set of `AccountId`s, useful for managing membership of of a +//! Allows control of membership of a set of `AccountId`s, useful for managing membership of a //! collective. A prime member may be set // Ensure we're `no_std` when compiling for Wasm. 
From 83ed732cf34820724240f86e5ac7597552411c53 Mon Sep 17 00:00:00 2001 From: Mrisho Lukamba <69342343+MrishoLukamba@users.noreply.github.com> Date: Fri, 28 Oct 2022 15:16:18 +0300 Subject: [PATCH 285/348] replaced println with log Closes #12338 (#12348) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * replaced println with log Closes #12338 * fixed println * Apply suggestions from code review Co-authored-by: Bastian Köcher * Update utils/frame/benchmarking-cli/src/pallet/command.rs * Apply suggestions from code review * ".git/.scripts/fmt.sh" 1 Co-authored-by: Shawn Tabrizi Co-authored-by: Bastian Köcher Co-authored-by: command-bot <> --- utils/frame/benchmarking-cli/src/pallet/command.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 6e413bdf7e2c5..242f0e685290f 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -40,6 +40,9 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_state_machine::StateMachine; use std::{collections::HashMap, fmt::Debug, fs, sync::Arc, time}; +/// Logging target +const LOG_TARGET: &'static str = "frame::benchmark::pallet"; + /// The inclusive range of a component. #[derive(Serialize, Debug, Clone, Eq, PartialEq)] pub(crate) struct ComponentRange { @@ -242,7 +245,8 @@ impl PalletCmd { let mut component_ranges = HashMap::<(Vec, Vec), Vec>::new(); for (pallet, extrinsic, components) in benchmarks_to_run { - println!( + log::info!( + target: LOG_TARGET, "Starting benchmark: {}::{}", String::from_utf8(pallet.clone()).expect("Encoded from String; qed"), String::from_utf8(extrinsic.clone()).expect("Encoded from String; qed"), @@ -402,7 +406,9 @@ impl PalletCmd { if let Ok(elapsed) = timer.elapsed() { if elapsed >= time::Duration::from_secs(5) { timer = time::SystemTime::now(); - println!( + + log::info!( + target: LOG_TARGET, "Running Benchmark: {}.{}({} args) {}/{} {}/{}", String::from_utf8(pallet.clone()) .expect("Encoded from String; qed"), @@ -492,7 +498,7 @@ impl PalletCmd { if let Some(path) = &self.json_file { fs::write(path, json)?; } else { - println!("{}", json); + print!("{json}"); return Ok(true) } } From 4e1e17cccd499dfe49e8c1bed01957953aa4c839 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 28 Oct 2022 15:25:49 +0200 Subject: [PATCH 286/348] Aura: Adds some compatibility mode to support old chains (#12492) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Aura: Adds some compatibility mode to support old chains In https://github.com/paritytech/substrate/pull/9132 we changed the way how we get the authorities from the runtime. Before this mentioned pr we would call `initialize_block` before calling the authorities runtime function. The problem with this was that when you have a block X that would switch the authority set, it would already be signed by an authority of the new set. This was wrong, as a block should only be signed by the current authority set. As this change is a hard fork, this pr brings back the possibility for users that have a chain running with this old logic to upgrade. 
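For illustration, here is a minimal sketch of constructing the value such a chain would typically hand to both the import queue and the authoring worker through their new `compatibility_mode` fields (see the diff below). The `u32` block number type and the `10_000` cut-off are assumptions made only for this sketch; they are not part of the change itself.

```
use sc_consensus_aura::CompatibilityMode;

// Hypothetical fork point: every node must have upgraded before this block,
// because the authority-set handling hard forks there.
const UNTIL: u32 = 10_000;

// Value passed to `ImportQueueParams::compatibility_mode` and
// `StartAuraParams::compatibility_mode` on a chain launched with the old logic.
fn old_chain_mode() -> CompatibilityMode<u32> {
    CompatibilityMode::UseInitializeBlock { until: UNTIL }
}

// Chains launched after this change simply keep the default.
fn new_chain_mode() -> CompatibilityMode<u32> {
    CompatibilityMode::None
}
```
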
They will need to use: ``` CompatibilityMode::UseInitializeBlock { until: some_block_in_the_future } ``` Using this compatibility mode will make the node behave like the old nodes, aka calling `initialize_block` before doing the actual runtime call to `authorities`. Then when the given `until` block is being build/imported the node switches to the new behaviour of not calling `initialize_block` before. This is a hard fork, so the `until` block should be chosen wisely as a point where all nodes in the network have upgraded. * Fixes * Make docs ready * Update client/consensus/aura/src/lib.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Update client/consensus/aura/src/lib.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Update client/consensus/aura/src/lib.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * FMT Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> --- bin/node-template/node/src/service.rs | 2 + client/consensus/aura/src/import_queue.rs | 84 ++++++++-------- client/consensus/aura/src/lib.rs | 115 +++++++++++++++++++--- client/service/src/client/client.rs | 2 +- primitives/consensus/aura/src/lib.rs | 2 +- 5 files changed, 147 insertions(+), 58 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 96de6e17f3bfd..ee8464688c79c 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -132,6 +132,7 @@ pub fn new_partial( registry: config.prometheus_registry(), check_for_equivocation: Default::default(), telemetry: telemetry.as_ref().map(|x| x.handle()), + compatibility_mode: Default::default(), })?; Ok(sc_service::PartialComponents { @@ -280,6 +281,7 @@ pub fn new_full(mut config: Configuration) -> Result block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), + compatibility_mode: Default::default(), }, )?; diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 90f15fef1b34b..b17feae45897e 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -18,7 +18,9 @@ //! Module implementing the logic for verifying and importing AuRa blocks. 
-use crate::{aura_err, authorities, find_pre_digest, slot_author, AuthorityId, Error}; +use crate::{ + aura_err, authorities, find_pre_digest, slot_author, AuthorityId, CompatibilityMode, Error, +}; use codec::{Codec, Decode, Encode}; use log::{debug, info, trace}; use prometheus_endpoint::Registry; @@ -31,21 +33,15 @@ use sc_consensus_slots::{check_equivocation, CheckedHeader, InherentDataProvider use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_blockchain::{ - well_known_cache_keys::{self, Id as CacheKeyId}, - HeaderBackend, -}; +use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend}; use sp_consensus::Error as ConsensusError; -use sp_consensus_aura::{ - digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi, ConsensusLog, - AURA_ENGINE_ID, -}; +use sp_consensus_aura::{digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi}; use sp_consensus_slots::Slot; use sp_core::{crypto::Pair, ExecutionContext}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _}; use sp_runtime::{ - generic::{BlockId, OpaqueDigestItemId}, - traits::{Block as BlockT, Header}, + generic::BlockId, + traits::{Block as BlockT, Header, NumberFor}, DigestItem, }; use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; @@ -109,32 +105,35 @@ where } /// A verifier for Aura blocks. -pub struct AuraVerifier { +pub struct AuraVerifier { client: Arc, phantom: PhantomData

, create_inherent_data_providers: CIDP, check_for_equivocation: CheckForEquivocation, telemetry: Option, + compatibility_mode: CompatibilityMode, } -impl AuraVerifier { +impl AuraVerifier { pub(crate) fn new( client: Arc, create_inherent_data_providers: CIDP, check_for_equivocation: CheckForEquivocation, telemetry: Option, + compatibility_mode: CompatibilityMode, ) -> Self { Self { client, create_inherent_data_providers, check_for_equivocation, telemetry, + compatibility_mode, phantom: PhantomData, } } } -impl AuraVerifier +impl AuraVerifier where P: Send + Sync + 'static, CIDP: Send, @@ -172,9 +171,9 @@ where } #[async_trait::async_trait] -impl Verifier for AuraVerifier +impl Verifier for AuraVerifier> where - C: ProvideRuntimeApi + Send + Sync + sc_client_api::backend::AuxStore + BlockOf, + C: ProvideRuntimeApi + Send + Sync + sc_client_api::backend::AuxStore, C::Api: BlockBuilderApi + AuraApi> + ApiExt, P: Pair + Send + Sync + 'static, P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, @@ -188,8 +187,13 @@ where ) -> Result<(BlockImportParams, Option)>>), String> { let hash = block.header.hash(); let parent_hash = *block.header.parent_hash(); - let authorities = authorities(self.client.as_ref(), &BlockId::Hash(parent_hash)) - .map_err(|e| format!("Could not fetch authorities at {:?}: {}", parent_hash, e))?; + let authorities = authorities( + self.client.as_ref(), + parent_hash, + *block.header.number(), + &self.compatibility_mode, + ) + .map_err(|e| format!("Could not fetch authorities at {:?}: {}", parent_hash, e))?; let create_inherent_data_providers = self .create_inherent_data_providers @@ -259,28 +263,12 @@ where "pre_header" => ?pre_header, ); - // Look for an authorities-change log. - let maybe_keys = pre_header - .digest() - .logs() - .iter() - .filter_map(|l| { - l.try_to::>>(OpaqueDigestItemId::Consensus( - &AURA_ENGINE_ID, - )) - }) - .find_map(|l| match l { - ConsensusLog::AuthoritiesChange(a) => - Some(vec![(well_known_cache_keys::AUTHORITIES, a.encode())]), - _ => None, - }); - block.header = pre_header; block.post_digests.push(seal); block.fork_choice = Some(ForkChoiceStrategy::LongestChain); block.post_hash = Some(hash); - Ok((block, maybe_keys)) + Ok((block, None)) }, CheckedHeader::Deferred(a, b) => { debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); @@ -323,7 +311,7 @@ impl Default for CheckForEquivocation { } /// Parameters of [`import_queue`]. -pub struct ImportQueueParams<'a, Block, I, C, S, CIDP> { +pub struct ImportQueueParams<'a, Block: BlockT, I, C, S, CIDP> { /// The block import to use. pub block_import: I, /// The justification import. @@ -340,6 +328,10 @@ pub struct ImportQueueParams<'a, Block, I, C, S, CIDP> { pub check_for_equivocation: CheckForEquivocation, /// Telemetry instance used to report telemetry metrics. pub telemetry: Option, + /// Compatibility mode that should be used. + /// + /// If in doubt, use `Default::default()`. + pub compatibility_mode: CompatibilityMode>, } /// Start an import queue for the Aura consensus algorithm. 
@@ -353,6 +345,7 @@ pub fn import_queue( registry, check_for_equivocation, telemetry, + compatibility_mode, }: ImportQueueParams, ) -> Result, sp_consensus::Error> where @@ -377,18 +370,19 @@ where CIDP: CreateInherentDataProviders + Sync + Send + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - let verifier = build_verifier::(BuildVerifierParams { + let verifier = build_verifier::(BuildVerifierParams { client, create_inherent_data_providers, check_for_equivocation, telemetry, + compatibility_mode, }); Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) } /// Parameters of [`build_verifier`]. -pub struct BuildVerifierParams { +pub struct BuildVerifierParams { /// The client to interact with the chain. pub client: Arc, /// Something that can create the inherent data providers. @@ -397,21 +391,27 @@ pub struct BuildVerifierParams { pub check_for_equivocation: CheckForEquivocation, /// Telemetry instance used to report telemetry metrics. pub telemetry: Option, + /// Compatibility mode that should be used. + /// + /// If in doubt, use `Default::default()`. + pub compatibility_mode: CompatibilityMode, } /// Build the [`AuraVerifier`] -pub fn build_verifier( +pub fn build_verifier( BuildVerifierParams { client, create_inherent_data_providers, check_for_equivocation, telemetry, - }: BuildVerifierParams, -) -> AuraVerifier { - AuraVerifier::<_, P, _>::new( + compatibility_mode, + }: BuildVerifierParams, +) -> AuraVerifier { + AuraVerifier::<_, P, _, _>::new( client, create_inherent_data_providers, check_for_equivocation, telemetry, + compatibility_mode, ) } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 734cecca9b30b..50a02726cf56a 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -44,7 +44,7 @@ use sc_consensus_slots::{ SlotInfo, StorageChanges, }; use sc_telemetry::TelemetryHandle; -use sp_api::ProvideRuntimeApi; +use sp_api::{Core, ProvideRuntimeApi}; use sp_application_crypto::{AppKey, AppPublic}; use sp_blockchain::{HeaderBackend, Result as CResult}; use sp_consensus::{BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain}; @@ -74,6 +74,43 @@ pub use sp_consensus_aura::{ type AuthorityId

=

::Public; +/// Run `AURA` in a compatibility mode. +/// +/// This is required for when the chain was launched and later there +/// was a consensus breaking change. +#[derive(Debug, Clone)] +pub enum CompatibilityMode { + /// Don't use any compatibility mode. + None, + /// Call `initialize_block` before doing any runtime calls. + /// + /// Previously the node would execute `initialize_block` before fetchting the authorities + /// from the runtime. This behaviour changed in: + /// + /// By calling `initialize_block` before fetching the authorities, on a block that + /// would enact a new validator set, the block would already be build/sealed by an + /// authority of the new set. With this mode disabled (the default) a block that enacts a new + /// set isn't sealed/built by an authority of the new set, however to make new nodes be able to + /// sync old chains this compatibility mode exists. + UseInitializeBlock { + /// The block number until this compatibility mode should be executed. The first runtime + /// call in the context of the `until` block (importing it/building it) will disable the + /// compatibility mode (i.e. at `until` the default rules will apply). When enabling this + /// compatibility mode the `until` block should be a future block on which all nodes will + /// have upgraded to a release that includes the updated compatibility mode configuration. + /// At `until` block there will be a hard fork when the authority set changes, between the + /// old nodes (running with `initialize_block`, i.e. without the compatibility mode + /// configuration) and the new nodes. + until: N, + }, +} + +impl Default for CompatibilityMode { + fn default() -> Self { + Self::None + } +} + /// Get the slot duration for Aura. pub fn slot_duration(client: &C) -> CResult where @@ -106,7 +143,7 @@ fn slot_author(slot: Slot, authorities: &[AuthorityId

]) -> Option<&A } /// Parameters of [`start_aura`]. -pub struct StartAuraParams { +pub struct StartAuraParams { /// The duration of a slot. pub slot_duration: SlotDuration, /// The client to interact with the chain. @@ -140,6 +177,10 @@ pub struct StartAuraParams { pub max_block_proposal_slot_portion: Option, /// Telemetry instance used to report telemetry metrics. pub telemetry: Option, + /// Compatibility mode that should be used. + /// + /// If in doubt, use `Default::default()`. + pub compatibility_mode: CompatibilityMode, } /// Start the aura worker. The returned future should be run in a futures executor. @@ -159,7 +200,8 @@ pub fn start_aura( block_proposal_slot_portion, max_block_proposal_slot_portion, telemetry, - }: StartAuraParams, + compatibility_mode, + }: StartAuraParams>, ) -> Result, sp_consensus::Error> where P: Pair + Send + Sync, @@ -191,6 +233,7 @@ where telemetry, block_proposal_slot_portion, max_block_proposal_slot_portion, + compatibility_mode, }); Ok(sc_consensus_slots::start_slot_worker( @@ -203,7 +246,7 @@ where } /// Parameters of [`build_aura_worker`]. -pub struct BuildAuraWorkerParams { +pub struct BuildAuraWorkerParams { /// The client to interact with the chain. pub client: Arc, /// The block import. @@ -231,6 +274,10 @@ pub struct BuildAuraWorkerParams { pub max_block_proposal_slot_portion: Option, /// Telemetry instance used to report telemetry metrics. pub telemetry: Option, + /// Compatibility mode that should be used. + /// + /// If in doubt, use `Default::default()`. + pub compatibility_mode: CompatibilityMode, } /// Build the aura worker. @@ -249,7 +296,8 @@ pub fn build_aura_worker( max_block_proposal_slot_portion, telemetry, force_authoring, - }: BuildAuraWorkerParams, + compatibility_mode, + }: BuildAuraWorkerParams>, ) -> impl sc_consensus_slots::SimpleSlotWorker< B, Proposer = PF::Proposer, @@ -286,11 +334,12 @@ where telemetry, block_proposal_slot_portion, max_block_proposal_slot_portion, + compatibility_mode, _key_type: PhantomData::

, } } -struct AuraWorker { +struct AuraWorker { client: Arc, block_import: I, env: E, @@ -302,12 +351,13 @@ struct AuraWorker { block_proposal_slot_portion: SlotProportion, max_block_proposal_slot_portion: Option, telemetry: Option, + compatibility_mode: CompatibilityMode, _key_type: PhantomData

, } #[async_trait::async_trait] impl sc_consensus_slots::SimpleSlotWorker - for AuraWorker + for AuraWorker> where B: BlockT, C: ProvideRuntimeApi + BlockOf + HeaderBackend + Sync, @@ -345,7 +395,12 @@ where header: &B::Header, _slot: Slot, ) -> Result { - authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) + authorities( + self.client.as_ref(), + header.hash(), + *header.number() + 1u32.into(), + &self.compatibility_mode, + ) } fn authorities_len(&self, epoch_data: &Self::AuxData) -> Option { @@ -535,16 +590,42 @@ pub fn find_pre_digest(header: &B::Header) -> Resul pre_digest.ok_or_else(|| aura_err(Error::NoDigestFound)) } -fn authorities(client: &C, at: &BlockId) -> Result, ConsensusError> +fn authorities( + client: &C, + parent_hash: B::Hash, + context_block_number: NumberFor, + compatibility_mode: &CompatibilityMode>, +) -> Result, ConsensusError> where A: Codec + Debug, B: BlockT, - C: ProvideRuntimeApi + BlockOf, + C: ProvideRuntimeApi, C::Api: AuraApi, { - client - .runtime_api() - .authorities(at) + let runtime_api = client.runtime_api(); + + match compatibility_mode { + CompatibilityMode::None => {}, + // Use `initialize_block` until we hit the block that should disable the mode. + CompatibilityMode::UseInitializeBlock { until } => + if *until > context_block_number { + runtime_api + .initialize_block( + &BlockId::Hash(parent_hash), + &B::Header::new( + context_block_number, + Default::default(), + Default::default(), + parent_hash, + Default::default(), + ), + ) + .map_err(|_| sp_consensus::Error::InvalidAuthoritiesSet)?; + }, + } + + runtime_api + .authorities(&BlockId::Hash(parent_hash)) .ok() .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } @@ -631,6 +712,7 @@ mod tests { InherentDataProviders = (InherentDataProvider,), >, >, + u64, >; type AuraPeer = Peer<(), PeersClient>; @@ -660,6 +742,7 @@ mod tests { }), CheckForEquivocation::Yes, None, + CompatibilityMode::None, ) } @@ -749,6 +832,7 @@ mod tests { block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, telemetry: None, + compatibility_mode: CompatibilityMode::None, }) .expect("Starts aura"), ); @@ -769,7 +853,8 @@ mod tests { assert_eq!(client.chain_info().best_number, 0); assert_eq!( - authorities(&client, &BlockId::Hash(client.chain_info().best_hash)).unwrap(), + authorities(&client, client.chain_info().best_hash, 1, &CompatibilityMode::None) + .unwrap(), vec![ Keyring::Alice.public().into(), Keyring::Bob.public().into(), @@ -814,6 +899,7 @@ mod tests { _key_type: PhantomData::, block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, + compatibility_mode: Default::default(), }; let head = Header::new( @@ -866,6 +952,7 @@ mod tests { _key_type: PhantomData::, block_proposal_slot_portion: SlotProportion::new(0.5), max_block_proposal_slot_portion: None, + compatibility_mode: Default::default(), }; let head = client.header(&BlockId::Number(0)).unwrap().unwrap(); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index e4909b89e3f16..eb946436671e4 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -641,7 +641,7 @@ where if state_root != *import_headers.post().state_root() { // State root mismatch when importing state. This should not happen in // safe fast sync mode, but may happen in unsafe mode. 
- warn!("Error imporing state: State root mismatch."); + warn!("Error importing state: State root mismatch."); return Err(Error::InvalidStateRoot) } None diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index 3e47adf0bf92f..2c6a97b934137 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -89,7 +89,7 @@ sp_api::decl_runtime_apis! { /// Currently, only the value provided by this type at genesis will be used. fn slot_duration() -> SlotDuration; - // Return the current set of authorities. + /// Return the current set of authorities. fn authorities() -> Vec; } } From c4d36065764ee23aeb3ccd181c4b6ecea8d2447a Mon Sep 17 00:00:00 2001 From: clangenb <37865735+clangenb@users.noreply.github.com> Date: Fri, 28 Oct 2022 15:38:25 +0200 Subject: [PATCH 287/348] bump ed25519-zebra; fixes `full_crypto` feature flag in `no_std` (#12576) --- Cargo.lock | 6 +++--- primitives/core/Cargo.toml | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6a5b562d0a791..c42772e793e41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1782,15 +1782,15 @@ dependencies = [ [[package]] name = "ed25519-zebra" -version = "3.0.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403ef3e961ab98f0ba902771d29f842058578bb1ce7e3c59dad5a6a93e784c69" +checksum = "7c24f403d068ad0b359e577a77f92392118be3f3c927538f2bb544a5ecd828c6" dependencies = [ "curve25519-dalek 3.0.2", + "hashbrown 0.12.3", "hex", "rand_core 0.6.2", "sha2 0.9.8", - "thiserror", "zeroize", ] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index b7bc6dfdce496..dcb61b33f347f 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -48,7 +48,7 @@ bitflags = "1.3" # full crypto array-bytes = { version = "4.1", optional = true } -ed25519-zebra = { version = "3.0.0", default-features = false, optional = true} +ed25519-zebra = { version = "3.1.0", default-features = false, optional = true } blake2 = { version = "0.10.4", default-features = false, optional = true } schnorrkel = { version = "0.9.1", features = [ "preaudit_deprecated", @@ -99,7 +99,7 @@ std = [ "serde", "blake2/std", "array-bytes", - "ed25519-zebra", + "ed25519-zebra/std", "base58", "substrate-bip39", "tiny-bip39", From 547e856831650525bbd4372fb659fcefd6ddf1d8 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Fri, 28 Oct 2022 17:18:09 +0200 Subject: [PATCH 288/348] Utility: add more tests for batch/batchAll/forceBatch (#12506) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Utility: add more tests for batch/batchAll/forceBatch * remove unnecessary * batch works with council * add more tests * better call examples * shorter code * Update frame/utility/src/lib.rs Co-authored-by: Bastian Köcher * Update frame/utility/src/lib.rs Co-authored-by: Bastian Köcher * Update frame/utility/src/lib.rs Co-authored-by: Bastian Köcher * update TestBaseCallFilter * fix * fix? 
* fix Co-authored-by: Bastian Köcher --- Cargo.lock | 2 + frame/utility/Cargo.toml | 2 + frame/utility/src/lib.rs | 18 +-- frame/utility/src/tests.rs | 221 ++++++++++++++++++++++++++++++++++++- 4 files changed, 232 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c42772e793e41..cbf02a3fa6a50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6327,6 +6327,8 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", + "pallet-collective", + "pallet-timestamp", "parity-scale-codec", "scale-info", "sp-core", diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index cbe2892d72dc7..ac4f52c6bb9f3 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -25,6 +25,8 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-collective = { version = "4.0.0-dev", path = "../collective" } +pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } sp-core = { version = "6.0.0", path = "../../primitives/core" } [features] diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 544add1da68d3..41710be930b90 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -164,13 +164,13 @@ pub mod pallet { impl Pallet { /// Send a batch of dispatch calls. /// - /// May be called from any origin. + /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// - /// If origin is root then call are dispatch without checking origin filter. (This includes - /// bypassing `frame_system::Config::BaseCallFilter`). + /// If origin is root then the calls are dispatched without checking origin filter. (This + /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// # /// - Complexity: O(C) where C is the number of calls to be batched. @@ -291,13 +291,13 @@ pub mod pallet { /// Send a batch of dispatch calls and atomically execute them. /// The whole transaction will rollback and fail if any of the calls failed. /// - /// May be called from any origin. + /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// - /// If origin is root then call are dispatch without checking origin filter. (This includes - /// bypassing `frame_system::Config::BaseCallFilter`). + /// If origin is root then the calls are dispatched without checking origin filter. (This + /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// # /// - Complexity: O(C) where C is the number of calls to be batched. @@ -403,13 +403,13 @@ pub mod pallet { /// Send a batch of dispatch calls. /// Unlike `batch`, it allows errors and won't interrupt. /// - /// May be called from any origin. + /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// - /// If origin is root then call are dispatch without checking origin filter. (This includes - /// bypassing `frame_system::Config::BaseCallFilter`). + /// If origin is root then the calls are dispatch without checking origin filter. 
(This + /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// # /// - Complexity: O(C) where C is the number of calls to be batched. diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 81cec1c295c30..c374f5ae21099 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -27,15 +27,18 @@ use frame_support::{ dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable, Pays}, error::BadOrigin, parameter_types, storage, - traits::{ConstU32, ConstU64, Contains}, + traits::{ConstU32, ConstU64, Contains, GenesisBuild}, weights::Weight, }; +use pallet_collective::{EnsureProportionAtLeast, Instance1}; use sp_core::H256; use sp_runtime::{ testing::Header, - traits::{BlakeTwo256, IdentityLookup}, + traits::{BlakeTwo256, Hash, IdentityLookup}, }; +type BlockNumber = u64; + // example module to test behaviors. #[frame_support::pallet] pub mod example { @@ -82,6 +85,42 @@ pub mod example { } } +mod mock_democracy { + pub use pallet::*; + #[frame_support::pallet] + pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + Sized { + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + type ExternalMajorityOrigin: EnsureOrigin; + } + + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn external_propose_majority(origin: OriginFor) -> DispatchResult { + T::ExternalMajorityOrigin::ensure_origin(origin)?; + Self::deposit_event(Event::::ExternalProposed); + Ok(()) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + ExternalProposed, + } + } +} + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -92,9 +131,12 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Call, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Council: pallet_collective::, Utility: utility::{Pallet, Call, Event}, Example: example::{Pallet, Call}, + Democracy: mock_democracy::{Pallet, Call, Event}, } ); @@ -140,10 +182,34 @@ impl pallet_balances::Config for Test { type AccountStore = System; type WeightInfo = (); } + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<3>; + type WeightInfo = (); +} + +const MOTION_DURATION_IN_BLOCKS: BlockNumber = 3; parameter_types! { pub const MultisigDepositBase: u64 = 1; pub const MultisigDepositFactor: u64 = 1; pub const MaxSignatories: u32 = 3; + pub const MotionDuration: BlockNumber = MOTION_DURATION_IN_BLOCKS; + pub const MaxProposals: u32 = 100; + pub const MaxMembers: u32 = 100; +} + +type CouncilCollective = pallet_collective::Instance1; +impl pallet_collective::Config for Test { + type RuntimeOrigin = RuntimeOrigin; + type Proposal = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type MotionDuration = MotionDuration; + type MaxProposals = MaxProposals; + type MaxMembers = MaxMembers; + type DefaultVote = pallet_collective::PrimeDefaultVote; + type WeightInfo = (); } impl example::Config for Test {} @@ -159,10 +225,16 @@ impl Contains for TestBaseCallFilter { RuntimeCall::System(frame_system::Call::remark { .. 
}) => true, // For tests RuntimeCall::Example(_) => true, + // For council origin tests. + RuntimeCall::Democracy(_) => true, _ => false, } } } +impl mock_democracy::Config for Test { + type RuntimeEvent = RuntimeEvent; + type ExternalMajorityOrigin = EnsureProportionAtLeast; +} impl Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeCall = RuntimeCall; @@ -175,6 +247,7 @@ type UtilityCall = crate::Call; use frame_system::Call as SystemCall; use pallet_balances::{Call as BalancesCall, Error as BalancesError}; +use pallet_timestamp::Call as TimestampCall; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -183,6 +256,14 @@ pub fn new_test_ext() -> sp_io::TestExternalities { } .assimilate_storage(&mut t) .unwrap(); + + pallet_collective::GenesisConfig:: { + members: vec![1, 2, 3], + phantom: Default::default(), + } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext @@ -679,3 +760,139 @@ fn none_origin_does_not_work() { assert_noop!(Utility::batch_all(RuntimeOrigin::none(), vec![]), BadOrigin); }) } + +#[test] +fn batch_doesnt_work_with_inherents() { + new_test_ext().execute_with(|| { + // fails because inherents expect the origin to be none. + assert_ok!(Utility::batch( + RuntimeOrigin::signed(1), + vec![RuntimeCall::Timestamp(TimestampCall::set { now: 42 }),] + )); + System::assert_last_event( + utility::Event::BatchInterrupted { + index: 0, + error: frame_system::Error::::CallFiltered.into(), + } + .into(), + ); + }) +} + +#[test] +fn force_batch_doesnt_work_with_inherents() { + new_test_ext().execute_with(|| { + // fails because inherents expect the origin to be none. + assert_ok!(Utility::force_batch( + RuntimeOrigin::root(), + vec![RuntimeCall::Timestamp(TimestampCall::set { now: 42 }),] + )); + System::assert_last_event(utility::Event::BatchCompletedWithErrors.into()); + }) +} + +#[test] +fn batch_all_doesnt_work_with_inherents() { + new_test_ext().execute_with(|| { + let batch_all = RuntimeCall::Utility(UtilityCall::batch_all { + calls: vec![RuntimeCall::Timestamp(TimestampCall::set { now: 42 })], + }); + let info = batch_all.get_dispatch_info(); + + // fails because inherents expect the origin to be none. 
+ assert_noop!( + batch_all.dispatch(RuntimeOrigin::signed(1)), + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some(info.weight), + pays_fee: Pays::Yes + }, + error: frame_system::Error::::CallFiltered.into(), + } + ); + }) +} + +#[test] +fn batch_works_with_council_origin() { + new_test_ext().execute_with(|| { + let proposal = RuntimeCall::Utility(UtilityCall::batch { + calls: vec![RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {})], + }); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + + assert_ok!(Council::propose( + RuntimeOrigin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + + assert_ok!(Council::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Council::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Council::vote(RuntimeOrigin::signed(3), hash, 0, true)); + + System::set_block_number(4); + assert_ok!(Council::close( + RuntimeOrigin::signed(4), + hash, + 0, + proposal_weight, + proposal_len + )); + + System::assert_last_event(RuntimeEvent::Council(pallet_collective::Event::Executed { + proposal_hash: hash, + result: Ok(()), + })); + }) +} + +#[test] +fn force_batch_works_with_council_origin() { + new_test_ext().execute_with(|| { + let proposal = RuntimeCall::Utility(UtilityCall::force_batch { + calls: vec![RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {})], + }); + let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); + let proposal_weight = proposal.get_dispatch_info().weight; + let hash = BlakeTwo256::hash_of(&proposal); + + assert_ok!(Council::propose( + RuntimeOrigin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + + assert_ok!(Council::vote(RuntimeOrigin::signed(1), hash, 0, true)); + assert_ok!(Council::vote(RuntimeOrigin::signed(2), hash, 0, true)); + assert_ok!(Council::vote(RuntimeOrigin::signed(3), hash, 0, true)); + + System::set_block_number(4); + assert_ok!(Council::close( + RuntimeOrigin::signed(4), + hash, + 0, + proposal_weight, + proposal_len + )); + + System::assert_last_event(RuntimeEvent::Council(pallet_collective::Event::Executed { + proposal_hash: hash, + result: Ok(()), + })); + }) +} + +#[test] +fn batch_all_works_with_council_origin() { + new_test_ext().execute_with(|| { + assert_ok!(Utility::batch_all( + RuntimeOrigin::from(pallet_collective::RawOrigin::Members(3, 3)), + vec![RuntimeCall::Democracy(mock_democracy::Call::external_propose_majority {})] + )); + }) +} From be259234bfee056bef970ac372e04a74411c5224 Mon Sep 17 00:00:00 2001 From: Koute Date: Fri, 28 Oct 2022 22:39:59 +0200 Subject: [PATCH 289/348] Treat near-zero intercept values as zero when calculating weights (#12573) * Treat near-zero intercept values as zero when calculating weights * Simplify comparison Co-authored-by: Oliver Tale-Yazdi * Update contract weights Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi --- frame/benchmarking/src/analysis.rs | 20 +- frame/contracts/src/weights.rs | 1708 ++++++++++++++++------------ 2 files changed, 987 insertions(+), 741 deletions(-) diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index db8ee599ef731..0b77a92347d03 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -170,7 +170,8 @@ fn linear_regression( x_vars: usize, ) -> Option<(f64, 
Vec, Vec)> { let (intercept, params, errors) = raw_linear_regression(&xs, &ys, x_vars, true)?; - if intercept > 0.0 { + if intercept >= -0.0001 { + // The intercept is positive, or is effectively zero. return Some((intercept, params, errors[1..].to_vec())) } @@ -748,4 +749,21 @@ mod tests { assert_eq!(extrinsic_time.base, 3_000_000_000); assert_eq!(extrinsic_time.slopes, vec![3_000_000_000]); } + + #[test] + fn intercept_of_a_little_under_zero_is_rounded_up_to_zero() { + // Analytically this should result in an intercept of 0, but + // due to numerical imprecision this will generate an intercept + // equal to roughly -0.0000000000000004440892098500626 + let data = vec![ + benchmark_result(vec![(BenchmarkParameter::n, 1)], 2, 0, 0, 0), + benchmark_result(vec![(BenchmarkParameter::n, 2)], 4, 0, 0, 0), + benchmark_result(vec![(BenchmarkParameter::n, 3)], 6, 0, 0, 0), + ]; + + let extrinsic_time = + Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); + assert_eq!(extrinsic_time.base, 0); + assert_eq!(extrinsic_time.slopes, vec![2000]); + } } diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 46c86b670a7fe..c42cc41a0bd9c 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,25 +18,25 @@ //! Autogenerated weights for pallet_contracts //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-19, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-10-28, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet-contracts // --extrinsic=* +// --heap-pages=4096 // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_contracts -// --chain=dev -// --output=./frame/contracts/src/weights.rs -// --template=./.maintain/frame-weight-template.hbs +// --output=frame/contracts/src/weights.rs +// --header=HEADER-APACHE2 +// --template=.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -167,15 +167,17 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - Weight::from_ref_time(3_108_000 as u64) + // Minimum execution time: 2_904 nanoseconds. + Weight::from_ref_time(3_036_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. 
fn on_initialize_per_trie_key(k: u32, ) -> Weight { - Weight::from_ref_time(15_377_000 as u64) - // Standard Error: 414 - .saturating_add(Weight::from_ref_time(892_761 as u64).saturating_mul(k as u64)) + // Minimum execution time: 14_553 nanoseconds. + Weight::from_ref_time(14_161_469 as u64) + // Standard Error: 593 + .saturating_add(Weight::from_ref_time(894_982 as u64).saturating_mul(k as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) @@ -183,18 +185,21 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(3_127_000 as u64) - // Standard Error: 3_813 - .saturating_add(Weight::from_ref_time(1_379_402 as u64).saturating_mul(q as u64)) + // Minimum execution time: 3_121 nanoseconds. + Weight::from_ref_time(14_262_977 as u64) + // Standard Error: 3_272 + .saturating_add(Weight::from_ref_time(1_232_597 as u64).saturating_mul(q as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(22_849_000 as u64) - // Standard Error: 26 - .saturating_add(Weight::from_ref_time(45_513 as u64).saturating_mul(c as u64)) + // Minimum execution time: 28_625 nanoseconds. + Weight::from_ref_time(31_685_032 as u64) + // Standard Error: 57 + .saturating_add(Weight::from_ref_time(44_024 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -205,9 +210,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(258_275_000 as u64) - // Standard Error: 30 - .saturating_add(Weight::from_ref_time(45_885 as u64).saturating_mul(c as u64)) + // Minimum execution time: 355_167 nanoseconds. + Weight::from_ref_time(330_201_049 as u64) + // Standard Error: 64 + .saturating_add(Weight::from_ref_time(45_952 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -222,11 +228,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(2_115_889_000 as u64) - // Standard Error: 323 - .saturating_add(Weight::from_ref_time(89_541 as u64).saturating_mul(c as u64)) - // Standard Error: 19 - .saturating_add(Weight::from_ref_time(664 as u64).saturating_mul(s as u64)) + // Minimum execution time: 2_165_939 nanoseconds. 
+ Weight::from_ref_time(346_673_842 as u64) + // Standard Error: 73 + .saturating_add(Weight::from_ref_time(107_020 as u64).saturating_mul(c as u64)) + // Standard Error: 4 + .saturating_add(Weight::from_ref_time(1_754 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().writes(9 as u64)) } @@ -239,9 +246,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(211_349_000 as u64) + // Minimum execution time: 262_523 nanoseconds. + Weight::from_ref_time(255_633_789 as u64) // Standard Error: 1 - .saturating_add(Weight::from_ref_time(1_532 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_485 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().writes(7 as u64)) } @@ -251,7 +259,8 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - Weight::from_ref_time(181_771_000 as u64) + // Minimum execution time: 233_124 nanoseconds. + Weight::from_ref_time(233_826_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -261,9 +270,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(55_917_000 as u64) - // Standard Error: 25 - .saturating_add(Weight::from_ref_time(46_148 as u64).saturating_mul(c as u64)) + // Minimum execution time: 61_170 nanoseconds. + Weight::from_ref_time(61_653_502 as u64) + // Standard Error: 47 + .saturating_add(Weight::from_ref_time(45_553 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -272,7 +282,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(37_966_000 as u64) + // Minimum execution time: 37_278 nanoseconds. + Weight::from_ref_time(37_822_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -280,7 +291,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:2 w:2) // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - Weight::from_ref_time(40_963_000 as u64) + // Minimum execution time: 40_329 nanoseconds. + Weight::from_ref_time(41_017_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -291,9 +303,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(247_526_000 as u64) - // Standard Error: 29_505 - .saturating_add(Weight::from_ref_time(35_107_467 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_717 nanoseconds. 
+ Weight::from_ref_time(347_610_656 as u64) + // Standard Error: 37_159 + .saturating_add(Weight::from_ref_time(35_506_783 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -304,9 +317,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(253_782_000 as u64) - // Standard Error: 256_004 - .saturating_add(Weight::from_ref_time(201_621_188 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_158 nanoseconds. + Weight::from_ref_time(289_982_824 as u64) + // Standard Error: 426_543 + .saturating_add(Weight::from_ref_time(202_123_887 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -318,9 +332,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(249_892_000 as u64) - // Standard Error: 234_750 - .saturating_add(Weight::from_ref_time(258_209_168 as u64).saturating_mul(r as u64)) + // Minimum execution time: 349_141 nanoseconds. + Weight::from_ref_time(297_477_406 as u64) + // Standard Error: 403_411 + .saturating_add(Weight::from_ref_time(258_999_975 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -332,9 +347,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(248_674_000 as u64) - // Standard Error: 23_962 - .saturating_add(Weight::from_ref_time(38_319_695 as u64).saturating_mul(r as u64)) + // Minimum execution time: 347_419 nanoseconds. + Weight::from_ref_time(350_048_279 as u64) + // Standard Error: 22_511 + .saturating_add(Weight::from_ref_time(38_799_515 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -345,9 +361,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(244_345_000 as u64) - // Standard Error: 15_429 - .saturating_add(Weight::from_ref_time(14_751_125 as u64).saturating_mul(r as u64)) + // Minimum execution time: 341_634 nanoseconds. + Weight::from_ref_time(344_394_100 as u64) + // Standard Error: 12_563 + .saturating_add(Weight::from_ref_time(14_529_298 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -358,9 +375,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(247_045_000 as u64) - // Standard Error: 25_949 - .saturating_add(Weight::from_ref_time(35_001_004 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_246 nanoseconds. 
+ Weight::from_ref_time(349_406_095 as u64) + // Standard Error: 28_842 + .saturating_add(Weight::from_ref_time(35_066_080 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -371,9 +389,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(247_176_000 as u64) - // Standard Error: 24_500 - .saturating_add(Weight::from_ref_time(34_447_506 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_003 nanoseconds. + Weight::from_ref_time(349_076_713 as u64) + // Standard Error: 30_507 + .saturating_add(Weight::from_ref_time(34_662_539 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -384,10 +403,11 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(247_278_000 as u64) - // Standard Error: 48_613 - .saturating_add(Weight::from_ref_time(108_381_432 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) + // Minimum execution time: 346_063 nanoseconds. + Weight::from_ref_time(350_054_605 as u64) + // Standard Error: 68_711 + .saturating_add(Weight::from_ref_time(107_584_776 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -397,9 +417,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(246_996_000 as u64) - // Standard Error: 24_585 - .saturating_add(Weight::from_ref_time(34_615_777 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_057 nanoseconds. + Weight::from_ref_time(349_489_403 as u64) + // Standard Error: 38_116 + .saturating_add(Weight::from_ref_time(34_750_791 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -410,9 +431,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(247_207_000 as u64) - // Standard Error: 30_691 - .saturating_add(Weight::from_ref_time(34_626_552 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_722 nanoseconds. + Weight::from_ref_time(348_659_357 as u64) + // Standard Error: 33_077 + .saturating_add(Weight::from_ref_time(34_845_216 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -423,9 +445,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(247_538_000 as u64) - // Standard Error: 27_128 - .saturating_add(Weight::from_ref_time(34_277_292 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_805 nanoseconds. 
+ Weight::from_ref_time(347_579_287 as u64) + // Standard Error: 28_498 + .saturating_add(Weight::from_ref_time(34_529_480 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -436,9 +459,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(247_398_000 as u64) - // Standard Error: 28_375 - .saturating_add(Weight::from_ref_time(34_573_437 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_943 nanoseconds. + Weight::from_ref_time(348_941_819 as u64) + // Standard Error: 32_278 + .saturating_add(Weight::from_ref_time(34_580_817 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -450,10 +474,11 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(247_732_000 as u64) - // Standard Error: 42_671 - .saturating_add(Weight::from_ref_time(123_110_886 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) + // Minimum execution time: 345_753 nanoseconds. + Weight::from_ref_time(354_733_779 as u64) + // Standard Error: 71_767 + .saturating_add(Weight::from_ref_time(105_098_275 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -463,9 +488,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(170_547_000 as u64) - // Standard Error: 9_163 - .saturating_add(Weight::from_ref_time(15_589_088 as u64).saturating_mul(r as u64)) + // Minimum execution time: 222_866 nanoseconds. + Weight::from_ref_time(226_212_157 as u64) + // Standard Error: 17_326 + .saturating_add(Weight::from_ref_time(15_544_104 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -476,9 +502,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(247_264_000 as u64) - // Standard Error: 23_913 - .saturating_add(Weight::from_ref_time(32_753_431 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_704 nanoseconds. + Weight::from_ref_time(348_160_999 as u64) + // Standard Error: 31_560 + .saturating_add(Weight::from_ref_time(32_888_428 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -489,9 +516,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(282_721_000 as u64) - // Standard Error: 1_978 - .saturating_add(Weight::from_ref_time(9_625_699 as u64).saturating_mul(n as u64)) + // Minimum execution time: 380_814 nanoseconds. 
+ Weight::from_ref_time(403_973_055 as u64) + // Standard Error: 2_690 + .saturating_add(Weight::from_ref_time(9_597_443 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -502,9 +530,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { - Weight::from_ref_time(244_356_000 as u64) - // Standard Error: 184_002 - .saturating_add(Weight::from_ref_time(2_423_400 as u64).saturating_mul(r as u64)) + // Minimum execution time: 340_930 nanoseconds. + Weight::from_ref_time(342_218_828 as u64) + // Standard Error: 80_578 + .saturating_add(Weight::from_ref_time(1_547_971 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -515,9 +544,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(246_157_000 as u64) - // Standard Error: 324 - .saturating_add(Weight::from_ref_time(188_687 as u64).saturating_mul(n as u64)) + // Minimum execution time: 343_035 nanoseconds. + Weight::from_ref_time(345_347_882 as u64) + // Standard Error: 509 + .saturating_add(Weight::from_ref_time(228_526 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -530,9 +560,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(247_356_000 as u64) - // Standard Error: 232_552 - .saturating_add(Weight::from_ref_time(55_027_099 as u64).saturating_mul(r as u64)) + // Minimum execution time: 344_997 nanoseconds. + Weight::from_ref_time(346_431_655 as u64) + // Standard Error: 79_924 + .saturating_add(Weight::from_ref_time(53_530_044 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -546,10 +577,11 @@ impl WeightInfo for SubstrateWeight { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(247_069_000 as u64) - // Standard Error: 59_538 - .saturating_add(Weight::from_ref_time(128_858_347 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) + // Minimum execution time: 345_926 nanoseconds. + Weight::from_ref_time(351_620_774 as u64) + // Standard Error: 69_858 + .saturating_add(Weight::from_ref_time(130_846_895 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -559,9 +591,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(244_659_000 as u64) - // Standard Error: 70_677 - .saturating_add(Weight::from_ref_time(239_930_533 as u64).saturating_mul(r as u64)) + // Minimum execution time: 341_582 nanoseconds. 
+ Weight::from_ref_time(353_141_157 as u64) + // Standard Error: 99_764 + .saturating_add(Weight::from_ref_time(231_149_152 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -573,11 +606,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(1_198_350_000 as u64) - // Standard Error: 2_049_556 - .saturating_add(Weight::from_ref_time(70_072_141 as u64).saturating_mul(t as u64)) - // Standard Error: 496_378 - .saturating_add(Weight::from_ref_time(35_566_752 as u64).saturating_mul(n as u64)) + // Minimum execution time: 1_269_467 nanoseconds. + Weight::from_ref_time(566_371_969 as u64) + // Standard Error: 435_714 + .saturating_add(Weight::from_ref_time(180_441_805 as u64).saturating_mul(t as u64)) + // Standard Error: 119_668 + .saturating_add(Weight::from_ref_time(70_111_002 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -590,18 +624,20 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(177_240_000 as u64) - // Standard Error: 18_991 - .saturating_add(Weight::from_ref_time(26_377_453 as u64).saturating_mul(r as u64)) + // Minimum execution time: 231_528 nanoseconds. + Weight::from_ref_time(235_676_870 as u64) + // Standard Error: 20_851 + .saturating_add(Weight::from_ref_time(26_215_920 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(247_795_000 as u64) - // Standard Error: 349_627 - .saturating_add(Weight::from_ref_time(411_421_066 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_509 nanoseconds. + Weight::from_ref_time(301_173_874 as u64) + // Standard Error: 436_884 + .saturating_add(Weight::from_ref_time(415_194_325 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -610,31 +646,34 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(405_147_000 as u64) - // Standard Error: 1_074_466 - .saturating_add(Weight::from_ref_time(120_331_835 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(6 as u64)) - .saturating_add(T::DbWeight::get().writes((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 500_452 nanoseconds. 
+ Weight::from_ref_time(641_506_544 as u64) + // Standard Error: 1_308_973 + .saturating_add(Weight::from_ref_time(98_769_541 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(52 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(50 as u64)) + .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(403_403_000 as u64) - // Standard Error: 890_083 - .saturating_add(Weight::from_ref_time(88_023_518 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(6 as u64)) - .saturating_add(T::DbWeight::get().writes((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 500_941 nanoseconds. + Weight::from_ref_time(610_446_049 as u64) + // Standard Error: 1_018_173 + .saturating_add(Weight::from_ref_time(65_568_627 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(51 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(49 as u64)) + .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(247_929_000 as u64) - // Standard Error: 311_780 - .saturating_add(Weight::from_ref_time(400_904_526 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_820 nanoseconds. + Weight::from_ref_time(302_335_076 as u64) + // Standard Error: 472_676 + .saturating_add(Weight::from_ref_time(395_286_593 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -643,20 +682,22 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(372_378_000 as u64) - // Standard Error: 1_007_061 - .saturating_add(Weight::from_ref_time(92_326_546 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(4 as u64)) - .saturating_add(T::DbWeight::get().writes((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 457_830 nanoseconds. + Weight::from_ref_time(582_524_868 as u64) + // Standard Error: 1_161_813 + .saturating_add(Weight::from_ref_time(67_291_419 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(51 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(48 as u64)) + .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. 
fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(250_165_000 as u64) - // Standard Error: 300_205 - .saturating_add(Weight::from_ref_time(339_092_950 as u64).saturating_mul(r as u64)) + // Minimum execution time: 348_249 nanoseconds. + Weight::from_ref_time(317_329_583 as u64) + // Standard Error: 400_112 + .saturating_add(Weight::from_ref_time(331_362_971 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -664,19 +705,21 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(352_873_000 as u64) - // Standard Error: 908_425 - .saturating_add(Weight::from_ref_time(176_951_688 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 446_902 nanoseconds. + Weight::from_ref_time(556_989_117 as u64) + // Standard Error: 1_029_959 + .saturating_add(Weight::from_ref_time(159_623_361 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(51 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(248_980_000 as u64) - // Standard Error: 295_923 - .saturating_add(Weight::from_ref_time(306_145_709 as u64).saturating_mul(r as u64)) + // Minimum execution time: 347_064 nanoseconds. + Weight::from_ref_time(316_879_171 as u64) + // Standard Error: 355_083 + .saturating_add(Weight::from_ref_time(304_763_264 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -684,19 +727,21 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(340_418_000 as u64) - // Standard Error: 749_537 - .saturating_add(Weight::from_ref_time(80_040_174 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(7 as u64)) - .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 435_275 nanoseconds. + Weight::from_ref_time(527_900_488 as u64) + // Standard Error: 872_763 + .saturating_add(Weight::from_ref_time(60_604_211 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(51 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(250_254_000 as u64) - // Standard Error: 328_900 - .saturating_add(Weight::from_ref_time(424_144_552 as u64).saturating_mul(r as u64)) + // Minimum execution time: 348_671 nanoseconds. 
+ Weight::from_ref_time(306_307_186 as u64) + // Standard Error: 438_525 + .saturating_add(Weight::from_ref_time(422_575_502 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -705,13 +750,14 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(377_849_000 as u64) - // Standard Error: 1_130_582 - .saturating_add(Weight::from_ref_time(188_240_273 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(8 as u64)) - .saturating_add(T::DbWeight::get().reads((15 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(5 as u64)) - .saturating_add(T::DbWeight::get().writes((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 470_344 nanoseconds. + Weight::from_ref_time(613_380_073 as u64) + // Standard Error: 1_333_864 + .saturating_add(Weight::from_ref_time(164_398_286 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(51 as u64)) + .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().writes(48 as u64)) + .saturating_add(T::DbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -720,12 +766,13 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(249_701_000 as u64) - // Standard Error: 449_896 - .saturating_add(Weight::from_ref_time(1_370_712_846 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) + // Minimum execution time: 348_312 nanoseconds. + Weight::from_ref_time(303_017_886 as u64) + // Standard Error: 568_589 + .saturating_add(Weight::from_ref_time(1_351_508_682 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) .saturating_add(T::DbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) @@ -735,10 +782,11 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { - Weight::from_ref_time(251_015_000 as u64) - // Standard Error: 7_081_378 - .saturating_add(Weight::from_ref_time(17_258_935_295 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) + // Minimum execution time: 348_997 nanoseconds. + Weight::from_ref_time(349_975_000 as u64) + // Standard Error: 5_689_469 + .saturating_add(Weight::from_ref_time(24_991_501_544 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((160 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((160 as u64).saturating_mul(r as u64))) @@ -750,9 +798,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_delegate_call(r: u32, ) -> Weight { - Weight::from_ref_time(250_660_000 as u64) - // Standard Error: 6_402_989 - .saturating_add(Weight::from_ref_time(17_044_780_887 as u64).saturating_mul(r as u64)) + // Minimum execution time: 348_751 nanoseconds. + Weight::from_ref_time(349_330_000 as u64) + // Standard Error: 7_125_421 + .saturating_add(Weight::from_ref_time(24_917_646_052 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((150 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -766,11 +815,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(11_920_718_000 as u64) - // Standard Error: 11_123_062 - .saturating_add(Weight::from_ref_time(642_326_761 as u64).saturating_mul(t as u64)) - // Standard Error: 9_490 - .saturating_add(Weight::from_ref_time(8_845_752 as u64).saturating_mul(c as u64)) + // Minimum execution time: 16_275_711 nanoseconds. + Weight::from_ref_time(15_203_241_602 as u64) + // Standard Error: 3_582_276 + .saturating_add(Weight::from_ref_time(1_179_848_168 as u64).saturating_mul(t as u64)) + // Standard Error: 5_371 + .saturating_add(Weight::from_ref_time(9_712_484 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(167 as u64)) .saturating_add(T::DbWeight::get().reads((81 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(163 as u64)) @@ -785,12 +835,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { - Weight::from_ref_time(253_804_000 as u64) - // Standard Error: 19_742_198 - .saturating_add(Weight::from_ref_time(22_250_412_835 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) + // Minimum execution time: 353_168 nanoseconds. + Weight::from_ref_time(353_901_000 as u64) + // Standard Error: 18_701_989 + .saturating_add(Weight::from_ref_time(30_028_672_644 as u64).saturating_mul(r as u64)) + .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().reads((400 as u64).saturating_mul(r as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes(5 as u64)) .saturating_add(T::DbWeight::get().writes((400 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:81 w:81) @@ -803,9 +854,10 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(13_921_591_000 as u64) - // Standard Error: 22_601 - .saturating_add(Weight::from_ref_time(125_945_348 as u64).saturating_mul(s as u64)) + // Minimum execution time: 18_226_984 nanoseconds. + Weight::from_ref_time(18_032_466_983 as u64) + // Standard Error: 75_544 + .saturating_add(Weight::from_ref_time(121_087_538 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(249 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(247 as u64)) @@ -818,9 +870,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. 
fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(247_842_000 as u64) - // Standard Error: 219_853 - .saturating_add(Weight::from_ref_time(58_013_100 as u64).saturating_mul(r as u64)) + // Minimum execution time: 344_334 nanoseconds. + Weight::from_ref_time(345_939_404 as u64) + // Standard Error: 98_994 + .saturating_add(Weight::from_ref_time(58_015_295 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -831,9 +884,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(305_607_000 as u64) - // Standard Error: 71_234 - .saturating_add(Weight::from_ref_time(323_093_184 as u64).saturating_mul(n as u64)) + // Minimum execution time: 402_492 nanoseconds. + Weight::from_ref_time(402_789_000 as u64) + // Standard Error: 54_174 + .saturating_add(Weight::from_ref_time(324_036_889 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -844,9 +898,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(247_351_000 as u64) - // Standard Error: 271_656 - .saturating_add(Weight::from_ref_time(74_344_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 343_493 nanoseconds. + Weight::from_ref_time(345_116_214 as u64) + // Standard Error: 90_515 + .saturating_add(Weight::from_ref_time(71_282_485 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -857,9 +912,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(315_626_000 as u64) - // Standard Error: 56_955 - .saturating_add(Weight::from_ref_time(246_316_261 as u64).saturating_mul(n as u64)) + // Minimum execution time: 415_650 nanoseconds. + Weight::from_ref_time(415_894_000 as u64) + // Standard Error: 58_077 + .saturating_add(Weight::from_ref_time(248_416_317 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -870,9 +926,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(246_887_000 as u64) - // Standard Error: 286_822 - .saturating_add(Weight::from_ref_time(52_242_599 as u64).saturating_mul(r as u64)) + // Minimum execution time: 343_890 nanoseconds. + Weight::from_ref_time(345_542_924 as u64) + // Standard Error: 87_564 + .saturating_add(Weight::from_ref_time(47_361_375 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -883,9 +940,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(294_818_000 as u64) - // Standard Error: 52_394 - .saturating_add(Weight::from_ref_time(96_353_967 as u64).saturating_mul(n as u64)) + // Minimum execution time: 391_629 nanoseconds. + Weight::from_ref_time(392_793_000 as u64) + // Standard Error: 51_120 + .saturating_add(Weight::from_ref_time(99_288_467 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -896,9 +954,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(245_334_000 as u64) - // Standard Error: 303_979 - .saturating_add(Weight::from_ref_time(50_180_800 as u64).saturating_mul(r as u64)) + // Minimum execution time: 341_701 nanoseconds. + Weight::from_ref_time(343_318_489 as u64) + // Standard Error: 103_756 + .saturating_add(Weight::from_ref_time(47_301_910 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -909,9 +968,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(288_995_000 as u64) - // Standard Error: 51_161 - .saturating_add(Weight::from_ref_time(96_331_905 as u64).saturating_mul(n as u64)) + // Minimum execution time: 389_898 nanoseconds. + Weight::from_ref_time(390_489_000 as u64) + // Standard Error: 52_301 + .saturating_add(Weight::from_ref_time(99_344_068 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -922,9 +982,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(249_935_000 as u64) - // Standard Error: 662_188 - .saturating_add(Weight::from_ref_time(3_025_889_600 as u64).saturating_mul(r as u64)) + // Minimum execution time: 348_789 nanoseconds. + Weight::from_ref_time(350_451_620 as u64) + // Standard Error: 195_939 + .saturating_add(Weight::from_ref_time(2_962_830_479 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -935,9 +996,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(248_094_000 as u64) - // Standard Error: 535_446 - .saturating_add(Weight::from_ref_time(2_086_979_500 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_537 nanoseconds. + Weight::from_ref_time(347_926_836 as u64) + // Standard Error: 110_557 + .saturating_add(Weight::from_ref_time(2_058_656_763 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -949,319 +1011,371 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. 
fn seal_set_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(248_254_000 as u64) - // Standard Error: 1_340_955 - .saturating_add(Weight::from_ref_time(1_065_939_603 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_672 nanoseconds. + Weight::from_ref_time(347_628_000 as u64) + // Standard Error: 2_742_329 + .saturating_add(Weight::from_ref_time(1_370_383_386 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) - .saturating_add(T::DbWeight::get().reads((150 as u64).saturating_mul(r as u64))) + .saturating_add(T::DbWeight::get().reads((225 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((150 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(70_705_000 as u64) - // Standard Error: 7_175 - .saturating_add(Weight::from_ref_time(983_586 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_029 nanoseconds. + Weight::from_ref_time(118_800_774 as u64) + // Standard Error: 4_234 + .saturating_add(Weight::from_ref_time(866_719 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(69_659_000 as u64) - // Standard Error: 1_140 - .saturating_add(Weight::from_ref_time(3_004_307 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_145 nanoseconds. + Weight::from_ref_time(118_644_813 as u64) + // Standard Error: 2_692 + .saturating_add(Weight::from_ref_time(2_874_276 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(69_621_000 as u64) - // Standard Error: 2_871 - .saturating_add(Weight::from_ref_time(2_715_451 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_142 nanoseconds. + Weight::from_ref_time(118_474_161 as u64) + // Standard Error: 6_997 + .saturating_add(Weight::from_ref_time(2_769_076 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(69_459_000 as u64) - // Standard Error: 3_284 - .saturating_add(Weight::from_ref_time(2_581_283 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_020 nanoseconds. + Weight::from_ref_time(118_499_103 as u64) + // Standard Error: 1_266 + .saturating_add(Weight::from_ref_time(2_351_397 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_558_000 as u64) - // Standard Error: 2_509 - .saturating_add(Weight::from_ref_time(2_906_689 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_008 nanoseconds. + Weight::from_ref_time(118_398_803 as u64) + // Standard Error: 750 + .saturating_add(Weight::from_ref_time(2_480_061 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(69_407_000 as u64) - // Standard Error: 1_697 - .saturating_add(Weight::from_ref_time(1_702_534 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_046 nanoseconds. + Weight::from_ref_time(118_332_903 as u64) + // Standard Error: 234 + .saturating_add(Weight::from_ref_time(1_426_355 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_477_000 as u64) - // Standard Error: 1_733 - .saturating_add(Weight::from_ref_time(2_192_641 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_998 nanoseconds. + Weight::from_ref_time(117_910_870 as u64) + // Standard Error: 4_629 + .saturating_add(Weight::from_ref_time(1_992_697 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(69_424_000 as u64) - // Standard Error: 2_096 - .saturating_add(Weight::from_ref_time(2_435_708 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_998 nanoseconds. + Weight::from_ref_time(118_059_139 as u64) + // Standard Error: 1_985 + .saturating_add(Weight::from_ref_time(2_164_252 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(72_883_000 as u64) - // Standard Error: 209 - .saturating_add(Weight::from_ref_time(6_874 as u64).saturating_mul(e as u64)) + // Minimum execution time: 121_282 nanoseconds. + Weight::from_ref_time(121_483_289 as u64) + // Standard Error: 57 + .saturating_add(Weight::from_ref_time(4_013 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(69_422_000 as u64) - // Standard Error: 5_030 - .saturating_add(Weight::from_ref_time(7_638_087 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_128 nanoseconds. + Weight::from_ref_time(118_869_764 as u64) + // Standard Error: 8_890 + .saturating_add(Weight::from_ref_time(6_496_684 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(83_136_000 as u64) - // Standard Error: 7_751 - .saturating_add(Weight::from_ref_time(9_543_944 as u64).saturating_mul(r as u64)) + // Minimum execution time: 131_764 nanoseconds. + Weight::from_ref_time(133_009_514 as u64) + // Standard Error: 9_697 + .saturating_add(Weight::from_ref_time(8_310_784 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(93_281_000 as u64) - // Standard Error: 1_967 - .saturating_add(Weight::from_ref_time(596_591 as u64).saturating_mul(p as u64)) + // Minimum execution time: 141_100 nanoseconds. + Weight::from_ref_time(142_282_540 as u64) + // Standard Error: 867 + .saturating_add(Weight::from_ref_time(536_814 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(69_702_000 as u64) - // Standard Error: 1_112 - .saturating_add(Weight::from_ref_time(1_094_685 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_535 nanoseconds. + Weight::from_ref_time(119_017_162 as u64) + // Standard Error: 1_175 + .saturating_add(Weight::from_ref_time(911_141 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(69_720_000 as u64) - // Standard Error: 1_768 - .saturating_add(Weight::from_ref_time(1_045_163 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_472 nanoseconds. 
+ Weight::from_ref_time(118_790_737 as u64) + // Standard Error: 758 + .saturating_add(Weight::from_ref_time(962_148 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(69_716_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_541_622 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_551 nanoseconds. + Weight::from_ref_time(119_070_827 as u64) + // Standard Error: 2_411 + .saturating_add(Weight::from_ref_time(1_367_622 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(72_315_000 as u64) - // Standard Error: 1_081 - .saturating_add(Weight::from_ref_time(1_646_437 as u64).saturating_mul(r as u64)) + // Minimum execution time: 120_958 nanoseconds. + Weight::from_ref_time(121_649_052 as u64) + // Standard Error: 1_557 + .saturating_add(Weight::from_ref_time(1_447_477 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(72_256_000 as u64) - // Standard Error: 1_749 - .saturating_add(Weight::from_ref_time(1_664_891 as u64).saturating_mul(r as u64)) + // Minimum execution time: 120_866 nanoseconds. + Weight::from_ref_time(121_040_400 as u64) + // Standard Error: 6_203 + .saturating_add(Weight::from_ref_time(1_548_715 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(69_616_000 as u64) - // Standard Error: 1_303 - .saturating_add(Weight::from_ref_time(1_017_254 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_076 nanoseconds. + Weight::from_ref_time(118_381_010 as u64) + // Standard Error: 1_814 + .saturating_add(Weight::from_ref_time(938_699 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(70_943_000 as u64) - // Standard Error: 120_139 - .saturating_add(Weight::from_ref_time(183_262_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_030 nanoseconds. + Weight::from_ref_time(120_331_261 as u64) + // Standard Error: 333_364 + .saturating_add(Weight::from_ref_time(227_636_638 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(70_953_000 as u64) - // Standard Error: 2_591 - .saturating_add(Weight::from_ref_time(1_458_584 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_056 nanoseconds. + Weight::from_ref_time(118_554_799 as u64) + // Standard Error: 1_269 + .saturating_add(Weight::from_ref_time(1_331_717 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(69_439_000 as u64) - // Standard Error: 1_822 - .saturating_add(Weight::from_ref_time(1_486_431 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_009 nanoseconds. + Weight::from_ref_time(118_678_537 as u64) + // Standard Error: 1_699 + .saturating_add(Weight::from_ref_time(1_328_153 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(69_366_000 as u64) - // Standard Error: 2_674 - .saturating_add(Weight::from_ref_time(1_506_755 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_031 nanoseconds. + Weight::from_ref_time(118_478_616 as u64) + // Standard Error: 1_533 + .saturating_add(Weight::from_ref_time(1_335_254 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(69_386_000 as u64) - // Standard Error: 3_040 - .saturating_add(Weight::from_ref_time(1_509_928 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_910 nanoseconds. + Weight::from_ref_time(118_471_672 as u64) + // Standard Error: 1_275 + .saturating_add(Weight::from_ref_time(1_345_140 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(69_432_000 as u64) - // Standard Error: 1_875 - .saturating_add(Weight::from_ref_time(1_439_336 as u64).saturating_mul(r as u64)) + // Minimum execution time: 119_504 nanoseconds. + Weight::from_ref_time(118_940_207 as u64) + // Standard Error: 2_512 + .saturating_add(Weight::from_ref_time(1_307_954 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(69_488_000 as u64) - // Standard Error: 1_654 - .saturating_add(Weight::from_ref_time(1_429_318 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_009 nanoseconds. + Weight::from_ref_time(118_366_251 as u64) + // Standard Error: 1_116 + .saturating_add(Weight::from_ref_time(1_322_232 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(69_414_000 as u64) - // Standard Error: 2_773 - .saturating_add(Weight::from_ref_time(1_499_116 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_020 nanoseconds. + Weight::from_ref_time(118_222_792 as u64) + // Standard Error: 1_973 + .saturating_add(Weight::from_ref_time(1_336_896 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(69_427_000 as u64) - // Standard Error: 2_404 - .saturating_add(Weight::from_ref_time(1_969_697 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_038 nanoseconds. + Weight::from_ref_time(118_757_016 as u64) + // Standard Error: 7_932 + .saturating_add(Weight::from_ref_time(1_867_807 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(70_793_000 as u64) - // Standard Error: 1_765 - .saturating_add(Weight::from_ref_time(1_924_169 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_041 nanoseconds. + Weight::from_ref_time(118_717_403 as u64) + // Standard Error: 8_010 + .saturating_add(Weight::from_ref_time(1_868_876 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(69_551_000 as u64) - // Standard Error: 3_955 - .saturating_add(Weight::from_ref_time(1_988_825 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_057 nanoseconds. 
+ Weight::from_ref_time(118_560_106 as u64) + // Standard Error: 1_362 + .saturating_add(Weight::from_ref_time(1_869_820 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(69_510_000 as u64) - // Standard Error: 3_797 - .saturating_add(Weight::from_ref_time(1_976_317 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_045 nanoseconds. + Weight::from_ref_time(118_318_194 as u64) + // Standard Error: 179 + .saturating_add(Weight::from_ref_time(1_873_670 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(69_399_000 as u64) - // Standard Error: 3_255 - .saturating_add(Weight::from_ref_time(1_975_054 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_000 nanoseconds. + Weight::from_ref_time(118_520_606 as u64) + // Standard Error: 3_656 + .saturating_add(Weight::from_ref_time(1_870_762 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(69_425_000 as u64) - // Standard Error: 1_738 - .saturating_add(Weight::from_ref_time(1_959_864 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_035 nanoseconds. + Weight::from_ref_time(118_464_151 as u64) + // Standard Error: 1_073 + .saturating_add(Weight::from_ref_time(1_870_309 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(69_514_000 as u64) - // Standard Error: 9_227 - .saturating_add(Weight::from_ref_time(1_984_750 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_071 nanoseconds. + Weight::from_ref_time(118_470_365 as u64) + // Standard Error: 1_224 + .saturating_add(Weight::from_ref_time(1_870_522 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(69_415_000 as u64) - // Standard Error: 2_052 - .saturating_add(Weight::from_ref_time(1_967_146 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_032 nanoseconds. + Weight::from_ref_time(118_407_669 as u64) + // Standard Error: 769 + .saturating_add(Weight::from_ref_time(1_883_635 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(69_348_000 as u64) - // Standard Error: 3_647 - .saturating_add(Weight::from_ref_time(1_996_141 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_044 nanoseconds. + Weight::from_ref_time(118_324_106 as u64) + // Standard Error: 523 + .saturating_add(Weight::from_ref_time(1_874_728 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(69_448_000 as u64) - // Standard Error: 1_960 - .saturating_add(Weight::from_ref_time(1_956_889 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_111 nanoseconds. + Weight::from_ref_time(118_538_748 as u64) + // Standard Error: 3_498 + .saturating_add(Weight::from_ref_time(1_868_932 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(69_443_000 as u64) - // Standard Error: 20_301 - .saturating_add(Weight::from_ref_time(2_072_285 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_060 nanoseconds. 
+ Weight::from_ref_time(118_421_175 as u64) + // Standard Error: 962 + .saturating_add(Weight::from_ref_time(1_850_713 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(69_458_000 as u64) - // Standard Error: 3_210 - .saturating_add(Weight::from_ref_time(1_970_367 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_050 nanoseconds. + Weight::from_ref_time(118_774_937 as u64) + // Standard Error: 1_717 + .saturating_add(Weight::from_ref_time(1_838_127 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(69_429_000 as u64) - // Standard Error: 3_475 - .saturating_add(Weight::from_ref_time(1_977_800 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_997 nanoseconds. + Weight::from_ref_time(118_626_785 as u64) + // Standard Error: 1_382 + .saturating_add(Weight::from_ref_time(1_842_530 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(69_414_000 as u64) - // Standard Error: 2_659 - .saturating_add(Weight::from_ref_time(2_797_503 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_064 nanoseconds. + Weight::from_ref_time(118_404_203 as u64) + // Standard Error: 2_437 + .saturating_add(Weight::from_ref_time(2_489_569 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(69_460_000 as u64) - // Standard Error: 2_399 - .saturating_add(Weight::from_ref_time(2_646_574 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_949 nanoseconds. + Weight::from_ref_time(118_525_228 as u64) + // Standard Error: 2_231 + .saturating_add(Weight::from_ref_time(2_453_863 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(69_441_000 as u64) - // Standard Error: 2_179 - .saturating_add(Weight::from_ref_time(2_860_283 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_032 nanoseconds. + Weight::from_ref_time(118_311_431 as u64) + // Standard Error: 175 + .saturating_add(Weight::from_ref_time(2_532_745 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(69_401_000 as u64) - // Standard Error: 2_639 - .saturating_add(Weight::from_ref_time(2_582_137 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_987 nanoseconds. + Weight::from_ref_time(118_573_115 as u64) + // Standard Error: 1_814 + .saturating_add(Weight::from_ref_time(2_438_519 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(69_426_000 as u64) - // Standard Error: 2_379 - .saturating_add(Weight::from_ref_time(1_986_578 as u64).saturating_mul(r as u64)) + // Minimum execution time: 120_001 nanoseconds. + Weight::from_ref_time(118_527_518 as u64) + // Standard Error: 1_040 + .saturating_add(Weight::from_ref_time(1_874_359 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(69_599_000 as u64) - // Standard Error: 2_918 - .saturating_add(Weight::from_ref_time(1_991_834 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_051 nanoseconds. 
+ Weight::from_ref_time(118_315_649 as u64)
+ // Standard Error: 163
+ .saturating_add(Weight::from_ref_time(1_851_162 as u64).saturating_mul(r as u64))
 }
 /// The range of component `r` is `[0, 50]`.
 fn instr_i64xor(r: u32, ) -> Weight {
- Weight::from_ref_time(71_142_000 as u64)
- // Standard Error: 2_659
- .saturating_add(Weight::from_ref_time(1_948_956 as u64).saturating_mul(r as u64))
+ // Minimum execution time: 118_038 nanoseconds.
+ Weight::from_ref_time(118_558_677 as u64)
+ // Standard Error: 2_258
+ .saturating_add(Weight::from_ref_time(1_845_713 as u64).saturating_mul(r as u64))
 }
 /// The range of component `r` is `[0, 50]`.
 fn instr_i64shl(r: u32, ) -> Weight {
- Weight::from_ref_time(69_407_000 as u64)
- // Standard Error: 4_100
- .saturating_add(Weight::from_ref_time(2_030_715 as u64).saturating_mul(r as u64))
+ // Minimum execution time: 117_985 nanoseconds.
+ Weight::from_ref_time(118_660_597 as u64)
+ // Standard Error: 5_893
+ .saturating_add(Weight::from_ref_time(1_887_423 as u64).saturating_mul(r as u64))
 }
 /// The range of component `r` is `[0, 50]`.
 fn instr_i64shrs(r: u32, ) -> Weight {
- Weight::from_ref_time(69_534_000 as u64)
- // Standard Error: 2_305
- .saturating_add(Weight::from_ref_time(1_989_346 as u64).saturating_mul(r as u64))
+ // Minimum execution time: 120_101 nanoseconds.
+ Weight::from_ref_time(118_522_424 as u64)
+ // Standard Error: 1_094
+ .saturating_add(Weight::from_ref_time(1_867_495 as u64).saturating_mul(r as u64))
 }
 /// The range of component `r` is `[0, 50]`.
 fn instr_i64shru(r: u32, ) -> Weight {
- Weight::from_ref_time(69_432_000 as u64)
- // Standard Error: 2_747
- .saturating_add(Weight::from_ref_time(2_002_548 as u64).saturating_mul(r as u64))
+ // Minimum execution time: 118_068 nanoseconds.
+ Weight::from_ref_time(119_012_941 as u64)
+ // Standard Error: 9_129
+ .saturating_add(Weight::from_ref_time(1_877_850 as u64).saturating_mul(r as u64))
 }
 /// The range of component `r` is `[0, 50]`.
 fn instr_i64rotl(r: u32, ) -> Weight {
- Weight::from_ref_time(69_465_000 as u64)
- // Standard Error: 9_644
- .saturating_add(Weight::from_ref_time(2_045_830 as u64).saturating_mul(r as u64))
+ // Minimum execution time: 120_105 nanoseconds.
+ Weight::from_ref_time(118_559_474 as u64)
+ // Standard Error: 2_055
+ .saturating_add(Weight::from_ref_time(1_868_662 as u64).saturating_mul(r as u64))
 }
 /// The range of component `r` is `[0, 50]`.
 fn instr_i64rotr(r: u32, ) -> Weight {
- Weight::from_ref_time(69_435_000 as u64)
- // Standard Error: 2_271
- .saturating_add(Weight::from_ref_time(2_001_986 as u64).saturating_mul(r as u64))
+ // Minimum execution time: 118_004 nanoseconds.
+ Weight::from_ref_time(118_370_466 as u64)
+ // Standard Error: 818
+ .saturating_add(Weight::from_ref_time(1_873_459 as u64).saturating_mul(r as u64))
 }
 }
@@ -1269,15 +1383,17 @@ impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
 impl WeightInfo for () {
 // Storage: Contracts DeletionQueue (r:1 w:0)
 fn on_process_deletion_queue_batch() -> Weight {
- Weight::from_ref_time(3_108_000 as u64)
+ // Minimum execution time: 2_904 nanoseconds.
+ Weight::from_ref_time(3_036_000 as u64)
 .saturating_add(RocksDbWeight::get().reads(1 as u64))
 }
 // Storage: Skipped Metadata (r:0 w:0)
 /// The range of component `k` is `[0, 1024]`.
 fn on_initialize_per_trie_key(k: u32, ) -> Weight {
- Weight::from_ref_time(15_377_000 as u64)
- // Standard Error: 414
- .saturating_add(Weight::from_ref_time(892_761 as u64).saturating_mul(k as u64))
+ // Minimum execution time: 14_553 nanoseconds.
+ Weight::from_ref_time(14_161_469 as u64) + // Standard Error: 593 + .saturating_add(Weight::from_ref_time(894_982 as u64).saturating_mul(k as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) @@ -1285,18 +1401,21 @@ impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - Weight::from_ref_time(3_127_000 as u64) - // Standard Error: 3_813 - .saturating_add(Weight::from_ref_time(1_379_402 as u64).saturating_mul(q as u64)) + // Minimum execution time: 3_121 nanoseconds. + Weight::from_ref_time(14_262_977 as u64) + // Standard Error: 3_272 + .saturating_add(Weight::from_ref_time(1_232_597 as u64).saturating_mul(q as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Contracts PristineCode (r:1 w:0) // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - Weight::from_ref_time(22_849_000 as u64) - // Standard Error: 26 - .saturating_add(Weight::from_ref_time(45_513 as u64).saturating_mul(c as u64)) + // Minimum execution time: 28_625 nanoseconds. + Weight::from_ref_time(31_685_032 as u64) + // Standard Error: 57 + .saturating_add(Weight::from_ref_time(44_024 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -1307,9 +1426,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - Weight::from_ref_time(258_275_000 as u64) - // Standard Error: 30 - .saturating_add(Weight::from_ref_time(45_885 as u64).saturating_mul(c as u64)) + // Minimum execution time: 355_167 nanoseconds. + Weight::from_ref_time(330_201_049 as u64) + // Standard Error: 64 + .saturating_add(Weight::from_ref_time(45_952 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1324,11 +1444,12 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - Weight::from_ref_time(2_115_889_000 as u64) - // Standard Error: 323 - .saturating_add(Weight::from_ref_time(89_541 as u64).saturating_mul(c as u64)) - // Standard Error: 19 - .saturating_add(Weight::from_ref_time(664 as u64).saturating_mul(s as u64)) + // Minimum execution time: 2_165_939 nanoseconds. + Weight::from_ref_time(346_673_842 as u64) + // Standard Error: 73 + .saturating_add(Weight::from_ref_time(107_020 as u64).saturating_mul(c as u64)) + // Standard Error: 4 + .saturating_add(Weight::from_ref_time(1_754 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().writes(9 as u64)) } @@ -1341,9 +1462,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - Weight::from_ref_time(211_349_000 as u64) + // Minimum execution time: 262_523 nanoseconds. 
+ Weight::from_ref_time(255_633_789 as u64) // Standard Error: 1 - .saturating_add(Weight::from_ref_time(1_532 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_485 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().writes(7 as u64)) } @@ -1353,7 +1475,8 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - Weight::from_ref_time(181_771_000 as u64) + // Minimum execution time: 233_124 nanoseconds. + Weight::from_ref_time(233_826_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1363,9 +1486,10 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - Weight::from_ref_time(55_917_000 as u64) - // Standard Error: 25 - .saturating_add(Weight::from_ref_time(46_148 as u64).saturating_mul(c as u64)) + // Minimum execution time: 61_170 nanoseconds. + Weight::from_ref_time(61_653_502 as u64) + // Standard Error: 47 + .saturating_add(Weight::from_ref_time(45_553 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1374,7 +1498,8 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - Weight::from_ref_time(37_966_000 as u64) + // Minimum execution time: 37_278 nanoseconds. + Weight::from_ref_time(37_822_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1382,7 +1507,8 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:2 w:2) // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - Weight::from_ref_time(40_963_000 as u64) + // Minimum execution time: 40_329 nanoseconds. + Weight::from_ref_time(41_017_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -1393,9 +1519,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - Weight::from_ref_time(247_526_000 as u64) - // Standard Error: 29_505 - .saturating_add(Weight::from_ref_time(35_107_467 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_717 nanoseconds. + Weight::from_ref_time(347_610_656 as u64) + // Standard Error: 37_159 + .saturating_add(Weight::from_ref_time(35_506_783 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1406,9 +1533,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - Weight::from_ref_time(253_782_000 as u64) - // Standard Error: 256_004 - .saturating_add(Weight::from_ref_time(201_621_188 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_158 nanoseconds. 
+ Weight::from_ref_time(289_982_824 as u64) + // Standard Error: 426_543 + .saturating_add(Weight::from_ref_time(202_123_887 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1420,9 +1548,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(249_892_000 as u64) - // Standard Error: 234_750 - .saturating_add(Weight::from_ref_time(258_209_168 as u64).saturating_mul(r as u64)) + // Minimum execution time: 349_141 nanoseconds. + Weight::from_ref_time(297_477_406 as u64) + // Standard Error: 403_411 + .saturating_add(Weight::from_ref_time(258_999_975 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1434,9 +1563,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(248_674_000 as u64) - // Standard Error: 23_962 - .saturating_add(Weight::from_ref_time(38_319_695 as u64).saturating_mul(r as u64)) + // Minimum execution time: 347_419 nanoseconds. + Weight::from_ref_time(350_048_279 as u64) + // Standard Error: 22_511 + .saturating_add(Weight::from_ref_time(38_799_515 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1447,9 +1577,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - Weight::from_ref_time(244_345_000 as u64) - // Standard Error: 15_429 - .saturating_add(Weight::from_ref_time(14_751_125 as u64).saturating_mul(r as u64)) + // Minimum execution time: 341_634 nanoseconds. + Weight::from_ref_time(344_394_100 as u64) + // Standard Error: 12_563 + .saturating_add(Weight::from_ref_time(14_529_298 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1460,9 +1591,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - Weight::from_ref_time(247_045_000 as u64) - // Standard Error: 25_949 - .saturating_add(Weight::from_ref_time(35_001_004 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_246 nanoseconds. + Weight::from_ref_time(349_406_095 as u64) + // Standard Error: 28_842 + .saturating_add(Weight::from_ref_time(35_066_080 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1473,9 +1605,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - Weight::from_ref_time(247_176_000 as u64) - // Standard Error: 24_500 - .saturating_add(Weight::from_ref_time(34_447_506 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_003 nanoseconds. 
+ Weight::from_ref_time(349_076_713 as u64) + // Standard Error: 30_507 + .saturating_add(Weight::from_ref_time(34_662_539 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1486,10 +1619,11 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - Weight::from_ref_time(247_278_000 as u64) - // Standard Error: 48_613 - .saturating_add(Weight::from_ref_time(108_381_432 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) + // Minimum execution time: 346_063 nanoseconds. + Weight::from_ref_time(350_054_605 as u64) + // Standard Error: 68_711 + .saturating_add(Weight::from_ref_time(107_584_776 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -1499,9 +1633,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - Weight::from_ref_time(246_996_000 as u64) - // Standard Error: 24_585 - .saturating_add(Weight::from_ref_time(34_615_777 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_057 nanoseconds. + Weight::from_ref_time(349_489_403 as u64) + // Standard Error: 38_116 + .saturating_add(Weight::from_ref_time(34_750_791 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1512,9 +1647,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - Weight::from_ref_time(247_207_000 as u64) - // Standard Error: 30_691 - .saturating_add(Weight::from_ref_time(34_626_552 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_722 nanoseconds. + Weight::from_ref_time(348_659_357 as u64) + // Standard Error: 33_077 + .saturating_add(Weight::from_ref_time(34_845_216 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1525,9 +1661,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - Weight::from_ref_time(247_538_000 as u64) - // Standard Error: 27_128 - .saturating_add(Weight::from_ref_time(34_277_292 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_805 nanoseconds. + Weight::from_ref_time(347_579_287 as u64) + // Standard Error: 28_498 + .saturating_add(Weight::from_ref_time(34_529_480 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1538,9 +1675,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - Weight::from_ref_time(247_398_000 as u64) - // Standard Error: 28_375 - .saturating_add(Weight::from_ref_time(34_573_437 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_943 nanoseconds. 
+ Weight::from_ref_time(348_941_819 as u64) + // Standard Error: 32_278 + .saturating_add(Weight::from_ref_time(34_580_817 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1552,10 +1690,11 @@ impl WeightInfo for () { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - Weight::from_ref_time(247_732_000 as u64) - // Standard Error: 42_671 - .saturating_add(Weight::from_ref_time(123_110_886 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) + // Minimum execution time: 345_753 nanoseconds. + Weight::from_ref_time(354_733_779 as u64) + // Standard Error: 71_767 + .saturating_add(Weight::from_ref_time(105_098_275 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -1565,9 +1704,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - Weight::from_ref_time(170_547_000 as u64) - // Standard Error: 9_163 - .saturating_add(Weight::from_ref_time(15_589_088 as u64).saturating_mul(r as u64)) + // Minimum execution time: 222_866 nanoseconds. + Weight::from_ref_time(226_212_157 as u64) + // Standard Error: 17_326 + .saturating_add(Weight::from_ref_time(15_544_104 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1578,9 +1718,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - Weight::from_ref_time(247_264_000 as u64) - // Standard Error: 23_913 - .saturating_add(Weight::from_ref_time(32_753_431 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_704 nanoseconds. + Weight::from_ref_time(348_160_999 as u64) + // Standard Error: 31_560 + .saturating_add(Weight::from_ref_time(32_888_428 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1591,9 +1732,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(282_721_000 as u64) - // Standard Error: 1_978 - .saturating_add(Weight::from_ref_time(9_625_699 as u64).saturating_mul(n as u64)) + // Minimum execution time: 380_814 nanoseconds. + Weight::from_ref_time(403_973_055 as u64) + // Standard Error: 2_690 + .saturating_add(Weight::from_ref_time(9_597_443 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1604,9 +1746,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { - Weight::from_ref_time(244_356_000 as u64) - // Standard Error: 184_002 - .saturating_add(Weight::from_ref_time(2_423_400 as u64).saturating_mul(r as u64)) + // Minimum execution time: 340_930 nanoseconds. 
+ Weight::from_ref_time(342_218_828 as u64) + // Standard Error: 80_578 + .saturating_add(Weight::from_ref_time(1_547_971 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1617,9 +1760,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(246_157_000 as u64) - // Standard Error: 324 - .saturating_add(Weight::from_ref_time(188_687 as u64).saturating_mul(n as u64)) + // Minimum execution time: 343_035 nanoseconds. + Weight::from_ref_time(345_347_882 as u64) + // Standard Error: 509 + .saturating_add(Weight::from_ref_time(228_526 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1632,9 +1776,10 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - Weight::from_ref_time(247_356_000 as u64) - // Standard Error: 232_552 - .saturating_add(Weight::from_ref_time(55_027_099 as u64).saturating_mul(r as u64)) + // Minimum execution time: 344_997 nanoseconds. + Weight::from_ref_time(346_431_655 as u64) + // Standard Error: 79_924 + .saturating_add(Weight::from_ref_time(53_530_044 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1648,10 +1793,11 @@ impl WeightInfo for () { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - Weight::from_ref_time(247_069_000 as u64) - // Standard Error: 59_538 - .saturating_add(Weight::from_ref_time(128_858_347 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) + // Minimum execution time: 345_926 nanoseconds. + Weight::from_ref_time(351_620_774 as u64) + // Standard Error: 69_858 + .saturating_add(Weight::from_ref_time(130_846_895 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: System Account (r:1 w:0) @@ -1661,9 +1807,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - Weight::from_ref_time(244_659_000 as u64) - // Standard Error: 70_677 - .saturating_add(Weight::from_ref_time(239_930_533 as u64).saturating_mul(r as u64)) + // Minimum execution time: 341_582 nanoseconds. + Weight::from_ref_time(353_141_157 as u64) + // Standard Error: 99_764 + .saturating_add(Weight::from_ref_time(231_149_152 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1675,11 +1822,12 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. 
fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - Weight::from_ref_time(1_198_350_000 as u64) - // Standard Error: 2_049_556 - .saturating_add(Weight::from_ref_time(70_072_141 as u64).saturating_mul(t as u64)) - // Standard Error: 496_378 - .saturating_add(Weight::from_ref_time(35_566_752 as u64).saturating_mul(n as u64)) + // Minimum execution time: 1_269_467 nanoseconds. + Weight::from_ref_time(566_371_969 as u64) + // Standard Error: 435_714 + .saturating_add(Weight::from_ref_time(180_441_805 as u64).saturating_mul(t as u64)) + // Standard Error: 119_668 + .saturating_add(Weight::from_ref_time(70_111_002 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1692,18 +1840,20 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - Weight::from_ref_time(177_240_000 as u64) - // Standard Error: 18_991 - .saturating_add(Weight::from_ref_time(26_377_453 as u64).saturating_mul(r as u64)) + // Minimum execution time: 231_528 nanoseconds. + Weight::from_ref_time(235_676_870 as u64) + // Standard Error: 20_851 + .saturating_add(Weight::from_ref_time(26_215_920 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - Weight::from_ref_time(247_795_000 as u64) - // Standard Error: 349_627 - .saturating_add(Weight::from_ref_time(411_421_066 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_509 nanoseconds. + Weight::from_ref_time(301_173_874 as u64) + // Standard Error: 436_884 + .saturating_add(Weight::from_ref_time(415_194_325 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1712,31 +1862,34 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - Weight::from_ref_time(405_147_000 as u64) - // Standard Error: 1_074_466 - .saturating_add(Weight::from_ref_time(120_331_835 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) - .saturating_add(RocksDbWeight::get().writes((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 500_452 nanoseconds. + Weight::from_ref_time(641_506_544 as u64) + // Standard Error: 1_308_973 + .saturating_add(Weight::from_ref_time(98_769_541 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(52 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(50 as u64)) + .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. 
fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - Weight::from_ref_time(403_403_000 as u64) - // Standard Error: 890_083 - .saturating_add(Weight::from_ref_time(88_023_518 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) - .saturating_add(RocksDbWeight::get().writes((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 500_941 nanoseconds. + Weight::from_ref_time(610_446_049 as u64) + // Standard Error: 1_018_173 + .saturating_add(Weight::from_ref_time(65_568_627 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(51 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(49 as u64)) + .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - Weight::from_ref_time(247_929_000 as u64) - // Standard Error: 311_780 - .saturating_add(Weight::from_ref_time(400_904_526 as u64).saturating_mul(r as u64)) + // Minimum execution time: 345_820 nanoseconds. + Weight::from_ref_time(302_335_076 as u64) + // Standard Error: 472_676 + .saturating_add(Weight::from_ref_time(395_286_593 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1745,20 +1898,22 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(372_378_000 as u64) - // Standard Error: 1_007_061 - .saturating_add(Weight::from_ref_time(92_326_546 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(4 as u64)) - .saturating_add(RocksDbWeight::get().writes((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 457_830 nanoseconds. + Weight::from_ref_time(582_524_868 as u64) + // Standard Error: 1_161_813 + .saturating_add(Weight::from_ref_time(67_291_419 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(51 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(48 as u64)) + .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - Weight::from_ref_time(250_165_000 as u64) - // Standard Error: 300_205 - .saturating_add(Weight::from_ref_time(339_092_950 as u64).saturating_mul(r as u64)) + // Minimum execution time: 348_249 nanoseconds. 
+ Weight::from_ref_time(317_329_583 as u64) + // Standard Error: 400_112 + .saturating_add(Weight::from_ref_time(331_362_971 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1766,19 +1921,21 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(352_873_000 as u64) - // Standard Error: 908_425 - .saturating_add(Weight::from_ref_time(176_951_688 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 446_902 nanoseconds. + Weight::from_ref_time(556_989_117 as u64) + // Standard Error: 1_029_959 + .saturating_add(Weight::from_ref_time(159_623_361 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(51 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - Weight::from_ref_time(248_980_000 as u64) - // Standard Error: 295_923 - .saturating_add(Weight::from_ref_time(306_145_709 as u64).saturating_mul(r as u64)) + // Minimum execution time: 347_064 nanoseconds. + Weight::from_ref_time(316_879_171 as u64) + // Standard Error: 355_083 + .saturating_add(Weight::from_ref_time(304_763_264 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1786,19 +1943,21 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(340_418_000 as u64) - // Standard Error: 749_537 - .saturating_add(Weight::from_ref_time(80_040_174 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(7 as u64)) - .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 435_275 nanoseconds. + Weight::from_ref_time(527_900_488 as u64) + // Standard Error: 872_763 + .saturating_add(Weight::from_ref_time(60_604_211 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(51 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - Weight::from_ref_time(250_254_000 as u64) - // Standard Error: 328_900 - .saturating_add(Weight::from_ref_time(424_144_552 as u64).saturating_mul(r as u64)) + // Minimum execution time: 348_671 nanoseconds. 
+ Weight::from_ref_time(306_307_186 as u64) + // Standard Error: 438_525 + .saturating_add(Weight::from_ref_time(422_575_502 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1807,13 +1966,14 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(377_849_000 as u64) - // Standard Error: 1_130_582 - .saturating_add(Weight::from_ref_time(188_240_273 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) - .saturating_add(RocksDbWeight::get().reads((15 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) - .saturating_add(RocksDbWeight::get().writes((15 as u64).saturating_mul(n as u64))) + // Minimum execution time: 470_344 nanoseconds. + Weight::from_ref_time(613_380_073 as u64) + // Standard Error: 1_333_864 + .saturating_add(Weight::from_ref_time(164_398_286 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(51 as u64)) + .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().writes(48 as u64)) + .saturating_add(RocksDbWeight::get().writes((7 as u64).saturating_mul(n as u64))) } // Storage: System Account (r:1 w:0) // Storage: Contracts ContractInfoOf (r:1 w:1) @@ -1822,12 +1982,13 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - Weight::from_ref_time(249_701_000 as u64) - // Standard Error: 449_896 - .saturating_add(Weight::from_ref_time(1_370_712_846 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) + // Minimum execution time: 348_312 nanoseconds. + Weight::from_ref_time(303_017_886 as u64) + // Standard Error: 568_589 + .saturating_add(Weight::from_ref_time(1_351_508_682 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) .saturating_add(RocksDbWeight::get().writes((80 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:1 w:0) @@ -1837,10 +1998,11 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { - Weight::from_ref_time(251_015_000 as u64) - // Standard Error: 7_081_378 - .saturating_add(Weight::from_ref_time(17_258_935_295 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) + // Minimum execution time: 348_997 nanoseconds. + Weight::from_ref_time(349_975_000 as u64) + // Standard Error: 5_689_469 + .saturating_add(Weight::from_ref_time(24_991_501_544 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((160 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((160 as u64).saturating_mul(r as u64))) @@ -1852,9 +2014,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. 
fn seal_delegate_call(r: u32, ) -> Weight { - Weight::from_ref_time(250_660_000 as u64) - // Standard Error: 6_402_989 - .saturating_add(Weight::from_ref_time(17_044_780_887 as u64).saturating_mul(r as u64)) + // Minimum execution time: 348_751 nanoseconds. + Weight::from_ref_time(349_330_000 as u64) + // Standard Error: 7_125_421 + .saturating_add(Weight::from_ref_time(24_917_646_052 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((150 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1868,11 +2031,12 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - Weight::from_ref_time(11_920_718_000 as u64) - // Standard Error: 11_123_062 - .saturating_add(Weight::from_ref_time(642_326_761 as u64).saturating_mul(t as u64)) - // Standard Error: 9_490 - .saturating_add(Weight::from_ref_time(8_845_752 as u64).saturating_mul(c as u64)) + // Minimum execution time: 16_275_711 nanoseconds. + Weight::from_ref_time(15_203_241_602 as u64) + // Standard Error: 3_582_276 + .saturating_add(Weight::from_ref_time(1_179_848_168 as u64).saturating_mul(t as u64)) + // Standard Error: 5_371 + .saturating_add(Weight::from_ref_time(9_712_484 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(167 as u64)) .saturating_add(RocksDbWeight::get().reads((81 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(163 as u64)) @@ -1887,12 +2051,13 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { - Weight::from_ref_time(253_804_000 as u64) - // Standard Error: 19_742_198 - .saturating_add(Weight::from_ref_time(22_250_412_835 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) + // Minimum execution time: 353_168 nanoseconds. + Weight::from_ref_time(353_901_000 as u64) + // Standard Error: 18_701_989 + .saturating_add(Weight::from_ref_time(30_028_672_644 as u64).saturating_mul(r as u64)) + .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().reads((400 as u64).saturating_mul(r as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(5 as u64)) .saturating_add(RocksDbWeight::get().writes((400 as u64).saturating_mul(r as u64))) } // Storage: System Account (r:81 w:81) @@ -1905,9 +2070,10 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - Weight::from_ref_time(13_921_591_000 as u64) - // Standard Error: 22_601 - .saturating_add(Weight::from_ref_time(125_945_348 as u64).saturating_mul(s as u64)) + // Minimum execution time: 18_226_984 nanoseconds. + Weight::from_ref_time(18_032_466_983 as u64) + // Standard Error: 75_544 + .saturating_add(Weight::from_ref_time(121_087_538 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(249 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(247 as u64)) @@ -1920,9 +2086,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. 
fn seal_hash_sha2_256(r: u32, ) -> Weight { - Weight::from_ref_time(247_842_000 as u64) - // Standard Error: 219_853 - .saturating_add(Weight::from_ref_time(58_013_100 as u64).saturating_mul(r as u64)) + // Minimum execution time: 344_334 nanoseconds. + Weight::from_ref_time(345_939_404 as u64) + // Standard Error: 98_994 + .saturating_add(Weight::from_ref_time(58_015_295 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1933,9 +2100,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(305_607_000 as u64) - // Standard Error: 71_234 - .saturating_add(Weight::from_ref_time(323_093_184 as u64).saturating_mul(n as u64)) + // Minimum execution time: 402_492 nanoseconds. + Weight::from_ref_time(402_789_000 as u64) + // Standard Error: 54_174 + .saturating_add(Weight::from_ref_time(324_036_889 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1946,9 +2114,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - Weight::from_ref_time(247_351_000 as u64) - // Standard Error: 271_656 - .saturating_add(Weight::from_ref_time(74_344_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 343_493 nanoseconds. + Weight::from_ref_time(345_116_214 as u64) + // Standard Error: 90_515 + .saturating_add(Weight::from_ref_time(71_282_485 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1959,9 +2128,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(315_626_000 as u64) - // Standard Error: 56_955 - .saturating_add(Weight::from_ref_time(246_316_261 as u64).saturating_mul(n as u64)) + // Minimum execution time: 415_650 nanoseconds. + Weight::from_ref_time(415_894_000 as u64) + // Standard Error: 58_077 + .saturating_add(Weight::from_ref_time(248_416_317 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1972,9 +2142,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - Weight::from_ref_time(246_887_000 as u64) - // Standard Error: 286_822 - .saturating_add(Weight::from_ref_time(52_242_599 as u64).saturating_mul(r as u64)) + // Minimum execution time: 343_890 nanoseconds. + Weight::from_ref_time(345_542_924 as u64) + // Standard Error: 87_564 + .saturating_add(Weight::from_ref_time(47_361_375 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1985,9 +2156,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. 
fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(294_818_000 as u64) - // Standard Error: 52_394 - .saturating_add(Weight::from_ref_time(96_353_967 as u64).saturating_mul(n as u64)) + // Minimum execution time: 391_629 nanoseconds. + Weight::from_ref_time(392_793_000 as u64) + // Standard Error: 51_120 + .saturating_add(Weight::from_ref_time(99_288_467 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1998,9 +2170,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - Weight::from_ref_time(245_334_000 as u64) - // Standard Error: 303_979 - .saturating_add(Weight::from_ref_time(50_180_800 as u64).saturating_mul(r as u64)) + // Minimum execution time: 341_701 nanoseconds. + Weight::from_ref_time(343_318_489 as u64) + // Standard Error: 103_756 + .saturating_add(Weight::from_ref_time(47_301_910 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2011,9 +2184,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - Weight::from_ref_time(288_995_000 as u64) - // Standard Error: 51_161 - .saturating_add(Weight::from_ref_time(96_331_905 as u64).saturating_mul(n as u64)) + // Minimum execution time: 389_898 nanoseconds. + Weight::from_ref_time(390_489_000 as u64) + // Standard Error: 52_301 + .saturating_add(Weight::from_ref_time(99_344_068 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2024,9 +2198,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - Weight::from_ref_time(249_935_000 as u64) - // Standard Error: 662_188 - .saturating_add(Weight::from_ref_time(3_025_889_600 as u64).saturating_mul(r as u64)) + // Minimum execution time: 348_789 nanoseconds. + Weight::from_ref_time(350_451_620 as u64) + // Standard Error: 195_939 + .saturating_add(Weight::from_ref_time(2_962_830_479 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2037,9 +2212,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - Weight::from_ref_time(248_094_000 as u64) - // Standard Error: 535_446 - .saturating_add(Weight::from_ref_time(2_086_979_500 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_537 nanoseconds. + Weight::from_ref_time(347_926_836 as u64) + // Standard Error: 110_557 + .saturating_add(Weight::from_ref_time(2_058_656_763 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2051,318 +2227,370 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. 
fn seal_set_code_hash(r: u32, ) -> Weight { - Weight::from_ref_time(248_254_000 as u64) - // Standard Error: 1_340_955 - .saturating_add(Weight::from_ref_time(1_065_939_603 as u64).saturating_mul(r as u64)) + // Minimum execution time: 346_672 nanoseconds. + Weight::from_ref_time(347_628_000 as u64) + // Standard Error: 2_742_329 + .saturating_add(Weight::from_ref_time(1_370_383_386 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) - .saturating_add(RocksDbWeight::get().reads((150 as u64).saturating_mul(r as u64))) + .saturating_add(RocksDbWeight::get().reads((225 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((150 as u64).saturating_mul(r as u64))) } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - Weight::from_ref_time(70_705_000 as u64) - // Standard Error: 7_175 - .saturating_add(Weight::from_ref_time(983_586 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_029 nanoseconds. + Weight::from_ref_time(118_800_774 as u64) + // Standard Error: 4_234 + .saturating_add(Weight::from_ref_time(866_719 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - Weight::from_ref_time(69_659_000 as u64) - // Standard Error: 1_140 - .saturating_add(Weight::from_ref_time(3_004_307 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_145 nanoseconds. + Weight::from_ref_time(118_644_813 as u64) + // Standard Error: 2_692 + .saturating_add(Weight::from_ref_time(2_874_276 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - Weight::from_ref_time(69_621_000 as u64) - // Standard Error: 2_871 - .saturating_add(Weight::from_ref_time(2_715_451 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_142 nanoseconds. + Weight::from_ref_time(118_474_161 as u64) + // Standard Error: 6_997 + .saturating_add(Weight::from_ref_time(2_769_076 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - Weight::from_ref_time(69_459_000 as u64) - // Standard Error: 3_284 - .saturating_add(Weight::from_ref_time(2_581_283 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_020 nanoseconds. + Weight::from_ref_time(118_499_103 as u64) + // Standard Error: 1_266 + .saturating_add(Weight::from_ref_time(2_351_397 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_558_000 as u64) - // Standard Error: 2_509 - .saturating_add(Weight::from_ref_time(2_906_689 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_008 nanoseconds. + Weight::from_ref_time(118_398_803 as u64) + // Standard Error: 750 + .saturating_add(Weight::from_ref_time(2_480_061 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - Weight::from_ref_time(69_407_000 as u64) - // Standard Error: 1_697 - .saturating_add(Weight::from_ref_time(1_702_534 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_046 nanoseconds. + Weight::from_ref_time(118_332_903 as u64) + // Standard Error: 234 + .saturating_add(Weight::from_ref_time(1_426_355 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_br_if(r: u32, ) -> Weight { - Weight::from_ref_time(69_477_000 as u64) - // Standard Error: 1_733 - .saturating_add(Weight::from_ref_time(2_192_641 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_998 nanoseconds. + Weight::from_ref_time(117_910_870 as u64) + // Standard Error: 4_629 + .saturating_add(Weight::from_ref_time(1_992_697 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - Weight::from_ref_time(69_424_000 as u64) - // Standard Error: 2_096 - .saturating_add(Weight::from_ref_time(2_435_708 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_998 nanoseconds. + Weight::from_ref_time(118_059_139 as u64) + // Standard Error: 1_985 + .saturating_add(Weight::from_ref_time(2_164_252 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - Weight::from_ref_time(72_883_000 as u64) - // Standard Error: 209 - .saturating_add(Weight::from_ref_time(6_874 as u64).saturating_mul(e as u64)) + // Minimum execution time: 121_282 nanoseconds. + Weight::from_ref_time(121_483_289 as u64) + // Standard Error: 57 + .saturating_add(Weight::from_ref_time(4_013 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - Weight::from_ref_time(69_422_000 as u64) - // Standard Error: 5_030 - .saturating_add(Weight::from_ref_time(7_638_087 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_128 nanoseconds. + Weight::from_ref_time(118_869_764 as u64) + // Standard Error: 8_890 + .saturating_add(Weight::from_ref_time(6_496_684 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - Weight::from_ref_time(83_136_000 as u64) - // Standard Error: 7_751 - .saturating_add(Weight::from_ref_time(9_543_944 as u64).saturating_mul(r as u64)) + // Minimum execution time: 131_764 nanoseconds. + Weight::from_ref_time(133_009_514 as u64) + // Standard Error: 9_697 + .saturating_add(Weight::from_ref_time(8_310_784 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - Weight::from_ref_time(93_281_000 as u64) - // Standard Error: 1_967 - .saturating_add(Weight::from_ref_time(596_591 as u64).saturating_mul(p as u64)) + // Minimum execution time: 141_100 nanoseconds. + Weight::from_ref_time(142_282_540 as u64) + // Standard Error: 867 + .saturating_add(Weight::from_ref_time(536_814 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - Weight::from_ref_time(69_702_000 as u64) - // Standard Error: 1_112 - .saturating_add(Weight::from_ref_time(1_094_685 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_535 nanoseconds. + Weight::from_ref_time(119_017_162 as u64) + // Standard Error: 1_175 + .saturating_add(Weight::from_ref_time(911_141 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - Weight::from_ref_time(69_720_000 as u64) - // Standard Error: 1_768 - .saturating_add(Weight::from_ref_time(1_045_163 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_472 nanoseconds. 
+ Weight::from_ref_time(118_790_737 as u64) + // Standard Error: 758 + .saturating_add(Weight::from_ref_time(962_148 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - Weight::from_ref_time(69_716_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_541_622 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_551 nanoseconds. + Weight::from_ref_time(119_070_827 as u64) + // Standard Error: 2_411 + .saturating_add(Weight::from_ref_time(1_367_622 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - Weight::from_ref_time(72_315_000 as u64) - // Standard Error: 1_081 - .saturating_add(Weight::from_ref_time(1_646_437 as u64).saturating_mul(r as u64)) + // Minimum execution time: 120_958 nanoseconds. + Weight::from_ref_time(121_649_052 as u64) + // Standard Error: 1_557 + .saturating_add(Weight::from_ref_time(1_447_477 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - Weight::from_ref_time(72_256_000 as u64) - // Standard Error: 1_749 - .saturating_add(Weight::from_ref_time(1_664_891 as u64).saturating_mul(r as u64)) + // Minimum execution time: 120_866 nanoseconds. + Weight::from_ref_time(121_040_400 as u64) + // Standard Error: 6_203 + .saturating_add(Weight::from_ref_time(1_548_715 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - Weight::from_ref_time(69_616_000 as u64) - // Standard Error: 1_303 - .saturating_add(Weight::from_ref_time(1_017_254 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_076 nanoseconds. + Weight::from_ref_time(118_381_010 as u64) + // Standard Error: 1_814 + .saturating_add(Weight::from_ref_time(938_699 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - Weight::from_ref_time(70_943_000 as u64) - // Standard Error: 120_139 - .saturating_add(Weight::from_ref_time(183_262_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_030 nanoseconds. + Weight::from_ref_time(120_331_261 as u64) + // Standard Error: 333_364 + .saturating_add(Weight::from_ref_time(227_636_638 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - Weight::from_ref_time(70_953_000 as u64) - // Standard Error: 2_591 - .saturating_add(Weight::from_ref_time(1_458_584 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_056 nanoseconds. + Weight::from_ref_time(118_554_799 as u64) + // Standard Error: 1_269 + .saturating_add(Weight::from_ref_time(1_331_717 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - Weight::from_ref_time(69_439_000 as u64) - // Standard Error: 1_822 - .saturating_add(Weight::from_ref_time(1_486_431 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_009 nanoseconds. + Weight::from_ref_time(118_678_537 as u64) + // Standard Error: 1_699 + .saturating_add(Weight::from_ref_time(1_328_153 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. 
fn instr_i64popcnt(r: u32, ) -> Weight { - Weight::from_ref_time(69_366_000 as u64) - // Standard Error: 2_674 - .saturating_add(Weight::from_ref_time(1_506_755 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_031 nanoseconds. + Weight::from_ref_time(118_478_616 as u64) + // Standard Error: 1_533 + .saturating_add(Weight::from_ref_time(1_335_254 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - Weight::from_ref_time(69_386_000 as u64) - // Standard Error: 3_040 - .saturating_add(Weight::from_ref_time(1_509_928 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_910 nanoseconds. + Weight::from_ref_time(118_471_672 as u64) + // Standard Error: 1_275 + .saturating_add(Weight::from_ref_time(1_345_140 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - Weight::from_ref_time(69_432_000 as u64) - // Standard Error: 1_875 - .saturating_add(Weight::from_ref_time(1_439_336 as u64).saturating_mul(r as u64)) + // Minimum execution time: 119_504 nanoseconds. + Weight::from_ref_time(118_940_207 as u64) + // Standard Error: 2_512 + .saturating_add(Weight::from_ref_time(1_307_954 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - Weight::from_ref_time(69_488_000 as u64) - // Standard Error: 1_654 - .saturating_add(Weight::from_ref_time(1_429_318 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_009 nanoseconds. + Weight::from_ref_time(118_366_251 as u64) + // Standard Error: 1_116 + .saturating_add(Weight::from_ref_time(1_322_232 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - Weight::from_ref_time(69_414_000 as u64) - // Standard Error: 2_773 - .saturating_add(Weight::from_ref_time(1_499_116 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_020 nanoseconds. + Weight::from_ref_time(118_222_792 as u64) + // Standard Error: 1_973 + .saturating_add(Weight::from_ref_time(1_336_896 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - Weight::from_ref_time(69_427_000 as u64) - // Standard Error: 2_404 - .saturating_add(Weight::from_ref_time(1_969_697 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_038 nanoseconds. + Weight::from_ref_time(118_757_016 as u64) + // Standard Error: 7_932 + .saturating_add(Weight::from_ref_time(1_867_807 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - Weight::from_ref_time(70_793_000 as u64) - // Standard Error: 1_765 - .saturating_add(Weight::from_ref_time(1_924_169 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_041 nanoseconds. + Weight::from_ref_time(118_717_403 as u64) + // Standard Error: 8_010 + .saturating_add(Weight::from_ref_time(1_868_876 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - Weight::from_ref_time(69_551_000 as u64) - // Standard Error: 3_955 - .saturating_add(Weight::from_ref_time(1_988_825 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_057 nanoseconds. 
+ Weight::from_ref_time(118_560_106 as u64) + // Standard Error: 1_362 + .saturating_add(Weight::from_ref_time(1_869_820 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - Weight::from_ref_time(69_510_000 as u64) - // Standard Error: 3_797 - .saturating_add(Weight::from_ref_time(1_976_317 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_045 nanoseconds. + Weight::from_ref_time(118_318_194 as u64) + // Standard Error: 179 + .saturating_add(Weight::from_ref_time(1_873_670 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - Weight::from_ref_time(69_399_000 as u64) - // Standard Error: 3_255 - .saturating_add(Weight::from_ref_time(1_975_054 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_000 nanoseconds. + Weight::from_ref_time(118_520_606 as u64) + // Standard Error: 3_656 + .saturating_add(Weight::from_ref_time(1_870_762 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - Weight::from_ref_time(69_425_000 as u64) - // Standard Error: 1_738 - .saturating_add(Weight::from_ref_time(1_959_864 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_035 nanoseconds. + Weight::from_ref_time(118_464_151 as u64) + // Standard Error: 1_073 + .saturating_add(Weight::from_ref_time(1_870_309 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - Weight::from_ref_time(69_514_000 as u64) - // Standard Error: 9_227 - .saturating_add(Weight::from_ref_time(1_984_750 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_071 nanoseconds. + Weight::from_ref_time(118_470_365 as u64) + // Standard Error: 1_224 + .saturating_add(Weight::from_ref_time(1_870_522 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - Weight::from_ref_time(69_415_000 as u64) - // Standard Error: 2_052 - .saturating_add(Weight::from_ref_time(1_967_146 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_032 nanoseconds. + Weight::from_ref_time(118_407_669 as u64) + // Standard Error: 769 + .saturating_add(Weight::from_ref_time(1_883_635 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - Weight::from_ref_time(69_348_000 as u64) - // Standard Error: 3_647 - .saturating_add(Weight::from_ref_time(1_996_141 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_044 nanoseconds. + Weight::from_ref_time(118_324_106 as u64) + // Standard Error: 523 + .saturating_add(Weight::from_ref_time(1_874_728 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - Weight::from_ref_time(69_448_000 as u64) - // Standard Error: 1_960 - .saturating_add(Weight::from_ref_time(1_956_889 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_111 nanoseconds. + Weight::from_ref_time(118_538_748 as u64) + // Standard Error: 3_498 + .saturating_add(Weight::from_ref_time(1_868_932 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - Weight::from_ref_time(69_443_000 as u64) - // Standard Error: 20_301 - .saturating_add(Weight::from_ref_time(2_072_285 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_060 nanoseconds. 
+ Weight::from_ref_time(118_421_175 as u64) + // Standard Error: 962 + .saturating_add(Weight::from_ref_time(1_850_713 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - Weight::from_ref_time(69_458_000 as u64) - // Standard Error: 3_210 - .saturating_add(Weight::from_ref_time(1_970_367 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_050 nanoseconds. + Weight::from_ref_time(118_774_937 as u64) + // Standard Error: 1_717 + .saturating_add(Weight::from_ref_time(1_838_127 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - Weight::from_ref_time(69_429_000 as u64) - // Standard Error: 3_475 - .saturating_add(Weight::from_ref_time(1_977_800 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_997 nanoseconds. + Weight::from_ref_time(118_626_785 as u64) + // Standard Error: 1_382 + .saturating_add(Weight::from_ref_time(1_842_530 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - Weight::from_ref_time(69_414_000 as u64) - // Standard Error: 2_659 - .saturating_add(Weight::from_ref_time(2_797_503 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_064 nanoseconds. + Weight::from_ref_time(118_404_203 as u64) + // Standard Error: 2_437 + .saturating_add(Weight::from_ref_time(2_489_569 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - Weight::from_ref_time(69_460_000 as u64) - // Standard Error: 2_399 - .saturating_add(Weight::from_ref_time(2_646_574 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_949 nanoseconds. + Weight::from_ref_time(118_525_228 as u64) + // Standard Error: 2_231 + .saturating_add(Weight::from_ref_time(2_453_863 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - Weight::from_ref_time(69_441_000 as u64) - // Standard Error: 2_179 - .saturating_add(Weight::from_ref_time(2_860_283 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_032 nanoseconds. + Weight::from_ref_time(118_311_431 as u64) + // Standard Error: 175 + .saturating_add(Weight::from_ref_time(2_532_745 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - Weight::from_ref_time(69_401_000 as u64) - // Standard Error: 2_639 - .saturating_add(Weight::from_ref_time(2_582_137 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_987 nanoseconds. + Weight::from_ref_time(118_573_115 as u64) + // Standard Error: 1_814 + .saturating_add(Weight::from_ref_time(2_438_519 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - Weight::from_ref_time(69_426_000 as u64) - // Standard Error: 2_379 - .saturating_add(Weight::from_ref_time(1_986_578 as u64).saturating_mul(r as u64)) + // Minimum execution time: 120_001 nanoseconds. + Weight::from_ref_time(118_527_518 as u64) + // Standard Error: 1_040 + .saturating_add(Weight::from_ref_time(1_874_359 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - Weight::from_ref_time(69_599_000 as u64) - // Standard Error: 2_918 - .saturating_add(Weight::from_ref_time(1_991_834 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_051 nanoseconds. 
+ Weight::from_ref_time(118_315_649 as u64) + // Standard Error: 163 + .saturating_add(Weight::from_ref_time(1_851_162 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - Weight::from_ref_time(71_142_000 as u64) - // Standard Error: 2_659 - .saturating_add(Weight::from_ref_time(1_948_956 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_038 nanoseconds. + Weight::from_ref_time(118_558_677 as u64) + // Standard Error: 2_258 + .saturating_add(Weight::from_ref_time(1_845_713 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - Weight::from_ref_time(69_407_000 as u64) - // Standard Error: 4_100 - .saturating_add(Weight::from_ref_time(2_030_715 as u64).saturating_mul(r as u64)) + // Minimum execution time: 117_985 nanoseconds. + Weight::from_ref_time(118_660_597 as u64) + // Standard Error: 5_893 + .saturating_add(Weight::from_ref_time(1_887_423 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - Weight::from_ref_time(69_534_000 as u64) - // Standard Error: 2_305 - .saturating_add(Weight::from_ref_time(1_989_346 as u64).saturating_mul(r as u64)) + // Minimum execution time: 120_101 nanoseconds. + Weight::from_ref_time(118_522_424 as u64) + // Standard Error: 1_094 + .saturating_add(Weight::from_ref_time(1_867_495 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - Weight::from_ref_time(69_432_000 as u64) - // Standard Error: 2_747 - .saturating_add(Weight::from_ref_time(2_002_548 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_068 nanoseconds. + Weight::from_ref_time(119_012_941 as u64) + // Standard Error: 9_129 + .saturating_add(Weight::from_ref_time(1_877_850 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - Weight::from_ref_time(69_465_000 as u64) - // Standard Error: 9_644 - .saturating_add(Weight::from_ref_time(2_045_830 as u64).saturating_mul(r as u64)) + // Minimum execution time: 120_105 nanoseconds. + Weight::from_ref_time(118_559_474 as u64) + // Standard Error: 2_055 + .saturating_add(Weight::from_ref_time(1_868_662 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - Weight::from_ref_time(69_435_000 as u64) - // Standard Error: 2_271 - .saturating_add(Weight::from_ref_time(2_001_986 as u64).saturating_mul(r as u64)) + // Minimum execution time: 118_004 nanoseconds. 
+ Weight::from_ref_time(118_370_466 as u64) + // Standard Error: 818 + .saturating_add(Weight::from_ref_time(1_873_459 as u64).saturating_mul(r as u64)) } } From e8cbc1e87ed84b1abf3176326a90cff991534b30 Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Sat, 29 Oct 2022 11:22:58 +0200 Subject: [PATCH 290/348] =?UTF-8?q?[Enhancement]=20Convert=20fast-unstake?= =?UTF-8?q?=20to=20use=20StakingInterface,=20decouplin=E2=80=A6=20(#12424)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [Enhancement] Convert fast-unstake to use StakingInterface, decoupling it from Staking * Update primitives/staking/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update primitives/staking/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * fix validator comment * remove todo * ideas from Kian (#12474) Co-authored-by: kianenigma * Rename StakingInterface -> Staking for nomination-pools * Staking fixes * StakingInterface changes * fix fast-unstake * fix nomination-pools * Fix fast-unstake tests * Fix benches for fast-unstake * fix is_unbonding * fix nomination pools * fix node code * add mock comments * fix imports * remove todo * more fixes * more fixes * bench fixes * more fixes * more fixes * import fix * more fixes * more bench fix * refix * refix * Update primitives/staking/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * is_unbonding returns a result * fix * review fixes * more review fixes * more fixes * more fixes * Update frame/fast-unstake/src/benchmarking.rs Co-authored-by: Squirrel * remove redundant CurrencyBalance from nom-pools * remove CB * rephrase * Apply suggestions from code review * Update frame/nomination-pools/src/tests.rs * finish damn renamed * clippy fix * fix Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: kianenigma Co-authored-by: parity-processbot <> Co-authored-by: Squirrel --- bin/node/runtime/src/lib.rs | 6 +- frame/fast-unstake/Cargo.toml | 13 +- frame/fast-unstake/src/benchmarking.rs | 40 ++--- frame/fast-unstake/src/lib.rs | 85 +++++------ frame/fast-unstake/src/mock.rs | 3 +- frame/fast-unstake/src/tests.rs | 14 +- frame/fast-unstake/src/types.rs | 7 +- .../nomination-pools/benchmarking/src/lib.rs | 54 ++++--- .../nomination-pools/benchmarking/src/mock.rs | 3 +- frame/nomination-pools/src/lib.rs | 79 ++++------ frame/nomination-pools/src/mock.rs | 93 ++++++++---- frame/nomination-pools/src/tests.rs | 29 ++-- .../nomination-pools/test-staking/src/mock.rs | 3 +- frame/staking/src/pallet/impls.rs | 107 +++++++++---- frame/staking/src/pallet/mod.rs | 2 +- primitives/staking/src/lib.rs | 141 ++++++++++++------ 16 files changed, 373 insertions(+), 306 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index a23fe64c90628..d49312bd6fe3f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -585,7 +585,8 @@ impl pallet_fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ControlOrigin = frame_system::EnsureRoot; type Deposit = ConstU128<{ DOLLARS }>; - type DepositCurrency = Balances; + type Currency = Balances; + type Staking = Staking; type WeightInfo = (); } @@ -773,11 +774,10 @@ impl pallet_nomination_pools::Config for Runtime { type WeightInfo = (); type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type CurrencyBalance = Balance; type RewardCounter = FixedU128; type BalanceToU256 = 
BalanceToU256; type U256ToBalance = U256ToBalance; - type StakingInterface = pallet_staking::Pallet; + type Staking = Staking; type PostUnbondingPoolsWindow = PostUnbondPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; diff --git a/frame/fast-unstake/Cargo.toml b/frame/fast-unstake/Cargo.toml index 69aeaff35993c..ea1abeb0b48c5 100644 --- a/frame/fast-unstake/Cargo.toml +++ b/frame/fast-unstake/Cargo.toml @@ -24,10 +24,6 @@ sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } sp-staking = { default-features = false, path = "../../primitives/staking" } - -pallet-balances = { default-features = false, path = "../balances" } -pallet-timestamp = { default-features = false, path = "../timestamp" } -pallet-staking = { default-features = false, path = "../staking" } frame-election-provider-support = { default-features = false, path = "../election-provider-support" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } @@ -37,6 +33,10 @@ pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } substrate-test-utils = { version = "4.0.0-dev", path = "../../test-utils" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } +pallet-staking = { path = "../staking" } +pallet-balances = { path = "../balances" } +pallet-timestamp = { path = "../timestamp" } + [features] default = ["std"] @@ -53,9 +53,6 @@ std = [ "sp-runtime/std", "sp-std/std", - "pallet-staking/std", - "pallet-balances/std", - "pallet-timestamp/std", "frame-election-provider-support/std", "frame-benchmarking/std", @@ -63,6 +60,6 @@ std = [ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", - "pallet-staking/runtime-benchmarks", + "sp-staking/runtime-benchmarks" ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs index 8770cc6b64c0d..762a59c96bcfa 100644 --- a/frame/fast-unstake/src/benchmarking.rs +++ b/frame/fast-unstake/src/benchmarking.rs @@ -26,22 +26,15 @@ use frame_support::{ traits::{Currency, EnsureOrigin, Get, Hooks}, }; use frame_system::RawOrigin; -use pallet_staking::Pallet as Staking; -use sp_runtime::traits::{StaticLookup, Zero}; -use sp_staking::EraIndex; +use sp_runtime::traits::Zero; +use sp_staking::{EraIndex, StakingInterface}; use sp_std::prelude::*; const USER_SEED: u32 = 0; const DEFAULT_BACKER_PER_VALIDATOR: u32 = 128; const MAX_VALIDATORS: u32 = 128; -type CurrencyOf = ::Currency; - -fn l( - who: T::AccountId, -) -> <::Lookup as StaticLookup>::Source { - T::Lookup::unlookup(who) -} +type CurrencyOf = ::Currency; fn create_unexposed_nominator() -> T::AccountId { let account = frame_benchmarking::account::("nominator_42", 0, USER_SEED); @@ -53,18 +46,9 @@ fn fund_and_bond_account(account: &T::AccountId) { let stake = CurrencyOf::::minimum_balance() * 100u32.into(); CurrencyOf::::make_free_balance_be(&account, stake * 10u32.into()); - let account_lookup = l::(account.clone()); // bond and nominate ourselves, this will guarantee that we are not backing anyone. 
- assert_ok!(Staking::::bond( - RawOrigin::Signed(account.clone()).into(), - account_lookup.clone(), - stake, - pallet_staking::RewardDestination::Controller, - )); - assert_ok!(Staking::::nominate( - RawOrigin::Signed(account.clone()).into(), - vec![account_lookup] - )); + assert_ok!(T::Staking::bond(account, stake, account)); + assert_ok!(T::Staking::nominate(account, vec![account.clone()])); } pub(crate) fn fast_unstake_events() -> Vec> { @@ -91,13 +75,11 @@ fn setup_staking(v: u32, until: EraIndex) { .map(|s| { let who = frame_benchmarking::account::("nominator", era, s); let value = ed; - pallet_staking::IndividualExposure { who, value } + (who, value) }) .collect::>(); - let exposure = - pallet_staking::Exposure { total: Default::default(), own: Default::default(), others }; validators.iter().for_each(|v| { - Staking::::add_era_stakers(era, v.clone(), exposure.clone()); + T::Staking::add_era_stakers(&era, &v, others.clone()); }); } } @@ -137,13 +119,13 @@ benchmarks! { // on_idle, when we check some number of eras, on_idle_check { // number of eras multiplied by validators in that era. - let x in (::BondingDuration::get() * 1) .. (::BondingDuration::get() * MAX_VALIDATORS); + let x in (T::Staking::bonding_duration() * 1) .. (T::Staking::bonding_duration() * MAX_VALIDATORS); - let v = x / ::BondingDuration::get(); - let u = ::BondingDuration::get(); + let u = T::Staking::bonding_duration(); + let v = x / u; ErasToCheckPerBlock::::put(u); - pallet_staking::CurrentEra::::put(u); + T::Staking::set_current_era(u); // setup staking with v validators and u eras of data (0..=u) setup_staking::(v, u); diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 8fdb7a79dd537..0477bb251aa00 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -80,18 +80,16 @@ macro_rules! log { pub mod pallet { use super::*; use crate::types::*; - use frame_election_provider_support::ElectionProviderBase; use frame_support::{ pallet_prelude::*, traits::{Defensive, ReservableCurrency}, }; - use frame_system::{pallet_prelude::*, RawOrigin}; - use pallet_staking::Pallet as Staking; + use frame_system::pallet_prelude::*; use sp_runtime::{ traits::{Saturating, Zero}, DispatchResult, }; - use sp_staking::EraIndex; + use sp_staking::{EraIndex, StakingInterface}; use sp_std::{prelude::*, vec::Vec}; pub use weights::WeightInfo; @@ -101,7 +99,7 @@ pub mod pallet { pub struct MaxChecking(sp_std::marker::PhantomData); impl frame_support::traits::Get for MaxChecking { fn get() -> u32 { - ::BondingDuration::get() + 1 + T::Staking::bonding_duration() + 1 } } @@ -109,14 +107,14 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + pallet_staking::Config { + pub trait Config: frame_system::Config { /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent> + TryInto>; /// The currency used for deposits. - type DepositCurrency: ReservableCurrency>; + type Currency: ReservableCurrency; /// Deposit to take for unstaking, to make sure we're able to slash the it in order to cover /// the costs of resources on unsuccessful unstake. @@ -125,6 +123,9 @@ pub mod pallet { /// The origin that can control this pallet. type ControlOrigin: frame_support::traits::EnsureOrigin; + /// The access to staking functionality. + type Staking: StakingInterface, AccountId = Self::AccountId>; + /// The weight information of this pallet. 
type WeightInfo: WeightInfo; } @@ -221,28 +222,25 @@ pub mod pallet { let ctrl = ensure_signed(origin)?; ensure!(ErasToCheckPerBlock::::get() != 0, >::CallNotAllowed); - - let ledger = - pallet_staking::Ledger::::get(&ctrl).ok_or(Error::::NotController)?; - ensure!(!Queue::::contains_key(&ledger.stash), Error::::AlreadyQueued); + let stash_account = + T::Staking::stash_by_ctrl(&ctrl).map_err(|_| Error::::NotController)?; + ensure!(!Queue::::contains_key(&stash_account), Error::::AlreadyQueued); ensure!( - Head::::get().map_or(true, |UnstakeRequest { stash, .. }| stash != ledger.stash), + Head::::get() + .map_or(true, |UnstakeRequest { stash, .. }| stash_account != stash), Error::::AlreadyHead ); - // second part of the && is defensive. - ensure!( - ledger.active == ledger.total && ledger.unlocking.is_empty(), - Error::::NotFullyBonded - ); + + ensure!(!T::Staking::is_unbonding(&stash_account)?, Error::::NotFullyBonded); // chill and fully unstake. - Staking::::chill(RawOrigin::Signed(ctrl.clone()).into())?; - Staking::::unbond(RawOrigin::Signed(ctrl).into(), ledger.total)?; + T::Staking::chill(&stash_account)?; + T::Staking::fully_unbond(&stash_account)?; - T::DepositCurrency::reserve(&ledger.stash, T::Deposit::get())?; + T::Currency::reserve(&stash_account, T::Deposit::get())?; // enqueue them. - Queue::::insert(ledger.stash, T::Deposit::get()); + Queue::::insert(stash_account, T::Deposit::get()); Ok(()) } @@ -259,18 +257,18 @@ pub mod pallet { ensure!(ErasToCheckPerBlock::::get() != 0, >::CallNotAllowed); - let stash = pallet_staking::Ledger::::get(&ctrl) - .map(|l| l.stash) - .ok_or(Error::::NotController)?; - ensure!(Queue::::contains_key(&stash), Error::::NotQueued); + let stash_account = + T::Staking::stash_by_ctrl(&ctrl).map_err(|_| Error::::NotController)?; + ensure!(Queue::::contains_key(&stash_account), Error::::NotQueued); ensure!( - Head::::get().map_or(true, |UnstakeRequest { stash, .. }| stash != stash), + Head::::get() + .map_or(true, |UnstakeRequest { stash, .. }| stash_account != stash), Error::::AlreadyHead ); - let deposit = Queue::::take(stash.clone()); + let deposit = Queue::::take(stash_account.clone()); if let Some(deposit) = deposit.defensive() { - let remaining = T::DepositCurrency::unreserve(&stash, deposit); + let remaining = T::Currency::unreserve(&stash_account, deposit); if !remaining.is_zero() { frame_support::defensive!("`not enough balance to unreserve`"); ErasToCheckPerBlock::::put(0); @@ -314,7 +312,7 @@ pub mod pallet { // NOTE: here we're assuming that the number of validators has only ever increased, // meaning that the number of exposures to check is either this per era, or less. - let validator_count = pallet_staking::ValidatorCount::::get(); + let validator_count = T::Staking::desired_validator_count(); // determine the number of eras to check. This is based on both `ErasToCheckPerBlock` // and `remaining_weight` passed on to us from the runtime executive. @@ -330,8 +328,7 @@ pub mod pallet { } } - if <::ElectionProvider as ElectionProviderBase>::ongoing() - { + if T::Staking::election_ongoing() { // NOTE: we assume `ongoing` does not consume any weight. // there is an ongoing election -- we better not do anything. Imagine someone is not // exposed anywhere in the last era, and the snapshot for the election is already @@ -366,8 +363,8 @@ pub mod pallet { ); // the range that we're allowed to check in this round. 
- let current_era = pallet_staking::CurrentEra::::get().unwrap_or_default(); - let bonding_duration = ::BondingDuration::get(); + let current_era = T::Staking::current_era(); + let bonding_duration = T::Staking::bonding_duration(); // prune all the old eras that we don't care about. This will help us keep the bound // of `checked`. checked.retain(|e| *e >= current_era.saturating_sub(bonding_duration)); @@ -401,16 +398,9 @@ pub mod pallet { ); if unchecked_eras_to_check.is_empty() { - // `stash` is not exposed in any era now -- we can let go of them now. - let num_slashing_spans = Staking::::slashing_spans(&stash).iter().count() as u32; + let result = T::Staking::force_unstake(stash.clone()); - let result = pallet_staking::Pallet::::force_unstake( - RawOrigin::Root.into(), - stash.clone(), - num_slashing_spans, - ); - - let remaining = T::DepositCurrency::unreserve(&stash, deposit); + let remaining = T::Currency::unreserve(&stash, deposit); if !remaining.is_zero() { frame_support::defensive!("`not enough balance to unreserve`"); ErasToCheckPerBlock::::put(0); @@ -426,7 +416,7 @@ pub mod pallet { let mut eras_checked = 0u32; let is_exposed = unchecked_eras_to_check.iter().any(|e| { eras_checked.saturating_inc(); - Self::is_exposed_in_era(&stash, e) + T::Staking::is_exposed_in_era(&stash, e) }); log!( @@ -442,7 +432,7 @@ pub mod pallet { // the last 28 eras, have registered yourself to be unstaked, midway being checked, // you are exposed. if is_exposed { - T::DepositCurrency::slash_reserved(&stash, deposit); + T::Currency::slash_reserved(&stash, deposit); log!(info, "slashed {:?} by {:?}", stash, deposit); Self::deposit_event(Event::::Slashed { stash, amount: deposit }); } else { @@ -472,12 +462,5 @@ pub mod pallet { ::WeightInfo::on_idle_check(validator_count * eras_checked) } } - - /// Checks whether an account `staker` has been exposed in an era. - fn is_exposed_in_era(staker: &T::AccountId, era: &EraIndex) -> bool { - pallet_staking::ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { - validator == *staker || exposures.others.iter().any(|i| i.who == *staker) - }) - } } } diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index 71fc2d4ba905a..87e89d90d6304 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -174,7 +174,8 @@ parameter_types! { impl fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Deposit = DepositAmount; - type DepositCurrency = Balances; + type Currency = Balances; + type Staking = Staking; type ControlOrigin = frame_system::EnsureRoot; type WeightInfo = (); } diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs index 6e617fd992028..18a9d59e9da66 100644 --- a/frame/fast-unstake/src/tests.rs +++ b/frame/fast-unstake/src/tests.rs @@ -48,7 +48,7 @@ fn register_insufficient_funds_fails() { use pallet_balances::Error as BalancesError; ExtBuilder::default().build_and_execute(|| { ErasToCheckPerBlock::::put(1); - ::DepositCurrency::make_free_balance_be(&1, 3); + ::Currency::make_free_balance_be(&1, 3); // Controller account registers for fast unstake. assert_noop!( @@ -138,15 +138,15 @@ fn deregister_works() { ExtBuilder::default().build_and_execute(|| { ErasToCheckPerBlock::::put(1); - assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + assert_eq!(::Currency::reserved_balance(&1), 0); // Controller account registers for fast unstake. 
assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(::DepositCurrency::reserved_balance(&1), DepositAmount::get()); + assert_eq!(::Currency::reserved_balance(&1), DepositAmount::get()); // Controller then changes mind and deregisters. assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(2))); - assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + assert_eq!(::Currency::reserved_balance(&1), 0); // Ensure stash no longer exists in the queue. assert_eq!(Queue::::get(1), None); @@ -363,7 +363,7 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given - assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + assert_eq!(::Currency::reserved_balance(&1), 0); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); @@ -371,7 +371,7 @@ mod on_idle { assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10))); - assert_eq!(::DepositCurrency::reserved_balance(&1), DepositAmount::get()); + assert_eq!(::Currency::reserved_balance(&1), DepositAmount::get()); assert_eq!(Queue::::count(), 5); assert_eq!(Head::::get(), None); @@ -411,7 +411,7 @@ mod on_idle { ); assert_eq!(Queue::::count(), 3); - assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + assert_eq!(::Currency::reserved_balance(&1), 0); assert_eq!( fast_unstake_events_since_last_call(), diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index 08b9ab4326eb2..460c9fa4354e5 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -17,6 +17,7 @@ //! Types used in the Fast Unstake pallet. +use crate::Config; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ traits::{Currency, Get}, @@ -26,10 +27,8 @@ use scale_info::TypeInfo; use sp_staking::EraIndex; use sp_std::{fmt::Debug, prelude::*}; -pub type BalanceOf = <::Currency as Currency< - ::AccountId, ->>::Balance; - +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; /// An unstake request. #[derive( Encode, Decode, EqNoBound, PartialEqNoBound, Clone, TypeInfo, RuntimeDebugNoBound, MaxEncodedLen, diff --git a/frame/nomination-pools/benchmarking/src/lib.rs b/frame/nomination-pools/benchmarking/src/lib.rs index c31bcb1546ecd..9b063539152b7 100644 --- a/frame/nomination-pools/benchmarking/src/lib.rs +++ b/frame/nomination-pools/benchmarking/src/lib.rs @@ -133,15 +133,15 @@ impl ListScenario { // Create accounts with the origin weight let (pool_creator1, pool_origin1) = create_pool_account::(USER_SEED + 1, origin_weight); - T::StakingInterface::nominate( - pool_origin1.clone(), + T::Staking::nominate( + &pool_origin1, // NOTE: these don't really need to be validators. 
vec![account("random_validator", 0, USER_SEED)], )?; let (_, pool_origin2) = create_pool_account::(USER_SEED + 2, origin_weight); - T::StakingInterface::nominate( - pool_origin2.clone(), + T::Staking::nominate( + &pool_origin2, vec![account("random_validator", 0, USER_SEED)].clone(), )?; @@ -156,10 +156,7 @@ impl ListScenario { // Create an account with the worst case destination weight let (_, pool_dest1) = create_pool_account::(USER_SEED + 3, dest_weight); - T::StakingInterface::nominate( - pool_dest1.clone(), - vec![account("random_validator", 0, USER_SEED)], - )?; + T::Staking::nominate(&pool_dest1, vec![account("random_validator", 0, USER_SEED)])?; let weight_of = pallet_staking::Pallet::::weight_of_fn(); assert_eq!(vote_to_balance::(weight_of(&pool_origin1)).unwrap(), origin_weight); @@ -185,12 +182,11 @@ impl ListScenario { self.origin1_member = Some(joiner.clone()); CurrencyOf::::make_free_balance_be(&joiner, amount * 2u32.into()); - let original_bonded = T::StakingInterface::active_stake(&self.origin1).unwrap(); + let original_bonded = T::Staking::active_stake(&self.origin1).unwrap(); // Unbond `amount` from the underlying pool account so when the member joins // we will maintain `current_bonded`. - T::StakingInterface::unbond(self.origin1.clone(), amount) - .expect("the pool was created in `Self::new`."); + T::Staking::unbond(&self.origin1, amount).expect("the pool was created in `Self::new`."); // Account pool points for the unbonded balance. BondedPools::::mutate(&1, |maybe_pool| { @@ -219,7 +215,7 @@ frame_benchmarking::benchmarks! { // setup the worst case list scenario. let scenario = ListScenario::::new(origin_weight, true)?; assert_eq!( - T::StakingInterface::active_stake(&scenario.origin1).unwrap(), + T::Staking::active_stake(&scenario.origin1).unwrap(), origin_weight ); @@ -234,7 +230,7 @@ frame_benchmarking::benchmarks! { verify { assert_eq!(CurrencyOf::::free_balance(&joiner), joiner_free - max_additional); assert_eq!( - T::StakingInterface::active_stake(&scenario.origin1).unwrap(), + T::Staking::active_stake(&scenario.origin1).unwrap(), scenario.dest_weight ); } @@ -249,7 +245,7 @@ frame_benchmarking::benchmarks! { }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::FreeBalance(extra)) verify { assert!( - T::StakingInterface::active_stake(&scenario.origin1).unwrap() >= + T::Staking::active_stake(&scenario.origin1).unwrap() >= scenario.dest_weight ); } @@ -267,7 +263,7 @@ frame_benchmarking::benchmarks! { }: bond_extra(RuntimeOrigin::Signed(scenario.creator1.clone()), BondExtra::Rewards) verify { assert!( - T::StakingInterface::active_stake(&scenario.origin1).unwrap() >= + T::Staking::active_stake(&scenario.origin1).unwrap() >= scenario.dest_weight ); } @@ -314,7 +310,7 @@ frame_benchmarking::benchmarks! { whitelist_account!(member_id); }: _(RuntimeOrigin::Signed(member_id.clone()), member_id_lookup, all_points) verify { - let bonded_after = T::StakingInterface::active_stake(&scenario.origin1).unwrap(); + let bonded_after = T::Staking::active_stake(&scenario.origin1).unwrap(); // We at least went down to the destination bag assert!(bonded_after <= scenario.dest_weight); let member = PoolMembers::::get( @@ -323,7 +319,7 @@ frame_benchmarking::benchmarks! { .unwrap(); assert_eq!( member.unbonding_eras.keys().cloned().collect::>(), - vec![0 + T::StakingInterface::bonding_duration()] + vec![0 + T::Staking::bonding_duration()] ); assert_eq!( member.unbonding_eras.values().cloned().collect::>(), @@ -345,7 +341,7 @@ frame_benchmarking::benchmarks! 
{ // Sanity check join worked assert_eq!( - T::StakingInterface::active_stake(&pool_account).unwrap(), + T::Staking::active_stake(&pool_account).unwrap(), min_create_bond + min_join_bond ); assert_eq!(CurrencyOf::::free_balance(&joiner), min_join_bond); @@ -355,7 +351,7 @@ frame_benchmarking::benchmarks! { // Sanity check that unbond worked assert_eq!( - T::StakingInterface::active_stake(&pool_account).unwrap(), + T::Staking::active_stake(&pool_account).unwrap(), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -388,7 +384,7 @@ frame_benchmarking::benchmarks! { // Sanity check join worked assert_eq!( - T::StakingInterface::active_stake(&pool_account).unwrap(), + T::Staking::active_stake(&pool_account).unwrap(), min_create_bond + min_join_bond ); assert_eq!(CurrencyOf::::free_balance(&joiner), min_join_bond); @@ -399,7 +395,7 @@ frame_benchmarking::benchmarks! { // Sanity check that unbond worked assert_eq!( - T::StakingInterface::active_stake(&pool_account).unwrap(), + T::Staking::active_stake(&pool_account).unwrap(), min_create_bond ); assert_eq!(pallet_staking::Ledger::::get(&pool_account).unwrap().unlocking.len(), 1); @@ -446,7 +442,7 @@ frame_benchmarking::benchmarks! { // Sanity check that unbond worked assert_eq!( - T::StakingInterface::active_stake(&pool_account).unwrap(), + T::Staking::active_stake(&pool_account).unwrap(), Zero::zero() ); assert_eq!( @@ -525,8 +521,8 @@ frame_benchmarking::benchmarks! { } ); assert_eq!( - T::StakingInterface::active_stake(&Pools::::create_bonded_account(1)), - Some(min_create_bond) + T::Staking::active_stake(&Pools::::create_bonded_account(1)), + Ok(min_create_bond) ); } @@ -564,8 +560,8 @@ frame_benchmarking::benchmarks! { } ); assert_eq!( - T::StakingInterface::active_stake(&Pools::::create_bonded_account(1)), - Some(min_create_bond) + T::Staking::active_stake(&Pools::::create_bonded_account(1)), + Ok(min_create_bond) ); } @@ -647,13 +643,13 @@ frame_benchmarking::benchmarks! 
{ .map(|i| account("stash", USER_SEED, i)) .collect(); - assert_ok!(Pools::::nominate(RuntimeOrigin::Signed(depositor.clone()).into(), 1, validators)); - assert!(T::StakingInterface::nominations(Pools::::create_bonded_account(1)).is_some()); + assert_ok!(T::Staking::nominate(&pool_account, validators)); + assert!(T::Staking::nominations(Pools::::create_bonded_account(1)).is_some()); whitelist_account!(depositor); }:_(RuntimeOrigin::Signed(depositor.clone()), 1) verify { - assert!(T::StakingInterface::nominations(Pools::::create_bonded_account(1)).is_none()); + assert!(T::Staking::nominations(Pools::::create_bonded_account(1)).is_none()); } impl_benchmark_test_suite!( diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index 0f617f0bf6f4b..db01989f2b563 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -157,11 +157,10 @@ impl pallet_nomination_pools::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type Currency = Balances; - type CurrencyBalance = Balance; type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type StakingInterface = Staking; + type Staking = Staking; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 9e77adaeee677..5173b2e7e7d21 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -286,7 +286,7 @@ use sp_runtime::{ AccountIdConversion, Bounded, CheckedAdd, CheckedSub, Convert, Saturating, StaticLookup, Zero, }, - FixedPointNumber, FixedPointOperand, + FixedPointNumber, }; use sp_staking::{EraIndex, OnStakerSlash, StakingInterface}; use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, ops::Div, vec::Vec}; @@ -417,7 +417,7 @@ impl PoolMember { /// /// This is derived from the ratio of points in the pool to which the member belongs to. /// Might return different values based on the pool state for the same member and points. - fn active_balance(&self) -> BalanceOf { + fn active_stake(&self) -> BalanceOf { if let Some(pool) = BondedPool::::get(self.pool_id).defensive() { pool.points_to_balance(self.points) } else { @@ -623,7 +623,7 @@ impl BondedPool { /// This is often used for bonding and issuing new funds into the pool. fn balance_to_point(&self, new_funds: BalanceOf) -> BalanceOf { let bonded_balance = - T::StakingInterface::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); Pallet::::balance_to_point(bonded_balance, self.points, new_funds) } @@ -632,7 +632,7 @@ impl BondedPool { /// This is often used for unbonding. 
fn points_to_balance(&self, points: BalanceOf) -> BalanceOf { let bonded_balance = - T::StakingInterface::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); Pallet::::point_to_balance(bonded_balance, self.points, points) } @@ -683,7 +683,7 @@ impl BondedPool { fn transferrable_balance(&self) -> BalanceOf { let account = self.bonded_account(); T::Currency::free_balance(&account) - .saturating_sub(T::StakingInterface::active_stake(&account).unwrap_or_default()) + .saturating_sub(T::Staking::active_stake(&account).unwrap_or_default()) } fn is_root(&self, who: &T::AccountId) -> bool { @@ -738,7 +738,7 @@ impl BondedPool { ensure!(!self.is_destroying(), Error::::CanNotChangeState); let bonded_balance = - T::StakingInterface::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); + T::Staking::active_stake(&self.bonded_account()).unwrap_or(Zero::zero()); ensure!(!bonded_balance.is_zero(), Error::::OverflowRisk); let points_to_balance_ratio_floor = self @@ -791,7 +791,7 @@ impl BondedPool { target_member.active_points().saturating_sub(unbonding_points); let mut target_member_after_unbond = (*target_member).clone(); target_member_after_unbond.points = new_depositor_points; - target_member_after_unbond.active_balance() + target_member_after_unbond.active_stake() }; // any partial unbonding is only ever allowed if this unbond is permissioned. @@ -862,8 +862,8 @@ impl BondedPool { /// Bond exactly `amount` from `who`'s funds into this pool. /// - /// If the bond type is `Create`, `StakingInterface::bond` is called, and `who` - /// is allowed to be killed. Otherwise, `StakingInterface::bond_extra` is called and `who` + /// If the bond type is `Create`, `Staking::bond` is called, and `who` + /// is allowed to be killed. Otherwise, `Staking::bond_extra` is called and `who` /// cannot be killed. /// /// Returns `Ok(points_issues)`, `Err` otherwise. @@ -889,16 +889,11 @@ impl BondedPool { let points_issued = self.issue(amount); match ty { - BondType::Create => T::StakingInterface::bond( - bonded_account.clone(), - bonded_account, - amount, - self.reward_account(), - )?, + BondType::Create => T::Staking::bond(&bonded_account, amount, &self.reward_account())?, // The pool should always be created in such a way its in a state to bond extra, but if // the active balance is slashed below the minimum bonded or the account cannot be // found, we exit early. - BondType::Later => T::StakingInterface::bond_extra(bonded_account, amount)?, + BondType::Later => T::Staking::bond_extra(&bonded_account, amount)?, } Ok(points_issued) @@ -1129,7 +1124,7 @@ impl Get for TotalUnbondingPools { // NOTE: this may be dangerous in the scenario bonding_duration gets decreased because // we would no longer be able to decode `UnbondingPoolsWithEra`, which uses // `TotalUnbondingPools` as the bound - T::StakingInterface::bonding_duration() + T::PostUnbondingPoolsWindow::get() + T::Staking::bonding_duration() + T::PostUnbondingPoolsWindow::get() } } @@ -1138,7 +1133,6 @@ pub mod pallet { use super::*; use frame_support::traits::StorageVersion; use frame_system::{ensure_signed, pallet_prelude::*}; - use sp_runtime::traits::CheckedAdd; /// The current storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); @@ -1157,20 +1151,7 @@ pub mod pallet { type WeightInfo: weights::WeightInfo; /// The nominating balance. - type Currency: Currency; - - /// Sadly needed to bound it to `FixedPointOperand`. 
- // The only alternative is to sprinkle a `where BalanceOf: FixedPointOperand` in roughly - // a million places, so we prefer doing this. - type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned - + codec::FullCodec - + MaybeSerializeDeserialize - + sp_std::fmt::Debug - + Default - + FixedPointOperand - + CheckedAdd - + TypeInfo - + MaxEncodedLen; + type Currency: Currency; /// The type that is used for reward counter. /// @@ -1212,10 +1193,7 @@ pub mod pallet { type U256ToBalance: Convert>; /// The interface for nominating. - type StakingInterface: StakingInterface< - Balance = BalanceOf, - AccountId = Self::AccountId, - >; + type Staking: StakingInterface, AccountId = Self::AccountId>; /// The amount of eras a `SubPools::with_era` pool can exist before it gets merged into the /// `SubPools::no_era` pool. In other words, this is the amount of eras a member will be @@ -1650,12 +1628,12 @@ pub mod pallet { let _ = reward_pool.update_records(bonded_pool.id, bonded_pool.points)?; let _ = Self::do_reward_payout(&who, &mut member, &mut bonded_pool, &mut reward_pool)?; - let current_era = T::StakingInterface::current_era(); - let unbond_era = T::StakingInterface::bonding_duration().saturating_add(current_era); + let current_era = T::Staking::current_era(); + let unbond_era = T::Staking::bonding_duration().saturating_add(current_era); // Unbond in the actual underlying nominator. let unbonding_balance = bonded_pool.dissolve(unbonding_points); - T::StakingInterface::unbond(bonded_pool.bonded_account(), unbonding_balance)?; + T::Staking::unbond(&bonded_pool.bonded_account(), unbonding_balance)?; // Note that we lazily create the unbonding pools here if they don't already exist let mut sub_pools = SubPoolsStorage::::get(member.pool_id) @@ -1718,7 +1696,7 @@ pub mod pallet { // For now we only allow a pool to withdraw unbonded if its not destroying. If the pool // is destroying then `withdraw_unbonded` can be used. ensure!(pool.state != PoolState::Destroying, Error::::NotDestroying); - T::StakingInterface::withdraw_unbonded(pool.bonded_account(), num_slashing_spans)?; + T::Staking::withdraw_unbonded(pool.bonded_account(), num_slashing_spans)?; Ok(()) } @@ -1754,7 +1732,7 @@ pub mod pallet { let member_account = T::Lookup::lookup(member_account)?; let mut member = PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?; - let current_era = T::StakingInterface::current_era(); + let current_era = T::Staking::current_era(); let bonded_pool = BondedPool::::get(member.pool_id) .defensive_ok_or::>(DefensiveError::PoolNotFound.into())?; @@ -1769,10 +1747,8 @@ pub mod pallet { // Before calculate the `balance_to_unbond`, with call withdraw unbonded to ensure the // `transferrable_balance` is correct. - let stash_killed = T::StakingInterface::withdraw_unbonded( - bonded_pool.bonded_account(), - num_slashing_spans, - )?; + let stash_killed = + T::Staking::withdraw_unbonded(bonded_pool.bonded_account(), num_slashing_spans)?; // defensive-only: the depositor puts enough funds into the stash so that it will only // be destroyed when they are leaving. @@ -1960,7 +1936,7 @@ pub mod pallet { let who = ensure_signed(origin)?; let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator); - T::StakingInterface::nominate(bonded_pool.bonded_account(), validators) + T::Staking::nominate(&bonded_pool.bonded_account(), validators) } /// Set a new state for the pool. 
@@ -2132,7 +2108,7 @@ pub mod pallet { let who = ensure_signed(origin)?; let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator); - T::StakingInterface::chill(bonded_pool.bonded_account()) + T::Staking::chill(&bonded_pool.bonded_account()) } } @@ -2149,7 +2125,7 @@ pub mod pallet { "Minimum points to balance ratio must be greater than 0" ); assert!( - T::StakingInterface::bonding_duration() < TotalUnbondingPools::::get(), + T::Staking::bonding_duration() < TotalUnbondingPools::::get(), "There must be more unbonding pools then the bonding duration / so a slash can be applied to relevant unboding pools. (We assume / the bonding duration > slash deffer duration.", @@ -2185,7 +2161,7 @@ impl Pallet { /// It is essentially `max { MinNominatorBond, MinCreateBond, MinJoinBond }`, where the former /// is coming from the staking pallet and the latter two are configured in this pallet. pub fn depositor_min_bond() -> BalanceOf { - T::StakingInterface::minimum_bond() + T::Staking::minimum_nominator_bond() .max(MinCreateBond::::get()) .max(MinJoinBond::::get()) .max(T::Currency::minimum_balance()) @@ -2212,7 +2188,7 @@ impl Pallet { debug_assert_eq!(frame_system::Pallet::::consumers(&reward_account), 0); debug_assert_eq!(frame_system::Pallet::::consumers(&bonded_account), 0); debug_assert_eq!( - T::StakingInterface::total_stake(&bonded_account).unwrap_or_default(), + T::Staking::total_stake(&bonded_account).unwrap_or_default(), Zero::zero() ); @@ -2487,8 +2463,7 @@ impl Pallet { let subs = SubPoolsStorage::::get(pool_id).unwrap_or_default(); let sum_unbonding_balance = subs.sum_unbonding_balance(); - let bonded_balance = - T::StakingInterface::active_stake(&pool_account).unwrap_or_default(); + let bonded_balance = T::Staking::active_stake(&pool_account).unwrap_or_default(); let total_balance = T::Currency::total_balance(&pool_account); assert!( diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index 1b3372dae56ee..e1af456e31fd4 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -3,6 +3,7 @@ use crate::{self as pools}; use frame_support::{assert_ok, parameter_types, PalletId}; use frame_system::RawOrigin; use sp_runtime::FixedU128; +use sp_staking::Stake; pub type BlockNumber = u64; pub type AccountId = u128; @@ -47,10 +48,17 @@ impl sp_staking::StakingInterface for StakingMock { type Balance = Balance; type AccountId = AccountId; - fn minimum_bond() -> Self::Balance { + fn minimum_nominator_bond() -> Self::Balance { + StakingMinBond::get() + } + fn minimum_validator_bond() -> Self::Balance { StakingMinBond::get() } + fn desired_validator_count() -> u32 { + unimplemented!("method currently not used in testing") + } + fn current_era() -> EraIndex { CurrentEra::get() } @@ -59,39 +67,24 @@ impl sp_staking::StakingInterface for StakingMock { BondingDuration::get() } - fn active_stake(who: &Self::AccountId) -> Option { - BondedBalanceMap::get().get(who).map(|v| *v) - } - - fn total_stake(who: &Self::AccountId) -> Option { - match ( - UnbondingBalanceMap::get().get(who).map(|v| *v), - BondedBalanceMap::get().get(who).map(|v| *v), - ) { - (None, None) => None, - (Some(v), None) | (None, Some(v)) => Some(v), - (Some(a), Some(b)) => Some(a + b), - } - } - - fn bond_extra(who: Self::AccountId, extra: Self::Balance) -> DispatchResult { + fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult { let mut x = BondedBalanceMap::get(); - 
x.get_mut(&who).map(|v| *v += extra); + x.get_mut(who).map(|v| *v += extra); BondedBalanceMap::set(&x); Ok(()) } - fn unbond(who: Self::AccountId, amount: Self::Balance) -> DispatchResult { + fn unbond(who: &Self::AccountId, amount: Self::Balance) -> DispatchResult { let mut x = BondedBalanceMap::get(); - *x.get_mut(&who).unwrap() = x.get_mut(&who).unwrap().saturating_sub(amount); + *x.get_mut(who).unwrap() = x.get_mut(who).unwrap().saturating_sub(amount); BondedBalanceMap::set(&x); let mut y = UnbondingBalanceMap::get(); - *y.entry(who).or_insert(Self::Balance::zero()) += amount; + *y.entry(*who).or_insert(Self::Balance::zero()) += amount; UnbondingBalanceMap::set(&y); Ok(()) } - fn chill(_: Self::AccountId) -> sp_runtime::DispatchResult { + fn chill(_: &Self::AccountId) -> sp_runtime::DispatchResult { Ok(()) } @@ -104,17 +97,12 @@ impl sp_staking::StakingInterface for StakingMock { Ok(UnbondingBalanceMap::get().is_empty() && BondedBalanceMap::get().is_empty()) } - fn bond( - stash: Self::AccountId, - _: Self::AccountId, - value: Self::Balance, - _: Self::AccountId, - ) -> DispatchResult { - StakingMock::set_bonded_balance(stash, value); + fn bond(stash: &Self::AccountId, value: Self::Balance, _: &Self::AccountId) -> DispatchResult { + StakingMock::set_bonded_balance(*stash, value); Ok(()) } - fn nominate(_: Self::AccountId, nominations: Vec) -> DispatchResult { + fn nominate(_: &Self::AccountId, nominations: Vec) -> DispatchResult { Nominations::set(&Some(nominations)); Ok(()) } @@ -123,6 +111,48 @@ impl sp_staking::StakingInterface for StakingMock { fn nominations(_: Self::AccountId) -> Option> { Nominations::get() } + + fn stash_by_ctrl(_controller: &Self::AccountId) -> Result { + unimplemented!("method currently not used in testing") + } + + fn stake(who: &Self::AccountId) -> Result, DispatchError> { + match ( + UnbondingBalanceMap::get().get(who).map(|v| *v), + BondedBalanceMap::get().get(who).map(|v| *v), + ) { + (None, None) => Err(DispatchError::Other("balance not found")), + (Some(v), None) => Ok(Stake { total: v, active: 0, stash: *who }), + (None, Some(v)) => Ok(Stake { total: v, active: v, stash: *who }), + (Some(a), Some(b)) => Ok(Stake { total: a + b, active: b, stash: *who }), + } + } + + fn election_ongoing() -> bool { + unimplemented!("method currently not used in testing") + } + + fn force_unstake(_who: Self::AccountId) -> sp_runtime::DispatchResult { + unimplemented!("method currently not used in testing") + } + + fn is_exposed_in_era(_who: &Self::AccountId, _era: &EraIndex) -> bool { + unimplemented!("method currently not used in testing") + } + + #[cfg(feature = "runtime-benchmarks")] + fn add_era_stakers( + _current_era: &EraIndex, + _stash: &Self::AccountId, + _exposures: Vec<(Self::AccountId, Self::Balance)>, + ) { + unimplemented!("method currently not used in testing") + } + + #[cfg(feature = "runtime-benchmarks")] + fn set_current_era(_era: EraIndex) { + unimplemented!("method currently not used in testing") + } } impl frame_system::Config for Runtime { @@ -192,11 +222,10 @@ impl pools::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type Currency = Balances; - type CurrencyBalance = Balance; type RewardCounter = RewardCounter; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type StakingInterface = StakingMock; + type Staking = StakingMock; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type PalletId = PoolsPalletId; type MaxMetadataLen = MaxMetadataLen; diff --git 
a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 5074a7ffa695a..0c909b68152bb 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -3083,18 +3083,18 @@ mod pool_withdraw_unbonded { fn pool_withdraw_unbonded_works() { ExtBuilder::default().build_and_execute(|| { // Given 10 unbond'ed directly against the pool account - assert_ok!(StakingMock::unbond(default_bonded_account(), 5)); + assert_ok!(StakingMock::unbond(&default_bonded_account(), 5)); // and the pool account only has 10 balance - assert_eq!(StakingMock::active_stake(&default_bonded_account()), Some(5)); - assert_eq!(StakingMock::total_stake(&default_bonded_account()), Some(10)); + assert_eq!(StakingMock::active_stake(&default_bonded_account()), Ok(5)); + assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(10)); assert_eq!(Balances::free_balance(&default_bonded_account()), 10); // When assert_ok!(Pools::pool_withdraw_unbonded(RuntimeOrigin::signed(10), 1, 0)); // Then there unbonding balance is no longer locked - assert_eq!(StakingMock::active_stake(&default_bonded_account()), Some(5)); - assert_eq!(StakingMock::total_stake(&default_bonded_account()), Some(5)); + assert_eq!(StakingMock::active_stake(&default_bonded_account()), Ok(5)); + assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(5)); assert_eq!(Balances::free_balance(&default_bonded_account()), 10); }); } @@ -3270,7 +3270,7 @@ mod withdraw_unbonded { // current bond is 600, we slash it all to 300. StakingMock::set_bonded_balance(default_bonded_account(), 300); Balances::make_free_balance_be(&default_bonded_account(), 300); - assert_eq!(StakingMock::total_stake(&default_bonded_account()), Some(300)); + assert_eq!(StakingMock::total_stake(&default_bonded_account()), Ok(300)); assert_ok!(fully_unbond_permissioned(40)); assert_ok!(fully_unbond_permissioned(550)); @@ -4074,12 +4074,15 @@ mod create { assert!(!BondedPools::::contains_key(2)); assert!(!RewardPools::::contains_key(2)); assert!(!PoolMembers::::contains_key(11)); - assert_eq!(StakingMock::active_stake(&next_pool_stash), None); + assert_err!( + StakingMock::active_stake(&next_pool_stash), + DispatchError::Other("balance not found") + ); - Balances::make_free_balance_be(&11, StakingMock::minimum_bond() + ed); + Balances::make_free_balance_be(&11, StakingMock::minimum_nominator_bond() + ed); assert_ok!(Pools::create( RuntimeOrigin::signed(11), - StakingMock::minimum_bond(), + StakingMock::minimum_nominator_bond(), 123, 456, 789 @@ -4090,7 +4093,7 @@ mod create { PoolMembers::::get(11).unwrap(), PoolMember { pool_id: 2, - points: StakingMock::minimum_bond(), + points: StakingMock::minimum_nominator_bond(), ..Default::default() } ); @@ -4099,7 +4102,7 @@ mod create { BondedPool { id: 2, inner: BondedPoolInner { - points: StakingMock::minimum_bond(), + points: StakingMock::minimum_nominator_bond(), member_counter: 1, state: PoolState::Open, roles: PoolRoles { @@ -4113,7 +4116,7 @@ mod create { ); assert_eq!( StakingMock::active_stake(&next_pool_stash).unwrap(), - StakingMock::minimum_bond() + StakingMock::minimum_nominator_bond() ); assert_eq!( RewardPools::::get(2).unwrap(), @@ -4142,7 +4145,7 @@ mod create { // Given assert_eq!(MinCreateBond::::get(), 2); - assert_eq!(StakingMock::minimum_bond(), 10); + assert_eq!(StakingMock::minimum_nominator_bond(), 10); // Then assert_noop!( diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index 
8c04e40d71df4..5758b884e348d 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -171,11 +171,10 @@ impl pallet_nomination_pools::Config for Runtime { type RuntimeEvent = RuntimeEvent; type WeightInfo = (); type Currency = Balances; - type CurrencyBalance = Balance; type RewardCounter = FixedU128; type BalanceToU256 = BalanceToU256; type U256ToBalance = U256ToBalance; - type StakingInterface = Staking; + type Staking = Staking; type PostUnbondingPoolsWindow = PostUnbondingPoolsWindow; type MaxMetadataLen = ConstU32<256>; type MaxUnbonding = ConstU32<8>; diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 4ef064229693c..a9e9899b9761a 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -18,8 +18,8 @@ //! Implementations for the Staking FRAME Pallet. use frame_election_provider_support::{ - data_provider, ElectionDataProvider, ElectionProvider, ScoreProvider, SortedListProvider, - Supports, VoteWeight, VoterOf, + data_provider, ElectionDataProvider, ElectionProvider, ElectionProviderBase, ScoreProvider, + SortedListProvider, Supports, VoteWeight, VoterOf, }; use frame_support::{ dispatch::WithPostDispatchInfo, @@ -38,7 +38,7 @@ use sp_runtime::{ }; use sp_staking::{ offence::{DisableStrategy, OffenceDetails, OnOffenceHandler}, - EraIndex, SessionIndex, StakingInterface, + EraIndex, SessionIndex, Stake, StakingInterface, }; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -1482,14 +1482,44 @@ impl SortedListProvider for UseNominatorsAndValidatorsM } } +// NOTE: in this entire impl block, the assumption is that `who` is a stash account. impl StakingInterface for Pallet { type AccountId = T::AccountId; type Balance = BalanceOf; - fn minimum_bond() -> Self::Balance { + fn minimum_nominator_bond() -> Self::Balance { MinNominatorBond::::get() } + fn minimum_validator_bond() -> Self::Balance { + MinValidatorBond::::get() + } + + fn desired_validator_count() -> u32 { + ValidatorCount::::get() + } + + fn election_ongoing() -> bool { + ::ongoing() + } + + fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { + let num_slashing_spans = Self::slashing_spans(&who).iter().count() as u32; + Self::force_unstake(RawOrigin::Root.into(), who.clone(), num_slashing_spans) + } + + fn stash_by_ctrl(controller: &Self::AccountId) -> Result { + Self::ledger(controller) + .map(|l| l.stash) + .ok_or(Error::::NotController.into()) + } + + fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { + ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { + validator == *who || exposures.others.iter().any(|i| i.who == *who) + }) + } + fn bonding_duration() -> EraIndex { T::BondingDuration::get() } @@ -1498,58 +1528,81 @@ impl StakingInterface for Pallet { Self::current_era().unwrap_or(Zero::zero()) } - fn active_stake(controller: &Self::AccountId) -> Option { - Self::ledger(controller).map(|l| l.active) - } - - fn total_stake(controller: &Self::AccountId) -> Option { - Self::ledger(controller).map(|l| l.total) + fn stake(who: &Self::AccountId) -> Result, DispatchError> { + Self::bonded(who) + .and_then(|c| Self::ledger(c)) + .map(|l| Stake { stash: l.stash, total: l.total, active: l.active }) + .ok_or(Error::::NotStash.into()) } - fn bond_extra(stash: Self::AccountId, extra: Self::Balance) -> DispatchResult { - Self::bond_extra(RawOrigin::Signed(stash).into(), extra) + fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> 
DispatchResult { + Self::bond_extra(RawOrigin::Signed(who.clone()).into(), extra) } - fn unbond(controller: Self::AccountId, value: Self::Balance) -> DispatchResult { - Self::unbond(RawOrigin::Signed(controller).into(), value) + fn unbond(who: &Self::AccountId, value: Self::Balance) -> DispatchResult { + let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; + Self::unbond(RawOrigin::Signed(ctrl).into(), value) } - fn chill(controller: Self::AccountId) -> DispatchResult { - Self::chill(RawOrigin::Signed(controller).into()) + fn chill(who: &Self::AccountId) -> DispatchResult { + // defensive-only: any account bonded via this interface has the stash set as the + // controller, but we have to be sure. Same comment anywhere else that we read this. + let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; + Self::chill(RawOrigin::Signed(ctrl).into()) } fn withdraw_unbonded( - controller: Self::AccountId, + who: Self::AccountId, num_slashing_spans: u32, ) -> Result { - Self::withdraw_unbonded(RawOrigin::Signed(controller.clone()).into(), num_slashing_spans) - .map(|_| !Ledger::::contains_key(&controller)) + let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; + Self::withdraw_unbonded(RawOrigin::Signed(ctrl.clone()).into(), num_slashing_spans) + .map(|_| !Ledger::::contains_key(&ctrl)) .map_err(|with_post| with_post.error) } fn bond( - stash: Self::AccountId, - controller: Self::AccountId, + who: &Self::AccountId, value: Self::Balance, - payee: Self::AccountId, + payee: &Self::AccountId, ) -> DispatchResult { Self::bond( - RawOrigin::Signed(stash).into(), - T::Lookup::unlookup(controller), + RawOrigin::Signed(who.clone()).into(), + T::Lookup::unlookup(who.clone()), value, - RewardDestination::Account(payee), + RewardDestination::Account(payee.clone()), ) } - fn nominate(controller: Self::AccountId, targets: Vec) -> DispatchResult { + fn nominate(who: &Self::AccountId, targets: Vec) -> DispatchResult { + let ctrl = Self::bonded(who).ok_or(Error::::NotStash)?; let targets = targets.into_iter().map(T::Lookup::unlookup).collect::>(); - Self::nominate(RawOrigin::Signed(controller).into(), targets) + Self::nominate(RawOrigin::Signed(ctrl).into(), targets) } #[cfg(feature = "runtime-benchmarks")] fn nominations(who: Self::AccountId) -> Option> { Nominators::::get(who).map(|n| n.targets.into_inner()) } + + #[cfg(feature = "runtime-benchmarks")] + fn add_era_stakers( + current_era: &EraIndex, + stash: &T::AccountId, + exposures: Vec<(Self::AccountId, Self::Balance)>, + ) { + let others = exposures + .iter() + .map(|(who, value)| IndividualExposure { who: who.clone(), value: value.clone() }) + .collect::>(); + let exposure = Exposure { total: Default::default(), own: Default::default(), others }; + Self::add_era_stakers(current_era.clone(), stash.clone(), exposure) + } + + #[cfg(feature = "runtime-benchmarks")] + fn set_current_era(era: EraIndex) { + CurrentEra::::put(era); + } } #[cfg(any(test, feature = "try-runtime"))] diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 560c3b6ed830c..df90873ab2e59 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -262,7 +262,7 @@ pub mod pallet { type WeightInfo: WeightInfo; } - /// The ideal number of staking participants. + /// The ideal number of active validators. 
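The hunks above make every `StakingInterface` entry point take the stash account and resolve the controller internally via `Self::bonded` before dispatching the controller-origin call. A self-contained sketch of that resolution pattern, using a plain `HashMap` and string errors as stand-ins for the pallet's `Bonded` storage map and its `NotStash` error (assumed, simplified types; not the real pallet API):

use std::collections::HashMap;

struct Staking {
    // stash -> controller
    bonded: HashMap<u64, u64>,
}

impl Staking {
    fn unbond(&self, stash: &u64, value: u128) -> Result<(u64, u128), &'static str> {
        // look up the controller for this stash; fail if `stash` was never bonded
        let ctrl = *self.bonded.get(stash).ok_or("NotStash")?;
        // the real impl now dispatches `unbond` signed by `ctrl`
        Ok((ctrl, value))
    }
}

fn main() {
    let staking = Staking { bonded: HashMap::from([(1u64, 100u64)]) };
    assert_eq!(staking.unbond(&1, 50), Ok((100, 50)));
    assert!(staking.unbond(&2, 50).is_err());
}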
#[pallet::storage] #[pallet::getter(fn validator_count)] pub type ValidatorCount = StorageValue<_, u32, ValueQuery>; diff --git a/primitives/staking/src/lib.rs b/primitives/staking/src/lib.rs index 5a3e97b4d5274..703f0abe80458 100644 --- a/primitives/staking/src/lib.rs +++ b/primitives/staking/src/lib.rs @@ -19,8 +19,9 @@ //! A crate which contains primitives that are useful for implementation that uses staking //! approaches in general. Definitions related to sessions, slashing, etc go here. + use sp_runtime::{DispatchError, DispatchResult}; -use sp_std::collections::btree_map::BTreeMap; +use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; pub mod offence; @@ -54,25 +55,55 @@ impl OnStakerSlash for () { } } -/// Trait for communication with the staking pallet. +/// A struct that reflects stake that an account has in the staking system. Provides a set of +/// methods to operate on it's properties. Aimed at making `StakingInterface` more concise. +pub struct Stake { + /// The stash account whose balance is actually locked and at stake. + pub stash: T::AccountId, + /// The total stake that `stash` has in the staking system. This includes the + /// `active` stake, and any funds currently in the process of unbonding via + /// [`StakingInterface::unbond`]. + /// + /// # Note + /// + /// This is only guaranteed to reflect the amount locked by the staking system. If there are + /// non-staking locks on the bonded pair's balance this amount is going to be larger in + /// reality. + pub total: T::Balance, + /// The total amount of the stash's balance that will be at stake in any forthcoming + /// rounds. + pub active: T::Balance, +} + +/// A generic representation of a staking implementation. +/// +/// This interface uses the terminology of NPoS, but it is aims to be generic enough to cover other +/// implementations as well. pub trait StakingInterface { /// Balance type used by the staking system. - type Balance; + type Balance: PartialEq; /// AccountId type used by the staking system type AccountId; - /// The minimum amount required to bond in order to be a nominator. This does not necessarily - /// mean the nomination will be counted in an election, but instead just enough to be stored as - /// a nominator. In other words, this is the minimum amount to register the intention to - /// nominate. - fn minimum_bond() -> Self::Balance; + /// The minimum amount required to bond in order to set nomination intentions. This does not + /// necessarily mean the nomination will be counted in an election, but instead just enough to + /// be stored as a nominator. In other words, this is the minimum amount to register the + /// intention to nominate. + fn minimum_nominator_bond() -> Self::Balance; - /// Number of eras that staked funds must remain bonded for. + /// The minimum amount required to bond in order to set validation intentions. + fn minimum_validator_bond() -> Self::Balance; + + /// Return a stash account that is controlled by a `controller`. /// - /// # Note + /// ## Note /// - /// This must be strictly greater than the staking systems slash deffer duration. + /// The controller abstraction is not permanent and might go away. Avoid using this as much as + /// possible. + fn stash_by_ctrl(controller: &Self::AccountId) -> Result; + + /// Number of eras that staked funds must remain bonded for. fn bonding_duration() -> EraIndex; /// The current era index. @@ -80,41 +111,39 @@ pub trait StakingInterface { /// This should be the latest planned era that the staking system knows about. 
fn current_era() -> EraIndex; - /// The amount of active stake that `stash` has in the staking system. - fn active_stake(stash: &Self::AccountId) -> Option; + /// Returns the stake of `who`. + fn stake(who: &Self::AccountId) -> Result, DispatchError>; - /// The total stake that `stash` has in the staking system. This includes the - /// [`Self::active_stake`], and any funds currently in the process of unbonding via - /// [`Self::unbond`]. - /// - /// # Note - /// - /// This is only guaranteed to reflect the amount locked by the staking system. If there are - /// non-staking locks on the bonded pair's balance this may not be accurate. - fn total_stake(stash: &Self::AccountId) -> Option; + fn total_stake(who: &Self::AccountId) -> Result { + Self::stake(who).map(|s| s.total) + } - /// Bond (lock) `value` of `stash`'s balance. `controller` will be set as the account - /// controlling `stash`. This creates what is referred to as "bonded pair". - fn bond( - stash: Self::AccountId, - controller: Self::AccountId, - value: Self::Balance, - payee: Self::AccountId, - ) -> DispatchResult; - - /// Have `controller` nominate `validators`. - fn nominate( - controller: Self::AccountId, - validators: sp_std::vec::Vec, - ) -> DispatchResult; - - /// Chill `stash`. - fn chill(controller: Self::AccountId) -> DispatchResult; - - /// Bond some extra amount in the _Stash_'s free balance against the active bonded balance of - /// the account. The amount extra actually bonded will never be more than the _Stash_'s free + fn active_stake(who: &Self::AccountId) -> Result { + Self::stake(who).map(|s| s.active) + } + + fn is_unbonding(who: &Self::AccountId) -> Result { + Self::stake(who).map(|s| s.active != s.total) + } + + fn fully_unbond(who: &Self::AccountId) -> DispatchResult { + Self::unbond(who, Self::stake(who)?.active) + } + + /// Bond (lock) `value` of `who`'s balance, while forwarding any rewards to `payee`. + fn bond(who: &Self::AccountId, value: Self::Balance, payee: &Self::AccountId) + -> DispatchResult; + + /// Have `who` nominate `validators`. + fn nominate(who: &Self::AccountId, validators: Vec) -> DispatchResult; + + /// Chill `who`. + fn chill(who: &Self::AccountId) -> DispatchResult; + + /// Bond some extra amount in `who`'s free balance against the active bonded balance of + /// the account. The amount extra actually bonded will never be more than `who`'s free /// balance. - fn bond_extra(stash: Self::AccountId, extra: Self::Balance) -> DispatchResult; + fn bond_extra(who: &Self::AccountId, extra: Self::Balance) -> DispatchResult; /// Schedule a portion of the active bonded balance to be unlocked at era /// [Self::current_era] + [`Self::bonding_duration`]. @@ -125,7 +154,7 @@ pub trait StakingInterface { /// The amount of times this can be successfully called is limited based on how many distinct /// eras funds are schedule to unlock in. Calling [`Self::withdraw_unbonded`] after some unlock /// schedules have reached their unlocking era should allow more calls to this function. - fn unbond(stash: Self::AccountId, value: Self::Balance) -> DispatchResult; + fn unbond(stash: &Self::AccountId, value: Self::Balance) -> DispatchResult; /// Unlock any funds schedule to unlock before or at the current era. /// @@ -135,7 +164,29 @@ pub trait StakingInterface { num_slashing_spans: u32, ) -> Result; + /// The ideal number of active validators. + fn desired_validator_count() -> u32; + + /// Whether or not there is an ongoing election. 
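The refactor above replaces the separate `active_stake`/`total_stake` getters with a single `stake(who)` accessor returning a `Stake` record, and derives the helpers from it as default methods. A self-contained sketch of that shape (plain Rust; `u128` and `&'static str` stand in for `BalanceOf<T>` and `DispatchError`, and this toy `Stake` omits the `stash` field):

struct Stake {
    total: u128,
    active: u128,
}

trait StakingInterface {
    type AccountId;

    fn stake(who: &Self::AccountId) -> Result<Stake, &'static str>;

    fn total_stake(who: &Self::AccountId) -> Result<u128, &'static str> {
        Self::stake(who).map(|s| s.total)
    }

    fn active_stake(who: &Self::AccountId) -> Result<u128, &'static str> {
        Self::stake(who).map(|s| s.active)
    }

    fn is_unbonding(who: &Self::AccountId) -> Result<bool, &'static str> {
        Self::stake(who).map(|s| s.active != s.total)
    }
}

struct Mock;

impl StakingInterface for Mock {
    type AccountId = u64;

    fn stake(who: &u64) -> Result<Stake, &'static str> {
        match *who {
            // account 1 has 100 bonded in total, 60 of which is still active
            1 => Ok(Stake { total: 100, active: 60 }),
            _ => Err("balance not found"),
        }
    }
}

fn main() {
    assert_eq!(Mock::total_stake(&1), Ok(100));
    assert_eq!(Mock::active_stake(&1), Ok(60));
    // 40 units are mid-unbond, so the account counts as unbonding
    assert_eq!(Mock::is_unbonding(&1), Ok(true));
    assert!(Mock::stake(&2).is_err());
}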
+ fn election_ongoing() -> bool; + + /// Force a current staker to become completely unstaked, immediately. + fn force_unstake(who: Self::AccountId) -> DispatchResult; + + /// Checks whether an account `staker` has been exposed in an era. + fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool; + /// Get the nominations of a stash, if they are a nominator, `None` otherwise. #[cfg(feature = "runtime-benchmarks")] - fn nominations(who: Self::AccountId) -> Option>; + fn nominations(who: Self::AccountId) -> Option>; + + #[cfg(feature = "runtime-benchmarks")] + fn add_era_stakers( + current_era: &EraIndex, + stash: &Self::AccountId, + exposures: Vec<(Self::AccountId, Self::Balance)>, + ); + + #[cfg(feature = "runtime-benchmarks")] + fn set_current_era(era: EraIndex); } From 21a7f8ee05017c8ae895fb722df92d985ed6fd21 Mon Sep 17 00:00:00 2001 From: Doordashcon Date: Sat, 29 Oct 2022 15:57:16 +0100 Subject: [PATCH 291/348] nomination-pools: allow pool-ids to be reused (#12407) * initial * logic check * do_create * cargo fmt * fixes * ensure_signed * update * cargo fmt * remove unused import --- frame/nomination-pools/src/lib.rs | 159 +++++++++++++++++----------- frame/nomination-pools/src/tests.rs | 38 +++++++ 2 files changed, 137 insertions(+), 60 deletions(-) diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 5173b2e7e7d21..3661a7f70b48a 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -28,7 +28,7 @@ //! //! ## Key terms //! -//! * pool id: A unique identifier of each pool. Set to u12 +//! * pool id: A unique identifier of each pool. Set to u32. //! * bonded pool: Tracks the distribution of actively staked funds. See [`BondedPool`] and //! [`BondedPoolInner`]. //! * reward pool: Tracks rewards earned by actively staked funds. See [`RewardPool`] and @@ -47,7 +47,7 @@ //! exactly the same rules and conditions as a normal staker. Its bond increases or decreases as //! members join, it can `nominate` or `chill`, and might not even earn staking rewards if it is //! not nominating proper validators. -//! * reward account: A similar key-less account, that is set as the `Payee` account fo the bonded +//! * reward account: A similar key-less account, that is set as the `Payee` account for the bonded //! account for all staking rewards. //! //! ## Usage @@ -1429,6 +1429,10 @@ pub mod pallet { Defensive(DefensiveError), /// Partial unbonding now allowed permissionlessly. PartialUnbondNotAllowedPermissionlessly, + /// Pool id currently in use. + PoolIdInUse, + /// Pool id provided is not correct/usable. 
+ InvalidPoolId, } #[derive(Encode, Decode, PartialEq, TypeInfo, frame_support::PalletError, RuntimeDebug)] @@ -1850,73 +1854,38 @@ pub mod pallet { nominator: AccountIdLookupOf, state_toggler: AccountIdLookupOf, ) -> DispatchResult { - let who = ensure_signed(origin)?; - let root = T::Lookup::lookup(root)?; - let nominator = T::Lookup::lookup(nominator)?; - let state_toggler = T::Lookup::lookup(state_toggler)?; - - ensure!(amount >= Pallet::::depositor_min_bond(), Error::::MinimumBondNotMet); - ensure!( - MaxPools::::get() - .map_or(true, |max_pools| BondedPools::::count() < max_pools), - Error::::MaxPools - ); - ensure!(!PoolMembers::::contains_key(&who), Error::::AccountBelongsToOtherPool); + let depositor = ensure_signed(origin)?; let pool_id = LastPoolId::::try_mutate::<_, Error, _>(|id| { *id = id.checked_add(1).ok_or(Error::::OverflowRisk)?; Ok(*id) })?; - let mut bonded_pool = BondedPool::::new( - pool_id, - PoolRoles { - root: Some(root), - nominator: Some(nominator), - state_toggler: Some(state_toggler), - depositor: who.clone(), - }, - ); - - bonded_pool.try_inc_members()?; - let points = bonded_pool.try_bond_funds(&who, amount, BondType::Create)?; - T::Currency::transfer( - &who, - &bonded_pool.reward_account(), - T::Currency::minimum_balance(), - ExistenceRequirement::AllowDeath, - )?; + Self::do_create(depositor, amount, root, nominator, state_toggler, pool_id) + } - PoolMembers::::insert( - who.clone(), - PoolMember:: { - pool_id, - points, - last_recorded_reward_counter: Zero::zero(), - unbonding_eras: Default::default(), - }, - ); - RewardPools::::insert( - pool_id, - RewardPool:: { - last_recorded_reward_counter: Zero::zero(), - last_recorded_total_payouts: Zero::zero(), - total_rewards_claimed: Zero::zero(), - }, - ); - ReversePoolIdLookup::::insert(bonded_pool.bonded_account(), pool_id); + /// Create a new delegation pool with a previously used pool id + /// + /// # Arguments + /// + /// same as `create` with the inclusion of + /// * `pool_id` - `A valid PoolId. + #[pallet::weight(T::WeightInfo::create())] + #[transactional] + pub fn create_with_pool_id( + origin: OriginFor, + #[pallet::compact] amount: BalanceOf, + root: AccountIdLookupOf, + nominator: AccountIdLookupOf, + state_toggler: AccountIdLookupOf, + pool_id: PoolId, + ) -> DispatchResult { + let depositor = ensure_signed(origin)?; - Self::deposit_event(Event::::Created { depositor: who.clone(), pool_id }); + ensure!(!BondedPools::::contains_key(pool_id), Error::::PoolIdInUse); + ensure!(pool_id < LastPoolId::::get(), Error::::InvalidPoolId); - Self::deposit_event(Event::::Bonded { - member: who, - pool_id, - bonded: amount, - joined: true, - }); - bonded_pool.put(); - - Ok(()) + Self::do_create(depositor, amount, root, nominator, state_toggler, pool_id) } /// Nominate on behalf of the pool. 
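The new `create_with_pool_id` call above only accepts an id that is both free and strictly below the pool-id counter, i.e. an id that was previously issued and has since been released. A minimal sketch of those two checks, with a slice and a plain counter standing in for `BondedPools` and `LastPoolId` (illustrative helper, not part of the pallet):

fn ensure_pool_id_usable(
    pool_id: u32,
    live_pools: &[u32],
    last_pool_id: u32,
) -> Result<(), &'static str> {
    // reject ids that still point at a live pool
    if live_pools.contains(&pool_id) {
        return Err("PoolIdInUse");
    }
    // only previously issued ids may be reused (mirrors `pool_id < LastPoolId`)
    if pool_id >= last_pool_id {
        return Err("InvalidPoolId");
    }
    Ok(())
}

fn main() {
    let live = [2u32]; // pool 2 is still active
    let last = 2u32;   // ids 1 and 2 have been issued so far
    assert_eq!(ensure_pool_id_usable(1, &live, last), Ok(())); // 1 was issued, now free
    assert_eq!(ensure_pool_id_usable(2, &live, last), Err("PoolIdInUse"));
    assert_eq!(ensure_pool_id_usable(3, &live, last), Err("InvalidPoolId")); // never issued
}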
@@ -2340,6 +2309,76 @@ impl Pallet { Ok(pending_rewards) } + fn do_create( + who: T::AccountId, + amount: BalanceOf, + root: AccountIdLookupOf, + nominator: AccountIdLookupOf, + state_toggler: AccountIdLookupOf, + pool_id: PoolId, + ) -> DispatchResult { + let root = T::Lookup::lookup(root)?; + let nominator = T::Lookup::lookup(nominator)?; + let state_toggler = T::Lookup::lookup(state_toggler)?; + + ensure!(amount >= Pallet::::depositor_min_bond(), Error::::MinimumBondNotMet); + ensure!( + MaxPools::::get().map_or(true, |max_pools| BondedPools::::count() < max_pools), + Error::::MaxPools + ); + ensure!(!PoolMembers::::contains_key(&who), Error::::AccountBelongsToOtherPool); + let mut bonded_pool = BondedPool::::new( + pool_id, + PoolRoles { + root: Some(root), + nominator: Some(nominator), + state_toggler: Some(state_toggler), + depositor: who.clone(), + }, + ); + + bonded_pool.try_inc_members()?; + let points = bonded_pool.try_bond_funds(&who, amount, BondType::Create)?; + + T::Currency::transfer( + &who, + &bonded_pool.reward_account(), + T::Currency::minimum_balance(), + ExistenceRequirement::AllowDeath, + )?; + + PoolMembers::::insert( + who.clone(), + PoolMember:: { + pool_id, + points, + last_recorded_reward_counter: Zero::zero(), + unbonding_eras: Default::default(), + }, + ); + RewardPools::::insert( + pool_id, + RewardPool:: { + last_recorded_reward_counter: Zero::zero(), + last_recorded_total_payouts: Zero::zero(), + total_rewards_claimed: Zero::zero(), + }, + ); + ReversePoolIdLookup::::insert(bonded_pool.bonded_account(), pool_id); + + Self::deposit_event(Event::::Created { depositor: who.clone(), pool_id }); + + Self::deposit_event(Event::::Bonded { + member: who, + pool_id, + bonded: amount, + joined: true, + }); + bonded_pool.put(); + + Ok(()) + } + /// Ensure the correctness of the state of this pallet. /// /// This should be valid before or after each state transition of this pallet. diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 0c909b68152bb..17fee7cc8dda3 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -4201,6 +4201,44 @@ mod create { ); }); } + + #[test] + fn create_with_pool_id_works() { + ExtBuilder::default().build_and_execute(|| { + let ed = Balances::minimum_balance(); + + Balances::make_free_balance_be(&11, StakingMock::minimum_bond() + ed); + assert_ok!(Pools::create( + RuntimeOrigin::signed(11), + StakingMock::minimum_bond(), + 123, + 456, + 789 + )); + + assert_eq!(Balances::free_balance(&11), 0); + // delete the initial pool created, then pool_Id `1` will be free + + assert_noop!( + Pools::create_with_pool_id(RuntimeOrigin::signed(12), 20, 234, 654, 783, 1), + Error::::PoolIdInUse + ); + + assert_noop!( + Pools::create_with_pool_id(RuntimeOrigin::signed(12), 20, 234, 654, 783, 3), + Error::::InvalidPoolId + ); + + // start dismantling the pool. 
+ assert_ok!(Pools::set_state(RuntimeOrigin::signed(902), 1, PoolState::Destroying)); + assert_ok!(fully_unbond_permissioned(10)); + + CurrentEra::set(3); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(10), 10, 10)); + + assert_ok!(Pools::create_with_pool_id(RuntimeOrigin::signed(10), 20, 234, 654, 783, 1)); + }); + } } mod nominate { From 0e33b4e9a3deb0a3637f0c618c6c8ac24f36c0d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Sun, 30 Oct 2022 11:09:47 +0100 Subject: [PATCH 292/348] WIP: Replace `wasm-gc` with `wasm-opt` (#12280) * Use wasm-opt on runtime * Optimize for size * Simplify fn compact_wasm_file * Run a lighter pass for non production builds * Disable optimizations and keep name section * Update wasm-opt * Remove dward sections * Update wasm-opt * Update wasm-opt --- Cargo.lock | 164 +++++++++++++++++++------ utils/wasm-builder/Cargo.toml | 2 +- utils/wasm-builder/src/wasm_project.rs | 55 ++++----- 3 files changed, 150 insertions(+), 71 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cbf02a3fa6a50..b87f45508dfc9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,9 +93,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.38" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" +checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" [[package]] name = "approx" @@ -836,9 +836,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.71" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" dependencies = [ "jobserver", ] @@ -1030,6 +1030,16 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + [[package]] name = "comfy-table" version = "6.0.0" @@ -1489,6 +1499,50 @@ dependencies = [ "zeroize", ] +[[package]] +name = "cxx" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "darling" version = "0.13.0" @@ -3904,6 +3958,15 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "link-cplusplus" +version = "1.0.7" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +dependencies = [ + "cc", +] + [[package]] name = "linked-hash-map" version = "0.5.4" @@ -6451,15 +6514,6 @@ dependencies = [ "synstructure", ] -[[package]] -name = "parity-wasm" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ad52817c4d343339b3bc2e26861bd21478eda0b7509acf83505727000512ac" -dependencies = [ - "byteorder", -] - [[package]] name = "parity-wasm" version = "0.45.0" @@ -7237,9 +7291,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.5" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -7258,9 +7312,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "region" @@ -8074,7 +8128,7 @@ dependencies = [ "log", "once_cell", "parity-scale-codec", - "parity-wasm 0.45.0", + "parity-wasm", "paste 1.0.6", "rustix", "sc-allocator", @@ -8924,6 +8978,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" + [[package]] name = "sct" version = "0.7.0" @@ -10095,7 +10155,7 @@ version = "5.0.0" dependencies = [ "impl-serde", "parity-scale-codec", - "parity-wasm 0.45.0", + "parity-wasm", "scale-info", "serde", "sp-core-hashing-proc-macro", @@ -10534,7 +10594,7 @@ dependencies = [ "tempfile", "toml", "walkdir", - "wasm-gc-api", + "wasm-opt", ] [[package]] @@ -10633,18 +10693,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", @@ -11092,7 +11152,7 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 1.0.0", + "cfg-if 0.1.10", "digest 0.10.3", "rand 0.8.5", "static_assertions", @@ -11368,23 +11428,53 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c279e376c7a8e8752a8f1eaa35b7b0bee6bb9fb0cdacfa97cc3f1f289c87e2b4" [[package]] -name = "wasm-gc-api" -version = "0.1.11" +name = "wasm-instrument" +version = 
"0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c32691b6c7e6c14e7f8fd55361a9088b507aa49620fcd06c09b3a1082186b9" +checksum = "aa1dafb3e60065305741e83db35c6c2584bb3725b692b5b66148a38d72ace6cd" dependencies = [ - "log", - "parity-wasm 0.32.0", - "rustc-demangle", + "parity-wasm", ] [[package]] -name = "wasm-instrument" -version = "0.3.0" +name = "wasm-opt" +version = "0.110.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa1dafb3e60065305741e83db35c6c2584bb3725b692b5b66148a38d72ace6cd" +checksum = "b68e8037b4daf711393f4be2056246d12d975651b14d581520ad5d1f19219cec" dependencies = [ - "parity-wasm 0.45.0", + "anyhow", + "libc", + "strum", + "strum_macros", + "tempfile", + "thiserror", + "wasm-opt-cxx-sys", + "wasm-opt-sys", +] + +[[package]] +name = "wasm-opt-cxx-sys" +version = "0.110.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91adbad477e97bba3fbd21dd7bfb594e7ad5ceb9169ab1c93ab9cb0ada636b6f" +dependencies = [ + "anyhow", + "cxx", + "cxx-build", + "wasm-opt-sys", +] + +[[package]] +name = "wasm-opt-sys" +version = "0.110.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec4fa5a322a4e6ac22fd141f498d56afbdbf9df5debeac32380d2dcaa3e06941" +dependencies = [ + "anyhow", + "cc", + "cxx", + "cxx-build", + "regex", ] [[package]] @@ -11621,7 +11711,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc13b3c219ca9aafeec59150d80d89851df02e0061bc357b4d66fc55a8d38787" dependencies = [ - "parity-wasm 0.45.0", + "parity-wasm", "wasmi-validation", "wasmi_core", ] @@ -11632,7 +11722,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ff416ad1ff0c42e5a926ed5d5fab74c0f098749aa0ad8b2a34b982ce0e867b" dependencies = [ - "parity-wasm 0.45.0", + "parity-wasm", ] [[package]] diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index ac0ba5dbdb85a..46c5929969fbb 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -20,6 +20,6 @@ strum = { version = "0.24.1", features = ["derive"] } tempfile = "3.1.0" toml = "0.5.4" walkdir = "2.3.2" -wasm-gc-api = "0.1.11" sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/maybe-compressed-blob" } filetime = "0.2.16" +wasm-opt = "0.110" \ No newline at end of file diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 197c1d1b220bb..07219676413fc 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -656,50 +656,39 @@ fn compact_wasm_file( project: &Path, profile: Profile, cargo_manifest: &Path, - wasm_binary_name: Option, + out_name: Option, ) -> (Option, Option, WasmBinaryBloaty) { - let default_wasm_binary_name = get_wasm_binary_name(cargo_manifest); - let wasm_file = project + let default_out_name = get_wasm_binary_name(cargo_manifest); + let out_name = out_name.unwrap_or_else(|| default_out_name.clone()); + let in_path = project .join("target/wasm32-unknown-unknown") .join(profile.directory()) - .join(format!("{}.wasm", default_wasm_binary_name)); - - let wasm_compact_file = if profile.wants_compact() { - let wasm_compact_file = project.join(format!( - "{}.compact.wasm", - wasm_binary_name.clone().unwrap_or_else(|| default_wasm_binary_name.clone()), - )); - wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file) + 
.join(format!("{}.wasm", default_out_name)); + + let (wasm_compact_path, wasm_compact_compressed_path) = if profile.wants_compact() { + let wasm_compact_path = project.join(format!("{}.compact.wasm", out_name,)); + wasm_opt::OptimizationOptions::new_opt_level_0() + .mvp_features_only() + .debug_info(true) + .add_pass(wasm_opt::Pass::StripDwarf) + .run(&in_path, &wasm_compact_path) .expect("Failed to compact generated WASM binary."); - Some(WasmBinary(wasm_compact_file)) - } else { - None - }; - - let wasm_compact_compressed_file = wasm_compact_file.as_ref().and_then(|compact_binary| { - let file_name = - wasm_binary_name.clone().unwrap_or_else(|| default_wasm_binary_name.clone()); - let wasm_compact_compressed_file = - project.join(format!("{}.compact.compressed.wasm", file_name)); - - if compress_wasm(&compact_binary.0, &wasm_compact_compressed_file) { - Some(WasmBinary(wasm_compact_compressed_file)) + let wasm_compact_compressed_path = + project.join(format!("{}.compact.compressed.wasm", out_name)); + if compress_wasm(&wasm_compact_path, &wasm_compact_compressed_path) { + (Some(WasmBinary(wasm_compact_path)), Some(WasmBinary(wasm_compact_compressed_path))) } else { - None + (Some(WasmBinary(wasm_compact_path)), None) } - }); - - let bloaty_file_name = if let Some(name) = wasm_binary_name { - format!("{}.wasm", name) } else { - format!("{}.wasm", default_wasm_binary_name) + (None, None) }; - let bloaty_file = project.join(bloaty_file_name); - fs::copy(wasm_file, &bloaty_file).expect("Copying the bloaty file to the project dir."); + let bloaty_path = project.join(format!("{}.wasm", out_name)); + fs::copy(in_path, &bloaty_path).expect("Copying the bloaty file to the project dir."); - (wasm_compact_file, wasm_compact_compressed_file, WasmBinaryBloaty(bloaty_file)) + (wasm_compact_path, wasm_compact_compressed_path, WasmBinaryBloaty(bloaty_path)) } fn compress_wasm(wasm_binary_path: &Path, compressed_binary_out_path: &Path) -> bool { From 5fbd2a333491f34cbb5e77ba1135293b9ff58eac Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Sun, 30 Oct 2022 17:00:26 +0100 Subject: [PATCH 293/348] Use minimum_nominator_bond instead of nominator_bond (#12585) Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- frame/nomination-pools/src/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index 17fee7cc8dda3..aadd4871d737e 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -4207,10 +4207,10 @@ mod create { ExtBuilder::default().build_and_execute(|| { let ed = Balances::minimum_balance(); - Balances::make_free_balance_be(&11, StakingMock::minimum_bond() + ed); + Balances::make_free_balance_be(&11, StakingMock::minimum_nominator_bond() + ed); assert_ok!(Pools::create( RuntimeOrigin::signed(11), - StakingMock::minimum_bond(), + StakingMock::minimum_nominator_bond(), 123, 456, 789 From 5ae005c244295d23586c93da43148b2bc826b137 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Sun, 30 Oct 2022 18:58:29 +0100 Subject: [PATCH 294/348] BlockId removal: refactor: Backend::append_justification (#12551) * BlockId removal: refactor: Backend::append_justification It changes the arguments of `Backend::append_justification` from: block: `BlockId` to: hash: `&Block::Hash` This PR is part of `BlockId::Number` refactoring analysis (paritytech/substrate#11292) * Error message improved Co-authored-by: Adrian 
Catangiu * single error message in beefy::finalize * println removed Co-authored-by: Adrian Catangiu --- client/api/src/backend.rs | 5 ++--- client/api/src/in_mem.rs | 20 ++++++++++---------- client/beefy/src/worker.rs | 32 +++++++++++++++++++------------- client/db/src/lib.rs | 15 +++++++-------- 4 files changed, 38 insertions(+), 34 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 77a9da3535655..df9c92b626ecc 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -27,7 +27,6 @@ use sp_blockchain; use sp_consensus::BlockOrigin; use sp_core::offchain::OffchainStorage; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HashFor, NumberFor}, Justification, Justifications, StateVersion, Storage, }; @@ -485,12 +484,12 @@ pub trait Backend: AuxStore + Send + Sync { justification: Option, ) -> sp_blockchain::Result<()>; - /// Append justification to the block with the given Id. + /// Append justification to the block with the given `hash`. /// /// This should only be called for blocks that are already finalized. fn append_justification( &self, - block: BlockId, + hash: &Block::Hash, justification: Justification, ) -> sp_blockchain::Result<()>; diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 29f7eade9fa6f..4028b99d6fb37 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -295,10 +295,9 @@ impl Blockchain { fn append_justification( &self, - id: BlockId, + hash: &Block::Hash, justification: Justification, ) -> sp_blockchain::Result<()> { - let hash = self.expect_block_hash_from_id(&id)?; let mut storage = self.storage.write(); let block = storage @@ -745,10 +744,10 @@ where fn append_justification( &self, - block: BlockId, + hash: &Block::Hash, justification: Justification, ) -> sp_blockchain::Result<()> { - self.blockchain.append_justification(block, justification) + self.blockchain.append_justification(hash, justification) } fn blockchain(&self) -> &Self::Blockchain { @@ -865,26 +864,27 @@ mod tests { fn append_and_retrieve_justifications() { let blockchain = test_blockchain(); let last_finalized = blockchain.last_finalized().unwrap(); - let block = BlockId::Hash(last_finalized); - blockchain.append_justification(block, (ID2, vec![4])).unwrap(); + blockchain.append_justification(&last_finalized, (ID2, vec![4])).unwrap(); let justifications = { let mut just = Justifications::from((ID1, vec![3])); just.append((ID2, vec![4])); just }; - assert_eq!(blockchain.justifications(block).unwrap(), Some(justifications)); + assert_eq!( + blockchain.justifications(BlockId::Hash(last_finalized)).unwrap(), + Some(justifications) + ); } #[test] fn store_duplicate_justifications_is_forbidden() { let blockchain = test_blockchain(); let last_finalized = blockchain.last_finalized().unwrap(); - let block = BlockId::Hash(last_finalized); - blockchain.append_justification(block, (ID2, vec![0])).unwrap(); + blockchain.append_justification(&last_finalized, (ID2, vec![0])).unwrap(); assert!(matches!( - blockchain.append_justification(block, (ID2, vec![1])), + blockchain.append_justification(&last_finalized, (ID2, vec![1])), Err(sp_blockchain::Error::BadJustification(_)), )); } diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 9b331b73ed093..305b92d696975 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -500,20 +500,22 @@ where self.on_demand_justifications.cancel_requests_older_than(block_num); - if let Err(e) = self.backend.append_justification( - 
BlockId::Number(block_num), - (BEEFY_ENGINE_ID, finality_proof.encode()), - ) { + if let Err(e) = self + .backend + .blockchain() + .expect_block_hash_from_id(&BlockId::Number(block_num)) + .and_then(|hash| { + self.links + .to_rpc_best_block_sender + .notify(|| Ok::<_, ()>(hash)) + .expect("forwards closure result; the closure always returns Ok; qed."); + + self.backend + .append_justification(&hash, (BEEFY_ENGINE_ID, finality_proof.encode())) + }) { error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof); } - self.backend.blockchain().hash(block_num).ok().flatten().map(|hash| { - self.links - .to_rpc_best_block_sender - .notify(|| Ok::<_, ()>(hash)) - .expect("forwards closure result; the closure always returns Ok; qed.") - }); - self.links .to_rpc_justif_sender .notify(|| Ok::<_, ()>(finality_proof)) @@ -1546,8 +1548,10 @@ pub(crate) mod tests { commitment, signatures: vec![None], }); + let hashof10 = + backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); backend - .append_justification(BlockId::Number(10), (BEEFY_ENGINE_ID, justif.encode())) + .append_justification(&hashof10, (BEEFY_ENGINE_ID, justif.encode())) .unwrap(); // initialize voter at block 13, expect rounds initialized at last beefy finalized 10 @@ -1580,8 +1584,10 @@ pub(crate) mod tests { commitment, signatures: vec![None], }); + let hashof12 = + backend.blockchain().expect_block_hash_from_id(&BlockId::Number(12)).unwrap(); backend - .append_justification(BlockId::Number(12), (BEEFY_ENGINE_ID, justif.encode())) + .append_justification(&hashof12, (BEEFY_ENGINE_ID, justif.encode())) .unwrap(); // initialize voter at block 13, expect rounds initialized at last beefy finalized 12 diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index e212b382716f1..4a7159208c2c8 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -2013,12 +2013,11 @@ impl sc_client_api::backend::Backend for Backend { fn append_justification( &self, - block: BlockId, + hash: &Block::Hash, justification: Justification, ) -> ClientResult<()> { let mut transaction: Transaction = Transaction::new(); - let hash = self.blockchain.expect_block_hash_from_id(&block)?; - let header = self.blockchain.expect_header(block)?; + let header = self.blockchain.expect_header(BlockId::Hash(*hash))?; let number = *header.number(); // Check if the block is finalized first. @@ -2027,13 +2026,13 @@ impl sc_client_api::backend::Backend for Backend { // We can do a quick check first, before doing a proper but more expensive check if number > self.blockchain.info().finalized_number || - (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) + (*hash != last_finalized && !is_descendent_of(hash, &last_finalized)?) { return Err(ClientError::NotInFinalizedChain) } let justifications = if let Some(mut stored_justifications) = - self.blockchain.justifications(block)? + self.blockchain.justifications(BlockId::Hash(*hash))? 
{ if !stored_justifications.append(justification) { return Err(ClientError::BadJustification("Duplicate consensus engine ID".into())) @@ -2045,7 +2044,7 @@ impl sc_client_api::backend::Backend for Backend { transaction.set_from_vec( columns::JUSTIFICATIONS, - &utils::number_and_hash_to_lookup_key(number, hash)?, + &utils::number_and_hash_to_lookup_key(number, *hash)?, justifications.encode(), ); @@ -3029,11 +3028,11 @@ pub(crate) mod tests { backend.finalize_block(&block1, Some(just0.clone().into())).unwrap(); let just1 = (CONS1_ENGINE_ID, vec![4, 5]); - backend.append_justification(BlockId::Number(1), just1.clone()).unwrap(); + backend.append_justification(&block1, just1.clone()).unwrap(); let just2 = (CONS1_ENGINE_ID, vec![6, 7]); assert!(matches!( - backend.append_justification(BlockId::Number(1), just2), + backend.append_justification(&block1, just2), Err(ClientError::BadJustification(_)) )); From 70351393fd632317124f35ab8b24ef7134e08864 Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Mon, 31 Oct 2022 12:20:29 +0100 Subject: [PATCH 295/348] fix some typos (#12584) --- frame/conviction-voting/src/lib.rs | 12 ++++++------ frame/conviction-voting/src/vote.rs | 2 +- frame/ranked-collective/src/lib.rs | 10 +++++----- frame/referenda/src/lib.rs | 6 +++--- frame/referenda/src/types.rs | 4 ++-- frame/whitelist/src/lib.rs | 4 ++-- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index b876a9354ee59..3ecc6e56be94e 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -121,8 +121,8 @@ pub mod pallet { /// The maximum number of concurrent votes an account may have. /// - /// Also used to compute weight, an overly large value can - /// lead to extrinsic with large weight estimation: see `delegate` for instance. + /// Also used to compute weight, an overly large value can lead to extrinsics with large + /// weight estimation: see `delegate` for instance. #[pallet::constant] type MaxVotes: Get; @@ -261,7 +261,7 @@ pub mod pallet { /// Undelegate the voting power of the sending account for a particular class of polls. /// /// Tokens may be unlocked following once an amount of time consistent with the lock period - /// of the conviction with which the delegation was issued. + /// of the conviction with which the delegation was issued has passed. /// /// The dispatch origin of this call must be _Signed_ and the signing account must be /// currently delegating. @@ -284,7 +284,7 @@ pub mod pallet { Ok(Some(T::WeightInfo::undelegate(votes)).into()) } - /// Remove the lock caused prior voting/delegating which has expired within a particluar + /// Remove the lock caused by prior voting/delegating which has expired within a particular /// class. /// /// The dispatch origin of this call must be _Signed_. @@ -475,7 +475,7 @@ impl, I: 'static> Pallet { }) } - /// Return the number of votes for `who` + /// Return the number of votes for `who`. fn increase_upstream_delegation( who: &T::AccountId, class: &ClassOf, @@ -503,7 +503,7 @@ impl, I: 'static> Pallet { }) } - /// Return the number of votes for `who` + /// Return the number of votes for `who`. 
fn reduce_upstream_delegation( who: &T::AccountId, class: &ClassOf, diff --git a/frame/conviction-voting/src/vote.rs b/frame/conviction-voting/src/vote.rs index a0b17ab4de39d..a8e012b6c97a1 100644 --- a/frame/conviction-voting/src/vote.rs +++ b/frame/conviction-voting/src/vote.rs @@ -233,7 +233,7 @@ where AsMut::>::as_mut(self).rejig(now); } - /// The amount of this account's balance that much currently be locked due to voting. + /// The amount of this account's balance that must currently be locked due to voting. pub fn locked_balance(&self) -> Balance { match self { Voting::Casting(Casting { votes, prior, .. }) => diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index 111c5f70efdfa..33aed2704918c 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -21,7 +21,7 @@ //! systems such as the Referenda pallet. Members each have a rank, with zero being the lowest. //! There is no complexity limitation on either the number of members at a rank or the number of //! ranks in the system thus allowing potentially public membership. A member of at least a given -//! rank can be selected at random in O(1) time, allowing for various games to constructed using +//! rank can be selected at random in O(1) time, allowing for various games to be constructed using //! this as a primitive. Members may only be promoted and demoted by one rank at a time, however //! all operations (save one) are O(1) in complexity. The only operation which is not O(1) is the //! `remove_member` since they must be removed from all ranks from the present down to zero. @@ -33,7 +33,7 @@ //! //! Two `Config` trait items control these "rank privileges": `MinRankOfClass` and `VoteWeight`. //! The first controls which ranks are allowed to vote on a particular class of poll. The second -//! controls the weight of a vote given the voters rank compared to the minimum rank of the poll. +//! controls the weight of a vote given the voter's rank compared to the minimum rank of the poll. //! //! An origin control, `EnsureRank`, ensures that the origin is a member of the collective of at //! least a particular rank. @@ -310,8 +310,8 @@ impl, I: 'static, const MIN_RANK: u16> EnsureOrigin(PhantomData<(T, I)>); impl, I: 'static, const MIN_RANK: u16> EnsureOrigin for EnsureRankedMember @@ -430,7 +430,7 @@ pub mod pallet { pub enum Event, I: 'static = ()> { /// A member `who` has been added. MemberAdded { who: T::AccountId }, - /// The member `who`'s rank has been changed to the given `rank`. + /// The member `who`se rank has been changed to the given `rank`. RankChanged { who: T::AccountId, rank: Rank }, /// The member `who` of given `rank` has been removed from the collective. MemberRemoved { who: T::AccountId, rank: Rank }, diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index 1bdc19d49c414..d060c3db3fa70 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -249,7 +249,7 @@ pub mod pallet { #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { - /// A referendum has being submitted. + /// A referendum has been submitted. Submitted { /// Index of the referendum. index: ReferendumIndex, @@ -352,7 +352,7 @@ pub mod pallet { HasDeposit, /// The track identifier given was invalid. BadTrack, - /// There are already a full complement of referendums in progress for this track. + /// There are already a full complement of referenda in progress for this track. 
Full, /// The queue of the track is empty. QueueEmpty, @@ -908,7 +908,7 @@ impl, I: 'static> Pallet { /// /// In terms of storage, every call to it is expected to access: /// - The scheduler, either to insert, remove or alter an entry; - /// - `TrackQueue`, which should be a `BoundedVec` with a low limit (8-16). + /// - `TrackQueue`, which should be a `BoundedVec` with a low limit (8-16); /// - `DecidingCount`. /// /// Both of the two storage items will only have as many items as there are different tracks, diff --git a/frame/referenda/src/types.rs b/frame/referenda/src/types.rs index 2ce93cb6adc3c..48db0847edf2e 100644 --- a/frame/referenda/src/types.rs +++ b/frame/referenda/src/types.rs @@ -178,7 +178,7 @@ pub struct ReferendumStatus< pub(crate) proposal: Call, /// The time the proposal should be scheduled for enactment. pub(crate) enactment: DispatchTime, - /// The time of submission. Once `UndecidingTimeout` passes, it may be closed by anyone if it + /// The time of submission. Once `UndecidingTimeout` passes, it may be closed by anyone if /// `deciding` is `None`. pub(crate) submitted: Moment, /// The deposit reserved for the submission of this referendum. @@ -224,7 +224,7 @@ pub enum ReferendumInfo< Approved(Moment, Deposit, Option>), /// Referendum finished with rejection. Submission deposit is held. Rejected(Moment, Deposit, Option>), - /// Referendum finished with cancelation. Submission deposit is held. + /// Referendum finished with cancellation. Submission deposit is held. Cancelled(Moment, Deposit, Option>), /// Referendum finished and was never decided. Submission deposit is held. TimedOut(Moment, Deposit, Option>), diff --git a/frame/whitelist/src/lib.rs b/frame/whitelist/src/lib.rs index 55d5c42b8f4bc..be5fdf9e472b3 100644 --- a/frame/whitelist/src/lib.rs +++ b/frame/whitelist/src/lib.rs @@ -26,8 +26,8 @@ //! and allow another configurable origin: [`Config::DispatchWhitelistedOrigin`] to dispatch them //! with the root origin. //! -//! In the meantime the call corresponding to the hash must have been submitted to the to the -//! pre-image handler [`PreimageProvider`]. +//! In the meantime the call corresponding to the hash must have been submitted to the pre-image +//! handler [`PreimageProvider`]. 
#![cfg_attr(not(feature = "std"), no_std)] From 07a6332886f3bd030a49586ba5f825fc3a4d1851 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 31 Oct 2022 12:43:36 +0100 Subject: [PATCH 296/348] Added test for Client::block (#12590) --- client/service/test/src/client/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 9937c436a33ff..329bffae03e7b 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1128,6 +1128,14 @@ fn finality_notifications_content() { assert!(finality_notifications.try_next().is_err()); } +#[test] +fn get_block_by_bad_block_hash_returns_none() { + let client = substrate_test_runtime_client::new(); + + let hash = H256::from_low_u64_be(5); + assert!(client.block(&BlockId::Hash(hash)).unwrap().is_none()); +} + #[test] fn get_header_by_block_number_doesnt_panic() { let client = substrate_test_runtime_client::new(); From 0d64ba4268106fffe430d41b541c1aeedd4f8da5 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 31 Oct 2022 17:41:01 +0200 Subject: [PATCH 297/348] client/beefy: fix incorrect BEEFY justifications import test (#12593) Signed-off-by: Adrian Catangiu --- client/beefy/src/tests.rs | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index f0647d7b142e6..b0a1433fafec6 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -755,10 +755,16 @@ fn beefy_importing_blocks() { block_on(block_import.import_block(params(block, None), HashMap::new())).unwrap(), ImportResult::AlreadyInChain ); - // Verify no justifications present: + // Verify no BEEFY justifications present: { // none in backend, - assert!(full_client.justifications(&block_id).unwrap().is_none()); + assert_eq!( + full_client + .justifications(&block_id) + .unwrap() + .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), + None + ); // and none sent to BEEFY worker. block_on(poll_fn(move |cx| { assert_eq!(justif_recv.poll_next_unpin(cx), Poll::Pending); @@ -787,11 +793,18 @@ fn beefy_importing_blocks() { ..Default::default() }), ); - // Verify justification successfully imported: + // Verify BEEFY justification successfully imported: { - // available in backend, - assert!(full_client.justifications(&BlockId::Number(block_num)).unwrap().is_some()); - // and also sent to BEEFY worker. + // still not in backend (worker is responsible for appending to backend), + assert_eq!( + full_client + .justifications(&block_id) + .unwrap() + .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), + None + ); + // but sent to BEEFY worker + // (worker will append it to backend when all previous mandatory justifs are there as well). block_on(poll_fn(move |cx| { match justif_recv.poll_next_unpin(cx) { Poll::Ready(Some(_justification)) => (), @@ -823,10 +836,16 @@ fn beefy_importing_blocks() { ..Default::default() }), ); - // Verify bad justifications was not imported: + // Verify bad BEEFY justifications was not imported: { // none in backend, - assert!(full_client.justifications(&block_id).unwrap().is_none()); + assert_eq!( + full_client + .justifications(&BlockId::Number(block_num)) + .unwrap() + .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), + None + ); // and none sent to BEEFY worker. 
block_on(poll_fn(move |cx| { assert_eq!(justif_recv.poll_next_unpin(cx), Poll::Pending); From ea71267aeb2b627d7a65a16ea6c12d1923d5564a Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 1 Nov 2022 17:24:09 +0100 Subject: [PATCH 298/348] BlockId removal: refactor: Backend::body (#12587) It changes the arguments of `Backend::body` method from: `BlockId` to: `&Block::Hash` This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) Co-authored-by: parity-processbot <> --- bin/node/inspect/src/lib.rs | 5 +- client/api/src/client.rs | 2 +- client/api/src/in_mem.rs | 15 +++-- client/db/src/lib.rs | 60 +++++++++++-------- .../network/sync/src/block_request_handler.rs | 2 +- client/network/test/src/lib.rs | 2 +- client/service/src/client/client.rs | 22 ++++--- client/service/test/src/client/mod.rs | 11 ++-- client/tracing/src/block/mod.rs | 2 +- client/transaction-pool/benches/basics.rs | 2 +- client/transaction-pool/src/api.rs | 4 +- client/transaction-pool/src/graph/pool.rs | 4 +- client/transaction-pool/src/lib.rs | 18 +++--- client/transaction-pool/src/tests.rs | 2 +- primitives/blockchain/src/backend.rs | 2 +- .../runtime/transaction-pool/src/lib.rs | 14 ++--- 16 files changed, 93 insertions(+), 74 deletions(-) diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index aacae0ff7a0d9..1e44cec914995 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -140,10 +140,11 @@ impl> Inspector BlockAddress::Bytes(bytes) => TBlock::decode(&mut &*bytes)?, BlockAddress::Number(number) => { let id = BlockId::number(number); + let hash = self.chain.expect_block_hash_from_id(&id)?; let not_found = format!("Could not find block {:?}", id); let body = self .chain - .block_body(&id)? + .block_body(&hash)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; let header = self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; @@ -154,7 +155,7 @@ impl> Inspector let not_found = format!("Could not find block {:?}", id); let body = self .chain - .block_body(&id)? + .block_body(&hash)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; let header = self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; diff --git a/client/api/src/client.rs b/client/api/src/client.rs index b809e0ee61032..670c1711db948 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -110,7 +110,7 @@ pub trait BlockBackend { /// Get block body by ID. Returns `None` if the body is not stored. 
fn block_body( &self, - id: &BlockId, + hash: &Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>>; /// Get all indexed transactions for a block, diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 4028b99d6fb37..7b0e6f7b72575 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -405,15 +405,14 @@ impl HeaderMetadata for Blockchain { impl blockchain::Backend for Blockchain { fn body( &self, - id: BlockId, + hash: &Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>> { - Ok(self.id(id).and_then(|hash| { - self.storage - .read() - .blocks - .get(&hash) - .and_then(|b| b.extrinsics().map(|x| x.to_vec())) - })) + Ok(self + .storage + .read() + .blocks + .get(hash) + .and_then(|b| b.extrinsics().map(|x| x.to_vec()))) } fn justifications(&self, id: BlockId) -> sp_blockchain::Result> { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 4a7159208c2c8..9d9a8d268dd81 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -577,8 +577,10 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha } impl sc_client_api::blockchain::Backend for BlockchainDb { - fn body(&self, id: BlockId) -> ClientResult>> { - if let Some(body) = read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? { + fn body(&self, hash: &Block::Hash) -> ClientResult>> { + if let Some(body) = + read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::(*hash))? + { // Plain body match Decode::decode(&mut &body[..]) { Ok(body) => return Ok(Some(body)), @@ -590,7 +592,12 @@ impl sc_client_api::blockchain::Backend for BlockchainDb(*hash), + )? { match Vec::>::decode(&mut &index[..]) { Ok(index) => { let mut body = Vec::new(); @@ -3201,11 +3208,11 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); } let bc = backend.blockchain(); - assert_eq!(None, bc.body(BlockId::hash(blocks[0])).unwrap()); - assert_eq!(None, bc.body(BlockId::hash(blocks[1])).unwrap()); - assert_eq!(None, bc.body(BlockId::hash(blocks[2])).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + assert_eq!(None, bc.body(&blocks[0]).unwrap()); + assert_eq!(None, bc.body(&blocks[1]).unwrap()); + assert_eq!(None, bc.body(&blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(&blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); } #[test] @@ -3236,11 +3243,11 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); - assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + assert_eq!(Some(vec![0.into()]), bc.body(&blocks[0]).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(&blocks[1]).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(&blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(&blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); } #[test] @@ -3291,7 +3298,7 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); - assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap()); + assert_eq!(Some(vec![2.into()]), 
bc.body(&fork_hash_root).unwrap()); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); @@ -3300,13 +3307,13 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); } - assert_eq!(Some(vec![0.into()]), bc.body(BlockId::hash(blocks[0])).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(BlockId::hash(blocks[1])).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(blocks[2])).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + assert_eq!(Some(vec![0.into()]), bc.body(&blocks[0]).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(&blocks[1]).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(&blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(&blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(BlockId::hash(fork_hash_root)).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(&fork_hash_root).unwrap()); assert_eq!(bc.info().best_number, 4); for i in 0..5 { assert!(bc.hash(i).unwrap().is_some()); @@ -3367,11 +3374,11 @@ pub(crate) mod tests { } let bc = backend.blockchain(); - assert_eq!(None, bc.body(BlockId::hash(blocks[0])).unwrap()); - assert_eq!(None, bc.body(BlockId::hash(blocks[1])).unwrap()); - assert_eq!(None, bc.body(BlockId::hash(blocks[2])).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(BlockId::hash(blocks[3])).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(BlockId::hash(blocks[4])).unwrap()); + assert_eq!(None, bc.body(&blocks[0]).unwrap()); + assert_eq!(None, bc.body(&blocks[1]).unwrap()); + assert_eq!(None, bc.body(&blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(&blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); } #[test] @@ -3408,11 +3415,12 @@ pub(crate) mod tests { assert_eq!(bc.indexed_transaction(&x0_hash).unwrap().unwrap(), &x0[1..]); assert_eq!(bc.indexed_transaction(&x1_hash).unwrap().unwrap(), &x1[1..]); + let hashof0 = bc.info().genesis_hash; // Push one more blocks and make sure block is pruned and transaction index is cleared. let block1 = insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap(); backend.finalize_block(&block1, None).unwrap(); - assert_eq!(bc.body(BlockId::Number(0)).unwrap(), None); + assert_eq!(bc.body(&hashof0).unwrap(), None); assert_eq!(bc.indexed_transaction(&x0_hash).unwrap(), None); assert_eq!(bc.indexed_transaction(&x1_hash).unwrap(), None); } diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index a6b39591e7945..dbde5fc5d8c22 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -364,7 +364,7 @@ where }; let body = if get_body { - match self.client.block_body(&BlockId::Hash(hash))? { + match self.client.block_body(&hash)? 
{ Some(mut extrinsics) => extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect(), None => { diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index c9b99fbc6af10..a6d73b53efdf0 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -542,7 +542,7 @@ where pub fn has_body(&self, hash: &H256) -> bool { self.backend .as_ref() - .map(|backend| backend.blockchain().body(BlockId::hash(*hash)).unwrap().is_some()) + .map(|backend| backend.blockchain().body(hash).unwrap().is_some()) .unwrap_or(false) } } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index eb946436671e4..b5ab06a0318c5 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1053,9 +1053,9 @@ where /// Get block body by id. pub fn body( &self, - id: &BlockId, + hash: &Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>> { - self.backend.blockchain().body(*id) + self.backend.blockchain().body(hash) } /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. @@ -1939,16 +1939,22 @@ where { fn block_body( &self, - id: &BlockId, + hash: &Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>> { - self.body(id) + self.body(hash) } fn block(&self, id: &BlockId) -> sp_blockchain::Result>> { - Ok(match (self.header(id)?, self.body(id)?, self.justifications(id)?) { - (Some(header), Some(extrinsics), justifications) => - Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), - _ => None, + Ok(match self.header(id)? { + Some(header) => { + let hash = header.hash(); + match (self.body(&hash)?, self.justifications(id)?) { + (Some(extrinsics), justifications) => + Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), + _ => None, + } + }, + None => None, }) } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 329bffae03e7b..b9aec421802c9 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -396,16 +396,19 @@ fn block_builder_does_not_include_invalid() { let block = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); - let hash0 = client + let hashof0 = client .expect_block_hash_from_id(&BlockId::Number(0)) .expect("block 0 was just imported. qed"); - let hash1 = client + let hashof1 = client .expect_block_hash_from_id(&BlockId::Number(1)) .expect("block 1 was just imported. qed"); assert_eq!(client.chain_info().best_number, 1); - assert_ne!(client.state_at(&hash1).unwrap().pairs(), client.state_at(&hash0).unwrap().pairs()); - assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1) + assert_ne!( + client.state_at(&hashof1).unwrap().pairs(), + client.state_at(&hashof0).unwrap().pairs() + ); + assert_eq!(client.body(&hashof1).unwrap().unwrap().len(), 1) } #[test] diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs index 2a034f06ce8a8..ee524f5f72902 100644 --- a/client/tracing/src/block/mod.rs +++ b/client/tracing/src/block/mod.rs @@ -225,7 +225,7 @@ where .ok_or_else(|| Error::MissingBlockComponent("Header not found".to_string()))?; let extrinsics = self .client - .block_body(&id) + .block_body(&self.block) .map_err(Error::InvalidBlockId)? 
.ok_or_else(|| Error::MissingBlockComponent("Extrinsics not found".to_string()))?; tracing::debug!(target: "state_tracing", "Found {} extrinsics", extrinsics.len()); diff --git a/client/transaction-pool/benches/basics.rs b/client/transaction-pool/benches/basics.rs index 2632f8fb6aab5..bc6f2f7d5e947 100644 --- a/client/transaction-pool/benches/basics.rs +++ b/client/transaction-pool/benches/basics.rs @@ -111,7 +111,7 @@ impl ChainApi for TestApi { (blake2_256(&encoded).into(), encoded.len()) } - fn block_body(&self, _id: &BlockId) -> Self::BodyFuture { + fn block_body(&self, _id: &::Hash) -> Self::BodyFuture { ready(Ok(None)) } diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 110647b8cb3b0..f162a02ddb643 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -126,8 +126,8 @@ where Pin> + Send>>; type BodyFuture = Ready::Extrinsic>>>>; - fn block_body(&self, id: &BlockId) -> Self::BodyFuture { - ready(self.client.block_body(id).map_err(error::Error::from)) + fn block_body(&self, hash: &Block::Hash) -> Self::BodyFuture { + ready(self.client.block_body(hash).map_err(error::Error::from)) } fn validate_transaction( diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index d311e0d0c8f2f..99119ac8fa8ab 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -90,8 +90,8 @@ pub trait ChainApi: Send + Sync { /// Returns hash and encoding length of the extrinsic. fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (ExtrinsicHash, usize); - /// Returns a block body given the block id. - fn block_body(&self, at: &BlockId) -> Self::BodyFuture; + /// Returns a block body given the block. + fn block_body(&self, at: &::Hash) -> Self::BodyFuture; /// Returns a block header given the block id. fn block_header( diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 410ab662e3601..9e0a2e74bf421 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -550,12 +550,12 @@ impl RevalidationStatus { /// Prune the known txs for the given block. 
async fn prune_known_txs_for_block>( - block_id: BlockId, + block_hash: &Block::Hash, api: &Api, pool: &graph::Pool, ) -> Vec> { let extrinsics = api - .block_body(&block_id) + .block_body(&block_hash) .await .unwrap_or_else(|e| { log::warn!("Prune known transactions: error request: {}", e); @@ -567,19 +567,21 @@ async fn prune_known_txs_for_block h, Ok(None) => { - log::debug!(target: "txpool", "Could not find header for {:?}.", block_id); + log::debug!(target: "txpool", "Could not find header for {:?}.", block_hash); return hashes }, Err(e) => { - log::debug!(target: "txpool", "Error retrieving header for {:?}: {}", block_id, e); + log::debug!(target: "txpool", "Error retrieving header for {:?}: {}", block_hash, e); return hashes }, }; - if let Err(e) = pool.prune(&block_id, &BlockId::hash(*header.parent_hash()), &extrinsics).await + if let Err(e) = pool + .prune(&BlockId::Hash(*block_hash), &BlockId::hash(*header.parent_hash()), &extrinsics) + .await { log::error!("Cannot prune known in the pool: {}", e); } @@ -636,7 +638,7 @@ where tree_route .enacted() .iter() - .map(|h| prune_known_txs_for_block(BlockId::Hash(h.hash), &*api, &*pool)), + .map(|h| prune_known_txs_for_block(&h.hash, &*api, &*pool)), ) .await .into_iter() @@ -654,7 +656,7 @@ where let hash = retracted.hash; let block_transactions = api - .block_body(&BlockId::hash(hash)) + .block_body(&hash) .await .unwrap_or_else(|e| { log::warn!("Failed to fetch block body: {}", e); diff --git a/client/transaction-pool/src/tests.rs b/client/transaction-pool/src/tests.rs index ce2c7872e32bb..8775dfb6e9e88 100644 --- a/client/transaction-pool/src/tests.rs +++ b/client/transaction-pool/src/tests.rs @@ -164,7 +164,7 @@ impl ChainApi for TestApi { (Hashing::hash(&encoded), len) } - fn block_body(&self, _id: &BlockId) -> Self::BodyFuture { + fn block_body(&self, _id: &::Hash) -> Self::BodyFuture { futures::future::ready(Ok(None)) } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 79c05aec8adca..610ceb0cf9323 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -89,7 +89,7 @@ pub trait Backend: HeaderBackend + HeaderMetadata { /// Get block body. Returns `None` if block is not found. - fn body(&self, id: BlockId) -> Result::Extrinsic>>>; + fn body(&self, hash: &Block::Hash) -> Result::Extrinsic>>>; /// Get block justifications. Returns `None` if no justification exists. fn justifications(&self, id: BlockId) -> Result>; /// Get last finalized block hash. 
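A minimal sketch of how a call site adapts to the reworked `body`/`block_body` signature, mirroring the `bin/node/inspect` hunk above; the `chain` handle, `id`, `not_found` and the error variant are taken from that hunk as assumptions, not new API:

    // Resolve a numeric `BlockId` to a hash first, since the new methods take `&Block::Hash`...
    let hash = chain.expect_block_hash_from_id(&id)?;
    // ...then fetch the body with the hash instead of a `BlockId`.
    let body = chain
        .block_body(&hash)?
        .ok_or_else(|| Error::NotFound(not_found.clone()))?;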
diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index dc8be78aa7397..e2d6efccea424 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -315,13 +315,13 @@ impl sc_transaction_pool::ChainApi for TestApi { Self::hash_and_length_inner(ex) } - fn block_body(&self, id: &BlockId) -> Self::BodyFuture { - futures::future::ready(Ok(match id { - BlockId::Number(num) => - self.chain.read().block_by_number.get(num).map(|b| b[0].0.extrinsics().to_vec()), - BlockId::Hash(hash) => - self.chain.read().block_by_hash.get(hash).map(|b| b.extrinsics().to_vec()), - })) + fn block_body(&self, hash: &::Hash) -> Self::BodyFuture { + futures::future::ready(Ok(self + .chain + .read() + .block_by_hash + .get(hash) + .map(|b| b.extrinsics().to_vec()))) } fn block_header( From 8fb4138d73ad88a19331c30c36ba91796c5846e1 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Wed, 2 Nov 2022 02:10:43 +0300 Subject: [PATCH 299/348] fix: construct_runtime multiple features (#12594) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: construct_runtime multiple features * Update frame/support/procedural/src/construct_runtime/mod.rs Co-authored-by: Bastian Köcher --- .../procedural/src/construct_runtime/mod.rs | 3 +- frame/support/test/Cargo.toml | 1 + frame/support/test/tests/pallet.rs | 102 ++++++++++++++++-- scripts/ci/gitlab/pipeline/test.yml | 1 + 4 files changed, 95 insertions(+), 12 deletions(-) diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 73d0d54343eb9..9e22037a6782e 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -389,8 +389,9 @@ fn decl_all_pallets<'a>( (attr, names) } else { let test_cfg = features.remove("test").then_some(quote!(test)).into_iter(); + let disabled_features = all_features.difference(&features); let features = features.iter(); - let attr = quote!(#[cfg(all( #(#test_cfg),* #(feature = #features),* ))]); + let attr = quote!(#[cfg(all( #(#test_cfg,)* #(feature = #features,)* #(not(feature = #disabled_features)),* ))]); (attr, names) } diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index d7d3bfc98f3d7..471dba8df44e2 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -51,6 +51,7 @@ try-runtime = ["frame-support/try-runtime"] # Only CI runs with this feature enabled. This feature is for testing stuff related to the FRAME macros # in conjunction with rust features. frame-feature-testing = [] +frame-feature-testing-2 = [] # Disable ui tests disable-ui-tests = [] no-metadata-docs = ["frame-support/no-metadata-docs"] diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 05c1f14601862..0fd32dad2242a 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -574,6 +574,20 @@ pub mod pallet4 { impl Pallet {} } +/// Test that the supertrait check works when we pass some parameter to the `frame_system::Config`. 
+#[frame_support::pallet] +pub mod pallet5 { + #[pallet::config] + pub trait Config: + frame_system::Config::RuntimeOrigin> + { + type RuntimeOrigin; + } + + #[pallet::pallet] + pub struct Pallet(_); +} + frame_support::parameter_types!( pub const MyGetParam3: u32 = 12; ); @@ -623,6 +637,11 @@ impl pallet3::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; } +#[cfg(feature = "frame-feature-testing-2")] +impl pallet5::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; +} + pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; @@ -640,6 +659,9 @@ frame_support::construct_runtime!( #[cfg(feature = "frame-feature-testing")] Example3: pallet3, Example4: pallet4 use_parts { Call }, + + #[cfg(feature = "frame-feature-testing-2")] + Example5: pallet5, } ); @@ -1175,7 +1197,13 @@ fn migrate_from_pallet_version_to_storage_version() { AllPalletsWithSystem, >(&db_weight); - let pallet_num = if cfg!(feature = "frame-feature-testing") { 5 } else { 4 }; + let mut pallet_num = 4; + if cfg!(feature = "frame-feature-testing") { + pallet_num += 1; + }; + if cfg!(feature = "frame-feature-testing-2") { + pallet_num += 1; + }; // `pallet_num` pallets, 2 writes and every write costs 5 weight. assert_eq!(Weight::from_ref_time(pallet_num * 2 * 5), weight); @@ -1512,6 +1540,16 @@ fn metadata() { constants: vec![], error: None, }, + #[cfg(feature = "frame-feature-testing-2")] + PalletMetadata { + index: 5, + name: "Example5", + storage: None, + calls: None, + event: None, + constants: vec![], + error: None, + }, ]; let empty_doc = pallets[0].event.as_ref().unwrap().ty.type_info().docs().is_empty() && @@ -1753,42 +1791,68 @@ fn assert_type_all_pallets_reversed_with_system_first_is_correct() { // Just ensure the 2 types are same. #[allow(deprecated)] fn _a(_t: AllPalletsReversedWithSystemFirst) {} - #[cfg(not(feature = "frame-feature-testing"))] + #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] fn _b(t: (System, Example4, Example2, Example)) { _a(t) } - #[cfg(feature = "frame-feature-testing")] + #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] fn _b(t: (System, Example4, Example3, Example2, Example)) { _a(t) } + + #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] + fn _b(t: (System, Example5, Example4, Example2, Example)) { + _a(t) + } + + #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] + fn _b(t: (System, Example5, Example4, Example3, Example2, Example)) { + _a(t) + } } #[test] fn assert_type_all_pallets_with_system_is_correct() { // Just ensure the 2 types are same. 
fn _a(_t: AllPalletsWithSystem) {} - #[cfg(not(feature = "frame-feature-testing"))] + #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] fn _b(t: (System, Example, Example2, Example4)) { _a(t) } - #[cfg(feature = "frame-feature-testing")] + #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] fn _b(t: (System, Example, Example2, Example3, Example4)) { _a(t) } + #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] + fn _b(t: (System, Example, Example2, Example4, Example5)) { + _a(t) + } + #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] + fn _b(t: (System, Example, Example2, Example3, Example4, Example5)) { + _a(t) + } } #[test] fn assert_type_all_pallets_without_system_is_correct() { // Just ensure the 2 types are same. fn _a(_t: AllPalletsWithoutSystem) {} - #[cfg(not(feature = "frame-feature-testing"))] + #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] fn _b(t: (Example, Example2, Example4)) { _a(t) } - #[cfg(feature = "frame-feature-testing")] + #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] fn _b(t: (Example, Example2, Example3, Example4)) { _a(t) } + #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] + fn _b(t: (Example, Example2, Example4, Example5)) { + _a(t) + } + #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] + fn _b(t: (Example, Example2, Example3, Example4, Example5)) { + _a(t) + } } #[test] @@ -1796,14 +1860,22 @@ fn assert_type_all_pallets_with_system_reversed_is_correct() { // Just ensure the 2 types are same. #[allow(deprecated)] fn _a(_t: AllPalletsWithSystemReversed) {} - #[cfg(not(feature = "frame-feature-testing"))] + #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] fn _b(t: (Example4, Example2, Example, System)) { _a(t) } - #[cfg(feature = "frame-feature-testing")] + #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] fn _b(t: (Example4, Example3, Example2, Example, System)) { _a(t) } + #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] + fn _b(t: (Example5, Example4, Example2, Example, System)) { + _a(t) + } + #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] + fn _b(t: (Example5, Example4, Example3, Example2, Example, System)) { + _a(t) + } } #[test] @@ -1811,14 +1883,22 @@ fn assert_type_all_pallets_without_system_reversed_is_correct() { // Just ensure the 2 types are same. 
#[allow(deprecated)] fn _a(_t: AllPalletsWithoutSystemReversed) {} - #[cfg(not(feature = "frame-feature-testing"))] + #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] fn _b(t: (Example4, Example2, Example)) { _a(t) } - #[cfg(feature = "frame-feature-testing")] + #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] fn _b(t: (Example4, Example3, Example2, Example)) { _a(t) } + #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] + fn _b(t: (Example5, Example4, Example2, Example)) { + _a(t) + } + #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] + fn _b(t: (Example5, Example4, Example3, Example2, Example)) { + _a(t) + } } #[test] diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index 8baebb39be6dc..4f523738b151c 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -254,6 +254,7 @@ test-frame-support: script: - rusty-cachier snapshot create - time cargo test --locked -p frame-support-test --features=frame-feature-testing,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec + - time cargo test --locked -p frame-support-test --features=frame-feature-testing,frame-feature-testing-2,no-metadata-docs --manifest-path ./frame/support/test/Cargo.toml --test pallet # does not reuse cache 1 min 44 sec - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout - rusty-cachier cache upload From 5683513310f44eb2dc8026cd79b189b196a5982a Mon Sep 17 00:00:00 2001 From: zjb0807 Date: Wed, 2 Nov 2022 22:56:48 +1300 Subject: [PATCH 300/348] Fix fungible unbalanced trait (#12569) * Fix fungible unbalanced trait * Add simple decrease_balance test Signed-off-by: Oliver Tale-Yazdi * Fix decrease_balance_at_most * Fix decrease_balance_at_most in fungibles * Rename free_balanceto balance_on_free * Use reducible_balance instead of balance_on_free Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi --- frame/balances/src/lib.rs | 12 +- frame/balances/src/tests.rs | 160 +++++++++++++++++- .../src/traits/tokens/fungible/balanced.rs | 7 +- .../src/traits/tokens/fungibles/balanced.rs | 7 +- 4 files changed, 175 insertions(+), 11 deletions(-) diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 4014efdd26e4e..67976cbaf06ac 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -1149,15 +1149,19 @@ impl, I: 'static> fungible::Transfer for Pallet impl, I: 'static> fungible::Unbalanced for Pallet { fn set_balance(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { - Self::mutate_account(who, |account| { - account.free = amount; + Self::mutate_account(who, |account| -> DispatchResult { + // fungibles::Unbalanced::decrease_balance didn't check account.reserved + // free = new_balance - reserved + account.free = + amount.checked_sub(&account.reserved).ok_or(ArithmeticError::Underflow)?; Self::deposit_event(Event::BalanceSet { who: who.clone(), free: account.free, reserved: account.reserved, }); - })?; - Ok(()) + + Ok(()) + })? } fn set_total_issuance(amount: Self::Balance) { diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index faf53e16cb028..83944caf9f7ff 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -24,7 +24,7 @@ macro_rules! 
decl_tests { ($test:ty, $ext_builder:ty, $existential_deposit:expr) => { use crate::*; - use sp_runtime::{ArithmeticError, FixedPointNumber, traits::{SignedExtension, BadOrigin}}; + use sp_runtime::{ArithmeticError, TokenError, FixedPointNumber, traits::{SignedExtension, BadOrigin}}; use frame_support::{ assert_noop, assert_storage_noop, assert_ok, assert_err, traits::{ @@ -1298,5 +1298,163 @@ macro_rules! decl_tests { assert_eq!(Balances::reserved_balance(&1337), 42); }); } + + #[test] + fn fungible_unbalanced_trait_set_balance_works() { + <$ext_builder>::default().build().execute_with(|| { + assert_eq!(>::balance(&1337), 0); + assert_ok!(>::set_balance(&1337, 100)); + assert_eq!(>::balance(&1337), 100); + + assert_ok!(Balances::reserve(&1337, 60)); + assert_eq!(Balances::free_balance(1337) , 40); + assert_eq!(Balances::reserved_balance(1337), 60); + + assert_noop!(>::set_balance(&1337, 0), ArithmeticError::Underflow); + + assert_ok!(>::set_balance(&1337, 60)); + assert_eq!(Balances::free_balance(1337) , 0); + assert_eq!(Balances::reserved_balance(1337), 60); + }); + } + + #[test] + fn fungible_unbalanced_trait_set_total_issuance_works() { + <$ext_builder>::default().build().execute_with(|| { + assert_eq!(>::total_issuance(), 0); + >::set_total_issuance(100); + assert_eq!(>::total_issuance(), 100); + }); + } + + #[test] + fn fungible_unbalanced_trait_decrease_balance_simple_works() { + <$ext_builder>::default().build().execute_with(|| { + // An Account that starts at 100 + assert_ok!(>::set_balance(&1337, 100)); + // and reserves 50 + assert_ok!(Balances::reserve(&1337, 50)); + // and is decreased by 20 + assert_ok!(>::decrease_balance(&1337, 20)); + // should end up at 80. + assert_eq!(>::balance(&1337), 80); + }); + } + + #[test] + fn fungible_unbalanced_trait_decrease_balance_works() { + <$ext_builder>::default().build().execute_with(|| { + assert_ok!(>::set_balance(&1337, 100)); + assert_eq!(>::balance(&1337), 100); + + assert_noop!( + >::decrease_balance(&1337, 101), + TokenError::NoFunds + ); + assert_eq!( + >::decrease_balance(&1337, 100), + Ok(100) + ); + assert_eq!(>::balance(&1337), 0); + + // free: 40, reserved: 60 + assert_ok!(>::set_balance(&1337, 100)); + assert_ok!(Balances::reserve(&1337, 60)); + assert_eq!(Balances::free_balance(1337) , 40); + assert_eq!(Balances::reserved_balance(1337), 60); + assert_noop!( + >::decrease_balance(&1337, 41), + TokenError::NoFunds + ); + assert_eq!( + >::decrease_balance(&1337, 40), + Ok(40) + ); + assert_eq!(>::balance(&1337), 60); + assert_eq!(Balances::free_balance(1337), 0); + assert_eq!(Balances::reserved_balance(1337), 60); + }); + } + + #[test] + fn fungible_unbalanced_trait_decrease_balance_at_most_works() { + <$ext_builder>::default().build().execute_with(|| { + assert_ok!(>::set_balance(&1337, 100)); + assert_eq!(>::balance(&1337), 100); + + assert_eq!( + >::decrease_balance_at_most(&1337, 101), + 100 + ); + assert_eq!(>::balance(&1337), 0); + + assert_ok!(>::set_balance(&1337, 100)); + assert_eq!( + >::decrease_balance_at_most(&1337, 100), + 100 + ); + assert_eq!(>::balance(&1337), 0); + + // free: 40, reserved: 60 + assert_ok!(>::set_balance(&1337, 100)); + assert_ok!(Balances::reserve(&1337, 60)); + assert_eq!(Balances::free_balance(1337) , 40); + assert_eq!(Balances::reserved_balance(1337), 60); + assert_eq!( + >::decrease_balance_at_most(&1337, 0), + 0 + ); + assert_eq!(Balances::free_balance(1337) , 40); + assert_eq!(Balances::reserved_balance(1337), 60); + assert_eq!( + >::decrease_balance_at_most(&1337, 10), + 10 + ); + 
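// Free balance drops from 40 to 30 here; the 60 reserved stays untouched, so a further
// decrease_balance_at_most can only take the remaining 30 of free balance.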
assert_eq!(Balances::free_balance(1337), 30); + assert_eq!( + >::decrease_balance_at_most(&1337, 200), + 30 + ); + assert_eq!(>::balance(&1337), 60); + assert_eq!(Balances::free_balance(1337), 0); + assert_eq!(Balances::reserved_balance(1337), 60); + }); + } + + #[test] + fn fungible_unbalanced_trait_increase_balance_works() { + <$ext_builder>::default().build().execute_with(|| { + assert_noop!( + >::increase_balance(&1337, 0), + TokenError::BelowMinimum + ); + assert_eq!( + >::increase_balance(&1337, 1), + Ok(1) + ); + assert_noop!( + >::increase_balance(&1337, u64::MAX), + ArithmeticError::Overflow + ); + }); + } + + #[test] + fn fungible_unbalanced_trait_increase_balance_at_most_works() { + <$ext_builder>::default().build().execute_with(|| { + assert_eq!( + >::increase_balance_at_most(&1337, 0), + 0 + ); + assert_eq!( + >::increase_balance_at_most(&1337, 1), + 1 + ); + assert_eq!( + >::increase_balance_at_most(&1337, u64::MAX), + u64::MAX - 1 + ); + }); + } } } diff --git a/frame/support/src/traits/tokens/fungible/balanced.rs b/frame/support/src/traits/tokens/fungible/balanced.rs index ed9c3a1afa480..0e75ccc22d050 100644 --- a/frame/support/src/traits/tokens/fungible/balanced.rs +++ b/frame/support/src/traits/tokens/fungible/balanced.rs @@ -164,7 +164,7 @@ pub trait Unbalanced: Inspect { amount: Self::Balance, ) -> Result { let old_balance = Self::balance(who); - let (mut new_balance, mut amount) = if old_balance < amount { + let (mut new_balance, mut amount) = if Self::reducible_balance(who, false) < amount { return Err(TokenError::NoFunds.into()) } else { (old_balance - amount, amount) @@ -186,8 +186,9 @@ pub trait Unbalanced: Inspect { /// Return the imbalance by which the account was reduced. fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { let old_balance = Self::balance(who); - let (mut new_balance, mut amount) = if old_balance < amount { - (Zero::zero(), old_balance) + let old_free_balance = Self::reducible_balance(who, false); + let (mut new_balance, mut amount) = if old_free_balance < amount { + (old_balance.saturating_sub(old_free_balance), old_free_balance) } else { (old_balance - amount, amount) }; diff --git a/frame/support/src/traits/tokens/fungibles/balanced.rs b/frame/support/src/traits/tokens/fungibles/balanced.rs index a870168e4db91..9e50ff834a874 100644 --- a/frame/support/src/traits/tokens/fungibles/balanced.rs +++ b/frame/support/src/traits/tokens/fungibles/balanced.rs @@ -185,7 +185,7 @@ pub trait Unbalanced: Inspect { amount: Self::Balance, ) -> Result { let old_balance = Self::balance(asset, who); - let (mut new_balance, mut amount) = if old_balance < amount { + let (mut new_balance, mut amount) = if Self::reducible_balance(asset, who, false) < amount { return Err(TokenError::NoFunds.into()) } else { (old_balance - amount, amount) @@ -211,8 +211,9 @@ pub trait Unbalanced: Inspect { amount: Self::Balance, ) -> Self::Balance { let old_balance = Self::balance(asset, who); - let (mut new_balance, mut amount) = if old_balance < amount { - (Zero::zero(), old_balance) + let old_free_balance = Self::reducible_balance(asset, who, false); + let (mut new_balance, mut amount) = if old_free_balance < amount { + (old_balance.saturating_sub(old_free_balance), old_free_balance) } else { (old_balance - amount, amount) }; From 2216f8cdaaa0ee92938675523e14c4d1a856dad0 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 2 Nov 2022 14:39:28 +0100 Subject: [PATCH 301/348] [ci] allow fail 
skip-if-draft job (#12604) --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e6f26cbfd5cf7..9053e39eb59bf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -210,6 +210,7 @@ skip-if-draft: - echo "Ref is ${CI_COMMIT_REF_NAME}" - echo "pipeline source is ${CI_PIPELINE_SOURCE}" - ./scripts/ci/gitlab/skip_if_draft.sh + allow_failure: true include: # check jobs From 1f17288074b389091c4242c54be8b8a7fdc4b39b Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 2 Nov 2022 23:15:33 +0100 Subject: [PATCH 302/348] BlockId removal: refactor: Backend::justifications (#12602) * BlockId removal: refactor: Backend::justifications It changes the arguments of `Backend::justifications` method from: `BlockId` to: `&Block::Hash` This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) * trigger CI job * trigger CI job * bug fix * match -> if Co-authored-by: Adrian Catangiu Co-authored-by: Adrian Catangiu --- client/api/src/client.rs | 2 +- client/api/src/in_mem.rs | 13 ++---- .../incoming_requests_handler.rs | 21 +++++---- client/beefy/src/tests.rs | 10 +++-- client/beefy/src/worker.rs | 4 +- client/db/src/lib.rs | 18 ++++---- client/finality-grandpa/src/finality_proof.rs | 6 ++- client/finality-grandpa/src/tests.rs | 40 +++++------------ client/finality-grandpa/src/warp_proof.rs | 2 +- .../network/sync/src/block_request_handler.rs | 7 +-- client/network/test/src/block_import.rs | 2 +- client/network/test/src/lib.rs | 7 ++- client/network/test/src/sync.rs | 45 ++++++++++--------- client/service/src/client/client.rs | 6 +-- client/service/test/src/client/mod.rs | 6 +-- primitives/blockchain/src/backend.rs | 2 +- 16 files changed, 89 insertions(+), 102 deletions(-) diff --git a/client/api/src/client.rs b/client/api/src/client.rs index 670c1711db948..d0053afeba97b 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -131,7 +131,7 @@ pub trait BlockBackend { -> sp_blockchain::Result; /// Get block justifications for the block with the given id. - fn justifications(&self, id: &BlockId) -> sp_blockchain::Result>; + fn justifications(&self, hash: &Block::Hash) -> sp_blockchain::Result>; /// Get block hash by number. 
fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result>; diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 7b0e6f7b72575..6f33695fe4bf4 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -415,10 +415,8 @@ impl blockchain::Backend for Blockchain { .and_then(|b| b.extrinsics().map(|x| x.to_vec()))) } - fn justifications(&self, id: BlockId) -> sp_blockchain::Result> { - Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash).and_then(|b| b.justifications().cloned()) - })) + fn justifications(&self, hash: &Block::Hash) -> sp_blockchain::Result> { + Ok(self.storage.read().blocks.get(&hash).and_then(|b| b.justifications().cloned())) } fn last_finalized(&self) -> sp_blockchain::Result { @@ -816,7 +814,7 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { #[cfg(test)] mod tests { use crate::{in_mem::Blockchain, NewBlockState}; - use sp_api::{BlockId, HeaderT}; + use sp_api::HeaderT; use sp_blockchain::Backend; use sp_runtime::{ConsensusEngineId, Justifications}; use substrate_test_runtime::{Block, Header, H256}; @@ -870,10 +868,7 @@ mod tests { just.append((ID2, vec![4])); just }; - assert_eq!( - blockchain.justifications(BlockId::Hash(last_finalized)).unwrap(), - Some(justifications) - ); + assert_eq!(blockchain.justifications(&last_finalized).unwrap(), Some(justifications)); } #[test] diff --git a/client/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/beefy/src/communication/request_response/incoming_requests_handler.rs index c0910a60fba3b..3affbbe870ad7 100644 --- a/client/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/client/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -26,7 +26,7 @@ use log::{debug, trace}; use sc_client_api::BlockBackend; use sc_network::{config as netconfig, config::RequestResponseConfig, PeerId, ReputationChange}; use sc_network_common::protocol::ProtocolName; -use sp_runtime::{generic::BlockId, traits::Block}; +use sp_runtime::traits::Block; use std::{marker::PhantomData, sync::Arc}; use crate::communication::request_response::{ @@ -149,13 +149,18 @@ where fn handle_request(&self, request: IncomingRequest) -> Result<(), Error> { // TODO (issue #12293): validate `request` and change peer reputation for invalid requests. - let maybe_encoded_proof = self - .client - .justifications(&BlockId::Number(request.payload.begin)) - .map_err(Error::Client)? - .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) - // No BEEFY justification present. - .ok_or(()); + let maybe_encoded_proof = if let Some(hash) = + self.client.block_hash(request.payload.begin).map_err(Error::Client)? + { + self.client + .justifications(&hash) + .map_err(Error::Client)? + .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) + // No BEEFY justification present. + .ok_or(()) + } else { + Err(()) + }; request .pending_response diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index b0a1433fafec6..62d8a45f4471c 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -741,9 +741,9 @@ fn beefy_importing_blocks() { let full_client = client.as_client(); let parent_id = BlockId::Number(0); - let block_id = BlockId::Number(1); let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; + let hashof1 = block.header.hash(); // Import without justifications. 
let mut justif_recv = justif_stream.subscribe(); @@ -760,7 +760,7 @@ fn beefy_importing_blocks() { // none in backend, assert_eq!( full_client - .justifications(&block_id) + .justifications(&hashof1) .unwrap() .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), None @@ -784,6 +784,7 @@ fn beefy_importing_blocks() { let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; + let hashof2 = block.header.hash(); let mut justif_recv = justif_stream.subscribe(); assert_eq!( block_on(block_import.import_block(params(block, justif), HashMap::new())).unwrap(), @@ -798,7 +799,7 @@ fn beefy_importing_blocks() { // still not in backend (worker is responsible for appending to backend), assert_eq!( full_client - .justifications(&block_id) + .justifications(&hashof2) .unwrap() .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), None @@ -826,6 +827,7 @@ fn beefy_importing_blocks() { let builder = full_client.new_block_at(&parent_id, Default::default(), false).unwrap(); let block = builder.build().unwrap().block; + let hashof3 = block.header.hash(); let mut justif_recv = justif_stream.subscribe(); assert_eq!( block_on(block_import.import_block(params(block, justif), HashMap::new())).unwrap(), @@ -841,7 +843,7 @@ fn beefy_importing_blocks() { // none in backend, assert_eq!( full_client - .justifications(&BlockId::Number(block_num)) + .justifications(&hashof3) .unwrap() .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), None diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 305b92d696975..ea9e0d0c33999 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -709,7 +709,7 @@ where // a BEEFY justification, or at this session's boundary; voter will resume from there. loop { if let Some(true) = blockchain - .justifications(BlockId::hash(header.hash())) + .justifications(&header.hash()) .ok() .flatten() .map(|justifs| justifs.get(BEEFY_ENGINE_ID).is_some()) @@ -1401,7 +1401,7 @@ pub(crate) mod tests { })); // check BEEFY justifications are also appended to backend - let justifs = backend.blockchain().justifications(BlockId::number(2)).unwrap().unwrap(); + let justifs = backend.blockchain().justifications(&hashof2).unwrap().unwrap(); assert!(justifs.get(BEEFY_ENGINE_ID).is_some()) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 9d9a8d268dd81..af45e7c961fd3 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -642,8 +642,13 @@ impl sc_client_api::blockchain::Backend for BlockchainDb) -> ClientResult> { - match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATIONS, id)? { + fn justifications(&self, hash: &Block::Hash) -> ClientResult> { + match read_db( + &*self.db, + columns::KEY_LOOKUP, + columns::JUSTIFICATIONS, + BlockId::::Hash(*hash), + )? { Some(justifications) => match Decode::decode(&mut &justifications[..]) { Ok(justifications) => Ok(Some(justifications)), Err(err) => @@ -2039,7 +2044,7 @@ impl sc_client_api::backend::Backend for Backend { } let justifications = if let Some(mut stored_justifications) = - self.blockchain.justifications(BlockId::Hash(*hash))? + self.blockchain.justifications(hash)? 
{ if !stored_justifications.append(justification) { return Err(ClientError::BadJustification("Duplicate consensus engine ID".into())) @@ -3017,7 +3022,7 @@ pub(crate) mod tests { backend.finalize_block(&block1, justification.clone()).unwrap(); assert_eq!( - backend.blockchain().justifications(BlockId::Number(1)).unwrap(), + backend.blockchain().justifications(&block1).unwrap(), justification.map(Justifications::from), ); } @@ -3048,10 +3053,7 @@ pub(crate) mod tests { just.append(just1); just }; - assert_eq!( - backend.blockchain().justifications(BlockId::Number(1)).unwrap(), - Some(justifications), - ); + assert_eq!(backend.blockchain().justifications(&block1).unwrap(), Some(justifications),); } #[test] diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index a7042e26b1a71..3070581350662 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -183,10 +183,12 @@ where } }, AuthoritySetChangeId::Set(_, last_block_for_set) => { - let last_block_for_set_id = BlockId::Number(last_block_for_set); + let last_block_for_set_id = backend + .blockchain() + .expect_block_hash_from_id(&BlockId::Number(last_block_for_set))?; let justification = if let Some(grandpa_justification) = backend .blockchain() - .justifications(last_block_for_set_id)? + .justifications(&last_block_for_set_id)? .and_then(|justifications| justifications.into_justification(GRANDPA_ENGINE_ID)) { grandpa_justification diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 39379e69c9157..d2db1feea0fef 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -363,9 +363,11 @@ fn finalize_3_voters_no_observers() { runtime.spawn(initialize_grandpa(&mut net, peers)); net.peer(0).push_blocks(20, false); net.block_until_sync(); + let hashof20 = net.peer(0).client().info().best_hash; for i in 0..3 { assert_eq!(net.peer(i).client().info().best_number, 20, "Peer #{} failed to sync", i); + assert_eq!(net.peer(i).client().info().best_hash, hashof20, "Peer #{} failed to sync", i); } let net = Arc::new(Mutex::new(net)); @@ -373,12 +375,7 @@ fn finalize_3_voters_no_observers() { // normally there's no justification for finalized blocks assert!( - net.lock() - .peer(0) - .client() - .justifications(&BlockId::Number(20)) - .unwrap() - .is_none(), + net.lock().peer(0).client().justifications(&hashof20).unwrap().is_none(), "Extra justification for block#1", ); } @@ -616,19 +613,15 @@ fn justification_is_generated_periodically() { net.peer(0).push_blocks(32, false); net.block_until_sync(); + let hashof32 = net.peer(0).client().info().best_hash; + let net = Arc::new(Mutex::new(net)); run_to_completion(&mut runtime, 32, net.clone(), peers); // when block#32 (justification_period) is finalized, justification // is required => generated for i in 0..3 { - assert!(net - .lock() - .peer(i) - .client() - .justifications(&BlockId::Number(32)) - .unwrap() - .is_some()); + assert!(net.lock().peer(i).client().justifications(&hashof32).unwrap().is_some()); } } @@ -648,7 +641,7 @@ fn sync_justifications_on_change_blocks() { net.peer(0).push_blocks(20, false); // at block 21 we do add a transition which is instant - net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { + let hashof21 = net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; add_scheduled_change( &mut block, @@ -672,25 +665,12 @@ fn 
sync_justifications_on_change_blocks() { // the first 3 peers are grandpa voters and therefore have already finalized // block 21 and stored a justification for i in 0..3 { - assert!(net - .lock() - .peer(i) - .client() - .justifications(&BlockId::Number(21)) - .unwrap() - .is_some()); + assert!(net.lock().peer(i).client().justifications(&hashof21).unwrap().is_some()); } // the last peer should get the justification by syncing from other peers futures::executor::block_on(futures::future::poll_fn(move |cx| { - if net - .lock() - .peer(3) - .client() - .justifications(&BlockId::Number(21)) - .unwrap() - .is_none() - { + if net.lock().peer(3).client().justifications(&hashof21).unwrap().is_none() { net.lock().poll(cx); Poll::Pending } else { @@ -1686,7 +1666,7 @@ fn imports_justification_for_regular_blocks_on_import() { ); // the justification should be imported and available from the client - assert!(client.justifications(&BlockId::Hash(block_hash)).unwrap().is_some()); + assert!(client.justifications(&block_hash).unwrap().is_some()); } #[test] diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index 10d02f790a0dc..786dfacf8b0b9 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -130,7 +130,7 @@ impl WarpSyncProof { } let justification = blockchain - .justifications(BlockId::Number(*last_block))? + .justifications(&header.hash())? .and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)) .expect( "header is last in set and contains standard change signal; \ diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index dbde5fc5d8c22..08b8cffa38714 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -331,11 +331,8 @@ where let number = *header.number(); let hash = header.hash(); let parent_hash = *header.parent_hash(); - let justifications = if get_justification { - self.client.justifications(&BlockId::Hash(hash))? - } else { - None - }; + let justifications = + if get_justification { self.client.justifications(&hash)? 
} else { None }; let (justifications, justification, is_empty_justification) = if support_multiple_justifications { diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index a2bd5276c31d6..a1d42f1e60440 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -40,7 +40,7 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); let header = client.header(&BlockId::Number(1)).unwrap(); - let justifications = client.justifications(&BlockId::Number(1)).unwrap(); + let justifications = client.justifications(&hash).unwrap(); let peer_id = PeerId::random(); ( client, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index a6d73b53efdf0..f348d5cf94c43 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -176,8 +176,11 @@ impl PeersClient { self.backend.have_state_at(&header.hash(), *header.number()) } - pub fn justifications(&self, block: &BlockId) -> ClientResult> { - self.client.justifications(block) + pub fn justifications( + &self, + hash: &::Hash, + ) -> ClientResult> { + self.client.justifications(hash) } pub fn finality_notification_stream(&self) -> FinalityNotifications { diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 399062a88d269..9ae3014e497ce 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -246,15 +246,16 @@ fn sync_justifications() { net.peer(0).push_blocks(20, false); net.block_until_sync(); - // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justifications(&BlockId::Number(10)).unwrap(), None); - assert_eq!(net.peer(1).client().justifications(&BlockId::Number(10)).unwrap(), None); - - // we finalize block #10, #15 and #20 for peer 0 with a justification let backend = net.peer(0).client().as_backend(); let hashof10 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); let hashof15 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(15)).unwrap(); let hashof20 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(20)).unwrap(); + + // there's currently no justification for block #10 + assert_eq!(net.peer(0).client().justifications(&hashof10).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(&hashof10).unwrap(), None); + + // we finalize block #10, #15 and #20 for peer 0 with a justification let just = (*b"FRNK", Vec::new()); net.peer(0) .client() @@ -269,25 +270,25 @@ fn sync_justifications() { .finalize_block(&hashof20, Some(just.clone()), true) .unwrap(); - let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); - let h2 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap(); - let h3 = net.peer(1).client().header(&BlockId::Number(20)).unwrap().unwrap(); + let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); + let hashof15 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap().hash(); + let hashof20 = net.peer(1).client().header(&BlockId::Number(20)).unwrap().unwrap().hash(); // peer 1 should get the justifications from the network - net.peer(1).request_justification(&h1.hash().into(), 10); - net.peer(1).request_justification(&h2.hash().into(), 15); - net.peer(1).request_justification(&h3.hash().into(), 20); + net.peer(1).request_justification(&hashof10, 10); + 
net.peer(1).request_justification(&hashof15, 15); + net.peer(1).request_justification(&hashof20, 20); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - for height in (10..21).step_by(5) { - if net.peer(0).client().justifications(&BlockId::Number(height)).unwrap() != + for hash in [hashof10, hashof15, hashof20] { + if net.peer(0).client().justifications(&hash).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending } - if net.peer(1).client().justifications(&BlockId::Number(height)).unwrap() != + if net.peer(1).client().justifications(&hash).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending @@ -321,9 +322,9 @@ fn sync_justifications_across_forks() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).client().justifications(&BlockId::Number(10)).unwrap() == + if net.peer(0).client().justifications(&f1_best).unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) && - net.peer(1).client().justifications(&BlockId::Number(10)).unwrap() == + net.peer(1).client().justifications(&f1_best).unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) { Poll::Ready(()) @@ -952,14 +953,14 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { net.peer(0).push_blocks(10, false); net.block_until_sync(); - // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justifications(&BlockId::Number(10)).unwrap(), None); - assert_eq!(net.peer(1).client().justifications(&BlockId::Number(10)).unwrap(), None); + let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); - let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); + // there's currently no justification for block #10 + assert_eq!(net.peer(0).client().justifications(&hashof10).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(&hashof10).unwrap(), None); // Let's assume block 10 was finalized, but we still need the justification from the network. - net.peer(1).request_justification(&h1.hash().into(), 10); + net.peer(1).request_justification(&hashof10, 10); // Let's build some more blocks and wait always for the network to have synced them for _ in 0..5 { @@ -987,7 +988,7 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).client().justifications(&BlockId::Number(10)).unwrap() != + if net.peer(1).client().justifications(&hashof10).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index b5ab06a0318c5..6d463f337aabc 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1948,7 +1948,7 @@ where Ok(match self.header(id)? { Some(header) => { let hash = header.hash(); - match (self.body(&hash)?, self.justifications(id)?) { + match (self.body(&hash)?, self.justifications(&hash)?) 
{ (Some(extrinsics), justifications) => Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), _ => None, @@ -1962,8 +1962,8 @@ where Client::block_status(self, id) } - fn justifications(&self, id: &BlockId) -> sp_blockchain::Result> { - self.backend.blockchain().justifications(*id) + fn justifications(&self, hash: &Block::Hash) -> sp_blockchain::Result> { + self.backend.blockchain().justifications(hash) } fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result> { diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index b9aec421802c9..c60ff4dd09d7b 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -884,11 +884,11 @@ fn import_with_justification() { assert_eq!(client.chain_info().finalized_hash, a3.hash()); - assert_eq!(client.justifications(&BlockId::Hash(a3.hash())).unwrap(), Some(justification)); + assert_eq!(client.justifications(&a3.hash()).unwrap(), Some(justification)); - assert_eq!(client.justifications(&BlockId::Hash(a1.hash())).unwrap(), None); + assert_eq!(client.justifications(&a1.hash()).unwrap(), None); - assert_eq!(client.justifications(&BlockId::Hash(a2.hash())).unwrap(), None); + assert_eq!(client.justifications(&a2.hash()).unwrap(), None); finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); finality_notification_check(&mut finality_notifications, &[a3.hash()], &[]); diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 610ceb0cf9323..e5764354e90f3 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -91,7 +91,7 @@ pub trait Backend: /// Get block body. Returns `None` if block is not found. fn body(&self, hash: &Block::Hash) -> Result::Extrinsic>>>; /// Get block justifications. Returns `None` if no justification exists. - fn justifications(&self, id: BlockId) -> Result>; + fn justifications(&self, hash: &Block::Hash) -> Result>; /// Get last finalized block hash. fn last_finalized(&self) -> Result; From c7147ce4d3f8b43fe8c3875f0216739c6901c1e4 Mon Sep 17 00:00:00 2001 From: benluelo <57334811+benluelo@users.noreply.github.com> Date: Wed, 2 Nov 2022 19:31:04 -0400 Subject: [PATCH 303/348] use associated iterator types for InspectEnumerable (#12389) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * use associated iterator types for InspectEnumerable * Update frame/uniques/src/impl_nonfungibles.rs Co-authored-by: parity-processbot <> Co-authored-by: Bastian Köcher --- .../support/src/traits/tokens/nonfungible.rs | 18 +++++++++++---- .../support/src/traits/tokens/nonfungibles.rs | 17 ++++++++++---- frame/uniques/src/impl_nonfungibles.rs | 23 ++++++++++++------- 3 files changed, 42 insertions(+), 16 deletions(-) diff --git a/frame/support/src/traits/tokens/nonfungible.rs b/frame/support/src/traits/tokens/nonfungible.rs index fe0d2e729930e..46ca573131127 100644 --- a/frame/support/src/traits/tokens/nonfungible.rs +++ b/frame/support/src/traits/tokens/nonfungible.rs @@ -65,11 +65,16 @@ pub trait Inspect { /// Interface for enumerating items in existence or owned by a given account over a collection /// of NFTs. pub trait InspectEnumerable: Inspect { + /// The iterator type for [`Self::items`]. + type ItemsIterator: Iterator; + /// The iterator type for [`Self::owned`]. + type OwnedIterator: Iterator; + /// Returns an iterator of the items within a `collection` in existence. 
- fn items() -> Box>; + fn items() -> Self::ItemsIterator; /// Returns an iterator of the items of all collections owned by `who`. - fn owned(who: &AccountId) -> Box>; + fn owned(who: &AccountId) -> Self::OwnedIterator; } /// Trait for providing an interface for NFT-like items which may be minted, burned and/or have @@ -149,10 +154,15 @@ impl< AccountId, > InspectEnumerable for ItemOf { - fn items() -> Box> { + type ItemsIterator = >::ItemsIterator; + type OwnedIterator = + >::OwnedInCollectionIterator; + + fn items() -> Self::ItemsIterator { >::items(&A::get()) } - fn owned(who: &AccountId) -> Box> { + + fn owned(who: &AccountId) -> Self::OwnedIterator { >::owned_in_collection(&A::get(), who) } } diff --git a/frame/support/src/traits/tokens/nonfungibles.rs b/frame/support/src/traits/tokens/nonfungibles.rs index d043a87ce7c10..ac007b5a67f1d 100644 --- a/frame/support/src/traits/tokens/nonfungibles.rs +++ b/frame/support/src/traits/tokens/nonfungibles.rs @@ -105,20 +105,29 @@ pub trait Inspect { /// Interface for enumerating items in existence or owned by a given account over many collections /// of NFTs. pub trait InspectEnumerable: Inspect { + /// The iterator type for [`Self::collections`]. + type CollectionsIterator: Iterator; + /// The iterator type for [`Self::items`]. + type ItemsIterator: Iterator; + /// The iterator type for [`Self::owned`]. + type OwnedIterator: Iterator; + /// The iterator type for [`Self::owned_in_collection`]. + type OwnedInCollectionIterator: Iterator; + /// Returns an iterator of the collections in existence. - fn collections() -> Box>; + fn collections() -> Self::CollectionsIterator; /// Returns an iterator of the items of a `collection` in existence. - fn items(collection: &Self::CollectionId) -> Box>; + fn items(collection: &Self::CollectionId) -> Self::ItemsIterator; /// Returns an iterator of the items of all collections owned by `who`. - fn owned(who: &AccountId) -> Box>; + fn owned(who: &AccountId) -> Self::OwnedIterator; /// Returns an iterator of the items of `collection` owned by `who`. fn owned_in_collection( collection: &Self::CollectionId, who: &AccountId, - ) -> Box>; + ) -> Self::OwnedInCollectionIterator; } /// Trait for providing the ability to create collections of nonfungible items. diff --git a/frame/uniques/src/impl_nonfungibles.rs b/frame/uniques/src/impl_nonfungibles.rs index cead6f562ab58..75d193ad19605 100644 --- a/frame/uniques/src/impl_nonfungibles.rs +++ b/frame/uniques/src/impl_nonfungibles.rs @@ -19,6 +19,7 @@ use super::*; use frame_support::{ + storage::KeyPrefixIterator, traits::{tokens::nonfungibles::*, Get}, BoundedSlice, }; @@ -155,25 +156,31 @@ impl, I: 'static> Transfer for Pallet { } impl, I: 'static> InspectEnumerable for Pallet { + type CollectionsIterator = KeyPrefixIterator<>::CollectionId>; + type ItemsIterator = KeyPrefixIterator<>::ItemId>; + type OwnedIterator = + KeyPrefixIterator<(>::CollectionId, >::ItemId)>; + type OwnedInCollectionIterator = KeyPrefixIterator<>::ItemId>; + /// Returns an iterator of the collections in existence. /// /// NOTE: iterating this list invokes a storage read per item. - fn collections() -> Box> { - Box::new(CollectionMetadataOf::::iter_keys()) + fn collections() -> Self::CollectionsIterator { + CollectionMetadataOf::::iter_keys() } /// Returns an iterator of the items of a `collection` in existence. /// /// NOTE: iterating this list invokes a storage read per item. 
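// Editorial sketch, not part of the patch above: consuming the new associated
// iterator types from downstream code. With the `Box<dyn Iterator>` returns gone,
// a generic helper can iterate without allocation. The helper name
// `owned_item_count` and its bounds are illustrative only, not from the patch.
use frame_support::traits::tokens::nonfungibles::InspectEnumerable;

fn owned_item_count<AccountId, Nfts: InspectEnumerable<AccountId>>(who: &AccountId) -> usize {
    // `owned` now returns `Nfts::OwnedIterator`, a named type implementing `Iterator`.
    Nfts::owned(who).count()
}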
- fn items(collection: &Self::CollectionId) -> Box> { - Box::new(ItemMetadataOf::::iter_key_prefix(collection)) + fn items(collection: &Self::CollectionId) -> Self::ItemsIterator { + ItemMetadataOf::::iter_key_prefix(collection) } /// Returns an iterator of the items of all collections owned by `who`. /// /// NOTE: iterating this list invokes a storage read per item. - fn owned(who: &T::AccountId) -> Box> { - Box::new(Account::::iter_key_prefix((who,))) + fn owned(who: &T::AccountId) -> Self::OwnedIterator { + Account::::iter_key_prefix((who,)) } /// Returns an iterator of the items of `collection` owned by `who`. @@ -182,7 +189,7 @@ impl, I: 'static> InspectEnumerable for Pallet fn owned_in_collection( collection: &Self::CollectionId, who: &T::AccountId, - ) -> Box> { - Box::new(Account::::iter_key_prefix((who, collection))) + ) -> Self::OwnedInCollectionIterator { + Account::::iter_key_prefix((who, collection)) } } From a88f75eb54414405ffa785072d38548793f0009e Mon Sep 17 00:00:00 2001 From: benluelo <57334811+benluelo@users.noreply.github.com> Date: Thu, 3 Nov 2022 07:20:13 -0400 Subject: [PATCH 304/348] Add map and try_map methods (#12581) Co-authored-by: parity-processbot <> --- .../core/src/bounded/bounded_btree_map.rs | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/primitives/core/src/bounded/bounded_btree_map.rs b/primitives/core/src/bounded/bounded_btree_map.rs index 32f8d85ca795b..d2c148d6de9c5 100644 --- a/primitives/core/src/bounded/bounded_btree_map.rs +++ b/primitives/core/src/bounded/bounded_btree_map.rs @@ -168,6 +168,37 @@ where pub fn iter_mut(&mut self) -> sp_std::collections::btree_map::IterMut { self.0.iter_mut() } + + /// Consume the map, applying `f` to each of it's values and returning a new map. + pub fn map(self, mut f: F) -> BoundedBTreeMap + where + F: FnMut((&K, V)) -> T, + { + BoundedBTreeMap::::unchecked_from( + self.0 + .into_iter() + .map(|(k, v)| { + let t = f((&k, v)); + (k, t) + }) + .collect(), + ) + } + + /// Consume the map, applying `f` to each of it's values as long as it returns successfully. If + /// an `Err(E)` is ever encountered, the mapping is short circuited and the error is returned; + /// otherwise, a new map is returned in the contained `Ok` value. 
+ pub fn try_map(self, mut f: F) -> Result, E> + where + F: FnMut((&K, V)) -> Result, + { + Ok(BoundedBTreeMap::::unchecked_from( + self.0 + .into_iter() + .map(|(k, v)| (f((&k, v)).map(|t| (k, t)))) + .collect::, _>>()?, + )) + } } impl Default for BoundedBTreeMap @@ -537,4 +568,58 @@ pub mod test { assert_eq!(b1, b2); } + + #[test] + fn map_retains_size() { + let b1 = boundedmap_from_keys::>(&[1, 2]); + let b2 = b1.clone(); + + assert_eq!(b1.len(), b2.map(|(_, _)| 5_u32).len()); + } + + #[test] + fn map_maps_properly() { + let b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + assert_eq!(b1, b2.map(|(_, v)| v * 2)); + } + + #[test] + fn try_map_retains_size() { + let b1 = boundedmap_from_keys::>(&[1, 2]); + let b2 = b1.clone(); + + assert_eq!(b1.len(), b2.try_map::<_, (), _>(|(_, _)| Ok(5_u32)).unwrap().len()); + } + + #[test] + fn try_map_maps_properly() { + let b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap(); + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + assert_eq!(b1, b2.try_map::<_, (), _>(|(_, v)| Ok(v * 2)).unwrap()); + } + + #[test] + fn try_map_short_circuit() { + let b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + + assert_eq!(Err("overflow"), b1.try_map(|(_, v)| v.checked_mul(100).ok_or("overflow"))); + } + + #[test] + fn try_map_ok() { + let b1: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap(); + let b2: BoundedBTreeMap> = + [1, 2, 3, 4].into_iter().map(|k| (k, (k as u16) * 100)).try_collect().unwrap(); + + assert_eq!(Ok(b2), b1.try_map(|(_, v)| (v as u16).checked_mul(100_u16).ok_or("overflow"))); + } } From 265e3f12a2937fe4f71280b3652471627609d04f Mon Sep 17 00:00:00 2001 From: Sasha Gryaznov Date: Thu, 3 Nov 2022 18:23:05 +0200 Subject: [PATCH 305/348] stabilize 4 storage host funcs (#12611) --- frame/contracts/src/benchmarking/mod.rs | 18 ++-- frame/contracts/src/wasm/mod.rs | 135 +----------------------- frame/contracts/src/wasm/runtime.rs | 8 +- 3 files changed, 17 insertions(+), 144 deletions(-) diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index a952eeb2cbd3a..5465e720dc197 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -919,7 +919,7 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "__unstable__", + module: "seal2", name: "set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -967,7 +967,7 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "__unstable__", + module: "seal2", name: "set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -1015,7 +1015,7 @@ benchmarks! 
{ let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "__unstable__", + module: "seal2", name: "set_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -1067,7 +1067,7 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "__unstable__", + module: "seal1", name: "clear_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -1114,7 +1114,7 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "__unstable__", + module: "seal1", name: "clear_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -1162,7 +1162,7 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "__unstable__", + module: "seal1", name: "get_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -1216,7 +1216,7 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "__unstable__", + module: "seal1", name: "get_storage", params: vec![ValueType::I32, ValueType::I32, ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -1271,7 +1271,7 @@ benchmarks! { let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "__unstable__", + module: "seal1", name: "contains_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), @@ -1318,7 +1318,7 @@ benchmarks! 
{ let code = WasmModule::::from(ModuleDefinition { memory: Some(ImportedMemory::max::()), imported_functions: vec![ImportedFunction { - module: "__unstable__", + module: "seal1", name: "contains_storage", params: vec![ValueType::I32, ValueType::I32], return_type: Some(ValueType::I32), diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 053729730e679..9a094ad4f7da0 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -896,73 +896,12 @@ mod tests { } #[test] - #[cfg(not(feature = "unstable-interface"))] fn contains_storage_works() { const CODE: &str = r#" (module (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_contains_storage" (func $seal_contains_storage (param i32) (result i32))) - (import "env" "memory" (memory 1 1)) - - ;; [0, 4) size of input buffer (32 bytes as we copy the key here) - (data (i32.const 0) "\20") - - ;; [4, 36) input buffer - ;; [36, inf) output buffer - - (func (export "call") - ;; Receive key - (call $seal_input - (i32.const 4) ;; Pointer to the input buffer - (i32.const 0) ;; Size of the length buffer - ) - - ;; Load the return value into the output buffer - (i32.store (i32.const 36) - (call $seal_contains_storage - (i32.const 4) ;; The pointer to the storage key to fetch - ) - ) - - ;; Return the contents of the buffer - (call $seal_return - (i32.const 0) ;; flags - (i32.const 36) ;; output buffer ptr - (i32.const 4) ;; result is integer (4 bytes) - ) - ) - - (func (export "deploy")) -) -"#; - - let mut ext = MockExt::default(); - - ext.storage.insert(vec![1u8; 32], vec![42u8]); - ext.storage.insert(vec![2u8; 32], vec![]); - - // value does not exist -> sentinel value returned - let result = execute(CODE, [3u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); - - // value did exist -> success - let result = execute(CODE, [1u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1,); - - // value did exist -> success (zero sized type) - let result = execute(CODE, [2u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0,); - } - - #[test] - #[cfg(feature = "unstable-interface")] - fn contains_storage_works() { - const CODE: &str = r#" -(module - (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "__unstable__" "contains_storage" (func $contains_storage (param i32 i32) (result i32))) + (import "seal1" "contains_storage" (func $contains_storage (param i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) @@ -2357,76 +2296,12 @@ mod tests { } #[test] - #[cfg(not(feature = "unstable-interface"))] fn set_storage_works() { const CODE: &str = r#" (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "seal1" "seal_set_storage" (func $seal_set_storage (param i32 i32 i32) (result i32))) - (import "env" "memory" (memory 1 1)) - - ;; 0x1000 = 4k in little endian - ;; size of input buffer - (data (i32.const 0) "\00\10") - - (func (export "call") - ;; Receive (key ++ value_to_write) - (call $seal_input - (i32.const 4) ;; Pointer to the input buffer - (i32.const 0) ;; Size of the length buffer - ) - ;; Store the passed value to the passed key 
and store result to memory - (i32.store (i32.const 0) - (call $seal_set_storage - (i32.const 4) ;; key_ptr - (i32.const 36) ;; value_ptr - (i32.sub ;; value_len (input_size - key_size) - (i32.load (i32.const 0)) - (i32.const 32) - ) - ) - ) - (call $seal_return - (i32.const 0) ;; flags - (i32.const 0) ;; pointer to returned value - (i32.const 4) ;; length of returned value - ) - ) - - (func (export "deploy")) -) -"#; - - let mut ext = MockExt::default(); - - // value did not exist before -> sentinel returned - let input = ([1u8; 32], [42u8, 48]).encode(); - let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); - assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]); - - // value do exist -> length of old value returned - let input = ([1u8; 32], [0u8; 0]).encode(); - let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 2); - assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]); - - // value do exist -> length of old value returned (test for zero sized val) - let input = ([1u8; 32], [99u8]).encode(); - let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); - assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]); - } - - #[test] - #[cfg(feature = "unstable-interface")] - fn set_storage_works() { - const CODE: &str = r#" -(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "__unstable__" "set_storage" (func $set_storage (param i32 i32 i32 i32) (result i32))) + (import "seal2" "set_storage" (func $set_storage (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; [0, 4) size of input buffer @@ -2491,13 +2366,12 @@ mod tests { } #[test] - #[cfg(feature = "unstable-interface")] fn get_storage_works() { const CODE: &str = r#" (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "__unstable__" "get_storage" (func $get_storage (param i32 i32 i32 i32) (result i32))) + (import "seal1" "get_storage" (func $get_storage (param i32 i32 i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; [0, 4) size of input buffer (160 bytes as we copy the key+len here) @@ -2585,13 +2459,12 @@ mod tests { } #[test] - #[cfg(feature = "unstable-interface")] fn clear_storage_works() { const CODE: &str = r#" (module (import "seal0" "seal_input" (func $seal_input (param i32 i32))) (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) - (import "__unstable__" "clear_storage" (func $clear_storage (param i32 i32) (result i32))) + (import "seal1" "clear_storage" (func $clear_storage (param i32 i32) (result i32))) (import "env" "memory" (memory 1 1)) ;; size of input buffer diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 281cc30c4f053..50947962c0631 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -1021,7 +1021,7 @@ pub mod env { /// /// Returns the size of the pre-existing value at the specified key if any. Otherwise /// `SENTINEL` is returned as a sentinel value. 
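// Editorial sketch, not part of the patch: a contract-side view of the stabilised
// host function. The import module "seal2" mirrors the `#[version(2)]` attribute
// below and the fixtures above; the wrapper name `set_storage_checked` and the
// `sentinel` parameter are illustrative assumptions, not part of the interface.
#[link(wasm_import_module = "seal2")]
extern "C" {
    fn set_storage(key_ptr: u32, key_len: u32, value_ptr: u32, value_len: u32) -> u32;
}

// Returns the byte length of the value that was overwritten, or `None` if the key
// held no value before (which the host signals with the sentinel value).
fn set_storage_checked(key: &[u8], value: &[u8], sentinel: u32) -> Option<u32> {
    let ret = unsafe {
        set_storage(
            key.as_ptr() as u32,
            key.len() as u32,
            value.as_ptr() as u32,
            value.len() as u32,
        )
    };
    if ret == sentinel { None } else { Some(ret) }
}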
- #[unstable] + #[version(2)] #[prefixed_alias] fn set_storage( ctx: Runtime, @@ -1053,7 +1053,7 @@ pub mod env { /// /// Returns the size of the pre-existing value at the specified key if any. Otherwise /// `SENTINEL` is returned as a sentinel value. - #[unstable] + #[version(1)] #[prefixed_alias] fn clear_storage(ctx: Runtime, key_ptr: u32, key_len: u32) -> Result { ctx.clear_storage(KeyType::Variable(key_len), key_ptr) @@ -1102,7 +1102,7 @@ pub mod env { /// # Errors /// /// `ReturnCode::KeyNotFound` - #[unstable] + #[version(1)] #[prefixed_alias] fn get_storage( ctx: Runtime, @@ -1145,7 +1145,7 @@ pub mod env { /// /// Returns the size of the pre-existing value at the specified key if any. Otherwise /// `SENTINEL` is returned as a sentinel value. - #[unstable] + #[version(1)] #[prefixed_alias] fn contains_storage(ctx: Runtime, key_ptr: u32, key_len: u32) -> Result { ctx.contains_storage(KeyType::Variable(key_len), key_ptr) From a4ebc278492d131f9c0bc9f7e393ca748e30d1b4 Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Thu, 3 Nov 2022 17:52:57 +0100 Subject: [PATCH 306/348] Collective benchmark respects DefaultVote configuration (#12612) * Collective benchmark respects DefaultVote configuration * ".git/.scripts/bench-bot.sh" pallet dev pallet_collective Co-authored-by: command-bot <> --- frame/collective/src/benchmarking.rs | 10 + frame/collective/src/weights.rs | 300 ++++++++++++++++----------- 2 files changed, 192 insertions(+), 118 deletions(-) diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index fcebacf5762e7..4e8bf094ef9d6 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -489,9 +489,19 @@ benchmarks_instance_pallet! { let index = p - 1; // Have almost everyone vote aye on last proposal, while keeping it from passing. // A few abstainers will be the nay votes needed to fail the vote. + let mut yes_votes: MemberCount = 0; for j in 2 .. m - 1 { let voter = &members[j as usize]; let approve = true; + yes_votes += 1; + // vote aye till a prime nay vote keeps the proposal disapproved. + if <>::DefaultVote as DefaultVote>::default_vote( + Some(false), + yes_votes, + 0, + m,) { + break; + } Collective::::vote( SystemOrigin::Signed(voter.clone()).into(), last_hash, diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index 50029046230af..c7237911da6ab 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,22 +18,26 @@ //! Autogenerated weights for pallet_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_collective // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json +// --pallet=pallet_collective +// --chain=dev +// --header=./HEADER-APACHE2 // --output=./frame/collective/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -61,38 +65,46 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Council Members (r:1 w:1) // Storage: Council Proposals (r:1 w:0) - // Storage: Council Voting (r:100 w:100) // Storage: Council Prime (r:0 w:1) - fn set_members(m: u32, n: u32, p: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(10_280_000 as u64).saturating_mul(m as u64)) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(126_000 as u64).saturating_mul(n as u64)) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(13_310_000 as u64).saturating_mul(p as u64)) + // Storage: Council Voting (r:100 w:100) + /// The range of component `m` is `[0, 100]`. + /// The range of component `n` is `[0, 100]`. + /// The range of component `p` is `[0, 100]`. + fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { + // Minimum execution time: 18_852 nanoseconds. + Weight::from_ref_time(19_138_000 as u64) + // Standard Error: 65_564 + .saturating_add(Weight::from_ref_time(5_276_957 as u64).saturating_mul(m as u64)) + // Standard Error: 65_564 + .saturating_add(Weight::from_ref_time(7_655_866 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(p as u64))) .saturating_add(T::DbWeight::get().writes(2 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) } // Storage: Council Members (r:1 w:0) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { - Weight::from_ref_time(16_819_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 23_081 nanoseconds. + Weight::from_ref_time(22_608_754 as u64) + // Standard Error: 35 + .saturating_add(Weight::from_ref_time(1_722 as u64).saturating_mul(b as u64)) + // Standard Error: 370 + .saturating_add(Weight::from_ref_time(23_442 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:0) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[1, 100]`. 
fn propose_execute(b: u32, m: u32, ) -> Weight { - Weight::from_ref_time(18_849_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 25_477 nanoseconds. + Weight::from_ref_time(25_478_243 as u64) + // Standard Error: 47 + .saturating_add(Weight::from_ref_time(1_346 as u64).saturating_mul(b as u64)) + // Standard Error: 493 + .saturating_add(Weight::from_ref_time(27_323 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) } // Storage: Council Members (r:1 w:0) @@ -100,23 +112,29 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalCount (r:1 w:1) // Storage: Council Voting (r:0 w:1) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[2, 100]`. + /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(22_204_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(8_000 as u64).saturating_mul(b as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(49_000 as u64).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(180_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 33_037 nanoseconds. + Weight::from_ref_time(29_297_056 as u64) + // Standard Error: 180 + .saturating_add(Weight::from_ref_time(5_899 as u64).saturating_mul(b as u64)) + // Standard Error: 1_884 + .saturating_add(Weight::from_ref_time(33_511 as u64).saturating_mul(m as u64)) + // Standard Error: 1_860 + .saturating_add(Weight::from_ref_time(195_416 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Council Members (r:1 w:0) // Storage: Council Voting (r:1 w:1) + /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { - Weight::from_ref_time(30_941_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 35_284 nanoseconds. + Weight::from_ref_time(38_865_202 as u64) + // Standard Error: 2_322 + .saturating_add(Weight::from_ref_time(53_753 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -124,12 +142,15 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Members (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(32_485_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(39_000 as u64).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 35_773 nanoseconds. 
+ Weight::from_ref_time(36_651_208 as u64) + // Standard Error: 1_142 + .saturating_add(Weight::from_ref_time(27_095 as u64).saturating_mul(m as u64)) + // Standard Error: 1_114 + .saturating_add(Weight::from_ref_time(169_747 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -137,14 +158,18 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_487_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(b as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(66_000 as u64).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(157_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 46_290 nanoseconds. + Weight::from_ref_time(46_176_864 as u64) + // Standard Error: 147 + .saturating_add(Weight::from_ref_time(2_318 as u64).saturating_mul(b as u64)) + // Standard Error: 1_560 + .saturating_add(Weight::from_ref_time(31_428 as u64).saturating_mul(m as u64)) + // Standard Error: 1_520 + .saturating_add(Weight::from_ref_time(171_822 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -153,12 +178,15 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Prime (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_494_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(58_000 as u64).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 38_421 nanoseconds. + Weight::from_ref_time(40_586_165 as u64) + // Standard Error: 1_853 + .saturating_add(Weight::from_ref_time(20_063 as u64).saturating_mul(m as u64)) + // Standard Error: 1_807 + .saturating_add(Weight::from_ref_time(151_494 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -167,24 +195,30 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Prime (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(36_566_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(b as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(63_000 as u64).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 48_281 nanoseconds. 
+ Weight::from_ref_time(47_511_499 as u64) + // Standard Error: 135 + .saturating_add(Weight::from_ref_time(1_900 as u64).saturating_mul(b as u64)) + // Standard Error: 1_429 + .saturating_add(Weight::from_ref_time(37_612 as u64).saturating_mul(m as u64)) + // Standard Error: 1_393 + .saturating_add(Weight::from_ref_time(180_682 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Council Proposals (r:1 w:1) // Storage: Council Voting (r:0 w:1) // Storage: Council ProposalOf (r:0 w:1) + /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(20_159_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 22_534 nanoseconds. + Weight::from_ref_time(25_722_688 as u64) + // Standard Error: 1_622 + .saturating_add(Weight::from_ref_time(166_308 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -194,38 +228,46 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Council Members (r:1 w:1) // Storage: Council Proposals (r:1 w:0) - // Storage: Council Voting (r:100 w:100) // Storage: Council Prime (r:0 w:1) - fn set_members(m: u32, n: u32, p: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(10_280_000 as u64).saturating_mul(m as u64)) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(126_000 as u64).saturating_mul(n as u64)) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(13_310_000 as u64).saturating_mul(p as u64)) + // Storage: Council Voting (r:100 w:100) + /// The range of component `m` is `[0, 100]`. + /// The range of component `n` is `[0, 100]`. + /// The range of component `p` is `[0, 100]`. + fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { + // Minimum execution time: 18_852 nanoseconds. + Weight::from_ref_time(19_138_000 as u64) + // Standard Error: 65_564 + .saturating_add(Weight::from_ref_time(5_276_957 as u64).saturating_mul(m as u64)) + // Standard Error: 65_564 + .saturating_add(Weight::from_ref_time(7_655_866 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(p as u64))) .saturating_add(RocksDbWeight::get().writes(2 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(p as u64))) } // Storage: Council Members (r:1 w:0) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { - Weight::from_ref_time(16_819_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 23_081 nanoseconds. + Weight::from_ref_time(22_608_754 as u64) + // Standard Error: 35 + .saturating_add(Weight::from_ref_time(1_722 as u64).saturating_mul(b as u64)) + // Standard Error: 370 + .saturating_add(Weight::from_ref_time(23_442 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:0) + /// The range of component `b` is `[1, 1024]`. 
+ /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { - Weight::from_ref_time(18_849_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 25_477 nanoseconds. + Weight::from_ref_time(25_478_243 as u64) + // Standard Error: 47 + .saturating_add(Weight::from_ref_time(1_346 as u64).saturating_mul(b as u64)) + // Standard Error: 493 + .saturating_add(Weight::from_ref_time(27_323 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) } // Storage: Council Members (r:1 w:0) @@ -233,23 +275,29 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalCount (r:1 w:1) // Storage: Council Voting (r:0 w:1) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[2, 100]`. + /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(22_204_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(8_000 as u64).saturating_mul(b as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(49_000 as u64).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(180_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 33_037 nanoseconds. + Weight::from_ref_time(29_297_056 as u64) + // Standard Error: 180 + .saturating_add(Weight::from_ref_time(5_899 as u64).saturating_mul(b as u64)) + // Standard Error: 1_884 + .saturating_add(Weight::from_ref_time(33_511 as u64).saturating_mul(m as u64)) + // Standard Error: 1_860 + .saturating_add(Weight::from_ref_time(195_416 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Council Members (r:1 w:0) // Storage: Council Voting (r:1 w:1) + /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { - Weight::from_ref_time(30_941_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 35_284 nanoseconds. + Weight::from_ref_time(38_865_202 as u64) + // Standard Error: 2_322 + .saturating_add(Weight::from_ref_time(53_753 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -257,12 +305,15 @@ impl WeightInfo for () { // Storage: Council Members (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(32_485_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(39_000 as u64).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 35_773 nanoseconds. 
+ Weight::from_ref_time(36_651_208 as u64) + // Standard Error: 1_142 + .saturating_add(Weight::from_ref_time(27_095 as u64).saturating_mul(m as u64)) + // Standard Error: 1_114 + .saturating_add(Weight::from_ref_time(169_747 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -270,14 +321,18 @@ impl WeightInfo for () { // Storage: Council Members (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_487_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(b as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(66_000 as u64).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(157_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 46_290 nanoseconds. + Weight::from_ref_time(46_176_864 as u64) + // Standard Error: 147 + .saturating_add(Weight::from_ref_time(2_318 as u64).saturating_mul(b as u64)) + // Standard Error: 1_560 + .saturating_add(Weight::from_ref_time(31_428 as u64).saturating_mul(m as u64)) + // Standard Error: 1_520 + .saturating_add(Weight::from_ref_time(171_822 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -286,12 +341,15 @@ impl WeightInfo for () { // Storage: Council Prime (r:1 w:0) // Storage: Council Proposals (r:1 w:1) // Storage: Council ProposalOf (r:0 w:1) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_494_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(58_000 as u64).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(124_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 38_421 nanoseconds. + Weight::from_ref_time(40_586_165 as u64) + // Standard Error: 1_853 + .saturating_add(Weight::from_ref_time(20_063 as u64).saturating_mul(m as u64)) + // Standard Error: 1_807 + .saturating_add(Weight::from_ref_time(151_494 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -300,24 +358,30 @@ impl WeightInfo for () { // Storage: Council Prime (r:1 w:0) // Storage: Council ProposalOf (r:1 w:1) // Storage: Council Proposals (r:1 w:1) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - Weight::from_ref_time(36_566_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(b as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(63_000 as u64).saturating_mul(m as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 48_281 nanoseconds. 
+ Weight::from_ref_time(47_511_499 as u64) + // Standard Error: 135 + .saturating_add(Weight::from_ref_time(1_900 as u64).saturating_mul(b as u64)) + // Standard Error: 1_429 + .saturating_add(Weight::from_ref_time(37_612 as u64).saturating_mul(m as u64)) + // Standard Error: 1_393 + .saturating_add(Weight::from_ref_time(180_682 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Council Proposals (r:1 w:1) // Storage: Council Voting (r:0 w:1) // Storage: Council ProposalOf (r:0 w:1) + /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(20_159_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 22_534 nanoseconds. + Weight::from_ref_time(25_722_688 as u64) + // Standard Error: 1_622 + .saturating_add(Weight::from_ref_time(166_308 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } From 87f3fdea8f227d33322c439d45a9e1796637e972 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Thu, 3 Nov 2022 17:57:10 +0100 Subject: [PATCH 307/348] BlockId removal: refactor: Backend::block_indexed_body (#12609) * BlockId removal: refactor: Backend::block_indexed_body It changes the arguments of `Backend::block_indexed_body` method from: `BlockId` to: `&Block::Hash` This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) * trigger CI job --- client/api/src/client.rs | 6 ++---- client/api/src/in_mem.rs | 2 +- client/db/src/lib.rs | 9 +++++++-- client/network/sync/src/block_request_handler.rs | 2 +- client/service/src/client/client.rs | 16 +++++++++++++--- primitives/blockchain/src/backend.rs | 2 +- 6 files changed, 25 insertions(+), 12 deletions(-) diff --git a/client/api/src/client.rs b/client/api/src/client.rs index d0053afeba97b..a07198abfe8d6 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -118,10 +118,8 @@ pub trait BlockBackend { /// /// Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. - fn block_indexed_body( - &self, - id: &BlockId, - ) -> sp_blockchain::Result>>>; + fn block_indexed_body(&self, hash: &Block::Hash) + -> sp_blockchain::Result>>>; /// Get full block by id. fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 6f33695fe4bf4..26364f28acca2 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -451,7 +451,7 @@ impl blockchain::Backend for Blockchain { fn block_indexed_body( &self, - _id: BlockId, + _hash: &Block::Hash, ) -> sp_blockchain::Result>>> { unimplemented!("Not supported by the in-mem backend.") } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index af45e7c961fd3..0138df36a38fb 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -694,8 +694,13 @@ impl sc_client_api::blockchain::Backend for BlockchainDb) -> ClientResult>>> { - let body = match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY_INDEX, id)? { + fn block_indexed_body(&self, hash: &Block::Hash) -> ClientResult>>> { + let body = match read_db( + &*self.db, + columns::KEY_LOOKUP, + columns::BODY_INDEX, + BlockId::::Hash(*hash), + )? 
{ Some(body) => body, None => return Ok(None), }; diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 08b8cffa38714..5eba1d52dc68c 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -374,7 +374,7 @@ where }; let indexed_body = if get_indexed_body { - match self.client.block_indexed_body(&BlockId::Hash(hash))? { + match self.client.block_indexed_body(&hash)? { Some(transactions) => transactions, None => { log::trace!( diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 6d463f337aabc..a4890f2fcf06f 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1980,9 +1980,9 @@ where fn block_indexed_body( &self, - id: &BlockId, + hash: &Block::Hash, ) -> sp_blockchain::Result>>> { - self.backend.blockchain().block_indexed_body(*id) + self.backend.blockchain().block_indexed_body(hash) } fn requires_full_sync(&self) -> bool { @@ -2073,9 +2073,19 @@ where &self, number: NumberFor, ) -> Result>>, sp_transaction_storage_proof::Error> { + let hash = match self + .backend + .blockchain() + .block_hash_from_id(&BlockId::Number(number)) + .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e)))? + { + Some(hash) => hash, + None => return Ok(None), + }; + self.backend .blockchain() - .block_indexed_body(BlockId::number(number)) + .block_indexed_body(&hash) .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e))) } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index e5764354e90f3..fdb56020661b4 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -238,7 +238,7 @@ pub trait Backend: Ok(self.indexed_transaction(hash)?.is_some()) } - fn block_indexed_body(&self, id: BlockId) -> Result>>>; + fn block_indexed_body(&self, hash: &Block::Hash) -> Result>>>; } /// Blockchain info From 564cdeb9026a556133cab6ff20a6f7426e24b38e Mon Sep 17 00:00:00 2001 From: dharjeezy Date: Thu, 3 Nov 2022 20:51:18 +0100 Subject: [PATCH 308/348] Introduce DefensiveMin and DefensiveMax (#12554) * traits for defensive min and defensive max * defensive min and strict min with tests * defensive max and strict max with tests * include docs * implement partial ord on defensive min and max * Update frame/support/src/traits/misc.rs Co-authored-by: Oliver Tale-Yazdi * wrap lines * Fix traits Signed-off-by: Oliver Tale-Yazdi * Update frame/support/src/traits/misc.rs * Update frame/support/src/traits/misc.rs Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi Co-authored-by: parity-processbot <> --- frame/support/src/traits.rs | 10 +- frame/support/src/traits/misc.rs | 176 +++++++++++++++++++++++++++++++ 2 files changed, 181 insertions(+), 5 deletions(-) diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 2b6c5efee10bb..f09b715a970ad 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -56,11 +56,11 @@ mod misc; pub use misc::{ defensive_prelude::{self, *}, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, - ConstU32, ConstU64, ConstU8, DefensiveSaturating, DefensiveTruncateFrom, - EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, - GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, Len, OffchainWorker, - OnKilledAccount, OnNewAccount, PrivilegeCmp, 
SameOrOther, Time, TryCollect, TryDrop, TypedGet, - UnixTime, WrapperKeepOpaque, WrapperOpaque, + ConstU32, ConstU64, ConstU8, DefensiveMax, DefensiveMin, DefensiveSaturating, + DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, + ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsSubType, IsType, + Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, SameOrOther, Time, + TryCollect, TryDrop, TypedGet, UnixTime, WrapperKeepOpaque, WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 1297956b2f64e..ce8faaaf37c3d 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -407,6 +407,134 @@ where } } +/// Defensively calculates the minimum of two values. +/// +/// Can be used in contexts where we assume the receiver value to be (strictly) smaller. +pub trait DefensiveMin { + /// Returns the minimum and defensively checks that `self` is not larger than `other`. + /// + /// # Example + /// + /// ``` + /// use frame_support::traits::DefensiveMin; + /// // min(3, 4) is 3. + /// assert_eq!(3, 3_u32.defensive_min(4_u32)); + /// // min(4, 4) is 4. + /// assert_eq!(4, 4_u32.defensive_min(4_u32)); + /// ``` + /// + /// ```should_panic + /// use frame_support::traits::DefensiveMin; + /// // min(4, 3) panics. + /// 4_u32.defensive_min(3_u32); + /// ``` + fn defensive_min(self, other: T) -> Self; + + /// Returns the minimum and defensively checks that `self` is smaller than `other`. + /// + /// # Example + /// + /// ``` + /// use frame_support::traits::DefensiveMin; + /// // min(3, 4) is 3. + /// assert_eq!(3, 3_u32.defensive_strict_min(4_u32)); + /// ``` + /// + /// ```should_panic + /// use frame_support::traits::DefensiveMin; + /// // min(4, 4) panics. + /// 4_u32.defensive_strict_min(4_u32); + /// ``` + fn defensive_strict_min(self, other: T) -> Self; +} + +impl DefensiveMin for T +where + T: sp_std::cmp::PartialOrd, +{ + fn defensive_min(self, other: T) -> Self { + if self <= other { + self + } else { + defensive!("DefensiveMin"); + other + } + } + + fn defensive_strict_min(self, other: T) -> Self { + if self < other { + self + } else { + defensive!("DefensiveMin strict"); + other + } + } +} + +/// Defensively calculates the maximum of two values. +/// +/// Can be used in contexts where we assume the receiver value to be (strictly) larger. +pub trait DefensiveMax { + /// Returns the maximum and defensively asserts that `other` is not larger than `self`. + /// + /// # Example + /// + /// ``` + /// use frame_support::traits::DefensiveMax; + /// // max(4, 3) is 4. + /// assert_eq!(4, 4_u32.defensive_max(3_u32)); + /// // max(4, 4) is 4. + /// assert_eq!(4, 4_u32.defensive_max(4_u32)); + /// ``` + /// + /// ```should_panic + /// use frame_support::traits::DefensiveMax; + /// // max(4, 5) panics. + /// 4_u32.defensive_max(5_u32); + /// ``` + fn defensive_max(self, other: T) -> Self; + + /// Returns the maximum and defensively asserts that `other` is smaller than `self`. + /// + /// # Example + /// + /// ``` + /// use frame_support::traits::DefensiveMax; + /// // y(4, 3) is 4. + /// assert_eq!(4, 4_u32.defensive_strict_max(3_u32)); + /// ``` + /// + /// ```should_panic + /// use frame_support::traits::DefensiveMax; + /// // max(4, 4) panics. 
+ /// 4_u32.defensive_strict_max(4_u32); + /// ``` + fn defensive_strict_max(self, other: T) -> Self; +} + +impl DefensiveMax for T +where + T: sp_std::cmp::PartialOrd, +{ + fn defensive_max(self, other: T) -> Self { + if self >= other { + self + } else { + defensive!("DefensiveMax"); + other + } + } + + fn defensive_strict_max(self, other: T) -> Self { + if self > other { + self + } else { + defensive!("DefensiveMax strict"); + other + } + } +} + /// Anything that can have a `::len()` method. pub trait Len { /// Return the length of data type. @@ -1109,4 +1237,52 @@ mod test { let data = decoded.encode(); WrapperOpaque::::decode(&mut &data[..]).unwrap(); } + + #[test] + fn defensive_min_works() { + assert_eq!(10, 10_u32.defensive_min(11_u32)); + assert_eq!(10, 10_u32.defensive_min(10_u32)); + } + + #[test] + #[should_panic(expected = "Defensive failure has been triggered!: \"DefensiveMin\"")] + fn defensive_min_panics() { + 10_u32.defensive_min(9_u32); + } + + #[test] + fn defensive_strict_min_works() { + assert_eq!(10, 10_u32.defensive_strict_min(11_u32)); + assert_eq!(9, 9_u32.defensive_strict_min(10_u32)); + } + + #[test] + #[should_panic(expected = "Defensive failure has been triggered!: \"DefensiveMin strict\"")] + fn defensive_strict_min_panics() { + 9_u32.defensive_strict_min(9_u32); + } + + #[test] + fn defensive_max_works() { + assert_eq!(11, 11_u32.defensive_max(10_u32)); + assert_eq!(10, 10_u32.defensive_max(10_u32)); + } + + #[test] + #[should_panic(expected = "Defensive failure has been triggered!: \"DefensiveMax\"")] + fn defensive_max_panics() { + 9_u32.defensive_max(10_u32); + } + + #[test] + fn defensive_strict_max_works() { + assert_eq!(11, 11_u32.defensive_strict_max(10_u32)); + assert_eq!(10, 10_u32.defensive_strict_max(9_u32)); + } + + #[test] + #[should_panic(expected = "Defensive failure has been triggered!: \"DefensiveMax strict\"")] + fn defensive_strict_max_panics() { + 9_u32.defensive_strict_max(9_u32); + } } From b2a914c09739478c63d318f6b294901a3290e548 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Fri, 4 Nov 2022 07:08:24 +0800 Subject: [PATCH 309/348] pallet-sudo: add `CheckOnlySudoAccount` signed extension (#12496) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * pallet-sudo: add `CheckSudoKey` signed extension Signed-off-by: koushiro * Rename CheckSudoKey => CheckOnlySudo Signed-off-by: koushiro * Rename extension name and Add some docs * Apply review suggestions * Update frame/sudo/src/extension.rs Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> * Update frame/sudo/src/extension.rs Signed-off-by: koushiro Co-authored-by: Bastian Köcher Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> --- frame/sudo/src/extension.rs | 107 ++++++++++++++++++++++++++++++++++++ frame/sudo/src/lib.rs | 11 +++- 2 files changed, 117 insertions(+), 1 deletion(-) create mode 100644 frame/sudo/src/extension.rs diff --git a/frame/sudo/src/extension.rs b/frame/sudo/src/extension.rs new file mode 100644 index 0000000000000..068fa2ed928d5 --- /dev/null +++ b/frame/sudo/src/extension.rs @@ -0,0 +1,107 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{Config, Pallet}; +use codec::{Decode, Encode}; +use frame_support::{dispatch::DispatchInfo, ensure}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{DispatchInfoOf, Dispatchable, SignedExtension}, + transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionValidity, TransactionValidityError, + UnknownTransaction, ValidTransaction, + }, +}; +use sp_std::{fmt, marker::PhantomData}; + +/// Ensure that signed transactions are only valid if they are signed by sudo account. +/// +/// In the initial phase of a chain without any tokens you can not prevent accounts from sending +/// transactions. +/// These transactions would enter the transaction pool as the succeed the validation, but would +/// fail on applying them as they are not allowed/disabled/whatever. This would be some huge dos +/// vector to any kind of chain. This extension solves the dos vector by preventing any kind of +/// transaction entering the pool as long as it is not signed by the sudo account. +#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct CheckOnlySudoAccount(PhantomData); + +impl Default for CheckOnlySudoAccount { + fn default() -> Self { + Self(Default::default()) + } +} + +impl fmt::Debug for CheckOnlySudoAccount { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "CheckOnlySudoAccount") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + Ok(()) + } +} + +impl CheckOnlySudoAccount { + /// Creates new `SignedExtension` to check sudo key. + pub fn new() -> Self { + Self::default() + } +} + +impl SignedExtension for CheckOnlySudoAccount +where + ::RuntimeCall: Dispatchable, +{ + const IDENTIFIER: &'static str = "CheckOnlySudoAccount"; + type AccountId = T::AccountId; + type Call = ::RuntimeCall; + type AdditionalSigned = (); + type Pre = (); + + fn additional_signed(&self) -> Result { + Ok(()) + } + + fn validate( + &self, + who: &Self::AccountId, + _call: &Self::Call, + info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + let sudo_key: T::AccountId = >::key().ok_or(UnknownTransaction::CannotLookup)?; + ensure!(*who == sudo_key, InvalidTransaction::BadSigner); + + Ok(ValidTransaction { + priority: info.weight.ref_time() as TransactionPriority, + ..Default::default() + }) + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + self.validate(who, call, info, len).map(|_| ()) + } +} diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 75d15d23c680a..c18ced8911193 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -59,7 +59,7 @@ //! use frame_system::pallet_prelude::*; //! //! #[pallet::pallet] -//! pub struct Pallet(_); +//! pub struct Pallet(PhantomData); //! //! #[pallet::config] //! pub trait Config: frame_system::Config {} @@ -79,6 +79,13 @@ //! # fn main() {} //! ``` //! +//! ### Signed Extension +//! +//! The Sudo pallet defines the following extension: +//! +//! 
- [`CheckOnlySudoAccount`]: Ensures that the signed transactions are only valid if they are +//! signed by sudo account. +//! //! ## Genesis Config //! //! The Sudo pallet depends on the [`GenesisConfig`]. @@ -97,11 +104,13 @@ use sp_std::prelude::*; use frame_support::{dispatch::GetDispatchInfo, traits::UnfilteredDispatchable}; +mod extension; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +pub use extension::CheckOnlySudoAccount; pub use pallet::*; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; From 6bd2eac1c230bfed1e58a8e2bad03d5bacf54b60 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Fri, 4 Nov 2022 18:13:57 +0100 Subject: [PATCH 310/348] Move Throughput into `sc-sysinfo` (#12368) * move Throughput to sc-sysinfo * replace u64 * fix in tests * change Throughput * refactored Throughput * fixes * moved tests & fixes * custom serializer * note * fix serializer * forgot to remove * deserialize * functioning deserialization :) * try to make clipply happy * Serialize as function * test HwBench * rename * fix serialization * deserialize as function * unused import * move serialize/deserialize * don't serialize none * remove nonsense * remove nonsense comment :P * fixes * remove all the todos * return enum * fixes * fix nit * improve docs & readability * Update client/sysinfo/src/sysinfo.rs Co-authored-by: Oliver Tale-Yazdi * fix all the nits * rename * fix * Update client/sysinfo/src/sysinfo.rs Co-authored-by: Oliver Tale-Yazdi * remove unit from serialization * Update utils/frame/benchmarking-cli/src/machine/hardware.rs Co-authored-by: Oliver Tale-Yazdi --- Cargo.lock | 2 + client/sysinfo/Cargo.toml | 3 + client/sysinfo/src/lib.rs | 19 +- client/sysinfo/src/sysinfo.rs | 187 +++++++++++++++--- utils/frame/benchmarking-cli/Cargo.toml | 1 + .../benchmarking-cli/src/machine/hardware.rs | 129 ++++-------- .../frame/benchmarking-cli/src/machine/mod.rs | 19 +- .../src/machine/reference_hardware.json | 20 +- 8 files changed, 236 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b87f45508dfc9..48af2ffe4d88f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2216,6 +2216,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-state-machine", + "sp-std", "sp-storage", "sp-trie", "tempfile", @@ -8796,6 +8797,7 @@ dependencies = [ "serde_json", "sp-core", "sp-io", + "sp-runtime", "sp-std", ] diff --git a/client/sysinfo/Cargo.toml b/client/sysinfo/Cargo.toml index 1e96f69a92dfe..882cbd96c1c5f 100644 --- a/client/sysinfo/Cargo.toml +++ b/client/sysinfo/Cargo.toml @@ -26,3 +26,6 @@ sc-telemetry = { version = "4.0.0-dev", path = "../telemetry" } sp-core = { version = "6.0.0", path = "../../primitives/core" } sp-io = { version = "6.0.0", path = "../../primitives/io" } sp-std = { version = "4.0.0", path = "../../primitives/std" } + +[dev-dependencies] +sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } diff --git a/client/sysinfo/src/lib.rs b/client/sysinfo/src/lib.rs index be63fefe9ecd1..372c7c1a6d219 100644 --- a/client/sysinfo/src/lib.rs +++ b/client/sysinfo/src/lib.rs @@ -29,6 +29,7 @@ mod sysinfo_linux; pub use sysinfo::{ benchmark_cpu, benchmark_disk_random_writes, benchmark_disk_sequential_writes, benchmark_memory, benchmark_sr25519_verify, gather_hwbench, gather_sysinfo, + serialize_throughput, serialize_throughput_option, Throughput, }; /// The operating system part of the current target triplet. @@ -44,13 +45,23 @@ pub const TARGET_ENV: &str = include_str!(concat!(env!("OUT_DIR"), "/target_env. 
#[derive(Clone, Debug, serde::Serialize)] pub struct HwBench { /// The CPU speed, as measured in how many MB/s it can hash using the BLAKE2b-256 hash. - pub cpu_hashrate_score: u64, + #[serde(serialize_with = "serialize_throughput")] + pub cpu_hashrate_score: Throughput, /// Memory bandwidth in MB/s, calculated by measuring the throughput of `memcpy`. - pub memory_memcpy_score: u64, + #[serde(serialize_with = "serialize_throughput")] + pub memory_memcpy_score: Throughput, /// Sequential disk write speed in MB/s. - pub disk_sequential_write_score: Option, + #[serde( + serialize_with = "serialize_throughput_option", + skip_serializing_if = "Option::is_none" + )] + pub disk_sequential_write_score: Option, /// Random disk write speed in MB/s. - pub disk_random_write_score: Option, + #[serde( + serialize_with = "serialize_throughput_option", + skip_serializing_if = "Option::is_none" + )] + pub disk_random_write_score: Option, } /// Limit the execution time of a benchmark. diff --git a/client/sysinfo/src/sysinfo.rs b/client/sysinfo/src/sysinfo.rs index fc347c1cc2eb3..c66a6f6a62aed 100644 --- a/client/sysinfo/src/sysinfo.rs +++ b/client/sysinfo/src/sysinfo.rs @@ -21,9 +21,10 @@ use crate::{ExecutionLimit, HwBench}; use sc_telemetry::SysInfo; use sp_core::{sr25519, Pair}; use sp_io::crypto::sr25519_verify; -use sp_std::prelude::*; +use sp_std::{fmt, prelude::*}; use rand::{seq::SliceRandom, Rng, RngCore}; +use serde::Serializer; use std::{ fs::File, io::{Seek, SeekFrom, Write}, @@ -32,6 +33,110 @@ use std::{ time::{Duration, Instant}, }; +/// The unit in which the [`Throughput`] (bytes per second) is denoted. +pub enum Unit { + GiBs, + MiBs, + KiBs, +} + +impl fmt::Display for Unit { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match self { + Unit::GiBs => "GiBs", + Unit::MiBs => "MiBs", + Unit::KiBs => "KiBs", + }) + } +} + +/// Throughput as measured in bytes per second. +#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)] +pub struct Throughput(f64); + +const KIBIBYTE: f64 = (1 << 10) as f64; +const MEBIBYTE: f64 = (1 << 20) as f64; +const GIBIBYTE: f64 = (1 << 30) as f64; + +impl Throughput { + /// Construct [`Self`] from kibibyte/s. + pub fn from_kibs(kibs: f64) -> Throughput { + Throughput(kibs * KIBIBYTE) + } + + /// Construct [`Self`] from mebibyte/s. + pub fn from_mibs(mibs: f64) -> Throughput { + Throughput(mibs * MEBIBYTE) + } + + /// Construct [`Self`] from gibibyte/s. + pub fn from_gibs(gibs: f64) -> Throughput { + Throughput(gibs * GIBIBYTE) + } + + /// [`Self`] as number of byte/s. + pub fn as_bytes(&self) -> f64 { + self.0 + } + + /// [`Self`] as number of kibibyte/s. + pub fn as_kibs(&self) -> f64 { + self.0 / KIBIBYTE + } + + /// [`Self`] as number of mebibyte/s. + pub fn as_mibs(&self) -> f64 { + self.0 / MEBIBYTE + } + + /// [`Self`] as number of gibibyte/s. + pub fn as_gibs(&self) -> f64 { + self.0 / GIBIBYTE + } + + /// Normalizes [`Self`] to use the largest unit possible. + pub fn normalize(&self) -> (f64, Unit) { + let bs = self.0; + + if bs >= GIBIBYTE { + (self.as_gibs(), Unit::GiBs) + } else if bs >= MEBIBYTE { + (self.as_mibs(), Unit::MiBs) + } else { + (self.as_kibs(), Unit::KiBs) + } + } +} + +impl fmt::Display for Throughput { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let (value, unit) = self.normalize(); + write!(f, "{:.2?} {}", value, unit) + } +} + +/// Serializes `Throughput` and uses MiBs as the unit. 
+pub fn serialize_throughput(throughput: &Throughput, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_u64(throughput.as_mibs() as u64) +} + +/// Serializes `Option` and uses MiBs as the unit. +pub fn serialize_throughput_option( + maybe_throughput: &Option, + serializer: S, +) -> Result +where + S: Serializer, +{ + if let Some(throughput) = maybe_throughput { + return serializer.serialize_some(&(throughput.as_mibs() as u64)) + } + serializer.serialize_none() +} + #[inline(always)] pub(crate) fn benchmark( name: &str, @@ -39,7 +144,7 @@ pub(crate) fn benchmark( max_iterations: usize, max_duration: Duration, mut run: impl FnMut() -> Result<(), E>, -) -> Result { +) -> Result { // Run the benchmark once as a warmup to get the code into the L1 cache. run()?; @@ -58,9 +163,9 @@ pub(crate) fn benchmark( } } - let score = ((size * count) as f64 / elapsed.as_secs_f64()) / (1024.0 * 1024.0); + let score = Throughput::from_kibs((size * count) as f64 / (elapsed.as_secs_f64() * 1024.0)); log::trace!( - "Calculated {} of {:.2}MB/s in {} iterations in {}ms", + "Calculated {} of {} in {} iterations in {}ms", name, score, count, @@ -120,14 +225,14 @@ fn clobber_value(input: &mut T) { pub const DEFAULT_CPU_EXECUTION_LIMIT: ExecutionLimit = ExecutionLimit::Both { max_iterations: 4 * 1024, max_duration: Duration::from_millis(100) }; -// This benchmarks the CPU speed as measured by calculating BLAKE2b-256 hashes, in MB/s. -pub fn benchmark_cpu(limit: ExecutionLimit) -> f64 { +// This benchmarks the CPU speed as measured by calculating BLAKE2b-256 hashes, in bytes per second. +pub fn benchmark_cpu(limit: ExecutionLimit) -> Throughput { // In general the results of this benchmark are somewhat sensitive to how much - // data we hash at the time. The smaller this is the *less* MB/s we can hash, - // the bigger this is the *more* MB/s we can hash, up until a certain point + // data we hash at the time. The smaller this is the *less* B/s we can hash, + // the bigger this is the *more* B/s we can hash, up until a certain point // where we can achieve roughly ~100% of what the hasher can do. If we'd plot // this on a graph with the number of bytes we want to hash on the X axis - // and the speed in MB/s on the Y axis then we'd essentially see it grow + // and the speed in B/s on the Y axis then we'd essentially see it grow // logarithmically. // // In practice however we might not always have enough data to hit the maximum @@ -156,12 +261,12 @@ pub fn benchmark_cpu(limit: ExecutionLimit) -> f64 { pub const DEFAULT_MEMORY_EXECUTION_LIMIT: ExecutionLimit = ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(100) }; -// This benchmarks the effective `memcpy` memory bandwidth available in MB/s. +// This benchmarks the effective `memcpy` memory bandwidth available in bytes per second. // // It doesn't technically measure the absolute maximum memory bandwidth available, // but that's fine, because real code most of the time isn't optimized to take // advantage of the full memory bandwidth either. -pub fn benchmark_memory(limit: ExecutionLimit) -> f64 { +pub fn benchmark_memory(limit: ExecutionLimit) -> Throughput { // Ideally this should be at least as big as the CPU's L3 cache, // and it should be big enough so that the `memcpy` takes enough // time to be actually measurable. 
@@ -253,7 +358,7 @@ pub const DEFAULT_DISK_EXECUTION_LIMIT: ExecutionLimit = pub fn benchmark_disk_sequential_writes( limit: ExecutionLimit, directory: &Path, -) -> Result { +) -> Result { const SIZE: usize = 64 * 1024 * 1024; let buffer = random_data(SIZE); @@ -295,7 +400,7 @@ pub fn benchmark_disk_sequential_writes( pub fn benchmark_disk_random_writes( limit: ExecutionLimit, directory: &Path, -) -> Result { +) -> Result { const SIZE: usize = 64 * 1024 * 1024; let buffer = random_data(SIZE); @@ -360,9 +465,9 @@ pub fn benchmark_disk_random_writes( /// Benchmarks the verification speed of sr25519 signatures. /// -/// Returns the throughput in MB/s by convention. +/// Returns the throughput in B/s by convention. /// The values are rather small (0.4-0.8) so it is advised to convert them into KB/s. -pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> f64 { +pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> Throughput { const INPUT_SIZE: usize = 32; const ITERATION_SIZE: usize = 2048; let pair = sr25519::Pair::from_string("//Alice", None).unwrap(); @@ -402,8 +507,8 @@ pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> f64 { pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench { #[allow(unused_mut)] let mut hwbench = HwBench { - cpu_hashrate_score: benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT) as u64, - memory_memcpy_score: benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT) as u64, + cpu_hashrate_score: benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT), + memory_memcpy_score: benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT), disk_sequential_write_score: None, disk_random_write_score: None, }; @@ -412,7 +517,7 @@ pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench { hwbench.disk_sequential_write_score = match benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory) { - Ok(score) => Some(score as u64), + Ok(score) => Some(score), Err(error) => { log::warn!("Failed to run the sequential write disk benchmark: {}", error); None @@ -421,7 +526,7 @@ pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench { hwbench.disk_random_write_score = match benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory) { - Ok(score) => Some(score as u64), + Ok(score) => Some(score), Err(error) => { log::warn!("Failed to run the random write disk benchmark: {}", error); None @@ -435,6 +540,7 @@ pub fn gather_hwbench(scratch_directory: Option<&Path>) -> HwBench { #[cfg(test)] mod tests { use super::*; + use sp_runtime::assert_eq_error_rate_float; #[cfg(target_os = "linux")] #[test] @@ -450,19 +556,19 @@ mod tests { #[test] fn test_benchmark_cpu() { - assert!(benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT) > 0.0); + assert!(benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT) > Throughput::from_mibs(0.0)); } #[test] fn test_benchmark_memory() { - assert!(benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT) > 0.0); + assert!(benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT) > Throughput::from_mibs(0.0)); } #[test] fn test_benchmark_disk_sequential_writes() { assert!( benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() > - 0.0 + Throughput::from_mibs(0.0) ); } @@ -470,12 +576,45 @@ mod tests { fn test_benchmark_disk_random_writes() { assert!( benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap() > - 0.0 + Throughput::from_mibs(0.0) ); } #[test] fn test_benchmark_sr25519_verify() { - assert!(benchmark_sr25519_verify(ExecutionLimit::MaxIterations(1)) > 0.0); + assert!( + 
benchmark_sr25519_verify(ExecutionLimit::MaxIterations(1)) > Throughput::from_mibs(0.0) + ); + } + + /// Test the [`Throughput`]. + #[test] + fn throughput_works() { + /// Float precision. + const EPS: f64 = 0.1; + let gib = Throughput::from_gibs(14.324); + + assert_eq_error_rate_float!(14.324, gib.as_gibs(), EPS); + assert_eq_error_rate_float!(14667.776, gib.as_mibs(), EPS); + assert_eq_error_rate_float!(14667.776 * 1024.0, gib.as_kibs(), EPS); + assert_eq!("14.32 GiBs", gib.to_string()); + + let mib = Throughput::from_mibs(1029.0); + assert_eq!("1.00 GiBs", mib.to_string()); + } + + /// Test the [`HwBench`] serialization. + #[test] + fn hwbench_serialize_works() { + let hwbench = HwBench { + cpu_hashrate_score: Throughput::from_gibs(1.32), + memory_memcpy_score: Throughput::from_kibs(9342.432), + disk_sequential_write_score: Some(Throughput::from_kibs(4332.12)), + disk_random_write_score: None, + }; + + let serialized = serde_json::to_string(&hwbench).unwrap(); + // Throughput from all of the benchmarks should be converted to MiBs. + assert_eq!(serialized, "{\"cpu_hashrate_score\":1351,\"memory_memcpy_score\":9,\"disk_sequential_write_score\":4}"); } } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index ea908bf0336bf..a2d548f1fa5cd 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -54,6 +54,7 @@ sp-inherents = { version = "4.0.0-dev", path = "../../../primitives/inherents" } sp-keystore = { version = "0.12.0", path = "../../../primitives/keystore" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machine" } +sp-std = { version = "4.0.0", path = "../../../primitives/std" } sp-storage = { version = "6.0.0", path = "../../../primitives/storage" } sp-trie = { version = "6.0.0", path = "../../../primitives/trie" } gethostname = "0.2.3" diff --git a/utils/frame/benchmarking-cli/src/machine/hardware.rs b/utils/frame/benchmarking-cli/src/machine/hardware.rs index 97960de99c4bf..50c88ec74646c 100644 --- a/utils/frame/benchmarking-cli/src/machine/hardware.rs +++ b/utils/frame/benchmarking-cli/src/machine/hardware.rs @@ -18,8 +18,40 @@ //! Contains types to define hardware requirements. use lazy_static::lazy_static; -use serde::{Deserialize, Serialize}; -use std::fmt; +use sc_sysinfo::Throughput; +use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer}; +use sp_std::{fmt, fmt::Formatter}; + +/// Serializes throughput into MiBs and represents it as `f64`. +fn serialize_throughput_as_f64(throughput: &Throughput, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_f64(throughput.as_mibs()) +} + +struct ThroughputVisitor; +impl<'de> Visitor<'de> for ThroughputVisitor { + type Value = Throughput; + + fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { + formatter.write_str("A value that is a f64.") + } + + fn visit_f64(self, value: f64) -> Result + where + E: serde::de::Error, + { + Ok(Throughput::from_mibs(value)) + } +} + +fn deserialize_throughput<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + Ok(deserializer.deserialize_f64(ThroughputVisitor))? +} lazy_static! { /// The hardware requirements as measured on reference hardware. @@ -45,6 +77,10 @@ pub struct Requirement { /// The metric to measure. pub metric: Metric, /// The minimal throughput that needs to be archived for this requirement. 
+ #[serde( + serialize_with = "serialize_throughput_as_f64", + deserialize_with = "deserialize_throughput" + )] pub minimum: Throughput, } @@ -65,17 +101,6 @@ pub enum Metric { DiskRndWrite, } -/// Throughput as measured in bytes per second. -#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)] -pub enum Throughput { - /// KiB/s - KiBs(f64), - /// MiB/s - MiBs(f64), - /// GiB/s - GiBs(f64), -} - impl Metric { /// The category of the metric. pub fn category(&self) -> &'static str { @@ -98,70 +123,9 @@ impl Metric { } } -const KIBIBYTE: f64 = 1024.0; - -impl Throughput { - /// The unit of the metric. - pub fn unit(&self) -> &'static str { - match self { - Self::KiBs(_) => "KiB/s", - Self::MiBs(_) => "MiB/s", - Self::GiBs(_) => "GiB/s", - } - } - - /// [`Self`] as number of byte/s. - pub fn to_bs(&self) -> f64 { - self.to_kibs() * KIBIBYTE - } - - /// [`Self`] as number of kibibyte/s. - pub fn to_kibs(&self) -> f64 { - self.to_mibs() * KIBIBYTE - } - - /// [`Self`] as number of mebibyte/s. - pub fn to_mibs(&self) -> f64 { - self.to_gibs() * KIBIBYTE - } - - /// [`Self`] as number of gibibyte/s. - pub fn to_gibs(&self) -> f64 { - match self { - Self::KiBs(k) => *k / (KIBIBYTE * KIBIBYTE), - Self::MiBs(m) => *m / KIBIBYTE, - Self::GiBs(g) => *g, - } - } - - /// Normalizes [`Self`] to use the larges unit possible. - pub fn normalize(&self) -> Self { - let bs = self.to_bs(); - - if bs >= KIBIBYTE * KIBIBYTE * KIBIBYTE { - Self::GiBs(self.to_gibs()) - } else if bs >= KIBIBYTE * KIBIBYTE { - Self::MiBs(self.to_mibs()) - } else { - Self::KiBs(self.to_kibs()) - } - } -} - -impl fmt::Display for Throughput { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let normalized = self.normalize(); - match normalized { - Self::KiBs(s) | Self::MiBs(s) | Self::GiBs(s) => - write!(f, "{:.2?} {}", s, normalized.unit()), - } - } -} - #[cfg(test)] mod tests { use super::*; - use sp_runtime::assert_eq_error_rate_float; /// `SUBSTRATE_REFERENCE_HARDWARE` can be en- and decoded. #[test] @@ -171,21 +135,4 @@ mod tests { assert_eq!(decoded, SUBSTRATE_REFERENCE_HARDWARE.clone()); } - - /// Test the [`Throughput`]. - #[test] - fn throughput_works() { - /// Float precision. - const EPS: f64 = 0.1; - let gib = Throughput::GiBs(14.324); - - assert_eq_error_rate_float!(14.324, gib.to_gibs(), EPS); - assert_eq_error_rate_float!(14667.776, gib.to_mibs(), EPS); - assert_eq_error_rate_float!(14667.776 * 1024.0, gib.to_kibs(), EPS); - assert_eq!("14.32 GiB/s", gib.to_string()); - assert_eq!("14.32 GiB/s", gib.normalize().to_string()); - - let mib = Throughput::MiBs(1029.0); - assert_eq!("1.00 GiB/s", mib.to_string()); - } } diff --git a/utils/frame/benchmarking-cli/src/machine/mod.rs b/utils/frame/benchmarking-cli/src/machine/mod.rs index a19db671f3cd1..82b4e5be7358e 100644 --- a/utils/frame/benchmarking-cli/src/machine/mod.rs +++ b/utils/frame/benchmarking-cli/src/machine/mod.rs @@ -30,11 +30,11 @@ use sc_cli::{CliConfiguration, Result, SharedParams}; use sc_service::Configuration; use sc_sysinfo::{ benchmark_cpu, benchmark_disk_random_writes, benchmark_disk_sequential_writes, - benchmark_memory, benchmark_sr25519_verify, ExecutionLimit, + benchmark_memory, benchmark_sr25519_verify, ExecutionLimit, Throughput, }; use crate::shared::check_build_profile; -pub use hardware::{Metric, Requirement, Requirements, Throughput, SUBSTRATE_REFERENCE_HARDWARE}; +pub use hardware::{Metric, Requirement, Requirements, SUBSTRATE_REFERENCE_HARDWARE}; /// Command to benchmark the hardware. 
/// @@ -128,8 +128,9 @@ impl MachineCmd { /// Benchmarks a specific metric of the hardware and judges the resulting score. fn run_benchmark(&self, requirement: &Requirement, dir: &Path) -> Result { // Dispatch the concrete function from `sc-sysinfo`. + let score = self.measure(&requirement.metric, dir)?; - let rel_score = score.to_bs() / requirement.minimum.to_bs(); + let rel_score = score.as_bytes() / requirement.minimum.as_bytes(); // Sanity check if the result is off by factor >100x. if rel_score >= 100.0 || rel_score <= 0.01 { @@ -147,13 +148,11 @@ impl MachineCmd { let memory_limit = ExecutionLimit::from_secs_f32(self.memory_duration); let score = match metric { - Metric::Blake2256 => Throughput::MiBs(benchmark_cpu(hash_limit) as f64), - Metric::Sr25519Verify => Throughput::MiBs(benchmark_sr25519_verify(verify_limit)), - Metric::MemCopy => Throughput::MiBs(benchmark_memory(memory_limit) as f64), - Metric::DiskSeqWrite => - Throughput::MiBs(benchmark_disk_sequential_writes(disk_limit, dir)? as f64), - Metric::DiskRndWrite => - Throughput::MiBs(benchmark_disk_random_writes(disk_limit, dir)? as f64), + Metric::Blake2256 => benchmark_cpu(hash_limit), + Metric::Sr25519Verify => benchmark_sr25519_verify(verify_limit), + Metric::MemCopy => benchmark_memory(memory_limit), + Metric::DiskSeqWrite => benchmark_disk_sequential_writes(disk_limit, dir)?, + Metric::DiskRndWrite => benchmark_disk_random_writes(disk_limit, dir)?, }; Ok(score) } diff --git a/utils/frame/benchmarking-cli/src/machine/reference_hardware.json b/utils/frame/benchmarking-cli/src/machine/reference_hardware.json index 12645df8391e7..2a451d31403f1 100644 --- a/utils/frame/benchmarking-cli/src/machine/reference_hardware.json +++ b/utils/frame/benchmarking-cli/src/machine/reference_hardware.json @@ -1,32 +1,22 @@ [ { "metric": "Blake2256", - "minimum": { - "MiBs": 1029.0 - } + "minimum": 1029.0 }, { "metric": "Sr25519Verify", - "minimum": { - "KiBs": 666.0 - } + "minimum": 0.650391 }, { "metric": "MemCopy", - "minimum": { - "GiBs": 14.323 - } + "minimum": 14666.752 }, { "metric": "DiskSeqWrite", - "minimum": { - "MiBs": 450.0 - } + "minimum": 450.0 }, { "metric": "DiskRndWrite", - "minimum": { - "MiBs": 200.0 - } + "minimum": 200.0 } ] From 3cd6db7907bd0efc9e4299b168196d97651dfc25 Mon Sep 17 00:00:00 2001 From: shekohex Date: Sat, 5 Nov 2022 13:39:29 +0200 Subject: [PATCH 311/348] Bump `k256` from `0.10.4` to `0.11.4` (#12085) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Bump `k256` from `0.10.4` to `0.11.4` * Update Cargo.lock * Update Co-authored-by: Bastian Köcher Co-authored-by: Oliver Tale-Yazdi --- Cargo.lock | 75 +++++++++++++++++++++++----------------- frame/support/Cargo.toml | 2 +- 2 files changed, 44 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 48af2ffe4d88f..510161e225732 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1062,9 +1062,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" +checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" [[package]] name = "constant_time_eq" @@ -1379,9 +1379,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.3.2" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" +checksum = "9f2b443d17d49dad5ef0ede301c3179cc923b8822f3393b4d2c28c269dd4a122" dependencies = [ "generic-array 0.14.4", "rand_core 0.6.2", @@ -1606,11 +1606,12 @@ dependencies = [ [[package]] name = "der" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" dependencies = [ "const-oid", + "zeroize", ] [[package]] @@ -1801,9 +1802,9 @@ dependencies = [ [[package]] name = "ecdsa" -version = "0.13.4" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +checksum = "85789ce7dfbd0f0624c07ef653a08bb2ebf43d3e16531361f46d36dd54334fed" dependencies = [ "der", "elliptic-curve", @@ -1856,13 +1857,14 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "elliptic-curve" -version = "0.11.12" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" +checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" dependencies = [ "base16ct", "crypto-bigint", "der", + "digest 0.10.3", "ff", "generic-array 0.14.4", "group", @@ -2032,9 +2034,9 @@ dependencies = [ [[package]] name = "ff" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2958d04124b9f27f175eaeb9a9f383d026098aa837eadd8ba22c11f13a05b9e" +checksum = "df689201f395c6b90dfe87127685f8dbfc083a5e779e613575d8bd7314300c3e" dependencies = [ "rand_core 0.6.2", "subtle", @@ -2781,9 +2783,9 @@ dependencies = [ [[package]] name = "group" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" +checksum = "7391856def869c1c81063a03457c676fbcd419709c3dfb33d8d319de484b154d" dependencies = [ "ff", "rand_core 0.6.2", @@ -2903,6 +2905,15 @@ dependencies = [ "digest 0.9.0", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.3", +] + [[package]] name = "hmac-drbg" version = "0.3.0" @@ -3333,14 +3344,14 @@ dependencies = [ [[package]] name = "k256" -version = "0.10.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" +checksum = "3636d281d46c3b64182eb3a0a42b7b483191a2ecc3f05301fa67403f7c9bc949" dependencies = [ "cfg-if 1.0.0", "ecdsa", "elliptic-curve", - "sec1", + "sha2 0.10.2", ] [[package]] @@ -6723,13 +6734,12 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ "der", "spki", - "zeroize", ] [[package]] @@ -7378,12 +7388,12 @@ dependencies = [ 
[[package]] name = "rfc6979" -version = "0.1.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" +checksum = "88c86280f057430a52f4861551b092a01b419b8eacefc7c995eacb9dc132fe32" dependencies = [ "crypto-bigint", - "hmac 0.11.0", + "hmac 0.12.1", "zeroize", ] @@ -9004,10 +9014,11 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "sec1" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" +checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ + "base16ct", "der", "generic-array 0.14.4", "pkcs8", @@ -9264,11 +9275,11 @@ dependencies = [ [[package]] name = "signature" -version = "1.4.0" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" +checksum = "deb766570a2825fa972bceff0d195727876a9cdf2460ab2e52d455dc2de47fd9" dependencies = [ - "digest 0.9.0", + "digest 0.10.3", "rand_core 0.6.2", ] @@ -10213,9 +10224,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spki" -version = "0.5.4" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ "base64ct", "der", @@ -12202,9 +12213,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.4.3" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" dependencies = [ "zeroize_derive", ] diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index be9cb1f1bf316..5af1dc26c1b49 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -37,7 +37,7 @@ impl-trait-for-tuples = "0.2.2" smallvec = "1.8.0" log = { version = "0.4.17", default-features = false } sp-core-hashing-proc-macro = { version = "5.0.0", path = "../../primitives/core/hashing/proc-macro" } -k256 = { version = "0.10.4", default-features = false, features = ["ecdsa"] } +k256 = { version = "0.11.5", default-features = false, features = ["ecdsa"] } [dev-dependencies] serde_json = "1.0.85" From cafbb668d2e57ede83f11f59d8bf291aa8d0c765 Mon Sep 17 00:00:00 2001 From: cheme Date: Sat, 5 Nov 2022 23:26:12 +0100 Subject: [PATCH 312/348] Guard some invalid node for proof decoding. (#12417) * Guard some invalid node for proof decoding. * only forbid bitmap with no children. * change format * scale error. 
* small test Co-authored-by: parity-processbot <> --- primitives/trie/src/lib.rs | 11 +++++++++++ primitives/trie/src/node_codec.rs | 9 +++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index f40ed4d9b53a5..5634d96d58356 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -986,4 +986,15 @@ mod tests { assert_eq!(first_storage_root, second_storage_root); } + + #[test] + fn node_with_no_children_fail_decoding() { + let branch = NodeCodec::::branch_node_nibbled( + b"some_partial".iter().copied(), + 24, + vec![None; 16].into_iter(), + Some(trie_db::node::Value::Inline(b"value"[..].into())), + ); + assert!(NodeCodec::::decode(branch.as_slice()).is_err()); + } } diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 4b3e69adb7041..0202b1c6ba7d2 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -304,8 +304,13 @@ const BITMAP_LENGTH: usize = 2; pub(crate) struct Bitmap(u16); impl Bitmap { - pub fn decode(mut data: &[u8]) -> Result { - Ok(Bitmap(u16::decode(&mut data)?)) + pub fn decode(data: &[u8]) -> Result { + let value = u16::decode(&mut &data[..])?; + if value == 0 { + Err("Bitmap without a child.".into()) + } else { + Ok(Bitmap(value)) + } } pub fn value_at(&self, i: usize) -> bool { From 30b7e62e3b00cc7b6eaaa9548c8936c3b8b6e3d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Nov 2022 10:17:53 +0000 Subject: [PATCH 313/348] Bump regex from 1.5.5 to 1.6.0 (#12117) Bumps [regex](https://github.com/rust-lang/regex) from 1.5.5 to 1.6.0. - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.5.5...1.6.0) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: parity-processbot <> --- bin/node/cli/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/executor/Cargo.toml | 2 +- client/tracing/Cargo.toml | 2 +- primitives/core/Cargo.toml | 2 +- primitives/panic-handler/Cargo.toml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 07208abdd089b..ffd40aac1caa0 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -125,7 +125,7 @@ tempfile = "3.1.0" assert_cmd = "2.0.2" nix = "0.23" serde_json = "1.0" -regex = "1.5.5" +regex = "1.6.0" platforms = "2.0" async-std = { version = "1.11.0", features = ["attributes"] } soketto = "0.7.1" diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 04824859ce8ab..f749b9b5b0c4a 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -23,7 +23,7 @@ log = "0.4.17" names = { version = "0.13.0", default-features = false } parity-scale-codec = "3.0.0" rand = "0.7.3" -regex = "1.5.5" +regex = "1.6.0" rpassword = "7.0.0" serde = "1.0.136" serde_json = "1.0.85" diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 264db0e89b1c8..bda2992392707 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -47,7 +47,7 @@ sp-maybe-compressed-blob = { version = "4.1.0-dev", path = "../../primitives/may sc-tracing = { version = "4.0.0-dev", path = "../tracing" } tracing-subscriber = "0.2.19" paste = "1.0" -regex = "1.5.5" +regex = "1.6.0" criterion = "0.3" env_logger = "0.9" num_cpus = "1.13.1" diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 8bc5d770c9c5a..43fa2d4e52e8a 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -21,7 +21,7 @@ libc = "0.2.121" log = { version = "0.4.17" } once_cell = "1.8.0" parking_lot = "0.12.1" -regex = "1.5.5" +regex = "1.6.0" rustc-hash = "1.1.0" serde = "1.0.136" thiserror = "1.0.30" diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index dcb61b33f347f..12cc9e2f0b60f 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -30,7 +30,7 @@ base58 = { version = "0.2.0", optional = true } rand = { version = "0.7.3", optional = true, features = ["small_rng"] } substrate-bip39 = { version = "0.4.4", optional = true } tiny-bip39 = { version = "0.8.2", optional = true } -regex = { version = "1.5.4", optional = true } +regex = { version = "1.6.0", optional = true } num-traits = { version = "0.2.8", default-features = false } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", default-features = false } diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index bd429fd0e8af7..19f76dddbab22 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -16,4 +16,4 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] backtrace = "0.3.64" lazy_static = "1.4.0" -regex = "1.5.5" +regex = "1.6.0" From 409e6f9044f320d70f68fde11274b76ab0228c33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 7 Nov 2022 14:01:23 +0100 Subject: [PATCH 314/348] Make `--db` case insensitive again (#12630) This was broken in the switch to Clap v4. 
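A minimal standalone sketch of the behaviour this restores; only the `ignore_case = true` attribute is taken from the diff below, while the `Database` variants, CLI struct and binary name are illustrative stand-ins for the real `sc-cli` types:

```rust
use clap::{Parser, ValueEnum};

// Illustrative stand-in for the real database enum; the variant names map to the
// values "rocksdb" and "paritydb" through clap's default value renaming.
#[derive(Clone, Debug, PartialEq, ValueEnum)]
enum Database {
    Rocksdb,
    Paritydb,
}

#[derive(Parser)]
struct Cli {
    // Mirrors the fixed argument: case-insensitive matching of the value enum.
    #[arg(long, alias = "db", value_name = "DB", ignore_case = true, value_enum)]
    database: Option<Database>,
}

fn main() {
    // With `ignore_case = true`, `--db RocksDB` parses the same as `--db rocksdb`.
    let cli = Cli::parse_from(["node", "--db", "RocksDB"]);
    assert_eq!(cli.database, Some(Database::Rocksdb));
}
```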
--- client/cli/src/params/database_params.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/cli/src/params/database_params.rs b/client/cli/src/params/database_params.rs index 3c9ae09e9ab42..fdd3622580a6d 100644 --- a/client/cli/src/params/database_params.rs +++ b/client/cli/src/params/database_params.rs @@ -23,7 +23,7 @@ use clap::Args; #[derive(Debug, Clone, PartialEq, Args)] pub struct DatabaseParams { /// Select database backend to use. - #[arg(long, alias = "db", value_name = "DB", value_enum)] + #[arg(long, alias = "db", value_name = "DB", ignore_case = true, value_enum)] pub database: Option, /// Limit the memory the database cache can use. From 247ff88c4ff1f7242c66487f00ad5117ae3e1ee7 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 7 Nov 2022 21:12:26 +0100 Subject: [PATCH 315/348] txpool: enactment state forced update (#12632) * txpool: enactment state forced update When `tree_route` computation fails, we still need to update the `enactment_state` to be aligned with last known finalized/best block. We do not execute enactment phase of maintain procedure, but we do update the state. * error -> debug * test added --- .../transaction-pool/src/enactment_state.rs | 28 +++++++++++++++++++ client/transaction-pool/src/lib.rs | 4 +-- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/client/transaction-pool/src/enactment_state.rs b/client/transaction-pool/src/enactment_state.rs index 242b557dfbbbd..b347de824fa12 100644 --- a/client/transaction-pool/src/enactment_state.rs +++ b/client/transaction-pool/src/enactment_state.rs @@ -134,6 +134,16 @@ where Ok(Some(tree_route)) } + + /// Forces update of the state according to the given `ChainEvent`. Intended to be used as a + /// fallback when tree_route cannot be computed. + pub fn force_update(&mut self, event: &ChainEvent) { + match event { + ChainEvent::NewBestBlock { hash, .. } => self.recent_best_block = *hash, + ChainEvent::Finalized { hash, .. } => self.recent_finalized_block = *hash, + }; + log::debug!(target: "txpool", "forced update: {:?}, {:?}", self.recent_best_block, self.recent_finalized_block); + } } #[cfg(test)] @@ -576,4 +586,22 @@ mod enactment_state_tests { assert_eq!(result, false); assert_es_eq(&es, e1(), d1()); } + + #[test] + fn test_enactment_forced_update_best_block() { + sp_tracing::try_init_simple(); + let mut es = EnactmentState::new(a().hash, a().hash); + + es.force_update(&ChainEvent::NewBestBlock { hash: b1().hash, tree_route: None }); + assert_es_eq(&es, b1(), a()); + } + + #[test] + fn test_enactment_forced_update_finalize() { + sp_tracing::try_init_simple(); + let mut es = EnactmentState::new(a().hash, a().hash); + + es.force_update(&ChainEvent::Finalized { hash: b1().hash, tree_route: Arc::from([]) }); + assert_es_eq(&es, a(), b1()); + } } diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 9e0a2e74bf421..e66c780a5ed8f 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -748,8 +748,8 @@ where match result { Err(msg) => { - log::warn!(target: "txpool", "{msg}"); - return + log::debug!(target: "txpool", "{msg}"); + self.enactment_state.lock().force_update(&event); }, Ok(None) => {}, Ok(Some(tree_route)) => { From ee9c3859c6a1644419f5472aaec432bd7f9d66a0 Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Mon, 7 Nov 2022 16:40:55 -0500 Subject: [PATCH 316/348] Add pallet dev mode (#12536) * stub for construct_dev_runtime! 
* revert * stub for dev_mode proc macro * preliminary docs for pallet::dev_mode (attribute) proc macro * add dev_mode to pallet_macros module * add docs item for dev_mode to frame_support * parsing of #[pallet(dev_mode)] * strip out dev_mode stub since it will be an arg for pallet instead * make pallet Def struct aware of dev mode * WIP * revert changes to call.rs * pass dev_mode to pallet parsing code * auto-specify default weights when in dev mode if not specified * add proof / expect for syn::parse in dev mode weight processing * set all storages to unbounded when in dev mode * just use 0 Co-authored-by: Shawn Tabrizi * add invalid pallet arg test * add passing dev mode pallet test * add test confirming that dev mode features only work in dev mode * cargo fmt + clean up * bump CI * fix pallet ui test * add docs for dev mode * add warning about using dev mode in production circumstances * remove comment about no other attributes being supported * fix unneeded assignment * make warning more explicit * more explicit warning about using dev mode in production * simpler assignment for dev_mode boolean Co-authored-by: Oliver Tale-Yazdi * add note about MEL requirement Co-authored-by: Oliver Tale-Yazdi * add comment specifying why weights can be omitted in example Co-authored-by: Oliver Tale-Yazdi * tweak wording of comments * bump ci Co-authored-by: Shawn Tabrizi Co-authored-by: Oliver Tale-Yazdi --- frame/support/procedural/src/lib.rs | 28 +++++++++++++++ frame/support/procedural/src/pallet/mod.rs | 22 ++++++++---- .../procedural/src/pallet/parse/call.rs | 9 +++++ .../procedural/src/pallet/parse/mod.rs | 8 +++-- .../procedural/src/pallet/parse/storage.rs | 5 ++- frame/support/src/lib.rs | 30 ++++++++++++++++ .../tests/pallet_ui/attr_non_empty.stderr | 4 +-- .../tests/pallet_ui/dev_mode_without_arg.rs | 33 +++++++++++++++++ .../pallet_ui/dev_mode_without_arg.stderr | 11 ++++++ .../dev_mode_without_arg_max_encoded_len.rs | 34 ++++++++++++++++++ ...ev_mode_without_arg_max_encoded_len.stderr | 17 +++++++++ .../tests/pallet_ui/pallet_invalid_arg.rs | 4 +++ .../tests/pallet_ui/pallet_invalid_arg.stderr | 5 +++ .../tests/pallet_ui/pass/dev_mode_valid.rs | 35 +++++++++++++++++++ 14 files changed, 233 insertions(+), 12 deletions(-) create mode 100644 frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs create mode 100644 frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr create mode 100644 frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs create mode 100644 frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr create mode 100644 frame/support/test/tests/pallet_ui/pallet_invalid_arg.rs create mode 100644 frame/support/test/tests/pallet_ui/pallet_invalid_arg.stderr create mode 100644 frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index ccff5488c93be..cd30946ae7855 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -445,6 +445,34 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { /// pallet. Otherwise it implements `StorageInfoTrait` for the pallet using the /// `PartialStorageInfoTrait` implementation of storages. /// +/// ## Dev Mode (`#[pallet(dev_mode)]`) +/// +/// Specifying the argument `dev_mode` will allow you to enable dev mode for a pallet. 
The aim +/// of dev mode is to loosen some of the restrictions and requirements placed on production +/// pallets for easy tinkering and development. Dev mode pallets should not be used in +/// production. Enabling dev mode has the following effects: +/// +/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By default, dev +/// mode pallets will assume a weight of zero (`0`) if a weight is not specified. This is +/// equivalent to specifying `#[weight(0)]` on all calls that do not specify a weight. +/// * All storages are marked as unbounded, meaning you do not need to implement `MaxEncodedLen` on +/// storage types. This is equivalent to specifying `#[pallet::unbounded]` on all storage type +/// definitions. +/// +/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or +/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument +/// cannot be specified anywhere else, including but not limited to the `#[pallet::pallet]` +/// attribute macro. +/// +///
+/// WARNING:
+/// You should not deploy or use dev mode pallets in production. Doing so can break your chain
+/// and therefore should never be done. Once you are done tinkering, you should remove the
+/// 'dev_mode' argument from your #[pallet] declaration and fix any compile errors before
+/// attempting to use your pallet in a production scenario.
+/// 
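Roughly, the two relaxations above let a dev mode pallet be written like the sketch below (shaped after the `dev_mode` UI tests added in this patch; the storage item and call are illustrative only):

```rust
#[frame_support::pallet(dev_mode)]
pub mod pallet {
    use frame_support::pallet_prelude::*;
    use frame_system::pallet_prelude::*;

    #[pallet::pallet]
    pub struct Pallet<T>(_);

    #[pallet::config]
    pub trait Config: frame_system::Config {}

    // Unbounded storage is fine in dev mode: `Vec<u8>` needs no `MaxEncodedLen`.
    #[pallet::storage]
    type MyStorage<T: Config> = StorageValue<_, Vec<u8>>;

    #[pallet::call]
    impl<T: Config> Pallet<T> {
        // No `#[pallet::weight(..)]` attribute: dev mode falls back to a weight of 0.
        pub fn my_call(_origin: OriginFor<T>) -> DispatchResult {
            Ok(())
        }
    }
}
```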
+/// /// See `frame_support::pallet` docs for more info. #[proc_macro_attribute] pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { diff --git a/frame/support/procedural/src/pallet/mod.rs b/frame/support/procedural/src/pallet/mod.rs index ff9f122867746..3f85be81c1f7d 100644 --- a/frame/support/procedural/src/pallet/mod.rs +++ b/frame/support/procedural/src/pallet/mod.rs @@ -31,20 +31,30 @@ mod parse; pub use parse::Def; use syn::spanned::Spanned; +mod keyword { + syn::custom_keyword!(dev_mode); +} + pub fn pallet( attr: proc_macro::TokenStream, item: proc_macro::TokenStream, ) -> proc_macro::TokenStream { + let mut dev_mode = false; if !attr.is_empty() { - let msg = - "Invalid pallet macro call: expected no attributes, e.g. macro call must be just \ - `#[frame_support::pallet]` or `#[pallet]`"; - let span = proc_macro2::TokenStream::from(attr).span(); - return syn::Error::new(span, msg).to_compile_error().into() + if let Ok(_) = syn::parse::(attr.clone()) { + dev_mode = true; + } else { + let msg = "Invalid pallet macro call: unexpected attribute. Macro call must be \ + bare, such as `#[frame_support::pallet]` or `#[pallet]`, or must specify the \ + `dev_mode` attribute, such as `#[frame_support::pallet(dev_mode)]` or \ + #[pallet(dev_mode)]."; + let span = proc_macro2::TokenStream::from(attr).span(); + return syn::Error::new(span, msg).to_compile_error().into() + } } let item = syn::parse_macro_input!(item as syn::ItemMod); - match parse::Def::try_from(item) { + match parse::Def::try_from(item, dev_mode) { Ok(def) => expand::expand(def).into(), Err(e) => e.to_compile_error().into(), } diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index f7b2c9544d831..fbca9a52c767c 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -144,6 +144,7 @@ impl CallDef { attr_span: proc_macro2::Span, index: usize, item: &mut syn::Item, + dev_mode: bool, ) -> syn::Result { let item_impl = if let syn::Item::Impl(item) = item { item @@ -213,6 +214,14 @@ impl CallDef { }, ); + if weight_attrs.is_empty() && dev_mode { + // inject a default O(1) weight when dev mode is enabled and no weight has + // been specified on the call + let empty_weight: syn::Expr = syn::parse(quote::quote!(0).into()) + .expect("we are parsing a quoted string; qed"); + weight_attrs.push(FunctionAttr::Weight(empty_weight)); + } + if weight_attrs.len() != 1 { let msg = if weight_attrs.is_empty() { "Invalid pallet::call, requires weight attribute i.e. 
`#[pallet::weight($expr)]`" diff --git a/frame/support/procedural/src/pallet/parse/mod.rs b/frame/support/procedural/src/pallet/parse/mod.rs index 8b4ab154b306a..f91159248281c 100644 --- a/frame/support/procedural/src/pallet/parse/mod.rs +++ b/frame/support/procedural/src/pallet/parse/mod.rs @@ -59,10 +59,11 @@ pub struct Def { pub type_values: Vec, pub frame_system: syn::Ident, pub frame_support: syn::Ident, + pub dev_mode: bool, } impl Def { - pub fn try_from(mut item: syn::ItemMod) -> syn::Result { + pub fn try_from(mut item: syn::ItemMod, dev_mode: bool) -> syn::Result { let frame_system = generate_crate_access_2018("frame-system")?; let frame_support = generate_crate_access_2018("frame-support")?; @@ -106,7 +107,7 @@ impl Def { hooks = Some(m); }, Some(PalletAttr::RuntimeCall(span)) if call.is_none() => - call = Some(call::CallDef::try_from(span, index, item)?), + call = Some(call::CallDef::try_from(span, index, item, dev_mode)?), Some(PalletAttr::Error(span)) if error.is_none() => error = Some(error::ErrorDef::try_from(span, index, item)?), Some(PalletAttr::RuntimeEvent(span)) if event.is_none() => @@ -124,7 +125,7 @@ impl Def { Some(PalletAttr::Inherent(_)) if inherent.is_none() => inherent = Some(inherent::InherentDef::try_from(index, item)?), Some(PalletAttr::Storage(span)) => - storages.push(storage::StorageDef::try_from(span, index, item)?), + storages.push(storage::StorageDef::try_from(span, index, item, dev_mode)?), Some(PalletAttr::ValidateUnsigned(_)) if validate_unsigned.is_none() => { let v = validate_unsigned::ValidateUnsignedDef::try_from(index, item)?; validate_unsigned = Some(v); @@ -173,6 +174,7 @@ impl Def { type_values, frame_system, frame_support, + dev_mode, }; def.check_instance_usage()?; diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index b16ff05803d98..8b551ab31d6c3 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -678,6 +678,7 @@ impl StorageDef { attr_span: proc_macro2::Span, index: usize, item: &mut syn::Item, + dev_mode: bool, ) -> syn::Result { let item = if let syn::Item::Type(item) = item { item @@ -686,9 +687,11 @@ impl StorageDef { }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; - let PalletStorageAttrInfo { getter, rename_as, unbounded, whitelisted } = + let PalletStorageAttrInfo { getter, rename_as, mut unbounded, whitelisted } = PalletStorageAttrInfo::from_attrs(attrs)?; + // set all storages to be unbounded if dev_mode is enabled + unbounded |= dev_mode; let cfg_attrs = helper::get_item_cfg_attrs(&item.attrs); let instances = vec![helper::check_type_def_gen(&item.generics, item.ident.span())?]; diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 9b0ee84c34d8a..84e416e50544d 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -1487,6 +1487,36 @@ pub mod pallet_prelude { /// non-instantiable pallets. For an example of an instantiable pallet, see [this /// example](#example-of-an-instantiable-pallet). /// +/// # Dev Mode (`#[pallet(dev_mode)]`) +/// +/// Specifying the argument `dev_mode` on the `#[pallet]` or `#[frame_support::pallet]` +/// attribute attached to your pallet module will allow you to enable dev mode for a pallet. +/// The aim of dev mode is to loosen some of the restrictions and requirements placed on +/// production pallets for easy tinkering and development. Dev mode pallets should not be used +/// in production. 
Enabling dev mode has the following effects:
+///
+/// * Weights no longer need to be specified on every `#[pallet::call]` declaration. By
+/// default, dev mode pallets will assume a weight of zero (`0`) if a weight is not
+/// specified. This is equivalent to specifying `#[pallet::weight(0)]` on all calls that do not
+/// specify a weight.
+/// * All storages are marked as unbounded, meaning you do not need to implement
+/// `MaxEncodedLen` on storage types. This is equivalent to specifying `#[pallet::unbounded]`
+/// on all storage type definitions.
+///
+/// Note that the `dev_mode` argument can only be supplied to the `#[pallet]` or
+/// `#[frame_support::pallet]` attribute macro that encloses your pallet module. This argument
+/// cannot be specified anywhere else, including but not limited to the `#[pallet::pallet]`
+/// attribute macro.
+///
+///
+/// WARNING:
+/// You should not deploy or use dev mode pallets in production. Doing so can break your chain
+/// and therefore should never be done. Once you are done tinkering, you should remove the
+/// 'dev_mode' argument from your #[pallet] declaration and fix any compile errors before
+/// attempting to use your pallet in a production scenario.
+/// 
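To make the equivalence described in the documentation above concrete, here is a minimal sketch of a dev mode pallet, modelled on the `dev_mode_valid.rs` UI test added later in this patch. The module, storage item, and call names are placeholders; the comments spell out the attributes that `dev_mode` implies:

#[frame_support::pallet(dev_mode)]
pub mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::pallet]
	pub struct Pallet<T>(_);

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	// `Vec<u8>` does not implement `MaxEncodedLen`; with `dev_mode` enabled this storage
	// item is accepted as if `#[pallet::unbounded]` had been written on it.
	#[pallet::storage]
	type MyStorage<T> = StorageValue<_, Vec<u8>>;

	#[pallet::call]
	impl<T: Config> Pallet<T> {
		// No weight attribute; with `dev_mode` enabled this call is treated as if
		// `#[pallet::weight(0)]` had been written on it.
		pub fn my_call(_origin: OriginFor<T>) -> DispatchResult {
			Ok(())
		}
	}
}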
+/// /// # Pallet struct placeholder: `#[pallet::pallet]` (mandatory) /// /// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify diff --git a/frame/support/test/tests/pallet_ui/attr_non_empty.stderr b/frame/support/test/tests/pallet_ui/attr_non_empty.stderr index 144af5a17ea5c..9eac5de35db80 100644 --- a/frame/support/test/tests/pallet_ui/attr_non_empty.stderr +++ b/frame/support/test/tests/pallet_ui/attr_non_empty.stderr @@ -1,5 +1,5 @@ -error: Invalid pallet macro call: expected no attributes, e.g. macro call must be just `#[frame_support::pallet]` or `#[pallet]` - --> $DIR/attr_non_empty.rs:1:26 +error: Invalid pallet macro call: unexpected attribute. Macro call must be bare, such as `#[frame_support::pallet]` or `#[pallet]`, or must specify the `dev_mode` attribute, such as `#[frame_support::pallet(dev_mode)]` or #[pallet(dev_mode)]. + --> tests/pallet_ui/attr_non_empty.rs:1:26 | 1 | #[frame_support::pallet [foo]] | ^^^ diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs b/frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs new file mode 100644 index 0000000000000..f044ae6d7878f --- /dev/null +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg.rs @@ -0,0 +1,33 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + // The struct on which we build all of our Pallet logic. + #[pallet::pallet] + pub struct Pallet(_); + + // Your Pallet's configuration trait, representing custom external types and interfaces. + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::storage] + type MyStorage = StorageValue<_, Vec>; + + // Your Pallet's callable functions. + #[pallet::call] + impl Pallet { + pub fn my_call(_origin: OriginFor) -> DispatchResult { + Ok(()) + } + } + + // Your Pallet's internal functions. + impl Pallet {} +} + +fn main() {} diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr new file mode 100644 index 0000000000000..fac7fd77df9ae --- /dev/null +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg.stderr @@ -0,0 +1,11 @@ +error: Invalid pallet::call, requires weight attribute i.e. `#[pallet::weight($expr)]` + --> tests/pallet_ui/dev_mode_without_arg.rs:24:7 + | +24 | pub fn my_call(_origin: OriginFor) -> DispatchResult { + | ^^ + +error[E0432]: unresolved import `pallet` + --> tests/pallet_ui/dev_mode_without_arg.rs:3:9 + | +3 | pub use pallet::*; + | ^^^^^^ help: a similar path exists: `test_pallet::pallet` diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs new file mode 100644 index 0000000000000..f6efcc3fc3d72 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs @@ -0,0 +1,34 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + // The struct on which we build all of our Pallet logic. + #[pallet::pallet] + pub struct Pallet(_); + + // Your Pallet's configuration trait, representing custom external types and interfaces. 
+ #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::storage] + type MyStorage = StorageValue<_, Vec>; + + // Your Pallet's callable functions. + #[pallet::call] + impl Pallet { + #[pallet::weight(0)] + pub fn my_call(_origin: OriginFor) -> DispatchResult { + Ok(()) + } + } + + // Your Pallet's internal functions. + impl Pallet {} +} + +fn main() {} diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr new file mode 100644 index 0000000000000..2f8d0cc761ab2 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -0,0 +1,17 @@ +error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied + --> tests/pallet_ui/dev_mode_without_arg_max_encoded_len.rs:11:12 + | +11 | #[pallet::pallet] + | ^^^^^^ the trait `MaxEncodedLen` is not implemented for `Vec` + | + = help: the following other types implement trait `MaxEncodedLen`: + () + (TupleElement0, TupleElement1) + (TupleElement0, TupleElement1, TupleElement2) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) + (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) + and 78 others + = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` diff --git a/frame/support/test/tests/pallet_ui/pallet_invalid_arg.rs b/frame/support/test/tests/pallet_ui/pallet_invalid_arg.rs new file mode 100644 index 0000000000000..1fc42f6511cfa --- /dev/null +++ b/frame/support/test/tests/pallet_ui/pallet_invalid_arg.rs @@ -0,0 +1,4 @@ +#[frame_support::pallet(foo)] +pub mod pallet {} + +fn main() {} diff --git a/frame/support/test/tests/pallet_ui/pallet_invalid_arg.stderr b/frame/support/test/tests/pallet_ui/pallet_invalid_arg.stderr new file mode 100644 index 0000000000000..234dc07f2ece3 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/pallet_invalid_arg.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet macro call: unexpected attribute. Macro call must be bare, such as `#[frame_support::pallet]` or `#[pallet]`, or must specify the `dev_mode` attribute, such as `#[frame_support::pallet(dev_mode)]` or #[pallet(dev_mode)]. + --> tests/pallet_ui/pallet_invalid_arg.rs:1:25 + | +1 | #[frame_support::pallet(foo)] + | ^^^ diff --git a/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs b/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs new file mode 100644 index 0000000000000..97e0d585d0ce9 --- /dev/null +++ b/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs @@ -0,0 +1,35 @@ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +#[frame_support::pallet(dev_mode)] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + // The struct on which we build all of our Pallet logic. + #[pallet::pallet] + pub struct Pallet(_); + + // Your Pallet's configuration trait, representing custom external types and interfaces. 
+ #[pallet::config] + pub trait Config: frame_system::Config {} + + // The MEL requirement for bounded pallets is skipped by `dev_mode`. + #[pallet::storage] + type MyStorage = StorageValue<_, Vec>; + + // Your Pallet's callable functions. + #[pallet::call] + impl Pallet { + // No need to define a `weight` attribute here because of `dev_mode`. + pub fn my_call(_origin: OriginFor) -> DispatchResult { + Ok(()) + } + } + + // Your Pallet's internal functions. + impl Pallet {} +} + +fn main() {} From 3a450ffe936d5c2e5dc1208abecd11cc71aefcbe Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Mon, 7 Nov 2022 22:42:16 +0100 Subject: [PATCH 317/348] BlockId removal: &Hash to Hash (#12626) It changes &Block::Hash argument to Block::Hash. This PR is part of BlockId::Number refactoring analysis (paritytech/substrate#11292) --- bin/node/bench/src/import.rs | 2 +- bin/node/inspect/src/lib.rs | 4 +- client/api/src/backend.rs | 38 +-- client/api/src/client.rs | 11 +- client/api/src/in_mem.rs | 52 +-- client/api/src/proof_provider.rs | 10 +- .../basic-authorship/src/basic_authorship.rs | 2 +- .../incoming_requests_handler.rs | 2 +- client/beefy/src/tests.rs | 26 +- client/beefy/src/worker.rs | 16 +- client/block-builder/src/lib.rs | 2 +- client/cli/src/commands/export_state_cmd.rs | 2 +- client/consensus/babe/src/tests.rs | 10 +- .../manual-seal/src/finalize_block.rs | 2 +- client/db/benches/state_access.rs | 12 +- client/db/src/lib.rs | 300 +++++++++--------- client/finality-grandpa/src/environment.rs | 2 +- client/finality-grandpa/src/finality_proof.rs | 6 +- client/finality-grandpa/src/import.rs | 6 +- client/finality-grandpa/src/tests.rs | 14 +- client/finality-grandpa/src/warp_proof.rs | 4 +- client/network/bitswap/src/lib.rs | 2 +- .../src/light_client_requests/handler.rs | 6 +- .../network/sync/src/block_request_handler.rs | 6 +- client/network/sync/src/lib.rs | 4 +- .../network/sync/src/state_request_handler.rs | 4 +- client/network/test/src/block_import.rs | 2 +- client/network/test/src/lib.rs | 14 +- client/network/test/src/sync.rs | 81 +++-- client/rpc/src/chain/tests.rs | 4 +- client/rpc/src/state/state_full.rs | 32 +- .../service/src/chain_ops/export_raw_state.rs | 2 +- client/service/src/client/call_executor.rs | 8 +- client/service/src/client/client.rs | 79 +++-- client/service/test/src/client/mod.rs | 42 +-- client/tracing/src/block/mod.rs | 2 +- client/transaction-pool/benches/basics.rs | 2 +- client/transaction-pool/src/api.rs | 2 +- client/transaction-pool/src/graph/pool.rs | 2 +- client/transaction-pool/src/lib.rs | 12 +- client/transaction-pool/src/tests.rs | 2 +- primitives/blockchain/src/backend.rs | 10 +- test-utils/client/src/client_ext.rs | 4 +- .../runtime/transaction-pool/src/lib.rs | 4 +- .../frame/benchmarking-cli/src/block/bench.rs | 2 +- .../frame/benchmarking-cli/src/storage/cmd.rs | 4 +- .../benchmarking-cli/src/storage/read.rs | 8 +- .../benchmarking-cli/src/storage/write.rs | 4 +- .../rpc/state-trie-migration-rpc/src/lib.rs | 2 +- 49 files changed, 428 insertions(+), 441 deletions(-) diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index 26f9391800ceb..28a322834271c 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -135,7 +135,7 @@ impl core::Benchmark for ImportBenchmark { // Sanity checks. 
context .client - .state_at(&hash) + .state_at(hash) .expect("state_at failed for block#1") .inspect_state(|| { match self.block_type { diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index 1e44cec914995..528dce14f46a5 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -144,7 +144,7 @@ impl> Inspector let not_found = format!("Could not find block {:?}", id); let body = self .chain - .block_body(&hash)? + .block_body(hash)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; let header = self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; @@ -155,7 +155,7 @@ impl> Inspector let not_found = format!("Could not find block {:?}", id); let body = self .chain - .block_body(&hash)? + .block_body(hash)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; let header = self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index df9c92b626ecc..79cc0d7a16bcc 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -215,13 +215,13 @@ pub trait BlockImportOperation { /// Mark a block as finalized. fn mark_finalized( &mut self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()>; /// Mark a block as new head. If both block import and set head are specified, set head /// overrides block import's best block rule. - fn mark_head(&mut self, hash: &Block::Hash) -> sp_blockchain::Result<()>; + fn mark_head(&mut self, hash: Block::Hash) -> sp_blockchain::Result<()>; /// Add a transaction index operation. fn update_transaction_index(&mut self, index: Vec) @@ -251,7 +251,7 @@ pub trait Finalizer> { fn apply_finality( &self, operation: &mut ClientImportOperation, - block: &Block::Hash, + block: Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()>; @@ -271,7 +271,7 @@ pub trait Finalizer> { /// while performing major synchronization work. fn finalize_block( &self, - block: &Block::Hash, + block: Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()>; @@ -359,21 +359,21 @@ pub trait StorageProvider> { /// Given a block's `Hash` and a key, return the value under the key in that block. fn storage( &self, - hash: &Block::Hash, + hash: Block::Hash, key: &StorageKey, ) -> sp_blockchain::Result>; /// Given a block's `Hash` and a key prefix, return the matching storage keys in that block. fn storage_keys( &self, - hash: &Block::Hash, + hash: Block::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result>; /// Given a block's `Hash` and a key, return the value under the hash in that block. fn storage_hash( &self, - hash: &Block::Hash, + hash: Block::Hash, key: &StorageKey, ) -> sp_blockchain::Result>; @@ -381,7 +381,7 @@ pub trait StorageProvider> { /// in that block. fn storage_pairs( &self, - hash: &Block::Hash, + hash: Block::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result>; @@ -389,7 +389,7 @@ pub trait StorageProvider> { /// keys in that block. fn storage_keys_iter<'a>( &self, - hash: &Block::Hash, + hash: Block::Hash, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result>; @@ -398,7 +398,7 @@ pub trait StorageProvider> { /// that block. fn child_storage( &self, - hash: &Block::Hash, + hash: Block::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result>; @@ -407,7 +407,7 @@ pub trait StorageProvider> { /// storage keys. 
fn child_storage_keys( &self, - hash: &Block::Hash, + hash: Block::Hash, child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result>; @@ -416,7 +416,7 @@ pub trait StorageProvider> { /// return a `KeyIterator` that iterates matching storage keys in that block. fn child_storage_keys_iter<'a>( &self, - hash: &Block::Hash, + hash: Block::Hash, child_info: ChildInfo, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, @@ -426,7 +426,7 @@ pub trait StorageProvider> { /// block. fn child_storage_hash( &self, - hash: &Block::Hash, + hash: Block::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result>; @@ -466,7 +466,7 @@ pub trait Backend: AuxStore + Send + Sync { fn begin_state_operation( &self, operation: &mut Self::BlockImportOperation, - block: &Block::Hash, + block: Block::Hash, ) -> sp_blockchain::Result<()>; /// Commit block insertion. @@ -480,7 +480,7 @@ pub trait Backend: AuxStore + Send + Sync { /// This should only be called if the parent of the given block has been finalized. fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()>; @@ -489,7 +489,7 @@ pub trait Backend: AuxStore + Send + Sync { /// This should only be called for blocks that are already finalized. fn append_justification( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Justification, ) -> sp_blockchain::Result<()>; @@ -503,12 +503,12 @@ pub trait Backend: AuxStore + Send + Sync { fn offchain_storage(&self) -> Option; /// Returns true if state for given block is available. - fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor) -> bool { + fn have_state_at(&self, hash: Block::Hash, _number: NumberFor) -> bool { self.state_at(hash).is_ok() } /// Returns state backend with post-state of given block. - fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result; + fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result; /// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set it will attempt to /// revert past any finalized block, this is unsafe and can potentially leave the node in an @@ -524,7 +524,7 @@ pub trait Backend: AuxStore + Send + Sync { ) -> sp_blockchain::Result<(NumberFor, HashSet)>; /// Discard non-best, unfinalized leaf block. - fn remove_leaf_block(&self, hash: &Block::Hash) -> sp_blockchain::Result<()>; + fn remove_leaf_block(&self, hash: Block::Hash) -> sp_blockchain::Result<()>; /// Insert auxiliary data into key-value store. fn insert_aux< diff --git a/client/api/src/client.rs b/client/api/src/client.rs index a07198abfe8d6..bb88853d23afb 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -110,7 +110,7 @@ pub trait BlockBackend { /// Get block body by ID. Returns `None` if the body is not stored. fn block_body( &self, - hash: &Block::Hash, + hash: Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>>; /// Get all indexed transactions for a block, @@ -118,8 +118,7 @@ pub trait BlockBackend { /// /// Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. - fn block_indexed_body(&self, hash: &Block::Hash) - -> sp_blockchain::Result>>>; + fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result>>>; /// Get full block by id. fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; @@ -129,7 +128,7 @@ pub trait BlockBackend { -> sp_blockchain::Result; /// Get block justifications for the block with the given id. 
- fn justifications(&self, hash: &Block::Hash) -> sp_blockchain::Result>; + fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result>; /// Get block hash by number. fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result>; @@ -138,10 +137,10 @@ pub trait BlockBackend { /// /// Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. - fn indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result>>; + fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result>>; /// Check if transaction index exists. - fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { + fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result { Ok(self.indexed_transaction(hash)?.is_some()) } diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 26364f28acca2..5a3e25ab5987b 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -271,16 +271,16 @@ impl Blockchain { fn finalize_header( &self, - block: &Block::Hash, + block: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { let mut storage = self.storage.write(); - storage.finalized_hash = *block; + storage.finalized_hash = block; if justification.is_some() { let block = storage .blocks - .get_mut(block) + .get_mut(&block) .expect("hash was fetched from a block in the db; qed"); let block_justifications = match block { @@ -295,7 +295,7 @@ impl Blockchain { fn append_justification( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Justification, ) -> sp_blockchain::Result<()> { let mut storage = self.storage.write(); @@ -405,17 +405,17 @@ impl HeaderMetadata for Blockchain { impl blockchain::Backend for Blockchain { fn body( &self, - hash: &Block::Hash, + hash: Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>> { Ok(self .storage .read() .blocks - .get(hash) + .get(&hash) .and_then(|b| b.extrinsics().map(|x| x.to_vec()))) } - fn justifications(&self, hash: &Block::Hash) -> sp_blockchain::Result> { + fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result> { Ok(self.storage.read().blocks.get(&hash).and_then(|b| b.justifications().cloned())) } @@ -445,13 +445,13 @@ impl blockchain::Backend for Blockchain { unimplemented!() } - fn indexed_transaction(&self, _hash: &Block::Hash) -> sp_blockchain::Result>> { + fn indexed_transaction(&self, _hash: Block::Hash) -> sp_blockchain::Result>> { unimplemented!("Not supported by the in-mem backend.") } fn block_indexed_body( &self, - _hash: &Block::Hash, + _hash: Block::Hash, ) -> sp_blockchain::Result>>> { unimplemented!("Not supported by the in-mem backend.") } @@ -596,16 +596,16 @@ where fn mark_finalized( &mut self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { - self.finalized_blocks.push((*hash, justification)); + self.finalized_blocks.push((hash, justification)); Ok(()) } - fn mark_head(&mut self, hash: &Block::Hash) -> sp_blockchain::Result<()> { + fn mark_head(&mut self, hash: Block::Hash) -> sp_blockchain::Result<()> { assert!(self.pending_block.is_none(), "Only one set block per operation is allowed"); - self.set_head = Some(*hash); + self.set_head = Some(hash); Ok(()) } @@ -677,7 +677,7 @@ where type OffchainStorage = OffchainStorage; fn begin_operation(&self) -> sp_blockchain::Result { - let old_state = self.state_at(&Default::default())?; + let old_state = self.state_at(Default::default())?; Ok(BlockImportOperation { pending_block: None, 
old_state, @@ -691,7 +691,7 @@ where fn begin_state_operation( &self, operation: &mut Self::BlockImportOperation, - block: &Block::Hash, + block: Block::Hash, ) -> sp_blockchain::Result<()> { operation.old_state = self.state_at(block)?; Ok(()) @@ -700,7 +700,7 @@ where fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> { if !operation.finalized_blocks.is_empty() { for (block, justification) in operation.finalized_blocks { - self.blockchain.finalize_header(&block, justification)?; + self.blockchain.finalize_header(block, justification)?; } } @@ -733,7 +733,7 @@ where fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { self.blockchain.finalize_header(hash, justification) @@ -741,7 +741,7 @@ where fn append_justification( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Justification, ) -> sp_blockchain::Result<()> { self.blockchain.append_justification(hash, justification) @@ -759,14 +759,14 @@ where None } - fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result { - if *hash == Default::default() { + fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result { + if hash == Default::default() { return Ok(Self::State::default()) } self.states .read() - .get(hash) + .get(&hash) .cloned() .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", hash))) } @@ -779,7 +779,7 @@ where Ok((Zero::zero(), HashSet::new())) } - fn remove_leaf_block(&self, _hash: &Block::Hash) -> sp_blockchain::Result<()> { + fn remove_leaf_block(&self, _hash: Block::Hash) -> sp_blockchain::Result<()> { Ok(()) } @@ -862,13 +862,13 @@ mod tests { let blockchain = test_blockchain(); let last_finalized = blockchain.last_finalized().unwrap(); - blockchain.append_justification(&last_finalized, (ID2, vec![4])).unwrap(); + blockchain.append_justification(last_finalized, (ID2, vec![4])).unwrap(); let justifications = { let mut just = Justifications::from((ID1, vec![3])); just.append((ID2, vec![4])); just }; - assert_eq!(blockchain.justifications(&last_finalized).unwrap(), Some(justifications)); + assert_eq!(blockchain.justifications(last_finalized).unwrap(), Some(justifications)); } #[test] @@ -876,9 +876,9 @@ mod tests { let blockchain = test_blockchain(); let last_finalized = blockchain.last_finalized().unwrap(); - blockchain.append_justification(&last_finalized, (ID2, vec![0])).unwrap(); + blockchain.append_justification(last_finalized, (ID2, vec![0])).unwrap(); assert!(matches!( - blockchain.append_justification(&last_finalized, (ID2, vec![1])), + blockchain.append_justification(last_finalized, (ID2, vec![1])), Err(sp_blockchain::Error::BadJustification(_)), )); } diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 4ddbf883b83f2..01e35df1dec1c 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -27,7 +27,7 @@ pub trait ProofProvider { /// Reads storage value at a given block + key, returning read proof. fn read_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, keys: &mut dyn Iterator, ) -> sp_blockchain::Result; @@ -35,7 +35,7 @@ pub trait ProofProvider { /// read proof. fn read_child_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result; @@ -46,7 +46,7 @@ pub trait ProofProvider { /// No changes are made. 
fn execution_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)>; @@ -61,7 +61,7 @@ pub trait ProofProvider { /// Returns combined proof and the numbers of collected keys. fn read_proof_collection( &self, - hash: &Block::Hash, + hash: Block::Hash, start_keys: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(CompactProof, u32)>; @@ -76,7 +76,7 @@ pub trait ProofProvider { /// end. fn storage_collection( &self, - hash: &Block::Hash, + hash: Block::Hash, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result>; diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index da98ccab9cb07..b69294bf6ccb0 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -736,7 +736,7 @@ mod tests { let api = client.runtime_api(); api.execute_block(&block_id, proposal.block).unwrap(); - let state = backend.state_at(&genesis_hash).unwrap(); + let state = backend.state_at(genesis_hash).unwrap(); let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap(); diff --git a/client/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/beefy/src/communication/request_response/incoming_requests_handler.rs index 3affbbe870ad7..9f02b7162b54c 100644 --- a/client/beefy/src/communication/request_response/incoming_requests_handler.rs +++ b/client/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -153,7 +153,7 @@ where self.client.block_hash(request.payload.begin).map_err(Error::Client)? { self.client - .justifications(&hash) + .justifications(hash) .map_err(Error::Client)? .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) // No BEEFY justification present. 
diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 62d8a45f4471c..2f0306209071e 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -512,7 +512,7 @@ fn finalize_block_and_wait_for_beefy( peers.clone().for_each(|(index, _)| { let client = net.lock().peer(index).client().as_client(); let finalize = client.expect_block_hash_from_id(&BlockId::number(*block)).unwrap(); - client.finalize_block(&finalize, None).unwrap(); + client.finalize_block(finalize, None).unwrap(); }) } @@ -608,7 +608,7 @@ fn lagging_validators() { .expect_block_hash_from_id(&BlockId::number(25)) .unwrap(); let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - net.lock().peer(0).client().as_client().finalize_block(&finalize, None).unwrap(); + net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); @@ -616,7 +616,7 @@ fn lagging_validators() { // Bob catches up and also finalizes #25 let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); - net.lock().peer(1).client().as_client().finalize_block(&finalize, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // expected beefy finalizes block #17 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]); @@ -637,7 +637,7 @@ fn lagging_validators() { .as_client() .expect_block_hash_from_id(&BlockId::number(60)) .unwrap(); - net.lock().peer(0).client().as_client().finalize_block(&finalize, None).unwrap(); + net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); streams_empty_after_timeout(best_blocks, &net, &mut runtime, timeout); @@ -645,7 +645,7 @@ fn lagging_validators() { // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); - net.lock().peer(1).client().as_client().finalize_block(&finalize, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // verify beefy skips intermediary votes, and successfully finalizes mandatory block #60 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[60]); @@ -696,9 +696,9 @@ fn correct_beefy_payload() { .as_client() .expect_block_hash_from_id(&BlockId::number(11)) .unwrap(); - net.lock().peer(0).client().as_client().finalize_block(&hashof11, None).unwrap(); - net.lock().peer(1).client().as_client().finalize_block(&hashof11, None).unwrap(); - net.lock().peer(3).client().as_client().finalize_block(&hashof11, None).unwrap(); + net.lock().peer(0).client().as_client().finalize_block(hashof11, None).unwrap(); + net.lock().peer(1).client().as_client().finalize_block(hashof11, None).unwrap(); + net.lock().peer(3).client().as_client().finalize_block(hashof11, None).unwrap(); // verify consensus is _not_ reached let timeout = Some(Duration::from_millis(250)); @@ -708,7 +708,7 @@ fn correct_beefy_payload() { // 3rd good validator catches up and votes as well let (best_blocks, 
versioned_finality_proof) = get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); - net.lock().peer(2).client().as_client().finalize_block(&hashof11, None).unwrap(); + net.lock().peer(2).client().as_client().finalize_block(hashof11, None).unwrap(); // verify consensus is reached wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[11]); @@ -760,7 +760,7 @@ fn beefy_importing_blocks() { // none in backend, assert_eq!( full_client - .justifications(&hashof1) + .justifications(hashof1) .unwrap() .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), None @@ -799,7 +799,7 @@ fn beefy_importing_blocks() { // still not in backend (worker is responsible for appending to backend), assert_eq!( full_client - .justifications(&hashof2) + .justifications(hashof2) .unwrap() .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), None @@ -843,7 +843,7 @@ fn beefy_importing_blocks() { // none in backend, assert_eq!( full_client - .justifications(&hashof3) + .justifications(hashof3) .unwrap() .and_then(|j| j.get(BEEFY_ENGINE_ID).cloned()), None @@ -941,7 +941,7 @@ fn on_demand_beefy_justification_sync() { get_beefy_streams(&mut net.lock(), [(dave_index, BeefyKeyring::Dave)].into_iter()); let client = net.lock().peer(dave_index).client().as_client(); let hashof1 = client.expect_block_hash_from_id(&BlockId::number(1)).unwrap(); - client.finalize_block(&hashof1, None).unwrap(); + client.finalize_block(hashof1, None).unwrap(); // Give Dave task some cpu cycles to process the finality notification, run_for(Duration::from_millis(100), &net, &mut runtime); // freshly spun up Dave now needs to listen for gossip to figure out the state of his peers. diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index ea9e0d0c33999..9c14128624518 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -511,7 +511,7 @@ where .expect("forwards closure result; the closure always returns Ok; qed."); self.backend - .append_justification(&hash, (BEEFY_ENGINE_ID, finality_proof.encode())) + .append_justification(hash, (BEEFY_ENGINE_ID, finality_proof.encode())) }) { error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof); } @@ -709,7 +709,7 @@ where // a BEEFY justification, or at this session's boundary; voter will resume from there. 
loop { if let Some(true) = blockchain - .justifications(&header.hash()) + .justifications(header.hash()) .ok() .flatten() .map(|justifs| justifs.get(BEEFY_ENGINE_ID).is_some()) @@ -1375,8 +1375,8 @@ pub(crate) mod tests { // finalize 1 and 2 without justifications let hashof1 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(1)).unwrap(); let hashof2 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(2)).unwrap(); - backend.finalize_block(&hashof1, None).unwrap(); - backend.finalize_block(&hashof2, None).unwrap(); + backend.finalize_block(hashof1, None).unwrap(); + backend.finalize_block(hashof2, None).unwrap(); let justif = create_finality_proof(2); // create new session at block #2 @@ -1401,7 +1401,7 @@ pub(crate) mod tests { })); // check BEEFY justifications are also appended to backend - let justifs = backend.blockchain().justifications(&hashof2).unwrap().unwrap(); + let justifs = backend.blockchain().justifications(hashof2).unwrap().unwrap(); assert!(justifs.get(BEEFY_ENGINE_ID).is_some()) } @@ -1512,7 +1512,7 @@ pub(crate) mod tests { // finalize 13 without justifications let hashof13 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(13)).unwrap(); - net.peer(0).client().as_client().finalize_block(&hashof13, None).unwrap(); + net.peer(0).client().as_client().finalize_block(hashof13, None).unwrap(); // Test initialization at session boundary. { @@ -1551,7 +1551,7 @@ pub(crate) mod tests { let hashof10 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(10)).unwrap(); backend - .append_justification(&hashof10, (BEEFY_ENGINE_ID, justif.encode())) + .append_justification(hashof10, (BEEFY_ENGINE_ID, justif.encode())) .unwrap(); // initialize voter at block 13, expect rounds initialized at last beefy finalized 10 @@ -1587,7 +1587,7 @@ pub(crate) mod tests { let hashof12 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(12)).unwrap(); backend - .append_justification(&hashof12, (BEEFY_ENGINE_ID, justif.encode())) + .append_justification(hashof12, (BEEFY_ENGINE_ID, justif.encode())) .unwrap(); // initialize voter at block 13, expect rounds initialized at last beefy finalized 12 diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index cd5e62e264200..b6c2ac3ba5d68 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -258,7 +258,7 @@ where let proof = self.api.extract_proof(); - let state = self.backend.state_at(&self.parent_hash)?; + let state = self.backend.state_at(self.parent_hash)?; let storage_changes = self .api diff --git a/client/cli/src/commands/export_state_cmd.rs b/client/cli/src/commands/export_state_cmd.rs index 1bcf21f388a62..04bce0c1d707a 100644 --- a/client/cli/src/commands/export_state_cmd.rs +++ b/client/cli/src/commands/export_state_cmd.rs @@ -69,7 +69,7 @@ impl ExportStateCmd { Some(id) => client.expect_block_hash_from_id(&id)?, None => client.usage_info().chain.best_hash, }; - let raw_state = sc_service::chain_ops::export_raw_state(client, &hash)?; + let raw_state = sc_service::chain_ops::export_raw_state(client, hash)?; input_spec.set_storage(raw_state); info!("Generating new chain spec..."); diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index abffcb0a2b4c3..ee1605f037ff4 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -822,7 +822,7 @@ fn revert_not_allowed_for_finalized() { let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 3); // Finalize 
best block - client.finalize_block(&canon[2], None, false).unwrap(); + client.finalize_block(canon[2], None, false).unwrap(); // Revert canon chain to last finalized block revert(client.clone(), backend, 100).expect("revert should work for baked test scenario"); @@ -884,7 +884,7 @@ fn importing_epoch_change_block_prunes_tree() { // We finalize block #13 from the canon chain, so on the next epoch // change the tree should be pruned, to not contain F (#7). - client.finalize_block(&canon_hashes[12], None, false).unwrap(); + client.finalize_block(canon_hashes[12], None, false).unwrap(); propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 7); // at this point no hashes from the first fork must exist on the tree @@ -911,7 +911,7 @@ fn importing_epoch_change_block_prunes_tree() { .any(|h| fork_3.contains(h)),); // finalizing block #25 from the canon chain should prune out the second fork - client.finalize_block(&canon_hashes[24], None, false).unwrap(); + client.finalize_block(canon_hashes[24], None, false).unwrap(); propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 8); // at this point no hashes from the second fork must exist on the tree @@ -1049,7 +1049,7 @@ fn obsolete_blocks_aux_data_cleanup() { assert!(aux_data_check(&fork3_hashes, true)); // Finalize A3 - client.finalize_block(&fork1_hashes[2], None, true).unwrap(); + client.finalize_block(fork1_hashes[2], None, true).unwrap(); // Wiped: A1, A2 assert!(aux_data_check(&fork1_hashes[..2], false)); @@ -1060,7 +1060,7 @@ fn obsolete_blocks_aux_data_cleanup() { // Present C4, C5 assert!(aux_data_check(&fork3_hashes, true)); - client.finalize_block(&fork1_hashes[3], None, true).unwrap(); + client.finalize_block(fork1_hashes[3], None, true).unwrap(); // Wiped: A3 assert!(aux_data_check(&fork1_hashes[2..3], false)); diff --git a/client/consensus/manual-seal/src/finalize_block.rs b/client/consensus/manual-seal/src/finalize_block.rs index e11353e2da611..cee4d59b6d6e5 100644 --- a/client/consensus/manual-seal/src/finalize_block.rs +++ b/client/consensus/manual-seal/src/finalize_block.rs @@ -46,7 +46,7 @@ where { let FinalizeBlockParams { hash, mut sender, justification, finalizer, .. 
} = params; - match finalizer.finalize_block(&hash, justification, true) { + match finalizer.finalize_block(hash, justification, true) { Err(e) => { log::warn!("Failed to finalize block {}", e); rpc::send_result(&mut sender, Err(e.into())) diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index ccceae1f5b419..bab79fe7c90db 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -66,7 +66,7 @@ fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 for i in 0..10 { let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, &parent_hash).unwrap(); + db.begin_state_operation(&mut op, parent_hash).unwrap(); let mut header = Header { number, @@ -83,7 +83,7 @@ fn insert_blocks(db: &Backend, storage: Vec<(Vec, Vec)>) -> H256 .map(|(k, v)| (k.clone(), Some(v.clone()))) .collect::>(); - let (state_root, tx) = db.state_at(&parent_hash).unwrap().storage_root( + let (state_root, tx) = db.state_at(parent_hash).unwrap().storage_root( changes.iter().map(|(k, v)| (k.as_slice(), v.as_deref())), StateVersion::V1, ); @@ -175,7 +175,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(&block_hash).expect("Creates state"), + || backend.state_at(block_hash).expect("Creates state"), |state| { for key in keys.iter().cycle().take(keys.len() * multiplier) { let _ = state.storage(&key).expect("Doesn't fail").unwrap(); @@ -213,7 +213,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(&block_hash).expect("Creates state"), + || backend.state_at(block_hash).expect("Creates state"), |state| { for key in keys.iter().take(1).cycle().take(multiplier) { let _ = state.storage(&key).expect("Doesn't fail").unwrap(); @@ -251,7 +251,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(&block_hash).expect("Creates state"), + || backend.state_at(block_hash).expect("Creates state"), |state| { for key in keys.iter().take(1).cycle().take(multiplier) { let _ = state.storage_hash(&key).expect("Doesn't fail").unwrap(); @@ -289,7 +289,7 @@ fn state_access_benchmarks(c: &mut Criterion) { group.bench_function(desc, |b| { b.iter_batched( - || backend.state_at(&block_hash).expect("Creates state"), + || backend.state_at(block_hash).expect("Creates state"), |state| { let _ = state .storage_hash(sp_core::storage::well_known_keys::CODE) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 0138df36a38fb..fc031e2aaba59 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -577,9 +577,9 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha } impl sc_client_api::blockchain::Backend for BlockchainDb { - fn body(&self, hash: &Block::Hash) -> ClientResult>> { + fn body(&self, hash: Block::Hash) -> ClientResult>> { if let Some(body) = - read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::(*hash))? + read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, BlockId::Hash::(hash))? { // Plain body match Decode::decode(&mut &body[..]) { @@ -596,7 +596,7 @@ impl sc_client_api::blockchain::Backend for BlockchainDb(*hash), + BlockId::Hash::(hash), )? 
{ match Vec::>::decode(&mut &index[..]) { Ok(index) => { @@ -642,12 +642,12 @@ impl sc_client_api::blockchain::Backend for BlockchainDb ClientResult> { + fn justifications(&self, hash: Block::Hash) -> ClientResult> { match read_db( &*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATIONS, - BlockId::::Hash(*hash), + BlockId::::Hash(hash), )? { Some(justifications) => match Decode::decode(&mut &justifications[..]) { Ok(justifications) => Ok(Some(justifications)), @@ -686,20 +686,20 @@ impl sc_client_api::blockchain::Backend for BlockchainDb ClientResult>> { + fn indexed_transaction(&self, hash: Block::Hash) -> ClientResult>> { Ok(self.db.get(columns::TRANSACTION, hash.as_ref())) } - fn has_indexed_transaction(&self, hash: &Block::Hash) -> ClientResult { + fn has_indexed_transaction(&self, hash: Block::Hash) -> ClientResult { Ok(self.db.contains(columns::TRANSACTION, hash.as_ref())) } - fn block_indexed_body(&self, hash: &Block::Hash) -> ClientResult>>> { + fn block_indexed_body(&self, hash: Block::Hash) -> ClientResult>>> { let body = match read_db( &*self.db, columns::KEY_LOOKUP, columns::BODY_INDEX, - BlockId::::Hash(*hash), + BlockId::::Hash(hash), )? { Some(body) => body, None => return Ok(None), @@ -914,16 +914,16 @@ impl sc_client_api::backend::BlockImportOperation fn mark_finalized( &mut self, - block: &Block::Hash, + block: Block::Hash, justification: Option, ) -> ClientResult<()> { - self.finalized_blocks.push((*block, justification)); + self.finalized_blocks.push((block, justification)); Ok(()) } - fn mark_head(&mut self, hash: &Block::Hash) -> ClientResult<()> { + fn mark_head(&mut self, hash: Block::Hash) -> ClientResult<()> { assert!(self.set_head.is_none(), "Only one set head per operation is allowed"); - self.set_head = Some(*hash); + self.set_head = Some(hash); Ok(()) } @@ -1176,7 +1176,7 @@ impl Backend { info.finalized_hash != Default::default() && sc_client_api::Backend::have_state_at( &backend, - &info.finalized_hash, + info.finalized_hash, info.finalized_number, ) { backend.blockchain.update_meta(MetaUpdate { @@ -1289,7 +1289,7 @@ impl Backend { fn finalize_block_with_transaction( &self, transaction: &mut Transaction, - hash: &Block::Hash, + hash: Block::Hash, header: &Block::Header, last_finalized: Option, justification: Option, @@ -1300,7 +1300,7 @@ impl Backend { self.ensure_sequential_finalization(header, last_finalized)?; let with_state = sc_client_api::Backend::have_state_at(self, hash, number); - self.note_finalized(transaction, header, *hash, finalization_displaced, with_state)?; + self.note_finalized(transaction, header, hash, finalization_displaced, with_state)?; if let Some(justification) = justification { transaction.set_from_vec( @@ -1309,7 +1309,7 @@ impl Backend { Justifications::from(justification).encode(), ); } - Ok(MetaUpdate { hash: *hash, number, is_best: false, is_finalized: true, with_state }) + Ok(MetaUpdate { hash, number, is_best: false, is_finalized: true, with_state }) } // performs forced canonicalization with a delay after importing a non-finalized block. @@ -1340,7 +1340,7 @@ impl Backend { )) })? 
}; - if !sc_client_api::Backend::have_state_at(self, &hash, new_canonical.saturated_into()) { + if !sc_client_api::Backend::have_state_at(self, hash, new_canonical.saturated_into()) { return Ok(()) } @@ -1372,7 +1372,7 @@ impl Backend { let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; meta_updates.push(self.finalize_block_with_transaction( &mut transaction, - &block_hash, + block_hash, &block_header, Some(last_finalized_hash), justification, @@ -1703,7 +1703,7 @@ impl Backend { } transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - if sc_client_api::Backend::have_state_at(self, &f_hash, f_num) && + if sc_client_api::Backend::have_state_at(self, f_hash, f_num) && self.storage .state_db .best_canonical() @@ -1978,9 +1978,9 @@ impl sc_client_api::backend::Backend for Backend { fn begin_state_operation( &self, operation: &mut Self::BlockImportOperation, - block: &Block::Hash, + block: Block::Hash, ) -> ClientResult<()> { - if *block == Default::default() { + if block == Default::default() { operation.old_state = self.empty_state()?; } else { operation.old_state = self.state_at(block)?; @@ -2008,11 +2008,11 @@ impl sc_client_api::backend::Backend for Backend { fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> ClientResult<()> { let mut transaction = Transaction::new(); - let header = self.blockchain.expect_header(BlockId::Hash(*hash))?; + let header = self.blockchain.expect_header(BlockId::Hash(hash))?; let mut displaced = None; let m = self.finalize_block_with_transaction( @@ -2030,11 +2030,11 @@ impl sc_client_api::backend::Backend for Backend { fn append_justification( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Justification, ) -> ClientResult<()> { let mut transaction: Transaction = Transaction::new(); - let header = self.blockchain.expect_header(BlockId::Hash(*hash))?; + let header = self.blockchain.expect_header(BlockId::Hash(hash))?; let number = *header.number(); // Check if the block is finalized first. @@ -2043,7 +2043,7 @@ impl sc_client_api::backend::Backend for Backend { // We can do a quick check first, before doing a proper but more expensive check if number > self.blockchain.info().finalized_number || - (*hash != last_finalized && !is_descendent_of(hash, &last_finalized)?) + (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) 
{ return Err(ClientError::NotInFinalizedChain) } @@ -2061,7 +2061,7 @@ impl sc_client_api::backend::Backend for Backend { transaction.set_from_vec( columns::JUSTIFICATIONS, - &utils::number_and_hash_to_lookup_key(number, *hash)?, + &utils::number_and_hash_to_lookup_key(number, hash)?, justifications.encode(), ); @@ -2154,7 +2154,7 @@ impl sc_client_api::backend::Backend for Backend { let prev_hash = if prev_number == best_number { best_hash } else { *removed.parent_hash() }; - if !self.have_state_at(&prev_hash, prev_number) { + if !self.have_state_at(prev_hash, prev_number) { return Ok(c.saturated_into::>()) } @@ -2183,7 +2183,7 @@ impl sc_client_api::backend::Backend for Backend { if hash == hash_to_revert { if !number_to_revert.is_zero() && self.have_state_at( - &prev_hash, + prev_hash, number_to_revert - One::one(), ) { let lookup_key = utils::number_and_hash_to_lookup_key( @@ -2247,14 +2247,14 @@ impl sc_client_api::backend::Backend for Backend { Ok((reverted, reverted_finalized)) } - fn remove_leaf_block(&self, hash: &Block::Hash) -> ClientResult<()> { + fn remove_leaf_block(&self, hash: Block::Hash) -> ClientResult<()> { let best_hash = self.blockchain.info().best_hash; - if best_hash == *hash { + if best_hash == hash { return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {:?}", hash))) } - let hdr = self.blockchain.header_metadata(*hash)?; + let hdr = self.blockchain.header_metadata(hash)?; if !self.have_state_at(hash, hdr.number) { return Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", @@ -2263,7 +2263,7 @@ impl sc_client_api::backend::Backend for Backend { } let mut leaves = self.blockchain.leaves.write(); - if !leaves.contains(hdr.number, *hash) { + if !leaves.contains(hdr.number, hash) { return Err(sp_blockchain::Error::Backend(format!( "Can't remove non-leaf block {:?}", hash @@ -2271,7 +2271,7 @@ impl sc_client_api::backend::Backend for Backend { } let mut transaction = Transaction::new(); - if let Some(commit) = self.storage.state_db.remove(hash) { + if let Some(commit) = self.storage.state_db.remove(&hash) { apply_state_commit(&mut transaction, commit); } transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); @@ -2280,7 +2280,7 @@ impl sc_client_api::backend::Backend for Backend { .blockchain() .children(hdr.parent)? 
.into_iter() - .filter(|child_hash| child_hash != hash) + .filter(|child_hash| *child_hash != hash) .collect(); let parent_leaf = if children.is_empty() { children::remove_children( @@ -2301,7 +2301,7 @@ impl sc_client_api::backend::Backend for Backend { None }; - let remove_outcome = leaves.remove(*hash, hdr.number, parent_leaf); + let remove_outcome = leaves.remove(hash, hdr.number, parent_leaf); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); if let Err(e) = self.storage.db.commit(transaction) { if let Some(outcome) = remove_outcome { @@ -2309,7 +2309,7 @@ impl sc_client_api::backend::Backend for Backend { } return Err(e.into()) } - self.blockchain().remove_header_metadata(*hash); + self.blockchain().remove_header_metadata(hash); Ok(()) } @@ -2317,8 +2317,8 @@ impl sc_client_api::backend::Backend for Backend { &self.blockchain } - fn state_at(&self, hash: &Block::Hash) -> ClientResult { - if hash == &self.blockchain.meta.read().genesis_hash { + fn state_at(&self, hash: Block::Hash) -> ClientResult { + if hash == self.blockchain.meta.read().genesis_hash { if let Some(genesis_state) = &*self.genesis_state.read() { let root = genesis_state.root; let db_state = DbStateBuilder::::new(genesis_state.clone(), root) @@ -2330,7 +2330,7 @@ impl sc_client_api::backend::Backend for Backend { } } - match self.blockchain.header_metadata(*hash) { + match self.blockchain.header_metadata(hash) { Ok(ref hdr) => { let hint = || { sc_state_db::NodeDb::get(self.storage.as_ref(), hdr.state_root.as_ref()) @@ -2338,7 +2338,7 @@ impl sc_client_api::backend::Backend for Backend { .is_some() }; if let Ok(()) = - self.storage.state_db.pin(hash, hdr.number.saturated_into::(), hint) + self.storage.state_db.pin(&hash, hdr.number.saturated_into::(), hint) { let root = hdr.state_root; let db_state = DbStateBuilder::::new(self.storage.clone(), root) @@ -2346,8 +2346,8 @@ impl sc_client_api::backend::Backend for Backend { self.shared_trie_cache.as_ref().map(|c| c.local_cache()), ) .build(); - let state = RefTrackingState::new(db_state, self.storage.clone(), Some(*hash)); - Ok(RecordStatsState::new(state, Some(*hash), self.state_usage.clone())) + let state = RefTrackingState::new(db_state, self.storage.clone(), Some(hash)); + Ok(RecordStatsState::new(state, Some(hash), self.state_usage.clone())) } else { Err(sp_blockchain::Error::UnknownBlock(format!( "State already discarded for {:?}", @@ -2359,9 +2359,9 @@ impl sc_client_api::backend::Backend for Backend { } } - fn have_state_at(&self, hash: &Block::Hash, number: NumberFor) -> bool { + fn have_state_at(&self, hash: Block::Hash, number: NumberFor) -> bool { if self.is_archive { - match self.blockchain.header_metadata(*hash) { + match self.blockchain.header_metadata(hash) { Ok(header) => sp_state_machine::Storage::get( self.storage.as_ref(), &header.state_root, @@ -2372,10 +2372,10 @@ impl sc_client_api::backend::Backend for Backend { _ => false, } } else { - match self.storage.state_db.is_pruned(hash, number.saturated_into::()) { + match self.storage.state_db.is_pruned(&hash, number.saturated_into::()) { IsPruned::Pruned => false, IsPruned::NotPruned => true, - IsPruned::MaybePruned => match self.blockchain.header_metadata(*hash) { + IsPruned::MaybePruned => match self.blockchain.header_metadata(hash) { Ok(header) => sp_state_machine::Storage::get( self.storage.as_ref(), &header.state_root, @@ -2459,7 +2459,7 @@ pub(crate) mod tests { let block_hash = if number == 0 { Default::default() } else { parent_hash }; let mut op = 
backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block_hash).unwrap(); + backend.begin_state_operation(&mut op, block_hash).unwrap(); op.set_block_data(header, Some(body), None, None, NewBlockState::Best).unwrap(); if let Some(index) = transaction_index { op.update_transaction_index(index).unwrap(); @@ -2507,7 +2507,7 @@ pub(crate) mod tests { }; let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, &hash).unwrap(); + db.begin_state_operation(&mut op, hash).unwrap(); let header = Header { number: i, parent_hash: hash, @@ -2581,7 +2581,7 @@ pub(crate) mod tests { db.commit_operation(op).unwrap(); - let state = db.state_at(&hash).unwrap(); + let state = db.state_at(hash).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -2592,7 +2592,7 @@ pub(crate) mod tests { { let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, &hash).unwrap(); + db.begin_state_operation(&mut op, hash).unwrap(); let mut header = Header { number: 1, parent_hash: hash, @@ -2616,7 +2616,7 @@ pub(crate) mod tests { db.commit_operation(op).unwrap(); - let state = db.state_at(&header.hash()).unwrap(); + let state = db.state_at(header.hash()).unwrap(); assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); @@ -2633,7 +2633,7 @@ pub(crate) mod tests { let hash = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &Default::default()).unwrap(); + backend.begin_state_operation(&mut op, Default::default()).unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -2670,7 +2670,7 @@ pub(crate) mod tests { let hashof1 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &hash).unwrap(); + backend.begin_state_operation(&mut op, hash).unwrap(); let mut header = Header { number: 1, parent_hash: hash, @@ -2707,7 +2707,7 @@ pub(crate) mod tests { let hashof2 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &hashof1).unwrap(); + backend.begin_state_operation(&mut op, hashof1).unwrap(); let mut header = Header { number: 2, parent_hash: hashof1, @@ -2741,7 +2741,7 @@ pub(crate) mod tests { let hashof3 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &hashof2).unwrap(); + backend.begin_state_operation(&mut op, hashof2).unwrap(); let mut header = Header { number: 3, parent_hash: hashof2, @@ -2771,9 +2771,9 @@ pub(crate) mod tests { hash }; - backend.finalize_block(&hashof1, None).unwrap(); - backend.finalize_block(&hashof2, None).unwrap(); - backend.finalize_block(&hashof3, None).unwrap(); + backend.finalize_block(hashof1, None).unwrap(); + backend.finalize_block(hashof2, None).unwrap(); + backend.finalize_block(hashof3, None).unwrap(); assert!(backend .storage .db @@ -2996,8 +2996,8 @@ pub(crate) mod tests { vec![block2_a, block2_b, block2_c, block1_c] ); - backend.finalize_block(&block1_a, None).unwrap(); - backend.finalize_block(&block2_a, None).unwrap(); + backend.finalize_block(block1_a, None).unwrap(); + backend.finalize_block(block2_a, None).unwrap(); // leaves at same height stay. Leaves at lower heights pruned. 
assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c]); @@ -3024,10 +3024,10 @@ pub(crate) mod tests { let block1 = insert_header(&backend, 1, block0, None, Default::default()); let justification = Some((CONS0_ENGINE_ID, vec![1, 2, 3])); - backend.finalize_block(&block1, justification.clone()).unwrap(); + backend.finalize_block(block1, justification.clone()).unwrap(); assert_eq!( - backend.blockchain().justifications(&block1).unwrap(), + backend.blockchain().justifications(block1).unwrap(), justification.map(Justifications::from), ); } @@ -3042,14 +3042,14 @@ pub(crate) mod tests { let block1 = insert_header(&backend, 1, block0, None, Default::default()); let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]); - backend.finalize_block(&block1, Some(just0.clone().into())).unwrap(); + backend.finalize_block(block1, Some(just0.clone().into())).unwrap(); let just1 = (CONS1_ENGINE_ID, vec![4, 5]); - backend.append_justification(&block1, just1.clone()).unwrap(); + backend.append_justification(block1, just1.clone()).unwrap(); let just2 = (CONS1_ENGINE_ID, vec![6, 7]); assert!(matches!( - backend.append_justification(&block1, just2), + backend.append_justification(block1, just2), Err(ClientError::BadJustification(_)) )); @@ -3058,7 +3058,7 @@ pub(crate) mod tests { just.append(just1); just }; - assert_eq!(backend.blockchain().justifications(&block1).unwrap(), Some(justifications),); + assert_eq!(backend.blockchain().justifications(block1).unwrap(), Some(justifications),); } #[test] @@ -3072,16 +3072,16 @@ pub(crate) mod tests { let block4 = insert_header(&backend, 4, block3, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block0).unwrap(); - op.mark_finalized(&block1, None).unwrap(); - op.mark_finalized(&block2, None).unwrap(); + backend.begin_state_operation(&mut op, block0).unwrap(); + op.mark_finalized(block1, None).unwrap(); + op.mark_finalized(block2, None).unwrap(); backend.commit_operation(op).unwrap(); } { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block2).unwrap(); - op.mark_finalized(&block3, None).unwrap(); - op.mark_finalized(&block4, None).unwrap(); + backend.begin_state_operation(&mut op, block2).unwrap(); + op.mark_finalized(block3, None).unwrap(); + op.mark_finalized(block4, None).unwrap(); backend.commit_operation(op).unwrap(); } } @@ -3093,7 +3093,7 @@ pub(crate) mod tests { let hash0 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &Default::default()).unwrap(); + backend.begin_state_operation(&mut op, Default::default()).unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -3127,11 +3127,11 @@ pub(crate) mod tests { hash }; - let block0_hash = backend.state_at(&hash0).unwrap().storage_hash(&b"test"[..]).unwrap(); + let block0_hash = backend.state_at(hash0).unwrap().storage_hash(&b"test"[..]).unwrap(); let hash1 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &hash0).unwrap(); + backend.begin_state_operation(&mut op, hash0).unwrap(); let mut header = Header { number: 1, parent_hash: hash0, @@ -3166,7 +3166,7 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); } - let block1_hash = backend.state_at(&hash1).unwrap().storage_hash(&b"test"[..]).unwrap(); + let block1_hash = backend.state_at(hash1).unwrap().storage_hash(&b"test"[..]).unwrap(); assert_ne!(block0_hash, block1_hash); } @@ -3180,8 +3180,8 @@ 
pub(crate) mod tests { let block2 = insert_header(&backend, 2, block1, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block0).unwrap(); - op.mark_finalized(&block2, None).unwrap(); + backend.begin_state_operation(&mut op, block0).unwrap(); + op.mark_finalized(block2, None).unwrap(); backend.commit_operation(op).unwrap_err(); } } @@ -3208,18 +3208,18 @@ pub(crate) mod tests { { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); for i in 1..5 { - op.mark_finalized(&blocks[i], None).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); } backend.commit_operation(op).unwrap(); } let bc = backend.blockchain(); - assert_eq!(None, bc.body(&blocks[0]).unwrap()); - assert_eq!(None, bc.body(&blocks[1]).unwrap()); - assert_eq!(None, bc.body(&blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(&blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); + assert_eq!(None, bc.body(blocks[0]).unwrap()); + assert_eq!(None, bc.body(blocks[1]).unwrap()); + assert_eq!(None, bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); } #[test] @@ -3243,18 +3243,18 @@ pub(crate) mod tests { } let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); for i in 1..3 { - op.mark_finalized(&blocks[i], None).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); } backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); - assert_eq!(Some(vec![0.into()]), bc.body(&blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(&blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(&blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(&blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); + assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); } #[test] @@ -3300,27 +3300,27 @@ pub(crate) mod tests { .unwrap(); let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); - op.mark_head(&blocks[4]).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_head(blocks[4]).unwrap(); backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); - assert_eq!(Some(vec![2.into()]), bc.body(&fork_hash_root).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[i]).unwrap(); - op.mark_finalized(&blocks[i], None).unwrap(); + backend.begin_state_operation(&mut op, blocks[i]).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); } - assert_eq!(Some(vec![0.into()]), bc.body(&blocks[0]).unwrap()); - assert_eq!(Some(vec![1.into()]), bc.body(&blocks[1]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(&blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), 
bc.body(&blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); + assert_eq!(Some(vec![0.into()]), bc.body(blocks[0]).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); - assert_eq!(Some(vec![2.into()]), bc.body(&fork_hash_root).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(fork_hash_root).unwrap()); assert_eq!(bc.info().best_number, 4); for i in 0..5 { assert!(bc.hash(i).unwrap().is_some()); @@ -3369,23 +3369,23 @@ pub(crate) mod tests { ) .unwrap(); let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); - op.mark_head(&blocks[4]).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_head(blocks[4]).unwrap(); backend.commit_operation(op).unwrap(); for i in 1..5 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); - op.mark_finalized(&blocks[i], None).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); } let bc = backend.blockchain(); - assert_eq!(None, bc.body(&blocks[0]).unwrap()); - assert_eq!(None, bc.body(&blocks[1]).unwrap()); - assert_eq!(None, bc.body(&blocks[2]).unwrap()); - assert_eq!(Some(vec![3.into()]), bc.body(&blocks[3]).unwrap()); - assert_eq!(Some(vec![4.into()]), bc.body(&blocks[4]).unwrap()); + assert_eq!(None, bc.body(blocks[0]).unwrap()); + assert_eq!(None, bc.body(blocks[1]).unwrap()); + assert_eq!(None, bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); } #[test] @@ -3419,17 +3419,17 @@ pub(crate) mod tests { ) .unwrap(); let bc = backend.blockchain(); - assert_eq!(bc.indexed_transaction(&x0_hash).unwrap().unwrap(), &x0[1..]); - assert_eq!(bc.indexed_transaction(&x1_hash).unwrap().unwrap(), &x1[1..]); + assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[1..]); + assert_eq!(bc.indexed_transaction(x1_hash).unwrap().unwrap(), &x1[1..]); let hashof0 = bc.info().genesis_hash; // Push one more blocks and make sure block is pruned and transaction index is cleared. 
let block1 = insert_block(&backend, 1, hash, None, Default::default(), vec![], None).unwrap(); - backend.finalize_block(&block1, None).unwrap(); - assert_eq!(bc.body(&hashof0).unwrap(), None); - assert_eq!(bc.indexed_transaction(&x0_hash).unwrap(), None); - assert_eq!(bc.indexed_transaction(&x1_hash).unwrap(), None); + backend.finalize_block(block1, None).unwrap(); + assert_eq!(bc.body(hashof0).unwrap(), None); + assert_eq!(bc.indexed_transaction(x0_hash).unwrap(), None); + assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None); } #[test] @@ -3463,8 +3463,8 @@ pub(crate) mod tests { ) .unwrap(); let bc = backend.blockchain(); - assert_eq!(bc.indexed_transaction(&x0_hash).unwrap().unwrap(), &x0[..]); - assert_eq!(bc.indexed_transaction(&x1_hash).unwrap(), None); + assert_eq!(bc.indexed_transaction(x0_hash).unwrap().unwrap(), &x0[..]); + assert_eq!(bc.indexed_transaction(x1_hash).unwrap(), None); } #[test] @@ -3502,14 +3502,14 @@ pub(crate) mod tests { for i in 1..10 { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &blocks[4]).unwrap(); - op.mark_finalized(&blocks[i], None).unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_finalized(blocks[i], None).unwrap(); backend.commit_operation(op).unwrap(); let bc = backend.blockchain(); if i < 6 { - assert!(bc.indexed_transaction(&x1_hash).unwrap().is_some()); + assert!(bc.indexed_transaction(x1_hash).unwrap().is_some()); } else { - assert!(bc.indexed_transaction(&x1_hash).unwrap().is_none()); + assert!(bc.indexed_transaction(x1_hash).unwrap().is_none()); } } } @@ -3561,31 +3561,31 @@ pub(crate) mod tests { .unwrap(); assert_eq!(backend.blockchain().info().best_hash, best_hash); - assert!(backend.remove_leaf_block(&best_hash).is_err()); + assert!(backend.remove_leaf_block(best_hash).is_err()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], blocks[3], best_hash]); assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2], blocks[3]]); - assert!(backend.have_state_at(&blocks[3], 2)); + assert!(backend.have_state_at(blocks[3], 2)); assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_some()); - backend.remove_leaf_block(&blocks[3]).unwrap(); - assert!(!backend.have_state_at(&blocks[3], 2)); + backend.remove_leaf_block(blocks[3]).unwrap(); + assert!(!backend.have_state_at(blocks[3], 2)); assert!(backend.blockchain().header(BlockId::hash(blocks[3])).unwrap().is_none()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![blocks[2], best_hash]); assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![blocks[2]]); - assert!(backend.have_state_at(&blocks[2], 2)); + assert!(backend.have_state_at(blocks[2], 2)); assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_some()); - backend.remove_leaf_block(&blocks[2]).unwrap(); - assert!(!backend.have_state_at(&blocks[2], 2)); + backend.remove_leaf_block(blocks[2]).unwrap(); + assert!(!backend.have_state_at(blocks[2], 2)); assert!(backend.blockchain().header(BlockId::hash(blocks[2])).unwrap().is_none()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash, blocks[1]]); assert_eq!(backend.blockchain().children(blocks[1]).unwrap(), vec![]); - assert!(backend.have_state_at(&blocks[1], 1)); + assert!(backend.have_state_at(blocks[1], 1)); assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_some()); - backend.remove_leaf_block(&blocks[1]).unwrap(); - assert!(!backend.have_state_at(&blocks[1], 1)); + 
backend.remove_leaf_block(blocks[1]).unwrap(); + assert!(!backend.have_state_at(blocks[1], 1)); assert!(backend.blockchain().header(BlockId::hash(blocks[1])).unwrap().is_none()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![best_hash]); assert_eq!(backend.blockchain().children(blocks[0]).unwrap(), vec![best_hash]); @@ -3678,7 +3678,7 @@ pub(crate) mod tests { let block1_a = insert_header(&backend, 1, block0, None, Default::default()); let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); - backend.finalize_block(&block1_a, None).unwrap(); + backend.finalize_block(block1_a, None).unwrap(); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a]); // Insert a fork prior to finalization point. Leave should not be created. @@ -3702,7 +3702,7 @@ pub(crate) mod tests { let block3 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block1).unwrap(); + backend.begin_state_operation(&mut op, block1).unwrap(); let header = Header { number: 3, parent_hash: block2, @@ -3721,7 +3721,7 @@ pub(crate) mod tests { let block4 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block2).unwrap(); + backend.begin_state_operation(&mut op, block2).unwrap(); let header = Header { number: 4, parent_hash: block3, @@ -3740,7 +3740,7 @@ pub(crate) mod tests { let block3_fork = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, &block2).unwrap(); + backend.begin_state_operation(&mut op, block2).unwrap(); let header = Header { number: 3, parent_hash: block2, @@ -3757,22 +3757,22 @@ pub(crate) mod tests { header.hash() }; - assert!(backend.have_state_at(&block1, 1)); - assert!(backend.have_state_at(&block2, 2)); - assert!(backend.have_state_at(&block3, 3)); - assert!(backend.have_state_at(&block4, 4)); - assert!(backend.have_state_at(&block3_fork, 3)); + assert!(backend.have_state_at(block1, 1)); + assert!(backend.have_state_at(block2, 2)); + assert!(backend.have_state_at(block3, 3)); + assert!(backend.have_state_at(block4, 4)); + assert!(backend.have_state_at(block3_fork, 3)); assert_eq!(backend.blockchain.leaves().unwrap(), vec![block4, block3_fork]); assert_eq!(4, backend.blockchain.leaves.read().highest_leaf().unwrap().0); assert_eq!(3, backend.revert(1, false).unwrap().0); - assert!(backend.have_state_at(&block1, 1)); - assert!(!backend.have_state_at(&block2, 2)); - assert!(!backend.have_state_at(&block3, 3)); - assert!(!backend.have_state_at(&block4, 4)); - assert!(!backend.have_state_at(&block3_fork, 3)); + assert!(backend.have_state_at(block1, 1)); + assert!(!backend.have_state_at(block2, 2)); + assert!(!backend.have_state_at(block3, 3)); + assert!(!backend.have_state_at(block4, 4)); + assert!(!backend.have_state_at(block3_fork, 3)); assert_eq!(backend.blockchain.leaves().unwrap(), vec![block1]); assert_eq!(1, backend.blockchain.leaves.read().highest_leaf().unwrap().0); diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 60720494a9f9a..f235c3a86c04e 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -1352,7 +1352,7 @@ where // ideally some handle to a synchronization oracle would be used // to avoid unconditionally notifying. 
client - .apply_finality(import_op, &hash, persisted_justification, true) + .apply_finality(import_op, hash, persisted_justification, true) .map_err(|e| { warn!(target: "afg", "Error applying finality to block {:?}: {}", (hash, number), e); e diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 3070581350662..453b41bc63468 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -188,7 +188,7 @@ where .expect_block_hash_from_id(&BlockId::Number(last_block_for_set))?; let justification = if let Some(grandpa_justification) = backend .blockchain() - .justifications(&last_block_for_set_id)? + .justifications(last_block_for_set_id)? .and_then(|justifications| justifications.into_justification(GRANDPA_ENGINE_ID)) { grandpa_justification @@ -312,7 +312,7 @@ mod tests { for block in to_finalize { let hash = blocks[*block as usize - 1].hash(); - client.finalize_block(&hash, None).unwrap(); + client.finalize_block(hash, None).unwrap(); } (client, backend, blocks) } @@ -492,7 +492,7 @@ mod tests { let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); client - .finalize_block(&block8.hash(), Some((ID, grandpa_just8.encode().clone()))) + .finalize_block(block8.hash(), Some((ID, grandpa_just8.encode().clone()))) .unwrap(); // Authority set change at block 8, so the justification stored there will be used in the diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index d0a66888ec072..3715287eea31f 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -424,8 +424,8 @@ where } /// Read current set id form a given state. - fn current_set_id(&self, hash: &Block::Hash) -> Result { - let id = &BlockId::hash(*hash); + fn current_set_id(&self, hash: Block::Hash) -> Result { + let id = &BlockId::hash(hash); let runtime_version = self.inner.runtime_api().version(id).map_err(|e| { ConsensusError::ClientImport(format!( "Unable to retrieve current runtime version. 
{}", @@ -480,7 +480,7 @@ where .runtime_api() .grandpa_authorities(&BlockId::hash(hash)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - let set_id = self.current_set_id(&hash)?; + let set_id = self.current_set_id(hash)?; let authority_set = AuthoritySet::new( authorities.clone(), set_id, diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index d2db1feea0fef..93d20110ff5af 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -375,7 +375,7 @@ fn finalize_3_voters_no_observers() { // normally there's no justification for finalized blocks assert!( - net.lock().peer(0).client().justifications(&hashof20).unwrap().is_none(), + net.lock().peer(0).client().justifications(hashof20).unwrap().is_none(), "Extra justification for block#1", ); } @@ -621,7 +621,7 @@ fn justification_is_generated_periodically() { // when block#32 (justification_period) is finalized, justification // is required => generated for i in 0..3 { - assert!(net.lock().peer(i).client().justifications(&hashof32).unwrap().is_some()); + assert!(net.lock().peer(i).client().justifications(hashof32).unwrap().is_some()); } } @@ -665,12 +665,12 @@ fn sync_justifications_on_change_blocks() { // the first 3 peers are grandpa voters and therefore have already finalized // block 21 and stored a justification for i in 0..3 { - assert!(net.lock().peer(i).client().justifications(&hashof21).unwrap().is_some()); + assert!(net.lock().peer(i).client().justifications(hashof21).unwrap().is_some()); } // the last peer should get the justification by syncing from other peers futures::executor::block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(3).client().justifications(&hashof21).unwrap().is_none() { + if net.lock().peer(3).client().justifications(hashof21).unwrap().is_none() { net.lock().poll(cx); Poll::Pending } else { @@ -1428,7 +1428,7 @@ fn grandpa_environment_respects_voting_rules() { .as_client() .expect_block_hash_from_id(&BlockId::Number(19)) .unwrap(); - peer.client().finalize_block(&hashof19, None, false).unwrap(); + peer.client().finalize_block(hashof19, None, false).unwrap(); // the 3/4 environment should propose block 21 for voting assert_eq!( @@ -1455,7 +1455,7 @@ fn grandpa_environment_respects_voting_rules() { .as_client() .expect_block_hash_from_id(&BlockId::Number(21)) .unwrap(); - peer.client().finalize_block(&hashof21, None, false).unwrap(); + peer.client().finalize_block(hashof21, None, false).unwrap(); // even though the default environment will always try to not vote on the // best block, there's a hard rule that we can't cast any votes lower than @@ -1666,7 +1666,7 @@ fn imports_justification_for_regular_blocks_on_import() { ); // the justification should be imported and available from the client - assert!(client.justifications(&block_hash).unwrap().is_some()); + assert!(client.justifications(block_hash).unwrap().is_some()); } #[test] diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index 786dfacf8b0b9..c9f762fc7d593 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -130,7 +130,7 @@ impl WarpSyncProof { } let justification = blockchain - .justifications(&header.hash())? + .justifications(header.hash())? 
.and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)) .expect( "header is last in set and contains standard change signal; \ @@ -412,7 +412,7 @@ mod tests { let justification = GrandpaJustification::from_commit(&client, 42, commit).unwrap(); client - .finalize_block(&target_hash, Some((GRANDPA_ENGINE_ID, justification.encode()))) + .finalize_block(target_hash, Some((GRANDPA_ENGINE_ID, justification.encode()))) .unwrap(); authority_set_changes.push((current_set_id, n)); diff --git a/client/network/bitswap/src/lib.rs b/client/network/bitswap/src/lib.rs index aba7f40ce632f..62a18b18c839d 100644 --- a/client/network/bitswap/src/lib.rs +++ b/client/network/bitswap/src/lib.rs @@ -203,7 +203,7 @@ impl BitswapRequestHandler { let mut hash = B::Hash::default(); hash.as_mut().copy_from_slice(&cid.hash().digest()[0..32]); - let transaction = match self.client.indexed_transaction(&hash) { + let transaction = match self.client.indexed_transaction(hash) { Ok(ex) => ex, Err(e) => { error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e); diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 7156545fbd9aa..77904c7256295 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -172,7 +172,7 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let response = match self.client.execution_proof(&block, &request.method, &request.data) { + let response = match self.client.execution_proof(block, &request.method, &request.data) { Ok((_, proof)) => { let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; Some(schema::v1::light::response::Response::RemoteCallResponse(r)) @@ -212,7 +212,7 @@ where let block = Decode::decode(&mut request.block.as_ref())?; let response = - match self.client.read_proof(&block, &mut request.keys.iter().map(AsRef::as_ref)) { + match self.client.read_proof(block, &mut request.keys.iter().map(AsRef::as_ref)) { Ok(proof) => { let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; Some(schema::v1::light::response::Response::RemoteReadResponse(r)) @@ -259,7 +259,7 @@ where }; let response = match child_info.and_then(|child_info| { self.client.read_child_proof( - &block, + block, &child_info, &mut request.keys.iter().map(AsRef::as_ref), ) diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 5eba1d52dc68c..467d898489b61 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -332,7 +332,7 @@ where let hash = header.hash(); let parent_hash = *header.parent_hash(); let justifications = - if get_justification { self.client.justifications(&hash)? } else { None }; + if get_justification { self.client.justifications(hash)? } else { None }; let (justifications, justification, is_empty_justification) = if support_multiple_justifications { @@ -361,7 +361,7 @@ where }; let body = if get_body { - match self.client.block_body(&hash)? { + match self.client.block_body(hash)? { Some(mut extrinsics) => extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect(), None => { @@ -374,7 +374,7 @@ where }; let indexed_body = if get_indexed_body { - match self.client.block_indexed_body(&hash)? { + match self.client.block_indexed_body(hash)? 
{ Some(transactions) => transactions, None => { log::trace!( diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index f369bdb47e1c6..7c484835951e8 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -3201,7 +3201,7 @@ mod test { let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); - client.finalize_block(&finalized_block.hash(), Some(just)).unwrap(); + client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); sync.update_chain_info(&info.best_hash, info.best_number); let peer_id1 = PeerId::random(); @@ -3333,7 +3333,7 @@ mod test { let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); - client.finalize_block(&finalized_block.hash(), Some(just)).unwrap(); + client.finalize_block(finalized_block.hash(), Some(just)).unwrap(); sync.update_chain_info(&info.best_hash, info.best_number); let peer_id1 = PeerId::random(); diff --git a/client/network/sync/src/state_request_handler.rs b/client/network/sync/src/state_request_handler.rs index 0a369c998dbd7..441400ef439b7 100644 --- a/client/network/sync/src/state_request_handler.rs +++ b/client/network/sync/src/state_request_handler.rs @@ -205,14 +205,14 @@ where if !request.no_proof { let (proof, _count) = self.client.read_proof_collection( - &block, + block, request.start.as_slice(), MAX_RESPONSE_BYTES, )?; response.proof = proof.encode(); } else { let entries = self.client.storage_collection( - &block, + block, request.start.as_slice(), MAX_RESPONSE_BYTES, )?; diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index a1d42f1e60440..b86f6787f30b5 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -40,7 +40,7 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); let header = client.header(&BlockId::Number(1)).unwrap(); - let justifications = client.justifications(&hash).unwrap(); + let justifications = client.justifications(hash).unwrap(); let peer_id = PeerId::random(); ( client, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index f348d5cf94c43..035fc0a972a59 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -173,12 +173,12 @@ impl PeersClient { Some(header) => header, None => return false, }; - self.backend.have_state_at(&header.hash(), *header.number()) + self.backend.have_state_at(header.hash(), *header.number()) } pub fn justifications( &self, - hash: &::Hash, + hash: ::Hash, ) -> ClientResult> { self.client.justifications(hash) } @@ -193,7 +193,7 @@ impl PeersClient { pub fn finalize_block( &self, - hash: &::Hash, + hash: ::Hash, justification: Option, notify: bool, ) -> ClientResult<()> { @@ -535,14 +535,14 @@ where self.verifier.failed_verifications.lock().clone() } - pub fn has_block(&self, hash: &H256) -> bool { + pub fn has_block(&self, hash: H256) -> bool { self.backend .as_ref() - .map(|backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some()) + .map(|backend| backend.blockchain().header(BlockId::hash(hash)).unwrap().is_some()) .unwrap_or(false) } - pub fn has_body(&self, hash: &H256) -> bool { + pub fn has_body(&self, hash: H256) -> bool { self.backend .as_ref() .map(|backend| backend.blockchain().body(hash).unwrap().is_some()) @@ -1124,7 +1124,7 @@ impl JustificationImport 
for ForceFinalized { justification: Justification, ) -> Result<(), Self::Error> { self.0 - .finalize_block(&hash, Some(justification), true) + .finalize_block(hash, Some(justification), true) .map_err(|_| ConsensusError::InvalidJustification) } } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 9ae3014e497ce..bbba3bc6ded62 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -252,23 +252,14 @@ fn sync_justifications() { let hashof20 = backend.blockchain().expect_block_hash_from_id(&BlockId::Number(20)).unwrap(); // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justifications(&hashof10).unwrap(), None); - assert_eq!(net.peer(1).client().justifications(&hashof10).unwrap(), None); + assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(hashof10).unwrap(), None); // we finalize block #10, #15 and #20 for peer 0 with a justification let just = (*b"FRNK", Vec::new()); - net.peer(0) - .client() - .finalize_block(&hashof10, Some(just.clone()), true) - .unwrap(); - net.peer(0) - .client() - .finalize_block(&hashof15, Some(just.clone()), true) - .unwrap(); - net.peer(0) - .client() - .finalize_block(&hashof20, Some(just.clone()), true) - .unwrap(); + net.peer(0).client().finalize_block(hashof10, Some(just.clone()), true).unwrap(); + net.peer(0).client().finalize_block(hashof15, Some(just.clone()), true).unwrap(); + net.peer(0).client().finalize_block(hashof20, Some(just.clone()), true).unwrap(); let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); let hashof15 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap().hash(); @@ -283,12 +274,12 @@ fn sync_justifications() { net.poll(cx); for hash in [hashof10, hashof15, hashof20] { - if net.peer(0).client().justifications(&hash).unwrap() != + if net.peer(0).client().justifications(hash).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending } - if net.peer(1).client().justifications(&hash).unwrap() != + if net.peer(1).client().justifications(hash).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending @@ -314,7 +305,7 @@ fn sync_justifications_across_forks() { net.block_until_sync(); let just = (*b"FRNK", Vec::new()); - net.peer(0).client().finalize_block(&f1_best, Some(just), true).unwrap(); + net.peer(0).client().finalize_block(f1_best, Some(just), true).unwrap(); net.peer(1).request_justification(&f1_best, 10); net.peer(1).request_justification(&f2_best, 11); @@ -322,9 +313,9 @@ fn sync_justifications_across_forks() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).client().justifications(&f1_best).unwrap() == + if net.peer(0).client().justifications(f1_best).unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) && - net.peer(1).client().justifications(&f1_best).unwrap() == + net.peer(1).client().justifications(f1_best).unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) { Poll::Ready(()) @@ -369,10 +360,10 @@ fn syncs_all_forks() { net.block_until_sync(); // Check that all peers have all of the branches. 
- assert!(net.peer(0).has_block(&b1)); - assert!(net.peer(0).has_block(&b2)); - assert!(net.peer(1).has_block(&b1)); - assert!(net.peer(1).has_block(&b2)); + assert!(net.peer(0).has_block(b1)); + assert!(net.peer(0).has_block(b2)); + assert!(net.peer(1).has_block(b1)); + assert!(net.peer(1).has_block(b2)); } #[test] @@ -555,7 +546,7 @@ fn syncs_header_only_forks() { net.peer(1).push_blocks(4, false); // Peer 1 will sync the small fork even though common block state is missing - while !net.peer(1).has_block(&small_hash) { + while !net.peer(1).has_block(small_hash) { net.block_until_idle(); } } @@ -657,13 +648,13 @@ fn can_sync_to_peers_with_wrong_common_block() { // both peers re-org to the same fork without notifying each other let just = Some((*b"FRNK", Vec::new())); - net.peer(0).client().finalize_block(&fork_hash, just.clone(), true).unwrap(); - net.peer(1).client().finalize_block(&fork_hash, just, true).unwrap(); + net.peer(0).client().finalize_block(fork_hash, just.clone(), true).unwrap(); + net.peer(1).client().finalize_block(fork_hash, just, true).unwrap(); let final_hash = net.peer(0).push_blocks(1, false); net.block_until_sync(); - assert!(net.peer(1).has_block(&final_hash)); + assert!(net.peer(1).has_block(final_hash)); } /// Returns `is_new_best = true` for each validated announcement. @@ -724,7 +715,7 @@ fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { ForkChoiceStrategy::Custom(false), ); - while !net.peer(2).has_block(&block_hash) { + while !net.peer(2).has_block(block_hash) { net.block_until_idle(); } } @@ -767,7 +758,7 @@ fn wait_until_deferred_block_announce_validation_is_ready() { ForkChoiceStrategy::Custom(false), ); - while !net.peer(1).has_block(&block_hash) { + while !net.peer(1).has_block(block_hash) { net.block_until_idle(); } } @@ -788,7 +779,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { net.block_until_idle(); // The peer should not have synced the block. - assert!(!net.peer(1).has_block(&block_hash)); + assert!(!net.peer(1).has_block(block_hash)); // Make sync protocol aware of the best block net.peer(0).network_service().new_best_block_imported(block_hash, 3); @@ -802,7 +793,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(2).has_block(&block_hash) { + if net.peer(2).has_block(block_hash) { Poll::Ready(()) } else { Poll::Pending @@ -810,7 +801,7 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { })); // However peer 1 should still not have the block. 
- assert!(!net.peer(1).has_block(&block_hash)); + assert!(!net.peer(1).has_block(block_hash)); } /// Ensures that if we as a syncing node sync to the tip while we are connected to another peer @@ -831,10 +822,10 @@ fn sync_to_tip_when_we_sync_together_with_multiple_peers() { net.block_until_connected(); net.block_until_idle(); - assert!(!net.peer(2).has_block(&block_hash)); + assert!(!net.peer(2).has_block(block_hash)); net.peer(0).network_service().new_best_block_imported(block_hash, 10_000); - while !net.peer(2).has_block(&block_hash) && !net.peer(1).has_block(&block_hash) { + while !net.peer(2).has_block(block_hash) && !net.peer(1).has_block(block_hash) { net.block_until_idle(); } } @@ -895,7 +886,7 @@ fn block_announce_data_is_propagated() { let block_hash = net.peer(0).push_blocks_at_without_announcing(BlockId::Number(0), 1, true); net.peer(0).announce_block(block_hash, Some(vec![137])); - while !net.peer(1).has_block(&block_hash) || !net.peer(2).has_block(&block_hash) { + while !net.peer(1).has_block(block_hash) || !net.peer(2).has_block(block_hash) { net.block_until_idle(); } } @@ -939,7 +930,7 @@ fn continue_to_sync_after_some_block_announcement_verifications_failed() { let block_hash = net.peer(0).push_blocks(500, true); net.block_until_sync(); - assert!(net.peer(1).has_block(&block_hash)); + assert!(net.peer(1).has_block(block_hash)); } /// When being spammed by the same request of a peer, we ban this peer. However, we should only ban @@ -956,8 +947,8 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { let hashof10 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap().hash(); // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justifications(&hashof10).unwrap(), None); - assert_eq!(net.peer(1).client().justifications(&hashof10).unwrap(), None); + assert_eq!(net.peer(0).client().justifications(hashof10).unwrap(), None); + assert_eq!(net.peer(1).client().justifications(hashof10).unwrap(), None); // Let's assume block 10 was finalized, but we still need the justification from the network. net.peer(1).request_justification(&hashof10, 10); @@ -982,13 +973,13 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { // Finalize the block and make the justification available. net.peer(0) .client() - .finalize_block(&hashof10, Some((*b"FRNK", Vec::new())), true) + .finalize_block(hashof10, Some((*b"FRNK", Vec::new())), true) .unwrap(); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(1).client().justifications(&hashof10).unwrap() != + if net.peer(1).client().justifications(hashof10).unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) { return Poll::Pending @@ -1110,7 +1101,7 @@ fn syncs_state() { .blockchain() .expect_block_hash_from_id(&BlockId::Number(60)) .unwrap(); - net.peer(1).client().finalize_block(&hashof60, Some(just), true).unwrap(); + net.peer(1).client().finalize_block(hashof60, Some(just), true).unwrap(); // Wait for state sync. 
block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); @@ -1165,14 +1156,14 @@ fn syncs_indexed_blocks() { .peer(0) .client() .as_client() - .indexed_transaction(&indexed_key) + .indexed_transaction(indexed_key) .unwrap() .is_some()); assert!(net .peer(1) .client() .as_client() - .indexed_transaction(&indexed_key) + .indexed_transaction(indexed_key) .unwrap() .is_none()); @@ -1181,7 +1172,7 @@ fn syncs_indexed_blocks() { .peer(1) .client() .as_client() - .indexed_transaction(&indexed_key) + .indexed_transaction(indexed_key) .unwrap() .is_some()); } @@ -1210,7 +1201,7 @@ fn warp_sync() { // Wait for peer 1 download block history block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(3).has_body(&gap_end) && net.peer(3).has_body(&target) { + if net.peer(3).has_body(gap_end) && net.peer(3).has_body(target) { Poll::Ready(()) } else { Poll::Pending diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index a41d8e41b8fa7..1e6dbd5aca148 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -206,7 +206,7 @@ async fn should_return_finalized_hash() { assert_eq!(res, client.genesis_hash()); // finalize - client.finalize_block(&block_hash, None).unwrap(); + client.finalize_block(block_hash, None).unwrap(); let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap(); assert_eq!(res, block_hash); } @@ -235,7 +235,7 @@ async fn test_head_subscription(method: &str) { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; let block_hash = block.hash(); client.import(BlockOrigin::Own, block).await.unwrap(); - client.finalize_block(&block_hash, None).unwrap(); + client.finalize_block(block_hash, None).unwrap(); sub }; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 7fc7f840a9daf..64b6cacaad700 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -147,7 +147,7 @@ where let mut block_changes = StorageChangeSet { block: *block_hash, changes: Vec::new() }; for key in keys { let (has_changed, data) = { - let curr_data = self.client.storage(block_hash, key).map_err(client_err)?; + let curr_data = self.client.storage(*block_hash, key).map_err(client_err)?; match last_values.get(key) { Some(prev_data) => (curr_data != *prev_data, curr_data), None => (true, curr_data), @@ -213,7 +213,7 @@ where prefix: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_keys(&block, &prefix)) + .and_then(|block| self.client.storage_keys(block, &prefix)) .map_err(client_err) } @@ -223,7 +223,7 @@ where prefix: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_pairs(&block, &prefix)) + .and_then(|block| self.client.storage_pairs(block, &prefix)) .map_err(client_err) } @@ -236,7 +236,7 @@ where ) -> std::result::Result, Error> { self.block_or_best(block) .and_then(|block| { - self.client.storage_keys_iter(&block, prefix.as_ref(), start_key.as_ref()) + self.client.storage_keys_iter(block, prefix.as_ref(), start_key.as_ref()) }) .map(|iter| iter.take(count as usize).collect()) .map_err(client_err) @@ -248,7 +248,7 @@ where key: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage(&block, &key)) + .and_then(|block| self.client.storage(block, &key)) .map_err(client_err) } @@ -262,14 +262,14 @@ where Err(e) => return Err(client_err(e)), }; - match 
self.client.storage(&block, &key) { + match self.client.storage(block, &key) { Ok(Some(d)) => return Ok(Some(d.0.len() as u64)), Err(e) => return Err(client_err(e)), Ok(None) => {}, } self.client - .storage_pairs(&block, &key) + .storage_pairs(block, &key) .map(|kv| { let item_sum = kv.iter().map(|(_, v)| v.0.len() as u64).sum::(); if item_sum > 0 { @@ -287,7 +287,7 @@ where key: StorageKey, ) -> std::result::Result, Error> { self.block_or_best(block) - .and_then(|block| self.client.storage_hash(&block, &key)) + .and_then(|block| self.client.storage_hash(block, &key)) .map_err(client_err) } @@ -345,7 +345,7 @@ where self.block_or_best(block) .and_then(|block| { self.client - .read_proof(&block, &mut keys.iter().map(|key| key.0.as_ref())) + .read_proof(block, &mut keys.iter().map(|key| key.0.as_ref())) .map(|proof| proof.into_iter_nodes().map(|node| node.into()).collect()) .map(|proof| ReadProof { at: block, proof }) }) @@ -413,7 +413,7 @@ where let changes = keys .into_iter() .map(|key| { - let v = self.client.storage(&block, &key).ok().flatten(); + let v = self.client.storage(block, &key).ok().flatten(); (key, v) }) .collect(); @@ -494,7 +494,7 @@ where }; self.client .read_child_proof( - &block, + block, &child_info, &mut keys.iter().map(|key| key.0.as_ref()), ) @@ -517,7 +517,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage_keys(&block, &child_info, &prefix) + self.client.child_storage_keys(block, &child_info, &prefix) }) .map_err(client_err) } @@ -538,7 +538,7 @@ where None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys_iter( - &block, + block, child_info, prefix.as_ref(), start_key.as_ref(), @@ -561,7 +561,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage(&block, &child_info, &key) + self.client.child_storage(block, &child_info, &key) }) .map_err(client_err) } @@ -584,7 +584,7 @@ where keys.into_iter() .map(move |key| { - client.clone().child_storage(&block, &child_info, &key).map_err(client_err) + client.clone().child_storage(block, &child_info, &key).map_err(client_err) }) .collect() } @@ -602,7 +602,7 @@ where ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage_hash(&block, &child_info, &key) + self.client.child_storage_hash(block, &child_info, &key) }) .map_err(client_err) } diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index 04dba387de908..ca7a070086f45 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -25,7 +25,7 @@ use std::{collections::HashMap, sync::Arc}; /// Export the raw state at the given `block`. If `block` is `None`, the /// best block will be used. 
-pub fn export_raw_state(client: Arc, hash: &B::Hash) -> Result +pub fn export_raw_state(client: Arc, hash: B::Hash) -> Result where C: UsageProvider + StorageProvider, B: BlockT, diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 8ab332a24be78..a1a012dcedd9f 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -148,7 +148,7 @@ where ) -> sp_blockchain::Result> { let mut changes = OverlayedChanges::default(); let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - let state = self.backend.state_at(&at_hash)?; + let state = self.backend.state_at(at_hash)?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; @@ -193,7 +193,7 @@ where let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - let state = self.backend.state_at(&at_hash)?; + let state = self.backend.state_at(at_hash)?; let changes = &mut *changes.borrow_mut(); @@ -251,7 +251,7 @@ where let mut overlay = OverlayedChanges::default(); let at_hash = self.backend.blockchain().expect_block_hash_from_id(id)?; - let state = self.backend.state_at(&at_hash)?; + let state = self.backend.state_at(at_hash)?; let mut cache = StorageTransactionCache::::default(); let mut ext = Ext::new(&mut overlay, &mut cache, &state, None); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); @@ -269,7 +269,7 @@ where call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { let at_hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - let state = self.backend.state_at(&at_hash)?; + let state = self.backend.state_at(at_hash)?; let trie_backend = state.as_trie_backend(); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index a4890f2fcf06f..438d0b7f77061 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -414,14 +414,14 @@ where } /// Get a reference to the state at a given block. - pub fn state_at(&self, hash: &Block::Hash) -> sp_blockchain::Result { + pub fn state_at(&self, hash: Block::Hash) -> sp_blockchain::Result { self.backend.state_at(hash) } /// Get the code at a given block. pub fn code_at(&self, id: &BlockId) -> sp_blockchain::Result> { let hash = self.backend.blockchain().expect_block_hash_from_id(id)?; - Ok(StorageProvider::storage(self, &hash, &StorageKey(well_known_keys::CODE.to_vec()))? + Ok(StorageProvider::storage(self, hash, &StorageKey(well_known_keys::CODE.to_vec()))? 
.expect( "None is returned if there's no value stored for the given key;\ ':code' key is always defined; qed", @@ -586,7 +586,7 @@ where Some(storage_changes) => { let storage_changes = match storage_changes { sc_consensus::StorageChanges::Changes(storage_changes) => { - self.backend.begin_state_operation(&mut operation.op, &parent_hash)?; + self.backend.begin_state_operation(&mut operation.op, parent_hash)?; let (main_sc, child_sc, offchain_sc, tx, _, tx_index) = storage_changes.into_inner(); @@ -813,7 +813,7 @@ where Block::new(import_block.header.clone(), body.clone()), )?; - let state = self.backend.state_at(parent_hash)?; + let state = self.backend.state_at(*parent_hash)?; let gen_storage_changes = runtime_api .into_storage_changes(&state, *parent_hash) .map_err(sp_blockchain::Error::Storage)?; @@ -877,17 +877,17 @@ where // plugable we cannot make a better choice here. usages that need // an accurate "best" block need to go through `SelectChain` // instead. - operation.op.mark_head(&block)?; + operation.op.mark_head(block)?; } let enacted = route_from_finalized.enacted(); assert!(enacted.len() > 0); for finalize_new in &enacted[..enacted.len() - 1] { - operation.op.mark_finalized(&finalize_new.hash, None)?; + operation.op.mark_finalized(finalize_new.hash, None)?; } assert_eq!(enacted.last().map(|e| e.hash), Some(block)); - operation.op.mark_finalized(&block, justification)?; + operation.op.mark_finalized(block, justification)?; if notify { let finalized = @@ -1033,7 +1033,7 @@ where }; match hash_and_number { Some((hash, number)) => - if self.backend.have_state_at(&hash, number) { + if self.backend.have_state_at(hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) @@ -1053,7 +1053,7 @@ where /// Get block body by id. 
pub fn body( &self, - hash: &Block::Hash, + hash: Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>> { self.backend.blockchain().body(hash) } @@ -1151,7 +1151,7 @@ where { fn read_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { self.state_at(hash) @@ -1160,7 +1160,7 @@ where fn read_child_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, child_info: &ChildInfo, keys: &mut dyn Iterator, ) -> sp_blockchain::Result { @@ -1170,16 +1170,16 @@ where fn execution_proof( &self, - hash: &Block::Hash, + hash: Block::Hash, method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { - self.executor.prove_execution(&BlockId::Hash(*hash), method, call_data) + self.executor.prove_execution(&BlockId::Hash(hash), method, call_data) } fn read_proof_collection( &self, - hash: &Block::Hash, + hash: Block::Hash, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(CompactProof, u32)> { @@ -1198,14 +1198,14 @@ where fn storage_collection( &self, - hash: &Block::Hash, + hash: Block::Hash, start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { return Err(Error::Backend("Invalid start key.".to_string())) } - let state = self.state_at(&hash)?; + let state = self.state_at(hash)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(storage_key); match ChildType::from_prefixed_key(storage_key) { @@ -1398,7 +1398,7 @@ where { fn storage_keys( &self, - hash: &Block::Hash, + hash: Block::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { let keys = self.state_at(hash)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); @@ -1407,7 +1407,7 @@ where fn storage_pairs( &self, - hash: &::Hash, + hash: ::Hash, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { let state = self.state_at(hash)?; @@ -1424,7 +1424,7 @@ where fn storage_keys_iter<'a>( &self, - hash: &::Hash, + hash: ::Hash, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { @@ -1435,7 +1435,7 @@ where fn child_storage_keys_iter<'a>( &self, - hash: &::Hash, + hash: ::Hash, child_info: ChildInfo, prefix: Option<&'a StorageKey>, start_key: Option<&StorageKey>, @@ -1447,7 +1447,7 @@ where fn storage( &self, - hash: &Block::Hash, + hash: Block::Hash, key: &StorageKey, ) -> sp_blockchain::Result> { Ok(self @@ -1459,7 +1459,7 @@ where fn storage_hash( &self, - hash: &::Hash, + hash: ::Hash, key: &StorageKey, ) -> sp_blockchain::Result> { self.state_at(hash)? 
@@ -1469,7 +1469,7 @@ where fn child_storage_keys( &self, - hash: &::Hash, + hash: ::Hash, child_info: &ChildInfo, key_prefix: &StorageKey, ) -> sp_blockchain::Result> { @@ -1484,7 +1484,7 @@ where fn child_storage( &self, - hash: &::Hash, + hash: ::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { @@ -1497,7 +1497,7 @@ where fn child_storage_hash( &self, - hash: &::Hash, + hash: ::Hash, child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result> { @@ -1683,7 +1683,7 @@ where fn state_at(&self, at: &BlockId) -> Result { let hash = self.backend.blockchain().expect_block_hash_from_id(at)?; - self.state_at(&hash).map_err(Into::into) + self.state_at(hash).map_err(Into::into) } } @@ -1844,17 +1844,17 @@ where fn apply_finality( &self, operation: &mut ClientImportOperation, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { let last_best = self.backend.blockchain().info().best_hash; - self.apply_finality_with_block_hash(operation, *hash, justification, last_best, notify) + self.apply_finality_with_block_hash(operation, hash, justification, last_best, notify) } fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { @@ -1873,7 +1873,7 @@ where fn apply_finality( &self, operation: &mut ClientImportOperation, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { @@ -1882,7 +1882,7 @@ where fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, notify: bool, ) -> sp_blockchain::Result<()> { @@ -1939,7 +1939,7 @@ where { fn block_body( &self, - hash: &Block::Hash, + hash: Block::Hash, ) -> sp_blockchain::Result::Extrinsic>>> { self.body(hash) } @@ -1948,7 +1948,7 @@ where Ok(match self.header(id)? { Some(header) => { let hash = header.hash(); - match (self.body(&hash)?, self.justifications(&hash)?) { + match (self.body(hash)?, self.justifications(hash)?) 
{ (Some(extrinsics), justifications) => Some(SignedBlock { block: Block::new(header, extrinsics), justifications }), _ => None, @@ -1962,7 +1962,7 @@ where Client::block_status(self, id) } - fn justifications(&self, hash: &Block::Hash) -> sp_blockchain::Result> { + fn justifications(&self, hash: Block::Hash) -> sp_blockchain::Result> { self.backend.blockchain().justifications(hash) } @@ -1970,18 +1970,15 @@ where self.backend.blockchain().hash(number) } - fn indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result>> { + fn indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result>> { self.backend.blockchain().indexed_transaction(hash) } - fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { + fn has_indexed_transaction(&self, hash: Block::Hash) -> sp_blockchain::Result { self.backend.blockchain().has_indexed_transaction(hash) } - fn block_indexed_body( - &self, - hash: &Block::Hash, - ) -> sp_blockchain::Result>>> { + fn block_indexed_body(&self, hash: Block::Hash) -> sp_blockchain::Result>>> { self.backend.blockchain().block_indexed_body(hash) } @@ -2085,7 +2082,7 @@ where self.backend .blockchain() - .block_indexed_body(&hash) + .block_indexed_body(hash) .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e))) } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index c60ff4dd09d7b..788f119130ac0 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -346,7 +346,7 @@ fn block_builder_works_with_transactions() { .expect("block 1 was just imported. qed"); assert_eq!(client.chain_info().best_number, 1); - assert_ne!(client.state_at(&hash1).unwrap().pairs(), client.state_at(&hash0).unwrap().pairs()); + assert_ne!(client.state_at(hash1).unwrap().pairs(), client.state_at(hash0).unwrap().pairs()); assert_eq!( client .runtime_api() @@ -405,10 +405,10 @@ fn block_builder_does_not_include_invalid() { assert_eq!(client.chain_info().best_number, 1); assert_ne!( - client.state_at(&hashof1).unwrap().pairs(), - client.state_at(&hashof0).unwrap().pairs() + client.state_at(hashof1).unwrap().pairs(), + client.state_at(hashof0).unwrap().pairs() ); - assert_eq!(client.body(&hashof1).unwrap().unwrap().len(), 1) + assert_eq!(client.body(hashof1).unwrap().unwrap().len(), 1) } #[test] @@ -870,7 +870,7 @@ fn import_with_justification() { .unwrap() .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - client.finalize_block(&a2.hash(), None).unwrap(); + client.finalize_block(a2.hash(), None).unwrap(); // A2 -> A3 let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); @@ -884,11 +884,11 @@ fn import_with_justification() { assert_eq!(client.chain_info().finalized_hash, a3.hash()); - assert_eq!(client.justifications(&a3.hash()).unwrap(), Some(justification)); + assert_eq!(client.justifications(a3.hash()).unwrap(), Some(justification)); - assert_eq!(client.justifications(&a1.hash()).unwrap(), None); + assert_eq!(client.justifications(a1.hash()).unwrap(), None); - assert_eq!(client.justifications(&a2.hash()).unwrap(), None); + assert_eq!(client.justifications(a2.hash()).unwrap(), None); finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); finality_notification_check(&mut finality_notifications, &[a3.hash()], &[]); @@ -999,7 +999,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { // we finalize block B1 which is on a different branch from current best // which should trigger a 
re-org. - ClientExt::finalize_block(&client, &b1.hash(), None).unwrap(); + ClientExt::finalize_block(&client, b1.hash(), None).unwrap(); // B1 should now be the latest finalized assert_eq!(client.chain_info().finalized_hash, b1.hash()); @@ -1023,7 +1023,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { assert_eq!(client.chain_info().best_hash, b3.hash()); - ClientExt::finalize_block(&client, &b3.hash(), None).unwrap(); + ClientExt::finalize_block(&client, b3.hash(), None).unwrap(); finality_notification_check(&mut finality_notifications, &[b1.hash()], &[]); finality_notification_check(&mut finality_notifications, &[b2.hash(), b3.hash()], &[a2.hash()]); @@ -1121,7 +1121,7 @@ fn finality_notifications_content() { // Postpone import to test behavior of import of finalized block. - ClientExt::finalize_block(&client, &a2.hash(), None).unwrap(); + ClientExt::finalize_block(&client, a2.hash(), None).unwrap(); // Import and finalize D4 block_on(client.import_as_final(BlockOrigin::Own, d4.clone())).unwrap(); @@ -1285,7 +1285,7 @@ fn doesnt_import_blocks_that_revert_finality() { // we will finalize A2 which should make it impossible to import a new // B3 at the same height but that doesn't include it - ClientExt::finalize_block(&client, &a2.hash(), None).unwrap(); + ClientExt::finalize_block(&client, a2.hash(), None).unwrap(); let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap(); let expected_err = @@ -1320,7 +1320,7 @@ fn doesnt_import_blocks_that_revert_finality() { .unwrap() .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); - ClientExt::finalize_block(&client, &a3.hash(), None).unwrap(); + ClientExt::finalize_block(&client, a3.hash(), None).unwrap(); finality_notification_check(&mut finality_notifications, &[a1.hash(), a2.hash()], &[]); @@ -1620,7 +1620,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let child_prefix = StorageKey(b"sec".to_vec()); let res: Vec<_> = client - .storage_keys_iter(&block_hash, Some(&prefix), None) + .storage_keys_iter(block_hash, Some(&prefix), None) .unwrap() .map(|x| x.0) .collect(); @@ -1635,7 +1635,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let res: Vec<_> = client .storage_keys_iter( - &block_hash, + block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), ) @@ -1646,7 +1646,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let res: Vec<_> = client .storage_keys_iter( - &block_hash, + block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a686561707061676573"))), ) @@ -1656,7 +1656,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { assert_eq!(res, Vec::>::new()); let res: Vec<_> = client - .child_storage_keys_iter(&block_hash, child_info.clone(), Some(&child_prefix), None) + .child_storage_keys_iter(block_hash, child_info.clone(), Some(&child_prefix), None) .unwrap() .map(|x| x.0) .collect(); @@ -1664,7 +1664,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { let res: Vec<_> = client .child_storage_keys_iter( - &block_hash, + block_hash, child_info, None, Some(&StorageKey(b"second".to_vec())), @@ -1684,7 +1684,7 @@ fn storage_keys_iter_works() { let prefix = StorageKey(array_bytes::hex2bytes_unchecked("")); let res: Vec<_> = client - .storage_keys_iter(&block_hash, Some(&prefix), None) + .storage_keys_iter(block_hash, Some(&prefix), None) .unwrap() .take(9) .map(|x| array_bytes::bytes2hex("", &x.0)) @@ -1706,7 +1706,7 @@ fn storage_keys_iter_works() { let res: Vec<_> = client .storage_keys_iter( - 
&block_hash, + block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))), ) @@ -1729,7 +1729,7 @@ fn storage_keys_iter_works() { let res: Vec<_> = client .storage_keys_iter( - &block_hash, + block_hash, Some(&prefix), Some(&StorageKey(array_bytes::hex2bytes_unchecked( "7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa", diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs index ee524f5f72902..63fd1de374cba 100644 --- a/client/tracing/src/block/mod.rs +++ b/client/tracing/src/block/mod.rs @@ -225,7 +225,7 @@ where .ok_or_else(|| Error::MissingBlockComponent("Header not found".to_string()))?; let extrinsics = self .client - .block_body(&self.block) + .block_body(self.block) .map_err(Error::InvalidBlockId)? .ok_or_else(|| Error::MissingBlockComponent("Extrinsics not found".to_string()))?; tracing::debug!(target: "state_tracing", "Found {} extrinsics", extrinsics.len()); diff --git a/client/transaction-pool/benches/basics.rs b/client/transaction-pool/benches/basics.rs index bc6f2f7d5e947..602e84b47775c 100644 --- a/client/transaction-pool/benches/basics.rs +++ b/client/transaction-pool/benches/basics.rs @@ -111,7 +111,7 @@ impl ChainApi for TestApi { (blake2_256(&encoded).into(), encoded.len()) } - fn block_body(&self, _id: &::Hash) -> Self::BodyFuture { + fn block_body(&self, _id: ::Hash) -> Self::BodyFuture { ready(Ok(None)) } diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index f162a02ddb643..c3f9b50f9482d 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -126,7 +126,7 @@ where Pin> + Send>>; type BodyFuture = Ready::Extrinsic>>>>; - fn block_body(&self, hash: &Block::Hash) -> Self::BodyFuture { + fn block_body(&self, hash: Block::Hash) -> Self::BodyFuture { ready(self.client.block_body(hash).map_err(error::Error::from)) } diff --git a/client/transaction-pool/src/graph/pool.rs b/client/transaction-pool/src/graph/pool.rs index 99119ac8fa8ab..7b3a8db15982a 100644 --- a/client/transaction-pool/src/graph/pool.rs +++ b/client/transaction-pool/src/graph/pool.rs @@ -91,7 +91,7 @@ pub trait ChainApi: Send + Sync { fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (ExtrinsicHash, usize); /// Returns a block body given the block. - fn block_body(&self, at: &::Hash) -> Self::BodyFuture; + fn block_body(&self, at: ::Hash) -> Self::BodyFuture; /// Returns a block header given the block id. fn block_header( diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index e66c780a5ed8f..a441bf9b2a9a0 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -550,12 +550,12 @@ impl RevalidationStatus { /// Prune the known txs for the given block. 
async fn prune_known_txs_for_block>( - block_hash: &Block::Hash, + block_hash: Block::Hash, api: &Api, pool: &graph::Pool, ) -> Vec> { let extrinsics = api - .block_body(&block_hash) + .block_body(block_hash) .await .unwrap_or_else(|e| { log::warn!("Prune known transactions: error request: {}", e); @@ -567,7 +567,7 @@ async fn prune_known_txs_for_block h, Ok(None) => { log::debug!(target: "txpool", "Could not find header for {:?}.", block_hash); @@ -580,7 +580,7 @@ async fn prune_known_txs_for_block::Hash) -> Self::BodyFuture { + fn block_body(&self, _id: ::Hash) -> Self::BodyFuture { futures::future::ready(Ok(None)) } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index fdb56020661b4..dea3a7f285117 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -89,9 +89,9 @@ pub trait Backend: HeaderBackend + HeaderMetadata { /// Get block body. Returns `None` if block is not found. - fn body(&self, hash: &Block::Hash) -> Result::Extrinsic>>>; + fn body(&self, hash: Block::Hash) -> Result::Extrinsic>>>; /// Get block justifications. Returns `None` if no justification exists. - fn justifications(&self, hash: &Block::Hash) -> Result>; + fn justifications(&self, hash: Block::Hash) -> Result>; /// Get last finalized block hash. fn last_finalized(&self) -> Result; @@ -231,14 +231,14 @@ pub trait Backend: /// Get single indexed transaction by content hash. Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. - fn indexed_transaction(&self, hash: &Block::Hash) -> Result>>; + fn indexed_transaction(&self, hash: Block::Hash) -> Result>>; /// Check if indexed transaction exists. - fn has_indexed_transaction(&self, hash: &Block::Hash) -> Result { + fn has_indexed_transaction(&self, hash: Block::Hash) -> Result { Ok(self.indexed_transaction(hash)?.is_some()) } - fn block_indexed_body(&self, hash: &Block::Hash) -> Result>>>; + fn block_indexed_body(&self, hash: Block::Hash) -> Result>>>; } /// Blockchain info diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index dd416b9102fc0..881c50d434264 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -29,7 +29,7 @@ pub trait ClientExt: Sized { /// Finalize a block. 
fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()>; @@ -75,7 +75,7 @@ where { fn finalize_block( &self, - hash: &Block::Hash, + hash: Block::Hash, justification: Option, ) -> sp_blockchain::Result<()> { Finalizer::finalize_block(self, hash, justification, true) diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index e2d6efccea424..f8d551a6fa5bd 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -315,12 +315,12 @@ impl sc_transaction_pool::ChainApi for TestApi { Self::hash_and_length_inner(ex) } - fn block_body(&self, hash: &::Hash) -> Self::BodyFuture { + fn block_body(&self, hash: ::Hash) -> Self::BodyFuture { futures::future::ready(Ok(self .chain .read() .block_by_hash - .get(hash) + .get(&hash) .map(|b| b.extrinsics().to_vec()))) } diff --git a/utils/frame/benchmarking-cli/src/block/bench.rs b/utils/frame/benchmarking-cli/src/block/bench.rs index 47cd047e158d0..5a67b11f494f5 100644 --- a/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/utils/frame/benchmarking-cli/src/block/bench.rs @@ -142,7 +142,7 @@ where let block_hash = self.client.expect_block_hash_from_id(block)?; let mut raw_weight = &self .client - .storage(&block_hash, &key)? + .storage(block_hash, &key)? .ok_or(format!("Could not find System::BlockWeight for block: {}", block))? .0[..]; diff --git a/utils/frame/benchmarking-cli/src/storage/cmd.rs b/utils/frame/benchmarking-cli/src/storage/cmd.rs index 32fd5da7f95f0..ce2d52e57d641 100644 --- a/utils/frame/benchmarking-cli/src/storage/cmd.rs +++ b/utils/frame/benchmarking-cli/src/storage/cmd.rs @@ -193,7 +193,7 @@ impl StorageCmd { { let hash = client.usage_info().chain.best_hash; let empty_prefix = StorageKey(Vec::new()); - let mut keys = client.storage_keys(&hash, &empty_prefix)?; + let mut keys = client.storage_keys(hash, &empty_prefix)?; let (mut rng, _) = new_rng(None); keys.shuffle(&mut rng); @@ -201,7 +201,7 @@ impl StorageCmd { info!("Warmup round {}/{}", i + 1, self.params.warmups); for key in keys.as_slice() { let _ = client - .storage(&hash, &key) + .storage(hash, &key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty"); } diff --git a/utils/frame/benchmarking-cli/src/storage/read.rs b/utils/frame/benchmarking-cli/src/storage/read.rs index 2df7e697039e8..20c41e4a5196b 100644 --- a/utils/frame/benchmarking-cli/src/storage/read.rs +++ b/utils/frame/benchmarking-cli/src/storage/read.rs @@ -43,7 +43,7 @@ impl StorageCmd { info!("Preparing keys from block {}", best_hash); // Load all keys and randomly shuffle them. 
let empty_prefix = StorageKey(Vec::new()); - let mut keys = client.storage_keys(&best_hash, &empty_prefix)?; + let mut keys = client.storage_keys(best_hash, &empty_prefix)?; let (mut rng, _) = new_rng(None); keys.shuffle(&mut rng); @@ -55,7 +55,7 @@ impl StorageCmd { match (self.params.include_child_trees, self.is_child_key(key.clone().0)) { (true, Some(info)) => { // child tree key - let child_keys = client.child_storage_keys(&best_hash, &info, &empty_prefix)?; + let child_keys = client.child_storage_keys(best_hash, &info, &empty_prefix)?; for ck in child_keys { child_nodes.push((ck.clone(), info.clone())); } @@ -64,7 +64,7 @@ // regular key let start = Instant::now(); let v = client - .storage(&best_hash, &key) + .storage(best_hash, &key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty")?; record.append(v.0.len(), start.elapsed())?; @@ -79,7 +79,7 @@ for (key, info) in child_nodes.as_slice() { let start = Instant::now(); let v = client - .child_storage(&best_hash, info, key) + .child_storage(best_hash, info, key) .expect("Checked above to exist") .ok_or("Value unexpectedly empty")?; record.append(v.0.len(), start.elapsed())?; diff --git a/utils/frame/benchmarking-cli/src/storage/write.rs b/utils/frame/benchmarking-cli/src/storage/write.rs index 2ee37a5619136..55a7b60d55552 100644 --- a/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/utils/frame/benchmarking-cli/src/storage/write.rs @@ -77,7 +77,7 @@ impl StorageCmd { match (self.params.include_child_trees, self.is_child_key(k.to_vec())) { (true, Some(info)) => { let child_keys = - client.child_storage_keys_iter(&best_hash, info.clone(), None, None)?; + client.child_storage_keys_iter(best_hash, info.clone(), None, None)?; for ck in child_keys { child_nodes.push((ck.clone(), info.clone())); } @@ -124,7 +124,7 @@ impl StorageCmd { for (key, info) in child_nodes { if let Some(original_v) = client - .child_storage(&best_hash, &info.clone(), &key) + .child_storage(best_hash, &info.clone(), &key) .expect("Checked above to exist") { let mut new_v = vec![0; original_v.0.len()]; diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index c3d3ec816f97e..ab180c7d45d5b 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -145,7 +145,7 @@ where self.deny_unsafe.check_if_safe()?; let hash = at.unwrap_or_else(|| self.client.info().best_hash); - let state = self.backend.state_at(&hash).map_err(error_into_rpc_err)?; + let state = self.backend.state_at(hash).map_err(error_into_rpc_err)?; let (top, child) = migration_status(&state).map_err(error_into_rpc_err)?; Ok(MigrationStatusResult { From 61e7ad45da3cfd805f1e85e5b1771d88105f9467 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Tue, 8 Nov 2022 00:57:29 +0200 Subject: [PATCH 318/348] Do not update peer information if ancestor search is in progress (#12631) * Do not update peer information if ancestor search is in progress If block announcement is received from a peer while ancestor search for that same peer is still in progress, do not update the peer's best hash and best number as that causes the ancestor search to yield different information from what was expected and can cause, for example, a fork of lower height not to be downloaded.
* Block until peers are in sync --- client/network/sync/src/lib.rs | 10 +++++----- client/network/test/src/sync.rs | 4 ++++ 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 7c484835951e8..75ecb9322ca78 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -2043,17 +2043,17 @@ where return PollBlockAnnounceValidation::Nothing { is_best, who, announce } }; + if let PeerSyncState::AncestorSearch { .. } = peer.state { + trace!(target: "sync", "Peer state is ancestor search."); + return PollBlockAnnounceValidation::Nothing { is_best, who, announce } + } + if is_best { // update their best block peer.best_number = number; peer.best_hash = hash; } - if let PeerSyncState::AncestorSearch { .. } = peer.state { - trace!(target: "sync", "Peer state is ancestor search."); - return PollBlockAnnounceValidation::Nothing { is_best, who, announce } - } - // If the announced block is the best they have and is not ahead of us, our common number // is either one further ahead or it's the one they just announced, if we know about it. if is_best { diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index bbba3bc6ded62..4515677d0b1e0 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -549,6 +549,10 @@ fn syncs_header_only_forks() { while !net.peer(1).has_block(small_hash) { net.block_until_idle(); } + + net.block_until_sync(); + assert_eq!(net.peer(0).client().info().best_hash, net.peer(1).client().info().best_hash); + assert_ne!(small_hash, net.peer(0).client().info().best_hash); } #[test] From 853bd559f68b8a21a421471d3992d54b25d8619f Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Tue, 8 Nov 2022 09:07:32 +0100 Subject: [PATCH 319/348] Pipeline with ci image with rust 1.65 (#12628) * Pipeline with ci image with rust 1.65 * fix tests * use image with sha --- .gitlab-ci.yml | 2 +- .../no_std_genesis_config.stderr | 13 ------ .../undefined_event_part.stderr | 13 ------ .../undefined_genesis_config_part.stderr | 13 ------ .../undefined_origin_part.stderr | 13 ------ .../call_argument_invalid_bound.stderr | 2 +- .../call_argument_invalid_bound_2.stderr | 20 ++++----- .../call_argument_invalid_bound_3.stderr | 2 +- .../pallet_ui/event_field_not_member.stderr | 2 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 44 +++++++++---------- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 44 +++++++++---------- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 4 +- ...reference_in_impl_runtime_apis_call.stderr | 3 +- 14 files changed, 61 insertions(+), 116 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9053e39eb59bf..f663057faaad5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -47,7 +47,7 @@ variables: CARGO_INCREMENTAL: 0 DOCKER_OS: "debian:stretch" ARCH: "x86_64" - CI_IMAGE: "paritytech/ci-linux:production" + CI_IMAGE: "paritytech/ci-linux@sha256:c977b383c2033de50083fad18f6b9e698644230455e016ba708da00eb98d4683" # staging 07.11.2022 RUSTY_CACHIER_SINGLE_BRANCH: master RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true" diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr index 26c0717c0ad37..d35565fb933ac 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr +++ 
b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr @@ -29,19 +29,6 @@ help: consider importing this struct | 1 | use frame_system::GenesisConfig; | -help: if you import `GenesisConfig`, refer to it directly - | -40 - construct_runtime! { -41 - pub enum Runtime where -42 - Block = Block, -43 - NodeBlock = Block, -44 - UncheckedExtrinsic = UncheckedExtrinsic -45 - { -46 - System: frame_system::{Pallet, Call, Storage, Config, Event}, -47 - Pallet: test_pallet::{Pallet, Config}, -48 - } -49 - } - | error[E0283]: type annotations needed --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index 8f2bf7be15749..ff8ecf3041bf6 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -32,16 +32,3 @@ help: consider importing this enum | 1 | use frame_system::Event; | -help: if you import `Event`, refer to it directly - | -49 - construct_runtime! { -50 - pub enum Runtime where -51 - Block = Block, -52 - NodeBlock = Block, -53 - UncheckedExtrinsic = UncheckedExtrinsic -54 - { -55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, -56 - Pallet: pallet::{Pallet, Event}, -57 - } -58 - } - | diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index aae3aaa80c865..046369e1112b0 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -32,19 +32,6 @@ help: consider importing this struct | 1 | use frame_system::GenesisConfig; | -help: if you import `GenesisConfig`, refer to it directly - | -49 - construct_runtime! { -50 - pub enum Runtime where -51 - Block = Block, -52 - NodeBlock = Block, -53 - UncheckedExtrinsic = UncheckedExtrinsic -54 - { -55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, -56 - Pallet: pallet::{Pallet, Config}, -57 - } -58 - } - | error[E0283]: type annotations needed --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:49:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index 1a8fe64da1758..4907053b12877 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -32,19 +32,6 @@ help: consider importing this type alias | 1 | use frame_system::Origin; | -help: if you import `Origin`, refer to it directly - | -49 - construct_runtime! 
{ -50 - pub enum Runtime where -51 - Block = Block, -52 - NodeBlock = Block, -53 - UncheckedExtrinsic = UncheckedExtrinsic -54 - { -55 - System: frame_system::{Pallet, Call, Storage, Config, Event}, -56 - Pallet: pallet::{Pallet, Origin}, -57 - } -58 - } - | error[E0282]: type annotations needed --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 86e8d33c8dad1..62d8649f8af49 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -5,7 +5,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | ^^^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` - = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for `&::Bar` to implement `std::fmt::Debug` = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index c6acccaaba7d4..f486c071631ea 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -5,7 +5,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | ^^^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` - = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for `&::Bar` to implement `std::fmt::Debug` = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied @@ -21,23 +21,21 @@ error[E0369]: binary operation `==` cannot be applied to type `&::Bar: WrapperTypeEncode` is not satisfied - --> tests/pallet_ui/call_argument_invalid_bound_2.rs:1:1 + --> tests/pallet_ui/call_argument_invalid_bound_2.rs:20:36 | -1 | #[frame_support::pallet] - | ^----------------------- - | | - | _in this procedural macro expansion - | | +1 | / #[frame_support::pallet] 2 | | mod pallet { 3 | | use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; 4 | | use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; ... | 16 | | 17 | | #[pallet::call] - | |__________________^ the trait `WrapperTypeEncode` is not implemented for `::Bar` + | |__________________- required by a bound introduced by this call +... 
+20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar` | - = note: required because of the requirements on the impl of `Encode` for `::Bar` - = note: this error originates in the derive macro `frame_support::codec::Encode` which comes from the expansion of the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: required for `::Bar` to implement `Encode` error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:17:12 @@ -45,4 +43,4 @@ error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is 17 | #[pallet::call] | ^^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar` | - = note: required because of the requirements on the impl of `Decode` for `::Bar` + = note: required for `::Bar` to implement `Decode` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index 4a0b2ea67c7d6..6e51bf2dbf862 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -6,7 +6,7 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `Bar` = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` - = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` + = note: required for `&Bar` to implement `std::fmt::Debug` = note: required for the cast from `&Bar` to the object type `dyn std::fmt::Debug` help: consider annotating `Bar` with `#[derive(Debug)]` | diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index f95da9deef90a..1161f4a190231 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -17,5 +17,5 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | ^ `::Bar` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` - = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` + = note: required for `&::Bar` to implement `std::fmt::Debug` = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index cced83f207c41..42ef5a34e4c30 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -9,9 +9,9 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - = note: required because of the requirements on the impl of `Decode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` 
to implement `Decode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:10:12 @@ -29,9 +29,9 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> and 278 others - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:10:12 @@ -49,10 +49,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Vec bytes::bytes::Bytes and 3 others - = note: required because of the requirements on the impl of `Encode` for `Bar` - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `Encode` + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:21:12 @@ -70,8 +70,8 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D, E) (A, B, C, D, E, F) and 161 others - = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `StaticTypeInfo` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:21:12 @@ -84,9 +84,9 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - = note: required because of the requirements on the impl of `Decode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `Decode` + = note: required for 
`Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:21:12 @@ -104,9 +104,9 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> and 278 others - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.rs:21:12 @@ -124,7 +124,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Vec bytes::bytes::Bytes and 3 others - = note: required because of the requirements on the impl of `Encode` for `Bar` - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `Encode` + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index ab377e05d3901..461d63ebb0d9c 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -9,9 +9,9 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - = note: required because of the requirements on the impl of `Decode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `Decode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:10:12 @@ -29,9 +29,9 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> and 278 others - = note: required 
because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:10:12 @@ -49,10 +49,10 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Vec bytes::bytes::Bytes and 3 others - = note: required because of the requirements on the impl of `Encode` for `Bar` - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `Encode` + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `PartialStorageInfoTrait` error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:21:12 @@ -70,8 +70,8 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D, E) (A, B, C, D, E, F) and 161 others - = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `StaticTypeInfo` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:21:12 @@ -84,9 +84,9 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied Box Rc frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes - = note: required because of the requirements on the impl of `Decode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `Decode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:21:12 @@ -104,9 +104,9 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> and 278 others - = note: required because of 
the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied --> tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:21:12 @@ -124,7 +124,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied Vec bytes::bytes::Bytes and 3 others - = note: required because of the requirements on the impl of `Encode` for `Bar` - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `Bar` to implement `Encode` + = note: required for `Bar` to implement `FullEncode` + = note: required for `Bar` to implement `FullCodec` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageEntryMetadataBuilder` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index 8d3d7a71a313e..cce9fa70b3da5 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -14,4 +14,4 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and 78 others - = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` to implement `StorageInfoTrait` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index ebf24a1232e3c..877485dda2084 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -14,5 +14,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and 78 others - = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` - = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` + = note: required for `Key` to implement 
`KeyGeneratorMaxEncodedLen` + = note: required for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` to implement `StorageInfoTrait` diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index 6a99dcd3a1aed..06f8226ec88bf 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -35,6 +35,5 @@ note: associated function defined here | ^^^^ help: consider removing the borrow | -19 - fn test(data: &u64) { -19 + fn test(data: &u64) { +19 | fn test(data: &u64) { | From 14c3ef3898802eec15a6c4a3a711e002a73fffa1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 8 Nov 2022 10:04:55 +0100 Subject: [PATCH 320/348] `sp_trie::Recorder`: Fix recording the same key for different tries (#12636) With `StateVersion::V1` values over a certain size are not inlined and are put into the backend with their own hash. When accessing a value in the trie with a recorder, we check if the value is maybe already recorded and thus, we can check the cache. To check if a value is already recorded, we use the key of the value to differentiate them. The problem is when there are multiple tries, like multiple child tries that all have different values under the same key. Before this pull request we didn't differentiate for which trie we had already recorded a (key, value) pair. This is now done by also taking the storage root into account in the recorder to differentiate the different (key, value) pairs in the tries. --- primitives/state-machine/src/trie_backend.rs | 91 +++++++++++++++++++ .../state-machine/src/trie_backend_essence.rs | 71 ++++++++------- primitives/trie/src/cache/mod.rs | 4 +- primitives/trie/src/recorder.rs | 26 +++++- 4 files changed, 153 insertions(+), 39 deletions(-) diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 20bb2c592e925..da4250b6ba3e1 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -1117,4 +1117,95 @@ pub mod tests { ); } } + + /// Test to ensure that recording the same `key` for different tries works as expected. + /// + /// Each trie stores a different value under the same key. The values are big enough to + /// be not inlined with `StateVersion::V1`, this is important to test the expected behavior. The + /// trie recorder is expected to differentiate key access based on the different storage roots + /// of the tries.
+ #[test] + fn recording_same_key_access_in_different_tries() { + recording_same_key_access_in_different_tries_inner(StateVersion::V0); + recording_same_key_access_in_different_tries_inner(StateVersion::V1); + } + fn recording_same_key_access_in_different_tries_inner(state_version: StateVersion) { + let key = b"test_key".to_vec(); + // Use some big values to ensure that we don't keep them inline + let top_trie_val = vec![1; 1024]; + let child_trie_1_val = vec![2; 1024]; + let child_trie_2_val = vec![3; 1024]; + + let child_info_1 = ChildInfo::new_default(b"sub1"); + let child_info_2 = ChildInfo::new_default(b"sub2"); + let child_info_1 = &child_info_1; + let child_info_2 = &child_info_2; + let contents = vec![ + (None, vec![(key.clone(), Some(top_trie_val.clone()))]), + (Some(child_info_1.clone()), vec![(key.clone(), Some(child_trie_1_val.clone()))]), + (Some(child_info_2.clone()), vec![(key.clone(), Some(child_trie_2_val.clone()))]), + ]; + let in_memory = new_in_mem::>(); + let in_memory = in_memory.update(contents, state_version); + let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; + let in_memory_root = in_memory + .full_storage_root( + std::iter::empty(), + child_storage_keys.iter().map(|k| (k, std::iter::empty())), + state_version, + ) + .0; + assert_eq!(in_memory.storage(&key).unwrap().unwrap(), top_trie_val); + assert_eq!(in_memory.child_storage(child_info_1, &key).unwrap().unwrap(), child_trie_1_val); + assert_eq!(in_memory.child_storage(child_info_2, &key).unwrap().unwrap(), child_trie_2_val); + + for cache in [Some(SharedTrieCache::new(CacheSize::Unlimited)), None] { + // Run multiple times to have a different cache conditions. + for i in 0..5 { + eprintln!("Running with cache {}, iteration {}", cache.is_some(), i); + + if let Some(cache) = &cache { + if i == 2 { + cache.reset_node_cache(); + } else if i == 3 { + cache.reset_value_cache(); + } + } + + let trie = in_memory.as_trie_backend(); + let trie_root = trie.storage_root(std::iter::empty(), state_version).0; + assert_eq!(in_memory_root, trie_root); + + let proving = TrieBackendBuilder::wrap(&trie) + .with_recorder(Recorder::default()) + .with_optional_cache(cache.as_ref().map(|c| c.local_cache())) + .build(); + assert_eq!(proving.storage(&key).unwrap().unwrap(), top_trie_val); + assert_eq!( + proving.child_storage(child_info_1, &key).unwrap().unwrap(), + child_trie_1_val + ); + assert_eq!( + proving.child_storage(child_info_2, &key).unwrap().unwrap(), + child_trie_2_val + ); + + let proof = proving.extract_proof().unwrap(); + + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof) + .unwrap(); + + assert_eq!(proof_check.storage(&key).unwrap().unwrap(), top_trie_val); + assert_eq!( + proof_check.child_storage(child_info_1, &key).unwrap().unwrap(), + child_trie_1_val + ); + assert_eq!( + proof_check.child_storage(child_info_2, &key).unwrap().unwrap(), + child_trie_2_val + ); + } + } + } } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index cd2a71163e2ee..cdd1bb0bba055 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -177,7 +177,7 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss ) -> R, ) -> R { let storage_root = storage_root.unwrap_or_else(|| self.root); - let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); + let mut recorder = self.recorder.as_ref().map(|r| 
r.as_trie_recorder(storage_root)); let recorder = match recorder.as_mut() { Some(recorder) => Some(recorder as &mut dyn TrieRecorder), None => None, @@ -209,16 +209,19 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss /// This function must only be used when the operation in `callback` is /// calculating a `storage_root`. It is expected that `callback` returns /// the new storage root. This is required to register the changes in the cache - /// for the correct storage root. + /// for the correct storage root. The given `storage_root` corresponds to the root of the "old" + /// trie. If the value is not given, `self.root` is used. #[cfg(feature = "std")] fn with_recorder_and_cache_for_storage_root( &self, + storage_root: Option, callback: impl FnOnce( Option<&mut dyn TrieRecorder>, Option<&mut dyn TrieCache>>, ) -> (Option, R), ) -> R { - let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder()); + let storage_root = storage_root.unwrap_or_else(|| self.root); + let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); let recorder = match recorder.as_mut() { Some(recorder) => Some(recorder as &mut dyn TrieRecorder), None => None, @@ -244,6 +247,7 @@ impl, H: Hasher, C: AsLocalTrieCache> TrieBackendEss #[cfg(not(feature = "std"))] fn with_recorder_and_cache_for_storage_root( &self, + _: Option, callback: impl FnOnce( Option<&mut dyn TrieRecorder>, Option<&mut dyn TrieCache>>, @@ -675,7 +679,7 @@ where ) -> (H::Out, S::Overlay) { let mut write_overlay = S::Overlay::default(); - let root = self.with_recorder_and_cache_for_storage_root(|recorder, cache| { + let root = self.with_recorder_and_cache_for_storage_root(None, |recorder, cache| { let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); let res = match state_version { StateVersion::V0 => delta_trie_root::, _, _, _, _, _>( @@ -719,35 +723,36 @@ where }, }; - let new_child_root = self.with_recorder_and_cache_for_storage_root(|recorder, cache| { - let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); - match match state_version { - StateVersion::V0 => - child_delta_trie_root::, _, _, _, _, _, _>( - child_info.keyspace(), - &mut eph, - child_root, - delta, - recorder, - cache, - ), - StateVersion::V1 => - child_delta_trie_root::, _, _, _, _, _, _>( - child_info.keyspace(), - &mut eph, - child_root, - delta, - recorder, - cache, - ), - } { - Ok(ret) => (Some(ret), ret), - Err(e) => { - warn!(target: "trie", "Failed to write to trie: {}", e); - (None, child_root) - }, - } - }); + let new_child_root = + self.with_recorder_and_cache_for_storage_root(Some(child_root), |recorder, cache| { + let mut eph = Ephemeral::new(self.backend_storage(), &mut write_overlay); + match match state_version { + StateVersion::V0 => + child_delta_trie_root::, _, _, _, _, _, _>( + child_info.keyspace(), + &mut eph, + child_root, + delta, + recorder, + cache, + ), + StateVersion::V1 => + child_delta_trie_root::, _, _, _, _, _, _>( + child_info.keyspace(), + &mut eph, + child_root, + delta, + recorder, + cache, + ), + } { + Ok(ret) => (Some(ret), ret), + Err(e) => { + warn!(target: "trie", "Failed to write to trie: {}", e); + (None, child_root) + }, + } + }); let is_default = new_child_root == default_root; diff --git a/primitives/trie/src/cache/mod.rs b/primitives/trie/src/cache/mod.rs index f6a447763a80e..85539cf626857 100644 --- a/primitives/trie/src/cache/mod.rs +++ b/primitives/trie/src/cache/mod.rs @@ -490,7 +490,7 @@ mod tests { { let mut cache = 
local_cache.as_trie_db_cache(root); - let mut recorder = recorder.as_trie_recorder(); + let mut recorder = recorder.as_trie_recorder(root); let trie = TrieDBBuilder::::new(&db, &root) .with_cache(&mut cache) .with_recorder(&mut recorder) @@ -538,7 +538,7 @@ mod tests { { let mut db = db.clone(); let mut cache = local_cache.as_trie_db_cache(root); - let mut recorder = recorder.as_trie_recorder(); + let mut recorder = recorder.as_trie_recorder(root); let mut trie = TrieDBMutBuilder::::from_existing(&mut db, &mut new_root) .with_cache(&mut cache) .with_recorder(&mut recorder) diff --git a/primitives/trie/src/recorder.rs b/primitives/trie/src/recorder.rs index 6fbf698592a31..bc67cfc287942 100644 --- a/primitives/trie/src/recorder.rs +++ b/primitives/trie/src/recorder.rs @@ -41,7 +41,7 @@ const LOG_TARGET: &str = "trie-recorder"; /// The internals of [`Recorder`]. struct RecorderInner { /// The keys for that we have recorded the trie nodes and if we have recorded up to the value. - recorded_keys: HashMap, RecordedForKey>, + recorded_keys: HashMap, RecordedForKey>>, /// The encoded nodes we accessed while recording. accessed_nodes: HashMap>, } @@ -80,9 +80,16 @@ impl Clone for Recorder { impl Recorder { /// Returns the recorder as [`TrieRecorder`](trie_db::TrieRecorder) compatible type. - pub fn as_trie_recorder(&self) -> impl trie_db::TrieRecorder + '_ { + /// + /// - `storage_root`: The storage root of the trie for which accesses are recorded. This is + /// important when recording access to different tries at once (like top and child tries). + pub fn as_trie_recorder( + &self, + storage_root: H::Out, + ) -> impl trie_db::TrieRecorder + '_ { TrieRecorder:: { inner: self.inner.lock(), + storage_root, encoded_size_estimation: self.encoded_size_estimation.clone(), _phantom: PhantomData, } @@ -132,6 +139,7 @@ impl Recorder { /// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. struct TrieRecorder { inner: I, + storage_root: H::Out, encoded_size_estimation: Arc, _phantom: PhantomData, } @@ -191,6 +199,8 @@ impl>> trie_db::TrieRecord self.inner .recorded_keys + .entry(self.storage_root) + .or_default() .entry(full_key.to_vec()) .and_modify(|e| *e = RecordedForKey::Value) .or_insert(RecordedForKey::Value); @@ -206,6 +216,8 @@ impl>> trie_db::TrieRecord // accounted for by the recorded node that holds the hash. self.inner .recorded_keys + .entry(self.storage_root) + .or_default() .entry(full_key.to_vec()) .or_insert(RecordedForKey::Hash); }, @@ -221,6 +233,8 @@ impl>> trie_db::TrieRecord // that the value doesn't exist in the trie. 
self.inner + .recorded_keys + .entry(self.storage_root) + .or_default() .entry(full_key.to_vec()) .and_modify(|e| *e = RecordedForKey::Value) .or_insert(RecordedForKey::Value); @@ -231,7 +245,11 @@ impl>> trie_db::TrieRecord } fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey { - self.inner.recorded_keys.get(key).copied().unwrap_or(RecordedForKey::None) + self.inner + .recorded_keys + .get(&self.storage_root) + .and_then(|k| k.get(key).copied()) + .unwrap_or(RecordedForKey::None) } } @@ -267,7 +285,7 @@ mod tests { let recorder = Recorder::default(); { - let mut trie_recorder = recorder.as_trie_recorder(); + let mut trie_recorder = recorder.as_trie_recorder(root); let trie = TrieDBBuilder::::new(&db, &root) .with_recorder(&mut trie_recorder) .build(); From 327180db5cb2744dcd3de354195d79ed8402d8a0 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Tue, 8 Nov 2022 11:21:43 +0100 Subject: [PATCH 321/348] Fix UI tests (#12642) --- .../tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr index 2f8d0cc761ab2..a5ec31a9bb4e7 100644 --- a/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr +++ b/frame/support/test/tests/pallet_ui/dev_mode_without_arg_max_encoded_len.stderr @@ -14,4 +14,4 @@ error[E0277]: the trait bound `Vec: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) and 78 others - = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` + = note: required for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageMyStorage, Vec>` to implement `StorageInfoTrait` From a1c1286d2ca6360a16d772cc8bea2190f77f4d8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Tue, 8 Nov 2022 11:33:43 +0100 Subject: [PATCH 322/348] `payment_queryInfo`: Make it work with `WeightV2` (#12633) * `payment_queryInfo`: Make it work with `WeightV2` The runtime api for querying the payment info depends on the `Weight` type and broke for old runtimes that still use the `WeightV1`. This pull request fixes this by: 1. Bumping the version of the runtime api. 2. Making the node side code use the correct runtime api function depending on the version of the runtime api. 3. Make the RPC always return `WeightV1`. Users of the api should switch to `state_call` and decide based on the version of the runtime api which `Weight` type is being returned. * Fix tests * Review comment --- Cargo.lock | 2 +
* Fix tests * Review comment --- Cargo.lock | 2 + frame/transaction-payment/rpc/Cargo.toml | 1 + .../rpc/runtime-api/Cargo.toml | 2 + .../rpc/runtime-api/src/lib.rs | 4 ++ frame/transaction-payment/rpc/src/lib.rs | 47 +++++++++++++++---- frame/transaction-payment/src/types.rs | 15 ++++-- primitives/weights/src/lib.rs | 6 +-- 7 files changed, 61 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 510161e225732..48205d9bd86da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6326,6 +6326,7 @@ dependencies = [ "sp-core", "sp-rpc", "sp-runtime", + "sp-weights", ] [[package]] @@ -6336,6 +6337,7 @@ dependencies = [ "parity-scale-codec", "sp-api", "sp-runtime", + "sp-weights", ] [[package]] diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 16c2cc55efefb..9dd42c12c8bbf 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -21,3 +21,4 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } +sp-weights = { version = "4.0.0", path = "../../../primitives/weights" } diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 5e1cb46753524..c0b816684a2f3 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -17,6 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../transaction-payment" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../../primitives/runtime" } +sp-weights = { version = "4.0.0", default-features = false, path = "../../../../primitives/weights" } [features] default = ["std"] @@ -25,4 +26,5 @@ std = [ "pallet-transaction-payment/std", "sp-api/std", "sp-runtime/std", + "sp-weights/std", ] diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index 6944593daa57a..10fd2a9e61fc1 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -25,13 +25,17 @@ use sp_runtime::traits::MaybeDisplay; pub use pallet_transaction_payment::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; sp_api::decl_runtime_apis! 
{ + #[api_version(2)] pub trait TransactionPaymentApi where Balance: Codec + MaybeDisplay, { + #[changed_in(2)] + fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_info(uxt: Block::Extrinsic, len: u32) -> RuntimeDispatchInfo; fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; } + #[api_version(2)] pub trait TransactionPaymentCallApi where Balance: Codec + MaybeDisplay, diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index 0c7e26cec0f58..19007d37963ec 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -26,7 +26,7 @@ use jsonrpsee::{ types::error::{CallError, ErrorCode, ErrorObject}, }; use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::Bytes; use sp_rpc::number::NumberOrHex; @@ -82,8 +82,10 @@ impl From for i32 { } impl - TransactionPaymentApiServer<::Hash, RuntimeDispatchInfo> - for TransactionPayment + TransactionPaymentApiServer< + ::Hash, + RuntimeDispatchInfo, + > for TransactionPayment where Block: BlockT, C: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, @@ -94,7 +96,7 @@ where &self, encoded_xt: Bytes, at: Option, - ) -> RpcResult> { + ) -> RpcResult> { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); @@ -107,14 +109,41 @@ where Some(format!("{:?}", e)), )) })?; - api.query_info(&at, uxt, encoded_len).map_err(|e| { + + fn map_err(error: impl ToString, desc: &'static str) -> CallError { CallError::Custom(ErrorObject::owned( Error::RuntimeError.into(), - "Unable to query dispatch info.", - Some(e.to_string()), + desc, + Some(error.to_string()), )) - .into() - }) + } + + let api_version = api + .api_version::>(&at) + .map_err(|e| map_err(e, "Failed to get transaction payment runtime api version"))? + .ok_or_else(|| { + CallError::Custom(ErrorObject::owned( + Error::RuntimeError.into(), + "Transaction payment runtime api wasn't found in the runtime", + None::, + )) + })?; + + if api_version < 2 { + #[allow(deprecated)] + api.query_info_before_version_2(&at, uxt, encoded_len) + .map_err(|e| map_err(e, "Unable to query dispatch info.").into()) + } else { + let res = api + .query_info(&at, uxt, encoded_len) + .map_err(|e| map_err(e, "Unable to query dispatch info."))?; + + Ok(RuntimeDispatchInfo { + weight: sp_weights::OldWeight(res.weight.ref_time()), + class: res.class, + partial_fee: res.partial_fee, + }) + } } fn query_fee_details( diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs index fff41ef6937f5..d1a480b64e116 100644 --- a/frame/transaction-payment/src/types.rs +++ b/frame/transaction-payment/src/types.rs @@ -24,7 +24,7 @@ use serde::{Deserialize, Serialize}; use sp_runtime::traits::{AtLeast32BitUnsigned, Zero}; use sp_std::prelude::*; -use frame_support::{dispatch::DispatchClass, weights::Weight}; +use frame_support::dispatch::DispatchClass; /// The base fee and adjusted weight and length fees constitute the _inclusion fee_. 
#[derive(Encode, Decode, Clone, Eq, PartialEq)] @@ -94,9 +94,15 @@ impl FeeDetails { #[derive(Eq, PartialEq, Encode, Decode, Default)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] -#[cfg_attr(feature = "std", serde(bound(serialize = "Balance: std::fmt::Display")))] -#[cfg_attr(feature = "std", serde(bound(deserialize = "Balance: std::str::FromStr")))] -pub struct RuntimeDispatchInfo { +#[cfg_attr( + feature = "std", + serde(bound(serialize = "Balance: std::fmt::Display, Weight: Serialize")) +)] +#[cfg_attr( + feature = "std", + serde(bound(deserialize = "Balance: std::str::FromStr, Weight: Deserialize<'de>")) +)] +pub struct RuntimeDispatchInfo { /// Weight of this dispatch. pub weight: Weight, /// Class of this dispatch. @@ -131,6 +137,7 @@ mod serde_balance { #[cfg(test)] mod tests { use super::*; + use frame_support::weights::Weight; #[test] fn should_serialize_and_deserialize_properly_with_string() { diff --git a/primitives/weights/src/lib.rs b/primitives/weights/src/lib.rs index e1ac7fcd4e892..954fea91e28dc 100644 --- a/primitives/weights/src/lib.rs +++ b/primitives/weights/src/lib.rs @@ -26,8 +26,6 @@ #![cfg_attr(not(feature = "std"), no_std)] -extern crate self as sp_weights; - mod weight_v2; use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; @@ -70,6 +68,8 @@ pub mod constants { MaxEncodedLen, TypeInfo, )] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(transparent))] pub struct OldWeight(pub u64); /// The weight of database operations that the runtime can invoke. @@ -106,7 +106,7 @@ impl RuntimeDbWeight { /// coeff_integer * x^(degree) + coeff_frac * x^(degree) /// ``` /// -/// The `negative` value encodes whether the term is added or substracted from the +/// The `negative` value encodes whether the term is added or subtracted from the /// overall polynomial result. #[derive(Clone, Encode, Decode, TypeInfo)] pub struct WeightToFeeCoefficient { From 335007ca286c964b8f64d47f20a7f4e808b76596 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Tue, 8 Nov 2022 11:58:02 +0100 Subject: [PATCH 323/348] State-db refactoring (#12239) * Prune discarded blocks immediately * state-db refactoring part 1 * Some renames * Get rid of pending state * Revert "Prune discarded blocks immediately" This reverts commit 790f54038b52ff379a573ed0806f38d09af098ec. 
* Cleanup * Make clippy happy * Minor changes --- client/db/src/lib.rs | 18 +- client/state-db/src/lib.rs | 57 +--- client/state-db/src/noncanonical.rs | 245 ++++------------ client/state-db/src/pruning.rs | 418 +++++++--------------------- 4 files changed, 181 insertions(+), 557 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index fc031e2aaba59..3bbff1625f2f9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1994,15 +1994,15 @@ impl sc_client_api::backend::Backend for Backend { let usage = operation.old_state.usage_info(); self.state_usage.merge_sm(usage); - match self.try_commit_operation(operation) { - Ok(_) => { - self.storage.state_db.apply_pending(); - Ok(()) - }, - e @ Err(_) => { - self.storage.state_db.revert_pending(); - e - }, + if let Err(e) = self.try_commit_operation(operation) { + let state_meta_db = StateMetaDb(self.storage.db.clone()); + self.storage + .state_db + .reset(state_meta_db) + .map_err(sp_blockchain::Error::from_state_db)?; + Err(e) + } else { + Ok(()) } } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index f21b707a489f0..01a198a1b3c1e 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -291,6 +291,7 @@ pub struct StateDbSync { non_canonical: NonCanonicalOverlay, pruning: Option>, pinned: HashMap, + ref_counting: bool, } impl @@ -311,7 +312,7 @@ impl PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, }; - Ok(StateDbSync { mode, non_canonical, pruning, pinned: Default::default() }) + Ok(StateDbSync { mode, non_canonical, pruning, pinned: Default::default(), ref_counting }) } fn insert_block( @@ -372,9 +373,9 @@ impl match self.pruning.as_ref() { None => IsPruned::NotPruned, Some(pruning) => match pruning.have_block(hash, number) { - HaveBlock::NotHave => IsPruned::Pruned, - HaveBlock::Have => IsPruned::NotPruned, - HaveBlock::MayHave => IsPruned::MaybePruned, + HaveBlock::No => IsPruned::Pruned, + HaveBlock::Yes => IsPruned::NotPruned, + HaveBlock::Maybe => IsPruned::MaybePruned, }, } } @@ -444,9 +445,9 @@ impl let have_block = self.non_canonical.have_block(hash) || self.pruning.as_ref().map_or(false, |pruning| { match pruning.have_block(hash, number) { - HaveBlock::NotHave => false, - HaveBlock::Have => true, - HaveBlock::MayHave => hint(), + HaveBlock::No => false, + HaveBlock::Yes => true, + HaveBlock::Maybe => hint(), } }); if have_block { @@ -496,30 +497,6 @@ impl db.get(key.as_ref()).map_err(Error::Db) } - fn apply_pending(&mut self) { - self.non_canonical.apply_pending(); - if let Some(pruning) = &mut self.pruning { - pruning.apply_pending(); - } - let next_hash = self.pruning.as_mut().map(|p| p.next_hash()); - trace!( - target: "forks", - "First available: {:?} ({}), Last canon: {:?} ({}), Best forks: {:?}", - next_hash, - self.pruning.as_ref().map(|p| p.pending()).unwrap_or(0), - self.non_canonical.last_canonicalized_hash(), - self.non_canonical.last_canonicalized_block_number().unwrap_or(0), - self.non_canonical.top_level(), - ); - } - - fn revert_pending(&mut self) { - if let Some(pruning) = &mut self.pruning { - pruning.revert_pending(); - } - self.non_canonical.revert_pending(); - } - fn memory_info(&self) -> StateDbMemoryInfo { StateDbMemoryInfo { non_canonical: MemorySize::from_bytes(malloc_size(&self.non_canonical)), @@ -654,14 +631,11 @@ impl return self.db.read().is_pruned(hash, number) } - /// Apply all pending changes - pub fn apply_pending(&self) { - self.db.write().apply_pending(); - } - - /// Revert all pending changes - pub fn 
revert_pending(&self) { - self.db.write().revert_pending(); + /// Reset in-memory changes to the last disk-backed state. + pub fn reset(&self, db: D) -> Result<(), Error> { + let mut state_db = self.db.write(); + *state_db = StateDbSync::new(state_db.mode.clone(), state_db.ref_counting, db)?; + Ok(()) } /// Returns the current memory statistics of this instance. @@ -766,9 +740,7 @@ mod tests { ) .unwrap(), ); - state_db.apply_pending(); db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(1)).unwrap()); - state_db.apply_pending(); db.commit( &state_db .insert_block( @@ -779,11 +751,8 @@ mod tests { ) .unwrap(), ); - state_db.apply_pending(); db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(21)).unwrap()); - state_db.apply_pending(); db.commit(&state_db.canonicalize_block(&H256::from_low_u64_be(3)).unwrap()); - state_db.apply_pending(); (db, state_db) } diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 559fc7ca023fe..3711cf7a42667 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -19,8 +19,6 @@ //! Canonicalization window. //! Maintains trees of block overlays and allows discarding trees/roots //! The overlays are added in `insert` and removed in `canonicalize`. -//! All pending changes are kept in memory until next call to `apply_pending` or -//! `revert_pending` use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb, StateDbError}; use codec::{Decode, Encode}; @@ -37,8 +35,6 @@ pub struct NonCanonicalOverlay { last_canonicalized: Option<(BlockHash, u64)>, levels: VecDeque>, parents: HashMap, - pending_canonicalizations: Vec, - pending_insertions: Vec, values: HashMap, // ref counted // would be deleted but kept around because block is pinned, ref counted. 
pinned: HashMap, @@ -229,8 +225,6 @@ impl NonCanonicalOverlay { last_canonicalized, levels, parents, - pending_canonicalizations: Default::default(), - pending_insertions: Default::default(), pinned: Default::default(), pinned_insertions: Default::default(), values, @@ -316,9 +310,8 @@ impl NonCanonicalOverlay { deleted: changeset.deleted, }; commit.meta.inserted.push((journal_key, journal_record.encode())); - trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); + trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} {:?} ({} inserted, {} deleted)", number, index, hash, journal_record.inserted.len(), journal_record.deleted.len()); insert_values(&mut self.values, journal_record.inserted); - self.pending_insertions.push(hash.clone()); Ok(commit) } @@ -355,24 +348,7 @@ impl NonCanonicalOverlay { } pub fn last_canonicalized_block_number(&self) -> Option { - match self.last_canonicalized.as_ref().map(|&(_, n)| n) { - Some(n) => Some(n + self.pending_canonicalizations.len() as u64), - None if !self.pending_canonicalizations.is_empty() => - Some(self.pending_canonicalizations.len() as u64), - _ => None, - } - } - - pub fn last_canonicalized_hash(&self) -> Option { - self.last_canonicalized.as_ref().map(|&(ref h, _)| h.clone()) - } - - pub fn top_level(&self) -> Vec<(BlockHash, u64)> { - let start = self.last_canonicalized_block_number().unwrap_or(0); - self.levels - .get(self.pending_canonicalizations.len()) - .map(|level| level.blocks.iter().map(|r| (r.hash.clone(), start)).collect()) - .unwrap_or_default() + self.last_canonicalized.as_ref().map(|&(_, n)| n) } /// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root. 
@@ -384,10 +360,10 @@ impl NonCanonicalOverlay { commit: &mut CommitSet, ) -> Result { trace!(target: "state-db", "Canonicalizing {:?}", hash); - let level = self - .levels - .get(self.pending_canonicalizations.len()) - .ok_or(StateDbError::InvalidBlock)?; + let level = match self.levels.pop_front() { + Some(level) => level, + None => return Err(StateDbError::InvalidBlock), + }; let index = level .blocks .iter() @@ -396,91 +372,63 @@ impl NonCanonicalOverlay { let mut discarded_journals = Vec::new(); let mut discarded_blocks = Vec::new(); - for (i, overlay) in level.blocks.iter().enumerate() { - if i != index { + for (i, overlay) in level.blocks.into_iter().enumerate() { + let mut pinned_children = 0; + // That's the one we need to canonicalize + if i == index { + commit.data.inserted.extend(overlay.inserted.iter().map(|k| { + ( + k.clone(), + self.values + .get(k) + .expect("For each key in overlays there's a value in values") + .1 + .clone(), + ) + })); + commit.data.deleted.extend(overlay.deleted.clone()); + } else { + // Discard this overlay self.discard_journals( - self.pending_canonicalizations.len() + 1, + 0, &mut discarded_journals, &mut discarded_blocks, &overlay.hash, ); + pinned_children = discard_descendants( + &mut self.levels.as_mut_slices(), + &mut self.values, + &mut self.parents, + &self.pinned, + &mut self.pinned_insertions, + &overlay.hash, + ); + } + if self.pinned.contains_key(&overlay.hash) { + pinned_children += 1; + } + if pinned_children != 0 { + self.pinned_insertions + .insert(overlay.hash.clone(), (overlay.inserted, pinned_children)); + } else { + self.parents.remove(&overlay.hash); + discard_values(&mut self.values, overlay.inserted); } discarded_journals.push(overlay.journal_key.clone()); discarded_blocks.push(overlay.hash.clone()); } - - // get the one we need to canonicalize - let overlay = &level.blocks[index]; - commit.data.inserted.extend(overlay.inserted.iter().map(|k| { - ( - k.clone(), - self.values - .get(k) - .expect("For each key in overlays there's a value in values") - .1 - .clone(), - ) - })); - commit.data.deleted.extend(overlay.deleted.clone()); - commit.meta.deleted.append(&mut discarded_journals); - let canonicalized = - (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); + + let canonicalized = (hash.clone(), self.front_block_number()); commit .meta .inserted .push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); - self.pending_canonicalizations.push(hash.clone()); - Ok(canonicalized.1) - } - fn apply_canonicalizations(&mut self) { - let last = self.pending_canonicalizations.last().cloned(); - let count = self.pending_canonicalizations.len() as u64; - for hash in self.pending_canonicalizations.drain(..) 
{ - trace!(target: "state-db", "Post canonicalizing {:?}", hash); - let level = - self.levels.pop_front().expect("Hash validity is checked in `canonicalize`"); - let index = level - .blocks - .iter() - .position(|overlay| overlay.hash == hash) - .expect("Hash validity is checked in `canonicalize`"); - - // discard unfinalized overlays and values - for (i, overlay) in level.blocks.into_iter().enumerate() { - let mut pinned_children = if i != index { - discard_descendants( - &mut self.levels.as_mut_slices(), - &mut self.values, - &mut self.parents, - &self.pinned, - &mut self.pinned_insertions, - &overlay.hash, - ) - } else { - 0 - }; - if self.pinned.contains_key(&overlay.hash) { - pinned_children += 1; - } - if pinned_children != 0 { - self.pinned_insertions - .insert(overlay.hash.clone(), (overlay.inserted, pinned_children)); - } else { - self.parents.remove(&overlay.hash); - discard_values(&mut self.values, overlay.inserted); - } - } - } - if let Some(hash) = last { - let last_canonicalized = ( - hash, - self.last_canonicalized.as_ref().map(|(_, n)| n + count).unwrap_or(count - 1), - ); - self.last_canonicalized = Some(last_canonicalized); - } + let num = canonicalized.1; + self.last_canonicalized = Some(canonicalized); + Ok(num) } /// Get a value from the node overlay. This searches in every existing changeset. @@ -494,8 +442,7 @@ impl NonCanonicalOverlay { /// Check if the block is in the canonicalization queue. pub fn have_block(&self, hash: &BlockHash) -> bool { - (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) && - !self.pending_canonicalizations.contains(hash) + self.parents.contains_key(hash) } /// Revert a single level. Returns commit set that deletes the journal or `None` if not @@ -543,50 +490,8 @@ impl NonCanonicalOverlay { } } - fn revert_insertions(&mut self) { - self.pending_insertions.reverse(); - for hash in self.pending_insertions.drain(..) { - self.parents.remove(&hash); - // find a level. When iterating insertions backwards the hash is always last in the - // level. - let level_index = self - .levels - .iter() - .position(|level| { - level.blocks.last().expect("Hash is added in `insert` in reverse order").hash == - hash - }) - .expect("Hash is added in insert"); - - let overlay_index = self.levels[level_index].blocks.len() - 1; - let overlay = self.levels[level_index].remove(overlay_index); - discard_values(&mut self.values, overlay.inserted); - if self.levels[level_index].blocks.is_empty() { - debug_assert_eq!(level_index, self.levels.len() - 1); - self.levels.pop_back(); - } - } - } - - /// Apply all pending changes - pub fn apply_pending(&mut self) { - self.apply_canonicalizations(); - self.pending_insertions.clear(); - } - - /// Revert all pending changes - pub fn revert_pending(&mut self) { - self.pending_canonicalizations.clear(); - self.revert_insertions(); - } - /// Pin state values in memory pub fn pin(&mut self, hash: &BlockHash) { - if self.pending_insertions.contains(hash) { - // Pinning pending state is not implemented. Pending states - // won't be pruned for quite some time anyway, so it's not a big deal. 
- return - } let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { trace!(target: "state-db-pin", "Pinned non-canon block: {:?}", hash); @@ -779,7 +684,6 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h1, &mut commit).unwrap(); db.commit(&commit); - overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); @@ -806,18 +710,13 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h1, &mut commit).unwrap(); db.commit(&commit); - assert!(contains(&overlay, 5)); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 2); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 1); - assert_eq!(overlay.parents.len(), 1); assert!(!contains(&overlay, 5)); assert!(contains(&overlay, 7)); + assert_eq!(overlay.levels.len(), 1); + assert_eq!(overlay.parents.len(), 1); let mut commit = CommitSet::default(); overlay.canonicalize(&h2, &mut commit).unwrap(); db.commit(&commit); - overlay.apply_pending(); assert_eq!(overlay.levels.len(), 0); assert_eq!(overlay.parents.len(), 0); assert!(db.data_eq(&make_db(&[1, 4, 6, 7, 8]))); @@ -836,13 +735,11 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h_1, &mut commit).unwrap(); db.commit(&commit); - assert!(contains(&overlay, 1)); - overlay.apply_pending(); assert!(!contains(&overlay, 1)); } #[test] - fn insert_with_pending_canonicalization() { + fn insert_and_canonicalize() { let h1 = H256::random(); let h2 = H256::random(); let h3 = H256::random(); @@ -851,13 +748,11 @@ mod tests { let changeset = make_changeset(&[], &[]); db.commit(&overlay.insert(&h1, 1, &H256::default(), changeset.clone()).unwrap()); db.commit(&overlay.insert(&h2, 2, &h1, changeset.clone()).unwrap()); - overlay.apply_pending(); let mut commit = CommitSet::default(); overlay.canonicalize(&h1, &mut commit).unwrap(); overlay.canonicalize(&h2, &mut commit).unwrap(); db.commit(&commit); db.commit(&overlay.insert(&h3, 3, &h2, changeset.clone()).unwrap()); - overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); } @@ -927,7 +822,6 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h_1, &mut commit).unwrap(); db.commit(&commit); - overlay.apply_pending(); assert_eq!(overlay.levels.len(), 2); assert_eq!(overlay.parents.len(), 6); assert!(!contains(&overlay, 1)); @@ -948,7 +842,6 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h_1_2, &mut commit).unwrap(); db.commit(&commit); - overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); assert_eq!(overlay.parents.len(), 3); assert!(!contains(&overlay, 11)); @@ -965,7 +858,6 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h_1_2_2, &mut commit).unwrap(); db.commit(&commit); - overlay.apply_pending(); assert_eq!(overlay.levels.len(), 0); assert_eq!(overlay.parents.len(), 0); assert!(db.data_eq(&make_db(&[1, 12, 122]))); @@ -994,31 +886,6 @@ mod tests { assert!(overlay.revert_one().is_none()); } - #[test] - fn revert_pending_insertion() { - let h1 = H256::random(); - let h2_1 = H256::random(); - let h2_2 = H256::random(); - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - let changeset3 = make_changeset(&[9], &[]); - overlay.insert(&h1, 1, &H256::default(), changeset1).unwrap(); - assert!(contains(&overlay, 5)); - overlay.insert(&h2_1, 2, &h1, 
changeset2).unwrap(); - overlay.insert(&h2_2, 2, &h1, changeset3).unwrap(); - assert!(contains(&overlay, 7)); - assert!(contains(&overlay, 5)); - assert!(contains(&overlay, 9)); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 3); - overlay.revert_pending(); - assert!(!contains(&overlay, 5)); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - } - #[test] fn keeps_pinned() { let mut db = make_db(&[]); @@ -1033,14 +900,12 @@ mod tests { let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap()); - overlay.apply_pending(); overlay.pin(&h_1); let mut commit = CommitSet::default(); overlay.canonicalize(&h_2, &mut commit).unwrap(); db.commit(&commit); - overlay.apply_pending(); assert!(contains(&overlay, 1)); overlay.unpin(&h_1); assert!(!contains(&overlay, 1)); @@ -1064,14 +929,12 @@ mod tests { db.commit(&overlay.insert(&h_1, 1, &H256::default(), c_1).unwrap()); db.commit(&overlay.insert(&h_2, 1, &H256::default(), c_2).unwrap()); db.commit(&overlay.insert(&h_3, 1, &H256::default(), c_3).unwrap()); - overlay.apply_pending(); overlay.pin(&h_1); let mut commit = CommitSet::default(); overlay.canonicalize(&h_3, &mut commit).unwrap(); db.commit(&commit); - overlay.apply_pending(); // 1_2 should be discarded, 1_1 is pinned assert!(contains(&overlay, 1)); overlay.unpin(&h_1); @@ -1094,14 +957,12 @@ mod tests { db.commit(&overlay.insert(&h_11, 1, &H256::default(), c_11).unwrap()); db.commit(&overlay.insert(&h_12, 1, &H256::default(), c_12).unwrap()); db.commit(&overlay.insert(&h_21, 2, &h_11, c_21).unwrap()); - overlay.apply_pending(); overlay.pin(&h_21); let mut commit = CommitSet::default(); overlay.canonicalize(&h_12, &mut commit).unwrap(); db.commit(&commit); - overlay.apply_pending(); // 1_1 and 2_1 should be both pinned assert!(contains(&overlay, 1)); overlay.unpin(&h_21); @@ -1129,12 +990,10 @@ mod tests { overlay.canonicalize(&root, &mut commit).unwrap(); overlay.canonicalize(&h2, &mut commit).unwrap(); // h11 should stay in the DB db.commit(&commit); - overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); assert!(contains(&overlay, 21)); assert!(!contains(&overlay, 11)); assert!(db.get_meta(&to_journal_key(12, 1)).unwrap().is_some()); - assert!(db.get_meta(&to_journal_key(12, 0)).unwrap().is_none()); // Restore into a new overlay and check that journaled value exists. let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); @@ -1143,7 +1002,6 @@ mod tests { let mut commit = CommitSet::default(); overlay.canonicalize(&h21, &mut commit).unwrap(); // h11 should stay in the DB db.commit(&commit); - overlay.apply_pending(); assert!(!contains(&overlay, 21)); } @@ -1167,7 +1025,6 @@ mod tests { overlay.canonicalize(&root, &mut commit).unwrap(); overlay.canonicalize(&h2, &mut commit).unwrap(); // h11 should stay in the DB db.commit(&commit); - overlay.apply_pending(); // add another block at top level. 
It should reuse journal index 0 of previously discarded // block diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 50a46def59541..458522b8119fd 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -29,11 +29,8 @@ use crate::{ DEFAULT_MAX_BLOCK_CONSTRAINT, }; use codec::{Decode, Encode}; -use log::{error, trace, warn}; -use std::{ - cmp, - collections::{HashMap, HashSet, VecDeque}, -}; +use log::trace; +use std::collections::{HashMap, HashSet, VecDeque}; pub(crate) const LAST_PRUNED: &[u8] = b"last_pruned"; const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; @@ -44,14 +41,8 @@ pub struct RefWindow { /// A queue of blocks keep tracking keys that should be deleted for each block in the /// pruning window. queue: DeathRowQueue, - /// Block number that corresponds to the front of `death_rows`. + /// Block number that is next to be pruned. base: u64, - /// Number of call of `note_canonical` after - /// last call `apply_pending` or `revert_pending` - pending_canonicalizations: usize, - /// Number of calls of `prune_one` after - /// last call `apply_pending` or `revert_pending` - pending_prunings: usize, } /// `DeathRowQueue` used to keep track of blocks in the pruning window, there are two flavors: @@ -72,13 +63,13 @@ enum DeathRowQueue { #[ignore_malloc_size_of = "Shared data"] db: D, /// A queue of keys that should be deleted for each block in the pruning window. - /// Only caching the first fews blocks of the pruning window, blocks inside are + /// Only caching the first few blocks of the pruning window, blocks inside are /// successive and ordered by block number cache: VecDeque>, /// A soft limit of the cache's size cache_capacity: usize, - /// The number of blocks in queue that are not loaded into `cache`. - uncached_blocks: usize, + /// Last block number added to the window + last: Option, }, } @@ -99,7 +90,7 @@ impl DeathRowQueue { let record: JournalRecord = Decode::decode(&mut record.as_slice())?; trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - queue.import(base, record); + queue.import(base, block, record); }, None => break, } @@ -113,36 +104,30 @@ impl DeathRowQueue { fn new_db_backed( db: D, base: u64, - mut uncached_blocks: usize, + last: Option, window_size: u32, ) -> Result, Error> { // limit the cache capacity from 1 to `DEFAULT_MAX_BLOCK_CONSTRAINT` let cache_capacity = window_size.clamp(1, DEFAULT_MAX_BLOCK_CONSTRAINT) as usize; let mut cache = VecDeque::with_capacity(cache_capacity); trace!(target: "state-db", "Reading pruning journal for the database-backed queue. Pending #{}", base); - // Load block from db - DeathRowQueue::load_batch_from_db( - &db, - &mut uncached_blocks, - &mut cache, - base, - cache_capacity, - )?; - Ok(DeathRowQueue::DbBacked { db, cache, cache_capacity, uncached_blocks }) + DeathRowQueue::load_batch_from_db(&db, &mut cache, base, cache_capacity)?; + Ok(DeathRowQueue::DbBacked { db, cache, cache_capacity, last }) } /// import a new block to the back of the queue - fn import(&mut self, base: u64, journal_record: JournalRecord) { + fn import(&mut self, base: u64, num: u64, journal_record: JournalRecord) { let JournalRecord { hash, inserted, deleted } = journal_record; + trace!(target: "state-db", "Importing {}, base={}", num, base); match self { - DeathRowQueue::DbBacked { uncached_blocks, cache, cache_capacity, .. 
} => { - // `uncached_blocks` is zero means currently all block are loaded into `cache` - // thus if `cache` is not full, load the next block into `cache` too - if *uncached_blocks == 0 && cache.len() < *cache_capacity { + DeathRowQueue::DbBacked { cache, cache_capacity, last, .. } => { + // If the new block continues cached range and there is space, load it directly into + // cache. + if num == base + cache.len() as u64 && cache.len() < *cache_capacity { + trace!(target: "state-db", "Adding to DB backed cache {:?} (#{})", hash, num); cache.push_back(DeathRow { hash, deleted: deleted.into_iter().collect() }); - } else { - *uncached_blocks += 1; } + *last = Some(num); }, DeathRowQueue::Mem { death_rows, death_index } => { // remove all re-inserted keys from death rows @@ -168,16 +153,9 @@ impl DeathRowQueue { base: u64, ) -> Result>, Error> { match self { - DeathRowQueue::DbBacked { db, uncached_blocks, cache, cache_capacity } => { - if cache.is_empty() && *uncached_blocks != 0 { - // load more blocks from db since there are still blocks in it - DeathRowQueue::load_batch_from_db( - db, - uncached_blocks, - cache, - base, - *cache_capacity, - )?; + DeathRowQueue::DbBacked { db, cache, cache_capacity, .. } => { + if cache.is_empty() { + DeathRowQueue::load_batch_from_db(db, cache, base, *cache_capacity)?; } Ok(cache.pop_front()) }, @@ -193,113 +171,37 @@ impl DeathRowQueue { } } - /// Revert recent additions to the queue, namely remove `amount` number of blocks from the back - /// of the queue, `base` is the block number of the first block of the queue - fn revert_recent_add(&mut self, base: u64, amout: usize) { - debug_assert!(amout <= self.len()); - match self { - DeathRowQueue::DbBacked { uncached_blocks, cache, .. } => { - // remove from `uncached_blocks` if it can cover - if *uncached_blocks >= amout { - *uncached_blocks -= amout; - return - } - // reset `uncached_blocks` and remove remain blocks from `cache` - let remain = amout - *uncached_blocks; - *uncached_blocks = 0; - cache.truncate(cache.len() - remain); - }, - DeathRowQueue::Mem { death_rows, death_index } => { - // Revert recent addition to the queue - // Note that pending insertions might cause some existing deletions to be removed - // from `death_index` We don't bother to track and revert that for now. This means - // that a few nodes might end up no being deleted in case transaction fails and - // `revert_pending` is called. - death_rows.truncate(death_rows.len() - amout); - let new_max_block = death_rows.len() as u64 + base; - death_index.retain(|_, block| *block < new_max_block); - }, - } - } - - /// Load a batch of blocks from the backend database into `cache`, start from (and include) the - /// next block followe the last block of `cache`, `base` is the block number of the first block - /// of the queue + /// Load a batch of blocks from the backend database into `cache`, starting from `base` and up + /// to `base + cache_capacity` fn load_batch_from_db( db: &D, - uncached_blocks: &mut usize, cache: &mut VecDeque>, base: u64, cache_capacity: usize, ) -> Result<(), Error> { - // return if all blocks already loaded into `cache` and there are no other - // blocks in the backend database - if *uncached_blocks == 0 { - return Ok(()) - } let start = base + cache.len() as u64; - let batch_size = cmp::min(*uncached_blocks, cache_capacity); - let mut loaded = 0; + let batch_size = cache_capacity; for i in 0..batch_size as u64 { match load_death_row_from_db::(db, start + i)? 
{ Some(row) => { cache.push_back(row); - loaded += 1; }, - // block may added to the queue but not commit into the db yet, if there are - // data missing in the db `load_death_row_from_db` should return a db error None => break, } } - *uncached_blocks -= loaded; Ok(()) } - /// Get the block in the given index of the queue, `base` is the block number of the - /// first block of the queue - fn get( - &mut self, - base: u64, - index: usize, - ) -> Result>, Error> { - match self { - DeathRowQueue::DbBacked { db, uncached_blocks, cache, cache_capacity } => { - // check if `index` target a block reside on disk - if index >= cache.len() && index < cache.len() + *uncached_blocks { - // if `index` target the next batch of `DeathRow`, load a batch from db - if index - cache.len() < cmp::min(*uncached_blocks, *cache_capacity) { - DeathRowQueue::load_batch_from_db( - db, - uncached_blocks, - cache, - base, - *cache_capacity, - )?; - } else { - // load a single `DeathRow` from db, but do not insert it to `cache` - // because `cache` is a queue of successive `DeathRow` - // NOTE: this branch should not be entered because blocks are visited - // in successive increasing order, just keeping it for robustness - return load_death_row_from_db(db, base + index as u64) - } - } - Ok(cache.get(index).cloned()) - }, - DeathRowQueue::Mem { death_rows, .. } => Ok(death_rows.get(index).cloned()), - } - } - /// Check if the block at the given `index` of the queue exist - /// it is the caller's responsibility to ensure `index` won't be out of bound + /// it is the caller's responsibility to ensure `index` won't be out of bounds fn have_block(&self, hash: &BlockHash, index: usize) -> HaveBlock { match self { DeathRowQueue::DbBacked { cache, .. } => { if cache.len() > index { (cache[index].hash == *hash).into() } else { - // the block not exist in `cache`, but it may exist in the unload - // blocks - HaveBlock::MayHave + // The block is not in the cache but it still may exist on disk. + HaveBlock::Maybe } }, DeathRowQueue::Mem { death_rows, .. } => (death_rows[index].hash == *hash).into(), @@ -307,11 +209,10 @@ impl DeathRowQueue { } /// Return the number of block in the pruning window - fn len(&self) -> usize { + fn len(&self, base: u64) -> u64 { match self { - DeathRowQueue::DbBacked { uncached_blocks, cache, .. } => - cache.len() + *uncached_blocks, - DeathRowQueue::Mem { death_rows, .. } => death_rows.len(), + DeathRowQueue::DbBacked { last, .. } => last.map_or(0, |l| l + 1 - base), + DeathRowQueue::Mem { death_rows, .. } => death_rows.len() as u64, } } @@ -326,10 +227,11 @@ impl DeathRowQueue { } #[cfg(test)] - fn get_db_backed_queue_state(&self) -> Option<(&VecDeque>, usize)> { + fn get_db_backed_queue_state( + &self, + ) -> Option<(&VecDeque>, Option)> { match self { - DeathRowQueue::DbBacked { cache, uncached_blocks, .. } => - Some((cache, *uncached_blocks)), + DeathRowQueue::DbBacked { cache, last, .. } => Some((cache, *last)), DeathRowQueue::Mem { .. } => None, } } @@ -369,20 +271,20 @@ fn to_journal_key(block: u64) -> Vec { /// The result return by `RefWindow::have_block` #[derive(Debug, PartialEq, Eq)] pub enum HaveBlock { - /// Definitely not having this block - NotHave, - /// May or may not have this block, need futher checking - MayHave, - /// Definitely having this block - Have, + /// Definitely don't have this block. 
+ No, + /// May or may not have this block, need further checking + Maybe, + /// Definitely has this block + Yes, } impl From for HaveBlock { fn from(have: bool) -> Self { if have { - HaveBlock::Have + HaveBlock::Yes } else { - HaveBlock::NotHave + HaveBlock::No } } } @@ -409,37 +311,36 @@ impl RefWindow { let queue = if count_insertions { DeathRowQueue::new_mem(&db, base)? } else { - let unload = match last_canonicalized_number { + let last = match last_canonicalized_number { Some(last_canonicalized_number) => { debug_assert!(last_canonicalized_number + 1 >= base); - last_canonicalized_number + 1 - base + Some(last_canonicalized_number) }, // None means `LAST_CANONICAL` is never been wrote, since the pruning journals are // in the same `CommitSet` as `LAST_CANONICAL`, it means no pruning journal have // ever been committed to the db, thus set `unload` to zero - None => 0, + None => None, }; - DeathRowQueue::new_db_backed(db, base, unload as usize, window_size)? + DeathRowQueue::new_db_backed(db, base, last, window_size)? }; - Ok(RefWindow { queue, base, pending_canonicalizations: 0, pending_prunings: 0 }) + Ok(RefWindow { queue, base }) } pub fn window_size(&self) -> u64 { - (self.queue.len() - self.pending_prunings) as u64 + self.queue.len(self.base) as u64 } /// Get the hash of the next pruning block pub fn next_hash(&mut self) -> Result, Error> { - let res = match &self.queue { - DeathRowQueue::DbBacked { cache, .. } => - if self.pending_prunings < cache.len() { - cache.get(self.pending_prunings).map(|r| r.hash.clone()) - } else { - self.get(self.pending_prunings)?.map(|r| r.hash) - }, - DeathRowQueue::Mem { death_rows, .. } => - death_rows.get(self.pending_prunings).map(|r| r.hash.clone()), + let res = match &mut self.queue { + DeathRowQueue::DbBacked { db, cache, cache_capacity, .. } => { + if cache.is_empty() { + DeathRowQueue::load_batch_from_db(db, cache, self.base, *cache_capacity)?; + } + cache.front().map(|r| r.hash.clone()) + }, + DeathRowQueue::Mem { death_rows, .. } => death_rows.front().map(|r| r.hash.clone()), }; Ok(res) } @@ -448,68 +349,34 @@ impl RefWindow { 0 } - // Return the block number of the first block that not been pending pruned - pub fn pending(&self) -> u64 { - self.base + self.pending_prunings as u64 - } - fn is_empty(&self) -> bool { - self.queue.len() <= self.pending_prunings + self.window_size() == 0 } // Check if a block is in the pruning window and not be pruned yet pub fn have_block(&self, hash: &BlockHash, number: u64) -> HaveBlock { // if the queue is empty or the block number exceed the pruning window, we definitely // do not have this block - if self.is_empty() || - number < self.pending() || - number >= self.base + self.queue.len() as u64 - { - return HaveBlock::NotHave + if self.is_empty() || number < self.base || number >= self.base + self.window_size() { + return HaveBlock::No } self.queue.have_block(hash, (number - self.base) as usize) } - fn get(&mut self, index: usize) -> Result>, Error> { - if index >= self.queue.len() { - return Ok(None) - } - match self.queue.get(self.base, index)? { - None => { - if matches!(self.queue, DeathRowQueue::DbBacked { .. 
}) && - // whether trying to get a pending canonicalize block which may not commit to the db yet - index >= self.queue.len() - self.pending_canonicalizations - { - trace!(target: "state-db", "Trying to get a pending canonicalize block that not commit to the db yet"); - Err(Error::StateDb(StateDbError::BlockUnavailable)) - } else { - // A block of the queue is missing, this may happen if `CommitSet` are commit to - // db concurrently and calling `apply_pending/revert_pending` out of order, this - // should not happen under current implementation but keeping it as a defensive - error!(target: "state-db", "Block record is missing from the pruning window, block number {}", self.base + index as u64); - Err(Error::StateDb(StateDbError::BlockMissing)) - } - }, - s => Ok(s), - } - } - /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. pub fn prune_one(&mut self, commit: &mut CommitSet) -> Result<(), Error> { - if let Some(pruned) = self.get(self.pending_prunings)? { + if let Some(pruned) = self.queue.pop_front(self.base)? { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - let index = self.base + self.pending_prunings as u64; + let index = self.base; commit.data.deleted.extend(pruned.deleted.into_iter()); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); - commit - .meta - .deleted - .push(to_journal_key(self.base + self.pending_prunings as u64)); - self.pending_prunings += 1; + commit.meta.deleted.push(to_journal_key(self.base)); + self.base += 1; + Ok(()) } else { - warn!(target: "state-db", "Trying to prune when there's nothing to prune"); + trace!(target: "state-db", "Trying to prune when there's nothing to prune"); + Err(Error::StateDb(StateDbError::BlockUnavailable)) } - Ok(()) } /// Add a change set to the window. 
Creates a journal record and pushes it to `commit` @@ -519,10 +386,10 @@ impl RefWindow { number: u64, commit: &mut CommitSet, ) -> Result<(), Error> { - if self.base == 0 && self.queue.len() == 0 && number > 0 { + if self.base == 0 && self.is_empty() && number > 0 { // assume that parent was canonicalized self.base = number; - } else if (self.base + self.queue.len() as u64) != number { + } else if (self.base + self.window_size()) != number { return Err(Error::StateDb(StateDbError::InvalidBlockNumber)) } trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); @@ -531,38 +398,12 @@ impl RefWindow { } else { Default::default() }; - let deleted = ::std::mem::take(&mut commit.data.deleted); + let deleted = std::mem::take(&mut commit.data.deleted); let journal_record = JournalRecord { hash: hash.clone(), inserted, deleted }; commit.meta.inserted.push((to_journal_key(number), journal_record.encode())); - self.queue.import(self.base, journal_record); - self.pending_canonicalizations += 1; + self.queue.import(self.base, number, journal_record); Ok(()) } - - /// Apply all pending changes - pub fn apply_pending(&mut self) { - self.pending_canonicalizations = 0; - for _ in 0..self.pending_prunings { - let pruned = self - .queue - .pop_front(self.base) - // NOTE: `pop_front` should not return `MetaDb::Error` because blocks are visited - // by `RefWindow::prune_one` first then `RefWindow::apply_pending` and - // `DeathRowQueue::get` should load the blocks into cache already - .expect("block must loaded in cache thus no MetaDb::Error") - .expect("pending_prunings is always < queue.len()"); - trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - self.base += 1; - } - self.pending_prunings = 0; - } - - /// Revert all pending changes - pub fn revert_pending(&mut self) { - self.queue.revert_recent_add(self.base, self.pending_canonicalizations); - self.pending_canonicalizations = 0; - self.pending_prunings = 0; - } } #[cfg(test)] @@ -601,13 +442,14 @@ mod tests { let mut pruning: RefWindow = RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, true).unwrap(); let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit).unwrap(); + assert_eq!( + Err(Error::StateDb(StateDbError::BlockUnavailable)), + pruning.prune_one(&mut commit) + ); assert_eq!(pruning.base, 0); let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); assert!(death_rows.is_empty()); assert!(death_index.is_empty()); - assert!(pruning.pending_prunings == 0); - assert!(pruning.pending_canonicalizations == 0); } #[test] @@ -619,9 +461,8 @@ mod tests { let hash = H256::random(); pruning.note_canonical(&hash, 0, &mut commit).unwrap(); db.commit(&commit); - assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Have); - pruning.apply_pending(); - assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Have); + assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Yes); + assert_eq!(pruning.have_block(&hash, 0), HaveBlock::Yes); assert!(commit.data.deleted.is_empty()); let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); assert_eq!(death_rows.len(), 1); @@ -631,10 +472,9 @@ mod tests { let mut commit = CommitSet::default(); pruning.prune_one(&mut commit).unwrap(); - assert_eq!(pruning.have_block(&hash, 0), HaveBlock::NotHave); + assert_eq!(pruning.have_block(&hash, 0), HaveBlock::No); db.commit(&commit); - pruning.apply_pending(); - 
assert_eq!(pruning.have_block(&hash, 0), HaveBlock::NotHave); + assert_eq!(pruning.have_block(&hash, 0), HaveBlock::No); assert!(db.data_eq(&make_db(&[2, 4, 5]))); let (death_rows, death_index) = pruning.queue.get_mem_queue_state().unwrap(); assert!(death_rows.is_empty()); @@ -653,7 +493,6 @@ mod tests { let mut commit = make_commit(&[5], &[2]); pruning.note_canonical(&H256::random(), 1, &mut commit).unwrap(); db.commit(&commit); - pruning.apply_pending(); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); check_journal(&pruning, &db); @@ -661,12 +500,10 @@ mod tests { let mut commit = CommitSet::default(); pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); - pruning.apply_pending(); assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); let mut commit = CommitSet::default(); pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); - pruning.apply_pending(); assert!(db.data_eq(&make_db(&[3, 4, 5]))); assert_eq!(pruning.base, 2); } @@ -690,7 +527,6 @@ mod tests { let mut commit = CommitSet::default(); pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); - pruning.apply_pending(); assert!(db.data_eq(&make_db(&[3, 4, 5]))); assert_eq!(pruning.base, 2); } @@ -710,7 +546,6 @@ mod tests { pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.apply_pending(); check_journal(&pruning, &db); @@ -725,7 +560,6 @@ mod tests { pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); - pruning.apply_pending(); assert_eq!(pruning.base, 3); } @@ -756,7 +590,6 @@ mod tests { pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 3]))); - pruning.apply_pending(); assert_eq!(pruning.base, 3); } @@ -775,7 +608,6 @@ mod tests { pruning.note_canonical(&H256::random(), 2, &mut commit).unwrap(); db.commit(&commit); assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.apply_pending(); check_journal(&pruning, &db); @@ -861,9 +693,9 @@ mod tests { let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; // start as an empty queue - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); assert_eq!(cache.len(), 0); - assert_eq!(uncached_blocks, 0); + assert_eq!(last, None); // import blocks // queue size and content should match @@ -872,21 +704,19 @@ mod tests { pruning.note_canonical(&(i as u64), i as u64, &mut commit).unwrap(); push_last_canonicalized(i as u64, &mut commit); db.commit(&commit); - // block will fill in cache first - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + // blocks will fill the cache first + let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); if i < cache_capacity { assert_eq!(cache.len(), i + 1); - assert_eq!(uncached_blocks, 0); } else { assert_eq!(cache.len(), cache_capacity); - assert_eq!(uncached_blocks, i - cache_capacity + 1); } + assert_eq!(last, Some(i as u64)); } - pruning.apply_pending(); - assert_eq!(pruning.queue.len(), cache_capacity + 10); - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(pruning.window_size(), cache_capacity as u64 + 10); + let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); assert_eq!(cache.len(), cache_capacity); - assert_eq!(uncached_blocks, 10); + assert_eq!(last, Some(cache_capacity as u64 + 10 - 1)); for i in 0..cache_capacity { 
assert_eq!(cache[i].hash, i as u64); } @@ -897,29 +727,26 @@ mod tests { pruning .note_canonical(&(cache_capacity as u64 + 10), cache_capacity as u64 + 10, &mut commit) .unwrap(); - assert_eq!(pruning.queue.len(), cache_capacity + 11); - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(pruning.window_size(), cache_capacity as u64 + 11); + let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); assert_eq!(cache.len(), cache_capacity); - assert_eq!(uncached_blocks, 11); // revert the last add that no apply yet // NOTE: do not commit the previous `CommitSet` to db - pruning.revert_pending(); - assert_eq!(pruning.queue.len(), cache_capacity + 10); - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + pruning = RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); + let cache_capacity = DEFAULT_MAX_BLOCK_CONSTRAINT as usize; + assert_eq!(pruning.window_size(), cache_capacity as u64 + 10); + let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); assert_eq!(cache.len(), cache_capacity); - assert_eq!(uncached_blocks, 10); // remove one block from the start of the queue // block is removed from the head of cache let mut commit = CommitSet::default(); pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); - pruning.apply_pending(); - assert_eq!(pruning.queue.len(), cache_capacity + 9); - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(pruning.window_size(), cache_capacity as u64 + 9); + let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); assert_eq!(cache.len(), cache_capacity - 1); - assert_eq!(uncached_blocks, 10); for i in 0..(cache_capacity - 1) { assert_eq!(cache[i].hash, (i + 1) as u64); } @@ -928,10 +755,9 @@ mod tests { // `cache` is full again but the content of the queue should be the same let pruning: RefWindow = RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - assert_eq!(pruning.queue.len(), cache_capacity + 9); - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(pruning.window_size(), cache_capacity as u64 + 9); + let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); assert_eq!(cache.len(), cache_capacity); - assert_eq!(uncached_blocks, 9); for i in 0..cache_capacity { assert_eq!(cache[i].hash, (i + 1) as u64); } @@ -952,24 +778,13 @@ mod tests { db.commit(&commit); } - // the following operations won't triger loading block from db: + // the following operations won't trigger loading block from db: // - getting block in cache // - getting block not in the queue - let index = cache_capacity; - assert_eq!( - pruning.queue.get(0, index - 1).unwrap().unwrap().hash, - cache_capacity as u64 - 1 - ); - assert_eq!(pruning.queue.get(0, cache_capacity * 2 + 10).unwrap(), None); - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(pruning.next_hash().unwrap().unwrap(), 0); + let (cache, last) = pruning.queue.get_db_backed_queue_state().unwrap(); assert_eq!(cache.len(), cache_capacity); - assert_eq!(uncached_blocks, cache_capacity + 10); - - // getting a block not in cache will triger loading block from db - assert_eq!(pruning.queue.get(0, index).unwrap().unwrap().hash, cache_capacity as u64); - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); - assert_eq!(cache.len(), cache_capacity * 2); - assert_eq!(uncached_blocks, 10); + assert_eq!(last, 
Some(cache_capacity as u64 * 2 + 10 - 1)); // clear all block loaded in cache for _ in 0..cache_capacity * 2 { @@ -977,29 +792,22 @@ mod tests { pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); } - pruning.apply_pending(); - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); assert!(cache.is_empty()); - assert_eq!(uncached_blocks, 10); - // getting the hash of block that not in cache will also triger loading + // getting the hash of block that not in cache will also trigger loading // the remaining blocks from db - assert_eq!( - pruning.queue.get(pruning.base, 0).unwrap().unwrap().hash, - (cache_capacity * 2) as u64 - ); - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(pruning.next_hash().unwrap().unwrap(), (cache_capacity * 2) as u64); + let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); assert_eq!(cache.len(), 10); - assert_eq!(uncached_blocks, 0); // load a new queue from db // `cache` should be the same let pruning: RefWindow = RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, false).unwrap(); - assert_eq!(pruning.queue.len(), 10); - let (cache, uncached_blocks) = pruning.queue.get_db_backed_queue_state().unwrap(); + assert_eq!(pruning.window_size(), 10); + let (cache, _) = pruning.queue.get_db_backed_queue_state().unwrap(); assert_eq!(cache.len(), 10); - assert_eq!(uncached_blocks, 0); for i in 0..10 { assert_eq!(cache[i].hash, (cache_capacity * 2 + i) as u64); } @@ -1030,11 +838,8 @@ mod tests { assert_eq!(pruning.next_hash().unwrap(), Some(i)); pruning.prune_one(&mut commit).unwrap(); } - // return `BlockUnavailable` for block that did not commit to db - assert_eq!( - pruning.next_hash().unwrap_err(), - Error::StateDb(StateDbError::BlockUnavailable) - ); + // return `None` for block that did not commit to db + assert_eq!(pruning.next_hash().unwrap(), None); assert_eq!( pruning.prune_one(&mut commit).unwrap_err(), Error::StateDb(StateDbError::BlockUnavailable) @@ -1044,12 +849,5 @@ mod tests { assert_eq!(pruning.next_hash().unwrap(), Some(index)); pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); - - // import a block and do not commit it to db before calling `apply_pending` - pruning - .note_canonical(&(index + 1), index + 1, &mut make_commit(&[], &[])) - .unwrap(); - pruning.apply_pending(); - assert_eq!(pruning.next_hash().unwrap_err(), Error::StateDb(StateDbError::BlockMissing)); } } From eae25c901ccd4ef5c3ceb082e3f2c66753879062 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Tue, 8 Nov 2022 14:01:47 +0100 Subject: [PATCH 324/348] Remove duplicate units (#12634) Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- client/sysinfo/src/lib.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/client/sysinfo/src/lib.rs b/client/sysinfo/src/lib.rs index 372c7c1a6d219..cef5a4d210df1 100644 --- a/client/sysinfo/src/lib.rs +++ b/client/sysinfo/src/lib.rs @@ -131,14 +131,14 @@ pub fn print_sysinfo(sysinfo: &sc_telemetry::SysInfo) { /// Prints out the results of the hardware benchmarks in the logs. 
pub fn print_hwbench(hwbench: &HwBench) { - log::info!("🏁 CPU score: {}MB/s", hwbench.cpu_hashrate_score); - log::info!("🏁 Memory score: {}MB/s", hwbench.memory_memcpy_score); + log::info!("🏁 CPU score: {}", hwbench.cpu_hashrate_score); + log::info!("🏁 Memory score: {}", hwbench.memory_memcpy_score); if let Some(score) = hwbench.disk_sequential_write_score { - log::info!("🏁 Disk score (seq. writes): {}MB/s", score); + log::info!("🏁 Disk score (seq. writes): {}", score); } if let Some(score) = hwbench.disk_random_write_score { - log::info!("🏁 Disk score (rand. writes): {}MB/s", score); + log::info!("🏁 Disk score (rand. writes): {}", score); } } From b85f85e08dafa2935dc20d1b13af2ca36ba3ea64 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Tue, 8 Nov 2022 16:15:55 +0000 Subject: [PATCH 325/348] Add batching to fast-unstake pallet (#12394) * implement a brand new batch with all tests passing. * fix benchmarks as well * make benchmarks more or less work * fix migration * add some testing * Update frame/fast-unstake/src/benchmarking.rs Co-authored-by: Roman Useinov * review comments * some fixes * fix review comments * fix build * fmt * fix benchmarks * fmt * update Co-authored-by: Roman Useinov --- bin/node/runtime/src/lib.rs | 1 + frame/fast-unstake/Cargo.toml | 3 +- frame/fast-unstake/src/benchmarking.rs | 63 ++-- frame/fast-unstake/src/lib.rs | 183 ++++++---- frame/fast-unstake/src/migrations.rs | 77 ++++ frame/fast-unstake/src/mock.rs | 45 ++- frame/fast-unstake/src/tests.rs | 479 ++++++++++++++++++------- frame/fast-unstake/src/types.rs | 22 +- 8 files changed, 609 insertions(+), 264 deletions(-) create mode 100644 frame/fast-unstake/src/migrations.rs diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d49312bd6fe3f..999b178d10c55 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -584,6 +584,7 @@ impl pallet_staking::Config for Runtime { impl pallet_fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ControlOrigin = frame_system::EnsureRoot; + type BatchSize = ConstU32<128>; type Deposit = ConstU128<{ DOLLARS }>; type Currency = Balances; type Staking = Staking; diff --git a/frame/fast-unstake/Cargo.toml b/frame/fast-unstake/Cargo.toml index ea1abeb0b48c5..f14a5e7b9c20b 100644 --- a/frame/fast-unstake/Cargo.toml +++ b/frame/fast-unstake/Cargo.toml @@ -60,6 +60,7 @@ std = [ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-system/runtime-benchmarks", - "sp-staking/runtime-benchmarks" + "sp-staking/runtime-benchmarks", + "pallet-staking/runtime-benchmarks" ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs index 762a59c96bcfa..b4a5e21dcfc13 100644 --- a/frame/fast-unstake/src/benchmarking.rs +++ b/frame/fast-unstake/src/benchmarking.rs @@ -36,10 +36,15 @@ const MAX_VALIDATORS: u32 = 128; type CurrencyOf = ::Currency; -fn create_unexposed_nominator() -> T::AccountId { - let account = frame_benchmarking::account::("nominator_42", 0, USER_SEED); - fund_and_bond_account::(&account); - account +fn create_unexposed_nominators() -> Vec { + (0..T::BatchSize::get()) + .map(|i| { + let account = + frame_benchmarking::account::("unexposed_nominator", i, USER_SEED); + fund_and_bond_account::(&account); + account + }) + .collect() } fn fund_and_bond_account(account: &T::AccountId) { @@ -90,21 +95,27 @@ fn on_idle_full_block() { } benchmarks! 
{ - // on_idle, we we don't check anyone, but fully unbond and move them to another pool. + // on_idle, we don't check anyone, but fully unbond them. on_idle_unstake { ErasToCheckPerBlock::::put(1); - let who = create_unexposed_nominator::(); - assert_ok!(FastUnstake::::register_fast_unstake( - RawOrigin::Signed(who.clone()).into(), - )); + for who in create_unexposed_nominators::() { + assert_ok!(FastUnstake::::register_fast_unstake( + RawOrigin::Signed(who.clone()).into(), + )); + } // run on_idle once. This will check era 0. assert_eq!(Head::::get(), None); on_idle_full_block::(); - assert_eq!( + + assert!(matches!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap(), deposit: T::Deposit::get() }) - ); + Some(UnstakeRequest { + checked, + stashes, + .. + }) if checked.len() == 1 && stashes.len() as u32 == T::BatchSize::get() + )); } : { on_idle_full_block::(); @@ -112,7 +123,7 @@ benchmarks! { verify { assert!(matches!( fast_unstake_events::().last(), - Some(Event::Unstaked { .. }) + Some(Event::BatchFinished) )); } @@ -129,10 +140,13 @@ benchmarks! { // setup staking with v validators and u eras of data (0..=u) setup_staking::(v, u); - let who = create_unexposed_nominator::(); - assert_ok!(FastUnstake::::register_fast_unstake( - RawOrigin::Signed(who.clone()).into(), - )); + + let stashes = create_unexposed_nominators::().into_iter().map(|s| { + assert_ok!(FastUnstake::::register_fast_unstake( + RawOrigin::Signed(s.clone()).into(), + )); + (s, T::Deposit::get()) + }).collect::>(); // no one is queued thus far. assert_eq!(Head::::get(), None); @@ -141,20 +155,19 @@ benchmarks! { on_idle_full_block::(); } verify { - let checked: frame_support::BoundedVec<_, _> = (1..=u).rev().collect::>().try_into().unwrap(); - assert_eq!( - Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked, deposit: T::Deposit::get() }) - ); + let checked = (1..=u).rev().collect::>(); + let request = Head::::get().unwrap(); + assert_eq!(checked, request.checked.into_inner()); assert!(matches!( fast_unstake_events::().last(), - Some(Event::Checking { .. }) + Some(Event::BatchChecked { .. }) )); + assert!(stashes.iter().all(|(s, _)| request.stashes.iter().find(|(ss, _)| ss == s).is_some())); } register_fast_unstake { ErasToCheckPerBlock::::put(1); - let who = create_unexposed_nominator::(); + let who = create_unexposed_nominators::().get(0).cloned().unwrap(); whitelist_account!(who); assert_eq!(Queue::::count(), 0); @@ -166,7 +179,7 @@ benchmarks! { deregister { ErasToCheckPerBlock::::put(1); - let who = create_unexposed_nominator::(); + let who = create_unexposed_nominators::().get(0).cloned().unwrap(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), )); diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 0477bb251aa00..c83054189feb7 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -60,6 +60,7 @@ mod tests; // NOTE: enable benchmarking in tests as well. 
#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;

+pub mod migrations;
 pub mod types;
 pub mod weights;

@@ -82,7 +83,7 @@ pub mod pallet {
 use crate::types::*;
 use frame_support::{
 pallet_prelude::*,
- traits::{Defensive, ReservableCurrency},
+ traits::{Defensive, ReservableCurrency, StorageVersion},
 };
 use frame_system::pallet_prelude::*;
 use sp_runtime::{
@@ -103,7 +104,10 @@ pub mod pallet {
 }
 }

+ const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+
 #[pallet::pallet]
+ #[pallet::storage_version(STORAGE_VERSION)]
 pub struct Pallet(_);

 #[pallet::config]
@@ -123,6 +127,11 @@ pub mod pallet {
 /// The origin that can control this pallet.
 type ControlOrigin: frame_support::traits::EnsureOrigin;

+ /// Batch size.
+ ///
+ /// This many stashes are processed in each unstake request.
+ type BatchSize: Get;
+
 /// The access to staking functionality.
 type Staking: StakingInterface, AccountId = Self::AccountId>;

@@ -132,8 +141,7 @@ pub mod pallet {

 /// The current "head of the queue" being unstaked.
 #[pallet::storage]
- pub type Head =
- StorageValue<_, UnstakeRequest, BalanceOf>, OptionQuery>;
+ pub type Head = StorageValue<_, UnstakeRequest, OptionQuery>;

 /// The map of all accounts wishing to be unstaked.
 ///
@@ -158,13 +166,18 @@ pub mod pallet {
 Unstaked { stash: T::AccountId, result: DispatchResult },
 /// A staker was slashed for requesting fast-unstake whilst being exposed.
 Slashed { stash: T::AccountId, amount: BalanceOf },
- /// A staker was partially checked for the given eras, but the process did not finish.
- Checking { stash: T::AccountId, eras: Vec },
 /// Some internal error happened while migrating stash. They are removed as head as a
 /// consequence.
 Errored { stash: T::AccountId },
 /// An internal error happened. Operations will be paused now.
 InternalError,
+ /// A batch was partially checked for the given eras, but the process did not finish.
+ BatchChecked { eras: Vec },
+ /// A batch was terminated.
+ ///
+ /// This is always followed by a number of `Unstaked` or `Slashed` events, marking the end
+ /// of the batch. A new batch will be created upon the next block.
+ BatchFinished,
 }

 #[pallet::error]
@@ -225,12 +238,7 @@ pub mod pallet {
 let stash_account =
 T::Staking::stash_by_ctrl(&ctrl).map_err(|_| Error::::NotController)?;
 ensure!(!Queue::::contains_key(&stash_account), Error::::AlreadyQueued);
- ensure!(
- Head::::get()
- .map_or(true, |UnstakeRequest { stash, .. }| stash_account != stash),
- Error::::AlreadyHead
- );
-
+ ensure!(!Self::is_head(&stash_account), Error::::AlreadyHead);
 ensure!(!T::Staking::is_unbonding(&stash_account)?, Error::::NotFullyBonded);

 // chill and fully unstake.
@@ -260,19 +268,13 @@ pub mod pallet {
 let stash_account =
 T::Staking::stash_by_ctrl(&ctrl).map_err(|_| Error::::NotController)?;
 ensure!(Queue::::contains_key(&stash_account), Error::::NotQueued);
- ensure!(
- Head::::get()
- .map_or(true, |UnstakeRequest { stash, ..
}| stash_account != stash), - Error::::AlreadyHead - ); + ensure!(!Self::is_head(&stash_account), Error::::AlreadyHead); let deposit = Queue::::take(stash_account.clone()); if let Some(deposit) = deposit.defensive() { let remaining = T::Currency::unreserve(&stash_account, deposit); if !remaining.is_zero() { - frame_support::defensive!("`not enough balance to unreserve`"); - ErasToCheckPerBlock::::put(0); - Self::deposit_event(Event::::InternalError) + Self::halt("not enough balance to unreserve"); } } @@ -291,6 +293,20 @@ pub mod pallet { } impl Pallet { + /// Returns `true` if `staker` is anywhere to be found in the `head`. + pub(crate) fn is_head(staker: &T::AccountId) -> bool { + Head::::get().map_or(false, |UnstakeRequest { stashes, .. }| { + stashes.iter().any(|(stash, _)| stash == staker) + }) + } + + /// Halt the operations of this pallet. + pub(crate) fn halt(reason: &'static str) { + frame_support::defensive!(reason); + ErasToCheckPerBlock::::put(0); + Self::deposit_event(Event::::InternalError) + } + /// process up to `remaining_weight`. /// /// Returns the actual weight consumed. @@ -336,28 +352,30 @@ pub mod pallet { return T::DbWeight::get().reads(2) } - let UnstakeRequest { stash, mut checked, deposit } = - match Head::::take().or_else(|| { - // NOTE: there is no order guarantees in `Queue`. - Queue::::drain() - .map(|(stash, deposit)| UnstakeRequest { - stash, - deposit, - checked: Default::default(), - }) - .next() - }) { - None => { - // There's no `Head` and nothing in the `Queue`, nothing to do here. - return T::DbWeight::get().reads(4) - }, - Some(head) => head, - }; + let UnstakeRequest { stashes, mut checked } = match Head::::take().or_else(|| { + // NOTE: there is no order guarantees in `Queue`. + let stashes: BoundedVec<_, T::BatchSize> = Queue::::drain() + .take(T::BatchSize::get() as usize) + .collect::>() + .try_into() + .expect("take ensures bound is met; qed"); + if stashes.is_empty() { + None + } else { + Some(UnstakeRequest { stashes, checked: Default::default() }) + } + }) { + None => { + // There's no `Head` and nothing in the `Queue`, nothing to do here. + return T::DbWeight::get().reads(4) + }, + Some(head) => head, + }; log!( debug, - "checking {:?}, eras_to_check_per_block = {:?}, remaining_weight = {:?}", - stash, + "checking {:?} stashes, eras_to_check_per_block = {:?}, remaining_weight = {:?}", + stashes.len(), eras_to_check_per_block, remaining_weight ); @@ -365,9 +383,11 @@ pub mod pallet { // the range that we're allowed to check in this round. let current_era = T::Staking::current_era(); let bonding_duration = T::Staking::bonding_duration(); + // prune all the old eras that we don't care about. This will help us keep the bound // of `checked`. checked.retain(|e| *e >= current_era.saturating_sub(bonding_duration)); + let unchecked_eras_to_check = { // get the last available `bonding_duration` eras up to current era in reverse // order. 
@@ -397,66 +417,75 @@ pub mod pallet { unchecked_eras_to_check ); - if unchecked_eras_to_check.is_empty() { + let unstake_stash = |stash: T::AccountId, deposit| { let result = T::Staking::force_unstake(stash.clone()); - let remaining = T::Currency::unreserve(&stash, deposit); if !remaining.is_zero() { - frame_support::defensive!("`not enough balance to unreserve`"); - ErasToCheckPerBlock::::put(0); - Self::deposit_event(Event::::InternalError) + Self::halt("not enough balance to unreserve"); } else { log!(info, "unstaked {:?}, outcome: {:?}", stash, result); Self::deposit_event(Event::::Unstaked { stash, result }); } + }; - ::WeightInfo::on_idle_unstake() - } else { - // eras remaining to be checked. - let mut eras_checked = 0u32; + let check_stash = |stash, deposit, eras_checked: &mut u32| { let is_exposed = unchecked_eras_to_check.iter().any(|e| { eras_checked.saturating_inc(); T::Staking::is_exposed_in_era(&stash, e) }); - log!( - debug, - "checked {:?} eras, exposed? {}, (v: {:?}, u: {:?})", - eras_checked, - is_exposed, - validator_count, - unchecked_eras_to_check.len() - ); - - // NOTE: you can be extremely unlucky and get slashed here: You are not exposed in - // the last 28 eras, have registered yourself to be unstaked, midway being checked, - // you are exposed. if is_exposed { T::Currency::slash_reserved(&stash, deposit); log!(info, "slashed {:?} by {:?}", stash, deposit); Self::deposit_event(Event::::Slashed { stash, amount: deposit }); + false } else { - // Not exposed in these eras. - match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { - Ok(_) => { - Head::::put(UnstakeRequest { - stash: stash.clone(), - checked, - deposit, - }); - Self::deposit_event(Event::::Checking { - stash, + true + } + }; + + if unchecked_eras_to_check.is_empty() { + // `stash` is not exposed in any era now -- we can let go of them now. + stashes.into_iter().for_each(|(stash, deposit)| unstake_stash(stash, deposit)); + Self::deposit_event(Event::::BatchFinished); + ::WeightInfo::on_idle_unstake() + } else { + // eras checked so far. + let mut eras_checked = 0u32; + + let pre_length = stashes.len(); + let stashes: BoundedVec<(T::AccountId, BalanceOf), T::BatchSize> = stashes + .into_iter() + .filter(|(stash, deposit)| { + check_stash(stash.clone(), *deposit, &mut eras_checked) + }) + .collect::>() + .try_into() + .expect("filter can only lessen the length; still in bound; qed"); + let post_length = stashes.len(); + + log!( + debug, + "checked {:?} eras, pre stashes: {:?}, post: {:?}", + eras_checked, + pre_length, + post_length, + ); + + match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { + Ok(_) => + if stashes.is_empty() { + Self::deposit_event(Event::::BatchFinished); + } else { + Head::::put(UnstakeRequest { stashes, checked }); + Self::deposit_event(Event::::BatchChecked { eras: unchecked_eras_to_check, }); }, - Err(_) => { - // don't put the head back in -- there is an internal error in the - // pallet. - frame_support::defensive!("`checked is pruned via retain above`"); - ErasToCheckPerBlock::::put(0); - Self::deposit_event(Event::::InternalError); - }, - } + Err(_) => { + // don't put the head back in -- there is an internal error in the pallet. 
+ Self::halt("checked is pruned via retain above") + }, } ::WeightInfo::on_idle_check(validator_count * eras_checked) diff --git a/frame/fast-unstake/src/migrations.rs b/frame/fast-unstake/src/migrations.rs new file mode 100644 index 0000000000000..10d8e54134785 --- /dev/null +++ b/frame/fast-unstake/src/migrations.rs @@ -0,0 +1,77 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod v1 { + use crate::{types::BalanceOf, *}; + use frame_support::{ + storage::unhashed, + traits::{Defensive, Get, GetStorageVersion, OnRuntimeUpgrade}, + weights::Weight, + }; + use sp_staking::EraIndex; + use sp_std::prelude::*; + + pub struct MigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV1 { + fn on_runtime_upgrade() -> Weight { + let current = Pallet::::current_storage_version(); + let onchain = Pallet::::on_chain_storage_version(); + + log!( + info, + "Running migration with current storage version {:?} / onchain {:?}", + current, + onchain + ); + + if current == 1 && onchain == 0 { + // if a head exists, then we put them back into the queue. + if Head::::exists() { + if let Some((stash, _, deposit)) = + unhashed::take::<(T::AccountId, Vec, BalanceOf)>( + &Head::::hashed_key(), + ) + .defensive() + { + Queue::::insert(stash, deposit); + current.put::>(); + } else { + // not much we can do here -- head is already deleted. + } + T::DbWeight::get().reads_writes(2, 3) + } else { + T::DbWeight::get().reads(2) + } + } else { + log!(info, "Migration did not execute. This probably should be removed"); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + assert_eq!(Pallet::::on_chain_storage_version(), 0); + Ok(Default::default()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: Vec) -> Result<(), &'static str> { + assert_eq!(Pallet::::on_chain_storage_version(), 1); + Ok(()) + } + } +} diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index 87e89d90d6304..bac2d9aa9c8e4 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -16,8 +16,12 @@ // limitations under the License. use crate::{self as fast_unstake}; +use frame_benchmarking::frame_support::assert_ok; use frame_support::{ - pallet_prelude::*, parameter_types, traits::ConstU64, weights::constants::WEIGHT_PER_SECOND, + pallet_prelude::*, + parameter_types, + traits::{ConstU64, Currency}, + weights::constants::WEIGHT_PER_SECOND, }; use sp_runtime::traits::{Convert, IdentityLookup}; @@ -168,15 +172,17 @@ impl Convert for U256ToBalance { } parameter_types! 
{ - pub static DepositAmount: u128 = 7; + pub static Deposit: u128 = 7; + pub static BatchSize: u32 = 1; } impl fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type Deposit = DepositAmount; + type Deposit = Deposit; type Currency = Balances; type Staking = Staking; type ControlOrigin = frame_system::EnsureRoot; + type BatchSize = BatchSize; type WeightInfo = (); } @@ -212,13 +218,13 @@ pub(crate) fn fast_unstake_events_since_last_call() -> Vec } pub struct ExtBuilder { - exposed_nominators: Vec<(AccountId, AccountId, Balance)>, + unexposed: Vec<(AccountId, AccountId, Balance)>, } impl Default for ExtBuilder { fn default() -> Self { Self { - exposed_nominators: vec![ + unexposed: vec![ (1, 2, 7 + 100), (3, 4, 7 + 100), (5, 6, 7 + 100), @@ -255,6 +261,11 @@ impl ExtBuilder { }); } + pub(crate) fn batch(self, size: u32) -> Self { + BatchSize::set(size); + self + } + pub(crate) fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = @@ -266,12 +277,12 @@ impl ExtBuilder { let _ = pallet_balances::GenesisConfig:: { balances: self - .exposed_nominators + .unexposed .clone() .into_iter() .map(|(stash, _, balance)| (stash, balance * 2)) .chain( - self.exposed_nominators + self.unexposed .clone() .into_iter() .map(|(_, ctrl, balance)| (ctrl, balance * 2)), @@ -284,7 +295,7 @@ impl ExtBuilder { let _ = pallet_staking::GenesisConfig:: { stakers: self - .exposed_nominators + .unexposed .into_iter() .map(|(x, y, z)| (x, y, z, pallet_staking::StakerStatus::Nominator(vec![42]))) .chain(validators_range.map(|x| (x, x, 100, StakerStatus::Validator))) @@ -307,6 +318,7 @@ impl ExtBuilder { // because we read this value as a measure of how many validators we have. pallet_staking::ValidatorCount::::put(VALIDATORS_PER_ERA as u32); }); + ext } @@ -347,3 +359,20 @@ pub fn assert_unstaked(stash: &AccountId) { assert!(!pallet_staking::Validators::::contains_key(stash)); assert!(!pallet_staking::Nominators::::contains_key(stash)); } + +pub fn create_exposed_nominator(exposed: AccountId, era: u32) { + // create an exposed nominator in era 1 + pallet_staking::ErasStakers::::mutate(era, VALIDATORS_PER_ERA, |expo| { + expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); + }); + Balances::make_free_balance_be(&exposed, 100); + assert_ok!(Staking::bond( + RuntimeOrigin::signed(exposed), + exposed, + 10, + pallet_staking::RewardDestination::Staked + )); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); + // register the exposed one. 
+ assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); +} diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs index 18a9d59e9da66..522aa1d0fac28 100644 --- a/frame/fast-unstake/src/tests.rs +++ b/frame/fast-unstake/src/tests.rs @@ -20,7 +20,7 @@ use super::*; use crate::{mock::*, types::*, weights::WeightInfo, Event}; use frame_support::{assert_noop, assert_ok, bounded_vec, pallet_prelude::*, traits::Currency}; -use pallet_staking::{CurrentEra, IndividualExposure, RewardDestination}; +use pallet_staking::{CurrentEra, RewardDestination}; use sp_runtime::traits::BadOrigin; use sp_staking::StakingInterface; @@ -107,9 +107,8 @@ fn cannot_register_if_head() { ErasToCheckPerBlock::::put(1); // Insert some Head item for stash Head::::put(UnstakeRequest { - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![], - deposit: DepositAmount::get(), }); // Controller attempts to regsiter assert_noop!( @@ -142,7 +141,7 @@ fn deregister_works() { // Controller account registers for fast unstake. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(::Currency::reserved_balance(&1), DepositAmount::get()); + assert_eq!(::Currency::reserved_balance(&1), Deposit::get()); // Controller then changes mind and deregisters. assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(2))); @@ -191,9 +190,8 @@ fn cannot_deregister_already_head() { assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Insert some Head item for stash. Head::::put(UnstakeRequest { - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![], - deposit: DepositAmount::get(), }); // Controller attempts to deregister assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::AlreadyHead); @@ -227,14 +225,14 @@ mod on_idle { // set up Queue item assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(DepositAmount::get())); + assert_eq!(Queue::::get(1), Some(Deposit::get())); // call on_idle with no remaining weight FastUnstake::on_idle(System::block_number(), Weight::from_ref_time(0)); // assert nothing changed in Queue and Head assert_eq!(Head::::get(), None); - assert_eq!(Queue::::get(1), Some(DepositAmount::get())); + assert_eq!(Queue::::get(1), Some(Deposit::get())); }); } @@ -247,7 +245,7 @@ mod on_idle { // given assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(DepositAmount::get())); + assert_eq!(Queue::::get(1), Some(Deposit::get())); assert_eq!(Queue::::count(), 1); assert_eq!(Head::::get(), None); @@ -262,13 +260,12 @@ mod on_idle { // then assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Checking { stash: 1, eras: vec![3] }] + vec![Event::BatchChecked { eras: vec![3] }] ); assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3] }) ); @@ -282,13 +279,12 @@ mod on_idle { // then: assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Checking { stash: 1, eras: bounded_vec![2] }] + vec![Event::BatchChecked { eras: bounded_vec![2] }] ); assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2] }) ); @@ -308,13 +304,12 @@ mod on_idle { // then: assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Checking 
{ stash: 1, eras: vec![1, 0] }] + vec![Event::BatchChecked { eras: vec![1, 0] }] ); assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 1, 0] }) ); @@ -329,8 +324,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 1, 0] }) ); @@ -348,7 +342,7 @@ mod on_idle { // then we finish the unbonding: assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Unstaked { stash: 1, result: Ok(()) }] + vec![Event::Unstaked { stash: 1, result: Ok(()) }, Event::BatchFinished], ); assert_eq!(Head::::get(), None,); @@ -371,7 +365,7 @@ mod on_idle { assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10))); - assert_eq!(::Currency::reserved_balance(&1), DepositAmount::get()); + assert_eq!(::Currency::reserved_balance(&1), Deposit::get()); assert_eq!(Queue::::count(), 5); assert_eq!(Head::::get(), None); @@ -383,8 +377,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 1, 0] }) ); @@ -404,8 +397,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 5, + stashes: bounded_vec![(5, Deposit::get())], checked: bounded_vec![3, 2, 1, 0] }), ); @@ -416,9 +408,10 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), vec![ - Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, + Event::BatchChecked { eras: vec![3, 2, 1, 0] }, Event::Unstaked { stash: 1, result: Ok(()) }, - Event::Checking { stash: 5, eras: vec![3, 2, 1, 0] } + Event::BatchFinished, + Event::BatchChecked { eras: vec![3, 2, 1, 0] } ] ); }); @@ -432,9 +425,9 @@ mod on_idle { // register multi accounts for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(DepositAmount::get())); + assert_eq!(Queue::::get(1), Some(Deposit::get())); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); - assert_eq!(Queue::::get(3), Some(DepositAmount::get())); + assert_eq!(Queue::::get(3), Some(Deposit::get())); // assert 2 queue items are in Queue & None in Head to start with assert_eq!(Queue::::count(), 2); @@ -463,10 +456,12 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), vec![ - Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, + Event::BatchChecked { eras: vec![3, 2, 1, 0] }, Event::Unstaked { stash: 1, result: Ok(()) }, - Event::Checking { stash: 3, eras: vec![3, 2, 1, 0] }, + Event::BatchFinished, + Event::BatchChecked { eras: vec![3, 2, 1, 0] }, Event::Unstaked { stash: 3, result: Ok(()) }, + Event::BatchFinished, ] ); @@ -483,7 +478,7 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(DepositAmount::get())); + assert_eq!(Queue::::get(1), Some(Deposit::get())); // process on idle next_block(true); @@ -495,8 +490,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 1, 0] }) ); @@ -507,8 +501,9 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), vec![ - 
Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, result: Ok(()) } + Event::BatchChecked { eras: vec![3, 2, 1, 0] }, + Event::Unstaked { stash: 1, result: Ok(()) }, + Event::BatchFinished ] ); assert_unstaked(&1); @@ -525,7 +520,7 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(DepositAmount::get())); + assert_eq!(Queue::::get(1), Some(Deposit::get())); // process on idle next_block(true); @@ -537,8 +532,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 1, 0] }) ); @@ -549,8 +543,9 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), vec![ - Event::Checking { stash: 1, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 1, result: Ok(()) } + Event::BatchChecked { eras: vec![3, 2, 1, 0] }, + Event::Unstaked { stash: 1, result: Ok(()) }, + Event::BatchFinished ] ); assert_unstaked(&1); @@ -566,7 +561,7 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(DepositAmount::get())); + assert_eq!(Queue::::get(1), Some(Deposit::get())); // process on idle next_block(true); @@ -578,8 +573,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3] }) ); @@ -589,8 +583,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2] }) ); @@ -600,8 +593,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 1] }) ); @@ -611,8 +603,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 1, 0] }) ); @@ -624,11 +615,12 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), vec![ - Event::Checking { stash: 1, eras: vec![3] }, - Event::Checking { stash: 1, eras: vec![2] }, - Event::Checking { stash: 1, eras: vec![1] }, - Event::Checking { stash: 1, eras: vec![0] }, - Event::Unstaked { stash: 1, result: Ok(()) } + Event::BatchChecked { eras: vec![3] }, + Event::BatchChecked { eras: vec![2] }, + Event::BatchChecked { eras: vec![1] }, + Event::BatchChecked { eras: vec![0] }, + Event::Unstaked { stash: 1, result: Ok(()) }, + Event::BatchFinished ] ); assert_unstaked(&1); @@ -647,14 +639,13 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(DepositAmount::get())); + assert_eq!(Queue::::get(1), Some(Deposit::get())); next_block(true); assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3] }) ); @@ -663,8 +654,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2] }) ); @@ -673,8 +663,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: 
DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 1] }) ); @@ -683,8 +672,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 1, 0] }) ); @@ -698,10 +686,9 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], // note era 0 is pruned to keep the vector length sane. checked: bounded_vec![3, 2, 1, 4], - deposit: DepositAmount::get(), }) ); @@ -711,12 +698,13 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), vec![ - Event::Checking { stash: 1, eras: vec![3] }, - Event::Checking { stash: 1, eras: vec![2] }, - Event::Checking { stash: 1, eras: vec![1] }, - Event::Checking { stash: 1, eras: vec![0] }, - Event::Checking { stash: 1, eras: vec![4] }, - Event::Unstaked { stash: 1, result: Ok(()) } + Event::BatchChecked { eras: vec![3] }, + Event::BatchChecked { eras: vec![2] }, + Event::BatchChecked { eras: vec![1] }, + Event::BatchChecked { eras: vec![0] }, + Event::BatchChecked { eras: vec![4] }, + Event::Unstaked { stash: 1, result: Ok(()) }, + Event::BatchFinished ] ); assert_unstaked(&1); @@ -738,8 +726,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3] }) ); @@ -748,8 +735,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2] }) ); @@ -762,8 +748,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2] }) ); @@ -772,8 +757,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2] }) ); @@ -788,8 +772,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 4] }) ); @@ -799,8 +782,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 1, + stashes: bounded_vec![(1, Deposit::get())], checked: bounded_vec![3, 2, 4, 1] }) ); @@ -812,11 +794,12 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), vec![ - Event::Checking { stash: 1, eras: vec![3] }, - Event::Checking { stash: 1, eras: vec![2] }, - Event::Checking { stash: 1, eras: vec![4] }, - Event::Checking { stash: 1, eras: vec![1] }, - Event::Unstaked { stash: 1, result: Ok(()) } + Event::BatchChecked { eras: vec![3] }, + Event::BatchChecked { eras: vec![2] }, + Event::BatchChecked { eras: vec![4] }, + Event::BatchChecked { eras: vec![1] }, + Event::Unstaked { stash: 1, result: Ok(()) }, + Event::BatchFinished ] ); @@ -831,30 +814,15 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // create an exposed nominator in era 1 - let exposed = 666 as AccountId; - pallet_staking::ErasStakers::::mutate(1, VALIDATORS_PER_ERA, |expo| { - expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); - }); - Balances::make_free_balance_be(&exposed, 100); - assert_ok!(Staking::bond( - RuntimeOrigin::signed(exposed), - exposed, - 10, - 
RewardDestination::Staked - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); - - Balances::make_free_balance_be(&exposed, 100_000); - // register the exposed one. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); + let exposed = 666; + create_exposed_nominator(exposed, 1); // a few blocks later, we realize they are slashed next_block(true); assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: exposed, + stashes: bounded_vec![(exposed, Deposit::get())], checked: bounded_vec![3] }) ); @@ -862,8 +830,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: exposed, + stashes: bounded_vec![(exposed, Deposit::get())], checked: bounded_vec![3, 2] }) ); @@ -873,9 +840,10 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), vec![ - Event::Checking { stash: exposed, eras: vec![3] }, - Event::Checking { stash: exposed, eras: vec![2] }, - Event::Slashed { stash: exposed, amount: DepositAmount::get() } + Event::BatchChecked { eras: vec![3] }, + Event::BatchChecked { eras: vec![2] }, + Event::Slashed { stash: exposed, amount: Deposit::get() }, + Event::BatchFinished ] ); }); @@ -889,30 +857,16 @@ mod on_idle { ErasToCheckPerBlock::::put(2); CurrentEra::::put(BondingDuration::get()); - // create an exposed nominator in era 1 - let exposed = 666 as AccountId; - pallet_staking::ErasStakers::::mutate(0, VALIDATORS_PER_ERA, |expo| { - expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); - }); - Balances::make_free_balance_be(&exposed, DepositAmount::get() + 100); - assert_ok!(Staking::bond( - RuntimeOrigin::signed(exposed), - exposed, - 10, - RewardDestination::Staked - )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); - - // register the exposed one. - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); + // create an exposed nominator in era 0 + let exposed = 666; + create_exposed_nominator(exposed, 0); // a few blocks later, we realize they are slashed next_block(true); assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: exposed, + stashes: bounded_vec![(exposed, Deposit::get())], checked: bounded_vec![3, 2] }) ); @@ -923,8 +877,9 @@ mod on_idle { fast_unstake_events_since_last_call(), // we slash them vec![ - Event::Checking { stash: exposed, eras: vec![3, 2] }, - Event::Slashed { stash: exposed, amount: DepositAmount::get() } + Event::BatchChecked { eras: vec![3, 2] }, + Event::Slashed { stash: exposed, amount: Deposit::get() }, + Event::BatchFinished ] ); }); @@ -955,7 +910,7 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Slashed { stash: 100, amount: DepositAmount::get() }] + vec![Event::Slashed { stash: 100, amount: Deposit::get() }, Event::BatchFinished] ); }); } @@ -967,7 +922,7 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // create a new validator that 100% not exposed. 
- Balances::make_free_balance_be(&42, 100 + DepositAmount::get()); + Balances::make_free_balance_be(&42, 100 + Deposit::get()); assert_ok!(Staking::bond(RuntimeOrigin::signed(42), 42, 10, RewardDestination::Staked)); assert_ok!(Staking::validate(RuntimeOrigin::signed(42), Default::default())); @@ -979,8 +934,7 @@ mod on_idle { assert_eq!( Head::::get(), Some(UnstakeRequest { - deposit: DepositAmount::get(), - stash: 42, + stashes: bounded_vec![(42, Deposit::get())], checked: bounded_vec![3, 2, 1, 0] }) ); @@ -990,8 +944,255 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), vec![ - Event::Checking { stash: 42, eras: vec![3, 2, 1, 0] }, - Event::Unstaked { stash: 42, result: Ok(()) } + Event::BatchChecked { eras: vec![3, 2, 1, 0] }, + Event::Unstaked { stash: 42, result: Ok(()) }, + Event::BatchFinished + ] + ); + }); + } +} + +mod batched { + use super::*; + + #[test] + fn single_block_batched_successful() { + ExtBuilder::default().batch(3).build_and_execute(|| { + ErasToCheckPerBlock::::put(BondingDuration::get() + 1); + CurrentEra::::put(BondingDuration::get()); + + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); + + assert_eq!(Queue::::count(), 4); + assert_eq!(Head::::get(), None); + + // when + next_block(true); + + // then + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stashes: bounded_vec![ + (1, Deposit::get()), + (5, Deposit::get()), + (7, Deposit::get()) + ], + checked: bounded_vec![3, 2, 1, 0] + }) + ); + assert_eq!(Queue::::count(), 1); + + // when + next_block(true); + + // then + assert_eq!(Head::::get(), None); + assert_eq!(Queue::::count(), 1); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::BatchChecked { eras: vec![3, 2, 1, 0] }, + Event::Unstaked { stash: 1, result: Ok(()) }, + Event::Unstaked { stash: 5, result: Ok(()) }, + Event::Unstaked { stash: 7, result: Ok(()) }, + Event::BatchFinished + ] + ); + }); + } + + #[test] + fn multi_block_batched_successful() { + ExtBuilder::default().batch(3).build_and_execute(|| { + ErasToCheckPerBlock::::put(2); + CurrentEra::::put(BondingDuration::get()); + + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); + + assert_eq!(Queue::::count(), 4); + assert_eq!(Head::::get(), None); + + // when + next_block(true); + + // then + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stashes: bounded_vec![ + (1, Deposit::get()), + (5, Deposit::get()), + (7, Deposit::get()) + ], + checked: bounded_vec![3, 2] + }) + ); + + // when + next_block(true); + + // then + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stashes: bounded_vec![ + (1, Deposit::get()), + (5, Deposit::get()), + (7, Deposit::get()) + ], + checked: bounded_vec![3, 2, 1, 0] + }) + ); + + // when + next_block(true); + + // then + assert_eq!(Head::::get(), None); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::BatchChecked { eras: vec![3, 2] }, + Event::BatchChecked { eras: vec![1, 0] }, + Event::Unstaked { stash: 1, result: Ok(()) }, + Event::Unstaked { stash: 5, result: Ok(()) }, + 
Event::Unstaked { stash: 7, result: Ok(()) }, + Event::BatchFinished + ] + ); + }); + } + + #[test] + fn multi_block_batched_some_fail() { + ExtBuilder::default().batch(4).build_and_execute(|| { + ErasToCheckPerBlock::::put(2); + CurrentEra::::put(BondingDuration::get()); + + // register two good ones. + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); + create_exposed_nominator(666, 1); + create_exposed_nominator(667, 3); + + // then + assert_eq!(Queue::::count(), 4); + assert_eq!(Head::::get(), None); + + // when + next_block(true); + + // then + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stashes: bounded_vec![ + (1, Deposit::get()), + (3, Deposit::get()), + (666, Deposit::get()) + ], + checked: bounded_vec![3, 2] + }) + ); + + // when + next_block(true); + + // then + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stashes: bounded_vec![(1, Deposit::get()), (3, Deposit::get()),], + checked: bounded_vec![3, 2, 1, 0] + }) + ); + + // when + next_block(true); + + // then + assert_eq!(Head::::get(), None); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Slashed { stash: 667, amount: 7 }, + Event::BatchChecked { eras: vec![3, 2] }, + Event::Slashed { stash: 666, amount: 7 }, + Event::BatchChecked { eras: vec![1, 0] }, + Event::Unstaked { stash: 1, result: Ok(()) }, + Event::Unstaked { stash: 3, result: Ok(()) }, + Event::BatchFinished + ] + ); + }); + } + + #[test] + fn multi_block_batched_all_fail_early_exit() { + ExtBuilder::default().batch(2).build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + CurrentEra::::put(BondingDuration::get()); + + // register two bad ones. + create_exposed_nominator(666, 3); + create_exposed_nominator(667, 2); + + // then + assert_eq!(Queue::::count(), 2); + assert_eq!(Head::::get(), None); + + // when we progress a block.. + next_block(true); + + // ..and register two good ones. + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); + + // then one of the bad ones is reaped. + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stashes: bounded_vec![(667, Deposit::get())], + checked: bounded_vec![3] + }) + ); + + // when we go to next block + next_block(true); + + // then the head is empty, we early terminate the batch. + assert_eq!(Head::::get(), None); + + // upon next block, we will assemble a new head. + next_block(true); + + assert_eq!( + Head::::get(), + Some(UnstakeRequest { + stashes: bounded_vec![(1, Deposit::get()), (3, Deposit::get()),], + checked: bounded_vec![3] + }) + ); + + assert_eq!( + fast_unstake_events_since_last_call(), + vec![ + Event::Slashed { stash: 666, amount: Deposit::get() }, + Event::BatchChecked { eras: vec![3] }, + Event::Slashed { stash: 667, amount: Deposit::get() }, + Event::BatchFinished, + Event::BatchChecked { eras: vec![3] } ] ); }); diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index 460c9fa4354e5..34ca6517f3168 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -17,15 +17,14 @@ //! Types used in the Fast Unstake pallet. 
-use crate::Config; +use crate::{Config, MaxChecking}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - traits::{Currency, Get}, - BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, + traits::Currency, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; use sp_staking::EraIndex; -use sp_std::{fmt::Debug, prelude::*}; +use sp_std::prelude::*; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -33,15 +32,10 @@ pub type BalanceOf = #[derive( Encode, Decode, EqNoBound, PartialEqNoBound, Clone, TypeInfo, RuntimeDebugNoBound, MaxEncodedLen, )] -pub struct UnstakeRequest< - AccountId: Eq + PartialEq + Debug, - MaxChecked: Get, - Balance: PartialEq + Debug, -> { - /// Their stash account. - pub(crate) stash: AccountId, +#[scale_info(skip_type_params(T))] +pub struct UnstakeRequest { + /// This list of stashes being processed in this request, and their corresponding deposit. + pub(crate) stashes: BoundedVec<(T::AccountId, BalanceOf), T::BatchSize>, /// The list of eras for which they have been checked. - pub(crate) checked: BoundedVec, - /// Deposit to be slashed if the unstake was unsuccessful. - pub(crate) deposit: Balance, + pub(crate) checked: BoundedVec>, } From 14f9fb2e5ec839ae554efb898ab966fa4bb4e14d Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Tue, 8 Nov 2022 18:38:01 +0100 Subject: [PATCH 326/348] [ci] Use ci-linux:production image in ci (#12648) --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f663057faaad5..9053e39eb59bf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -47,7 +47,7 @@ variables: CARGO_INCREMENTAL: 0 DOCKER_OS: "debian:stretch" ARCH: "x86_64" - CI_IMAGE: "paritytech/ci-linux@sha256:c977b383c2033de50083fad18f6b9e698644230455e016ba708da00eb98d4683" # staging 07.11.2022 + CI_IMAGE: "paritytech/ci-linux:production" RUSTY_CACHIER_SINGLE_BRANCH: master RUSTY_CACHIER_DONT_OPERATE_ON_MAIN_BRANCH: "true" From b70c64d597c8446a3ab75acc6df81de2b97f32ad Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Tue, 8 Nov 2022 14:50:28 -0400 Subject: [PATCH 327/348] New Weights for All Pallets (#12325) * new weights for everything * fix * fmt * new batch * fmt * new batch * Update run_all_benchmarks.sh * add headers * update weights * Update lib.rs * block and extrinsic weight --- frame/alliance/src/weights.rs | 377 ++-- frame/assets/src/weights.rs | 238 +-- frame/bags-list/src/weights.rs | 55 +- frame/balances/src/weights.rs | 51 +- frame/benchmarking/src/weights.rs | 97 +- frame/bounties/src/weights.rs | 103 +- frame/child-bounties/src/weights.rs | 67 +- frame/collective/src/weights.rs | 261 ++- frame/contracts/src/weights.rs | 1804 ++++++++--------- frame/conviction-voting/src/weights.rs | 71 +- frame/democracy/src/weights.rs | 209 +- .../src/weights.rs | 181 +- frame/elections-phragmen/src/weights.rs | 151 +- frame/fast-unstake/src/weights.rs | 138 +- frame/gilt/src/weights.rs | 109 +- frame/identity/src/weights.rs | 352 ++-- frame/im-online/src/weights.rs | 35 +- frame/indices/src/weights.rs | 39 +- frame/lottery/src/weights.rs | 55 +- frame/membership/src/weights.rs | 121 +- frame/multisig/src/weights.rs | 145 +- frame/nomination-pools/src/weights.rs | 271 +-- frame/preimage/src/weights.rs | 97 +- frame/proxy/src/weights.rs | 209 +- frame/ranked-collective/src/weights.rs | 110 +- frame/recovery/src/weights.rs | 113 +- frame/referenda/src/weights.rs | 181 +- 
frame/remark/src/weights.rs | 25 +- frame/scheduler/src/weights.rs | 133 +- frame/session/src/weights.rs | 21 +- frame/staking/src/weights.rs | 561 ++--- frame/state-trie-migration/src/weights.rs | 61 +- frame/support/src/weights/block_weights.rs | 26 +- .../support/src/weights/extrinsic_weights.rs | 26 +- frame/system/src/weights.rs | 93 +- frame/timestamp/src/weights.rs | 21 +- frame/tips/src/weights.rs | 105 +- frame/transaction-storage/src/weights.rs | 55 +- frame/treasury/src/weights.rs | 73 +- frame/uniques/src/weights.rs | 254 +-- frame/utility/src/weights.rs | 63 +- frame/vesting/src/weights.rs | 217 +- frame/whitelist/src/weights.rs | 59 +- 43 files changed, 4169 insertions(+), 3264 deletions(-) diff --git a/frame/alliance/src/weights.rs b/frame/alliance/src/weights.rs index 9e2ee5681aa99..29038efd2c25c 100644 --- a/frame/alliance/src/weights.rs +++ b/frame/alliance/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,23 +18,24 @@ //! Autogenerated weights for pallet_alliance //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-05, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_alliance // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --pallet=pallet_alliance -// --chain=dev // --output=./frame/alliance/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -80,14 +81,13 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[0, 90]`. /// The range of component `p` is `[1, 100]`. - fn propose_proposed(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(34_420_000 as u64) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(145_000 as u64).saturating_mul(x as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(y as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(197_000 as u64).saturating_mul(p as u64)) + fn propose_proposed(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + // Minimum execution time: 43_720 nanoseconds. 
+ Weight::from_ref_time(44_766_307 as u64) + // Standard Error: 2_522 + .saturating_add(Weight::from_ref_time(54_721 as u64).saturating_mul(y as u64)) + // Standard Error: 2_301 + .saturating_add(Weight::from_ref_time(173_300 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -95,10 +95,13 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Voting (r:1 w:1) /// The range of component `x` is `[3, 10]`. /// The range of component `y` is `[2, 90]`. - fn vote(_x: u32, y: u32, ) -> Weight { - Weight::from_ref_time(48_443_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(y as u64)) + fn vote(x: u32, y: u32, ) -> Weight { + // Minimum execution time: 46_984 nanoseconds. + Weight::from_ref_time(46_837_255 as u64) + // Standard Error: 32_860 + .saturating_add(Weight::from_ref_time(273_691 as u64).saturating_mul(x as u64)) + // Standard Error: 2_781 + .saturating_add(Weight::from_ref_time(126_964 as u64).saturating_mul(y as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -108,9 +111,10 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Voting (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - Weight::from_ref_time(35_056_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(171_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 34_734 nanoseconds. + Weight::from_ref_time(37_652_708 as u64) + // Standard Error: 1_270 + .saturating_add(Weight::from_ref_time(183_078 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -123,13 +127,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(36_929_000 as u64) - // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(287_000 as u64).saturating_mul(x as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(105_000 as u64).saturating_mul(y as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(180_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 50_147 nanoseconds. + Weight::from_ref_time(42_719_616 as u64) + // Standard Error: 19_981 + .saturating_add(Weight::from_ref_time(188_796 as u64).saturating_mul(x as u64)) + // Standard Error: 1_947 + .saturating_add(Weight::from_ref_time(95_998 as u64).saturating_mul(y as u64)) + // Standard Error: 1_739 + .saturating_add(Weight::from_ref_time(177_837 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -142,14 +147,17 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
- fn close_early_approved(b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(48_085_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(100_000 as u64).saturating_mul(y as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(210_000 as u64).saturating_mul(p as u64)) + fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { + // Minimum execution time: 59_495 nanoseconds. + Weight::from_ref_time(53_137_721 as u64) + // Standard Error: 138 + .saturating_add(Weight::from_ref_time(1_979 as u64).saturating_mul(b as u64)) + // Standard Error: 16_388 + .saturating_add(Weight::from_ref_time(8_198 as u64).saturating_mul(x as u64)) + // Standard Error: 1_599 + .saturating_add(Weight::from_ref_time(86_577 as u64).saturating_mul(y as u64)) + // Standard Error: 1_428 + .saturating_add(Weight::from_ref_time(215_905 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -163,11 +171,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(43_377_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(99_000 as u64).saturating_mul(y as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(190_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 52_405 nanoseconds. + Weight::from_ref_time(44_494_732 as u64) + // Standard Error: 1_759 + .saturating_add(Weight::from_ref_time(118_517 as u64).saturating_mul(y as u64)) + // Standard Error: 1_572 + .saturating_add(Weight::from_ref_time(198_256 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -181,14 +190,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. - fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(41_417_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(67_000 as u64).saturating_mul(x as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(88_000 as u64).saturating_mul(y as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(196_000 as u64).saturating_mul(p as u64)) + fn close_approved(b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + // Minimum execution time: 52_737 nanoseconds. + Weight::from_ref_time(45_874_458 as u64) + // Standard Error: 140 + .saturating_add(Weight::from_ref_time(601 as u64).saturating_mul(b as u64)) + // Standard Error: 1_623 + .saturating_add(Weight::from_ref_time(88_372 as u64).saturating_mul(y as u64)) + // Standard Error: 1_449 + .saturating_add(Weight::from_ref_time(197_595 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -197,12 +207,15 @@ impl WeightInfo for SubstrateWeight { /// The range of component `x` is `[1, 10]`. /// The range of component `y` is `[0, 90]`. /// The range of component `z` is `[0, 100]`. 
- fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { - Weight::from_ref_time(37_202_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(149_000 as u64).saturating_mul(y as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(120_000 as u64).saturating_mul(z as u64)) + fn init_members(x: u32, y: u32, z: u32, ) -> Weight { + // Minimum execution time: 48_821 nanoseconds. + Weight::from_ref_time(32_972_152 as u64) + // Standard Error: 17_618 + .saturating_add(Weight::from_ref_time(230_451 as u64).saturating_mul(x as u64)) + // Standard Error: 1_865 + .saturating_add(Weight::from_ref_time(172_532 as u64).saturating_mul(y as u64)) + // Standard Error: 1_682 + .saturating_add(Weight::from_ref_time(145_258 as u64).saturating_mul(z as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -216,13 +229,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `y` is `[0, 100]`. /// The range of component `z` is `[0, 50]`. fn disband(x: u32, y: u32, z: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 9_000 - .saturating_add(Weight::from_ref_time(1_447_000 as u64).saturating_mul(x as u64)) - // Standard Error: 9_000 - .saturating_add(Weight::from_ref_time(1_512_000 as u64).saturating_mul(y as u64)) - // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(10_760_000 as u64).saturating_mul(z as u64)) + // Minimum execution time: 256_235 nanoseconds. + Weight::from_ref_time(258_695_000 as u64) + // Standard Error: 19_643 + .saturating_add(Weight::from_ref_time(436_821 as u64).saturating_mul(x as u64)) + // Standard Error: 19_549 + .saturating_add(Weight::from_ref_time(496_858 as u64).saturating_mul(y as u64)) + // Standard Error: 39_062 + .saturating_add(Weight::from_ref_time(9_169_692 as u64).saturating_mul(z as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(x as u64))) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(y as u64))) @@ -232,18 +246,21 @@ impl WeightInfo for SubstrateWeight { } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - Weight::from_ref_time(18_101_000 as u64) + // Minimum execution time: 19_205 nanoseconds. + Weight::from_ref_time(19_502_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - Weight::from_ref_time(21_090_000 as u64) + // Minimum execution time: 22_562 nanoseconds. + Weight::from_ref_time(22_842_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - Weight::from_ref_time(22_118_000 as u64) + // Minimum execution time: 23_773 nanoseconds. + Weight::from_ref_time(24_212_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -252,14 +269,16 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - Weight::from_ref_time(53_446_000 as u64) + // Minimum execution time: 57_709 nanoseconds. 
+ Weight::from_ref_time(59_155_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - Weight::from_ref_time(42_690_000 as u64) + // Minimum execution time: 44_576 nanoseconds. + Weight::from_ref_time(45_162_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -268,7 +287,8 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - Weight::from_ref_time(37_396_000 as u64) + // Minimum execution time: 38_913 nanoseconds. + Weight::from_ref_time(39_637_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -278,7 +298,8 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Prime (r:0 w:1) // Storage: Alliance RetiringMembers (r:0 w:1) fn give_retirement_notice() -> Weight { - Weight::from_ref_time(40_644_000 as u64) + // Minimum execution time: 42_947 nanoseconds. + Weight::from_ref_time(43_414_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } @@ -287,7 +308,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn retire() -> Weight { - Weight::from_ref_time(43_440_000 as u64) + // Minimum execution time: 46_281 nanoseconds. + Weight::from_ref_time(46_703_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -298,33 +320,36 @@ impl WeightInfo for SubstrateWeight { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - Weight::from_ref_time(61_060_000 as u64) + // Minimum execution time: 65_274 nanoseconds. + Weight::from_ref_time(65_762_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// The range of component `n` is `[1, 100]`. - /// The range of component `l` is `[1, 255]`. + /// The range of component `n` is `[0, 100]`. + /// The range of component `l` is `[0, 255]`. fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_362_000 as u64).saturating_mul(n as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(113_000 as u64).saturating_mul(l as u64)) + // Minimum execution time: 17_396 nanoseconds. + Weight::from_ref_time(17_638_000 as u64) + // Standard Error: 2_602 + .saturating_add(Weight::from_ref_time(1_286_177 as u64).saturating_mul(n as u64)) + // Standard Error: 1_019 + .saturating_add(Weight::from_ref_time(70_947 as u64).saturating_mul(l as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// The range of component `n` is `[1, 100]`. - /// The range of component `l` is `[1, 255]`. + /// The range of component `n` is `[0, 100]`. + /// The range of component `l` is `[0, 255]`. 
fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 147_000 - .saturating_add(Weight::from_ref_time(21_060_000 as u64).saturating_mul(n as u64)) - // Standard Error: 57_000 - .saturating_add(Weight::from_ref_time(3_683_000 as u64).saturating_mul(l as u64)) + // Minimum execution time: 17_446 nanoseconds. + Weight::from_ref_time(17_725_000 as u64) + // Standard Error: 163_579 + .saturating_add(Weight::from_ref_time(12_823_232 as u64).saturating_mul(n as u64)) + // Standard Error: 64_064 + .saturating_add(Weight::from_ref_time(496_642 as u64).saturating_mul(l as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -341,14 +366,13 @@ impl WeightInfo for () { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[0, 90]`. /// The range of component `p` is `[1, 100]`. - fn propose_proposed(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(34_420_000 as u64) - // Standard Error: 25_000 - .saturating_add(Weight::from_ref_time(145_000 as u64).saturating_mul(x as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(y as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(197_000 as u64).saturating_mul(p as u64)) + fn propose_proposed(_b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + // Minimum execution time: 43_720 nanoseconds. + Weight::from_ref_time(44_766_307 as u64) + // Standard Error: 2_522 + .saturating_add(Weight::from_ref_time(54_721 as u64).saturating_mul(y as u64)) + // Standard Error: 2_301 + .saturating_add(Weight::from_ref_time(173_300 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -356,10 +380,13 @@ impl WeightInfo for () { // Storage: AllianceMotion Voting (r:1 w:1) /// The range of component `x` is `[3, 10]`. /// The range of component `y` is `[2, 90]`. - fn vote(_x: u32, y: u32, ) -> Weight { - Weight::from_ref_time(48_443_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(y as u64)) + fn vote(x: u32, y: u32, ) -> Weight { + // Minimum execution time: 46_984 nanoseconds. + Weight::from_ref_time(46_837_255 as u64) + // Standard Error: 32_860 + .saturating_add(Weight::from_ref_time(273_691 as u64).saturating_mul(x as u64)) + // Standard Error: 2_781 + .saturating_add(Weight::from_ref_time(126_964 as u64).saturating_mul(y as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -369,9 +396,10 @@ impl WeightInfo for () { // Storage: AllianceMotion Voting (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn veto(p: u32, ) -> Weight { - Weight::from_ref_time(35_056_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(171_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 34_734 nanoseconds. + Weight::from_ref_time(37_652_708 as u64) + // Standard Error: 1_270 + .saturating_add(Weight::from_ref_time(183_078 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -384,13 +412,14 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
fn close_early_disapproved(x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(36_929_000 as u64) - // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(287_000 as u64).saturating_mul(x as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(105_000 as u64).saturating_mul(y as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(180_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 50_147 nanoseconds. + Weight::from_ref_time(42_719_616 as u64) + // Standard Error: 19_981 + .saturating_add(Weight::from_ref_time(188_796 as u64).saturating_mul(x as u64)) + // Standard Error: 1_947 + .saturating_add(Weight::from_ref_time(95_998 as u64).saturating_mul(y as u64)) + // Standard Error: 1_739 + .saturating_add(Weight::from_ref_time(177_837 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -403,14 +432,17 @@ impl WeightInfo for () { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, _x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(48_085_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(100_000 as u64).saturating_mul(y as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(210_000 as u64).saturating_mul(p as u64)) + fn close_early_approved(b: u32, x: u32, y: u32, p: u32, ) -> Weight { + // Minimum execution time: 59_495 nanoseconds. + Weight::from_ref_time(53_137_721 as u64) + // Standard Error: 138 + .saturating_add(Weight::from_ref_time(1_979 as u64).saturating_mul(b as u64)) + // Standard Error: 16_388 + .saturating_add(Weight::from_ref_time(8_198 as u64).saturating_mul(x as u64)) + // Standard Error: 1_599 + .saturating_add(Weight::from_ref_time(86_577 as u64).saturating_mul(y as u64)) + // Standard Error: 1_428 + .saturating_add(Weight::from_ref_time(215_905 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -424,11 +456,12 @@ impl WeightInfo for () { /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(_x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(43_377_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(99_000 as u64).saturating_mul(y as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(190_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 52_405 nanoseconds. + Weight::from_ref_time(44_494_732 as u64) + // Standard Error: 1_759 + .saturating_add(Weight::from_ref_time(118_517 as u64).saturating_mul(y as u64)) + // Standard Error: 1_572 + .saturating_add(Weight::from_ref_time(198_256 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -442,14 +475,15 @@ impl WeightInfo for () { /// The range of component `x` is `[2, 10]`. /// The range of component `y` is `[2, 90]`. /// The range of component `p` is `[1, 100]`. 
- fn close_approved(_b: u32, x: u32, y: u32, p: u32, ) -> Weight { - Weight::from_ref_time(41_417_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(67_000 as u64).saturating_mul(x as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(88_000 as u64).saturating_mul(y as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(196_000 as u64).saturating_mul(p as u64)) + fn close_approved(b: u32, _x: u32, y: u32, p: u32, ) -> Weight { + // Minimum execution time: 52_737 nanoseconds. + Weight::from_ref_time(45_874_458 as u64) + // Standard Error: 140 + .saturating_add(Weight::from_ref_time(601 as u64).saturating_mul(b as u64)) + // Standard Error: 1_623 + .saturating_add(Weight::from_ref_time(88_372 as u64).saturating_mul(y as u64)) + // Standard Error: 1_449 + .saturating_add(Weight::from_ref_time(197_595 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -458,12 +492,15 @@ impl WeightInfo for () { /// The range of component `x` is `[1, 10]`. /// The range of component `y` is `[0, 90]`. /// The range of component `z` is `[0, 100]`. - fn init_members(_x: u32, y: u32, z: u32, ) -> Weight { - Weight::from_ref_time(37_202_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(149_000 as u64).saturating_mul(y as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(120_000 as u64).saturating_mul(z as u64)) + fn init_members(x: u32, y: u32, z: u32, ) -> Weight { + // Minimum execution time: 48_821 nanoseconds. + Weight::from_ref_time(32_972_152 as u64) + // Standard Error: 17_618 + .saturating_add(Weight::from_ref_time(230_451 as u64).saturating_mul(x as u64)) + // Standard Error: 1_865 + .saturating_add(Weight::from_ref_time(172_532 as u64).saturating_mul(y as u64)) + // Standard Error: 1_682 + .saturating_add(Weight::from_ref_time(145_258 as u64).saturating_mul(z as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -477,13 +514,14 @@ impl WeightInfo for () { /// The range of component `y` is `[0, 100]`. /// The range of component `z` is `[0, 50]`. fn disband(x: u32, y: u32, z: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 9_000 - .saturating_add(Weight::from_ref_time(1_447_000 as u64).saturating_mul(x as u64)) - // Standard Error: 9_000 - .saturating_add(Weight::from_ref_time(1_512_000 as u64).saturating_mul(y as u64)) - // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(10_760_000 as u64).saturating_mul(z as u64)) + // Minimum execution time: 256_235 nanoseconds. + Weight::from_ref_time(258_695_000 as u64) + // Standard Error: 19_643 + .saturating_add(Weight::from_ref_time(436_821 as u64).saturating_mul(x as u64)) + // Standard Error: 19_549 + .saturating_add(Weight::from_ref_time(496_858 as u64).saturating_mul(y as u64)) + // Standard Error: 39_062 + .saturating_add(Weight::from_ref_time(9_169_692 as u64).saturating_mul(z as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(x as u64))) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(y as u64))) @@ -493,18 +531,21 @@ impl WeightInfo for () { } // Storage: Alliance Rule (r:0 w:1) fn set_rule() -> Weight { - Weight::from_ref_time(18_101_000 as u64) + // Minimum execution time: 19_205 nanoseconds. 
+ Weight::from_ref_time(19_502_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn announce() -> Weight { - Weight::from_ref_time(21_090_000 as u64) + // Minimum execution time: 22_562 nanoseconds. + Weight::from_ref_time(22_842_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Alliance Announcements (r:1 w:1) fn remove_announcement() -> Weight { - Weight::from_ref_time(22_118_000 as u64) + // Minimum execution time: 23_773 nanoseconds. + Weight::from_ref_time(24_212_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -513,14 +554,16 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Alliance DepositOf (r:0 w:1) fn join_alliance() -> Weight { - Weight::from_ref_time(53_446_000 as u64) + // Minimum execution time: 57_709 nanoseconds. + Weight::from_ref_time(59_155_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Alliance Members (r:4 w:1) // Storage: Alliance UnscrupulousAccounts (r:1 w:0) fn nominate_ally() -> Weight { - Weight::from_ref_time(42_690_000 as u64) + // Minimum execution time: 44_576 nanoseconds. + Weight::from_ref_time(45_162_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -529,7 +572,8 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn elevate_ally() -> Weight { - Weight::from_ref_time(37_396_000 as u64) + // Minimum execution time: 38_913 nanoseconds. + Weight::from_ref_time(39_637_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -539,7 +583,8 @@ impl WeightInfo for () { // Storage: AllianceMotion Prime (r:0 w:1) // Storage: Alliance RetiringMembers (r:0 w:1) fn give_retirement_notice() -> Weight { - Weight::from_ref_time(40_644_000 as u64) + // Minimum execution time: 42_947 nanoseconds. + Weight::from_ref_time(43_414_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } @@ -548,7 +593,8 @@ impl WeightInfo for () { // Storage: Alliance DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn retire() -> Weight { - Weight::from_ref_time(43_440_000 as u64) + // Minimum execution time: 46_281 nanoseconds. + Weight::from_ref_time(46_703_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -559,33 +605,36 @@ impl WeightInfo for () { // Storage: AllianceMotion Members (r:0 w:1) // Storage: AllianceMotion Prime (r:0 w:1) fn kick_member() -> Weight { - Weight::from_ref_time(61_060_000 as u64) + // Minimum execution time: 65_274 nanoseconds. + Weight::from_ref_time(65_762_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// The range of component `n` is `[1, 100]`. - /// The range of component `l` is `[1, 255]`. + /// The range of component `n` is `[0, 100]`. + /// The range of component `l` is `[0, 255]`. 
fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(1_362_000 as u64).saturating_mul(n as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(113_000 as u64).saturating_mul(l as u64)) + // Minimum execution time: 17_396 nanoseconds. + Weight::from_ref_time(17_638_000 as u64) + // Standard Error: 2_602 + .saturating_add(Weight::from_ref_time(1_286_177 as u64).saturating_mul(n as u64)) + // Standard Error: 1_019 + .saturating_add(Weight::from_ref_time(70_947 as u64).saturating_mul(l as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Alliance UnscrupulousAccounts (r:1 w:1) // Storage: Alliance UnscrupulousWebsites (r:1 w:1) - /// The range of component `n` is `[1, 100]`. - /// The range of component `l` is `[1, 255]`. + /// The range of component `n` is `[0, 100]`. + /// The range of component `l` is `[0, 255]`. fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 147_000 - .saturating_add(Weight::from_ref_time(21_060_000 as u64).saturating_mul(n as u64)) - // Standard Error: 57_000 - .saturating_add(Weight::from_ref_time(3_683_000 as u64).saturating_mul(l as u64)) + // Minimum execution time: 17_446 nanoseconds. + Weight::from_ref_time(17_725_000 as u64) + // Standard Error: 163_579 + .saturating_add(Weight::from_ref_time(12_823_232 as u64).saturating_mul(n as u64)) + // Standard Error: 64_064 + .saturating_add(Weight::from_ref_time(496_642 as u64).saturating_mul(l as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/assets/src/weights.rs b/frame/assets/src/weights.rs index c260da70e3480..3b29e55b306fe 100644 --- a/frame/assets/src/weights.rs +++ b/frame/assets/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,24 +18,24 @@ //! Autogenerated weights for pallet_assets //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_assets // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_assets -// --chain=dev // --output=./frame/assets/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -77,15 +77,15 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - // Minimum execution time: 32_200 nanoseconds. - Weight::from_ref_time(32_739_000 as u64) + // Minimum execution time: 33_241 nanoseconds. + Weight::from_ref_time(33_873_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - // Minimum execution time: 19_192 nanoseconds. - Weight::from_ref_time(19_615_000 as u64) + // Minimum execution time: 19_883 nanoseconds. + Weight::from_ref_time(20_651_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -98,12 +98,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 5000]`. /// The range of component `a` is `[0, 500]`. fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - // Minimum execution time: 75_299_531 nanoseconds. - Weight::from_ref_time(75_567_795_000 as u64) - // Standard Error: 126_181 - .saturating_add(Weight::from_ref_time(8_378_363 as u64).saturating_mul(c as u64)) - // Standard Error: 126_181 - .saturating_add(Weight::from_ref_time(10_950_076 as u64).saturating_mul(s as u64)) + // Minimum execution time: 76_222_544 nanoseconds. + Weight::from_ref_time(76_864_587_000 as u64) + // Standard Error: 127_086 + .saturating_add(Weight::from_ref_time(8_645_143 as u64).saturating_mul(c as u64)) + // Standard Error: 127_086 + .saturating_add(Weight::from_ref_time(11_281_301 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(c as u64))) .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(s as u64))) @@ -116,16 +116,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - // Minimum execution time: 35_066 nanoseconds. - Weight::from_ref_time(36_217_000 as u64) + // Minimum execution time: 36_782 nanoseconds. + Weight::from_ref_time(37_340_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - // Minimum execution time: 44_915 nanoseconds. - Weight::from_ref_time(45_807_000 as u64) + // Minimum execution time: 44_425 nanoseconds. 
+ Weight::from_ref_time(45_485_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -133,8 +133,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - // Minimum execution time: 57_245 nanoseconds. - Weight::from_ref_time(58_179_000 as u64) + // Minimum execution time: 58_294 nanoseconds. + Weight::from_ref_time(59_447_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -142,8 +142,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - // Minimum execution time: 47_028 nanoseconds. - Weight::from_ref_time(47_571_000 as u64) + // Minimum execution time: 46_704 nanoseconds. + Weight::from_ref_time(47_521_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -151,53 +151,53 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - // Minimum execution time: 57_069 nanoseconds. - Weight::from_ref_time(58_245_000 as u64) + // Minimum execution time: 57_647 nanoseconds. + Weight::from_ref_time(58_417_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - // Minimum execution time: 26_823 nanoseconds. - Weight::from_ref_time(27_689_000 as u64) + // Minimum execution time: 26_827 nanoseconds. + Weight::from_ref_time(27_373_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - // Minimum execution time: 26_984 nanoseconds. - Weight::from_ref_time(27_591_000 as u64) + // Minimum execution time: 26_291 nanoseconds. + Weight::from_ref_time(26_854_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - // Minimum execution time: 23_803 nanoseconds. - Weight::from_ref_time(24_269_000 as u64) + // Minimum execution time: 22_694 nanoseconds. + Weight::from_ref_time(23_613_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - // Minimum execution time: 22_977 nanoseconds. - Weight::from_ref_time(23_527_000 as u64) + // Minimum execution time: 22_572 nanoseconds. + Weight::from_ref_time(24_121_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - // Minimum execution time: 23_815 nanoseconds. - Weight::from_ref_time(24_597_000 as u64) + // Minimum execution time: 23_949 nanoseconds. + Weight::from_ref_time(24_347_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - // Minimum execution time: 22_691 nanoseconds. 
- Weight::from_ref_time(23_173_000 as u64) + // Minimum execution time: 23_102 nanoseconds. + Weight::from_ref_time(23_518_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -205,17 +205,19 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Metadata (r:1 w:1) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { - // Minimum execution time: 40_675 nanoseconds. - Weight::from_ref_time(42_869_113 as u64) + fn set_metadata(_n: u32, s: u32, ) -> Weight { + // Minimum execution time: 41_032 nanoseconds. + Weight::from_ref_time(42_845_624 as u64) + // Standard Error: 1_274 + .saturating_add(Weight::from_ref_time(1_875 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - // Minimum execution time: 42_361 nanoseconds. - Weight::from_ref_time(42_912_000 as u64) + // Minimum execution time: 42_570 nanoseconds. + Weight::from_ref_time(42_957_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -224,35 +226,35 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. fn force_set_metadata(n: u32, s: u32, ) -> Weight { - // Minimum execution time: 22_998 nanoseconds. - Weight::from_ref_time(23_844_675 as u64) - // Standard Error: 506 - .saturating_add(Weight::from_ref_time(2_874 as u64).saturating_mul(n as u64)) - // Standard Error: 506 - .saturating_add(Weight::from_ref_time(3_817 as u64).saturating_mul(s as u64)) + // Minimum execution time: 22_768 nanoseconds. + Weight::from_ref_time(23_868_816 as u64) + // Standard Error: 612 + .saturating_add(Weight::from_ref_time(1_602 as u64).saturating_mul(n as u64)) + // Standard Error: 612 + .saturating_add(Weight::from_ref_time(2_097 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - // Minimum execution time: 42_305 nanoseconds. - Weight::from_ref_time(42_678_000 as u64) + // Minimum execution time: 41_863 nanoseconds. + Weight::from_ref_time(42_643_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - // Minimum execution time: 22_052 nanoseconds. - Weight::from_ref_time(22_610_000 as u64) + // Minimum execution time: 21_747 nanoseconds. + Weight::from_ref_time(22_595_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn approve_transfer() -> Weight { - // Minimum execution time: 44_900 nanoseconds. - Weight::from_ref_time(46_032_000 as u64) + // Minimum execution time: 45_602 nanoseconds. 
+ Weight::from_ref_time(46_004_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -261,24 +263,24 @@ impl WeightInfo for SubstrateWeight { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - // Minimum execution time: 72_261 nanoseconds. - Weight::from_ref_time(73_186_000 as u64) + // Minimum execution time: 70_944 nanoseconds. + Weight::from_ref_time(71_722_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - // Minimum execution time: 47_268 nanoseconds. - Weight::from_ref_time(47_712_000 as u64) + // Minimum execution time: 46_316 nanoseconds. + Weight::from_ref_time(46_910_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - // Minimum execution time: 47_363 nanoseconds. - Weight::from_ref_time(48_696_000 as u64) + // Minimum execution time: 47_145 nanoseconds. + Weight::from_ref_time(47_611_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -288,15 +290,15 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Assets Asset (r:1 w:1) fn create() -> Weight { - // Minimum execution time: 32_200 nanoseconds. - Weight::from_ref_time(32_739_000 as u64) + // Minimum execution time: 33_241 nanoseconds. + Weight::from_ref_time(33_873_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_create() -> Weight { - // Minimum execution time: 19_192 nanoseconds. - Weight::from_ref_time(19_615_000 as u64) + // Minimum execution time: 19_883 nanoseconds. + Weight::from_ref_time(20_651_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -309,12 +311,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 5000]`. /// The range of component `a` is `[0, 500]`. fn destroy(c: u32, s: u32, a: u32, ) -> Weight { - // Minimum execution time: 75_299_531 nanoseconds. - Weight::from_ref_time(75_567_795_000 as u64) - // Standard Error: 126_181 - .saturating_add(Weight::from_ref_time(8_378_363 as u64).saturating_mul(c as u64)) - // Standard Error: 126_181 - .saturating_add(Weight::from_ref_time(10_950_076 as u64).saturating_mul(s as u64)) + // Minimum execution time: 76_222_544 nanoseconds. + Weight::from_ref_time(76_864_587_000 as u64) + // Standard Error: 127_086 + .saturating_add(Weight::from_ref_time(8_645_143 as u64).saturating_mul(c as u64)) + // Standard Error: 127_086 + .saturating_add(Weight::from_ref_time(11_281_301 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(c as u64))) .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(s as u64))) @@ -327,16 +329,16 @@ impl WeightInfo for () { // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn mint() -> Weight { - // Minimum execution time: 35_066 nanoseconds. - Weight::from_ref_time(36_217_000 as u64) + // Minimum execution time: 36_782 nanoseconds. 
+ Weight::from_ref_time(37_340_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Account (r:1 w:1) fn burn() -> Weight { - // Minimum execution time: 44_915 nanoseconds. - Weight::from_ref_time(45_807_000 as u64) + // Minimum execution time: 44_425 nanoseconds. + Weight::from_ref_time(45_485_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -344,8 +346,8 @@ impl WeightInfo for () { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - // Minimum execution time: 57_245 nanoseconds. - Weight::from_ref_time(58_179_000 as u64) + // Minimum execution time: 58_294 nanoseconds. + Weight::from_ref_time(59_447_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -353,8 +355,8 @@ impl WeightInfo for () { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - // Minimum execution time: 47_028 nanoseconds. - Weight::from_ref_time(47_571_000 as u64) + // Minimum execution time: 46_704 nanoseconds. + Weight::from_ref_time(47_521_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -362,53 +364,53 @@ impl WeightInfo for () { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - // Minimum execution time: 57_069 nanoseconds. - Weight::from_ref_time(58_245_000 as u64) + // Minimum execution time: 57_647 nanoseconds. + Weight::from_ref_time(58_417_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn freeze() -> Weight { - // Minimum execution time: 26_823 nanoseconds. - Weight::from_ref_time(27_689_000 as u64) + // Minimum execution time: 26_827 nanoseconds. + Weight::from_ref_time(27_373_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Account (r:1 w:1) fn thaw() -> Weight { - // Minimum execution time: 26_984 nanoseconds. - Weight::from_ref_time(27_591_000 as u64) + // Minimum execution time: 26_291 nanoseconds. + Weight::from_ref_time(26_854_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn freeze_asset() -> Weight { - // Minimum execution time: 23_803 nanoseconds. - Weight::from_ref_time(24_269_000 as u64) + // Minimum execution time: 22_694 nanoseconds. + Weight::from_ref_time(23_613_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn thaw_asset() -> Weight { - // Minimum execution time: 22_977 nanoseconds. - Weight::from_ref_time(23_527_000 as u64) + // Minimum execution time: 22_572 nanoseconds. + Weight::from_ref_time(24_121_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Metadata (r:1 w:0) fn transfer_ownership() -> Weight { - // Minimum execution time: 23_815 nanoseconds. 
- Weight::from_ref_time(24_597_000 as u64) + // Minimum execution time: 23_949 nanoseconds. + Weight::from_ref_time(24_347_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn set_team() -> Weight { - // Minimum execution time: 22_691 nanoseconds. - Weight::from_ref_time(23_173_000 as u64) + // Minimum execution time: 23_102 nanoseconds. + Weight::from_ref_time(23_518_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -416,17 +418,19 @@ impl WeightInfo for () { // Storage: Assets Metadata (r:1 w:1) /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { - // Minimum execution time: 40_675 nanoseconds. - Weight::from_ref_time(42_869_113 as u64) + fn set_metadata(_n: u32, s: u32, ) -> Weight { + // Minimum execution time: 41_032 nanoseconds. + Weight::from_ref_time(42_845_624 as u64) + // Standard Error: 1_274 + .saturating_add(Weight::from_ref_time(1_875 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn clear_metadata() -> Weight { - // Minimum execution time: 42_361 nanoseconds. - Weight::from_ref_time(42_912_000 as u64) + // Minimum execution time: 42_570 nanoseconds. + Weight::from_ref_time(42_957_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -435,35 +439,35 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 50]`. /// The range of component `s` is `[0, 50]`. fn force_set_metadata(n: u32, s: u32, ) -> Weight { - // Minimum execution time: 22_998 nanoseconds. - Weight::from_ref_time(23_844_675 as u64) - // Standard Error: 506 - .saturating_add(Weight::from_ref_time(2_874 as u64).saturating_mul(n as u64)) - // Standard Error: 506 - .saturating_add(Weight::from_ref_time(3_817 as u64).saturating_mul(s as u64)) + // Minimum execution time: 22_768 nanoseconds. + Weight::from_ref_time(23_868_816 as u64) + // Standard Error: 612 + .saturating_add(Weight::from_ref_time(1_602 as u64).saturating_mul(n as u64)) + // Standard Error: 612 + .saturating_add(Weight::from_ref_time(2_097 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:0) // Storage: Assets Metadata (r:1 w:1) fn force_clear_metadata() -> Weight { - // Minimum execution time: 42_305 nanoseconds. - Weight::from_ref_time(42_678_000 as u64) + // Minimum execution time: 41_863 nanoseconds. + Weight::from_ref_time(42_643_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) fn force_asset_status() -> Weight { - // Minimum execution time: 22_052 nanoseconds. - Weight::from_ref_time(22_610_000 as u64) + // Minimum execution time: 21_747 nanoseconds. + Weight::from_ref_time(22_595_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn approve_transfer() -> Weight { - // Minimum execution time: 44_900 nanoseconds. 
- Weight::from_ref_time(46_032_000 as u64) + // Minimum execution time: 45_602 nanoseconds. + Weight::from_ref_time(46_004_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -472,24 +476,24 @@ impl WeightInfo for () { // Storage: Assets Account (r:2 w:2) // Storage: System Account (r:1 w:1) fn transfer_approved() -> Weight { - // Minimum execution time: 72_261 nanoseconds. - Weight::from_ref_time(73_186_000 as u64) + // Minimum execution time: 70_944 nanoseconds. + Weight::from_ref_time(71_722_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn cancel_approval() -> Weight { - // Minimum execution time: 47_268 nanoseconds. - Weight::from_ref_time(47_712_000 as u64) + // Minimum execution time: 46_316 nanoseconds. + Weight::from_ref_time(46_910_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Assets Asset (r:1 w:1) // Storage: Assets Approvals (r:1 w:1) fn force_cancel_approval() -> Weight { - // Minimum execution time: 47_363 nanoseconds. - Weight::from_ref_time(48_696_000 as u64) + // Minimum execution time: 47_145 nanoseconds. + Weight::from_ref_time(47_611_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/bags-list/src/weights.rs b/frame/bags-list/src/weights.rs index 46f001972c519..a298dc3172f79 100644 --- a/frame/bags-list/src/weights.rs +++ b/frame/bags-list/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_bags_list //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/bags-list/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,29 +57,32 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) - // Storage: BagsList ListNodes (r:4 w:4) - // Storage: BagsList ListBags (r:1 w:1) + // Storage: VoterList ListNodes (r:4 w:4) + // Storage: VoterList ListBags (r:1 w:1) fn rebag_non_terminal() -> Weight { - Weight::from_ref_time(55_040_000 as u64) + // Minimum execution time: 73_553 nanoseconds. 
+ Weight::from_ref_time(74_366_000 as u64) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) fn rebag_terminal() -> Weight { - Weight::from_ref_time(53_671_000 as u64) + // Minimum execution time: 72_700 nanoseconds. + Weight::from_ref_time(73_322_000 as u64) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } - // Storage: BagsList ListNodes (r:4 w:4) + // Storage: VoterList ListNodes (r:4 w:4) // Storage: Staking Bonded (r:2 w:0) // Storage: Staking Ledger (r:2 w:0) - // Storage: BagsList CounterForListNodes (r:1 w:1) - // Storage: BagsList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListBags (r:1 w:1) fn put_in_front_of() -> Weight { - Weight::from_ref_time(56_410_000 as u64) + // Minimum execution time: 71_691 nanoseconds. + Weight::from_ref_time(72_798_000 as u64) .saturating_add(T::DbWeight::get().reads(10 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -86,29 +92,32 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) - // Storage: BagsList ListNodes (r:4 w:4) - // Storage: BagsList ListBags (r:1 w:1) + // Storage: VoterList ListNodes (r:4 w:4) + // Storage: VoterList ListBags (r:1 w:1) fn rebag_non_terminal() -> Weight { - Weight::from_ref_time(55_040_000 as u64) + // Minimum execution time: 73_553 nanoseconds. + Weight::from_ref_time(74_366_000 as u64) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) fn rebag_terminal() -> Weight { - Weight::from_ref_time(53_671_000 as u64) + // Minimum execution time: 72_700 nanoseconds. + Weight::from_ref_time(73_322_000 as u64) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } - // Storage: BagsList ListNodes (r:4 w:4) + // Storage: VoterList ListNodes (r:4 w:4) // Storage: Staking Bonded (r:2 w:0) // Storage: Staking Ledger (r:2 w:0) - // Storage: BagsList CounterForListNodes (r:1 w:1) - // Storage: BagsList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListBags (r:1 w:1) fn put_in_front_of() -> Weight { - Weight::from_ref_time(56_410_000 as u64) + // Minimum execution time: 71_691 nanoseconds. + Weight::from_ref_time(72_798_000 as u64) .saturating_add(RocksDbWeight::get().reads(10 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index 42d165e61a38e..6324745fd4310 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_balances //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/balances/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,43 +61,50 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(41_860_000 as u64) + // Minimum execution time: 48_134 nanoseconds. + Weight::from_ref_time(48_811_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - Weight::from_ref_time(32_760_000 as u64) + // Minimum execution time: 36_586 nanoseconds. + Weight::from_ref_time(36_966_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - Weight::from_ref_time(22_279_000 as u64) + // Minimum execution time: 28_486 nanoseconds. + Weight::from_ref_time(28_940_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - Weight::from_ref_time(25_488_000 as u64) + // Minimum execution time: 31_225 nanoseconds. + Weight::from_ref_time(31_946_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - Weight::from_ref_time(42_190_000 as u64) + // Minimum execution time: 47_347 nanoseconds. + Weight::from_ref_time(48_005_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - Weight::from_ref_time(37_789_000 as u64) + // Minimum execution time: 41_668 nanoseconds. + Weight::from_ref_time(42_232_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn force_unreserve() -> Weight { - Weight::from_ref_time(20_056_000 as u64) + // Minimum execution time: 23_741 nanoseconds. 
+ Weight::from_ref_time(24_073_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -104,43 +114,50 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(41_860_000 as u64) + // Minimum execution time: 48_134 nanoseconds. + Weight::from_ref_time(48_811_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - Weight::from_ref_time(32_760_000 as u64) + // Minimum execution time: 36_586 nanoseconds. + Weight::from_ref_time(36_966_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - Weight::from_ref_time(22_279_000 as u64) + // Minimum execution time: 28_486 nanoseconds. + Weight::from_ref_time(28_940_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - Weight::from_ref_time(25_488_000 as u64) + // Minimum execution time: 31_225 nanoseconds. + Weight::from_ref_time(31_946_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - Weight::from_ref_time(42_190_000 as u64) + // Minimum execution time: 47_347 nanoseconds. + Weight::from_ref_time(48_005_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - Weight::from_ref_time(37_789_000 as u64) + // Minimum execution time: 41_668 nanoseconds. + Weight::from_ref_time(42_232_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) fn force_unreserve() -> Weight { - Weight::from_ref_time(20_056_000 as u64) + // Minimum execution time: 23_741 nanoseconds. + Weight::from_ref_time(24_073_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/benchmarking/src/weights.rs b/frame/benchmarking/src/weights.rs index 62117a6f65b07..5e5a2e7ee343c 100644 --- a/frame/benchmarking/src/weights.rs +++ b/frame/benchmarking/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for frame_benchmarking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/benchmarking/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,76 +60,108 @@ pub trait WeightInfo { /// Weights for frame_benchmarking using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + /// The range of component `i` is `[0, 1000000]`. fn addition(_i: u32, ) -> Weight { - Weight::from_ref_time(103_000 as u64) + // Minimum execution time: 108 nanoseconds. + Weight::from_ref_time(137_610 as u64) } + /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { - Weight::from_ref_time(105_000 as u64) + // Minimum execution time: 104 nanoseconds. + Weight::from_ref_time(133_508 as u64) } + /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { - Weight::from_ref_time(113_000 as u64) + // Minimum execution time: 110 nanoseconds. + Weight::from_ref_time(140_230 as u64) } + /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { - Weight::from_ref_time(102_000 as u64) + // Minimum execution time: 96 nanoseconds. + Weight::from_ref_time(136_059 as u64) } + /// The range of component `i` is `[0, 100]`. fn hashing(_i: u32, ) -> Weight { - Weight::from_ref_time(20_865_902_000 as u64) + // Minimum execution time: 21_804_747 nanoseconds. + Weight::from_ref_time(22_013_681_386 as u64) } + /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { - Weight::from_ref_time(319_000 as u64) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(47_171_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 136 nanoseconds. + Weight::from_ref_time(156_000 as u64) + // Standard Error: 4_531 + .saturating_add(Weight::from_ref_time(46_817_640 as u64).saturating_mul(i as u64)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `i` is `[0, 1000]`. fn storage_read(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_110_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 125 nanoseconds. + Weight::from_ref_time(135_000 as u64) + // Standard Error: 3_651 + .saturating_add(Weight::from_ref_time(2_021_172 as u64).saturating_mul(i as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `i` is `[0, 1000]`. fn storage_write(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(372_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 120 nanoseconds. + Weight::from_ref_time(131_000 as u64) + // Standard Error: 348 + .saturating_add(Weight::from_ref_time(377_243 as u64).saturating_mul(i as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } } // For backwards compatibility and tests impl WeightInfo for () { + /// The range of component `i` is `[0, 1000000]`. fn addition(_i: u32, ) -> Weight { - Weight::from_ref_time(103_000 as u64) + // Minimum execution time: 108 nanoseconds. 
+ Weight::from_ref_time(137_610 as u64) } + /// The range of component `i` is `[0, 1000000]`. fn subtraction(_i: u32, ) -> Weight { - Weight::from_ref_time(105_000 as u64) + // Minimum execution time: 104 nanoseconds. + Weight::from_ref_time(133_508 as u64) } + /// The range of component `i` is `[0, 1000000]`. fn multiplication(_i: u32, ) -> Weight { - Weight::from_ref_time(113_000 as u64) + // Minimum execution time: 110 nanoseconds. + Weight::from_ref_time(140_230 as u64) } + /// The range of component `i` is `[0, 1000000]`. fn division(_i: u32, ) -> Weight { - Weight::from_ref_time(102_000 as u64) + // Minimum execution time: 96 nanoseconds. + Weight::from_ref_time(136_059 as u64) } + /// The range of component `i` is `[0, 100]`. fn hashing(_i: u32, ) -> Weight { - Weight::from_ref_time(20_865_902_000 as u64) + // Minimum execution time: 21_804_747 nanoseconds. + Weight::from_ref_time(22_013_681_386 as u64) } + /// The range of component `i` is `[0, 100]`. fn sr25519_verification(i: u32, ) -> Weight { - Weight::from_ref_time(319_000 as u64) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(47_171_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 136 nanoseconds. + Weight::from_ref_time(156_000 as u64) + // Standard Error: 4_531 + .saturating_add(Weight::from_ref_time(46_817_640 as u64).saturating_mul(i as u64)) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `i` is `[0, 1000]`. fn storage_read(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(2_110_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 125 nanoseconds. + Weight::from_ref_time(135_000 as u64) + // Standard Error: 3_651 + .saturating_add(Weight::from_ref_time(2_021_172 as u64).saturating_mul(i as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) } // Storage: Skipped Metadata (r:0 w:0) + /// The range of component `i` is `[0, 1000]`. fn storage_write(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(372_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 120 nanoseconds. + Weight::from_ref_time(131_000 as u64) + // Standard Error: 348 + .saturating_add(Weight::from_ref_time(377_243 as u64).saturating_mul(i as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } } diff --git a/frame/bounties/src/weights.rs b/frame/bounties/src/weights.rs index 7255ece5c223a..71f4bf01899fb 100644 --- a/frame/bounties/src/weights.rs +++ b/frame/bounties/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_bounties //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/bounties/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -64,42 +67,51 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) // Storage: Bounties Bounties (r:0 w:1) - fn propose_bounty(_d: u32, ) -> Weight { - Weight::from_ref_time(28_903_000 as u64) + /// The range of component `d` is `[0, 300]`. + fn propose_bounty(d: u32, ) -> Weight { + // Minimum execution time: 33_514 nanoseconds. + Weight::from_ref_time(34_906_466 as u64) + // Standard Error: 226 + .saturating_add(Weight::from_ref_time(2_241 as u64).saturating_mul(d as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) fn approve_bounty() -> Weight { - Weight::from_ref_time(10_997_000 as u64) + // Minimum execution time: 14_129 nanoseconds. + Weight::from_ref_time(14_424_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) fn propose_curator() -> Weight { - Weight::from_ref_time(8_967_000 as u64) + // Minimum execution time: 13_675 nanoseconds. + Weight::from_ref_time(13_964_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - Weight::from_ref_time(28_665_000 as u64) + // Minimum execution time: 38_926 nanoseconds. + Weight::from_ref_time(40_183_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - Weight::from_ref_time(25_141_000 as u64) + // Minimum execution time: 33_774 nanoseconds. + Weight::from_ref_time(34_295_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) fn award_bounty() -> Weight { - Weight::from_ref_time(21_295_000 as u64) + // Minimum execution time: 28_558 nanoseconds. + Weight::from_ref_time(29_293_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -108,7 +120,8 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn claim_bounty() -> Weight { - Weight::from_ref_time(67_951_000 as u64) + // Minimum execution time: 77_042 nanoseconds. 
+ Weight::from_ref_time(77_730_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -117,7 +130,8 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_proposed() -> Weight { - Weight::from_ref_time(33_654_000 as u64) + // Minimum execution time: 43_632 nanoseconds. + Weight::from_ref_time(44_196_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -126,23 +140,27 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:2 w:2) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_active() -> Weight { - Weight::from_ref_time(50_582_000 as u64) + // Minimum execution time: 59_519 nanoseconds. + Weight::from_ref_time(59_967_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Bounties Bounties (r:1 w:1) fn extend_bounty_expiry() -> Weight { - Weight::from_ref_time(18_322_000 as u64) + // Minimum execution time: 25_263 nanoseconds. + Weight::from_ref_time(25_788_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Bounties BountyApprovals (r:1 w:1) - // Storage: Bounties Bounties (r:1 w:1) - // Storage: System Account (r:2 w:2) + // Storage: Bounties Bounties (r:2 w:2) + // Storage: System Account (r:4 w:4) + /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(29_233_000 as u64).saturating_mul(b as u64)) + // Minimum execution time: 8_953 nanoseconds. + Weight::from_ref_time(23_242_227 as u64) + // Standard Error: 13_187 + .saturating_add(Weight::from_ref_time(26_727_999 as u64).saturating_mul(b as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(b as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) @@ -156,42 +174,51 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) // Storage: Bounties Bounties (r:0 w:1) - fn propose_bounty(_d: u32, ) -> Weight { - Weight::from_ref_time(28_903_000 as u64) + /// The range of component `d` is `[0, 300]`. + fn propose_bounty(d: u32, ) -> Weight { + // Minimum execution time: 33_514 nanoseconds. + Weight::from_ref_time(34_906_466 as u64) + // Standard Error: 226 + .saturating_add(Weight::from_ref_time(2_241 as u64).saturating_mul(d as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: Bounties BountyApprovals (r:1 w:1) fn approve_bounty() -> Weight { - Weight::from_ref_time(10_997_000 as u64) + // Minimum execution time: 14_129 nanoseconds. + Weight::from_ref_time(14_424_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) fn propose_curator() -> Weight { - Weight::from_ref_time(8_967_000 as u64) + // Minimum execution time: 13_675 nanoseconds. 
+ Weight::from_ref_time(13_964_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - Weight::from_ref_time(28_665_000 as u64) + // Minimum execution time: 38_926 nanoseconds. + Weight::from_ref_time(40_183_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - Weight::from_ref_time(25_141_000 as u64) + // Minimum execution time: 33_774 nanoseconds. + Weight::from_ref_time(34_295_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:1) // Storage: ChildBounties ParentChildBounties (r:1 w:0) fn award_bounty() -> Weight { - Weight::from_ref_time(21_295_000 as u64) + // Minimum execution time: 28_558 nanoseconds. + Weight::from_ref_time(29_293_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -200,7 +227,8 @@ impl WeightInfo for () { // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn claim_bounty() -> Weight { - Weight::from_ref_time(67_951_000 as u64) + // Minimum execution time: 77_042 nanoseconds. + Weight::from_ref_time(77_730_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -209,7 +237,8 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_proposed() -> Weight { - Weight::from_ref_time(33_654_000 as u64) + // Minimum execution time: 43_632 nanoseconds. + Weight::from_ref_time(44_196_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -218,23 +247,27 @@ impl WeightInfo for () { // Storage: System Account (r:2 w:2) // Storage: Bounties BountyDescriptions (r:0 w:1) fn close_bounty_active() -> Weight { - Weight::from_ref_time(50_582_000 as u64) + // Minimum execution time: 59_519 nanoseconds. + Weight::from_ref_time(59_967_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Bounties Bounties (r:1 w:1) fn extend_bounty_expiry() -> Weight { - Weight::from_ref_time(18_322_000 as u64) + // Minimum execution time: 25_263 nanoseconds. + Weight::from_ref_time(25_788_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Bounties BountyApprovals (r:1 w:1) - // Storage: Bounties Bounties (r:1 w:1) - // Storage: System Account (r:2 w:2) + // Storage: Bounties Bounties (r:2 w:2) + // Storage: System Account (r:4 w:4) + /// The range of component `b` is `[0, 100]`. fn spend_funds(b: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(29_233_000 as u64).saturating_mul(b as u64)) + // Minimum execution time: 8_953 nanoseconds. 
+ Weight::from_ref_time(23_242_227 as u64) + // Standard Error: 13_187 + .saturating_add(Weight::from_ref_time(26_727_999 as u64).saturating_mul(b as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(b as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) diff --git a/frame/child-bounties/src/weights.rs b/frame/child-bounties/src/weights.rs index 564fb0ae23d15..235c84320effa 100644 --- a/frame/child-bounties/src/weights.rs +++ b/frame/child-bounties/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_child_bounties //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/child-bounties/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -63,10 +66,12 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ChildBountyCount (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) // Storage: ChildBounties ChildBounties (r:0 w:1) + /// The range of component `d` is `[0, 300]`. fn add_child_bounty(d: u32, ) -> Weight { - Weight::from_ref_time(51_064_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(d as u64)) + // Minimum execution time: 59_121 nanoseconds. + Weight::from_ref_time(60_212_235 as u64) + // Standard Error: 149 + .saturating_add(Weight::from_ref_time(412 as u64).saturating_mul(d as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -74,7 +79,8 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) fn propose_curator() -> Weight { - Weight::from_ref_time(15_286_000 as u64) + // Minimum execution time: 20_785 nanoseconds. + Weight::from_ref_time(21_000_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -82,7 +88,8 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - Weight::from_ref_time(29_929_000 as u64) + // Minimum execution time: 37_874 nanoseconds. 
+ Weight::from_ref_time(38_322_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -90,14 +97,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Bounties Bounties (r:1 w:0) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - Weight::from_ref_time(32_449_000 as u64) + // Minimum execution time: 43_385 nanoseconds. + Weight::from_ref_time(43_774_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) fn award_child_bounty() -> Weight { - Weight::from_ref_time(23_793_000 as u64) + // Minimum execution time: 31_390 nanoseconds. + Weight::from_ref_time(31_802_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -106,7 +115,8 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn claim_child_bounty() -> Weight { - Weight::from_ref_time(67_529_000 as u64) + // Minimum execution time: 74_956 nanoseconds. + Weight::from_ref_time(75_850_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -117,7 +127,8 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:2 w:2) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_added() -> Weight { - Weight::from_ref_time(48_436_000 as u64) + // Minimum execution time: 57_215 nanoseconds. + Weight::from_ref_time(58_285_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -128,7 +139,8 @@ impl WeightInfo for SubstrateWeight { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_active() -> Weight { - Weight::from_ref_time(58_044_000 as u64) + // Minimum execution time: 67_641 nanoseconds. + Weight::from_ref_time(69_184_000 as u64) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(7 as u64)) } @@ -142,10 +154,12 @@ impl WeightInfo for () { // Storage: ChildBounties ChildBountyCount (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) // Storage: ChildBounties ChildBounties (r:0 w:1) + /// The range of component `d` is `[0, 300]`. fn add_child_bounty(d: u32, ) -> Weight { - Weight::from_ref_time(51_064_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(d as u64)) + // Minimum execution time: 59_121 nanoseconds. + Weight::from_ref_time(60_212_235 as u64) + // Standard Error: 149 + .saturating_add(Weight::from_ref_time(412 as u64).saturating_mul(d as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -153,7 +167,8 @@ impl WeightInfo for () { // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: ChildBounties ChildrenCuratorFees (r:1 w:1) fn propose_curator() -> Weight { - Weight::from_ref_time(15_286_000 as u64) + // Minimum execution time: 20_785 nanoseconds. 
+ Weight::from_ref_time(21_000_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -161,7 +176,8 @@ impl WeightInfo for () { // Storage: ChildBounties ChildBounties (r:1 w:1) // Storage: System Account (r:1 w:1) fn accept_curator() -> Weight { - Weight::from_ref_time(29_929_000 as u64) + // Minimum execution time: 37_874 nanoseconds. + Weight::from_ref_time(38_322_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -169,14 +185,16 @@ impl WeightInfo for () { // Storage: Bounties Bounties (r:1 w:0) // Storage: System Account (r:1 w:1) fn unassign_curator() -> Weight { - Weight::from_ref_time(32_449_000 as u64) + // Minimum execution time: 43_385 nanoseconds. + Weight::from_ref_time(43_774_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Bounties Bounties (r:1 w:0) // Storage: ChildBounties ChildBounties (r:1 w:1) fn award_child_bounty() -> Weight { - Weight::from_ref_time(23_793_000 as u64) + // Minimum execution time: 31_390 nanoseconds. + Weight::from_ref_time(31_802_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -185,7 +203,8 @@ impl WeightInfo for () { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn claim_child_bounty() -> Weight { - Weight::from_ref_time(67_529_000 as u64) + // Minimum execution time: 74_956 nanoseconds. + Weight::from_ref_time(75_850_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -196,7 +215,8 @@ impl WeightInfo for () { // Storage: System Account (r:2 w:2) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_added() -> Weight { - Weight::from_ref_time(48_436_000 as u64) + // Minimum execution time: 57_215 nanoseconds. + Weight::from_ref_time(58_285_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -207,7 +227,8 @@ impl WeightInfo for () { // Storage: ChildBounties ParentChildBounties (r:1 w:1) // Storage: ChildBounties ChildBountyDescriptions (r:0 w:1) fn close_child_bounty_active() -> Weight { - Weight::from_ref_time(58_044_000 as u64) + // Minimum execution time: 67_641 nanoseconds. + Weight::from_ref_time(69_184_000 as u64) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(7 as u64)) } diff --git a/frame/collective/src/weights.rs b/frame/collective/src/weights.rs index c7237911da6ab..052550de7bd7e 100644 --- a/frame/collective/src/weights.rs +++ b/frame/collective/src/weights.rs @@ -18,25 +18,24 @@ //! Autogenerated weights for pallet_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-11-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_collective // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_collective -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/collective/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -71,12 +70,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[0, 100]`. /// The range of component `p` is `[0, 100]`. fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - // Minimum execution time: 18_852 nanoseconds. - Weight::from_ref_time(19_138_000 as u64) - // Standard Error: 65_564 - .saturating_add(Weight::from_ref_time(5_276_957 as u64).saturating_mul(m as u64)) - // Standard Error: 65_564 - .saturating_add(Weight::from_ref_time(7_655_866 as u64).saturating_mul(p as u64)) + // Minimum execution time: 18_895 nanoseconds. + Weight::from_ref_time(19_254_000 as u64) + // Standard Error: 63_540 + .saturating_add(Weight::from_ref_time(5_061_801 as u64).saturating_mul(m as u64)) + // Standard Error: 63_540 + .saturating_add(Weight::from_ref_time(7_588_981 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(p as u64))) .saturating_add(T::DbWeight::get().writes(2 as u64)) @@ -86,12 +85,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { - // Minimum execution time: 23_081 nanoseconds. - Weight::from_ref_time(22_608_754 as u64) - // Standard Error: 35 - .saturating_add(Weight::from_ref_time(1_722 as u64).saturating_mul(b as u64)) - // Standard Error: 370 - .saturating_add(Weight::from_ref_time(23_442 as u64).saturating_mul(m as u64)) + // Minimum execution time: 24_469 nanoseconds. + Weight::from_ref_time(23_961_134 as u64) + // Standard Error: 43 + .saturating_add(Weight::from_ref_time(1_677 as u64).saturating_mul(b as u64)) + // Standard Error: 450 + .saturating_add(Weight::from_ref_time(18_645 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Council Members (r:1 w:0) @@ -99,12 +98,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { - // Minimum execution time: 25_477 nanoseconds. - Weight::from_ref_time(25_478_243 as u64) - // Standard Error: 47 - .saturating_add(Weight::from_ref_time(1_346 as u64).saturating_mul(b as u64)) - // Standard Error: 493 - .saturating_add(Weight::from_ref_time(27_323 as u64).saturating_mul(m as u64)) + // Minimum execution time: 26_476 nanoseconds. 
+ Weight::from_ref_time(25_829_298 as u64) + // Standard Error: 49 + .saturating_add(Weight::from_ref_time(1_741 as u64).saturating_mul(b as u64)) + // Standard Error: 515 + .saturating_add(Weight::from_ref_time(29_436 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) } // Storage: Council Members (r:1 w:0) @@ -116,14 +115,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 33_037 nanoseconds. - Weight::from_ref_time(29_297_056 as u64) - // Standard Error: 180 - .saturating_add(Weight::from_ref_time(5_899 as u64).saturating_mul(b as u64)) - // Standard Error: 1_884 - .saturating_add(Weight::from_ref_time(33_511 as u64).saturating_mul(m as u64)) - // Standard Error: 1_860 - .saturating_add(Weight::from_ref_time(195_416 as u64).saturating_mul(p as u64)) + // Minimum execution time: 33_585 nanoseconds. + Weight::from_ref_time(33_092_289 as u64) + // Standard Error: 173 + .saturating_add(Weight::from_ref_time(4_266 as u64).saturating_mul(b as u64)) + // Standard Error: 1_812 + .saturating_add(Weight::from_ref_time(29_262 as u64).saturating_mul(m as u64)) + // Standard Error: 1_789 + .saturating_add(Weight::from_ref_time(181_285 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -131,10 +130,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Voting (r:1 w:1) /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { - // Minimum execution time: 35_284 nanoseconds. - Weight::from_ref_time(38_865_202 as u64) - // Standard Error: 2_322 - .saturating_add(Weight::from_ref_time(53_753 as u64).saturating_mul(m as u64)) + // Minimum execution time: 36_374 nanoseconds. + Weight::from_ref_time(38_950_243 as u64) + // Standard Error: 2_583 + .saturating_add(Weight::from_ref_time(65_345 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -145,12 +144,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Minimum execution time: 35_773 nanoseconds. - Weight::from_ref_time(36_651_208 as u64) - // Standard Error: 1_142 - .saturating_add(Weight::from_ref_time(27_095 as u64).saturating_mul(m as u64)) - // Standard Error: 1_114 - .saturating_add(Weight::from_ref_time(169_747 as u64).saturating_mul(p as u64)) + // Minimum execution time: 36_066 nanoseconds. + Weight::from_ref_time(38_439_655 as u64) + // Standard Error: 1_281 + .saturating_add(Weight::from_ref_time(17_045 as u64).saturating_mul(m as u64)) + // Standard Error: 1_249 + .saturating_add(Weight::from_ref_time(164_998 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -162,14 +161,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 46_290 nanoseconds. 
- Weight::from_ref_time(46_176_864 as u64) - // Standard Error: 147 - .saturating_add(Weight::from_ref_time(2_318 as u64).saturating_mul(b as u64)) - // Standard Error: 1_560 - .saturating_add(Weight::from_ref_time(31_428 as u64).saturating_mul(m as u64)) - // Standard Error: 1_520 - .saturating_add(Weight::from_ref_time(171_822 as u64).saturating_mul(p as u64)) + // Minimum execution time: 47_753 nanoseconds. + Weight::from_ref_time(46_507_829 as u64) + // Standard Error: 149 + .saturating_add(Weight::from_ref_time(2_159 as u64).saturating_mul(b as u64)) + // Standard Error: 1_581 + .saturating_add(Weight::from_ref_time(37_842 as u64).saturating_mul(m as u64)) + // Standard Error: 1_541 + .saturating_add(Weight::from_ref_time(173_395 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -181,12 +180,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Minimum execution time: 38_421 nanoseconds. - Weight::from_ref_time(40_586_165 as u64) - // Standard Error: 1_853 - .saturating_add(Weight::from_ref_time(20_063 as u64).saturating_mul(m as u64)) - // Standard Error: 1_807 - .saturating_add(Weight::from_ref_time(151_494 as u64).saturating_mul(p as u64)) + // Minimum execution time: 39_416 nanoseconds. + Weight::from_ref_time(39_610_161 as u64) + // Standard Error: 1_231 + .saturating_add(Weight::from_ref_time(32_991 as u64).saturating_mul(m as u64)) + // Standard Error: 1_200 + .saturating_add(Weight::from_ref_time(170_773 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -199,14 +198,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 48_281 nanoseconds. - Weight::from_ref_time(47_511_499 as u64) - // Standard Error: 135 - .saturating_add(Weight::from_ref_time(1_900 as u64).saturating_mul(b as u64)) - // Standard Error: 1_429 - .saturating_add(Weight::from_ref_time(37_612 as u64).saturating_mul(m as u64)) - // Standard Error: 1_393 - .saturating_add(Weight::from_ref_time(180_682 as u64).saturating_mul(p as u64)) + // Minimum execution time: 49_840 nanoseconds. + Weight::from_ref_time(48_542_914 as u64) + // Standard Error: 136 + .saturating_add(Weight::from_ref_time(2_650 as u64).saturating_mul(b as u64)) + // Standard Error: 1_442 + .saturating_add(Weight::from_ref_time(37_898 as u64).saturating_mul(m as u64)) + // Standard Error: 1_406 + .saturating_add(Weight::from_ref_time(182_176 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -215,10 +214,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Council ProposalOf (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { - // Minimum execution time: 22_534 nanoseconds. - Weight::from_ref_time(25_722_688 as u64) - // Standard Error: 1_622 - .saturating_add(Weight::from_ref_time(166_308 as u64).saturating_mul(p as u64)) + // Minimum execution time: 24_199 nanoseconds. 
+ Weight::from_ref_time(26_869_176 as u64) + // Standard Error: 1_609 + .saturating_add(Weight::from_ref_time(163_341 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -234,12 +233,12 @@ impl WeightInfo for () { /// The range of component `n` is `[0, 100]`. /// The range of component `p` is `[0, 100]`. fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - // Minimum execution time: 18_852 nanoseconds. - Weight::from_ref_time(19_138_000 as u64) - // Standard Error: 65_564 - .saturating_add(Weight::from_ref_time(5_276_957 as u64).saturating_mul(m as u64)) - // Standard Error: 65_564 - .saturating_add(Weight::from_ref_time(7_655_866 as u64).saturating_mul(p as u64)) + // Minimum execution time: 18_895 nanoseconds. + Weight::from_ref_time(19_254_000 as u64) + // Standard Error: 63_540 + .saturating_add(Weight::from_ref_time(5_061_801 as u64).saturating_mul(m as u64)) + // Standard Error: 63_540 + .saturating_add(Weight::from_ref_time(7_588_981 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(p as u64))) .saturating_add(RocksDbWeight::get().writes(2 as u64)) @@ -249,12 +248,12 @@ impl WeightInfo for () { /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[1, 100]`. fn execute(b: u32, m: u32, ) -> Weight { - // Minimum execution time: 23_081 nanoseconds. - Weight::from_ref_time(22_608_754 as u64) - // Standard Error: 35 - .saturating_add(Weight::from_ref_time(1_722 as u64).saturating_mul(b as u64)) - // Standard Error: 370 - .saturating_add(Weight::from_ref_time(23_442 as u64).saturating_mul(m as u64)) + // Minimum execution time: 24_469 nanoseconds. + Weight::from_ref_time(23_961_134 as u64) + // Standard Error: 43 + .saturating_add(Weight::from_ref_time(1_677 as u64).saturating_mul(b as u64)) + // Standard Error: 450 + .saturating_add(Weight::from_ref_time(18_645 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Council Members (r:1 w:0) @@ -262,12 +261,12 @@ impl WeightInfo for () { /// The range of component `b` is `[1, 1024]`. /// The range of component `m` is `[1, 100]`. fn propose_execute(b: u32, m: u32, ) -> Weight { - // Minimum execution time: 25_477 nanoseconds. - Weight::from_ref_time(25_478_243 as u64) - // Standard Error: 47 - .saturating_add(Weight::from_ref_time(1_346 as u64).saturating_mul(b as u64)) - // Standard Error: 493 - .saturating_add(Weight::from_ref_time(27_323 as u64).saturating_mul(m as u64)) + // Minimum execution time: 26_476 nanoseconds. + Weight::from_ref_time(25_829_298 as u64) + // Standard Error: 49 + .saturating_add(Weight::from_ref_time(1_741 as u64).saturating_mul(b as u64)) + // Standard Error: 515 + .saturating_add(Weight::from_ref_time(29_436 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) } // Storage: Council Members (r:1 w:0) @@ -279,14 +278,14 @@ impl WeightInfo for () { /// The range of component `m` is `[2, 100]`. /// The range of component `p` is `[1, 100]`. fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 33_037 nanoseconds. 
- Weight::from_ref_time(29_297_056 as u64) - // Standard Error: 180 - .saturating_add(Weight::from_ref_time(5_899 as u64).saturating_mul(b as u64)) - // Standard Error: 1_884 - .saturating_add(Weight::from_ref_time(33_511 as u64).saturating_mul(m as u64)) - // Standard Error: 1_860 - .saturating_add(Weight::from_ref_time(195_416 as u64).saturating_mul(p as u64)) + // Minimum execution time: 33_585 nanoseconds. + Weight::from_ref_time(33_092_289 as u64) + // Standard Error: 173 + .saturating_add(Weight::from_ref_time(4_266 as u64).saturating_mul(b as u64)) + // Standard Error: 1_812 + .saturating_add(Weight::from_ref_time(29_262 as u64).saturating_mul(m as u64)) + // Standard Error: 1_789 + .saturating_add(Weight::from_ref_time(181_285 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -294,10 +293,10 @@ impl WeightInfo for () { // Storage: Council Voting (r:1 w:1) /// The range of component `m` is `[5, 100]`. fn vote(m: u32, ) -> Weight { - // Minimum execution time: 35_284 nanoseconds. - Weight::from_ref_time(38_865_202 as u64) - // Standard Error: 2_322 - .saturating_add(Weight::from_ref_time(53_753 as u64).saturating_mul(m as u64)) + // Minimum execution time: 36_374 nanoseconds. + Weight::from_ref_time(38_950_243 as u64) + // Standard Error: 2_583 + .saturating_add(Weight::from_ref_time(65_345 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -308,12 +307,12 @@ impl WeightInfo for () { /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Minimum execution time: 35_773 nanoseconds. - Weight::from_ref_time(36_651_208 as u64) - // Standard Error: 1_142 - .saturating_add(Weight::from_ref_time(27_095 as u64).saturating_mul(m as u64)) - // Standard Error: 1_114 - .saturating_add(Weight::from_ref_time(169_747 as u64).saturating_mul(p as u64)) + // Minimum execution time: 36_066 nanoseconds. + Weight::from_ref_time(38_439_655 as u64) + // Standard Error: 1_281 + .saturating_add(Weight::from_ref_time(17_045 as u64).saturating_mul(m as u64)) + // Standard Error: 1_249 + .saturating_add(Weight::from_ref_time(164_998 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -325,14 +324,14 @@ impl WeightInfo for () { /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 46_290 nanoseconds. - Weight::from_ref_time(46_176_864 as u64) - // Standard Error: 147 - .saturating_add(Weight::from_ref_time(2_318 as u64).saturating_mul(b as u64)) - // Standard Error: 1_560 - .saturating_add(Weight::from_ref_time(31_428 as u64).saturating_mul(m as u64)) - // Standard Error: 1_520 - .saturating_add(Weight::from_ref_time(171_822 as u64).saturating_mul(p as u64)) + // Minimum execution time: 47_753 nanoseconds. 
+ Weight::from_ref_time(46_507_829 as u64) + // Standard Error: 149 + .saturating_add(Weight::from_ref_time(2_159 as u64).saturating_mul(b as u64)) + // Standard Error: 1_581 + .saturating_add(Weight::from_ref_time(37_842 as u64).saturating_mul(m as u64)) + // Standard Error: 1_541 + .saturating_add(Weight::from_ref_time(173_395 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -344,12 +343,12 @@ impl WeightInfo for () { /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Minimum execution time: 38_421 nanoseconds. - Weight::from_ref_time(40_586_165 as u64) - // Standard Error: 1_853 - .saturating_add(Weight::from_ref_time(20_063 as u64).saturating_mul(m as u64)) - // Standard Error: 1_807 - .saturating_add(Weight::from_ref_time(151_494 as u64).saturating_mul(p as u64)) + // Minimum execution time: 39_416 nanoseconds. + Weight::from_ref_time(39_610_161 as u64) + // Standard Error: 1_231 + .saturating_add(Weight::from_ref_time(32_991 as u64).saturating_mul(m as u64)) + // Standard Error: 1_200 + .saturating_add(Weight::from_ref_time(170_773 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -362,14 +361,14 @@ impl WeightInfo for () { /// The range of component `m` is `[4, 100]`. /// The range of component `p` is `[1, 100]`. fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Minimum execution time: 48_281 nanoseconds. - Weight::from_ref_time(47_511_499 as u64) - // Standard Error: 135 - .saturating_add(Weight::from_ref_time(1_900 as u64).saturating_mul(b as u64)) - // Standard Error: 1_429 - .saturating_add(Weight::from_ref_time(37_612 as u64).saturating_mul(m as u64)) - // Standard Error: 1_393 - .saturating_add(Weight::from_ref_time(180_682 as u64).saturating_mul(p as u64)) + // Minimum execution time: 49_840 nanoseconds. + Weight::from_ref_time(48_542_914 as u64) + // Standard Error: 136 + .saturating_add(Weight::from_ref_time(2_650 as u64).saturating_mul(b as u64)) + // Standard Error: 1_442 + .saturating_add(Weight::from_ref_time(37_898 as u64).saturating_mul(m as u64)) + // Standard Error: 1_406 + .saturating_add(Weight::from_ref_time(182_176 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -378,10 +377,10 @@ impl WeightInfo for () { // Storage: Council ProposalOf (r:0 w:1) /// The range of component `p` is `[1, 100]`. fn disapprove_proposal(p: u32, ) -> Weight { - // Minimum execution time: 22_534 nanoseconds. - Weight::from_ref_time(25_722_688 as u64) - // Standard Error: 1_622 - .saturating_add(Weight::from_ref_time(166_308 as u64).saturating_mul(p as u64)) + // Minimum execution time: 24_199 nanoseconds. + Weight::from_ref_time(26_869_176 as u64) + // Standard Error: 1_609 + .saturating_add(Weight::from_ref_time(163_341 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index c42cc41a0bd9c..8632124c0200d 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_contracts //! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-28, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 @@ -29,14 +29,14 @@ // --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet-contracts +// --pallet=pallet_contracts // --extrinsic=* -// --heap-pages=4096 // --execution=wasm // --wasm-execution=compiled -// --output=frame/contracts/src/weights.rs -// --header=HEADER-APACHE2 -// --template=.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --output=./frame/contracts/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -167,17 +167,17 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - // Minimum execution time: 2_904 nanoseconds. - Weight::from_ref_time(3_036_000 as u64) + // Minimum execution time: 3_064 nanoseconds. + Weight::from_ref_time(3_236_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - // Minimum execution time: 14_553 nanoseconds. - Weight::from_ref_time(14_161_469 as u64) - // Standard Error: 593 - .saturating_add(Weight::from_ref_time(894_982 as u64).saturating_mul(k as u64)) + // Minimum execution time: 15_492 nanoseconds. + Weight::from_ref_time(14_309_233 as u64) + // Standard Error: 649 + .saturating_add(Weight::from_ref_time(930_078 as u64).saturating_mul(k as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) @@ -185,10 +185,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - // Minimum execution time: 3_121 nanoseconds. - Weight::from_ref_time(14_262_977 as u64) - // Standard Error: 3_272 - .saturating_add(Weight::from_ref_time(1_232_597 as u64).saturating_mul(q as u64)) + // Minimum execution time: 3_240 nanoseconds. + Weight::from_ref_time(15_076_559 as u64) + // Standard Error: 3_337 + .saturating_add(Weight::from_ref_time(1_244_348 as u64).saturating_mul(q as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -196,10 +196,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - // Minimum execution time: 28_625 nanoseconds. - Weight::from_ref_time(31_685_032 as u64) - // Standard Error: 57 - .saturating_add(Weight::from_ref_time(44_024 as u64).saturating_mul(c as u64)) + // Minimum execution time: 22_524 nanoseconds. 
+ Weight::from_ref_time(19_939_078 as u64) + // Standard Error: 43 + .saturating_add(Weight::from_ref_time(43_802 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -210,10 +210,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - // Minimum execution time: 355_167 nanoseconds. - Weight::from_ref_time(330_201_049 as u64) - // Standard Error: 64 - .saturating_add(Weight::from_ref_time(45_952 as u64).saturating_mul(c as u64)) + // Minimum execution time: 261_039 nanoseconds. + Weight::from_ref_time(228_709_853 as u64) + // Standard Error: 105 + .saturating_add(Weight::from_ref_time(47_449 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -228,12 +228,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - // Minimum execution time: 2_165_939 nanoseconds. - Weight::from_ref_time(346_673_842 as u64) - // Standard Error: 73 - .saturating_add(Weight::from_ref_time(107_020 as u64).saturating_mul(c as u64)) + // Minimum execution time: 2_054_867 nanoseconds. + Weight::from_ref_time(259_090_306 as u64) + // Standard Error: 72 + .saturating_add(Weight::from_ref_time(107_519 as u64).saturating_mul(c as u64)) // Standard Error: 4 - .saturating_add(Weight::from_ref_time(1_754 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_736 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().writes(9 as u64)) } @@ -246,10 +246,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - // Minimum execution time: 262_523 nanoseconds. - Weight::from_ref_time(255_633_789 as u64) + // Minimum execution time: 213_409 nanoseconds. + Weight::from_ref_time(205_300_495 as u64) // Standard Error: 1 - .saturating_add(Weight::from_ref_time(1_485 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_479 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().writes(7 as u64)) } @@ -259,8 +259,8 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - // Minimum execution time: 233_124 nanoseconds. - Weight::from_ref_time(233_826_000 as u64) + // Minimum execution time: 183_317 nanoseconds. + Weight::from_ref_time(184_465_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -270,10 +270,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - // Minimum execution time: 61_170 nanoseconds. - Weight::from_ref_time(61_653_502 as u64) - // Standard Error: 47 - .saturating_add(Weight::from_ref_time(45_553 as u64).saturating_mul(c as u64)) + // Minimum execution time: 56_187 nanoseconds. 
+ Weight::from_ref_time(60_636_621 as u64) + // Standard Error: 46 + .saturating_add(Weight::from_ref_time(45_734 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -282,8 +282,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - // Minimum execution time: 37_278 nanoseconds. - Weight::from_ref_time(37_822_000 as u64) + // Minimum execution time: 38_433 nanoseconds. + Weight::from_ref_time(38_917_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -291,8 +291,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:2 w:2) // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - // Minimum execution time: 40_329 nanoseconds. - Weight::from_ref_time(41_017_000 as u64) + // Minimum execution time: 41_507 nanoseconds. + Weight::from_ref_time(41_938_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -303,10 +303,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - // Minimum execution time: 345_717 nanoseconds. - Weight::from_ref_time(347_610_656 as u64) - // Standard Error: 37_159 - .saturating_add(Weight::from_ref_time(35_506_783 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_628 nanoseconds. + Weight::from_ref_time(251_997_923 as u64) + // Standard Error: 26_157 + .saturating_add(Weight::from_ref_time(35_002_004 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -317,10 +317,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - // Minimum execution time: 346_158 nanoseconds. - Weight::from_ref_time(289_982_824 as u64) - // Standard Error: 426_543 - .saturating_add(Weight::from_ref_time(202_123_887 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_390 nanoseconds. + Weight::from_ref_time(193_793_052 as u64) + // Standard Error: 430_292 + .saturating_add(Weight::from_ref_time(211_029_686 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -332,10 +332,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 349_141 nanoseconds. - Weight::from_ref_time(297_477_406 as u64) - // Standard Error: 403_411 - .saturating_add(Weight::from_ref_time(258_999_975 as u64).saturating_mul(r as u64)) + // Minimum execution time: 252_469 nanoseconds. 
+ Weight::from_ref_time(201_438_856 as u64) + // Standard Error: 420_040 + .saturating_add(Weight::from_ref_time(267_340_744 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -347,10 +347,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 347_419 nanoseconds. - Weight::from_ref_time(350_048_279 as u64) - // Standard Error: 22_511 - .saturating_add(Weight::from_ref_time(38_799_515 as u64).saturating_mul(r as u64)) + // Minimum execution time: 251_154 nanoseconds. + Weight::from_ref_time(254_831_062 as u64) + // Standard Error: 37_843 + .saturating_add(Weight::from_ref_time(38_579_567 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -361,10 +361,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - // Minimum execution time: 341_634 nanoseconds. - Weight::from_ref_time(344_394_100 as u64) - // Standard Error: 12_563 - .saturating_add(Weight::from_ref_time(14_529_298 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_875 nanoseconds. + Weight::from_ref_time(250_312_587 as u64) + // Standard Error: 17_901 + .saturating_add(Weight::from_ref_time(15_153_431 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -375,10 +375,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - // Minimum execution time: 346_246 nanoseconds. - Weight::from_ref_time(349_406_095 as u64) - // Standard Error: 28_842 - .saturating_add(Weight::from_ref_time(35_066_080 as u64).saturating_mul(r as u64)) + // Minimum execution time: 250_097 nanoseconds. + Weight::from_ref_time(252_157_442 as u64) + // Standard Error: 38_426 + .saturating_add(Weight::from_ref_time(35_084_205 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -389,10 +389,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - // Minimum execution time: 346_003 nanoseconds. - Weight::from_ref_time(349_076_713 as u64) - // Standard Error: 30_507 - .saturating_add(Weight::from_ref_time(34_662_539 as u64).saturating_mul(r as u64)) + // Minimum execution time: 250_034 nanoseconds. + Weight::from_ref_time(252_189_233 as u64) + // Standard Error: 33_081 + .saturating_add(Weight::from_ref_time(34_764_160 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -403,10 +403,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - // Minimum execution time: 346_063 nanoseconds. 
- Weight::from_ref_time(350_054_605 as u64) - // Standard Error: 68_711 - .saturating_add(Weight::from_ref_time(107_584_776 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_587 nanoseconds. + Weight::from_ref_time(258_565_111 as u64) + // Standard Error: 75_715 + .saturating_add(Weight::from_ref_time(109_687_486 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -417,10 +417,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - // Minimum execution time: 346_057 nanoseconds. - Weight::from_ref_time(349_489_403 as u64) - // Standard Error: 38_116 - .saturating_add(Weight::from_ref_time(34_750_791 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_735 nanoseconds. + Weight::from_ref_time(252_875_784 as u64) + // Standard Error: 42_024 + .saturating_add(Weight::from_ref_time(34_555_983 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -431,10 +431,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - // Minimum execution time: 345_722 nanoseconds. - Weight::from_ref_time(348_659_357 as u64) - // Standard Error: 33_077 - .saturating_add(Weight::from_ref_time(34_845_216 as u64).saturating_mul(r as u64)) + // Minimum execution time: 250_025 nanoseconds. + Weight::from_ref_time(255_212_046 as u64) + // Standard Error: 41_865 + .saturating_add(Weight::from_ref_time(34_332_291 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -445,10 +445,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - // Minimum execution time: 345_805 nanoseconds. - Weight::from_ref_time(347_579_287 as u64) - // Standard Error: 28_498 - .saturating_add(Weight::from_ref_time(34_529_480 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_641 nanoseconds. + Weight::from_ref_time(252_978_686 as u64) + // Standard Error: 25_820 + .saturating_add(Weight::from_ref_time(34_175_386 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -459,10 +459,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - // Minimum execution time: 345_943 nanoseconds. - Weight::from_ref_time(348_941_819 as u64) - // Standard Error: 32_278 - .saturating_add(Weight::from_ref_time(34_580_817 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_871 nanoseconds. + Weight::from_ref_time(253_237_931 as u64) + // Standard Error: 30_986 + .saturating_add(Weight::from_ref_time(34_305_155 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -474,10 +474,10 @@ impl WeightInfo for SubstrateWeight { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. 
fn seal_weight_to_fee(r: u32, ) -> Weight { - // Minimum execution time: 345_753 nanoseconds. - Weight::from_ref_time(354_733_779 as u64) - // Standard Error: 71_767 - .saturating_add(Weight::from_ref_time(105_098_275 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_787 nanoseconds. + Weight::from_ref_time(258_457_094 as u64) + // Standard Error: 75_835 + .saturating_add(Weight::from_ref_time(107_115_666 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -488,10 +488,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - // Minimum execution time: 222_866 nanoseconds. - Weight::from_ref_time(226_212_157 as u64) - // Standard Error: 17_326 - .saturating_add(Weight::from_ref_time(15_544_104 as u64).saturating_mul(r as u64)) + // Minimum execution time: 171_667 nanoseconds. + Weight::from_ref_time(174_687_863 as u64) + // Standard Error: 34_576 + .saturating_add(Weight::from_ref_time(15_895_674 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -502,10 +502,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - // Minimum execution time: 345_704 nanoseconds. - Weight::from_ref_time(348_160_999 as u64) - // Standard Error: 31_560 - .saturating_add(Weight::from_ref_time(32_888_428 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_610 nanoseconds. + Weight::from_ref_time(251_476_758 as u64) + // Standard Error: 39_422 + .saturating_add(Weight::from_ref_time(32_870_429 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -516,10 +516,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 380_814 nanoseconds. - Weight::from_ref_time(403_973_055 as u64) - // Standard Error: 2_690 - .saturating_add(Weight::from_ref_time(9_597_443 as u64).saturating_mul(n as u64)) + // Minimum execution time: 285_154 nanoseconds. + Weight::from_ref_time(307_768_636 as u64) + // Standard Error: 2_701 + .saturating_add(Weight::from_ref_time(9_544_122 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -530,10 +530,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { - // Minimum execution time: 340_930 nanoseconds. - Weight::from_ref_time(342_218_828 as u64) - // Standard Error: 80_578 - .saturating_add(Weight::from_ref_time(1_547_971 as u64).saturating_mul(r as u64)) + // Minimum execution time: 244_810 nanoseconds. + Weight::from_ref_time(247_576_385 as u64) + // Standard Error: 80_494 + .saturating_add(Weight::from_ref_time(2_052_714 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -544,10 +544,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. 
fn seal_return_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 343_035 nanoseconds. - Weight::from_ref_time(345_347_882 as u64) - // Standard Error: 509 - .saturating_add(Weight::from_ref_time(228_526 as u64).saturating_mul(n as u64)) + // Minimum execution time: 248_049 nanoseconds. + Weight::from_ref_time(250_148_025 as u64) + // Standard Error: 339 + .saturating_add(Weight::from_ref_time(185_344 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -560,10 +560,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - // Minimum execution time: 344_997 nanoseconds. - Weight::from_ref_time(346_431_655 as u64) - // Standard Error: 79_924 - .saturating_add(Weight::from_ref_time(53_530_044 as u64).saturating_mul(r as u64)) + // Minimum execution time: 246_620 nanoseconds. + Weight::from_ref_time(250_752_277 as u64) + // Standard Error: 84_300 + .saturating_add(Weight::from_ref_time(54_264_722 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -577,10 +577,10 @@ impl WeightInfo for SubstrateWeight { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - // Minimum execution time: 345_926 nanoseconds. - Weight::from_ref_time(351_620_774 as u64) - // Standard Error: 69_858 - .saturating_add(Weight::from_ref_time(130_846_895 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_065 nanoseconds. + Weight::from_ref_time(252_419_902 as u64) + // Standard Error: 84_223 + .saturating_add(Weight::from_ref_time(134_454_079 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -591,10 +591,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - // Minimum execution time: 341_582 nanoseconds. - Weight::from_ref_time(353_141_157 as u64) - // Standard Error: 99_764 - .saturating_add(Weight::from_ref_time(231_149_152 as u64).saturating_mul(r as u64)) + // Minimum execution time: 246_588 nanoseconds. + Weight::from_ref_time(261_525_328 as u64) + // Standard Error: 97_732 + .saturating_add(Weight::from_ref_time(235_555_878 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -606,12 +606,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - // Minimum execution time: 1_269_467 nanoseconds. - Weight::from_ref_time(566_371_969 as u64) - // Standard Error: 435_714 - .saturating_add(Weight::from_ref_time(180_441_805 as u64).saturating_mul(t as u64)) - // Standard Error: 119_668 - .saturating_add(Weight::from_ref_time(70_111_002 as u64).saturating_mul(n as u64)) + // Minimum execution time: 1_171_144 nanoseconds. 
+ Weight::from_ref_time(490_333_337 as u64) + // Standard Error: 404_664 + .saturating_add(Weight::from_ref_time(173_683_265 as u64).saturating_mul(t as u64)) + // Standard Error: 111_140 + .saturating_add(Weight::from_ref_time(66_081_822 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -624,20 +624,20 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - // Minimum execution time: 231_528 nanoseconds. - Weight::from_ref_time(235_676_870 as u64) - // Standard Error: 20_851 - .saturating_add(Weight::from_ref_time(26_215_920 as u64).saturating_mul(r as u64)) + // Minimum execution time: 178_822 nanoseconds. + Weight::from_ref_time(181_571_518 as u64) + // Standard Error: 19_207 + .saturating_add(Weight::from_ref_time(26_784_712 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - // Minimum execution time: 346_509 nanoseconds. - Weight::from_ref_time(301_173_874 as u64) - // Standard Error: 436_884 - .saturating_add(Weight::from_ref_time(415_194_325 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_737 nanoseconds. + Weight::from_ref_time(208_095_467 as u64) + // Standard Error: 417_236 + .saturating_add(Weight::from_ref_time(430_088_574 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -646,10 +646,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - // Minimum execution time: 500_452 nanoseconds. - Weight::from_ref_time(641_506_544 as u64) - // Standard Error: 1_308_973 - .saturating_add(Weight::from_ref_time(98_769_541 as u64).saturating_mul(n as u64)) + // Minimum execution time: 400_055 nanoseconds. + Weight::from_ref_time(551_666_883 as u64) + // Standard Error: 1_379_652 + .saturating_add(Weight::from_ref_time(94_069_118 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(52 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(50 as u64)) @@ -658,10 +658,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - // Minimum execution time: 500_941 nanoseconds. - Weight::from_ref_time(610_446_049 as u64) - // Standard Error: 1_018_173 - .saturating_add(Weight::from_ref_time(65_568_627 as u64).saturating_mul(n as u64)) + // Minimum execution time: 400_370 nanoseconds. 
+ Weight::from_ref_time(521_380_000 as u64) + // Standard Error: 1_112_618 + .saturating_add(Weight::from_ref_time(68_664_898 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(49 as u64)) @@ -670,10 +670,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - // Minimum execution time: 345_820 nanoseconds. - Weight::from_ref_time(302_335_076 as u64) - // Standard Error: 472_676 - .saturating_add(Weight::from_ref_time(395_286_593 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_711 nanoseconds. + Weight::from_ref_time(212_629_798 as u64) + // Standard Error: 378_159 + .saturating_add(Weight::from_ref_time(415_326_230 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -682,10 +682,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 457_830 nanoseconds. - Weight::from_ref_time(582_524_868 as u64) - // Standard Error: 1_161_813 - .saturating_add(Weight::from_ref_time(67_291_419 as u64).saturating_mul(n as u64)) + // Minimum execution time: 365_702 nanoseconds. + Weight::from_ref_time(499_337_686 as u64) + // Standard Error: 1_232_330 + .saturating_add(Weight::from_ref_time(70_648_878 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(48 as u64)) @@ -694,10 +694,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - // Minimum execution time: 348_249 nanoseconds. - Weight::from_ref_time(317_329_583 as u64) - // Standard Error: 400_112 - .saturating_add(Weight::from_ref_time(331_362_971 as u64).saturating_mul(r as u64)) + // Minimum execution time: 251_357 nanoseconds. + Weight::from_ref_time(220_533_580 as u64) + // Standard Error: 345_297 + .saturating_add(Weight::from_ref_time(349_413_968 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -705,10 +705,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 446_902 nanoseconds. - Weight::from_ref_time(556_989_117 as u64) - // Standard Error: 1_029_959 - .saturating_add(Weight::from_ref_time(159_623_361 as u64).saturating_mul(n as u64)) + // Minimum execution time: 354_162 nanoseconds. 
+ Weight::from_ref_time(472_811_575 as u64) + // Standard Error: 1_109_282 + .saturating_add(Weight::from_ref_time(154_074_386 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -716,10 +716,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - // Minimum execution time: 347_064 nanoseconds. - Weight::from_ref_time(316_879_171 as u64) - // Standard Error: 355_083 - .saturating_add(Weight::from_ref_time(304_763_264 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_551 nanoseconds. + Weight::from_ref_time(219_176_526 as u64) + // Standard Error: 358_914 + .saturating_add(Weight::from_ref_time(326_009_513 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -727,10 +727,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 435_275 nanoseconds. - Weight::from_ref_time(527_900_488 as u64) - // Standard Error: 872_763 - .saturating_add(Weight::from_ref_time(60_604_211 as u64).saturating_mul(n as u64)) + // Minimum execution time: 339_149 nanoseconds. + Weight::from_ref_time(440_615_016 as u64) + // Standard Error: 954_837 + .saturating_add(Weight::from_ref_time(66_153_533 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -738,10 +738,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - // Minimum execution time: 348_671 nanoseconds. - Weight::from_ref_time(306_307_186 as u64) - // Standard Error: 438_525 - .saturating_add(Weight::from_ref_time(422_575_502 as u64).saturating_mul(r as u64)) + // Minimum execution time: 251_812 nanoseconds. + Weight::from_ref_time(209_954_069 as u64) + // Standard Error: 398_380 + .saturating_add(Weight::from_ref_time(438_573_954 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -750,10 +750,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 470_344 nanoseconds. - Weight::from_ref_time(613_380_073 as u64) - // Standard Error: 1_333_864 - .saturating_add(Weight::from_ref_time(164_398_286 as u64).saturating_mul(n as u64)) + // Minimum execution time: 374_594 nanoseconds. 
+ Weight::from_ref_time(525_213_792 as u64) + // Standard Error: 1_378_489 + .saturating_add(Weight::from_ref_time(161_599_623 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(51 as u64)) .saturating_add(T::DbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(48 as u64)) @@ -766,10 +766,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - // Minimum execution time: 348_312 nanoseconds. - Weight::from_ref_time(303_017_886 as u64) - // Standard Error: 568_589 - .saturating_add(Weight::from_ref_time(1_351_508_682 as u64).saturating_mul(r as u64)) + // Minimum execution time: 251_379 nanoseconds. + Weight::from_ref_time(204_214_298 as u64) + // Standard Error: 662_575 + .saturating_add(Weight::from_ref_time(1_366_716_853 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -782,10 +782,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { - // Minimum execution time: 348_997 nanoseconds. - Weight::from_ref_time(349_975_000 as u64) - // Standard Error: 5_689_469 - .saturating_add(Weight::from_ref_time(24_991_501_544 as u64).saturating_mul(r as u64)) + // Minimum execution time: 252_896 nanoseconds. + Weight::from_ref_time(253_811_000 as u64) + // Standard Error: 6_576_179 + .saturating_add(Weight::from_ref_time(17_254_952_849 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().reads((160 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -798,10 +798,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { - // Minimum execution time: 348_751 nanoseconds. - Weight::from_ref_time(349_330_000 as u64) - // Standard Error: 7_125_421 - .saturating_add(Weight::from_ref_time(24_917_646_052 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_312 nanoseconds. + Weight::from_ref_time(253_806_000 as u64) + // Standard Error: 6_118_873 + .saturating_add(Weight::from_ref_time(17_081_370_212 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((150 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -815,12 +815,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - // Minimum execution time: 16_275_711 nanoseconds. - Weight::from_ref_time(15_203_241_602 as u64) - // Standard Error: 3_582_276 - .saturating_add(Weight::from_ref_time(1_179_848_168 as u64).saturating_mul(t as u64)) - // Standard Error: 5_371 - .saturating_add(Weight::from_ref_time(9_712_484 as u64).saturating_mul(c as u64)) + // Minimum execution time: 12_001_522 nanoseconds. 
+ Weight::from_ref_time(10_903_312_955 as u64) + // Standard Error: 4_301_096 + .saturating_add(Weight::from_ref_time(1_243_413_241 as u64).saturating_mul(t as u64)) + // Standard Error: 6_449 + .saturating_add(Weight::from_ref_time(9_713_655 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(167 as u64)) .saturating_add(T::DbWeight::get().reads((81 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(163 as u64)) @@ -835,10 +835,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { - // Minimum execution time: 353_168 nanoseconds. - Weight::from_ref_time(353_901_000 as u64) - // Standard Error: 18_701_989 - .saturating_add(Weight::from_ref_time(30_028_672_644 as u64).saturating_mul(r as u64)) + // Minimum execution time: 254_969 nanoseconds. + Weight::from_ref_time(255_984_000 as u64) + // Standard Error: 18_545_048 + .saturating_add(Weight::from_ref_time(22_343_189_765 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().reads((400 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(5 as u64)) @@ -854,10 +854,10 @@ impl WeightInfo for SubstrateWeight { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - // Minimum execution time: 18_226_984 nanoseconds. - Weight::from_ref_time(18_032_466_983 as u64) - // Standard Error: 75_544 - .saturating_add(Weight::from_ref_time(121_087_538 as u64).saturating_mul(s as u64)) + // Minimum execution time: 14_077_497 nanoseconds. + Weight::from_ref_time(13_949_740_588 as u64) + // Standard Error: 66_631 + .saturating_add(Weight::from_ref_time(120_519_572 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(249 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(t as u64))) .saturating_add(T::DbWeight::get().writes(247 as u64)) @@ -870,10 +870,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - // Minimum execution time: 344_334 nanoseconds. - Weight::from_ref_time(345_939_404 as u64) - // Standard Error: 98_994 - .saturating_add(Weight::from_ref_time(58_015_295 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_445 nanoseconds. + Weight::from_ref_time(251_229_791 as u64) + // Standard Error: 88_045 + .saturating_add(Weight::from_ref_time(57_577_008 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -884,10 +884,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 402_492 nanoseconds. - Weight::from_ref_time(402_789_000 as u64) - // Standard Error: 54_174 - .saturating_add(Weight::from_ref_time(324_036_889 as u64).saturating_mul(n as u64)) + // Minimum execution time: 308_069 nanoseconds. 
+ Weight::from_ref_time(308_971_000 as u64) + // Standard Error: 46_181 + .saturating_add(Weight::from_ref_time(321_835_684 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -898,10 +898,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - // Minimum execution time: 343_493 nanoseconds. - Weight::from_ref_time(345_116_214 as u64) - // Standard Error: 90_515 - .saturating_add(Weight::from_ref_time(71_282_485 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_107 nanoseconds. + Weight::from_ref_time(250_125_030 as u64) + // Standard Error: 88_769 + .saturating_add(Weight::from_ref_time(70_727_669 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -912,10 +912,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 415_650 nanoseconds. - Weight::from_ref_time(415_894_000 as u64) - // Standard Error: 58_077 - .saturating_add(Weight::from_ref_time(248_416_317 as u64).saturating_mul(n as u64)) + // Minimum execution time: 319_515 nanoseconds. + Weight::from_ref_time(319_784_000 as u64) + // Standard Error: 58_896 + .saturating_add(Weight::from_ref_time(246_433_962 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -926,10 +926,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - // Minimum execution time: 343_890 nanoseconds. - Weight::from_ref_time(345_542_924 as u64) - // Standard Error: 87_564 - .saturating_add(Weight::from_ref_time(47_361_375 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_887 nanoseconds. + Weight::from_ref_time(250_452_702 as u64) + // Standard Error: 140_887 + .saturating_add(Weight::from_ref_time(49_538_397 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -940,10 +940,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 391_629 nanoseconds. - Weight::from_ref_time(392_793_000 as u64) - // Standard Error: 51_120 - .saturating_add(Weight::from_ref_time(99_288_467 as u64).saturating_mul(n as u64)) + // Minimum execution time: 297_534 nanoseconds. + Weight::from_ref_time(298_249_000 as u64) + // Standard Error: 49_680 + .saturating_add(Weight::from_ref_time(99_001_103 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -954,10 +954,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - // Minimum execution time: 341_701 nanoseconds. 
- Weight::from_ref_time(343_318_489 as u64) - // Standard Error: 103_756 - .saturating_add(Weight::from_ref_time(47_301_910 as u64).saturating_mul(r as u64)) + // Minimum execution time: 245_926 nanoseconds. + Weight::from_ref_time(248_471_834 as u64) + // Standard Error: 101_639 + .saturating_add(Weight::from_ref_time(47_889_865 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -968,10 +968,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 389_898 nanoseconds. - Weight::from_ref_time(390_489_000 as u64) - // Standard Error: 52_301 - .saturating_add(Weight::from_ref_time(99_344_068 as u64).saturating_mul(n as u64)) + // Minimum execution time: 294_835 nanoseconds. + Weight::from_ref_time(296_328_000 as u64) + // Standard Error: 46_612 + .saturating_add(Weight::from_ref_time(98_859_152 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -982,10 +982,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - // Minimum execution time: 348_789 nanoseconds. - Weight::from_ref_time(350_451_620 as u64) - // Standard Error: 195_939 - .saturating_add(Weight::from_ref_time(2_962_830_479 as u64).saturating_mul(r as u64)) + // Minimum execution time: 251_104 nanoseconds. + Weight::from_ref_time(253_114_893 as u64) + // Standard Error: 316_740 + .saturating_add(Weight::from_ref_time(2_964_072_706 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -996,10 +996,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - // Minimum execution time: 346_537 nanoseconds. - Weight::from_ref_time(347_926_836 as u64) - // Standard Error: 110_557 - .saturating_add(Weight::from_ref_time(2_058_656_763 as u64).saturating_mul(r as u64)) + // Minimum execution time: 250_048 nanoseconds. + Weight::from_ref_time(251_774_991 as u64) + // Standard Error: 115_294 + .saturating_add(Weight::from_ref_time(2_094_245_208 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -1011,10 +1011,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 346_672 nanoseconds. - Weight::from_ref_time(347_628_000 as u64) - // Standard Error: 2_742_329 - .saturating_add(Weight::from_ref_time(1_370_383_386 as u64).saturating_mul(r as u64)) + // Minimum execution time: 250_830 nanoseconds. 
+ Weight::from_ref_time(251_477_000 as u64) + // Standard Error: 2_727_998 + .saturating_add(Weight::from_ref_time(1_390_149_283 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().reads((225 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) @@ -1022,360 +1022,360 @@ impl WeightInfo for SubstrateWeight { } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - // Minimum execution time: 118_029 nanoseconds. - Weight::from_ref_time(118_800_774 as u64) - // Standard Error: 4_234 - .saturating_add(Weight::from_ref_time(866_719 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_022 nanoseconds. + Weight::from_ref_time(69_707_657 as u64) + // Standard Error: 8_674 + .saturating_add(Weight::from_ref_time(887_555 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - // Minimum execution time: 118_145 nanoseconds. - Weight::from_ref_time(118_644_813 as u64) - // Standard Error: 2_692 - .saturating_add(Weight::from_ref_time(2_874_276 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_491 nanoseconds. + Weight::from_ref_time(70_354_670 as u64) + // Standard Error: 1_518 + .saturating_add(Weight::from_ref_time(2_758_912 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - // Minimum execution time: 118_142 nanoseconds. - Weight::from_ref_time(118_474_161 as u64) - // Standard Error: 6_997 - .saturating_add(Weight::from_ref_time(2_769_076 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_156 nanoseconds. + Weight::from_ref_time(69_917_601 as u64) + // Standard Error: 1_970 + .saturating_add(Weight::from_ref_time(2_753_174 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - // Minimum execution time: 118_020 nanoseconds. - Weight::from_ref_time(118_499_103 as u64) - // Standard Error: 1_266 - .saturating_add(Weight::from_ref_time(2_351_397 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_944 nanoseconds. + Weight::from_ref_time(69_727_961 as u64) + // Standard Error: 376 + .saturating_add(Weight::from_ref_time(2_356_996 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - // Minimum execution time: 118_008 nanoseconds. - Weight::from_ref_time(118_398_803 as u64) - // Standard Error: 750 - .saturating_add(Weight::from_ref_time(2_480_061 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_971 nanoseconds. + Weight::from_ref_time(69_755_949 as u64) + // Standard Error: 543 + .saturating_add(Weight::from_ref_time(2_489_510 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - // Minimum execution time: 118_046 nanoseconds. - Weight::from_ref_time(118_332_903 as u64) - // Standard Error: 234 - .saturating_add(Weight::from_ref_time(1_426_355 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_061 nanoseconds. + Weight::from_ref_time(69_625_000 as u64) + // Standard Error: 486 + .saturating_add(Weight::from_ref_time(1_431_684 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - // Minimum execution time: 117_998 nanoseconds. 
- Weight::from_ref_time(117_910_870 as u64) - // Standard Error: 4_629 - .saturating_add(Weight::from_ref_time(1_992_697 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_058 nanoseconds. + Weight::from_ref_time(69_521_790 as u64) + // Standard Error: 892 + .saturating_add(Weight::from_ref_time(1_964_054 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - // Minimum execution time: 117_998 nanoseconds. - Weight::from_ref_time(118_059_139 as u64) - // Standard Error: 1_985 - .saturating_add(Weight::from_ref_time(2_164_252 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_020 nanoseconds. + Weight::from_ref_time(69_344_255 as u64) + // Standard Error: 1_408 + .saturating_add(Weight::from_ref_time(2_169_179 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - // Minimum execution time: 121_282 nanoseconds. - Weight::from_ref_time(121_483_289 as u64) - // Standard Error: 57 - .saturating_add(Weight::from_ref_time(4_013 as u64).saturating_mul(e as u64)) + // Minimum execution time: 72_366 nanoseconds. + Weight::from_ref_time(72_869_594 as u64) + // Standard Error: 73 + .saturating_add(Weight::from_ref_time(3_867 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - // Minimum execution time: 118_128 nanoseconds. - Weight::from_ref_time(118_869_764 as u64) - // Standard Error: 8_890 - .saturating_add(Weight::from_ref_time(6_496_684 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_164 nanoseconds. + Weight::from_ref_time(70_269_099 as u64) + // Standard Error: 8_824 + .saturating_add(Weight::from_ref_time(6_594_634 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - // Minimum execution time: 131_764 nanoseconds. - Weight::from_ref_time(133_009_514 as u64) - // Standard Error: 9_697 - .saturating_add(Weight::from_ref_time(8_310_784 as u64).saturating_mul(r as u64)) + // Minimum execution time: 83_348 nanoseconds. + Weight::from_ref_time(84_968_895 as u64) + // Standard Error: 6_305 + .saturating_add(Weight::from_ref_time(8_395_193 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - // Minimum execution time: 141_100 nanoseconds. - Weight::from_ref_time(142_282_540 as u64) - // Standard Error: 867 - .saturating_add(Weight::from_ref_time(536_814 as u64).saturating_mul(p as u64)) + // Minimum execution time: 92_358 nanoseconds. + Weight::from_ref_time(93_605_536 as u64) + // Standard Error: 2_019 + .saturating_add(Weight::from_ref_time(536_495 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - // Minimum execution time: 118_535 nanoseconds. - Weight::from_ref_time(119_017_162 as u64) - // Standard Error: 1_175 - .saturating_add(Weight::from_ref_time(911_141 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_191 nanoseconds. + Weight::from_ref_time(70_407_702 as u64) + // Standard Error: 2_812 + .saturating_add(Weight::from_ref_time(901_706 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - // Minimum execution time: 118_472 nanoseconds. 
- Weight::from_ref_time(118_790_737 as u64) - // Standard Error: 758 - .saturating_add(Weight::from_ref_time(962_148 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_230 nanoseconds. + Weight::from_ref_time(70_255_278 as u64) + // Standard Error: 1_284 + .saturating_add(Weight::from_ref_time(951_754 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - // Minimum execution time: 118_551 nanoseconds. - Weight::from_ref_time(119_070_827 as u64) - // Standard Error: 2_411 - .saturating_add(Weight::from_ref_time(1_367_622 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_278 nanoseconds. + Weight::from_ref_time(70_089_139 as u64) + // Standard Error: 757 + .saturating_add(Weight::from_ref_time(1_369_185 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - // Minimum execution time: 120_958 nanoseconds. - Weight::from_ref_time(121_649_052 as u64) - // Standard Error: 1_557 - .saturating_add(Weight::from_ref_time(1_447_477 as u64).saturating_mul(r as u64)) + // Minimum execution time: 72_047 nanoseconds. + Weight::from_ref_time(72_783_972 as u64) + // Standard Error: 837 + .saturating_add(Weight::from_ref_time(1_471_680 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - // Minimum execution time: 120_866 nanoseconds. - Weight::from_ref_time(121_040_400 as u64) - // Standard Error: 6_203 - .saturating_add(Weight::from_ref_time(1_548_715 as u64).saturating_mul(r as u64)) + // Minimum execution time: 71_960 nanoseconds. + Weight::from_ref_time(72_745_981 as u64) + // Standard Error: 1_086 + .saturating_add(Weight::from_ref_time(1_537_741 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - // Minimum execution time: 118_076 nanoseconds. - Weight::from_ref_time(118_381_010 as u64) - // Standard Error: 1_814 - .saturating_add(Weight::from_ref_time(938_699 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_221 nanoseconds. + Weight::from_ref_time(70_010_862 as u64) + // Standard Error: 1_845 + .saturating_add(Weight::from_ref_time(933_738 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - // Minimum execution time: 118_030 nanoseconds. - Weight::from_ref_time(120_331_261 as u64) - // Standard Error: 333_364 - .saturating_add(Weight::from_ref_time(227_636_638 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_081 nanoseconds. + Weight::from_ref_time(71_015_495 as u64) + // Standard Error: 27_078 + .saturating_add(Weight::from_ref_time(183_899_704 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - // Minimum execution time: 118_056 nanoseconds. - Weight::from_ref_time(118_554_799 as u64) - // Standard Error: 1_269 - .saturating_add(Weight::from_ref_time(1_331_717 as u64).saturating_mul(r as u64)) + // Minimum execution time: 70_589 nanoseconds. + Weight::from_ref_time(70_175_537 as u64) + // Standard Error: 1_355 + .saturating_add(Weight::from_ref_time(1_323_745 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - // Minimum execution time: 118_009 nanoseconds. 
- Weight::from_ref_time(118_678_537 as u64) - // Standard Error: 1_699 - .saturating_add(Weight::from_ref_time(1_328_153 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_083 nanoseconds. + Weight::from_ref_time(69_832_339 as u64) + // Standard Error: 818 + .saturating_add(Weight::from_ref_time(1_334_198 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - // Minimum execution time: 118_031 nanoseconds. - Weight::from_ref_time(118_478_616 as u64) - // Standard Error: 1_533 - .saturating_add(Weight::from_ref_time(1_335_254 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_084 nanoseconds. + Weight::from_ref_time(69_802_701 as u64) + // Standard Error: 744 + .saturating_add(Weight::from_ref_time(1_334_601 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - // Minimum execution time: 117_910 nanoseconds. - Weight::from_ref_time(118_471_672 as u64) - // Standard Error: 1_275 - .saturating_add(Weight::from_ref_time(1_345_140 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_052 nanoseconds. + Weight::from_ref_time(69_717_748 as u64) + // Standard Error: 571 + .saturating_add(Weight::from_ref_time(1_346_564 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - // Minimum execution time: 119_504 nanoseconds. - Weight::from_ref_time(118_940_207 as u64) - // Standard Error: 2_512 - .saturating_add(Weight::from_ref_time(1_307_954 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_016 nanoseconds. + Weight::from_ref_time(69_793_413 as u64) + // Standard Error: 769 + .saturating_add(Weight::from_ref_time(1_317_502 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - // Minimum execution time: 118_009 nanoseconds. - Weight::from_ref_time(118_366_251 as u64) - // Standard Error: 1_116 - .saturating_add(Weight::from_ref_time(1_322_232 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_043 nanoseconds. + Weight::from_ref_time(69_963_419 as u64) + // Standard Error: 1_117 + .saturating_add(Weight::from_ref_time(1_313_727 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - // Minimum execution time: 118_020 nanoseconds. - Weight::from_ref_time(118_222_792 as u64) - // Standard Error: 1_973 - .saturating_add(Weight::from_ref_time(1_336_896 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_032 nanoseconds. + Weight::from_ref_time(69_727_577 as u64) + // Standard Error: 662 + .saturating_add(Weight::from_ref_time(1_331_088 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - // Minimum execution time: 118_038 nanoseconds. - Weight::from_ref_time(118_757_016 as u64) - // Standard Error: 7_932 - .saturating_add(Weight::from_ref_time(1_867_807 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_097 nanoseconds. + Weight::from_ref_time(69_767_650 as u64) + // Standard Error: 2_056 + .saturating_add(Weight::from_ref_time(1_875_021 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - // Minimum execution time: 118_041 nanoseconds. 
- Weight::from_ref_time(118_717_403 as u64) - // Standard Error: 8_010 - .saturating_add(Weight::from_ref_time(1_868_876 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_153 nanoseconds. + Weight::from_ref_time(69_906_946 as u64) + // Standard Error: 1_060 + .saturating_add(Weight::from_ref_time(1_867_154 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - // Minimum execution time: 118_057 nanoseconds. - Weight::from_ref_time(118_560_106 as u64) - // Standard Error: 1_362 - .saturating_add(Weight::from_ref_time(1_869_820 as u64).saturating_mul(r as u64)) + // Minimum execution time: 70_380 nanoseconds. + Weight::from_ref_time(69_867_328 as u64) + // Standard Error: 778 + .saturating_add(Weight::from_ref_time(1_869_718 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - // Minimum execution time: 118_045 nanoseconds. - Weight::from_ref_time(118_318_194 as u64) - // Standard Error: 179 - .saturating_add(Weight::from_ref_time(1_873_670 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_259 nanoseconds. + Weight::from_ref_time(69_695_407 as u64) + // Standard Error: 746 + .saturating_add(Weight::from_ref_time(1_874_772 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - // Minimum execution time: 118_000 nanoseconds. - Weight::from_ref_time(118_520_606 as u64) - // Standard Error: 3_656 - .saturating_add(Weight::from_ref_time(1_870_762 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_986 nanoseconds. + Weight::from_ref_time(70_027_081 as u64) + // Standard Error: 1_401 + .saturating_add(Weight::from_ref_time(1_862_971 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - // Minimum execution time: 118_035 nanoseconds. - Weight::from_ref_time(118_464_151 as u64) - // Standard Error: 1_073 - .saturating_add(Weight::from_ref_time(1_870_309 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_953 nanoseconds. + Weight::from_ref_time(69_798_073 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_871_888 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - // Minimum execution time: 118_071 nanoseconds. - Weight::from_ref_time(118_470_365 as u64) - // Standard Error: 1_224 - .saturating_add(Weight::from_ref_time(1_870_522 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_909 nanoseconds. + Weight::from_ref_time(69_845_981 as u64) + // Standard Error: 775 + .saturating_add(Weight::from_ref_time(1_868_722 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - // Minimum execution time: 118_032 nanoseconds. - Weight::from_ref_time(118_407_669 as u64) - // Standard Error: 769 - .saturating_add(Weight::from_ref_time(1_883_635 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_986 nanoseconds. + Weight::from_ref_time(69_683_189 as u64) + // Standard Error: 503 + .saturating_add(Weight::from_ref_time(1_884_715 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - // Minimum execution time: 118_044 nanoseconds. 
- Weight::from_ref_time(118_324_106 as u64) - // Standard Error: 523 - .saturating_add(Weight::from_ref_time(1_874_728 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_230 nanoseconds. + Weight::from_ref_time(69_765_336 as u64) + // Standard Error: 2_060 + .saturating_add(Weight::from_ref_time(1_871_848 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - // Minimum execution time: 118_111 nanoseconds. - Weight::from_ref_time(118_538_748 as u64) - // Standard Error: 3_498 - .saturating_add(Weight::from_ref_time(1_868_932 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_953 nanoseconds. + Weight::from_ref_time(69_828_265 as u64) + // Standard Error: 951 + .saturating_add(Weight::from_ref_time(1_868_596 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - // Minimum execution time: 118_060 nanoseconds. - Weight::from_ref_time(118_421_175 as u64) - // Standard Error: 962 - .saturating_add(Weight::from_ref_time(1_850_713 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_078 nanoseconds. + Weight::from_ref_time(69_832_768 as u64) + // Standard Error: 894 + .saturating_add(Weight::from_ref_time(1_845_786 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - // Minimum execution time: 118_050 nanoseconds. - Weight::from_ref_time(118_774_937 as u64) - // Standard Error: 1_717 - .saturating_add(Weight::from_ref_time(1_838_127 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_939 nanoseconds. + Weight::from_ref_time(69_676_256 as u64) + // Standard Error: 374 + .saturating_add(Weight::from_ref_time(1_851_026 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - // Minimum execution time: 117_997 nanoseconds. - Weight::from_ref_time(118_626_785 as u64) - // Standard Error: 1_382 - .saturating_add(Weight::from_ref_time(1_842_530 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_096 nanoseconds. + Weight::from_ref_time(69_914_159 as u64) + // Standard Error: 1_265 + .saturating_add(Weight::from_ref_time(1_844_489 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - // Minimum execution time: 118_064 nanoseconds. - Weight::from_ref_time(118_404_203 as u64) - // Standard Error: 2_437 - .saturating_add(Weight::from_ref_time(2_489_569 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_939 nanoseconds. + Weight::from_ref_time(69_641_768 as u64) + // Standard Error: 347 + .saturating_add(Weight::from_ref_time(2_488_628 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - // Minimum execution time: 117_949 nanoseconds. - Weight::from_ref_time(118_525_228 as u64) - // Standard Error: 2_231 - .saturating_add(Weight::from_ref_time(2_453_863 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_114 nanoseconds. + Weight::from_ref_time(69_844_395 as u64) + // Standard Error: 1_489 + .saturating_add(Weight::from_ref_time(2_456_310 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - // Minimum execution time: 118_032 nanoseconds. 
- Weight::from_ref_time(118_311_431 as u64) - // Standard Error: 175 - .saturating_add(Weight::from_ref_time(2_532_745 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_082 nanoseconds. + Weight::from_ref_time(69_993_662 as u64) + // Standard Error: 1_218 + .saturating_add(Weight::from_ref_time(2_524_010 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - // Minimum execution time: 117_987 nanoseconds. - Weight::from_ref_time(118_573_115 as u64) - // Standard Error: 1_814 - .saturating_add(Weight::from_ref_time(2_438_519 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_036 nanoseconds. + Weight::from_ref_time(70_095_304 as u64) + // Standard Error: 1_473 + .saturating_add(Weight::from_ref_time(2_429_659 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - // Minimum execution time: 120_001 nanoseconds. - Weight::from_ref_time(118_527_518 as u64) - // Standard Error: 1_040 - .saturating_add(Weight::from_ref_time(1_874_359 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_229 nanoseconds. + Weight::from_ref_time(69_759_818 as u64) + // Standard Error: 573 + .saturating_add(Weight::from_ref_time(1_879_670 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - // Minimum execution time: 118_051 nanoseconds. - Weight::from_ref_time(118_315_649 as u64) - // Standard Error: 163 - .saturating_add(Weight::from_ref_time(1_851_162 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_151 nanoseconds. + Weight::from_ref_time(69_865_948 as u64) + // Standard Error: 721 + .saturating_add(Weight::from_ref_time(1_846_734 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - // Minimum execution time: 118_038 nanoseconds. - Weight::from_ref_time(118_558_677 as u64) - // Standard Error: 2_258 - .saturating_add(Weight::from_ref_time(1_845_713 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_120 nanoseconds. + Weight::from_ref_time(70_135_849 as u64) + // Standard Error: 3_443 + .saturating_add(Weight::from_ref_time(1_841_784 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - // Minimum execution time: 117_985 nanoseconds. - Weight::from_ref_time(118_660_597 as u64) - // Standard Error: 5_893 - .saturating_add(Weight::from_ref_time(1_887_423 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_077 nanoseconds. + Weight::from_ref_time(69_929_746 as u64) + // Standard Error: 821 + .saturating_add(Weight::from_ref_time(1_866_348 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - // Minimum execution time: 120_101 nanoseconds. - Weight::from_ref_time(118_522_424 as u64) - // Standard Error: 1_094 - .saturating_add(Weight::from_ref_time(1_867_495 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_226 nanoseconds. + Weight::from_ref_time(69_725_630 as u64) + // Standard Error: 891 + .saturating_add(Weight::from_ref_time(1_873_637 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - // Minimum execution time: 118_068 nanoseconds. 
- Weight::from_ref_time(119_012_941 as u64) - // Standard Error: 9_129 - .saturating_add(Weight::from_ref_time(1_877_850 as u64).saturating_mul(r as u64)) + // Minimum execution time: 70_591 nanoseconds. + Weight::from_ref_time(69_939_773 as u64) + // Standard Error: 960 + .saturating_add(Weight::from_ref_time(1_867_208 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - // Minimum execution time: 120_105 nanoseconds. - Weight::from_ref_time(118_559_474 as u64) - // Standard Error: 2_055 - .saturating_add(Weight::from_ref_time(1_868_662 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_187 nanoseconds. + Weight::from_ref_time(69_845_516 as u64) + // Standard Error: 781 + .saturating_add(Weight::from_ref_time(1_869_613 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - // Minimum execution time: 118_004 nanoseconds. - Weight::from_ref_time(118_370_466 as u64) - // Standard Error: 818 - .saturating_add(Weight::from_ref_time(1_873_459 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_065 nanoseconds. + Weight::from_ref_time(69_950_430 as u64) + // Standard Error: 986 + .saturating_add(Weight::from_ref_time(1_867_001 as u64).saturating_mul(r as u64)) } } @@ -1383,17 +1383,17 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) fn on_process_deletion_queue_batch() -> Weight { - // Minimum execution time: 2_904 nanoseconds. - Weight::from_ref_time(3_036_000 as u64) + // Minimum execution time: 3_064 nanoseconds. + Weight::from_ref_time(3_236_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `k` is `[0, 1024]`. fn on_initialize_per_trie_key(k: u32, ) -> Weight { - // Minimum execution time: 14_553 nanoseconds. - Weight::from_ref_time(14_161_469 as u64) - // Standard Error: 593 - .saturating_add(Weight::from_ref_time(894_982 as u64).saturating_mul(k as u64)) + // Minimum execution time: 15_492 nanoseconds. + Weight::from_ref_time(14_309_233 as u64) + // Standard Error: 649 + .saturating_add(Weight::from_ref_time(930_078 as u64).saturating_mul(k as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) @@ -1401,10 +1401,10 @@ impl WeightInfo for () { // Storage: Contracts DeletionQueue (r:1 w:0) /// The range of component `q` is `[0, 128]`. fn on_initialize_per_queue_item(q: u32, ) -> Weight { - // Minimum execution time: 3_121 nanoseconds. - Weight::from_ref_time(14_262_977 as u64) - // Standard Error: 3_272 - .saturating_add(Weight::from_ref_time(1_232_597 as u64).saturating_mul(q as u64)) + // Minimum execution time: 3_240 nanoseconds. + Weight::from_ref_time(15_076_559 as u64) + // Standard Error: 3_337 + .saturating_add(Weight::from_ref_time(1_244_348 as u64).saturating_mul(q as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -1412,10 +1412,10 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn reinstrument(c: u32, ) -> Weight { - // Minimum execution time: 28_625 nanoseconds. 
- Weight::from_ref_time(31_685_032 as u64) - // Standard Error: 57 - .saturating_add(Weight::from_ref_time(44_024 as u64).saturating_mul(c as u64)) + // Minimum execution time: 22_524 nanoseconds. + Weight::from_ref_time(19_939_078 as u64) + // Standard Error: 43 + .saturating_add(Weight::from_ref_time(43_802 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -1426,10 +1426,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `c` is `[0, 131072]`. fn call_with_code_per_byte(c: u32, ) -> Weight { - // Minimum execution time: 355_167 nanoseconds. - Weight::from_ref_time(330_201_049 as u64) - // Standard Error: 64 - .saturating_add(Weight::from_ref_time(45_952 as u64).saturating_mul(c as u64)) + // Minimum execution time: 261_039 nanoseconds. + Weight::from_ref_time(228_709_853 as u64) + // Standard Error: 105 + .saturating_add(Weight::from_ref_time(47_449 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1444,12 +1444,12 @@ impl WeightInfo for () { /// The range of component `c` is `[0, 64226]`. /// The range of component `s` is `[0, 1048576]`. fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - // Minimum execution time: 2_165_939 nanoseconds. - Weight::from_ref_time(346_673_842 as u64) - // Standard Error: 73 - .saturating_add(Weight::from_ref_time(107_020 as u64).saturating_mul(c as u64)) + // Minimum execution time: 2_054_867 nanoseconds. + Weight::from_ref_time(259_090_306 as u64) + // Standard Error: 72 + .saturating_add(Weight::from_ref_time(107_519 as u64).saturating_mul(c as u64)) // Standard Error: 4 - .saturating_add(Weight::from_ref_time(1_754 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_736 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().writes(9 as u64)) } @@ -1462,10 +1462,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `s` is `[0, 1048576]`. fn instantiate(s: u32, ) -> Weight { - // Minimum execution time: 262_523 nanoseconds. - Weight::from_ref_time(255_633_789 as u64) + // Minimum execution time: 213_409 nanoseconds. + Weight::from_ref_time(205_300_495 as u64) // Standard Error: 1 - .saturating_add(Weight::from_ref_time(1_485 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_479 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().writes(7 as u64)) } @@ -1475,8 +1475,8 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: System EventTopics (r:2 w:2) fn call() -> Weight { - // Minimum execution time: 233_124 nanoseconds. - Weight::from_ref_time(233_826_000 as u64) + // Minimum execution time: 183_317 nanoseconds. + Weight::from_ref_time(184_465_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1486,10 +1486,10 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:0 w:1) /// The range of component `c` is `[0, 64226]`. fn upload_code(c: u32, ) -> Weight { - // Minimum execution time: 61_170 nanoseconds. 
- Weight::from_ref_time(61_653_502 as u64) - // Standard Error: 47 - .saturating_add(Weight::from_ref_time(45_553 as u64).saturating_mul(c as u64)) + // Minimum execution time: 56_187 nanoseconds. + Weight::from_ref_time(60_636_621 as u64) + // Standard Error: 46 + .saturating_add(Weight::from_ref_time(45_734 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1498,8 +1498,8 @@ impl WeightInfo for () { // Storage: Contracts CodeStorage (r:0 w:1) // Storage: Contracts PristineCode (r:0 w:1) fn remove_code() -> Weight { - // Minimum execution time: 37_278 nanoseconds. - Weight::from_ref_time(37_822_000 as u64) + // Minimum execution time: 38_433 nanoseconds. + Weight::from_ref_time(38_917_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -1507,8 +1507,8 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:2 w:2) // Storage: System EventTopics (r:3 w:3) fn set_code() -> Weight { - // Minimum execution time: 40_329 nanoseconds. - Weight::from_ref_time(41_017_000 as u64) + // Minimum execution time: 41_507 nanoseconds. + Weight::from_ref_time(41_938_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -1519,10 +1519,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller(r: u32, ) -> Weight { - // Minimum execution time: 345_717 nanoseconds. - Weight::from_ref_time(347_610_656 as u64) - // Standard Error: 37_159 - .saturating_add(Weight::from_ref_time(35_506_783 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_628 nanoseconds. + Weight::from_ref_time(251_997_923 as u64) + // Standard Error: 26_157 + .saturating_add(Weight::from_ref_time(35_002_004 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1533,10 +1533,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_is_contract(r: u32, ) -> Weight { - // Minimum execution time: 346_158 nanoseconds. - Weight::from_ref_time(289_982_824 as u64) - // Standard Error: 426_543 - .saturating_add(Weight::from_ref_time(202_123_887 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_390 nanoseconds. + Weight::from_ref_time(193_793_052 as u64) + // Standard Error: 430_292 + .saturating_add(Weight::from_ref_time(211_029_686 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1548,10 +1548,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 349_141 nanoseconds. - Weight::from_ref_time(297_477_406 as u64) - // Standard Error: 403_411 - .saturating_add(Weight::from_ref_time(258_999_975 as u64).saturating_mul(r as u64)) + // Minimum execution time: 252_469 nanoseconds. 
+ Weight::from_ref_time(201_438_856 as u64) + // Standard Error: 420_040 + .saturating_add(Weight::from_ref_time(267_340_744 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1563,10 +1563,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_own_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 347_419 nanoseconds. - Weight::from_ref_time(350_048_279 as u64) - // Standard Error: 22_511 - .saturating_add(Weight::from_ref_time(38_799_515 as u64).saturating_mul(r as u64)) + // Minimum execution time: 251_154 nanoseconds. + Weight::from_ref_time(254_831_062 as u64) + // Standard Error: 37_843 + .saturating_add(Weight::from_ref_time(38_579_567 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1577,10 +1577,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_caller_is_origin(r: u32, ) -> Weight { - // Minimum execution time: 341_634 nanoseconds. - Weight::from_ref_time(344_394_100 as u64) - // Standard Error: 12_563 - .saturating_add(Weight::from_ref_time(14_529_298 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_875 nanoseconds. + Weight::from_ref_time(250_312_587 as u64) + // Standard Error: 17_901 + .saturating_add(Weight::from_ref_time(15_153_431 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1591,10 +1591,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_address(r: u32, ) -> Weight { - // Minimum execution time: 346_246 nanoseconds. - Weight::from_ref_time(349_406_095 as u64) - // Standard Error: 28_842 - .saturating_add(Weight::from_ref_time(35_066_080 as u64).saturating_mul(r as u64)) + // Minimum execution time: 250_097 nanoseconds. + Weight::from_ref_time(252_157_442 as u64) + // Standard Error: 38_426 + .saturating_add(Weight::from_ref_time(35_084_205 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1605,10 +1605,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas_left(r: u32, ) -> Weight { - // Minimum execution time: 346_003 nanoseconds. - Weight::from_ref_time(349_076_713 as u64) - // Standard Error: 30_507 - .saturating_add(Weight::from_ref_time(34_662_539 as u64).saturating_mul(r as u64)) + // Minimum execution time: 250_034 nanoseconds. + Weight::from_ref_time(252_189_233 as u64) + // Standard Error: 33_081 + .saturating_add(Weight::from_ref_time(34_764_160 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1619,10 +1619,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_balance(r: u32, ) -> Weight { - // Minimum execution time: 346_063 nanoseconds. 
- Weight::from_ref_time(350_054_605 as u64) - // Standard Error: 68_711 - .saturating_add(Weight::from_ref_time(107_584_776 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_587 nanoseconds. + Weight::from_ref_time(258_565_111 as u64) + // Standard Error: 75_715 + .saturating_add(Weight::from_ref_time(109_687_486 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1633,10 +1633,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_value_transferred(r: u32, ) -> Weight { - // Minimum execution time: 346_057 nanoseconds. - Weight::from_ref_time(349_489_403 as u64) - // Standard Error: 38_116 - .saturating_add(Weight::from_ref_time(34_750_791 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_735 nanoseconds. + Weight::from_ref_time(252_875_784 as u64) + // Standard Error: 42_024 + .saturating_add(Weight::from_ref_time(34_555_983 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1647,10 +1647,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_minimum_balance(r: u32, ) -> Weight { - // Minimum execution time: 345_722 nanoseconds. - Weight::from_ref_time(348_659_357 as u64) - // Standard Error: 33_077 - .saturating_add(Weight::from_ref_time(34_845_216 as u64).saturating_mul(r as u64)) + // Minimum execution time: 250_025 nanoseconds. + Weight::from_ref_time(255_212_046 as u64) + // Standard Error: 41_865 + .saturating_add(Weight::from_ref_time(34_332_291 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1661,10 +1661,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_block_number(r: u32, ) -> Weight { - // Minimum execution time: 345_805 nanoseconds. - Weight::from_ref_time(347_579_287 as u64) - // Standard Error: 28_498 - .saturating_add(Weight::from_ref_time(34_529_480 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_641 nanoseconds. + Weight::from_ref_time(252_978_686 as u64) + // Standard Error: 25_820 + .saturating_add(Weight::from_ref_time(34_175_386 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1675,10 +1675,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_now(r: u32, ) -> Weight { - // Minimum execution time: 345_943 nanoseconds. - Weight::from_ref_time(348_941_819 as u64) - // Standard Error: 32_278 - .saturating_add(Weight::from_ref_time(34_580_817 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_871 nanoseconds. + Weight::from_ref_time(253_237_931 as u64) + // Standard Error: 30_986 + .saturating_add(Weight::from_ref_time(34_305_155 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1690,10 +1690,10 @@ impl WeightInfo for () { // Storage: TransactionPayment NextFeeMultiplier (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_weight_to_fee(r: u32, ) -> Weight { - // Minimum execution time: 345_753 nanoseconds. 
- Weight::from_ref_time(354_733_779 as u64) - // Standard Error: 71_767 - .saturating_add(Weight::from_ref_time(105_098_275 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_787 nanoseconds. + Weight::from_ref_time(258_457_094 as u64) + // Standard Error: 75_835 + .saturating_add(Weight::from_ref_time(107_115_666 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1704,10 +1704,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_gas(r: u32, ) -> Weight { - // Minimum execution time: 222_866 nanoseconds. - Weight::from_ref_time(226_212_157 as u64) - // Standard Error: 17_326 - .saturating_add(Weight::from_ref_time(15_544_104 as u64).saturating_mul(r as u64)) + // Minimum execution time: 171_667 nanoseconds. + Weight::from_ref_time(174_687_863 as u64) + // Standard Error: 34_576 + .saturating_add(Weight::from_ref_time(15_895_674 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1718,10 +1718,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_input(r: u32, ) -> Weight { - // Minimum execution time: 345_704 nanoseconds. - Weight::from_ref_time(348_160_999 as u64) - // Standard Error: 31_560 - .saturating_add(Weight::from_ref_time(32_888_428 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_610 nanoseconds. + Weight::from_ref_time(251_476_758 as u64) + // Standard Error: 39_422 + .saturating_add(Weight::from_ref_time(32_870_429 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1732,10 +1732,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_input_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 380_814 nanoseconds. - Weight::from_ref_time(403_973_055 as u64) - // Standard Error: 2_690 - .saturating_add(Weight::from_ref_time(9_597_443 as u64).saturating_mul(n as u64)) + // Minimum execution time: 285_154 nanoseconds. + Weight::from_ref_time(307_768_636 as u64) + // Standard Error: 2_701 + .saturating_add(Weight::from_ref_time(9_544_122 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1746,10 +1746,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_return(r: u32, ) -> Weight { - // Minimum execution time: 340_930 nanoseconds. - Weight::from_ref_time(342_218_828 as u64) - // Standard Error: 80_578 - .saturating_add(Weight::from_ref_time(1_547_971 as u64).saturating_mul(r as u64)) + // Minimum execution time: 244_810 nanoseconds. + Weight::from_ref_time(247_576_385 as u64) + // Standard Error: 80_494 + .saturating_add(Weight::from_ref_time(2_052_714 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1760,10 +1760,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_return_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 343_035 nanoseconds. 
- Weight::from_ref_time(345_347_882 as u64) - // Standard Error: 509 - .saturating_add(Weight::from_ref_time(228_526 as u64).saturating_mul(n as u64)) + // Minimum execution time: 248_049 nanoseconds. + Weight::from_ref_time(250_148_025 as u64) + // Standard Error: 339 + .saturating_add(Weight::from_ref_time(185_344 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1776,10 +1776,10 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:1 w:1) /// The range of component `r` is `[0, 1]`. fn seal_terminate(r: u32, ) -> Weight { - // Minimum execution time: 344_997 nanoseconds. - Weight::from_ref_time(346_431_655 as u64) - // Standard Error: 79_924 - .saturating_add(Weight::from_ref_time(53_530_044 as u64).saturating_mul(r as u64)) + // Minimum execution time: 246_620 nanoseconds. + Weight::from_ref_time(250_752_277 as u64) + // Standard Error: 84_300 + .saturating_add(Weight::from_ref_time(54_264_722 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1793,10 +1793,10 @@ impl WeightInfo for () { // Storage: RandomnessCollectiveFlip RandomMaterial (r:1 w:0) /// The range of component `r` is `[0, 20]`. fn seal_random(r: u32, ) -> Weight { - // Minimum execution time: 345_926 nanoseconds. - Weight::from_ref_time(351_620_774 as u64) - // Standard Error: 69_858 - .saturating_add(Weight::from_ref_time(130_846_895 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_065 nanoseconds. + Weight::from_ref_time(252_419_902 as u64) + // Standard Error: 84_223 + .saturating_add(Weight::from_ref_time(134_454_079 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1807,10 +1807,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_deposit_event(r: u32, ) -> Weight { - // Minimum execution time: 341_582 nanoseconds. - Weight::from_ref_time(353_141_157 as u64) - // Standard Error: 99_764 - .saturating_add(Weight::from_ref_time(231_149_152 as u64).saturating_mul(r as u64)) + // Minimum execution time: 246_588 nanoseconds. + Weight::from_ref_time(261_525_328 as u64) + // Standard Error: 97_732 + .saturating_add(Weight::from_ref_time(235_555_878 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -1822,12 +1822,12 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 4]`. /// The range of component `n` is `[0, 16]`. fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - // Minimum execution time: 1_269_467 nanoseconds. - Weight::from_ref_time(566_371_969 as u64) - // Standard Error: 435_714 - .saturating_add(Weight::from_ref_time(180_441_805 as u64).saturating_mul(t as u64)) - // Standard Error: 119_668 - .saturating_add(Weight::from_ref_time(70_111_002 as u64).saturating_mul(n as u64)) + // Minimum execution time: 1_171_144 nanoseconds. 
+ Weight::from_ref_time(490_333_337 as u64) + // Standard Error: 404_664 + .saturating_add(Weight::from_ref_time(173_683_265 as u64).saturating_mul(t as u64)) + // Standard Error: 111_140 + .saturating_add(Weight::from_ref_time(66_081_822 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1840,20 +1840,20 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_debug_message(r: u32, ) -> Weight { - // Minimum execution time: 231_528 nanoseconds. - Weight::from_ref_time(235_676_870 as u64) - // Standard Error: 20_851 - .saturating_add(Weight::from_ref_time(26_215_920 as u64).saturating_mul(r as u64)) + // Minimum execution time: 178_822 nanoseconds. + Weight::from_ref_time(181_571_518 as u64) + // Standard Error: 19_207 + .saturating_add(Weight::from_ref_time(26_784_712 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_set_storage(r: u32, ) -> Weight { - // Minimum execution time: 346_509 nanoseconds. - Weight::from_ref_time(301_173_874 as u64) - // Standard Error: 436_884 - .saturating_add(Weight::from_ref_time(415_194_325 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_737 nanoseconds. + Weight::from_ref_time(208_095_467 as u64) + // Standard Error: 417_236 + .saturating_add(Weight::from_ref_time(430_088_574 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1862,10 +1862,10 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_new_kb(n: u32, ) -> Weight { - // Minimum execution time: 500_452 nanoseconds. - Weight::from_ref_time(641_506_544 as u64) - // Standard Error: 1_308_973 - .saturating_add(Weight::from_ref_time(98_769_541 as u64).saturating_mul(n as u64)) + // Minimum execution time: 400_055 nanoseconds. + Weight::from_ref_time(551_666_883 as u64) + // Standard Error: 1_379_652 + .saturating_add(Weight::from_ref_time(94_069_118 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(52 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(50 as u64)) @@ -1874,10 +1874,10 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_set_storage_per_old_kb(n: u32, ) -> Weight { - // Minimum execution time: 500_941 nanoseconds. - Weight::from_ref_time(610_446_049 as u64) - // Standard Error: 1_018_173 - .saturating_add(Weight::from_ref_time(65_568_627 as u64).saturating_mul(n as u64)) + // Minimum execution time: 400_370 nanoseconds. 
+ Weight::from_ref_time(521_380_000 as u64) + // Standard Error: 1_112_618 + .saturating_add(Weight::from_ref_time(68_664_898 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(49 as u64)) @@ -1886,10 +1886,10 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_clear_storage(r: u32, ) -> Weight { - // Minimum execution time: 345_820 nanoseconds. - Weight::from_ref_time(302_335_076 as u64) - // Standard Error: 472_676 - .saturating_add(Weight::from_ref_time(395_286_593 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_711 nanoseconds. + Weight::from_ref_time(212_629_798 as u64) + // Standard Error: 378_159 + .saturating_add(Weight::from_ref_time(415_326_230 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1898,10 +1898,10 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_clear_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 457_830 nanoseconds. - Weight::from_ref_time(582_524_868 as u64) - // Standard Error: 1_161_813 - .saturating_add(Weight::from_ref_time(67_291_419 as u64).saturating_mul(n as u64)) + // Minimum execution time: 365_702 nanoseconds. + Weight::from_ref_time(499_337_686 as u64) + // Standard Error: 1_232_330 + .saturating_add(Weight::from_ref_time(70_648_878 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(48 as u64)) @@ -1910,10 +1910,10 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_get_storage(r: u32, ) -> Weight { - // Minimum execution time: 348_249 nanoseconds. - Weight::from_ref_time(317_329_583 as u64) - // Standard Error: 400_112 - .saturating_add(Weight::from_ref_time(331_362_971 as u64).saturating_mul(r as u64)) + // Minimum execution time: 251_357 nanoseconds. + Weight::from_ref_time(220_533_580 as u64) + // Standard Error: 345_297 + .saturating_add(Weight::from_ref_time(349_413_968 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1921,10 +1921,10 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_get_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 446_902 nanoseconds. - Weight::from_ref_time(556_989_117 as u64) - // Standard Error: 1_029_959 - .saturating_add(Weight::from_ref_time(159_623_361 as u64).saturating_mul(n as u64)) + // Minimum execution time: 354_162 nanoseconds. 
+ Weight::from_ref_time(472_811_575 as u64) + // Standard Error: 1_109_282 + .saturating_add(Weight::from_ref_time(154_074_386 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1932,10 +1932,10 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_contains_storage(r: u32, ) -> Weight { - // Minimum execution time: 347_064 nanoseconds. - Weight::from_ref_time(316_879_171 as u64) - // Standard Error: 355_083 - .saturating_add(Weight::from_ref_time(304_763_264 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_551 nanoseconds. + Weight::from_ref_time(219_176_526 as u64) + // Standard Error: 358_914 + .saturating_add(Weight::from_ref_time(326_009_513 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1943,10 +1943,10 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_contains_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 435_275 nanoseconds. - Weight::from_ref_time(527_900_488 as u64) - // Standard Error: 872_763 - .saturating_add(Weight::from_ref_time(60_604_211 as u64).saturating_mul(n as u64)) + // Minimum execution time: 339_149 nanoseconds. + Weight::from_ref_time(440_615_016 as u64) + // Standard Error: 954_837 + .saturating_add(Weight::from_ref_time(66_153_533 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1954,10 +1954,10 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `r` is `[0, 10]`. fn seal_take_storage(r: u32, ) -> Weight { - // Minimum execution time: 348_671 nanoseconds. - Weight::from_ref_time(306_307_186 as u64) - // Standard Error: 438_525 - .saturating_add(Weight::from_ref_time(422_575_502 as u64).saturating_mul(r as u64)) + // Minimum execution time: 251_812 nanoseconds. + Weight::from_ref_time(209_954_069 as u64) + // Standard Error: 398_380 + .saturating_add(Weight::from_ref_time(438_573_954 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -1966,10 +1966,10 @@ impl WeightInfo for () { // Storage: Skipped Metadata (r:0 w:0) /// The range of component `n` is `[0, 8]`. fn seal_take_storage_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 470_344 nanoseconds. - Weight::from_ref_time(613_380_073 as u64) - // Standard Error: 1_333_864 - .saturating_add(Weight::from_ref_time(164_398_286 as u64).saturating_mul(n as u64)) + // Minimum execution time: 374_594 nanoseconds. 
+ Weight::from_ref_time(525_213_792 as u64) + // Standard Error: 1_378_489 + .saturating_add(Weight::from_ref_time(161_599_623 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(51 as u64)) .saturating_add(RocksDbWeight::get().reads((7 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(48 as u64)) @@ -1982,10 +1982,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_transfer(r: u32, ) -> Weight { - // Minimum execution time: 348_312 nanoseconds. - Weight::from_ref_time(303_017_886 as u64) - // Standard Error: 568_589 - .saturating_add(Weight::from_ref_time(1_351_508_682 as u64).saturating_mul(r as u64)) + // Minimum execution time: 251_379 nanoseconds. + Weight::from_ref_time(204_214_298 as u64) + // Standard Error: 662_575 + .saturating_add(Weight::from_ref_time(1_366_716_853 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((80 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -1998,10 +1998,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_call(r: u32, ) -> Weight { - // Minimum execution time: 348_997 nanoseconds. - Weight::from_ref_time(349_975_000 as u64) - // Standard Error: 5_689_469 - .saturating_add(Weight::from_ref_time(24_991_501_544 as u64).saturating_mul(r as u64)) + // Minimum execution time: 252_896 nanoseconds. + Weight::from_ref_time(253_811_000 as u64) + // Standard Error: 6_576_179 + .saturating_add(Weight::from_ref_time(17_254_952_849 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().reads((160 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -2014,10 +2014,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 20]`. fn seal_delegate_call(r: u32, ) -> Weight { - // Minimum execution time: 348_751 nanoseconds. - Weight::from_ref_time(349_330_000 as u64) - // Standard Error: 7_125_421 - .saturating_add(Weight::from_ref_time(24_917_646_052 as u64).saturating_mul(r as u64)) + // Minimum execution time: 249_312 nanoseconds. + Weight::from_ref_time(253_806_000 as u64) + // Standard Error: 6_118_873 + .saturating_add(Weight::from_ref_time(17_081_370_212 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((150 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -2031,12 +2031,12 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `c` is `[0, 1024]`. fn seal_call_per_transfer_clone_kb(t: u32, c: u32, ) -> Weight { - // Minimum execution time: 16_275_711 nanoseconds. - Weight::from_ref_time(15_203_241_602 as u64) - // Standard Error: 3_582_276 - .saturating_add(Weight::from_ref_time(1_179_848_168 as u64).saturating_mul(t as u64)) - // Standard Error: 5_371 - .saturating_add(Weight::from_ref_time(9_712_484 as u64).saturating_mul(c as u64)) + // Minimum execution time: 12_001_522 nanoseconds. 
+ Weight::from_ref_time(10_903_312_955 as u64) + // Standard Error: 4_301_096 + .saturating_add(Weight::from_ref_time(1_243_413_241 as u64).saturating_mul(t as u64)) + // Standard Error: 6_449 + .saturating_add(Weight::from_ref_time(9_713_655 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(167 as u64)) .saturating_add(RocksDbWeight::get().reads((81 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(163 as u64)) @@ -2051,10 +2051,10 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:80 w:80) /// The range of component `r` is `[0, 20]`. fn seal_instantiate(r: u32, ) -> Weight { - // Minimum execution time: 353_168 nanoseconds. - Weight::from_ref_time(353_901_000 as u64) - // Standard Error: 18_701_989 - .saturating_add(Weight::from_ref_time(30_028_672_644 as u64).saturating_mul(r as u64)) + // Minimum execution time: 254_969 nanoseconds. + Weight::from_ref_time(255_984_000 as u64) + // Standard Error: 18_545_048 + .saturating_add(Weight::from_ref_time(22_343_189_765 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().reads((400 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(5 as u64)) @@ -2070,10 +2070,10 @@ impl WeightInfo for () { /// The range of component `t` is `[0, 1]`. /// The range of component `s` is `[0, 960]`. fn seal_instantiate_per_transfer_salt_kb(t: u32, s: u32, ) -> Weight { - // Minimum execution time: 18_226_984 nanoseconds. - Weight::from_ref_time(18_032_466_983 as u64) - // Standard Error: 75_544 - .saturating_add(Weight::from_ref_time(121_087_538 as u64).saturating_mul(s as u64)) + // Minimum execution time: 14_077_497 nanoseconds. + Weight::from_ref_time(13_949_740_588 as u64) + // Standard Error: 66_631 + .saturating_add(Weight::from_ref_time(120_519_572 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(249 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(t as u64))) .saturating_add(RocksDbWeight::get().writes(247 as u64)) @@ -2086,10 +2086,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_sha2_256(r: u32, ) -> Weight { - // Minimum execution time: 344_334 nanoseconds. - Weight::from_ref_time(345_939_404 as u64) - // Standard Error: 98_994 - .saturating_add(Weight::from_ref_time(58_015_295 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_445 nanoseconds. + Weight::from_ref_time(251_229_791 as u64) + // Standard Error: 88_045 + .saturating_add(Weight::from_ref_time(57_577_008 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2100,10 +2100,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 402_492 nanoseconds. - Weight::from_ref_time(402_789_000 as u64) - // Standard Error: 54_174 - .saturating_add(Weight::from_ref_time(324_036_889 as u64).saturating_mul(n as u64)) + // Minimum execution time: 308_069 nanoseconds. 
+ Weight::from_ref_time(308_971_000 as u64) + // Standard Error: 46_181 + .saturating_add(Weight::from_ref_time(321_835_684 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2114,10 +2114,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_keccak_256(r: u32, ) -> Weight { - // Minimum execution time: 343_493 nanoseconds. - Weight::from_ref_time(345_116_214 as u64) - // Standard Error: 90_515 - .saturating_add(Weight::from_ref_time(71_282_485 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_107 nanoseconds. + Weight::from_ref_time(250_125_030 as u64) + // Standard Error: 88_769 + .saturating_add(Weight::from_ref_time(70_727_669 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2128,10 +2128,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 415_650 nanoseconds. - Weight::from_ref_time(415_894_000 as u64) - // Standard Error: 58_077 - .saturating_add(Weight::from_ref_time(248_416_317 as u64).saturating_mul(n as u64)) + // Minimum execution time: 319_515 nanoseconds. + Weight::from_ref_time(319_784_000 as u64) + // Standard Error: 58_896 + .saturating_add(Weight::from_ref_time(246_433_962 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2142,10 +2142,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_blake2_256(r: u32, ) -> Weight { - // Minimum execution time: 343_890 nanoseconds. - Weight::from_ref_time(345_542_924 as u64) - // Standard Error: 87_564 - .saturating_add(Weight::from_ref_time(47_361_375 as u64).saturating_mul(r as u64)) + // Minimum execution time: 247_887 nanoseconds. + Weight::from_ref_time(250_452_702 as u64) + // Standard Error: 140_887 + .saturating_add(Weight::from_ref_time(49_538_397 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2156,10 +2156,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 391_629 nanoseconds. - Weight::from_ref_time(392_793_000 as u64) - // Standard Error: 51_120 - .saturating_add(Weight::from_ref_time(99_288_467 as u64).saturating_mul(n as u64)) + // Minimum execution time: 297_534 nanoseconds. + Weight::from_ref_time(298_249_000 as u64) + // Standard Error: 49_680 + .saturating_add(Weight::from_ref_time(99_001_103 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2170,10 +2170,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_hash_blake2_128(r: u32, ) -> Weight { - // Minimum execution time: 341_701 nanoseconds. - Weight::from_ref_time(343_318_489 as u64) - // Standard Error: 103_756 - .saturating_add(Weight::from_ref_time(47_301_910 as u64).saturating_mul(r as u64)) + // Minimum execution time: 245_926 nanoseconds. 
+ Weight::from_ref_time(248_471_834 as u64) + // Standard Error: 101_639 + .saturating_add(Weight::from_ref_time(47_889_865 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2184,10 +2184,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `n` is `[0, 1024]`. fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - // Minimum execution time: 389_898 nanoseconds. - Weight::from_ref_time(390_489_000 as u64) - // Standard Error: 52_301 - .saturating_add(Weight::from_ref_time(99_344_068 as u64).saturating_mul(n as u64)) + // Minimum execution time: 294_835 nanoseconds. + Weight::from_ref_time(296_328_000 as u64) + // Standard Error: 46_612 + .saturating_add(Weight::from_ref_time(98_859_152 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2198,10 +2198,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_recover(r: u32, ) -> Weight { - // Minimum execution time: 348_789 nanoseconds. - Weight::from_ref_time(350_451_620 as u64) - // Standard Error: 195_939 - .saturating_add(Weight::from_ref_time(2_962_830_479 as u64).saturating_mul(r as u64)) + // Minimum execution time: 251_104 nanoseconds. + Weight::from_ref_time(253_114_893 as u64) + // Standard Error: 316_740 + .saturating_add(Weight::from_ref_time(2_964_072_706 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2212,10 +2212,10 @@ impl WeightInfo for () { // Storage: System EventTopics (r:2 w:2) /// The range of component `r` is `[0, 1]`. fn seal_ecdsa_to_eth_address(r: u32, ) -> Weight { - // Minimum execution time: 346_537 nanoseconds. - Weight::from_ref_time(347_926_836 as u64) - // Standard Error: 110_557 - .saturating_add(Weight::from_ref_time(2_058_656_763 as u64).saturating_mul(r as u64)) + // Minimum execution time: 250_048 nanoseconds. + Weight::from_ref_time(251_774_991 as u64) + // Standard Error: 115_294 + .saturating_add(Weight::from_ref_time(2_094_245_208 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -2227,10 +2227,10 @@ impl WeightInfo for () { // Storage: Contracts OwnerInfoOf (r:16 w:16) /// The range of component `r` is `[0, 20]`. fn seal_set_code_hash(r: u32, ) -> Weight { - // Minimum execution time: 346_672 nanoseconds. - Weight::from_ref_time(347_628_000 as u64) - // Standard Error: 2_742_329 - .saturating_add(Weight::from_ref_time(1_370_383_386 as u64).saturating_mul(r as u64)) + // Minimum execution time: 250_830 nanoseconds. + Weight::from_ref_time(251_477_000 as u64) + // Standard Error: 2_727_998 + .saturating_add(Weight::from_ref_time(1_390_149_283 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().reads((225 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) @@ -2238,359 +2238,359 @@ impl WeightInfo for () { } /// The range of component `r` is `[0, 50]`. fn instr_i64const(r: u32, ) -> Weight { - // Minimum execution time: 118_029 nanoseconds. 
- Weight::from_ref_time(118_800_774 as u64) - // Standard Error: 4_234 - .saturating_add(Weight::from_ref_time(866_719 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_022 nanoseconds. + Weight::from_ref_time(69_707_657 as u64) + // Standard Error: 8_674 + .saturating_add(Weight::from_ref_time(887_555 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64load(r: u32, ) -> Weight { - // Minimum execution time: 118_145 nanoseconds. - Weight::from_ref_time(118_644_813 as u64) - // Standard Error: 2_692 - .saturating_add(Weight::from_ref_time(2_874_276 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_491 nanoseconds. + Weight::from_ref_time(70_354_670 as u64) + // Standard Error: 1_518 + .saturating_add(Weight::from_ref_time(2_758_912 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64store(r: u32, ) -> Weight { - // Minimum execution time: 118_142 nanoseconds. - Weight::from_ref_time(118_474_161 as u64) - // Standard Error: 6_997 - .saturating_add(Weight::from_ref_time(2_769_076 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_156 nanoseconds. + Weight::from_ref_time(69_917_601 as u64) + // Standard Error: 1_970 + .saturating_add(Weight::from_ref_time(2_753_174 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_select(r: u32, ) -> Weight { - // Minimum execution time: 118_020 nanoseconds. - Weight::from_ref_time(118_499_103 as u64) - // Standard Error: 1_266 - .saturating_add(Weight::from_ref_time(2_351_397 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_944 nanoseconds. + Weight::from_ref_time(69_727_961 as u64) + // Standard Error: 376 + .saturating_add(Weight::from_ref_time(2_356_996 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_if(r: u32, ) -> Weight { - // Minimum execution time: 118_008 nanoseconds. - Weight::from_ref_time(118_398_803 as u64) - // Standard Error: 750 - .saturating_add(Weight::from_ref_time(2_480_061 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_971 nanoseconds. + Weight::from_ref_time(69_755_949 as u64) + // Standard Error: 543 + .saturating_add(Weight::from_ref_time(2_489_510 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br(r: u32, ) -> Weight { - // Minimum execution time: 118_046 nanoseconds. - Weight::from_ref_time(118_332_903 as u64) - // Standard Error: 234 - .saturating_add(Weight::from_ref_time(1_426_355 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_061 nanoseconds. + Weight::from_ref_time(69_625_000 as u64) + // Standard Error: 486 + .saturating_add(Weight::from_ref_time(1_431_684 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_if(r: u32, ) -> Weight { - // Minimum execution time: 117_998 nanoseconds. - Weight::from_ref_time(117_910_870 as u64) - // Standard Error: 4_629 - .saturating_add(Weight::from_ref_time(1_992_697 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_058 nanoseconds. + Weight::from_ref_time(69_521_790 as u64) + // Standard Error: 892 + .saturating_add(Weight::from_ref_time(1_964_054 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_br_table(r: u32, ) -> Weight { - // Minimum execution time: 117_998 nanoseconds. 
- Weight::from_ref_time(118_059_139 as u64) - // Standard Error: 1_985 - .saturating_add(Weight::from_ref_time(2_164_252 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_020 nanoseconds. + Weight::from_ref_time(69_344_255 as u64) + // Standard Error: 1_408 + .saturating_add(Weight::from_ref_time(2_169_179 as u64).saturating_mul(r as u64)) } /// The range of component `e` is `[1, 256]`. fn instr_br_table_per_entry(e: u32, ) -> Weight { - // Minimum execution time: 121_282 nanoseconds. - Weight::from_ref_time(121_483_289 as u64) - // Standard Error: 57 - .saturating_add(Weight::from_ref_time(4_013 as u64).saturating_mul(e as u64)) + // Minimum execution time: 72_366 nanoseconds. + Weight::from_ref_time(72_869_594 as u64) + // Standard Error: 73 + .saturating_add(Weight::from_ref_time(3_867 as u64).saturating_mul(e as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call(r: u32, ) -> Weight { - // Minimum execution time: 118_128 nanoseconds. - Weight::from_ref_time(118_869_764 as u64) - // Standard Error: 8_890 - .saturating_add(Weight::from_ref_time(6_496_684 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_164 nanoseconds. + Weight::from_ref_time(70_269_099 as u64) + // Standard Error: 8_824 + .saturating_add(Weight::from_ref_time(6_594_634 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_call_indirect(r: u32, ) -> Weight { - // Minimum execution time: 131_764 nanoseconds. - Weight::from_ref_time(133_009_514 as u64) - // Standard Error: 9_697 - .saturating_add(Weight::from_ref_time(8_310_784 as u64).saturating_mul(r as u64)) + // Minimum execution time: 83_348 nanoseconds. + Weight::from_ref_time(84_968_895 as u64) + // Standard Error: 6_305 + .saturating_add(Weight::from_ref_time(8_395_193 as u64).saturating_mul(r as u64)) } /// The range of component `p` is `[0, 128]`. fn instr_call_indirect_per_param(p: u32, ) -> Weight { - // Minimum execution time: 141_100 nanoseconds. - Weight::from_ref_time(142_282_540 as u64) - // Standard Error: 867 - .saturating_add(Weight::from_ref_time(536_814 as u64).saturating_mul(p as u64)) + // Minimum execution time: 92_358 nanoseconds. + Weight::from_ref_time(93_605_536 as u64) + // Standard Error: 2_019 + .saturating_add(Weight::from_ref_time(536_495 as u64).saturating_mul(p as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_get(r: u32, ) -> Weight { - // Minimum execution time: 118_535 nanoseconds. - Weight::from_ref_time(119_017_162 as u64) - // Standard Error: 1_175 - .saturating_add(Weight::from_ref_time(911_141 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_191 nanoseconds. + Weight::from_ref_time(70_407_702 as u64) + // Standard Error: 2_812 + .saturating_add(Weight::from_ref_time(901_706 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_set(r: u32, ) -> Weight { - // Minimum execution time: 118_472 nanoseconds. - Weight::from_ref_time(118_790_737 as u64) - // Standard Error: 758 - .saturating_add(Weight::from_ref_time(962_148 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_230 nanoseconds. + Weight::from_ref_time(70_255_278 as u64) + // Standard Error: 1_284 + .saturating_add(Weight::from_ref_time(951_754 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_local_tee(r: u32, ) -> Weight { - // Minimum execution time: 118_551 nanoseconds. 
- Weight::from_ref_time(119_070_827 as u64) - // Standard Error: 2_411 - .saturating_add(Weight::from_ref_time(1_367_622 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_278 nanoseconds. + Weight::from_ref_time(70_089_139 as u64) + // Standard Error: 757 + .saturating_add(Weight::from_ref_time(1_369_185 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_get(r: u32, ) -> Weight { - // Minimum execution time: 120_958 nanoseconds. - Weight::from_ref_time(121_649_052 as u64) - // Standard Error: 1_557 - .saturating_add(Weight::from_ref_time(1_447_477 as u64).saturating_mul(r as u64)) + // Minimum execution time: 72_047 nanoseconds. + Weight::from_ref_time(72_783_972 as u64) + // Standard Error: 837 + .saturating_add(Weight::from_ref_time(1_471_680 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_global_set(r: u32, ) -> Weight { - // Minimum execution time: 120_866 nanoseconds. - Weight::from_ref_time(121_040_400 as u64) - // Standard Error: 6_203 - .saturating_add(Weight::from_ref_time(1_548_715 as u64).saturating_mul(r as u64)) + // Minimum execution time: 71_960 nanoseconds. + Weight::from_ref_time(72_745_981 as u64) + // Standard Error: 1_086 + .saturating_add(Weight::from_ref_time(1_537_741 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_memory_current(r: u32, ) -> Weight { - // Minimum execution time: 118_076 nanoseconds. - Weight::from_ref_time(118_381_010 as u64) - // Standard Error: 1_814 - .saturating_add(Weight::from_ref_time(938_699 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_221 nanoseconds. + Weight::from_ref_time(70_010_862 as u64) + // Standard Error: 1_845 + .saturating_add(Weight::from_ref_time(933_738 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 1]`. fn instr_memory_grow(r: u32, ) -> Weight { - // Minimum execution time: 118_030 nanoseconds. - Weight::from_ref_time(120_331_261 as u64) - // Standard Error: 333_364 - .saturating_add(Weight::from_ref_time(227_636_638 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_081 nanoseconds. + Weight::from_ref_time(71_015_495 as u64) + // Standard Error: 27_078 + .saturating_add(Weight::from_ref_time(183_899_704 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64clz(r: u32, ) -> Weight { - // Minimum execution time: 118_056 nanoseconds. - Weight::from_ref_time(118_554_799 as u64) - // Standard Error: 1_269 - .saturating_add(Weight::from_ref_time(1_331_717 as u64).saturating_mul(r as u64)) + // Minimum execution time: 70_589 nanoseconds. + Weight::from_ref_time(70_175_537 as u64) + // Standard Error: 1_355 + .saturating_add(Weight::from_ref_time(1_323_745 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ctz(r: u32, ) -> Weight { - // Minimum execution time: 118_009 nanoseconds. - Weight::from_ref_time(118_678_537 as u64) - // Standard Error: 1_699 - .saturating_add(Weight::from_ref_time(1_328_153 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_083 nanoseconds. + Weight::from_ref_time(69_832_339 as u64) + // Standard Error: 818 + .saturating_add(Weight::from_ref_time(1_334_198 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64popcnt(r: u32, ) -> Weight { - // Minimum execution time: 118_031 nanoseconds. 
- Weight::from_ref_time(118_478_616 as u64) - // Standard Error: 1_533 - .saturating_add(Weight::from_ref_time(1_335_254 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_084 nanoseconds. + Weight::from_ref_time(69_802_701 as u64) + // Standard Error: 744 + .saturating_add(Weight::from_ref_time(1_334_601 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eqz(r: u32, ) -> Weight { - // Minimum execution time: 117_910 nanoseconds. - Weight::from_ref_time(118_471_672 as u64) - // Standard Error: 1_275 - .saturating_add(Weight::from_ref_time(1_345_140 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_052 nanoseconds. + Weight::from_ref_time(69_717_748 as u64) + // Standard Error: 571 + .saturating_add(Weight::from_ref_time(1_346_564 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendsi32(r: u32, ) -> Weight { - // Minimum execution time: 119_504 nanoseconds. - Weight::from_ref_time(118_940_207 as u64) - // Standard Error: 2_512 - .saturating_add(Weight::from_ref_time(1_307_954 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_016 nanoseconds. + Weight::from_ref_time(69_793_413 as u64) + // Standard Error: 769 + .saturating_add(Weight::from_ref_time(1_317_502 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64extendui32(r: u32, ) -> Weight { - // Minimum execution time: 118_009 nanoseconds. - Weight::from_ref_time(118_366_251 as u64) - // Standard Error: 1_116 - .saturating_add(Weight::from_ref_time(1_322_232 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_043 nanoseconds. + Weight::from_ref_time(69_963_419 as u64) + // Standard Error: 1_117 + .saturating_add(Weight::from_ref_time(1_313_727 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i32wrapi64(r: u32, ) -> Weight { - // Minimum execution time: 118_020 nanoseconds. - Weight::from_ref_time(118_222_792 as u64) - // Standard Error: 1_973 - .saturating_add(Weight::from_ref_time(1_336_896 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_032 nanoseconds. + Weight::from_ref_time(69_727_577 as u64) + // Standard Error: 662 + .saturating_add(Weight::from_ref_time(1_331_088 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64eq(r: u32, ) -> Weight { - // Minimum execution time: 118_038 nanoseconds. - Weight::from_ref_time(118_757_016 as u64) - // Standard Error: 7_932 - .saturating_add(Weight::from_ref_time(1_867_807 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_097 nanoseconds. + Weight::from_ref_time(69_767_650 as u64) + // Standard Error: 2_056 + .saturating_add(Weight::from_ref_time(1_875_021 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ne(r: u32, ) -> Weight { - // Minimum execution time: 118_041 nanoseconds. - Weight::from_ref_time(118_717_403 as u64) - // Standard Error: 8_010 - .saturating_add(Weight::from_ref_time(1_868_876 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_153 nanoseconds. + Weight::from_ref_time(69_906_946 as u64) + // Standard Error: 1_060 + .saturating_add(Weight::from_ref_time(1_867_154 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64lts(r: u32, ) -> Weight { - // Minimum execution time: 118_057 nanoseconds. 
- Weight::from_ref_time(118_560_106 as u64) - // Standard Error: 1_362 - .saturating_add(Weight::from_ref_time(1_869_820 as u64).saturating_mul(r as u64)) + // Minimum execution time: 70_380 nanoseconds. + Weight::from_ref_time(69_867_328 as u64) + // Standard Error: 778 + .saturating_add(Weight::from_ref_time(1_869_718 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ltu(r: u32, ) -> Weight { - // Minimum execution time: 118_045 nanoseconds. - Weight::from_ref_time(118_318_194 as u64) - // Standard Error: 179 - .saturating_add(Weight::from_ref_time(1_873_670 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_259 nanoseconds. + Weight::from_ref_time(69_695_407 as u64) + // Standard Error: 746 + .saturating_add(Weight::from_ref_time(1_874_772 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gts(r: u32, ) -> Weight { - // Minimum execution time: 118_000 nanoseconds. - Weight::from_ref_time(118_520_606 as u64) - // Standard Error: 3_656 - .saturating_add(Weight::from_ref_time(1_870_762 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_986 nanoseconds. + Weight::from_ref_time(70_027_081 as u64) + // Standard Error: 1_401 + .saturating_add(Weight::from_ref_time(1_862_971 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64gtu(r: u32, ) -> Weight { - // Minimum execution time: 118_035 nanoseconds. - Weight::from_ref_time(118_464_151 as u64) - // Standard Error: 1_073 - .saturating_add(Weight::from_ref_time(1_870_309 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_953 nanoseconds. + Weight::from_ref_time(69_798_073 as u64) + // Standard Error: 1_000 + .saturating_add(Weight::from_ref_time(1_871_888 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64les(r: u32, ) -> Weight { - // Minimum execution time: 118_071 nanoseconds. - Weight::from_ref_time(118_470_365 as u64) - // Standard Error: 1_224 - .saturating_add(Weight::from_ref_time(1_870_522 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_909 nanoseconds. + Weight::from_ref_time(69_845_981 as u64) + // Standard Error: 775 + .saturating_add(Weight::from_ref_time(1_868_722 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64leu(r: u32, ) -> Weight { - // Minimum execution time: 118_032 nanoseconds. - Weight::from_ref_time(118_407_669 as u64) - // Standard Error: 769 - .saturating_add(Weight::from_ref_time(1_883_635 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_986 nanoseconds. + Weight::from_ref_time(69_683_189 as u64) + // Standard Error: 503 + .saturating_add(Weight::from_ref_time(1_884_715 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64ges(r: u32, ) -> Weight { - // Minimum execution time: 118_044 nanoseconds. - Weight::from_ref_time(118_324_106 as u64) - // Standard Error: 523 - .saturating_add(Weight::from_ref_time(1_874_728 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_230 nanoseconds. + Weight::from_ref_time(69_765_336 as u64) + // Standard Error: 2_060 + .saturating_add(Weight::from_ref_time(1_871_848 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64geu(r: u32, ) -> Weight { - // Minimum execution time: 118_111 nanoseconds. 
- Weight::from_ref_time(118_538_748 as u64) - // Standard Error: 3_498 - .saturating_add(Weight::from_ref_time(1_868_932 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_953 nanoseconds. + Weight::from_ref_time(69_828_265 as u64) + // Standard Error: 951 + .saturating_add(Weight::from_ref_time(1_868_596 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64add(r: u32, ) -> Weight { - // Minimum execution time: 118_060 nanoseconds. - Weight::from_ref_time(118_421_175 as u64) - // Standard Error: 962 - .saturating_add(Weight::from_ref_time(1_850_713 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_078 nanoseconds. + Weight::from_ref_time(69_832_768 as u64) + // Standard Error: 894 + .saturating_add(Weight::from_ref_time(1_845_786 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64sub(r: u32, ) -> Weight { - // Minimum execution time: 118_050 nanoseconds. - Weight::from_ref_time(118_774_937 as u64) - // Standard Error: 1_717 - .saturating_add(Weight::from_ref_time(1_838_127 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_939 nanoseconds. + Weight::from_ref_time(69_676_256 as u64) + // Standard Error: 374 + .saturating_add(Weight::from_ref_time(1_851_026 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64mul(r: u32, ) -> Weight { - // Minimum execution time: 117_997 nanoseconds. - Weight::from_ref_time(118_626_785 as u64) - // Standard Error: 1_382 - .saturating_add(Weight::from_ref_time(1_842_530 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_096 nanoseconds. + Weight::from_ref_time(69_914_159 as u64) + // Standard Error: 1_265 + .saturating_add(Weight::from_ref_time(1_844_489 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divs(r: u32, ) -> Weight { - // Minimum execution time: 118_064 nanoseconds. - Weight::from_ref_time(118_404_203 as u64) - // Standard Error: 2_437 - .saturating_add(Weight::from_ref_time(2_489_569 as u64).saturating_mul(r as u64)) + // Minimum execution time: 68_939 nanoseconds. + Weight::from_ref_time(69_641_768 as u64) + // Standard Error: 347 + .saturating_add(Weight::from_ref_time(2_488_628 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64divu(r: u32, ) -> Weight { - // Minimum execution time: 117_949 nanoseconds. - Weight::from_ref_time(118_525_228 as u64) - // Standard Error: 2_231 - .saturating_add(Weight::from_ref_time(2_453_863 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_114 nanoseconds. + Weight::from_ref_time(69_844_395 as u64) + // Standard Error: 1_489 + .saturating_add(Weight::from_ref_time(2_456_310 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rems(r: u32, ) -> Weight { - // Minimum execution time: 118_032 nanoseconds. - Weight::from_ref_time(118_311_431 as u64) - // Standard Error: 175 - .saturating_add(Weight::from_ref_time(2_532_745 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_082 nanoseconds. + Weight::from_ref_time(69_993_662 as u64) + // Standard Error: 1_218 + .saturating_add(Weight::from_ref_time(2_524_010 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64remu(r: u32, ) -> Weight { - // Minimum execution time: 117_987 nanoseconds. 
- Weight::from_ref_time(118_573_115 as u64) - // Standard Error: 1_814 - .saturating_add(Weight::from_ref_time(2_438_519 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_036 nanoseconds. + Weight::from_ref_time(70_095_304 as u64) + // Standard Error: 1_473 + .saturating_add(Weight::from_ref_time(2_429_659 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64and(r: u32, ) -> Weight { - // Minimum execution time: 120_001 nanoseconds. - Weight::from_ref_time(118_527_518 as u64) - // Standard Error: 1_040 - .saturating_add(Weight::from_ref_time(1_874_359 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_229 nanoseconds. + Weight::from_ref_time(69_759_818 as u64) + // Standard Error: 573 + .saturating_add(Weight::from_ref_time(1_879_670 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64or(r: u32, ) -> Weight { - // Minimum execution time: 118_051 nanoseconds. - Weight::from_ref_time(118_315_649 as u64) - // Standard Error: 163 - .saturating_add(Weight::from_ref_time(1_851_162 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_151 nanoseconds. + Weight::from_ref_time(69_865_948 as u64) + // Standard Error: 721 + .saturating_add(Weight::from_ref_time(1_846_734 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64xor(r: u32, ) -> Weight { - // Minimum execution time: 118_038 nanoseconds. - Weight::from_ref_time(118_558_677 as u64) - // Standard Error: 2_258 - .saturating_add(Weight::from_ref_time(1_845_713 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_120 nanoseconds. + Weight::from_ref_time(70_135_849 as u64) + // Standard Error: 3_443 + .saturating_add(Weight::from_ref_time(1_841_784 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shl(r: u32, ) -> Weight { - // Minimum execution time: 117_985 nanoseconds. - Weight::from_ref_time(118_660_597 as u64) - // Standard Error: 5_893 - .saturating_add(Weight::from_ref_time(1_887_423 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_077 nanoseconds. + Weight::from_ref_time(69_929_746 as u64) + // Standard Error: 821 + .saturating_add(Weight::from_ref_time(1_866_348 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shrs(r: u32, ) -> Weight { - // Minimum execution time: 120_101 nanoseconds. - Weight::from_ref_time(118_522_424 as u64) - // Standard Error: 1_094 - .saturating_add(Weight::from_ref_time(1_867_495 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_226 nanoseconds. + Weight::from_ref_time(69_725_630 as u64) + // Standard Error: 891 + .saturating_add(Weight::from_ref_time(1_873_637 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64shru(r: u32, ) -> Weight { - // Minimum execution time: 118_068 nanoseconds. - Weight::from_ref_time(119_012_941 as u64) - // Standard Error: 9_129 - .saturating_add(Weight::from_ref_time(1_877_850 as u64).saturating_mul(r as u64)) + // Minimum execution time: 70_591 nanoseconds. + Weight::from_ref_time(69_939_773 as u64) + // Standard Error: 960 + .saturating_add(Weight::from_ref_time(1_867_208 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotl(r: u32, ) -> Weight { - // Minimum execution time: 120_105 nanoseconds. 
- Weight::from_ref_time(118_559_474 as u64) - // Standard Error: 2_055 - .saturating_add(Weight::from_ref_time(1_868_662 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_187 nanoseconds. + Weight::from_ref_time(69_845_516 as u64) + // Standard Error: 781 + .saturating_add(Weight::from_ref_time(1_869_613 as u64).saturating_mul(r as u64)) } /// The range of component `r` is `[0, 50]`. fn instr_i64rotr(r: u32, ) -> Weight { - // Minimum execution time: 118_004 nanoseconds. - Weight::from_ref_time(118_370_466 as u64) - // Standard Error: 818 - .saturating_add(Weight::from_ref_time(1_873_459 as u64).saturating_mul(r as u64)) + // Minimum execution time: 69_065 nanoseconds. + Weight::from_ref_time(69_950_430 as u64) + // Standard Error: 986 + .saturating_add(Weight::from_ref_time(1_867_001 as u64).saturating_mul(r as u64)) } } diff --git a/frame/conviction-voting/src/weights.rs b/frame/conviction-voting/src/weights.rs index 6428d82c94c39..e50842449f88a 100644 --- a/frame/conviction-voting/src/weights.rs +++ b/frame/conviction-voting/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_conviction_voting //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/conviction-voting/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -62,7 +65,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_new() -> Weight { - Weight::from_ref_time(148_804_000 as u64) + // Minimum execution time: 131_633 nanoseconds. + Weight::from_ref_time(132_742_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -72,7 +76,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_existing() -> Weight { - Weight::from_ref_time(313_333_000 as u64) + // Minimum execution time: 176_240 nanoseconds. + Weight::from_ref_time(183_274_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -80,14 +85,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn remove_vote() -> Weight { - Weight::from_ref_time(300_591_000 as u64) + // Minimum execution time: 158_880 nanoseconds. 
+ Weight::from_ref_time(164_648_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:0) fn remove_other_vote() -> Weight { - Weight::from_ref_time(53_887_000 as u64) + // Minimum execution time: 60_330 nanoseconds. + Weight::from_ref_time(61_588_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -96,10 +103,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) + /// The range of component `r` is `[0, 1]`. fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(51_518_000 as u64) - // Standard Error: 83_000 - .saturating_add(Weight::from_ref_time(27_235_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 63_088 nanoseconds. + Weight::from_ref_time(67_803_536 as u64) + // Standard Error: 197_102 + .saturating_add(Weight::from_ref_time(31_557_563 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -108,10 +117,12 @@ impl WeightInfo for SubstrateWeight { // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) + /// The range of component `r` is `[0, 1]`. fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(37_885_000 as u64) - // Standard Error: 75_000 - .saturating_add(Weight::from_ref_time(24_395_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 45_150 nanoseconds. + Weight::from_ref_time(51_547_530 as u64) + // Standard Error: 771_127 + .saturating_add(Weight::from_ref_time(26_927_969 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(2 as u64)) @@ -121,7 +132,8 @@ impl WeightInfo for SubstrateWeight { // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn unlock() -> Weight { - Weight::from_ref_time(67_703_000 as u64) + // Minimum execution time: 75_067 nanoseconds. + Weight::from_ref_time(76_888_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -135,7 +147,8 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_new() -> Weight { - Weight::from_ref_time(148_804_000 as u64) + // Minimum execution time: 131_633 nanoseconds. + Weight::from_ref_time(132_742_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -145,7 +158,8 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote_existing() -> Weight { - Weight::from_ref_time(313_333_000 as u64) + // Minimum execution time: 176_240 nanoseconds. 
+ Weight::from_ref_time(183_274_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -153,14 +167,16 @@ impl WeightInfo for () { // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn remove_vote() -> Weight { - Weight::from_ref_time(300_591_000 as u64) + // Minimum execution time: 158_880 nanoseconds. + Weight::from_ref_time(164_648_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: ConvictionVoting VotingFor (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:0) fn remove_other_vote() -> Weight { - Weight::from_ref_time(53_887_000 as u64) + // Minimum execution time: 60_330 nanoseconds. + Weight::from_ref_time(61_588_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -169,10 +185,12 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) + /// The range of component `r` is `[0, 1]`. fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(51_518_000 as u64) - // Standard Error: 83_000 - .saturating_add(Weight::from_ref_time(27_235_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 63_088 nanoseconds. + Weight::from_ref_time(67_803_536 as u64) + // Standard Error: 197_102 + .saturating_add(Weight::from_ref_time(31_557_563 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -181,10 +199,12 @@ impl WeightInfo for () { // Storage: ConvictionVoting VotingFor (r:2 w:2) // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) + /// The range of component `r` is `[0, 1]`. fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(37_885_000 as u64) - // Standard Error: 75_000 - .saturating_add(Weight::from_ref_time(24_395_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 45_150 nanoseconds. + Weight::from_ref_time(51_547_530 as u64) + // Standard Error: 771_127 + .saturating_add(Weight::from_ref_time(26_927_969 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(2 as u64)) @@ -194,7 +214,8 @@ impl WeightInfo for () { // Storage: ConvictionVoting ClassLocksFor (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn unlock() -> Weight { - Weight::from_ref_time(67_703_000 as u64) + // Minimum execution time: 75_067 nanoseconds. + Weight::from_ref_time(76_888_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index 0a3b717938022..db3969d400b97 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,23 +18,24 @@ //! Autogenerated weights for pallet_democracy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_democracy // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --pallet=pallet_democracy -// --chain=dev // --output=./frame/democracy/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -78,13 +79,15 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - Weight::from_ref_time(57_410_000 as u64) + // Minimum execution time: 56_868 nanoseconds. + Weight::from_ref_time(57_788_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy DepositOf (r:1 w:1) fn second() -> Weight { - Weight::from_ref_time(49_224_000 as u64) + // Minimum execution time: 49_328 nanoseconds. + Weight::from_ref_time(49_764_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -92,7 +95,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_new() -> Weight { - Weight::from_ref_time(60_933_000 as u64) + // Minimum execution time: 60_323 nanoseconds. + Weight::from_ref_time(61_389_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -100,14 +104,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_existing() -> Weight { - Weight::from_ref_time(60_393_000 as u64) + // Minimum execution time: 60_612 nanoseconds. + Weight::from_ref_time(61_282_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - Weight::from_ref_time(24_588_000 as u64) + // Minimum execution time: 24_780 nanoseconds. + Weight::from_ref_time(25_194_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -118,39 +124,45 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Blacklist (r:0 w:1) fn blacklist() -> Weight { - Weight::from_ref_time(91_226_000 as u64) + // Minimum execution time: 85_177 nanoseconds. 
+ Weight::from_ref_time(91_733_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) fn external_propose() -> Weight { - Weight::from_ref_time(18_898_000 as u64) + // Minimum execution time: 19_483 nanoseconds. + Weight::from_ref_time(19_914_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - Weight::from_ref_time(5_136_000 as u64) + // Minimum execution time: 4_963 nanoseconds. + Weight::from_ref_time(5_250_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - Weight::from_ref_time(5_243_000 as u64) + // Minimum execution time: 5_075 nanoseconds. + Weight::from_ref_time(5_187_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - Weight::from_ref_time(24_275_000 as u64) + // Minimum execution time: 23_956 nanoseconds. + Weight::from_ref_time(24_814_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) fn veto_external() -> Weight { - Weight::from_ref_time(30_988_000 as u64) + // Minimum execution time: 31_472 nanoseconds. + Weight::from_ref_time(31_770_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -158,13 +170,15 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn cancel_proposal() -> Weight { - Weight::from_ref_time(78_515_000 as u64) + // Minimum execution time: 73_811 nanoseconds. + Weight::from_ref_time(78_943_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - Weight::from_ref_time(16_155_000 as u64) + // Minimum execution time: 16_074 nanoseconds. + Weight::from_ref_time(16_409_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy LowestUnbaked (r:1 w:1) @@ -172,9 +186,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy ReferendumInfoOf (r:2 w:0) /// The range of component `r` is `[0, 99]`. fn on_initialize_base(r: u32, ) -> Weight { - Weight::from_ref_time(7_007_000 as u64) - // Standard Error: 2_686 - .saturating_add(Weight::from_ref_time(2_288_781 as u64).saturating_mul(r as u64)) + // Minimum execution time: 7_430 nanoseconds. + Weight::from_ref_time(12_086_064 as u64) + // Standard Error: 3_474 + .saturating_add(Weight::from_ref_time(2_283_457 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) @@ -187,9 +202,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy ReferendumInfoOf (r:2 w:0) /// The range of component `r` is `[0, 99]`. 
fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - Weight::from_ref_time(9_528_000 as u64) - // Standard Error: 2_521 - .saturating_add(Weight::from_ref_time(2_291_780 as u64).saturating_mul(r as u64)) + // Minimum execution time: 9_882 nanoseconds. + Weight::from_ref_time(14_566_711 as u64) + // Standard Error: 3_354 + .saturating_add(Weight::from_ref_time(2_282_038 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) @@ -199,9 +215,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy ReferendumInfoOf (r:2 w:2) /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(46_787_000 as u64) - // Standard Error: 2_943 - .saturating_add(Weight::from_ref_time(3_460_194 as u64).saturating_mul(r as u64)) + // Minimum execution time: 48_840 nanoseconds. + Weight::from_ref_time(56_403_092 as u64) + // Standard Error: 6_093 + .saturating_add(Weight::from_ref_time(3_344_243 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -211,9 +228,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy ReferendumInfoOf (r:2 w:2) /// The range of component `r` is `[0, 99]`. fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(29_789_000 as u64) - // Standard Error: 2_324 - .saturating_add(Weight::from_ref_time(3_360_918 as u64).saturating_mul(r as u64)) + // Minimum execution time: 30_483 nanoseconds. + Weight::from_ref_time(32_035_405 as u64) + // Standard Error: 4_383 + .saturating_add(Weight::from_ref_time(3_347_667 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(2 as u64)) @@ -221,7 +239,8 @@ impl WeightInfo for SubstrateWeight { } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - Weight::from_ref_time(6_519_000 as u64) + // Minimum execution time: 6_421 nanoseconds. + Weight::from_ref_time(6_638_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) @@ -229,9 +248,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { - Weight::from_ref_time(28_884_000 as u64) - // Standard Error: 2_631 - .saturating_add(Weight::from_ref_time(163_516 as u64).saturating_mul(r as u64)) + // Minimum execution time: 30_291 nanoseconds. + Weight::from_ref_time(37_071_950 as u64) + // Standard Error: 1_619 + .saturating_add(Weight::from_ref_time(59_302 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -240,9 +260,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { - Weight::from_ref_time(33_498_000 as u64) - // Standard Error: 622 - .saturating_add(Weight::from_ref_time(133_421 as u64).saturating_mul(r as u64)) + // Minimum execution time: 34_888 nanoseconds. 
+ Weight::from_ref_time(36_418_789 as u64) + // Standard Error: 906 + .saturating_add(Weight::from_ref_time(109_602 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -250,9 +271,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy VotingOf (r:1 w:1) /// The range of component `r` is `[1, 100]`. fn remove_vote(r: u32, ) -> Weight { - Weight::from_ref_time(18_201_000 as u64) - // Standard Error: 1_007 - .saturating_add(Weight::from_ref_time(152_699 as u64).saturating_mul(r as u64)) + // Minimum execution time: 18_739 nanoseconds. + Weight::from_ref_time(21_004_077 as u64) + // Standard Error: 1_075 + .saturating_add(Weight::from_ref_time(116_457 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -260,9 +282,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy VotingOf (r:1 w:1) /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { - Weight::from_ref_time(18_455_000 as u64) - // Standard Error: 951 - .saturating_add(Weight::from_ref_time(150_907 as u64).saturating_mul(r as u64)) + // Minimum execution time: 18_514 nanoseconds. + Weight::from_ref_time(21_030_667 as u64) + // Standard Error: 1_102 + .saturating_add(Weight::from_ref_time(118_039 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -275,13 +298,15 @@ impl WeightInfo for () { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - Weight::from_ref_time(57_410_000 as u64) + // Minimum execution time: 56_868 nanoseconds. + Weight::from_ref_time(57_788_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy DepositOf (r:1 w:1) fn second() -> Weight { - Weight::from_ref_time(49_224_000 as u64) + // Minimum execution time: 49_328 nanoseconds. + Weight::from_ref_time(49_764_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -289,7 +314,8 @@ impl WeightInfo for () { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_new() -> Weight { - Weight::from_ref_time(60_933_000 as u64) + // Minimum execution time: 60_323 nanoseconds. + Weight::from_ref_time(61_389_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -297,14 +323,16 @@ impl WeightInfo for () { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_existing() -> Weight { - Weight::from_ref_time(60_393_000 as u64) + // Minimum execution time: 60_612 nanoseconds. + Weight::from_ref_time(61_282_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - Weight::from_ref_time(24_588_000 as u64) + // Minimum execution time: 24_780 nanoseconds. 
+ Weight::from_ref_time(25_194_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -315,39 +343,45 @@ impl WeightInfo for () { // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Blacklist (r:0 w:1) fn blacklist() -> Weight { - Weight::from_ref_time(91_226_000 as u64) + // Minimum execution time: 85_177 nanoseconds. + Weight::from_ref_time(91_733_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) fn external_propose() -> Weight { - Weight::from_ref_time(18_898_000 as u64) + // Minimum execution time: 19_483 nanoseconds. + Weight::from_ref_time(19_914_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - Weight::from_ref_time(5_136_000 as u64) + // Minimum execution time: 4_963 nanoseconds. + Weight::from_ref_time(5_250_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - Weight::from_ref_time(5_243_000 as u64) + // Minimum execution time: 5_075 nanoseconds. + Weight::from_ref_time(5_187_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - Weight::from_ref_time(24_275_000 as u64) + // Minimum execution time: 23_956 nanoseconds. + Weight::from_ref_time(24_814_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) fn veto_external() -> Weight { - Weight::from_ref_time(30_988_000 as u64) + // Minimum execution time: 31_472 nanoseconds. + Weight::from_ref_time(31_770_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -355,13 +389,15 @@ impl WeightInfo for () { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn cancel_proposal() -> Weight { - Weight::from_ref_time(78_515_000 as u64) + // Minimum execution time: 73_811 nanoseconds. + Weight::from_ref_time(78_943_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - Weight::from_ref_time(16_155_000 as u64) + // Minimum execution time: 16_074 nanoseconds. + Weight::from_ref_time(16_409_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy LowestUnbaked (r:1 w:1) @@ -369,9 +405,10 @@ impl WeightInfo for () { // Storage: Democracy ReferendumInfoOf (r:2 w:0) /// The range of component `r` is `[0, 99]`. fn on_initialize_base(r: u32, ) -> Weight { - Weight::from_ref_time(7_007_000 as u64) - // Standard Error: 2_686 - .saturating_add(Weight::from_ref_time(2_288_781 as u64).saturating_mul(r as u64)) + // Minimum execution time: 7_430 nanoseconds. 
+ Weight::from_ref_time(12_086_064 as u64) + // Standard Error: 3_474 + .saturating_add(Weight::from_ref_time(2_283_457 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) @@ -384,9 +421,10 @@ impl WeightInfo for () { // Storage: Democracy ReferendumInfoOf (r:2 w:0) /// The range of component `r` is `[0, 99]`. fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - Weight::from_ref_time(9_528_000 as u64) - // Standard Error: 2_521 - .saturating_add(Weight::from_ref_time(2_291_780 as u64).saturating_mul(r as u64)) + // Minimum execution time: 9_882 nanoseconds. + Weight::from_ref_time(14_566_711 as u64) + // Standard Error: 3_354 + .saturating_add(Weight::from_ref_time(2_282_038 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) @@ -396,9 +434,10 @@ impl WeightInfo for () { // Storage: Democracy ReferendumInfoOf (r:2 w:2) /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(46_787_000 as u64) - // Standard Error: 2_943 - .saturating_add(Weight::from_ref_time(3_460_194 as u64).saturating_mul(r as u64)) + // Minimum execution time: 48_840 nanoseconds. + Weight::from_ref_time(56_403_092 as u64) + // Standard Error: 6_093 + .saturating_add(Weight::from_ref_time(3_344_243 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -408,9 +447,10 @@ impl WeightInfo for () { // Storage: Democracy ReferendumInfoOf (r:2 w:2) /// The range of component `r` is `[0, 99]`. fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(29_789_000 as u64) - // Standard Error: 2_324 - .saturating_add(Weight::from_ref_time(3_360_918 as u64).saturating_mul(r as u64)) + // Minimum execution time: 30_483 nanoseconds. + Weight::from_ref_time(32_035_405 as u64) + // Standard Error: 4_383 + .saturating_add(Weight::from_ref_time(3_347_667 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(2 as u64)) @@ -418,7 +458,8 @@ impl WeightInfo for () { } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - Weight::from_ref_time(6_519_000 as u64) + // Minimum execution time: 6_421 nanoseconds. + Weight::from_ref_time(6_638_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) @@ -426,9 +467,10 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { - Weight::from_ref_time(28_884_000 as u64) - // Standard Error: 2_631 - .saturating_add(Weight::from_ref_time(163_516 as u64).saturating_mul(r as u64)) + // Minimum execution time: 30_291 nanoseconds. 
+ Weight::from_ref_time(37_071_950 as u64) + // Standard Error: 1_619 + .saturating_add(Weight::from_ref_time(59_302 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -437,9 +479,10 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { - Weight::from_ref_time(33_498_000 as u64) - // Standard Error: 622 - .saturating_add(Weight::from_ref_time(133_421 as u64).saturating_mul(r as u64)) + // Minimum execution time: 34_888 nanoseconds. + Weight::from_ref_time(36_418_789 as u64) + // Standard Error: 906 + .saturating_add(Weight::from_ref_time(109_602 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -447,9 +490,10 @@ impl WeightInfo for () { // Storage: Democracy VotingOf (r:1 w:1) /// The range of component `r` is `[1, 100]`. fn remove_vote(r: u32, ) -> Weight { - Weight::from_ref_time(18_201_000 as u64) - // Standard Error: 1_007 - .saturating_add(Weight::from_ref_time(152_699 as u64).saturating_mul(r as u64)) + // Minimum execution time: 18_739 nanoseconds. + Weight::from_ref_time(21_004_077 as u64) + // Standard Error: 1_075 + .saturating_add(Weight::from_ref_time(116_457 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -457,9 +501,10 @@ impl WeightInfo for () { // Storage: Democracy VotingOf (r:1 w:1) /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { - Weight::from_ref_time(18_455_000 as u64) - // Standard Error: 951 - .saturating_add(Weight::from_ref_time(150_907 as u64).saturating_mul(r as u64)) + // Minimum execution time: 18_514 nanoseconds. + Weight::from_ref_time(21_030_667 as u64) + // Standard Error: 1_102 + .saturating_add(Weight::from_ref_time(118_039 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 5e5191242bd77..221fd5837f7b7 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_election_provider_multi_phase //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/election-provider-multi-phase/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -68,45 +71,51 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - Weight::from_ref_time(13_495_000 as u64) + // Minimum execution time: 17_309 nanoseconds. + Weight::from_ref_time(17_646_000 as u64) .saturating_add(T::DbWeight::get().reads(8 as u64)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - Weight::from_ref_time(14_114_000 as u64) + // Minimum execution time: 17_992 nanoseconds. + Weight::from_ref_time(18_426_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - Weight::from_ref_time(13_756_000 as u64) + // Minimum execution time: 17_340 nanoseconds. + Weight::from_ref_time(17_881_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - Weight::from_ref_time(28_467_000 as u64) + // Minimum execution time: 35_571 nanoseconds. + Weight::from_ref_time(35_989_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - Weight::from_ref_time(21_991_000 as u64) + // Minimum execution time: 27_403 nanoseconds. + Weight::from_ref_time(27_879_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { - Weight::from_ref_time(3_186_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(202_000 as u64).saturating_mul(v as u64)) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(60_000 as u64).saturating_mul(t as u64)) + /// The range of component `v` is `[1000, 2000]`. + /// The range of component `t` is `[500, 1000]`. + fn create_snapshot_internal(v: u32, _t: u32, ) -> Weight { + // Minimum execution time: 571_900 nanoseconds. 
+ Weight::from_ref_time(589_170_000 as u64) + // Standard Error: 7_123 + .saturating_add(Weight::from_ref_time(384_767 as u64).saturating_mul(v as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -118,12 +127,15 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) + /// The range of component `a` is `[500, 800]`. + /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(137_653_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(640_000 as u64).saturating_mul(a as u64)) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(48_000 as u64).saturating_mul(d as u64)) + // Minimum execution time: 1_296_481 nanoseconds. + Weight::from_ref_time(1_076_121_575 as u64) + // Standard Error: 5_708 + .saturating_add(Weight::from_ref_time(474_995 as u64).saturating_mul(a as u64)) + // Standard Error: 8_556 + .saturating_add(Weight::from_ref_time(39_224 as u64).saturating_mul(d as u64)) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(8 as u64)) } @@ -134,7 +146,8 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit() -> Weight { - Weight::from_ref_time(49_313_000 as u64) + // Minimum execution time: 58_716 nanoseconds. + Weight::from_ref_time(59_480_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -145,16 +158,17 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(867_000 as u64).saturating_mul(v as u64)) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(107_000 as u64).saturating_mul(t as u64)) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(6_907_000 as u64).saturating_mul(a as u64)) - // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(1_427_000 as u64).saturating_mul(d as u64)) + /// The range of component `v` is `[1000, 2000]`. + /// The range of component `t` is `[500, 1000]`. + /// The range of component `a` is `[500, 800]`. + /// The range of component `d` is `[200, 400]`. + fn submit_unsigned(v: u32, _t: u32, a: u32, _d: u32, ) -> Weight { + // Minimum execution time: 5_540_737 nanoseconds. 
+ Weight::from_ref_time(5_567_381_000 as u64) + // Standard Error: 18_563 + .saturating_add(Weight::from_ref_time(603_280 as u64).saturating_mul(v as u64)) + // Standard Error: 55_009 + .saturating_add(Weight::from_ref_time(3_164_053 as u64).saturating_mul(a as u64)) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -162,16 +176,17 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(844_000 as u64).saturating_mul(v as u64)) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(150_000 as u64).saturating_mul(t as u64)) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(5_421_000 as u64).saturating_mul(a as u64)) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(1_167_000 as u64).saturating_mul(d as u64)) + /// The range of component `v` is `[1000, 2000]`. + /// The range of component `t` is `[500, 1000]`. + /// The range of component `a` is `[500, 800]`. + /// The range of component `d` is `[200, 400]`. + fn feasibility_check(v: u32, _t: u32, a: u32, _d: u32, ) -> Weight { + // Minimum execution time: 5_021_808 nanoseconds. + Weight::from_ref_time(5_051_856_000 as u64) + // Standard Error: 16_650 + .saturating_add(Weight::from_ref_time(683_344 as u64).saturating_mul(v as u64)) + // Standard Error: 49_342 + .saturating_add(Weight::from_ref_time(2_190_098 as u64).saturating_mul(a as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) } } @@ -187,45 +202,51 @@ impl WeightInfo for () { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - Weight::from_ref_time(13_495_000 as u64) + // Minimum execution time: 17_309 nanoseconds. + Weight::from_ref_time(17_646_000 as u64) .saturating_add(RocksDbWeight::get().reads(8 as u64)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - Weight::from_ref_time(14_114_000 as u64) + // Minimum execution time: 17_992 nanoseconds. + Weight::from_ref_time(18_426_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - Weight::from_ref_time(13_756_000 as u64) + // Minimum execution time: 17_340 nanoseconds. + Weight::from_ref_time(17_881_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - Weight::from_ref_time(28_467_000 as u64) + // Minimum execution time: 35_571 nanoseconds. 
+ Weight::from_ref_time(35_989_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - Weight::from_ref_time(21_991_000 as u64) + // Minimum execution time: 27_403 nanoseconds. + Weight::from_ref_time(27_879_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { - Weight::from_ref_time(3_186_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(202_000 as u64).saturating_mul(v as u64)) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(60_000 as u64).saturating_mul(t as u64)) + /// The range of component `v` is `[1000, 2000]`. + /// The range of component `t` is `[500, 1000]`. + fn create_snapshot_internal(v: u32, _t: u32, ) -> Weight { + // Minimum execution time: 571_900 nanoseconds. + Weight::from_ref_time(589_170_000 as u64) + // Standard Error: 7_123 + .saturating_add(Weight::from_ref_time(384_767 as u64).saturating_mul(v as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -237,12 +258,15 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) + /// The range of component `a` is `[500, 800]`. + /// The range of component `d` is `[200, 400]`. fn elect_queued(a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(137_653_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(640_000 as u64).saturating_mul(a as u64)) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(48_000 as u64).saturating_mul(d as u64)) + // Minimum execution time: 1_296_481 nanoseconds. + Weight::from_ref_time(1_076_121_575 as u64) + // Standard Error: 5_708 + .saturating_add(Weight::from_ref_time(474_995 as u64).saturating_mul(a as u64)) + // Standard Error: 8_556 + .saturating_add(Weight::from_ref_time(39_224 as u64).saturating_mul(d as u64)) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(8 as u64)) } @@ -253,7 +277,8 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit() -> Weight { - Weight::from_ref_time(49_313_000 as u64) + // Minimum execution time: 58_716 nanoseconds. 
+ Weight::from_ref_time(59_480_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -264,16 +289,17 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(867_000 as u64).saturating_mul(v as u64)) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(107_000 as u64).saturating_mul(t as u64)) - // Standard Error: 12_000 - .saturating_add(Weight::from_ref_time(6_907_000 as u64).saturating_mul(a as u64)) - // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(1_427_000 as u64).saturating_mul(d as u64)) + /// The range of component `v` is `[1000, 2000]`. + /// The range of component `t` is `[500, 1000]`. + /// The range of component `a` is `[500, 800]`. + /// The range of component `d` is `[200, 400]`. + fn submit_unsigned(v: u32, _t: u32, a: u32, _d: u32, ) -> Weight { + // Minimum execution time: 5_540_737 nanoseconds. + Weight::from_ref_time(5_567_381_000 as u64) + // Standard Error: 18_563 + .saturating_add(Weight::from_ref_time(603_280 as u64).saturating_mul(v as u64)) + // Standard Error: 55_009 + .saturating_add(Weight::from_ref_time(3_164_053 as u64).saturating_mul(a as u64)) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -281,16 +307,17 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(844_000 as u64).saturating_mul(v as u64)) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(150_000 as u64).saturating_mul(t as u64)) - // Standard Error: 8_000 - .saturating_add(Weight::from_ref_time(5_421_000 as u64).saturating_mul(a as u64)) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(1_167_000 as u64).saturating_mul(d as u64)) + /// The range of component `v` is `[1000, 2000]`. + /// The range of component `t` is `[500, 1000]`. + /// The range of component `a` is `[500, 800]`. + /// The range of component `d` is `[200, 400]`. + fn feasibility_check(v: u32, _t: u32, a: u32, _d: u32, ) -> Weight { + // Minimum execution time: 5_021_808 nanoseconds. + Weight::from_ref_time(5_051_856_000 as u64) + // Standard Error: 16_650 + .saturating_add(Weight::from_ref_time(683_344 as u64).saturating_mul(v as u64)) + // Standard Error: 49_342 + .saturating_add(Weight::from_ref_time(2_190_098 as u64).saturating_mul(a as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) } } diff --git a/frame/elections-phragmen/src/weights.rs b/frame/elections-phragmen/src/weights.rs index 52a5dedc723d4..ddc55b08750d5 100644 --- a/frame/elections-phragmen/src/weights.rs +++ b/frame/elections-phragmen/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,23 +18,24 @@ //! Autogenerated weights for pallet_elections_phragmen //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-08-08, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_elections_phragmen // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --pallet=pallet_elections_phragmen -// --chain=dev // --output=./frame/elections-phragmen/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -70,9 +71,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - Weight::from_ref_time(27_011_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(214_000 as u64).saturating_mul(v as u64)) + // Minimum execution time: 38_496 nanoseconds. + Weight::from_ref_time(39_424_348 as u64) + // Standard Error: 3_547 + .saturating_add(Weight::from_ref_time(119_971 as u64).saturating_mul(v as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -83,9 +85,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - Weight::from_ref_time(40_240_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(244_000 as u64).saturating_mul(v as u64)) + // Minimum execution time: 49_459 nanoseconds. + Weight::from_ref_time(50_225_486 as u64) + // Standard Error: 3_160 + .saturating_add(Weight::from_ref_time(170_360 as u64).saturating_mul(v as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -96,16 +99,18 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - Weight::from_ref_time(40_394_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(217_000 as u64).saturating_mul(v as u64)) + // Minimum execution time: 48_712 nanoseconds. + Weight::from_ref_time(49_463_298 as u64) + // Standard Error: 2_678 + .saturating_add(Weight::from_ref_time(231_771 as u64).saturating_mul(v as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - Weight::from_ref_time(37_651_000 as u64) + // Minimum execution time: 48_359 nanoseconds. 
+ Weight::from_ref_time(48_767_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -114,18 +119,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - Weight::from_ref_time(42_217_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(50_000 as u64).saturating_mul(c as u64)) + // Minimum execution time: 43_369 nanoseconds. + Weight::from_ref_time(49_587_113 as u64) + // Standard Error: 1_008 + .saturating_add(Weight::from_ref_time(77_752 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { - Weight::from_ref_time(46_459_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(26_000 as u64).saturating_mul(c as u64)) + // Minimum execution time: 41_321 nanoseconds. + Weight::from_ref_time(50_803_289 as u64) + // Standard Error: 1_159 + .saturating_add(Weight::from_ref_time(57_239 as u64).saturating_mul(c as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -135,18 +142,21 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - Weight::from_ref_time(45_189_000 as u64) + // Minimum execution time: 53_542 nanoseconds. + Weight::from_ref_time(54_481_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - Weight::from_ref_time(34_516_000 as u64) + // Minimum execution time: 41_825 nanoseconds. + Weight::from_ref_time(42_248_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Benchmark Override (r:0 w:0) fn remove_member_without_replacement() -> Weight { + // Minimum execution time: 2_000_000_000 nanoseconds. Weight::from_ref_time(2_000_000_000_000 as u64) } // Storage: Elections Members (r:1 w:1) @@ -156,7 +166,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - Weight::from_ref_time(51_838_000 as u64) + // Minimum execution time: 62_600 nanoseconds. + Weight::from_ref_time(63_152_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } @@ -167,11 +178,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:5000 w:5000) // Storage: System Account (r:5000 w:5000) /// The range of component `v` is `[5000, 10000]`. - /// The range of component `d` is `[1, 5000]`. + /// The range of component `d` is `[0, 5000]`. fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 76_000 - .saturating_add(Weight::from_ref_time(63_721_000 as u64).saturating_mul(v as u64)) + // Minimum execution time: 297_149_264 nanoseconds. 
+ Weight::from_ref_time(297_898_499_000 as u64) + // Standard Error: 263_819 + .saturating_add(Weight::from_ref_time(37_914_985 as u64).saturating_mul(v as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(v as u64))) .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(v as u64))) @@ -189,14 +201,16 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[1, 10000]`. /// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 773_000 - .saturating_add(Weight::from_ref_time(81_534_000 as u64).saturating_mul(v as u64)) - // Standard Error: 51_000 - .saturating_add(Weight::from_ref_time(4_453_000 as u64).saturating_mul(e as u64)) + // Minimum execution time: 22_034_317 nanoseconds. + Weight::from_ref_time(22_110_020_000 as u64) + // Standard Error: 235_528 + .saturating_add(Weight::from_ref_time(25_553_585 as u64).saturating_mul(v as u64)) + // Standard Error: 15_114 + .saturating_add(Weight::from_ref_time(1_032_330 as u64).saturating_mul(e as u64)) .saturating_add(T::DbWeight::get().reads(280 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(c as u64))) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + .saturating_add(T::DbWeight::get().writes(6 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(c as u64))) } } @@ -210,9 +224,10 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[1, 16]`. fn vote_equal(v: u32, ) -> Weight { - Weight::from_ref_time(27_011_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(214_000 as u64).saturating_mul(v as u64)) + // Minimum execution time: 38_496 nanoseconds. + Weight::from_ref_time(39_424_348 as u64) + // Standard Error: 3_547 + .saturating_add(Weight::from_ref_time(119_971 as u64).saturating_mul(v as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -223,9 +238,10 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_more(v: u32, ) -> Weight { - Weight::from_ref_time(40_240_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(244_000 as u64).saturating_mul(v as u64)) + // Minimum execution time: 49_459 nanoseconds. + Weight::from_ref_time(50_225_486 as u64) + // Standard Error: 3_160 + .saturating_add(Weight::from_ref_time(170_360 as u64).saturating_mul(v as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -236,16 +252,18 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) /// The range of component `v` is `[2, 16]`. fn vote_less(v: u32, ) -> Weight { - Weight::from_ref_time(40_394_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(217_000 as u64).saturating_mul(v as u64)) + // Minimum execution time: 48_712 nanoseconds. 
+ Weight::from_ref_time(49_463_298 as u64) + // Standard Error: 2_678 + .saturating_add(Weight::from_ref_time(231_771 as u64).saturating_mul(v as u64)) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Elections Voting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn remove_voter() -> Weight { - Weight::from_ref_time(37_651_000 as u64) + // Minimum execution time: 48_359 nanoseconds. + Weight::from_ref_time(48_767_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -254,18 +272,20 @@ impl WeightInfo for () { // Storage: Elections RunnersUp (r:1 w:0) /// The range of component `c` is `[1, 1000]`. fn submit_candidacy(c: u32, ) -> Weight { - Weight::from_ref_time(42_217_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(50_000 as u64).saturating_mul(c as u64)) + // Minimum execution time: 43_369 nanoseconds. + Weight::from_ref_time(49_587_113 as u64) + // Standard Error: 1_008 + .saturating_add(Weight::from_ref_time(77_752 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Elections Candidates (r:1 w:1) /// The range of component `c` is `[1, 1000]`. fn renounce_candidacy_candidate(c: u32, ) -> Weight { - Weight::from_ref_time(46_459_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(26_000 as u64).saturating_mul(c as u64)) + // Minimum execution time: 41_321 nanoseconds. + Weight::from_ref_time(50_803_289 as u64) + // Standard Error: 1_159 + .saturating_add(Weight::from_ref_time(57_239 as u64).saturating_mul(c as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -275,18 +295,21 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn renounce_candidacy_members() -> Weight { - Weight::from_ref_time(45_189_000 as u64) + // Minimum execution time: 53_542 nanoseconds. + Weight::from_ref_time(54_481_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Elections RunnersUp (r:1 w:1) fn renounce_candidacy_runners_up() -> Weight { - Weight::from_ref_time(34_516_000 as u64) + // Minimum execution time: 41_825 nanoseconds. + Weight::from_ref_time(42_248_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Benchmark Override (r:0 w:0) fn remove_member_without_replacement() -> Weight { + // Minimum execution time: 2_000_000_000 nanoseconds. Weight::from_ref_time(2_000_000_000_000 as u64) } // Storage: Elections Members (r:1 w:1) @@ -296,7 +319,8 @@ impl WeightInfo for () { // Storage: Council Proposals (r:1 w:0) // Storage: Council Members (r:0 w:1) fn remove_member_with_replacement() -> Weight { - Weight::from_ref_time(51_838_000 as u64) + // Minimum execution time: 62_600 nanoseconds. + Weight::from_ref_time(63_152_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } @@ -307,11 +331,12 @@ impl WeightInfo for () { // Storage: Balances Locks (r:5000 w:5000) // Storage: System Account (r:5000 w:5000) /// The range of component `v` is `[5000, 10000]`. - /// The range of component `d` is `[1, 5000]`. + /// The range of component `d` is `[0, 5000]`. 
fn clean_defunct_voters(v: u32, _d: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 76_000 - .saturating_add(Weight::from_ref_time(63_721_000 as u64).saturating_mul(v as u64)) + // Minimum execution time: 297_149_264 nanoseconds. + Weight::from_ref_time(297_898_499_000 as u64) + // Standard Error: 263_819 + .saturating_add(Weight::from_ref_time(37_914_985 as u64).saturating_mul(v as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(v as u64))) .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(v as u64))) @@ -329,14 +354,16 @@ impl WeightInfo for () { /// The range of component `v` is `[1, 10000]`. /// The range of component `e` is `[10000, 160000]`. fn election_phragmen(c: u32, v: u32, e: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 773_000 - .saturating_add(Weight::from_ref_time(81_534_000 as u64).saturating_mul(v as u64)) - // Standard Error: 51_000 - .saturating_add(Weight::from_ref_time(4_453_000 as u64).saturating_mul(e as u64)) + // Minimum execution time: 22_034_317 nanoseconds. + Weight::from_ref_time(22_110_020_000 as u64) + // Standard Error: 235_528 + .saturating_add(Weight::from_ref_time(25_553_585 as u64).saturating_mul(v as u64)) + // Standard Error: 15_114 + .saturating_add(Weight::from_ref_time(1_032_330 as u64).saturating_mul(e as u64)) .saturating_add(RocksDbWeight::get().reads(280 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(c as u64))) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(c as u64))) } } diff --git a/frame/fast-unstake/src/weights.rs b/frame/fast-unstake/src/weights.rs index 04857d0dcc865..6001250e8c24d 100644 --- a/frame/fast-unstake/src/weights.rs +++ b/frame/fast-unstake/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,23 +18,25 @@ //! Autogenerated weights for pallet_fast_unstake //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-07, STEPS: `10`, REPEAT: 1, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `Kians-MacBook-Pro-2.local`, CPU: `` -//! EXECUTION: Some(Native), WASM-EXECUTION: Compiled, CHAIN: None, DB CACHE: 1024 +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/substrate +// ./target/production/substrate // benchmark // pallet -// --steps=10 -// --repeat=1 +// --chain=dev +// --steps=50 +// --repeat=20 // --pallet=pallet_fast_unstake // --extrinsic=* -// --execution=native -// --output -// weight.rs -// --template -// ./.maintain/frame-weight-template.hbs +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/fast-unstake/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -61,25 +63,18 @@ impl WeightInfo for SubstrateWeight { // Storage: FastUnstake Head (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: Staking Bonded (r:2 w:1) - // Storage: Staking Ledger (r:2 w:2) + // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) - // Storage: System Account (r:3 w:2) - // Storage: Balances Locks (r:2 w:2) - // Storage: NominationPools MinJoinBond (r:1 w:0) - // Storage: NominationPools PoolMembers (r:1 w:1) - // Storage: NominationPools BondedPools (r:1 w:1) - // Storage: NominationPools RewardPools (r:1 w:1) - // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) - // Storage: NominationPools MaxPoolMembers (r:1 w:0) - // Storage: NominationPools CounterForPoolMembers (r:1 w:1) - // Storage: BagsList ListNodes (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) fn on_idle_unstake() -> Weight { - Weight::from_ref_time(102_000_000 as u64) - .saturating_add(T::DbWeight::get().reads(25 as u64)) - .saturating_add(T::DbWeight::get().writes(13 as u64)) + // Minimum execution time: 82_426 nanoseconds. + Weight::from_ref_time(83_422_000 as u64) + .saturating_add(T::DbWeight::get().reads(11 as u64)) + .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking ValidatorCount (r:1 w:0) @@ -91,42 +86,49 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasStakers (r:1344 w:0) /// The range of component `x` is `[672, 86016]`. fn on_idle_check(x: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 244_000 - .saturating_add(Weight::from_ref_time(13_913_000 as u64).saturating_mul(x as u64)) - .saturating_add(T::DbWeight::get().reads(585 as u64)) + // Minimum execution time: 13_932_777 nanoseconds. 
+ Weight::from_ref_time(13_996_029_000 as u64) + // Standard Error: 16_878 + .saturating_add(Weight::from_ref_time(18_113_540 as u64).saturating_mul(x as u64)) + .saturating_add(T::DbWeight::get().reads(345 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(x as u64))) .saturating_add(T::DbWeight::get().writes(3 as u64)) } + // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) - // Storage: Staking Nominators (r:1 w:1) // Storage: FastUnstake Queue (r:1 w:1) // Storage: FastUnstake Head (r:1 w:0) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:1 w:1) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:1 w:1) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: FastUnstake CounterForQueue (r:1 w:1) fn register_fast_unstake() -> Weight { - Weight::from_ref_time(57_000_000 as u64) - .saturating_add(T::DbWeight::get().reads(12 as u64)) + // Minimum execution time: 120_190 nanoseconds. + Weight::from_ref_time(121_337_000 as u64) + .saturating_add(T::DbWeight::get().reads(14 as u64)) .saturating_add(T::DbWeight::get().writes(9 as u64)) } + // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: FastUnstake Queue (r:1 w:1) // Storage: FastUnstake Head (r:1 w:0) // Storage: FastUnstake CounterForQueue (r:1 w:1) fn deregister() -> Weight { - Weight::from_ref_time(15_000_000 as u64) - .saturating_add(T::DbWeight::get().reads(4 as u64)) + // Minimum execution time: 49_897 nanoseconds. + Weight::from_ref_time(50_080_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) fn control() -> Weight { - Weight::from_ref_time(3_000_000 as u64) + // Minimum execution time: 4_814 nanoseconds. 
+ Weight::from_ref_time(4_997_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } } @@ -139,25 +141,18 @@ impl WeightInfo for () { // Storage: FastUnstake Head (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: Staking Bonded (r:2 w:1) - // Storage: Staking Ledger (r:2 w:2) + // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) - // Storage: System Account (r:3 w:2) - // Storage: Balances Locks (r:2 w:2) - // Storage: NominationPools MinJoinBond (r:1 w:0) - // Storage: NominationPools PoolMembers (r:1 w:1) - // Storage: NominationPools BondedPools (r:1 w:1) - // Storage: NominationPools RewardPools (r:1 w:1) - // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) - // Storage: NominationPools MaxPoolMembers (r:1 w:0) - // Storage: NominationPools CounterForPoolMembers (r:1 w:1) - // Storage: BagsList ListNodes (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Ledger (r:0 w:1) // Storage: Staking Payee (r:0 w:1) fn on_idle_unstake() -> Weight { - Weight::from_ref_time(102_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(25 as u64)) - .saturating_add(RocksDbWeight::get().writes(13 as u64)) + // Minimum execution time: 82_426 nanoseconds. + Weight::from_ref_time(83_422_000 as u64) + .saturating_add(RocksDbWeight::get().reads(11 as u64)) + .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking ValidatorCount (r:1 w:0) @@ -169,42 +164,49 @@ impl WeightInfo for () { // Storage: Staking ErasStakers (r:1344 w:0) /// The range of component `x` is `[672, 86016]`. fn on_idle_check(x: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 244_000 - .saturating_add(Weight::from_ref_time(13_913_000 as u64).saturating_mul(x as u64)) - .saturating_add(RocksDbWeight::get().reads(585 as u64)) + // Minimum execution time: 13_932_777 nanoseconds. + Weight::from_ref_time(13_996_029_000 as u64) + // Standard Error: 16_878 + .saturating_add(Weight::from_ref_time(18_113_540 as u64).saturating_mul(x as u64)) + .saturating_add(RocksDbWeight::get().reads(345 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(x as u64))) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } + // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) - // Storage: Staking Nominators (r:1 w:1) // Storage: FastUnstake Queue (r:1 w:1) // Storage: FastUnstake Head (r:1 w:0) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:1 w:1) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:1 w:1) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: FastUnstake CounterForQueue (r:1 w:1) fn register_fast_unstake() -> Weight { - Weight::from_ref_time(57_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(12 as u64)) + // Minimum execution time: 120_190 nanoseconds. 
+ Weight::from_ref_time(121_337_000 as u64) + .saturating_add(RocksDbWeight::get().reads(14 as u64)) .saturating_add(RocksDbWeight::get().writes(9 as u64)) } + // Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: FastUnstake Queue (r:1 w:1) // Storage: FastUnstake Head (r:1 w:0) // Storage: FastUnstake CounterForQueue (r:1 w:1) fn deregister() -> Weight { - Weight::from_ref_time(15_000_000 as u64) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) + // Minimum execution time: 49_897 nanoseconds. + Weight::from_ref_time(50_080_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: FastUnstake ErasToCheckPerBlock (r:0 w:1) fn control() -> Weight { - Weight::from_ref_time(3_000_000 as u64) + // Minimum execution time: 4_814 nanoseconds. + Weight::from_ref_time(4_997_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } } diff --git a/frame/gilt/src/weights.rs b/frame/gilt/src/weights.rs index 15eeb7c5104cd..82b199b1a6663 100644 --- a/frame/gilt/src/weights.rs +++ b/frame/gilt/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_gilt //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/gilt/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -59,67 +62,79 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) + /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { - Weight::from_ref_time(41_605_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(62_000 as u64).saturating_mul(l as u64)) + // Minimum execution time: 42_332 nanoseconds. + Weight::from_ref_time(45_584_514 as u64) + // Standard Error: 129 + .saturating_add(Weight::from_ref_time(45_727 as u64).saturating_mul(l as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid_max() -> Weight { - Weight::from_ref_time(97_715_000 as u64) + // Minimum execution time: 85_866 nanoseconds. + Weight::from_ref_time(87_171_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) + /// The range of component `l` is `[1, 1000]`. 
fn retract_bid(l: u32, ) -> Weight { - Weight::from_ref_time(42_061_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(l as u64)) + // Minimum execution time: 44_605 nanoseconds. + Weight::from_ref_time(46_850_108 as u64) + // Standard Error: 135 + .saturating_add(Weight::from_ref_time(34_178 as u64).saturating_mul(l as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:1) fn set_target() -> Weight { - Weight::from_ref_time(5_026_000 as u64) + // Minimum execution time: 7_331 nanoseconds. + Weight::from_ref_time(7_619_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Gilt Active (r:1 w:1) // Storage: Gilt ActiveTotal (r:1 w:1) fn thaw() -> Weight { - Weight::from_ref_time(47_753_000 as u64) + // Minimum execution time: 55_143 nanoseconds. + Weight::from_ref_time(55_845_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:0) fn pursue_target_noop() -> Weight { - Weight::from_ref_time(1_663_000 as u64) + // Minimum execution time: 3_386 nanoseconds. + Weight::from_ref_time(3_461_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) - // Storage: Gilt Active (r:0 w:1) + // Storage: Gilt Active (r:0 w:20) + /// The range of component `b` is `[0, 1000]`. fn pursue_target_per_item(b: u32, ) -> Weight { - Weight::from_ref_time(40_797_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(4_122_000 as u64).saturating_mul(b as u64)) + // Minimum execution time: 34_156 nanoseconds. + Weight::from_ref_time(45_262_859 as u64) + // Standard Error: 1_529 + .saturating_add(Weight::from_ref_time(4_181_654 as u64).saturating_mul(b as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(b as u64))) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) - // Storage: Gilt Queues (r:1 w:1) - // Storage: Gilt Active (r:0 w:1) + // Storage: Gilt Queues (r:6 w:6) + // Storage: Gilt Active (r:0 w:6) + /// The range of component `q` is `[0, 300]`. fn pursue_target_per_queue(q: u32, ) -> Weight { - Weight::from_ref_time(14_944_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(8_135_000 as u64).saturating_mul(q as u64)) + // Minimum execution time: 33_526 nanoseconds. + Weight::from_ref_time(37_255_562 as u64) + // Standard Error: 3_611 + .saturating_add(Weight::from_ref_time(7_193_128 as u64).saturating_mul(q as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(q as u64))) .saturating_add(T::DbWeight::get().writes(2 as u64)) @@ -131,67 +146,79 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) + /// The range of component `l` is `[0, 999]`. fn place_bid(l: u32, ) -> Weight { - Weight::from_ref_time(41_605_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(62_000 as u64).saturating_mul(l as u64)) + // Minimum execution time: 42_332 nanoseconds. 
+ Weight::from_ref_time(45_584_514 as u64) + // Standard Error: 129 + .saturating_add(Weight::from_ref_time(45_727 as u64).saturating_mul(l as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) fn place_bid_max() -> Weight { - Weight::from_ref_time(97_715_000 as u64) + // Minimum execution time: 85_866 nanoseconds. + Weight::from_ref_time(87_171_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Gilt Queues (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) + /// The range of component `l` is `[1, 1000]`. fn retract_bid(l: u32, ) -> Weight { - Weight::from_ref_time(42_061_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(l as u64)) + // Minimum execution time: 44_605 nanoseconds. + Weight::from_ref_time(46_850_108 as u64) + // Standard Error: 135 + .saturating_add(Weight::from_ref_time(34_178 as u64).saturating_mul(l as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:1) fn set_target() -> Weight { - Weight::from_ref_time(5_026_000 as u64) + // Minimum execution time: 7_331 nanoseconds. + Weight::from_ref_time(7_619_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Gilt Active (r:1 w:1) // Storage: Gilt ActiveTotal (r:1 w:1) fn thaw() -> Weight { - Weight::from_ref_time(47_753_000 as u64) + // Minimum execution time: 55_143 nanoseconds. + Weight::from_ref_time(55_845_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:0) fn pursue_target_noop() -> Weight { - Weight::from_ref_time(1_663_000 as u64) + // Minimum execution time: 3_386 nanoseconds. + Weight::from_ref_time(3_461_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) // Storage: Gilt Queues (r:1 w:1) - // Storage: Gilt Active (r:0 w:1) + // Storage: Gilt Active (r:0 w:20) + /// The range of component `b` is `[0, 1000]`. fn pursue_target_per_item(b: u32, ) -> Weight { - Weight::from_ref_time(40_797_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(4_122_000 as u64).saturating_mul(b as u64)) + // Minimum execution time: 34_156 nanoseconds. + Weight::from_ref_time(45_262_859 as u64) + // Standard Error: 1_529 + .saturating_add(Weight::from_ref_time(4_181_654 as u64).saturating_mul(b as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(b as u64))) } // Storage: Gilt ActiveTotal (r:1 w:1) // Storage: Gilt QueueTotals (r:1 w:1) - // Storage: Gilt Queues (r:1 w:1) - // Storage: Gilt Active (r:0 w:1) + // Storage: Gilt Queues (r:6 w:6) + // Storage: Gilt Active (r:0 w:6) + /// The range of component `q` is `[0, 300]`. fn pursue_target_per_queue(q: u32, ) -> Weight { - Weight::from_ref_time(14_944_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(8_135_000 as u64).saturating_mul(q as u64)) + // Minimum execution time: 33_526 nanoseconds. 
+ Weight::from_ref_time(37_255_562 as u64) + // Standard Error: 3_611 + .saturating_add(Weight::from_ref_time(7_193_128 as u64).saturating_mul(q as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(q as u64))) .saturating_add(RocksDbWeight::get().writes(2 as u64)) diff --git a/frame/identity/src/weights.rs b/frame/identity/src/weights.rs index d65499034d74a..1f2e8f98e988b 100644 --- a/frame/identity/src/weights.rs +++ b/frame/identity/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,11 +18,12 @@ //! Autogenerated weights for pallet_identity //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-06-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet // --chain=dev @@ -34,6 +35,7 @@ // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/identity/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -69,32 +71,35 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { - Weight::from_ref_time(16_649_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(241_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 20_269 nanoseconds. + Weight::from_ref_time(21_910_543 as u64) + // Standard Error: 4_604 + .saturating_add(Weight::from_ref_time(223_104 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `x` is `[0, 100]`. fn set_identity(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(31_322_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(252_000 as u64).saturating_mul(r as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(312_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 41_872 nanoseconds. + Weight::from_ref_time(40_230_216 as u64) + // Standard Error: 2_342 + .saturating_add(Weight::from_ref_time(145_168 as u64).saturating_mul(r as u64)) + // Standard Error: 457 + .saturating_add(Weight::from_ref_time(291_732 as u64).saturating_mul(x as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) - // Storage: Identity SuperOf (r:1 w:1) - /// The range of component `s` is `[1, 100]`. + // Storage: Identity SuperOf (r:2 w:2) + /// The range of component `s` is `[0, 100]`. 
fn set_subs_new(s: u32, ) -> Weight { - Weight::from_ref_time(30_012_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_005_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 12_024 nanoseconds. + Weight::from_ref_time(32_550_819 as u64) + // Standard Error: 5_057 + .saturating_add(Weight::from_ref_time(2_521_245 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) @@ -102,12 +107,13 @@ impl WeightInfo for SubstrateWeight { } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) - // Storage: Identity SuperOf (r:0 w:1) - /// The range of component `p` is `[1, 100]`. + // Storage: Identity SuperOf (r:0 w:2) + /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { - Weight::from_ref_time(29_623_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_100_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 12_232 nanoseconds. + Weight::from_ref_time(34_009_761 as u64) + // Standard Error: 5_047 + .saturating_add(Weight::from_ref_time(1_113_100 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) @@ -116,16 +122,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity IdentityOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) /// The range of component `r` is `[1, 20]`. - /// The range of component `s` is `[1, 100]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `s` is `[0, 100]`. + /// The range of component `x` is `[0, 100]`. fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - Weight::from_ref_time(34_370_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(186_000 as u64).saturating_mul(r as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_114_000 as u64).saturating_mul(s as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(189_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 57_144 nanoseconds. + Weight::from_ref_time(41_559_247 as u64) + // Standard Error: 9_996 + .saturating_add(Weight::from_ref_time(146_770 as u64).saturating_mul(r as u64)) + // Standard Error: 1_952 + .saturating_add(Weight::from_ref_time(1_086_673 as u64).saturating_mul(s as u64)) + // Standard Error: 1_952 + .saturating_add(Weight::from_ref_time(162_481 as u64).saturating_mul(x as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) @@ -133,65 +140,71 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `x` is `[0, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(34_759_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(251_000 as u64).saturating_mul(r as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(340_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 44_726 nanoseconds. 
+ Weight::from_ref_time(41_637_308 as u64) + // Standard Error: 1_907 + .saturating_add(Weight::from_ref_time(219_078 as u64).saturating_mul(r as u64)) + // Standard Error: 372 + .saturating_add(Weight::from_ref_time(309_888 as u64).saturating_mul(x as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `x` is `[0, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(32_254_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(159_000 as u64).saturating_mul(r as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(347_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 39_719 nanoseconds. + Weight::from_ref_time(38_008_751 as u64) + // Standard Error: 2_394 + .saturating_add(Weight::from_ref_time(181_870 as u64).saturating_mul(r as u64)) + // Standard Error: 467 + .saturating_add(Weight::from_ref_time(314_990 as u64).saturating_mul(x as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { - Weight::from_ref_time(7_858_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(190_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 10_634 nanoseconds. + Weight::from_ref_time(11_383_704 as u64) + // Standard Error: 2_250 + .saturating_add(Weight::from_ref_time(193_094 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { - Weight::from_ref_time(8_011_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(187_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 10_840 nanoseconds. + Weight::from_ref_time(11_638_740 as u64) + // Standard Error: 1_985 + .saturating_add(Weight::from_ref_time(193_016 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { - Weight::from_ref_time(7_970_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(175_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 10_748 nanoseconds. + Weight::from_ref_time(11_346_901 as u64) + // Standard Error: 2_132 + .saturating_add(Weight::from_ref_time(196_630 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 19]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `x` is `[0, 100]`. 
fn provide_judgement(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(24_730_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(196_000 as u64).saturating_mul(r as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(341_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 33_682 nanoseconds. + Weight::from_ref_time(31_336_603 as u64) + // Standard Error: 3_056 + .saturating_add(Weight::from_ref_time(200_403 as u64).saturating_mul(r as u64)) + // Standard Error: 565 + .saturating_add(Weight::from_ref_time(525_142 as u64).saturating_mul(x as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -200,16 +213,17 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) /// The range of component `r` is `[1, 20]`. - /// The range of component `s` is `[1, 100]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `s` is `[0, 100]`. + /// The range of component `x` is `[0, 100]`. fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - Weight::from_ref_time(44_988_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(201_000 as u64).saturating_mul(r as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_126_000 as u64).saturating_mul(s as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 68_794 nanoseconds. + Weight::from_ref_time(52_114_486 as u64) + // Standard Error: 4_808 + .saturating_add(Weight::from_ref_time(153_462 as u64).saturating_mul(r as u64)) + // Standard Error: 939 + .saturating_add(Weight::from_ref_time(1_084_612 as u64).saturating_mul(s as u64)) + // Standard Error: 939 + .saturating_add(Weight::from_ref_time(170_112 as u64).saturating_mul(x as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) @@ -217,11 +231,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) - /// The range of component `s` is `[1, 99]`. + /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { - Weight::from_ref_time(36_768_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 37_914 nanoseconds. + Weight::from_ref_time(43_488_083 as u64) + // Standard Error: 1_631 + .saturating_add(Weight::from_ref_time(118_845 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -229,9 +244,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { - Weight::from_ref_time(13_474_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 16_124 nanoseconds. 
+ Weight::from_ref_time(18_580_462 as u64) + // Standard Error: 688 + .saturating_add(Weight::from_ref_time(67_220 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -240,19 +256,21 @@ impl WeightInfo for SubstrateWeight { // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { - Weight::from_ref_time(37_720_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 41_517 nanoseconds. + Weight::from_ref_time(45_123_530 as u64) + // Standard Error: 1_530 + .saturating_add(Weight::from_ref_time(105_429 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) - /// The range of component `s` is `[1, 99]`. + /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { - Weight::from_ref_time(26_848_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 30_171 nanoseconds. + Weight::from_ref_time(33_355_514 as u64) + // Standard Error: 1_286 + .saturating_add(Weight::from_ref_time(114_716 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -263,32 +281,35 @@ impl WeightInfo for () { // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn add_registrar(r: u32, ) -> Weight { - Weight::from_ref_time(16_649_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(241_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 20_269 nanoseconds. + Weight::from_ref_time(21_910_543 as u64) + // Standard Error: 4_604 + .saturating_add(Weight::from_ref_time(223_104 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `x` is `[0, 100]`. fn set_identity(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(31_322_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(252_000 as u64).saturating_mul(r as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(312_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 41_872 nanoseconds. + Weight::from_ref_time(40_230_216 as u64) + // Standard Error: 2_342 + .saturating_add(Weight::from_ref_time(145_168 as u64).saturating_mul(r as u64)) + // Standard Error: 457 + .saturating_add(Weight::from_ref_time(291_732 as u64).saturating_mul(x as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) - // Storage: Identity SuperOf (r:1 w:1) - /// The range of component `s` is `[1, 100]`. + // Storage: Identity SuperOf (r:2 w:2) + /// The range of component `s` is `[0, 100]`. 
fn set_subs_new(s: u32, ) -> Weight { - Weight::from_ref_time(30_012_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_005_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 12_024 nanoseconds. + Weight::from_ref_time(32_550_819 as u64) + // Standard Error: 5_057 + .saturating_add(Weight::from_ref_time(2_521_245 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) @@ -296,12 +317,13 @@ impl WeightInfo for () { } // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SubsOf (r:1 w:1) - // Storage: Identity SuperOf (r:0 w:1) - /// The range of component `p` is `[1, 100]`. + // Storage: Identity SuperOf (r:0 w:2) + /// The range of component `p` is `[0, 100]`. fn set_subs_old(p: u32, ) -> Weight { - Weight::from_ref_time(29_623_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_100_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 12_232 nanoseconds. + Weight::from_ref_time(34_009_761 as u64) + // Standard Error: 5_047 + .saturating_add(Weight::from_ref_time(1_113_100 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(p as u64))) @@ -310,16 +332,17 @@ impl WeightInfo for () { // Storage: Identity IdentityOf (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) /// The range of component `r` is `[1, 20]`. - /// The range of component `s` is `[1, 100]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `s` is `[0, 100]`. + /// The range of component `x` is `[0, 100]`. fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - Weight::from_ref_time(34_370_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(186_000 as u64).saturating_mul(r as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_114_000 as u64).saturating_mul(s as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(189_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 57_144 nanoseconds. + Weight::from_ref_time(41_559_247 as u64) + // Standard Error: 9_996 + .saturating_add(Weight::from_ref_time(146_770 as u64).saturating_mul(r as u64)) + // Standard Error: 1_952 + .saturating_add(Weight::from_ref_time(1_086_673 as u64).saturating_mul(s as u64)) + // Standard Error: 1_952 + .saturating_add(Weight::from_ref_time(162_481 as u64).saturating_mul(x as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) @@ -327,65 +350,71 @@ impl WeightInfo for () { // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `x` is `[0, 100]`. fn request_judgement(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(34_759_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(251_000 as u64).saturating_mul(r as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(340_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 44_726 nanoseconds. 
+ Weight::from_ref_time(41_637_308 as u64) + // Standard Error: 1_907 + .saturating_add(Weight::from_ref_time(219_078 as u64).saturating_mul(r as u64)) + // Standard Error: 372 + .saturating_add(Weight::from_ref_time(309_888 as u64).saturating_mul(x as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 20]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `x` is `[0, 100]`. fn cancel_request(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(32_254_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(159_000 as u64).saturating_mul(r as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(347_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 39_719 nanoseconds. + Weight::from_ref_time(38_008_751 as u64) + // Standard Error: 2_394 + .saturating_add(Weight::from_ref_time(181_870 as u64).saturating_mul(r as u64)) + // Standard Error: 467 + .saturating_add(Weight::from_ref_time(314_990 as u64).saturating_mul(x as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fee(r: u32, ) -> Weight { - Weight::from_ref_time(7_858_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(190_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 10_634 nanoseconds. + Weight::from_ref_time(11_383_704 as u64) + // Standard Error: 2_250 + .saturating_add(Weight::from_ref_time(193_094 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_account_id(r: u32, ) -> Weight { - Weight::from_ref_time(8_011_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(187_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 10_840 nanoseconds. + Weight::from_ref_time(11_638_740 as u64) + // Standard Error: 1_985 + .saturating_add(Weight::from_ref_time(193_016 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:1) /// The range of component `r` is `[1, 19]`. fn set_fields(r: u32, ) -> Weight { - Weight::from_ref_time(7_970_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(175_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 10_748 nanoseconds. + Weight::from_ref_time(11_346_901 as u64) + // Standard Error: 2_132 + .saturating_add(Weight::from_ref_time(196_630 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Identity Registrars (r:1 w:0) // Storage: Identity IdentityOf (r:1 w:1) /// The range of component `r` is `[1, 19]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `x` is `[0, 100]`. 
fn provide_judgement(r: u32, x: u32, ) -> Weight { - Weight::from_ref_time(24_730_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(196_000 as u64).saturating_mul(r as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(341_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 33_682 nanoseconds. + Weight::from_ref_time(31_336_603 as u64) + // Standard Error: 3_056 + .saturating_add(Weight::from_ref_time(200_403 as u64).saturating_mul(r as u64)) + // Standard Error: 565 + .saturating_add(Weight::from_ref_time(525_142 as u64).saturating_mul(x as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -394,16 +423,17 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Identity SuperOf (r:0 w:100) /// The range of component `r` is `[1, 20]`. - /// The range of component `s` is `[1, 100]`. - /// The range of component `x` is `[1, 100]`. + /// The range of component `s` is `[0, 100]`. + /// The range of component `x` is `[0, 100]`. fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - Weight::from_ref_time(44_988_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(201_000 as u64).saturating_mul(r as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_126_000 as u64).saturating_mul(s as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(x as u64)) + // Minimum execution time: 68_794 nanoseconds. + Weight::from_ref_time(52_114_486 as u64) + // Standard Error: 4_808 + .saturating_add(Weight::from_ref_time(153_462 as u64).saturating_mul(r as u64)) + // Standard Error: 939 + .saturating_add(Weight::from_ref_time(1_084_612 as u64).saturating_mul(s as u64)) + // Standard Error: 939 + .saturating_add(Weight::from_ref_time(170_112 as u64).saturating_mul(x as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) @@ -411,11 +441,12 @@ impl WeightInfo for () { // Storage: Identity IdentityOf (r:1 w:0) // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) - /// The range of component `s` is `[1, 99]`. + /// The range of component `s` is `[0, 99]`. fn add_sub(s: u32, ) -> Weight { - Weight::from_ref_time(36_768_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 37_914 nanoseconds. + Weight::from_ref_time(43_488_083 as u64) + // Standard Error: 1_631 + .saturating_add(Weight::from_ref_time(118_845 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -423,9 +454,10 @@ impl WeightInfo for () { // Storage: Identity SuperOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn rename_sub(s: u32, ) -> Weight { - Weight::from_ref_time(13_474_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 16_124 nanoseconds. 
+ Weight::from_ref_time(18_580_462 as u64) + // Standard Error: 688 + .saturating_add(Weight::from_ref_time(67_220 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -434,19 +466,21 @@ impl WeightInfo for () { // Storage: Identity SubsOf (r:1 w:1) /// The range of component `s` is `[1, 100]`. fn remove_sub(s: u32, ) -> Weight { - Weight::from_ref_time(37_720_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(114_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 41_517 nanoseconds. + Weight::from_ref_time(45_123_530 as u64) + // Standard Error: 1_530 + .saturating_add(Weight::from_ref_time(105_429 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Identity SuperOf (r:1 w:1) // Storage: Identity SubsOf (r:1 w:1) - /// The range of component `s` is `[1, 99]`. + /// The range of component `s` is `[0, 99]`. fn quit_sub(s: u32, ) -> Weight { - Weight::from_ref_time(26_848_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 30_171 nanoseconds. + Weight::from_ref_time(33_355_514 as u64) + // Standard Error: 1_286 + .saturating_add(Weight::from_ref_time(114_716 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs index 3d20c9a0e614c..f81db997c303d 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_im_online //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/im-online/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,12 +58,15 @@ impl WeightInfo for SubstrateWeight { // Storage: ImOnline ReceivedHeartbeats (r:1 w:1) // Storage: ImOnline AuthoredBlocks (r:1 w:0) // Storage: ImOnline Keys (r:1 w:0) + /// The range of component `k` is `[1, 1000]`. + /// The range of component `e` is `[1, 100]`. 
fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - Weight::from_ref_time(79_225_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(41_000 as u64).saturating_mul(k as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(293_000 as u64).saturating_mul(e as u64)) + // Minimum execution time: 101_380 nanoseconds. + Weight::from_ref_time(82_735_670 as u64) + // Standard Error: 121 + .saturating_add(Weight::from_ref_time(21_279 as u64).saturating_mul(k as u64)) + // Standard Error: 1_222 + .saturating_add(Weight::from_ref_time(296_343 as u64).saturating_mul(e as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -73,12 +79,15 @@ impl WeightInfo for () { // Storage: ImOnline ReceivedHeartbeats (r:1 w:1) // Storage: ImOnline AuthoredBlocks (r:1 w:0) // Storage: ImOnline Keys (r:1 w:0) + /// The range of component `k` is `[1, 1000]`. + /// The range of component `e` is `[1, 100]`. fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - Weight::from_ref_time(79_225_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(41_000 as u64).saturating_mul(k as u64)) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(293_000 as u64).saturating_mul(e as u64)) + // Minimum execution time: 101_380 nanoseconds. + Weight::from_ref_time(82_735_670 as u64) + // Standard Error: 121 + .saturating_add(Weight::from_ref_time(21_279 as u64).saturating_mul(k as u64)) + // Standard Error: 1_222 + .saturating_add(Weight::from_ref_time(296_343 as u64).saturating_mul(e as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/indices/src/weights.rs b/frame/indices/src/weights.rs index 64bfa744d9b46..7b974875cdf51 100644 --- a/frame/indices/src/weights.rs +++ b/frame/indices/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_indices //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/indices/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,33 +59,38 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Indices Accounts (r:1 w:1) fn claim() -> Weight { - Weight::from_ref_time(25_929_000 as u64) + // Minimum execution time: 31_756 nanoseconds. 
+ Weight::from_ref_time(32_252_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(32_627_000 as u64) + // Minimum execution time: 38_686 nanoseconds. + Weight::from_ref_time(39_402_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Indices Accounts (r:1 w:1) fn free() -> Weight { - Weight::from_ref_time(26_804_000 as u64) + // Minimum execution time: 33_752 nanoseconds. + Weight::from_ref_time(34_348_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - Weight::from_ref_time(27_390_000 as u64) + // Minimum execution time: 32_739 nanoseconds. + Weight::from_ref_time(33_151_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Indices Accounts (r:1 w:1) fn freeze() -> Weight { - Weight::from_ref_time(30_973_000 as u64) + // Minimum execution time: 40_369 nanoseconds. + Weight::from_ref_time(40_982_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -92,33 +100,38 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Indices Accounts (r:1 w:1) fn claim() -> Weight { - Weight::from_ref_time(25_929_000 as u64) + // Minimum execution time: 31_756 nanoseconds. + Weight::from_ref_time(32_252_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - Weight::from_ref_time(32_627_000 as u64) + // Minimum execution time: 38_686 nanoseconds. + Weight::from_ref_time(39_402_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Indices Accounts (r:1 w:1) fn free() -> Weight { - Weight::from_ref_time(26_804_000 as u64) + // Minimum execution time: 33_752 nanoseconds. + Weight::from_ref_time(34_348_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Indices Accounts (r:1 w:1) // Storage: System Account (r:1 w:1) fn force_transfer() -> Weight { - Weight::from_ref_time(27_390_000 as u64) + // Minimum execution time: 32_739 nanoseconds. + Weight::from_ref_time(33_151_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Indices Accounts (r:1 w:1) fn freeze() -> Weight { - Weight::from_ref_time(30_973_000 as u64) + // Minimum execution time: 40_369 nanoseconds. + Weight::from_ref_time(40_982_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/lottery/src/weights.rs b/frame/lottery/src/weights.rs index bae2e8a89c4e5..4ced6a642781a 100644 --- a/frame/lottery/src/weights.rs +++ b/frame/lottery/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_lottery //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/lottery/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -63,28 +66,33 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - Weight::from_ref_time(44_706_000 as u64) + // Minimum execution time: 52_451 nanoseconds. + Weight::from_ref_time(52_843_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Lottery CallIndices (r:0 w:1) + /// The range of component `n` is `[0, 10]`. fn set_calls(n: u32, ) -> Weight { - Weight::from_ref_time(12_556_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(295_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 14_389 nanoseconds. + Weight::from_ref_time(15_700_569 as u64) + // Standard Error: 4_677 + .saturating_add(Weight::from_ref_time(296_850 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - Weight::from_ref_time(38_051_000 as u64) + // Minimum execution time: 44_814 nanoseconds. + Weight::from_ref_time(45_611_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - Weight::from_ref_time(6_910_000 as u64) + // Minimum execution time: 10_384 nanoseconds. + Weight::from_ref_time(10_727_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -94,7 +102,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - Weight::from_ref_time(53_732_000 as u64) + // Minimum execution time: 62_720 nanoseconds. + Weight::from_ref_time(63_545_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -105,7 +114,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - Weight::from_ref_time(55_868_000 as u64) + // Minimum execution time: 63_452 nanoseconds. 
+ Weight::from_ref_time(65_010_000 as u64) .saturating_add(T::DbWeight::get().reads(7 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } @@ -121,28 +131,33 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) // Storage: Lottery Tickets (r:0 w:1) fn buy_ticket() -> Weight { - Weight::from_ref_time(44_706_000 as u64) + // Minimum execution time: 52_451 nanoseconds. + Weight::from_ref_time(52_843_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Lottery CallIndices (r:0 w:1) + /// The range of component `n` is `[0, 10]`. fn set_calls(n: u32, ) -> Weight { - Weight::from_ref_time(12_556_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(295_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 14_389 nanoseconds. + Weight::from_ref_time(15_700_569 as u64) + // Standard Error: 4_677 + .saturating_add(Weight::from_ref_time(296_850 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Lottery Lottery (r:1 w:1) // Storage: Lottery LotteryIndex (r:1 w:1) // Storage: System Account (r:1 w:1) fn start_lottery() -> Weight { - Weight::from_ref_time(38_051_000 as u64) + // Minimum execution time: 44_814 nanoseconds. + Weight::from_ref_time(45_611_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Lottery Lottery (r:1 w:1) fn stop_repeat() -> Weight { - Weight::from_ref_time(6_910_000 as u64) + // Minimum execution time: 10_384 nanoseconds. + Weight::from_ref_time(10_727_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -152,7 +167,8 @@ impl WeightInfo for () { // Storage: Lottery TicketsCount (r:1 w:1) // Storage: Lottery Tickets (r:1 w:0) fn on_initialize_end() -> Weight { - Weight::from_ref_time(53_732_000 as u64) + // Minimum execution time: 62_720 nanoseconds. + Weight::from_ref_time(63_545_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -163,7 +179,8 @@ impl WeightInfo for () { // Storage: Lottery Tickets (r:1 w:0) // Storage: Lottery LotteryIndex (r:1 w:1) fn on_initialize_repeat() -> Weight { - Weight::from_ref_time(55_868_000 as u64) + // Minimum execution time: 63_452 nanoseconds. + Weight::from_ref_time(65_010_000 as u64) .saturating_add(RocksDbWeight::get().reads(7 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } diff --git a/frame/membership/src/weights.rs b/frame/membership/src/weights.rs index 4876b3d13e2e3..11574bc8fa399 100644 --- a/frame/membership/src/weights.rs +++ b/frame/membership/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_membership //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/membership/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -60,10 +63,12 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { - Weight::from_ref_time(15_318_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(51_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 23_796 nanoseconds. + Weight::from_ref_time(24_829_996 as u64) + // Standard Error: 723 + .saturating_add(Weight::from_ref_time(48_467 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -72,10 +77,12 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[2, 100]`. fn remove_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_005_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 27_255 nanoseconds. + Weight::from_ref_time(28_234_490 as u64) + // Standard Error: 833 + .saturating_add(Weight::from_ref_time(34_894 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -84,10 +91,12 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_029_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 26_626 nanoseconds. + Weight::from_ref_time(27_989_042 as u64) + // Standard Error: 729 + .saturating_add(Weight::from_ref_time(51_567 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -96,10 +105,12 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[1, 100]`. fn reset_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_105_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 25_412 nanoseconds. 
+ Weight::from_ref_time(27_713_414 as u64) + // Standard Error: 883 + .saturating_add(Weight::from_ref_time(157_085 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -108,29 +119,35 @@ impl WeightInfo for SubstrateWeight { // Storage: TechnicalMembership Prime (r:1 w:1) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { - Weight::from_ref_time(18_852_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 27_122 nanoseconds. + Weight::from_ref_time(28_477_394 as u64) + // Standard Error: 801 + .saturating_add(Weight::from_ref_time(56_383 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: TechnicalMembership Members (r:1 w:0) // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[1, 100]`. fn set_prime(m: u32, ) -> Weight { - Weight::from_ref_time(4_869_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(28_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 9_368 nanoseconds. + Weight::from_ref_time(10_133_132 as u64) + // Standard Error: 366 + .saturating_add(Weight::from_ref_time(17_708 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[1, 100]`. fn clear_prime(m: u32, ) -> Weight { - Weight::from_ref_time(1_593_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 5_546 nanoseconds. + Weight::from_ref_time(5_962_740 as u64) + // Standard Error: 186 + .saturating_add(Weight::from_ref_time(2_096 as u64).saturating_mul(m as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } } @@ -141,10 +158,12 @@ impl WeightInfo for () { // Storage: TechnicalCommittee Proposals (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[1, 99]`. fn add_member(m: u32, ) -> Weight { - Weight::from_ref_time(15_318_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(51_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 23_796 nanoseconds. + Weight::from_ref_time(24_829_996 as u64) + // Standard Error: 723 + .saturating_add(Weight::from_ref_time(48_467 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -153,10 +172,12 @@ impl WeightInfo for () { // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[2, 100]`. fn remove_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_005_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(45_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 27_255 nanoseconds. 
+ Weight::from_ref_time(28_234_490 as u64) + // Standard Error: 833 + .saturating_add(Weight::from_ref_time(34_894 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -165,10 +186,12 @@ impl WeightInfo for () { // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[2, 100]`. fn swap_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_029_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 26_626 nanoseconds. + Weight::from_ref_time(27_989_042 as u64) + // Standard Error: 729 + .saturating_add(Weight::from_ref_time(51_567 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -177,10 +200,12 @@ impl WeightInfo for () { // Storage: TechnicalMembership Prime (r:1 w:0) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[1, 100]`. fn reset_member(m: u32, ) -> Weight { - Weight::from_ref_time(18_105_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 25_412 nanoseconds. + Weight::from_ref_time(27_713_414 as u64) + // Standard Error: 883 + .saturating_add(Weight::from_ref_time(157_085 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -189,29 +214,35 @@ impl WeightInfo for () { // Storage: TechnicalMembership Prime (r:1 w:1) // Storage: TechnicalCommittee Members (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[1, 100]`. fn change_key(m: u32, ) -> Weight { - Weight::from_ref_time(18_852_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 27_122 nanoseconds. + Weight::from_ref_time(28_477_394 as u64) + // Standard Error: 801 + .saturating_add(Weight::from_ref_time(56_383 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: TechnicalMembership Members (r:1 w:0) // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[1, 100]`. fn set_prime(m: u32, ) -> Weight { - Weight::from_ref_time(4_869_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(28_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 9_368 nanoseconds. + Weight::from_ref_time(10_133_132 as u64) + // Standard Error: 366 + .saturating_add(Weight::from_ref_time(17_708 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: TechnicalMembership Prime (r:0 w:1) // Storage: TechnicalCommittee Prime (r:0 w:1) + /// The range of component `m` is `[1, 100]`. fn clear_prime(m: u32, ) -> Weight { - Weight::from_ref_time(1_593_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(m as u64)) + // Minimum execution time: 5_546 nanoseconds. 
+ Weight::from_ref_time(5_962_740 as u64) + // Standard Error: 186 + .saturating_add(Weight::from_ref_time(2_096 as u64).saturating_mul(m as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } diff --git a/frame/multisig/src/weights.rs b/frame/multisig/src/weights.rs index de403b4d249e4..1f435cb9f9087 100644 --- a/frame/multisig/src/weights.rs +++ b/frame/multisig/src/weights.rs @@ -18,25 +18,24 @@ //! Autogenerated weights for pallet_multisig //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-26, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_multisig // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_multisig -// --chain=dev -// --header=./HEADER-APACHE2 // --output=./frame/multisig/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -62,22 +61,22 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `z` is `[0, 10000]`. fn as_multi_threshold_1(z: u32, ) -> Weight { - // Minimum execution time: 20_283 nanoseconds. - Weight::from_ref_time(20_861_089 as u64) - // Standard Error: 5 - .saturating_add(Weight::from_ref_time(583 as u64).saturating_mul(z as u64)) + // Minimum execution time: 20_447 nanoseconds. + Weight::from_ref_time(20_896_236 as u64) + // Standard Error: 2 + .saturating_add(Weight::from_ref_time(568 as u64).saturating_mul(z as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 52_699 nanoseconds. - Weight::from_ref_time(40_874_603 as u64) - // Standard Error: 546 - .saturating_add(Weight::from_ref_time(131_727 as u64).saturating_mul(s as u64)) + // Minimum execution time: 54_987 nanoseconds. + Weight::from_ref_time(42_525_077 as u64) + // Standard Error: 562 + .saturating_add(Weight::from_ref_time(136_064 as u64).saturating_mul(s as u64)) // Standard Error: 5 - .saturating_add(Weight::from_ref_time(1_537 as u64).saturating_mul(z as u64)) + .saturating_add(Weight::from_ref_time(1_508 as u64).saturating_mul(z as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -85,12 +84,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[3, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 39_843 nanoseconds. 
- Weight::from_ref_time(28_912_325 as u64) - // Standard Error: 734 - .saturating_add(Weight::from_ref_time(125_761 as u64).saturating_mul(s as u64)) - // Standard Error: 7 - .saturating_add(Weight::from_ref_time(1_542 as u64).saturating_mul(z as u64)) + // Minimum execution time: 42_573 nanoseconds. + Weight::from_ref_time(30_585_734 as u64) + // Standard Error: 637 + .saturating_add(Weight::from_ref_time(128_012 as u64).saturating_mul(s as u64)) + // Standard Error: 6 + .saturating_add(Weight::from_ref_time(1_507 as u64).saturating_mul(z as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -99,12 +98,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 54_980 nanoseconds. - Weight::from_ref_time(42_087_213 as u64) - // Standard Error: 786 - .saturating_add(Weight::from_ref_time(153_935 as u64).saturating_mul(s as u64)) - // Standard Error: 7 - .saturating_add(Weight::from_ref_time(1_545 as u64).saturating_mul(z as u64)) + // Minimum execution time: 57_143 nanoseconds. + Weight::from_ref_time(43_921_674 as u64) + // Standard Error: 704 + .saturating_add(Weight::from_ref_time(153_474 as u64).saturating_mul(s as u64)) + // Standard Error: 6 + .saturating_add(Weight::from_ref_time(1_536 as u64).saturating_mul(z as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -112,30 +111,30 @@ impl WeightInfo for SubstrateWeight { // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { - // Minimum execution time: 37_802 nanoseconds. - Weight::from_ref_time(39_933_623 as u64) - // Standard Error: 1_059 - .saturating_add(Weight::from_ref_time(121_891 as u64).saturating_mul(s as u64)) + // Minimum execution time: 39_088 nanoseconds. + Weight::from_ref_time(41_258_697 as u64) + // Standard Error: 1_038 + .saturating_add(Weight::from_ref_time(126_040 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { - // Minimum execution time: 25_738 nanoseconds. - Weight::from_ref_time(27_676_766 as u64) - // Standard Error: 710 - .saturating_add(Weight::from_ref_time(125_733 as u64).saturating_mul(s as u64)) + // Minimum execution time: 26_872 nanoseconds. + Weight::from_ref_time(28_625_218 as u64) + // Standard Error: 793 + .saturating_add(Weight::from_ref_time(128_542 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { - // Minimum execution time: 36_591 nanoseconds. - Weight::from_ref_time(38_707_543 as u64) - // Standard Error: 881 - .saturating_add(Weight::from_ref_time(126_198 as u64).saturating_mul(s as u64)) + // Minimum execution time: 37_636 nanoseconds. 
+ Weight::from_ref_time(39_614_705 as u64) + // Standard Error: 850 + .saturating_add(Weight::from_ref_time(136_222 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -145,22 +144,22 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// The range of component `z` is `[0, 10000]`. fn as_multi_threshold_1(z: u32, ) -> Weight { - // Minimum execution time: 20_283 nanoseconds. - Weight::from_ref_time(20_861_089 as u64) - // Standard Error: 5 - .saturating_add(Weight::from_ref_time(583 as u64).saturating_mul(z as u64)) + // Minimum execution time: 20_447 nanoseconds. + Weight::from_ref_time(20_896_236 as u64) + // Standard Error: 2 + .saturating_add(Weight::from_ref_time(568 as u64).saturating_mul(z as u64)) } // Storage: Multisig Multisigs (r:1 w:1) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 52_699 nanoseconds. - Weight::from_ref_time(40_874_603 as u64) - // Standard Error: 546 - .saturating_add(Weight::from_ref_time(131_727 as u64).saturating_mul(s as u64)) + // Minimum execution time: 54_987 nanoseconds. + Weight::from_ref_time(42_525_077 as u64) + // Standard Error: 562 + .saturating_add(Weight::from_ref_time(136_064 as u64).saturating_mul(s as u64)) // Standard Error: 5 - .saturating_add(Weight::from_ref_time(1_537 as u64).saturating_mul(z as u64)) + .saturating_add(Weight::from_ref_time(1_508 as u64).saturating_mul(z as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -168,12 +167,12 @@ impl WeightInfo for () { /// The range of component `s` is `[3, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 39_843 nanoseconds. - Weight::from_ref_time(28_912_325 as u64) - // Standard Error: 734 - .saturating_add(Weight::from_ref_time(125_761 as u64).saturating_mul(s as u64)) - // Standard Error: 7 - .saturating_add(Weight::from_ref_time(1_542 as u64).saturating_mul(z as u64)) + // Minimum execution time: 42_573 nanoseconds. + Weight::from_ref_time(30_585_734 as u64) + // Standard Error: 637 + .saturating_add(Weight::from_ref_time(128_012 as u64).saturating_mul(s as u64)) + // Standard Error: 6 + .saturating_add(Weight::from_ref_time(1_507 as u64).saturating_mul(z as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -182,12 +181,12 @@ impl WeightInfo for () { /// The range of component `s` is `[2, 100]`. /// The range of component `z` is `[0, 10000]`. fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Minimum execution time: 54_980 nanoseconds. - Weight::from_ref_time(42_087_213 as u64) - // Standard Error: 786 - .saturating_add(Weight::from_ref_time(153_935 as u64).saturating_mul(s as u64)) - // Standard Error: 7 - .saturating_add(Weight::from_ref_time(1_545 as u64).saturating_mul(z as u64)) + // Minimum execution time: 57_143 nanoseconds. 
+ Weight::from_ref_time(43_921_674 as u64) + // Standard Error: 704 + .saturating_add(Weight::from_ref_time(153_474 as u64).saturating_mul(s as u64)) + // Standard Error: 6 + .saturating_add(Weight::from_ref_time(1_536 as u64).saturating_mul(z as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -195,30 +194,30 @@ impl WeightInfo for () { // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_create(s: u32, ) -> Weight { - // Minimum execution time: 37_802 nanoseconds. - Weight::from_ref_time(39_933_623 as u64) - // Standard Error: 1_059 - .saturating_add(Weight::from_ref_time(121_891 as u64).saturating_mul(s as u64)) + // Minimum execution time: 39_088 nanoseconds. + Weight::from_ref_time(41_258_697 as u64) + // Standard Error: 1_038 + .saturating_add(Weight::from_ref_time(126_040 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) /// The range of component `s` is `[2, 100]`. fn approve_as_multi_approve(s: u32, ) -> Weight { - // Minimum execution time: 25_738 nanoseconds. - Weight::from_ref_time(27_676_766 as u64) - // Standard Error: 710 - .saturating_add(Weight::from_ref_time(125_733 as u64).saturating_mul(s as u64)) + // Minimum execution time: 26_872 nanoseconds. + Weight::from_ref_time(28_625_218 as u64) + // Standard Error: 793 + .saturating_add(Weight::from_ref_time(128_542 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Multisig Multisigs (r:1 w:1) /// The range of component `s` is `[2, 100]`. fn cancel_as_multi(s: u32, ) -> Weight { - // Minimum execution time: 36_591 nanoseconds. - Weight::from_ref_time(38_707_543 as u64) - // Standard Error: 881 - .saturating_add(Weight::from_ref_time(126_198 as u64).saturating_mul(s as u64)) + // Minimum execution time: 37_636 nanoseconds. + Weight::from_ref_time(39_614_705 as u64) + // Standard Error: 850 + .saturating_add(Weight::from_ref_time(136_222 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/nomination-pools/src/weights.rs b/frame/nomination-pools/src/weights.rs index e20394ca668b9..1062b1749d417 100644 --- a/frame/nomination-pools/src/weights.rs +++ b/frame/nomination-pools/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,12 @@ //! Autogenerated weights for pallet_nomination_pools //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-06-15, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet // --chain=dev @@ -35,6 +35,7 @@ // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/nomination-pools/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -69,18 +70,19 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools MinJoinBond (r:1 w:0) // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) - // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) fn join() -> Weight { - Weight::from_ref_time(123_947_000 as u64) + // Minimum execution time: 159_948 nanoseconds. + Weight::from_ref_time(161_133_000 as u64) .saturating_add(T::DbWeight::get().reads(17 as u64)) .saturating_add(T::DbWeight::get().writes(12 as u64)) } @@ -88,13 +90,14 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:3 w:2) - // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - Weight::from_ref_time(118_236_000 as u64) + // Minimum execution time: 155_517 nanoseconds. + Weight::from_ref_time(159_101_000 as u64) .saturating_add(T::DbWeight::get().reads(14 as u64)) .saturating_add(T::DbWeight::get().writes(12 as u64)) } @@ -102,13 +105,14 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:3 w:3) - // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - Weight::from_ref_time(132_475_000 as u64) + // Minimum execution time: 172_788 nanoseconds. + Weight::from_ref_time(174_212_000 as u64) .saturating_add(T::DbWeight::get().reads(14 as u64)) .saturating_add(T::DbWeight::get().writes(13 as u64)) } @@ -117,63 +121,69 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - Weight::from_ref_time(50_299_000 as u64) + // Minimum execution time: 64_560 nanoseconds. 
+ Weight::from_ref_time(64_950_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - Weight::from_ref_time(121_254_000 as u64) + // Minimum execution time: 161_398 nanoseconds. + Weight::from_ref_time(162_991_000 as u64) .saturating_add(T::DbWeight::get().reads(18 as u64)) .saturating_add(T::DbWeight::get().writes(13 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { - Weight::from_ref_time(41_928_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(4 as u64)) + // Minimum execution time: 66_036 nanoseconds. + Weight::from_ref_time(67_183_304 as u64) + // Standard Error: 565 + .saturating_add(Weight::from_ref_time(57_830 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools SubPoolsStorage (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - Weight::from_ref_time(81_611_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(8 as u64)) + // Minimum execution time: 111_156 nanoseconds. 
+ Weight::from_ref_time(112_507_059 as u64) + // Standard Error: 655 + .saturating_add(Weight::from_ref_time(53_711 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(9 as u64)) .saturating_add(T::DbWeight::get().writes(7 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools SubPoolsStorage (r:1 w:1) - // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Ledger (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) @@ -185,29 +195,32 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools RewardPools (r:1 w:1) // Storage: NominationPools CounterForRewardPools (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) + // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForBondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - Weight::from_ref_time(139_849_000 as u64) - .saturating_add(T::DbWeight::get().reads(19 as u64)) - .saturating_add(T::DbWeight::get().writes(16 as u64)) + fn withdraw_unbonded_kill(s: u32, ) -> Weight { + // Minimum execution time: 168_270 nanoseconds. + Weight::from_ref_time(170_059_380 as u64) + // Standard Error: 1_506 + .saturating_add(Weight::from_ref_time(1_258 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(20 as u64)) + .saturating_add(T::DbWeight::get().writes(17 as u64)) } + // Storage: NominationPools LastPoolId (r:1 w:1) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: NominationPools MinCreateBond (r:1 w:0) // Storage: NominationPools MinJoinBond (r:1 w:0) // Storage: NominationPools MaxPools (r:1 w:0) // Storage: NominationPools CounterForBondedPools (r:1 w:1) // Storage: NominationPools PoolMembers (r:1 w:1) - // Storage: NominationPools LastPoolId (r:1 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) // Storage: System Account (r:2 w:2) - // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: NominationPools CounterForRewardPools (r:1 w:1) @@ -216,36 +229,40 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(126_246_000 as u64) - .saturating_add(T::DbWeight::get().reads(22 as u64)) + // Minimum execution time: 146_153 nanoseconds. 
+ Weight::from_ref_time(146_955_000 as u64) + .saturating_add(T::DbWeight::get().reads(21 as u64)) .saturating_add(T::DbWeight::get().writes(15 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking MaxNominatorsCount (r:1 w:0) // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListNodes (r:1 w:1) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:1 w:1) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - Weight::from_ref_time(48_829_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(2_204_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 71_380 nanoseconds. + Weight::from_ref_time(71_060_388 as u64) + // Standard Error: 2_587 + .saturating_add(Weight::from_ref_time(1_185_729 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(12 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: NominationPools BondedPools (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - Weight::from_ref_time(26_761_000 as u64) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + // Minimum execution time: 46_275 nanoseconds. + Weight::from_ref_time(46_689_000 as u64) + .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) @@ -253,9 +270,10 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { - Weight::from_ref_time(14_519_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 19_246 nanoseconds. + Weight::from_ref_time(20_415_018 as u64) + // Standard Error: 95 + .saturating_add(Weight::from_ref_time(2_040 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -265,26 +283,30 @@ impl WeightInfo for SubstrateWeight { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - Weight::from_ref_time(6_173_000 as u64) + // Minimum execution time: 9_231 nanoseconds. + Weight::from_ref_time(9_526_000 as u64) .saturating_add(T::DbWeight::get().writes(5 as u64)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - Weight::from_ref_time(22_261_000 as u64) + // Minimum execution time: 31_246 nanoseconds. 
+ Weight::from_ref_time(31_762_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:1 w:1) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:1 w:1) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - Weight::from_ref_time(47_959_000 as u64) - .saturating_add(T::DbWeight::get().reads(8 as u64)) + // Minimum execution time: 73_812 nanoseconds. + Weight::from_ref_time(74_790_000 as u64) + .saturating_add(T::DbWeight::get().reads(9 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } } @@ -294,18 +316,19 @@ impl WeightInfo for () { // Storage: NominationPools MinJoinBond (r:1 w:0) // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) - // Storage: Staking Bonded (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) fn join() -> Weight { - Weight::from_ref_time(123_947_000 as u64) + // Minimum execution time: 159_948 nanoseconds. + Weight::from_ref_time(161_133_000 as u64) .saturating_add(RocksDbWeight::get().reads(17 as u64)) .saturating_add(RocksDbWeight::get().writes(12 as u64)) } @@ -313,13 +336,14 @@ impl WeightInfo for () { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:3 w:2) - // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) fn bond_extra_transfer() -> Weight { - Weight::from_ref_time(118_236_000 as u64) + // Minimum execution time: 155_517 nanoseconds. + Weight::from_ref_time(159_101_000 as u64) .saturating_add(RocksDbWeight::get().reads(14 as u64)) .saturating_add(RocksDbWeight::get().writes(12 as u64)) } @@ -327,13 +351,14 @@ impl WeightInfo for () { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:3 w:3) - // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) fn bond_extra_reward() -> Weight { - Weight::from_ref_time(132_475_000 as u64) + // Minimum execution time: 172_788 nanoseconds. 
+ Weight::from_ref_time(174_212_000 as u64) .saturating_add(RocksDbWeight::get().reads(14 as u64)) .saturating_add(RocksDbWeight::get().writes(13 as u64)) } @@ -342,63 +367,69 @@ impl WeightInfo for () { // Storage: NominationPools RewardPools (r:1 w:1) // Storage: System Account (r:1 w:1) fn claim_payout() -> Weight { - Weight::from_ref_time(50_299_000 as u64) + // Minimum execution time: 64_560 nanoseconds. + Weight::from_ref_time(64_950_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) // Storage: System Account (r:2 w:1) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Nominators (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Balances Locks (r:1 w:1) - // Storage: BagsList ListNodes (r:3 w:3) - // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) // Storage: NominationPools SubPoolsStorage (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) fn unbond() -> Weight { - Weight::from_ref_time(121_254_000 as u64) + // Minimum execution time: 161_398 nanoseconds. + Weight::from_ref_time(162_991_000 as u64) .saturating_add(RocksDbWeight::get().reads(18 as u64)) .saturating_add(RocksDbWeight::get().writes(13 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { - Weight::from_ref_time(41_928_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(4 as u64)) + // Minimum execution time: 66_036 nanoseconds. + Weight::from_ref_time(67_183_304 as u64) + // Standard Error: 565 + .saturating_add(Weight::from_ref_time(57_830 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools SubPoolsStorage (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - Weight::from_ref_time(81_611_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) + // Minimum execution time: 111_156 nanoseconds. 
+ Weight::from_ref_time(112_507_059 as u64) + // Standard Error: 655 + .saturating_add(Weight::from_ref_time(53_711 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(9 as u64)) .saturating_add(RocksDbWeight::get().writes(7 as u64)) } // Storage: NominationPools PoolMembers (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) // Storage: NominationPools BondedPools (r:1 w:1) // Storage: NominationPools SubPoolsStorage (r:1 w:1) - // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Ledger (r:1 w:1) // Storage: Staking SlashingSpans (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) @@ -410,29 +441,32 @@ impl WeightInfo for () { // Storage: NominationPools RewardPools (r:1 w:1) // Storage: NominationPools CounterForRewardPools (r:1 w:1) // Storage: NominationPools CounterForSubPoolsStorage (r:1 w:1) + // Storage: NominationPools Metadata (r:1 w:1) // Storage: NominationPools CounterForBondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(_s: u32, ) -> Weight { - Weight::from_ref_time(139_849_000 as u64) - .saturating_add(RocksDbWeight::get().reads(19 as u64)) - .saturating_add(RocksDbWeight::get().writes(16 as u64)) + fn withdraw_unbonded_kill(s: u32, ) -> Weight { + // Minimum execution time: 168_270 nanoseconds. + Weight::from_ref_time(170_059_380 as u64) + // Standard Error: 1_506 + .saturating_add(Weight::from_ref_time(1_258 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(20 as u64)) + .saturating_add(RocksDbWeight::get().writes(17 as u64)) } + // Storage: NominationPools LastPoolId (r:1 w:1) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: NominationPools MinCreateBond (r:1 w:0) // Storage: NominationPools MinJoinBond (r:1 w:0) // Storage: NominationPools MaxPools (r:1 w:0) // Storage: NominationPools CounterForBondedPools (r:1 w:1) // Storage: NominationPools PoolMembers (r:1 w:1) - // Storage: NominationPools LastPoolId (r:1 w:1) // Storage: NominationPools MaxPoolMembersPerPool (r:1 w:0) // Storage: NominationPools MaxPoolMembers (r:1 w:0) // Storage: NominationPools CounterForPoolMembers (r:1 w:1) // Storage: System Account (r:2 w:2) - // Storage: Staking Ledger (r:1 w:1) // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Ledger (r:1 w:1) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking HistoryDepth (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: NominationPools RewardPools (r:1 w:1) // Storage: NominationPools CounterForRewardPools (r:1 w:1) @@ -441,36 +475,40 @@ impl WeightInfo for () { // Storage: NominationPools BondedPools (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn create() -> Weight { - Weight::from_ref_time(126_246_000 as u64) - .saturating_add(RocksDbWeight::get().reads(22 as u64)) + // Minimum execution time: 146_153 nanoseconds. 
+ Weight::from_ref_time(146_955_000 as u64) + .saturating_add(RocksDbWeight::get().reads(21 as u64)) .saturating_add(RocksDbWeight::get().writes(15 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking MaxNominatorsCount (r:1 w:0) // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) - // Storage: Staking Bonded (r:1 w:0) - // Storage: BagsList ListNodes (r:1 w:1) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:1 w:1) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - Weight::from_ref_time(48_829_000 as u64) - // Standard Error: 10_000 - .saturating_add(Weight::from_ref_time(2_204_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 71_380 nanoseconds. + Weight::from_ref_time(71_060_388 as u64) + // Standard Error: 2_587 + .saturating_add(Weight::from_ref_time(1_185_729 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(12 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: NominationPools BondedPools (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) fn set_state() -> Weight { - Weight::from_ref_time(26_761_000 as u64) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + // Minimum execution time: 46_275 nanoseconds. + Weight::from_ref_time(46_689_000 as u64) + .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) @@ -478,9 +516,10 @@ impl WeightInfo for () { // Storage: NominationPools CounterForMetadata (r:1 w:1) /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { - Weight::from_ref_time(14_519_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 19_246 nanoseconds. + Weight::from_ref_time(20_415_018 as u64) + // Standard Error: 95 + .saturating_add(Weight::from_ref_time(2_040 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -490,26 +529,30 @@ impl WeightInfo for () { // Storage: NominationPools MinCreateBond (r:0 w:1) // Storage: NominationPools MaxPools (r:0 w:1) fn set_configs() -> Weight { - Weight::from_ref_time(6_173_000 as u64) + // Minimum execution time: 9_231 nanoseconds. + Weight::from_ref_time(9_526_000 as u64) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } // Storage: NominationPools BondedPools (r:1 w:1) fn update_roles() -> Weight { - Weight::from_ref_time(22_261_000 as u64) + // Minimum execution time: 31_246 nanoseconds. 
+ Weight::from_ref_time(31_762_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: NominationPools BondedPools (r:1 w:0) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: BagsList ListNodes (r:1 w:1) - // Storage: BagsList ListBags (r:1 w:1) - // Storage: BagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:1 w:1) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - Weight::from_ref_time(47_959_000 as u64) - .saturating_add(RocksDbWeight::get().reads(8 as u64)) + // Minimum execution time: 73_812 nanoseconds. + Weight::from_ref_time(74_790_000 as u64) + .saturating_add(RocksDbWeight::get().reads(9 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } } diff --git a/frame/preimage/src/weights.rs b/frame/preimage/src/weights.rs index 186c41b798c6b..e73c986891ccd 100644 --- a/frame/preimage/src/weights.rs +++ b/frame/preimage/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,23 +18,24 @@ //! Autogenerated weights for pallet_preimage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_preimage // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --pallet=pallet_preimage -// --chain=dev // --output=./frame/preimage/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -67,9 +68,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Preimage PreimageFor (r:0 w:1) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(32_591_000 as u64) + // Minimum execution time: 33_810 nanoseconds. + Weight::from_ref_time(34_299_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_703 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -77,9 +79,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Preimage PreimageFor (r:0 w:1) /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(23_350_000 as u64) + // Minimum execution time: 24_398 nanoseconds. 
+ Weight::from_ref_time(24_839_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_681 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_702 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -87,66 +90,76 @@ impl WeightInfo for SubstrateWeight { // Storage: Preimage PreimageFor (r:0 w:1) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(21_436_000 as u64) + // Minimum execution time: 22_235 nanoseconds. + Weight::from_ref_time(22_473_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_703 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - Weight::from_ref_time(44_567_000 as u64) + // Minimum execution time: 43_241 nanoseconds. + Weight::from_ref_time(44_470_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - Weight::from_ref_time(30_065_000 as u64) + // Minimum execution time: 29_529 nanoseconds. + Weight::from_ref_time(30_364_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - Weight::from_ref_time(28_470_000 as u64) + // Minimum execution time: 28_914 nanoseconds. + Weight::from_ref_time(30_103_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - Weight::from_ref_time(14_601_000 as u64) + // Minimum execution time: 14_479 nanoseconds. + Weight::from_ref_time(15_244_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - Weight::from_ref_time(20_121_000 as u64) + // Minimum execution time: 20_171 nanoseconds. + Weight::from_ref_time(20_806_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - Weight::from_ref_time(9_440_000 as u64) + // Minimum execution time: 9_756 nanoseconds. + Weight::from_ref_time(10_115_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - Weight::from_ref_time(29_013_000 as u64) + // Minimum execution time: 28_379 nanoseconds. + Weight::from_ref_time(29_778_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_unnoted_preimage() -> Weight { - Weight::from_ref_time(9_223_000 as u64) + // Minimum execution time: 9_595 nanoseconds. 
+ Weight::from_ref_time(9_888_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - Weight::from_ref_time(9_252_000 as u64) + // Minimum execution time: 9_642 nanoseconds. + Weight::from_ref_time(9_985_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -158,9 +171,10 @@ impl WeightInfo for () { // Storage: Preimage PreimageFor (r:0 w:1) /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(32_591_000 as u64) + // Minimum execution time: 33_810 nanoseconds. + Weight::from_ref_time(34_299_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_703 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -168,9 +182,10 @@ impl WeightInfo for () { // Storage: Preimage PreimageFor (r:0 w:1) /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(23_350_000 as u64) + // Minimum execution time: 24_398 nanoseconds. + Weight::from_ref_time(24_839_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_681 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_702 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -178,66 +193,76 @@ impl WeightInfo for () { // Storage: Preimage PreimageFor (r:0 w:1) /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(21_436_000 as u64) + // Minimum execution time: 22_235 nanoseconds. + Weight::from_ref_time(22_473_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(Weight::from_ref_time(1_703 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - Weight::from_ref_time(44_567_000 as u64) + // Minimum execution time: 43_241 nanoseconds. + Weight::from_ref_time(44_470_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - Weight::from_ref_time(30_065_000 as u64) + // Minimum execution time: 29_529 nanoseconds. + Weight::from_ref_time(30_364_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - Weight::from_ref_time(28_470_000 as u64) + // Minimum execution time: 28_914 nanoseconds. + Weight::from_ref_time(30_103_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - Weight::from_ref_time(14_601_000 as u64) + // Minimum execution time: 14_479 nanoseconds. 
+ Weight::from_ref_time(15_244_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - Weight::from_ref_time(20_121_000 as u64) + // Minimum execution time: 20_171 nanoseconds. + Weight::from_ref_time(20_806_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - Weight::from_ref_time(9_440_000 as u64) + // Minimum execution time: 9_756 nanoseconds. + Weight::from_ref_time(10_115_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - Weight::from_ref_time(29_013_000 as u64) + // Minimum execution time: 28_379 nanoseconds. + Weight::from_ref_time(29_778_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_unnoted_preimage() -> Weight { - Weight::from_ref_time(9_223_000 as u64) + // Minimum execution time: 9_595 nanoseconds. + Weight::from_ref_time(9_888_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - Weight::from_ref_time(9_252_000 as u64) + // Minimum execution time: 9_642 nanoseconds. + Weight::from_ref_time(9_985_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/proxy/src/weights.rs b/frame/proxy/src/weights.rs index 25457d52f4391..706810d3402ec 100644 --- a/frame/proxy/src/weights.rs +++ b/frame/proxy/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_proxy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/proxy/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -60,96 +63,120 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Proxy Proxies (r:1 w:0) + /// The range of component `p` is `[1, 31]`. 
fn proxy(p: u32, ) -> Weight { - Weight::from_ref_time(17_768_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(76_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 24_285 nanoseconds. + Weight::from_ref_time(25_355_667 as u64) + // Standard Error: 1_468 + .saturating_add(Weight::from_ref_time(38_185 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(35_682_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(a as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 44_948 nanoseconds. + Weight::from_ref_time(44_762_064 as u64) + // Standard Error: 1_778 + .saturating_add(Weight::from_ref_time(118_940 as u64).saturating_mul(a as u64)) + // Standard Error: 1_837 + .saturating_add(Weight::from_ref_time(51_232 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(25_586_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(175_000 as u64).saturating_mul(a as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(18_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 31_274 nanoseconds. + Weight::from_ref_time(32_219_165 as u64) + // Standard Error: 1_832 + .saturating_add(Weight::from_ref_time(132_454 as u64).saturating_mul(a as u64)) + // Standard Error: 1_893 + .saturating_add(Weight::from_ref_time(9_077 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(25_794_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as u64).saturating_mul(a as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(13_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 31_219 nanoseconds. + Weight::from_ref_time(32_439_563 as u64) + // Standard Error: 1_829 + .saturating_add(Weight::from_ref_time(120_251 as u64).saturating_mul(a as u64)) + // Standard Error: 1_890 + .saturating_add(Weight::from_ref_time(8_689 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. 
fn announce(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_002_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(163_000 as u64).saturating_mul(a as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(79_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 40_388 nanoseconds. + Weight::from_ref_time(40_718_245 as u64) + // Standard Error: 1_821 + .saturating_add(Weight::from_ref_time(129_674 as u64).saturating_mul(a as u64)) + // Standard Error: 1_882 + .saturating_add(Weight::from_ref_time(56_001 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Proxy Proxies (r:1 w:1) + /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { - Weight::from_ref_time(28_166_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(105_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 33_997 nanoseconds. + Weight::from_ref_time(34_840_036 as u64) + // Standard Error: 1_659 + .saturating_add(Weight::from_ref_time(71_349 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) + /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { - Weight::from_ref_time(28_128_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(118_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 33_900 nanoseconds. + Weight::from_ref_time(35_069_110 as u64) + // Standard Error: 1_848 + .saturating_add(Weight::from_ref_time(82_380 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) + /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { - Weight::from_ref_time(24_066_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(81_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 29_627 nanoseconds. + Weight::from_ref_time(30_641_642 as u64) + // Standard Error: 1_495 + .saturating_add(Weight::from_ref_time(51_919 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) + /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { - Weight::from_ref_time(31_077_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(37_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 37_761 nanoseconds. + Weight::from_ref_time(38_748_697 as u64) + // Standard Error: 1_594 + .saturating_add(Weight::from_ref_time(19_022 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) + /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { - Weight::from_ref_time(24_657_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 31_145 nanoseconds. 
+ Weight::from_ref_time(31_933_568 as u64) + // Standard Error: 1_492 + .saturating_add(Weight::from_ref_time(50_250 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -158,96 +185,120 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { // Storage: Proxy Proxies (r:1 w:0) + /// The range of component `p` is `[1, 31]`. fn proxy(p: u32, ) -> Weight { - Weight::from_ref_time(17_768_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(76_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 24_285 nanoseconds. + Weight::from_ref_time(25_355_667 as u64) + // Standard Error: 1_468 + .saturating_add(Weight::from_ref_time(38_185 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. fn proxy_announced(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(35_682_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(158_000 as u64).saturating_mul(a as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 44_948 nanoseconds. + Weight::from_ref_time(44_762_064 as u64) + // Standard Error: 1_778 + .saturating_add(Weight::from_ref_time(118_940 as u64).saturating_mul(a as u64)) + // Standard Error: 1_837 + .saturating_add(Weight::from_ref_time(51_232 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. fn remove_announcement(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(25_586_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(175_000 as u64).saturating_mul(a as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(18_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 31_274 nanoseconds. + Weight::from_ref_time(32_219_165 as u64) + // Standard Error: 1_832 + .saturating_add(Weight::from_ref_time(132_454 as u64).saturating_mul(a as u64)) + // Standard Error: 1_893 + .saturating_add(Weight::from_ref_time(9_077 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. fn reject_announcement(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(25_794_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(173_000 as u64).saturating_mul(a as u64)) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(13_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 31_219 nanoseconds. 
+ Weight::from_ref_time(32_439_563 as u64) + // Standard Error: 1_829 + .saturating_add(Weight::from_ref_time(120_251 as u64).saturating_mul(a as u64)) + // Standard Error: 1_890 + .saturating_add(Weight::from_ref_time(8_689 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Proxy Proxies (r:1 w:0) // Storage: Proxy Announcements (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. fn announce(a: u32, p: u32, ) -> Weight { - Weight::from_ref_time(33_002_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(163_000 as u64).saturating_mul(a as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(79_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 40_388 nanoseconds. + Weight::from_ref_time(40_718_245 as u64) + // Standard Error: 1_821 + .saturating_add(Weight::from_ref_time(129_674 as u64).saturating_mul(a as u64)) + // Standard Error: 1_882 + .saturating_add(Weight::from_ref_time(56_001 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Proxy Proxies (r:1 w:1) + /// The range of component `p` is `[1, 31]`. fn add_proxy(p: u32, ) -> Weight { - Weight::from_ref_time(28_166_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(105_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 33_997 nanoseconds. + Weight::from_ref_time(34_840_036 as u64) + // Standard Error: 1_659 + .saturating_add(Weight::from_ref_time(71_349 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) + /// The range of component `p` is `[1, 31]`. fn remove_proxy(p: u32, ) -> Weight { - Weight::from_ref_time(28_128_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(118_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 33_900 nanoseconds. + Weight::from_ref_time(35_069_110 as u64) + // Standard Error: 1_848 + .saturating_add(Weight::from_ref_time(82_380 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) + /// The range of component `p` is `[1, 31]`. fn remove_proxies(p: u32, ) -> Weight { - Weight::from_ref_time(24_066_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(81_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 29_627 nanoseconds. + Weight::from_ref_time(30_641_642 as u64) + // Standard Error: 1_495 + .saturating_add(Weight::from_ref_time(51_919 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: Proxy Proxies (r:1 w:1) + /// The range of component `p` is `[1, 31]`. fn create_pure(p: u32, ) -> Weight { - Weight::from_ref_time(31_077_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(37_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 37_761 nanoseconds. 
+ Weight::from_ref_time(38_748_697 as u64) + // Standard Error: 1_594 + .saturating_add(Weight::from_ref_time(19_022 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Proxy Proxies (r:1 w:1) + /// The range of component `p` is `[0, 30]`. fn kill_pure(p: u32, ) -> Weight { - Weight::from_ref_time(24_657_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 31_145 nanoseconds. + Weight::from_ref_time(31_933_568 as u64) + // Standard Error: 1_492 + .saturating_add(Weight::from_ref_time(50_250 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/ranked-collective/src/weights.rs b/frame/ranked-collective/src/weights.rs index 6b6cca52316c9..c054d200452e8 100644 --- a/frame/ranked-collective/src/weights.rs +++ b/frame/ranked-collective/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,23 +18,25 @@ //! Autogenerated weights for pallet_ranked_collective //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-19, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /Users/gav/Core/substrate/target/release/substrate +// ./target/production/substrate // benchmark // pallet -// --pallet -// pallet-ranked-collective -// --extrinsic=* // --chain=dev // --steps=50 // --repeat=20 -// --output=../../../frame/ranked-collective/src/weights.rs -// --template=../../../.maintain/frame-weight-template.hbs -// --header=../../../HEADER-APACHE2 -// --record-proof +// --pallet=pallet_ranked_collective +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/ranked-collective/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -61,7 +63,8 @@ impl WeightInfo for SubstrateWeight { // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn add_member() -> Weight { - Weight::from_ref_time(11_000_000 as u64) + // Minimum execution time: 24_344 nanoseconds. + Weight::from_ref_time(24_856_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -69,10 +72,12 @@ impl WeightInfo for SubstrateWeight { // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) + /// The range of component `r` is `[0, 10]`. 
fn remove_member(r: u32, ) -> Weight { - Weight::from_ref_time(16_855_000 as u64) - // Standard Error: 27_000 - .saturating_add(Weight::from_ref_time(8_107_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 36_881 nanoseconds. + Weight::from_ref_time(39_284_238 as u64) + // Standard Error: 16_355 + .saturating_add(Weight::from_ref_time(11_385_424 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -82,10 +87,12 @@ impl WeightInfo for SubstrateWeight { // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) + /// The range of component `r` is `[0, 10]`. fn promote_member(r: u32, ) -> Weight { - Weight::from_ref_time(11_936_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(9_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 27_444 nanoseconds. + Weight::from_ref_time(28_576_394 as u64) + // Standard Error: 4_818 + .saturating_add(Weight::from_ref_time(519_056 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -93,10 +100,12 @@ impl WeightInfo for SubstrateWeight { // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) + /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { - Weight::from_ref_time(17_582_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 36_539 nanoseconds. + Weight::from_ref_time(39_339_893 as u64) + // Standard Error: 16_526 + .saturating_add(Weight::from_ref_time(807_457 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -105,17 +114,21 @@ impl WeightInfo for SubstrateWeight { // Storage: RankedCollective Voting (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote() -> Weight { - Weight::from_ref_time(22_000_000 as u64) + // Minimum execution time: 50_548 nanoseconds. + Weight::from_ref_time(51_276_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: RankedPolls ReferendumInfoFor (r:1 w:0) - // Storage: RankedCollective Voting (r:0 w:1) + // Storage: RankedCollective VotingCleanup (r:1 w:0) + // Storage: RankedCollective Voting (r:0 w:2) + /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { - Weight::from_ref_time(6_188_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(867_000 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) + // Minimum execution time: 16_222 nanoseconds. 
+ Weight::from_ref_time(22_982_955 as u64) + // Standard Error: 3_863 + .saturating_add(Weight::from_ref_time(1_074_054 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } } @@ -127,7 +140,8 @@ impl WeightInfo for () { // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) fn add_member() -> Weight { - Weight::from_ref_time(11_000_000 as u64) + // Minimum execution time: 24_344 nanoseconds. + Weight::from_ref_time(24_856_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -135,10 +149,12 @@ impl WeightInfo for () { // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) + /// The range of component `r` is `[0, 10]`. fn remove_member(r: u32, ) -> Weight { - Weight::from_ref_time(16_855_000 as u64) - // Standard Error: 27_000 - .saturating_add(Weight::from_ref_time(8_107_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 36_881 nanoseconds. + Weight::from_ref_time(39_284_238 as u64) + // Standard Error: 16_355 + .saturating_add(Weight::from_ref_time(11_385_424 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(r as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -148,10 +164,12 @@ impl WeightInfo for () { // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IndexToId (r:0 w:1) // Storage: RankedCollective IdToIndex (r:0 w:1) + /// The range of component `r` is `[0, 10]`. fn promote_member(r: u32, ) -> Weight { - Weight::from_ref_time(11_936_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(9_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 27_444 nanoseconds. + Weight::from_ref_time(28_576_394 as u64) + // Standard Error: 4_818 + .saturating_add(Weight::from_ref_time(519_056 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -159,10 +177,12 @@ impl WeightInfo for () { // Storage: RankedCollective MemberCount (r:1 w:1) // Storage: RankedCollective IdToIndex (r:1 w:1) // Storage: RankedCollective IndexToId (r:1 w:1) + /// The range of component `r` is `[0, 10]`. fn demote_member(r: u32, ) -> Weight { - Weight::from_ref_time(17_582_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 36_539 nanoseconds. + Weight::from_ref_time(39_339_893 as u64) + // Standard Error: 16_526 + .saturating_add(Weight::from_ref_time(807_457 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -171,17 +191,21 @@ impl WeightInfo for () { // Storage: RankedCollective Voting (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn vote() -> Weight { - Weight::from_ref_time(22_000_000 as u64) + // Minimum execution time: 50_548 nanoseconds. 
+ Weight::from_ref_time(51_276_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: RankedPolls ReferendumInfoFor (r:1 w:0) - // Storage: RankedCollective Voting (r:0 w:1) + // Storage: RankedCollective VotingCleanup (r:1 w:0) + // Storage: RankedCollective Voting (r:0 w:2) + /// The range of component `n` is `[0, 100]`. fn cleanup_poll(n: u32, ) -> Weight { - Weight::from_ref_time(6_188_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(867_000 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) + // Minimum execution time: 16_222 nanoseconds. + Weight::from_ref_time(22_982_955 as u64) + // Standard Error: 3_863 + .saturating_add(Weight::from_ref_time(1_074_054 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } } diff --git a/frame/recovery/src/weights.rs b/frame/recovery/src/weights.rs index b496f16a46b28..39a8d09a38261 100644 --- a/frame/recovery/src/weights.rs +++ b/frame/recovery/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_recovery //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/recovery/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -60,69 +63,83 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Recovery Proxy (r:1 w:0) fn as_recovered() -> Weight { - Weight::from_ref_time(6_579_000 as u64) + // Minimum execution time: 10_672 nanoseconds. + Weight::from_ref_time(10_946_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) } // Storage: Recovery Proxy (r:0 w:1) fn set_recovered() -> Weight { - Weight::from_ref_time(13_402_000 as u64) + // Minimum execution time: 17_092 nanoseconds. + Weight::from_ref_time(17_660_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:1) + /// The range of component `n` is `[1, 9]`. fn create_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(28_217_000 as u64) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(172_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 32_800 nanoseconds. 
+ Weight::from_ref_time(33_769_078 as u64) + // Standard Error: 4_075 + .saturating_add(Weight::from_ref_time(252_382 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn initiate_recovery() -> Weight { - Weight::from_ref_time(34_082_000 as u64) + // Minimum execution time: 39_224 nanoseconds. + Weight::from_ref_time(39_663_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) + /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(22_038_000 as u64) - // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(307_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 27_158 nanoseconds. + Weight::from_ref_time(28_130_506 as u64) + // Standard Error: 4_523 + .saturating_add(Weight::from_ref_time(321_436 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Proxy (r:1 w:1) + /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(28_621_000 as u64) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(353_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 36_269 nanoseconds. + Weight::from_ref_time(36_966_173 as u64) + // Standard Error: 5_016 + .saturating_add(Weight::from_ref_time(223_069 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery ActiveRecoveries (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(33_287_000 as u64) - // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(264_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 40_213 nanoseconds. + Weight::from_ref_time(41_140_968 as u64) + // Standard Error: 3_822 + .saturating_add(Weight::from_ref_time(163_217 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Recoverable (r:1 w:1) + /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(31_964_000 as u64) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(222_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 38_740 nanoseconds. + Weight::from_ref_time(39_710_400 as u64) + // Standard Error: 5_554 + .saturating_add(Weight::from_ref_time(224_200 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Recovery Proxy (r:1 w:1) fn cancel_recovered() -> Weight { - Weight::from_ref_time(12_702_000 as u64) + // Minimum execution time: 20_316 nanoseconds. 
+ Weight::from_ref_time(20_912_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -132,69 +149,83 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Recovery Proxy (r:1 w:0) fn as_recovered() -> Weight { - Weight::from_ref_time(6_579_000 as u64) + // Minimum execution time: 10_672 nanoseconds. + Weight::from_ref_time(10_946_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } // Storage: Recovery Proxy (r:0 w:1) fn set_recovered() -> Weight { - Weight::from_ref_time(13_402_000 as u64) + // Minimum execution time: 17_092 nanoseconds. + Weight::from_ref_time(17_660_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:1) + /// The range of component `n` is `[1, 9]`. fn create_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(28_217_000 as u64) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(172_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 32_800 nanoseconds. + Weight::from_ref_time(33_769_078 as u64) + // Standard Error: 4_075 + .saturating_add(Weight::from_ref_time(252_382 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) fn initiate_recovery() -> Weight { - Weight::from_ref_time(34_082_000 as u64) + // Minimum execution time: 39_224 nanoseconds. + Weight::from_ref_time(39_663_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:1) + /// The range of component `n` is `[1, 9]`. fn vouch_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(22_038_000 as u64) - // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(307_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 27_158 nanoseconds. + Weight::from_ref_time(28_130_506 as u64) + // Standard Error: 4_523 + .saturating_add(Weight::from_ref_time(321_436 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery Recoverable (r:1 w:0) // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Proxy (r:1 w:1) + /// The range of component `n` is `[1, 9]`. fn claim_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(28_621_000 as u64) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(353_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 36_269 nanoseconds. + Weight::from_ref_time(36_966_173 as u64) + // Standard Error: 5_016 + .saturating_add(Weight::from_ref_time(223_069 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery ActiveRecoveries (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `n` is `[1, 9]`. fn close_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(33_287_000 as u64) - // Standard Error: 19_000 - .saturating_add(Weight::from_ref_time(264_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 40_213 nanoseconds. 
+ Weight::from_ref_time(41_140_968 as u64) + // Standard Error: 3_822 + .saturating_add(Weight::from_ref_time(163_217 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Recovery ActiveRecoveries (r:1 w:0) // Storage: Recovery Recoverable (r:1 w:1) + /// The range of component `n` is `[1, 9]`. fn remove_recovery(n: u32, ) -> Weight { - Weight::from_ref_time(31_964_000 as u64) - // Standard Error: 13_000 - .saturating_add(Weight::from_ref_time(222_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 38_740 nanoseconds. + Weight::from_ref_time(39_710_400 as u64) + // Standard Error: 5_554 + .saturating_add(Weight::from_ref_time(224_200 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Recovery Proxy (r:1 w:1) fn cancel_recovered() -> Weight { - Weight::from_ref_time(12_702_000 as u64) + // Minimum execution time: 20_316 nanoseconds. + Weight::from_ref_time(20_912_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/referenda/src/weights.rs b/frame/referenda/src/weights.rs index 50f8aa41b30aa..d8609abb9fe80 100644 --- a/frame/referenda/src/weights.rs +++ b/frame/referenda/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_referenda //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/referenda/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -80,14 +83,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Scheduler Agenda (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:0 w:1) fn submit() -> Weight { - Weight::from_ref_time(34_640_000 as u64) + // Minimum execution time: 41_475 nanoseconds. + Weight::from_ref_time(42_153_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_preparing() -> Weight { - Weight::from_ref_time(44_290_000 as u64) + // Minimum execution time: 52_291 nanoseconds. 
+ Weight::from_ref_time(53_147_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -95,7 +100,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_queued() -> Weight { - Weight::from_ref_time(49_428_000 as u64) + // Minimum execution time: 57_322 nanoseconds. + Weight::from_ref_time(58_145_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -103,7 +109,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_not_queued() -> Weight { - Weight::from_ref_time(50_076_000 as u64) + // Minimum execution time: 57_170 nanoseconds. + Weight::from_ref_time(58_012_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -111,7 +118,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_passing() -> Weight { - Weight::from_ref_time(55_935_000 as u64) + // Minimum execution time: 67_805 nanoseconds. + Weight::from_ref_time(68_844_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -119,34 +127,39 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_failing() -> Weight { - Weight::from_ref_time(52_921_000 as u64) + // Minimum execution time: 63_408 nanoseconds. + Weight::from_ref_time(64_049_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn refund_decision_deposit() -> Weight { - Weight::from_ref_time(29_160_000 as u64) + // Minimum execution time: 36_639 nanoseconds. + Weight::from_ref_time(37_329_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn cancel() -> Weight { - Weight::from_ref_time(34_972_000 as u64) + // Minimum execution time: 42_442 nanoseconds. + Weight::from_ref_time(43_006_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn kill() -> Weight { - Weight::from_ref_time(60_620_000 as u64) + // Minimum execution time: 74_681 nanoseconds. + Weight::from_ref_time(75_567_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda TrackQueue (r:1 w:0) // Storage: Referenda DecidingCount (r:1 w:1) fn one_fewer_deciding_queue_empty() -> Weight { - Weight::from_ref_time(9_615_000 as u64) + // Minimum execution time: 14_262 nanoseconds. 
+ Weight::from_ref_time(14_504_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -154,7 +167,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_failing() -> Weight { - Weight::from_ref_time(113_077_000 as u64) + // Minimum execution time: 88_618 nanoseconds. + Weight::from_ref_time(89_443_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -162,7 +176,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_passing() -> Weight { - Weight::from_ref_time(114_376_000 as u64) + // Minimum execution time: 89_784 nanoseconds. + Weight::from_ref_time(90_619_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -170,7 +185,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_insertion() -> Weight { - Weight::from_ref_time(43_901_000 as u64) + // Minimum execution time: 73_179 nanoseconds. + Weight::from_ref_time(74_025_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -178,7 +194,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_slide() -> Weight { - Weight::from_ref_time(43_279_000 as u64) + // Minimum execution time: 73_168 nanoseconds. + Weight::from_ref_time(73_769_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -187,7 +204,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_queued() -> Weight { - Weight::from_ref_time(45_564_000 as u64) + // Minimum execution time: 75_027 nanoseconds. + Weight::from_ref_time(76_220_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -196,27 +214,31 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_not_queued() -> Weight { - Weight::from_ref_time(45_061_000 as u64) + // Minimum execution time: 74_815 nanoseconds. + Weight::from_ref_time(75_803_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_no_deposit() -> Weight { - Weight::from_ref_time(23_757_000 as u64) + // Minimum execution time: 31_877 nanoseconds. + Weight::from_ref_time(32_236_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_preparing() -> Weight { - Weight::from_ref_time(24_781_000 as u64) + // Minimum execution time: 33_322 nanoseconds. 
+ Weight::from_ref_time(33_762_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn nudge_referendum_timed_out() -> Weight { - Weight::from_ref_time(18_344_000 as u64) + // Minimum execution time: 25_393 nanoseconds. + Weight::from_ref_time(25_913_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -224,7 +246,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_failing() -> Weight { - Weight::from_ref_time(34_752_000 as u64) + // Minimum execution time: 47_114 nanoseconds. + Weight::from_ref_time(47_586_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -232,51 +255,57 @@ impl WeightInfo for SubstrateWeight { // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_passing() -> Weight { - Weight::from_ref_time(37_055_000 as u64) + // Minimum execution time: 48_443 nanoseconds. + Weight::from_ref_time(50_003_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_confirming() -> Weight { - Weight::from_ref_time(31_442_000 as u64) + // Minimum execution time: 44_556 nanoseconds. + Weight::from_ref_time(45_167_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_end_confirming() -> Weight { - Weight::from_ref_time(33_201_000 as u64) + // Minimum execution time: 45_474 nanoseconds. + Weight::from_ref_time(46_105_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_not_confirming() -> Weight { - Weight::from_ref_time(30_047_000 as u64) + // Minimum execution time: 42_795 nanoseconds. + Weight::from_ref_time(43_123_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_confirming() -> Weight { - Weight::from_ref_time(29_195_000 as u64) + // Minimum execution time: 41_928 nanoseconds. + Weight::from_ref_time(42_272_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) fn nudge_referendum_approved() -> Weight { - Weight::from_ref_time(50_119_000 as u64) - .saturating_add(T::DbWeight::get().reads(5 as u64)) - .saturating_add(T::DbWeight::get().writes(5 as u64)) + // Minimum execution time: 55_186 nanoseconds. 
+ Weight::from_ref_time(55_714_000 as u64) + .saturating_add(T::DbWeight::get().reads(4 as u64)) + .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_rejected() -> Weight { - Weight::from_ref_time(32_203_000 as u64) + // Minimum execution time: 44_892 nanoseconds. + Weight::from_ref_time(45_353_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -288,14 +317,16 @@ impl WeightInfo for () { // Storage: Scheduler Agenda (r:1 w:1) // Storage: Referenda ReferendumInfoFor (r:0 w:1) fn submit() -> Weight { - Weight::from_ref_time(34_640_000 as u64) + // Minimum execution time: 41_475 nanoseconds. + Weight::from_ref_time(42_153_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_preparing() -> Weight { - Weight::from_ref_time(44_290_000 as u64) + // Minimum execution time: 52_291 nanoseconds. + Weight::from_ref_time(53_147_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -303,7 +334,8 @@ impl WeightInfo for () { // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_queued() -> Weight { - Weight::from_ref_time(49_428_000 as u64) + // Minimum execution time: 57_322 nanoseconds. + Weight::from_ref_time(58_145_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -311,7 +343,8 @@ impl WeightInfo for () { // Storage: Referenda DecidingCount (r:1 w:0) // Storage: Referenda TrackQueue (r:1 w:1) fn place_decision_deposit_not_queued() -> Weight { - Weight::from_ref_time(50_076_000 as u64) + // Minimum execution time: 57_170 nanoseconds. + Weight::from_ref_time(58_012_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -319,7 +352,8 @@ impl WeightInfo for () { // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_passing() -> Weight { - Weight::from_ref_time(55_935_000 as u64) + // Minimum execution time: 67_805 nanoseconds. + Weight::from_ref_time(68_844_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -327,34 +361,39 @@ impl WeightInfo for () { // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn place_decision_deposit_failing() -> Weight { - Weight::from_ref_time(52_921_000 as u64) + // Minimum execution time: 63_408 nanoseconds. + Weight::from_ref_time(64_049_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn refund_decision_deposit() -> Weight { - Weight::from_ref_time(29_160_000 as u64) + // Minimum execution time: 36_639 nanoseconds. 
+ Weight::from_ref_time(37_329_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn cancel() -> Weight { - Weight::from_ref_time(34_972_000 as u64) + // Minimum execution time: 42_442 nanoseconds. + Weight::from_ref_time(43_006_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn kill() -> Weight { - Weight::from_ref_time(60_620_000 as u64) + // Minimum execution time: 74_681 nanoseconds. + Weight::from_ref_time(75_567_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda TrackQueue (r:1 w:0) // Storage: Referenda DecidingCount (r:1 w:1) fn one_fewer_deciding_queue_empty() -> Weight { - Weight::from_ref_time(9_615_000 as u64) + // Minimum execution time: 14_262 nanoseconds. + Weight::from_ref_time(14_504_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -362,7 +401,8 @@ impl WeightInfo for () { // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_failing() -> Weight { - Weight::from_ref_time(113_077_000 as u64) + // Minimum execution time: 88_618 nanoseconds. + Weight::from_ref_time(89_443_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -370,7 +410,8 @@ impl WeightInfo for () { // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) fn one_fewer_deciding_passing() -> Weight { - Weight::from_ref_time(114_376_000 as u64) + // Minimum execution time: 89_784 nanoseconds. + Weight::from_ref_time(90_619_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -378,7 +419,8 @@ impl WeightInfo for () { // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_insertion() -> Weight { - Weight::from_ref_time(43_901_000 as u64) + // Minimum execution time: 73_179 nanoseconds. + Weight::from_ref_time(74_025_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -386,7 +428,8 @@ impl WeightInfo for () { // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_requeued_slide() -> Weight { - Weight::from_ref_time(43_279_000 as u64) + // Minimum execution time: 73_168 nanoseconds. + Weight::from_ref_time(73_769_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -395,7 +438,8 @@ impl WeightInfo for () { // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_queued() -> Weight { - Weight::from_ref_time(45_564_000 as u64) + // Minimum execution time: 75_027 nanoseconds. 
+ Weight::from_ref_time(76_220_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -404,27 +448,31 @@ impl WeightInfo for () { // Storage: Referenda TrackQueue (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_not_queued() -> Weight { - Weight::from_ref_time(45_061_000 as u64) + // Minimum execution time: 74_815 nanoseconds. + Weight::from_ref_time(75_803_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_no_deposit() -> Weight { - Weight::from_ref_time(23_757_000 as u64) + // Minimum execution time: 31_877 nanoseconds. + Weight::from_ref_time(32_236_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_preparing() -> Weight { - Weight::from_ref_time(24_781_000 as u64) + // Minimum execution time: 33_322 nanoseconds. + Weight::from_ref_time(33_762_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) fn nudge_referendum_timed_out() -> Weight { - Weight::from_ref_time(18_344_000 as u64) + // Minimum execution time: 25_393 nanoseconds. + Weight::from_ref_time(25_913_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -432,7 +480,8 @@ impl WeightInfo for () { // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_failing() -> Weight { - Weight::from_ref_time(34_752_000 as u64) + // Minimum execution time: 47_114 nanoseconds. + Weight::from_ref_time(47_586_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -440,51 +489,57 @@ impl WeightInfo for () { // Storage: Referenda DecidingCount (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_deciding_passing() -> Weight { - Weight::from_ref_time(37_055_000 as u64) + // Minimum execution time: 48_443 nanoseconds. + Weight::from_ref_time(50_003_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_begin_confirming() -> Weight { - Weight::from_ref_time(31_442_000 as u64) + // Minimum execution time: 44_556 nanoseconds. + Weight::from_ref_time(45_167_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_end_confirming() -> Weight { - Weight::from_ref_time(33_201_000 as u64) + // Minimum execution time: 45_474 nanoseconds. + Weight::from_ref_time(46_105_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_not_confirming() -> Weight { - Weight::from_ref_time(30_047_000 as u64) + // Minimum execution time: 42_795 nanoseconds. 
+ Weight::from_ref_time(43_123_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_continue_confirming() -> Weight { - Weight::from_ref_time(29_195_000 as u64) + // Minimum execution time: 41_928 nanoseconds. + Weight::from_ref_time(42_272_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) fn nudge_referendum_approved() -> Weight { - Weight::from_ref_time(50_119_000 as u64) - .saturating_add(RocksDbWeight::get().reads(5 as u64)) - .saturating_add(RocksDbWeight::get().writes(5 as u64)) + // Minimum execution time: 55_186 nanoseconds. + Weight::from_ref_time(55_714_000 as u64) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) + .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Referenda ReferendumInfoFor (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn nudge_referendum_rejected() -> Weight { - Weight::from_ref_time(32_203_000 as u64) + // Minimum execution time: 44_892 nanoseconds. + Weight::from_ref_time(45_353_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/remark/src/weights.rs b/frame/remark/src/weights.rs index 40e9e933aab2b..0d739657c852b 100644 --- a/frame/remark/src/weights.rs +++ b/frame/remark/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_remark //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/remark/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -51,10 +54,12 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + /// The range of component `l` is `[1, 1048576]`. fn store(l: u32, ) -> Weight { - Weight::from_ref_time(13_140_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(l as u64)) + // Minimum execution time: 17_017 nanoseconds. 
+ Weight::from_ref_time(8_269_935 as u64) + // Standard Error: 1 + .saturating_add(Weight::from_ref_time(1_407 as u64).saturating_mul(l as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) } } @@ -62,10 +67,12 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) + /// The range of component `l` is `[1, 1048576]`. fn store(l: u32, ) -> Weight { - Weight::from_ref_time(13_140_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(l as u64)) + // Minimum execution time: 17_017 nanoseconds. + Weight::from_ref_time(8_269_935 as u64) + // Standard Error: 1 + .saturating_add(Weight::from_ref_time(1_407 as u64).saturating_mul(l as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } } diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index cb72fe3e2fdda..5b86e7a143e7a 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,23 +18,24 @@ //! Autogenerated weights for pallet_scheduler //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_scheduler // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --pallet=pallet_scheduler -// --chain=dev // --output=./frame/scheduler/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -65,52 +66,61 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Scheduler IncompleteSince (r:1 w:1) fn service_agendas_base() -> Weight { - Weight::from_ref_time(4_992_000 as u64) + // Minimum execution time: 5_131 nanoseconds. + Weight::from_ref_time(5_286_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Scheduler Agenda (r:1 w:1) /// The range of component `s` is `[0, 512]`. fn service_agenda_base(s: u32, ) -> Weight { - Weight::from_ref_time(4_320_000 as u64) - // Standard Error: 619 - .saturating_add(Weight::from_ref_time(336_713 as u64).saturating_mul(s as u64)) + // Minimum execution time: 4_111 nanoseconds. 
+ Weight::from_ref_time(8_763_440 as u64) + // Standard Error: 783 + .saturating_add(Weight::from_ref_time(372_339 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } fn service_task_base() -> Weight { - Weight::from_ref_time(10_864_000 as u64) + // Minimum execution time: 10_880 nanoseconds. + Weight::from_ref_time(11_194_000 as u64) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { - Weight::from_ref_time(24_586_000 as u64) - // Standard Error: 1 - .saturating_add(Weight::from_ref_time(1_138 as u64).saturating_mul(s as u64)) + // Minimum execution time: 25_347 nanoseconds. + Weight::from_ref_time(25_717_000 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_128 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:0 w:1) fn service_task_named() -> Weight { - Weight::from_ref_time(13_127_000 as u64) + // Minimum execution time: 12_894 nanoseconds. + Weight::from_ref_time(13_108_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } fn service_task_periodic() -> Weight { - Weight::from_ref_time(11_053_000 as u64) + // Minimum execution time: 10_667 nanoseconds. + Weight::from_ref_time(10_908_000 as u64) } fn execute_dispatch_signed() -> Weight { - Weight::from_ref_time(4_158_000 as u64) + // Minimum execution time: 4_124 nanoseconds. + Weight::from_ref_time(4_680_000 as u64) } fn execute_dispatch_unsigned() -> Weight { - Weight::from_ref_time(4_104_000 as u64) + // Minimum execution time: 4_156 nanoseconds. + Weight::from_ref_time(4_361_000 as u64) } // Storage: Scheduler Agenda (r:1 w:1) /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { - Weight::from_ref_time(20_074_000 as u64) - // Standard Error: 765 - .saturating_add(Weight::from_ref_time(343_285 as u64).saturating_mul(s as u64)) + // Minimum execution time: 20_504 nanoseconds. + Weight::from_ref_time(27_066_818 as u64) + // Standard Error: 1_114 + .saturating_add(Weight::from_ref_time(372_897 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -118,9 +128,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Scheduler Lookup (r:0 w:1) /// The range of component `s` is `[1, 512]`. fn cancel(s: u32, ) -> Weight { - Weight::from_ref_time(21_509_000 as u64) - // Standard Error: 708 - .saturating_add(Weight::from_ref_time(323_013 as u64).saturating_mul(s as u64)) + // Minimum execution time: 21_686 nanoseconds. + Weight::from_ref_time(25_696_496 as u64) + // Standard Error: 1_261 + .saturating_add(Weight::from_ref_time(362_498 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -128,9 +139,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Scheduler Agenda (r:1 w:1) /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { - Weight::from_ref_time(22_427_000 as u64) - // Standard Error: 850 - .saturating_add(Weight::from_ref_time(357_265 as u64).saturating_mul(s as u64)) + // Minimum execution time: 23_084 nanoseconds. 
+ Weight::from_ref_time(31_255_518 as u64) + // Standard Error: 1_258 + .saturating_add(Weight::from_ref_time(382_534 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -138,9 +150,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Scheduler Agenda (r:1 w:1) /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { - Weight::from_ref_time(22_875_000 as u64) - // Standard Error: 693 - .saturating_add(Weight::from_ref_time(336_643 as u64).saturating_mul(s as u64)) + // Minimum execution time: 23_862 nanoseconds. + Weight::from_ref_time(28_591_336 as u64) + // Standard Error: 742 + .saturating_add(Weight::from_ref_time(369_305 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -150,52 +163,61 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Scheduler IncompleteSince (r:1 w:1) fn service_agendas_base() -> Weight { - Weight::from_ref_time(4_992_000 as u64) + // Minimum execution time: 5_131 nanoseconds. + Weight::from_ref_time(5_286_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Scheduler Agenda (r:1 w:1) /// The range of component `s` is `[0, 512]`. fn service_agenda_base(s: u32, ) -> Weight { - Weight::from_ref_time(4_320_000 as u64) - // Standard Error: 619 - .saturating_add(Weight::from_ref_time(336_713 as u64).saturating_mul(s as u64)) + // Minimum execution time: 4_111 nanoseconds. + Weight::from_ref_time(8_763_440 as u64) + // Standard Error: 783 + .saturating_add(Weight::from_ref_time(372_339 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } fn service_task_base() -> Weight { - Weight::from_ref_time(10_864_000 as u64) + // Minimum execution time: 10_880 nanoseconds. + Weight::from_ref_time(11_194_000 as u64) } // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) /// The range of component `s` is `[128, 4194304]`. fn service_task_fetched(s: u32, ) -> Weight { - Weight::from_ref_time(24_586_000 as u64) - // Standard Error: 1 - .saturating_add(Weight::from_ref_time(1_138 as u64).saturating_mul(s as u64)) + // Minimum execution time: 25_347 nanoseconds. + Weight::from_ref_time(25_717_000 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(1_128 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:0 w:1) fn service_task_named() -> Weight { - Weight::from_ref_time(13_127_000 as u64) + // Minimum execution time: 12_894 nanoseconds. + Weight::from_ref_time(13_108_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } fn service_task_periodic() -> Weight { - Weight::from_ref_time(11_053_000 as u64) + // Minimum execution time: 10_667 nanoseconds. + Weight::from_ref_time(10_908_000 as u64) } fn execute_dispatch_signed() -> Weight { - Weight::from_ref_time(4_158_000 as u64) + // Minimum execution time: 4_124 nanoseconds. + Weight::from_ref_time(4_680_000 as u64) } fn execute_dispatch_unsigned() -> Weight { - Weight::from_ref_time(4_104_000 as u64) + // Minimum execution time: 4_156 nanoseconds. 
+ Weight::from_ref_time(4_361_000 as u64) } // Storage: Scheduler Agenda (r:1 w:1) /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { - Weight::from_ref_time(20_074_000 as u64) - // Standard Error: 765 - .saturating_add(Weight::from_ref_time(343_285 as u64).saturating_mul(s as u64)) + // Minimum execution time: 20_504 nanoseconds. + Weight::from_ref_time(27_066_818 as u64) + // Standard Error: 1_114 + .saturating_add(Weight::from_ref_time(372_897 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -203,9 +225,10 @@ impl WeightInfo for () { // Storage: Scheduler Lookup (r:0 w:1) /// The range of component `s` is `[1, 512]`. fn cancel(s: u32, ) -> Weight { - Weight::from_ref_time(21_509_000 as u64) - // Standard Error: 708 - .saturating_add(Weight::from_ref_time(323_013 as u64).saturating_mul(s as u64)) + // Minimum execution time: 21_686 nanoseconds. + Weight::from_ref_time(25_696_496 as u64) + // Standard Error: 1_261 + .saturating_add(Weight::from_ref_time(362_498 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -213,9 +236,10 @@ impl WeightInfo for () { // Storage: Scheduler Agenda (r:1 w:1) /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { - Weight::from_ref_time(22_427_000 as u64) - // Standard Error: 850 - .saturating_add(Weight::from_ref_time(357_265 as u64).saturating_mul(s as u64)) + // Minimum execution time: 23_084 nanoseconds. + Weight::from_ref_time(31_255_518 as u64) + // Standard Error: 1_258 + .saturating_add(Weight::from_ref_time(382_534 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -223,9 +247,10 @@ impl WeightInfo for () { // Storage: Scheduler Agenda (r:1 w:1) /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { - Weight::from_ref_time(22_875_000 as u64) - // Standard Error: 693 - .saturating_add(Weight::from_ref_time(336_643 as u64).saturating_mul(s as u64)) + // Minimum execution time: 23_862 nanoseconds. + Weight::from_ref_time(28_591_336 as u64) + // Standard Error: 742 + .saturating_add(Weight::from_ref_time(369_305 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/session/src/weights.rs b/frame/session/src/weights.rs index 6e775d3d6f47a..d29413a33dd17 100644 --- a/frame/session/src/weights.rs +++ b/frame/session/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_session //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/session/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,7 +58,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:4 w:4) fn set_keys() -> Weight { - Weight::from_ref_time(48_484_000 as u64) + // Minimum execution time: 59_046 nanoseconds. + Weight::from_ref_time(59_934_000 as u64) .saturating_add(T::DbWeight::get().reads(6 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } @@ -63,7 +67,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:0 w:4) fn purge_keys() -> Weight { - Weight::from_ref_time(38_003_000 as u64) + // Minimum execution time: 48_872 nanoseconds. + Weight::from_ref_time(49_666_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } @@ -75,7 +80,8 @@ impl WeightInfo for () { // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:4 w:4) fn set_keys() -> Weight { - Weight::from_ref_time(48_484_000 as u64) + // Minimum execution time: 59_046 nanoseconds. + Weight::from_ref_time(59_934_000 as u64) .saturating_add(RocksDbWeight::get().reads(6 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } @@ -83,7 +89,8 @@ impl WeightInfo for () { // Storage: Session NextKeys (r:1 w:1) // Storage: Session KeyOwner (r:0 w:4) fn purge_keys() -> Weight { - Weight::from_ref_time(38_003_000 as u64) + // Minimum execution time: 48_872 nanoseconds. + Weight::from_ref_time(49_666_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 5070232365d3e..56374ffbc4b62 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,23 +18,24 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-09-19, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_staking // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --pallet=pallet_staking -// --chain=dev // --output=./frame/staking/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -86,17 +87,19 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - Weight::from_ref_time(52_281_000 as u64) + // Minimum execution time: 53_097 nanoseconds. + Weight::from_ref_time(53_708_000 as u64) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:3 w:3) - // Storage: VoterBagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) fn bond_extra() -> Weight { - Weight::from_ref_time(89_748_000 as u64) + // Minimum execution time: 92_199 nanoseconds. + Weight::from_ref_time(93_541_000 as u64) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().writes(7 as u64)) } @@ -106,11 +109,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:3 w:3) + // Storage: VoterList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterBagsList ListBags (r:2 w:2) + // Storage: VoterList ListBags (r:2 w:2) fn unbond() -> Weight { - Weight::from_ref_time(94_782_000 as u64) + // Minimum execution time: 98_227 nanoseconds. + Weight::from_ref_time(99_070_000 as u64) .saturating_add(T::DbWeight::get().reads(12 as u64)) .saturating_add(T::DbWeight::get().writes(8 as u64)) } @@ -120,9 +124,10 @@ impl WeightInfo for SubstrateWeight { // Storage: System Account (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - Weight::from_ref_time(44_499_000 as u64) - // Standard Error: 387 - .saturating_add(Weight::from_ref_time(72_726 as u64).saturating_mul(s as u64)) + // Minimum execution time: 45_058 nanoseconds. + Weight::from_ref_time(46_592_713 as u64) + // Standard Error: 413 + .saturating_add(Weight::from_ref_time(63_036 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -133,17 +138,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. 
- fn withdraw_unbonded_kill(s: u32, ) -> Weight { - Weight::from_ref_time(83_230_000 as u64) - // Standard Error: 612 - .saturating_add(Weight::from_ref_time(10_289 as u64).saturating_mul(s as u64)) + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + // Minimum execution time: 86_087 nanoseconds. + Weight::from_ref_time(87_627_894 as u64) .saturating_add(T::DbWeight::get().reads(13 as u64)) .saturating_add(T::DbWeight::get().writes(11 as u64)) } @@ -154,12 +158,13 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxValidatorsCount (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterBagsList ListNodes (r:1 w:1) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:1 w:1) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - Weight::from_ref_time(64_430_000 as u64) + // Minimum execution time: 67_690 nanoseconds. + Weight::from_ref_time(68_348_000 as u64) .saturating_add(T::DbWeight::get().reads(11 as u64)) .saturating_add(T::DbWeight::get().writes(5 as u64)) } @@ -167,12 +172,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Nominators (r:1 w:1) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { - Weight::from_ref_time(42_030_000 as u64) - // Standard Error: 5_873 - .saturating_add(Weight::from_ref_time(6_747_156 as u64).saturating_mul(k as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + // Minimum execution time: 43_512 nanoseconds. + Weight::from_ref_time(47_300_477 as u64) + // Standard Error: 11_609 + .saturating_add(Weight::from_ref_time(6_770_405 as u64).saturating_mul(k as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(k as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(k as u64))) } // Storage: Staking Ledger (r:1 w:0) @@ -182,16 +187,17 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - Weight::from_ref_time(70_834_000 as u64) - // Standard Error: 4_405 - .saturating_add(Weight::from_ref_time(2_583_120 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(13 as u64)) + // Minimum execution time: 74_296 nanoseconds. 
+ Weight::from_ref_time(73_201_782 as u64) + // Standard Error: 5_007 + .saturating_add(Weight::from_ref_time(2_810_370 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(12 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(6 as u64)) } @@ -199,54 +205,62 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - Weight::from_ref_time(63_328_000 as u64) + // Minimum execution time: 66_605 nanoseconds. + Weight::from_ref_time(67_279_000 as u64) .saturating_add(T::DbWeight::get().reads(8 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - Weight::from_ref_time(17_763_000 as u64) + // Minimum execution time: 18_897 nanoseconds. + Weight::from_ref_time(19_357_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - Weight::from_ref_time(26_163_000 as u64) + // Minimum execution time: 26_509 nanoseconds. + Weight::from_ref_time(26_961_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - Weight::from_ref_time(4_842_000 as u64) + // Minimum execution time: 5_025 nanoseconds. + Weight::from_ref_time(5_240_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - Weight::from_ref_time(4_974_000 as u64) + // Minimum execution time: 5_107 nanoseconds. + Weight::from_ref_time(5_320_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - Weight::from_ref_time(5_039_000 as u64) + // Minimum execution time: 5_094 nanoseconds. + Weight::from_ref_time(5_377_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - Weight::from_ref_time(4_950_000 as u64) + // Minimum execution time: 5_219 nanoseconds. + Weight::from_ref_time(5_434_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking Invulnerables (r:0 w:1) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { - Weight::from_ref_time(5_150_000 as u64) - // Standard Error: 30 - .saturating_add(Weight::from_ref_time(10_540 as u64).saturating_mul(v as u64)) + // Minimum execution time: 5_122 nanoseconds. 
+ Weight::from_ref_time(5_977_533 as u64) + // Standard Error: 34 + .saturating_add(Weight::from_ref_time(10_205 as u64).saturating_mul(v as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) @@ -254,9 +268,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) @@ -264,72 +278,77 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking SpanSlash (r:0 w:2) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { - Weight::from_ref_time(76_282_000 as u64) - // Standard Error: 2_397 - .saturating_add(Weight::from_ref_time(1_137_934 as u64).saturating_mul(s as u64)) + // Minimum execution time: 80_216 nanoseconds. + Weight::from_ref_time(86_090_609 as u64) + // Standard Error: 2_006 + .saturating_add(Weight::from_ref_time(1_039_308 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(11 as u64)) - .saturating_add(T::DbWeight::get().writes(11 as u64)) + .saturating_add(T::DbWeight::get().writes(12 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Staking UnappliedSlashes (r:1 w:1) /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { - Weight::from_ref_time(93_690_000 as u64) - // Standard Error: 43_203 - .saturating_add(Weight::from_ref_time(6_149_862 as u64).saturating_mul(s as u64)) + // Minimum execution time: 92_034 nanoseconds. + Weight::from_ref_time(896_585_370 as u64) + // Standard Error: 58_231 + .saturating_add(Weight::from_ref_time(4_908_277 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) - // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking ErasStakersClipped (r:1 w:0) // Storage: Staking ErasRewardPoints (r:1 w:0) // Storage: Staking ErasValidatorPrefs (r:1 w:0) - // Storage: Staking Payee (r:2 w:0) - // Storage: System Account (r:2 w:2) - /// The range of component `n` is `[1, 256]`. + // Storage: Staking Payee (r:1 w:0) + // Storage: System Account (r:1 w:1) + /// The range of component `n` is `[0, 256]`. fn payout_stakers_dead_controller(n: u32, ) -> Weight { - Weight::from_ref_time(151_647_000 as u64) - // Standard Error: 8_237 - .saturating_add(Weight::from_ref_time(21_296_415 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(12 as u64)) + // Minimum execution time: 127_936 nanoseconds. 
+ Weight::from_ref_time(184_556_084 as u64) + // Standard Error: 26_981 + .saturating_add(Weight::from_ref_time(21_786_304 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(9 as u64)) .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) - // Storage: Staking Bonded (r:2 w:0) - // Storage: Staking Ledger (r:2 w:2) + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) // Storage: Staking ErasStakersClipped (r:1 w:0) // Storage: Staking ErasRewardPoints (r:1 w:0) // Storage: Staking ErasValidatorPrefs (r:1 w:0) - // Storage: Staking Payee (r:2 w:0) - // Storage: System Account (r:2 w:2) - // Storage: Balances Locks (r:2 w:2) - /// The range of component `n` is `[1, 256]`. + // Storage: Staking Payee (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { - Weight::from_ref_time(197_310_000 as u64) - // Standard Error: 13_818 - .saturating_add(Weight::from_ref_time(30_053_043 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(15 as u64)) + // Minimum execution time: 157_778 nanoseconds. + Weight::from_ref_time(223_306_359 as u64) + // Standard Error: 27_216 + .saturating_add(Weight::from_ref_time(30_612_663 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(10 as u64)) .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(n as u64))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:3 w:3) + // Storage: VoterList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterBagsList ListBags (r:2 w:2) + // Storage: VoterList ListBags (r:2 w:2) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { - Weight::from_ref_time(89_469_000 as u64) - // Standard Error: 1_197 - .saturating_add(Weight::from_ref_time(74_212 as u64).saturating_mul(l as u64)) + // Minimum execution time: 92_880 nanoseconds. + Weight::from_ref_time(94_434_663 as u64) + // Standard Error: 1_734 + .saturating_add(Weight::from_ref_time(34_453 as u64).saturating_mul(l as u64)) .saturating_add(T::DbWeight::get().reads(9 as u64)) .saturating_add(T::DbWeight::get().writes(8 as u64)) } @@ -340,25 +359,26 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) /// The range of component `s` is `[1, 100]`. 
fn reap_stash(s: u32, ) -> Weight { - Weight::from_ref_time(88_333_000 as u64) - // Standard Error: 1_567 - .saturating_add(Weight::from_ref_time(1_101_099 as u64).saturating_mul(s as u64)) + // Minimum execution time: 92_334 nanoseconds. + Weight::from_ref_time(95_207_614 as u64) + // Standard Error: 1_822 + .saturating_add(Weight::from_ref_time(1_036_787 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(12 as u64)) - .saturating_add(T::DbWeight::get().writes(13 as u64)) + .saturating_add(T::DbWeight::get().writes(12 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } - // Storage: VoterBagsList CounterForListNodes (r:1 w:0) + // Storage: VoterList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: VoterBagsList ListBags (r:200 w:0) - // Storage: VoterBagsList ListNodes (r:101 w:0) + // Storage: VoterList ListBags (r:200 w:0) + // Storage: VoterList ListNodes (r:101 w:0) // Storage: Staking Nominators (r:101 w:0) // Storage: Staking Validators (r:2 w:0) // Storage: Staking Bonded (r:101 w:0) @@ -373,23 +393,24 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) /// The range of component `v` is `[1, 10]`. - /// The range of component `n` is `[1, 100]`. + /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { - Weight::from_ref_time(572_530_000 as u64) - // Standard Error: 3_166_542 - .saturating_add(Weight::from_ref_time(101_354_358 as u64).saturating_mul(v as u64)) - // Standard Error: 315_238 - .saturating_add(Weight::from_ref_time(15_565_319 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(261 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + // Minimum execution time: 535_169 nanoseconds. + Weight::from_ref_time(548_667_000 as u64) + // Standard Error: 1_759_252 + .saturating_add(Weight::from_ref_time(58_283_319 as u64).saturating_mul(v as u64)) + // Standard Error: 175_299 + .saturating_add(Weight::from_ref_time(13_578_512 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(207 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(v as u64))) .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(T::DbWeight::get().writes(6 as u64)) + .saturating_add(T::DbWeight::get().writes(3 as u64)) .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(v as u64))) } - // Storage: VoterBagsList CounterForListNodes (r:1 w:0) + // Storage: VoterList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) - // Storage: VoterBagsList ListBags (r:200 w:0) - // Storage: VoterBagsList ListNodes (r:1500 w:0) + // Storage: VoterList ListBags (r:200 w:0) + // Storage: VoterList ListNodes (r:1500 w:0) // Storage: Staking Nominators (r:1500 w:0) // Storage: Staking Validators (r:500 w:0) // Storage: Staking Bonded (r:1500 w:0) @@ -397,24 +418,28 @@ impl WeightInfo for SubstrateWeight { /// The range of component `v` is `[500, 1000]`. /// The range of component `n` is `[500, 1000]`. /// The range of component `s` is `[1, 20]`. 
- fn get_npos_voters(v: u32, n: u32, _s: u32, ) -> Weight { - Weight::from_ref_time(24_930_788_000 as u64) - // Standard Error: 266_386 - .saturating_add(Weight::from_ref_time(6_687_552 as u64).saturating_mul(v as u64)) - // Standard Error: 266_386 - .saturating_add(Weight::from_ref_time(6_839_134 as u64).saturating_mul(n as u64)) - .saturating_add(T::DbWeight::get().reads(6722 as u64)) - .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(v as u64))) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) + fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { + // Minimum execution time: 25_323_129 nanoseconds. + Weight::from_ref_time(25_471_672_000 as u64) + // Standard Error: 266_391 + .saturating_add(Weight::from_ref_time(6_665_504 as u64).saturating_mul(v as u64)) + // Standard Error: 266_391 + .saturating_add(Weight::from_ref_time(6_956_606 as u64).saturating_mul(n as u64)) + .saturating_add(T::DbWeight::get().reads(202 as u64)) + .saturating_add(T::DbWeight::get().reads((5 as u64).saturating_mul(v as u64))) + .saturating_add(T::DbWeight::get().reads((4 as u64).saturating_mul(n as u64))) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) } // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { - Weight::from_ref_time(4_864_727_000 as u64) - // Standard Error: 54_240 - .saturating_add(Weight::from_ref_time(3_319_738 as u64).saturating_mul(v as u64)) - .saturating_add(T::DbWeight::get().reads(502 as u64)) + // Minimum execution time: 4_905_036 nanoseconds. + Weight::from_ref_time(78_163_554 as u64) + // Standard Error: 23_723 + .saturating_add(Weight::from_ref_time(9_784_870 as u64).saturating_mul(v as u64)) + .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(v as u64))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -423,7 +448,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - Weight::from_ref_time(10_141_000 as u64) + // Minimum execution time: 10_096 nanoseconds. + Weight::from_ref_time(10_538_000 as u64) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:0 w:1) @@ -433,7 +459,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - Weight::from_ref_time(8_993_000 as u64) + // Minimum execution time: 9_045 nanoseconds. + Weight::from_ref_time(9_379_000 as u64) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) @@ -443,18 +470,20 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking CounterForNominators (r:1 w:1) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - Weight::from_ref_time(79_082_000 as u64) + // Minimum execution time: 81_457 nanoseconds. 
+ Weight::from_ref_time(82_410_000 as u64) .saturating_add(T::DbWeight::get().reads(11 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - Weight::from_ref_time(19_265_000 as u64) + // Minimum execution time: 19_684 nanoseconds. + Weight::from_ref_time(20_059_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -468,17 +497,19 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - Weight::from_ref_time(52_281_000 as u64) + // Minimum execution time: 53_097 nanoseconds. + Weight::from_ref_time(53_708_000 as u64) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:3 w:3) - // Storage: VoterBagsList ListBags (r:2 w:2) + // Storage: VoterList ListNodes (r:3 w:3) + // Storage: VoterList ListBags (r:2 w:2) fn bond_extra() -> Weight { - Weight::from_ref_time(89_748_000 as u64) + // Minimum execution time: 92_199 nanoseconds. + Weight::from_ref_time(93_541_000 as u64) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().writes(7 as u64)) } @@ -488,11 +519,12 @@ impl WeightInfo for () { // Storage: Staking CurrentEra (r:1 w:0) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:3 w:3) + // Storage: VoterList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterBagsList ListBags (r:2 w:2) + // Storage: VoterList ListBags (r:2 w:2) fn unbond() -> Weight { - Weight::from_ref_time(94_782_000 as u64) + // Minimum execution time: 98_227 nanoseconds. + Weight::from_ref_time(99_070_000 as u64) .saturating_add(RocksDbWeight::get().reads(12 as u64)) .saturating_add(RocksDbWeight::get().writes(8 as u64)) } @@ -502,9 +534,10 @@ impl WeightInfo for () { // Storage: System Account (r:1 w:1) /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { - Weight::from_ref_time(44_499_000 as u64) - // Standard Error: 387 - .saturating_add(Weight::from_ref_time(72_726 as u64).saturating_mul(s as u64)) + // Minimum execution time: 45_058 nanoseconds. + Weight::from_ref_time(46_592_713 as u64) + // Standard Error: 413 + .saturating_add(Weight::from_ref_time(63_036 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -515,17 +548,16 @@ impl WeightInfo for () { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) /// The range of component `s` is `[0, 100]`. 
- fn withdraw_unbonded_kill(s: u32, ) -> Weight { - Weight::from_ref_time(83_230_000 as u64) - // Standard Error: 612 - .saturating_add(Weight::from_ref_time(10_289 as u64).saturating_mul(s as u64)) + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { + // Minimum execution time: 86_087 nanoseconds. + Weight::from_ref_time(87_627_894 as u64) .saturating_add(RocksDbWeight::get().reads(13 as u64)) .saturating_add(RocksDbWeight::get().writes(11 as u64)) } @@ -536,12 +568,13 @@ impl WeightInfo for () { // Storage: Staking MaxValidatorsCount (r:1 w:0) // Storage: Staking Nominators (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterBagsList ListNodes (r:1 w:1) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:1 w:1) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - Weight::from_ref_time(64_430_000 as u64) + // Minimum execution time: 67_690 nanoseconds. + Weight::from_ref_time(68_348_000 as u64) .saturating_add(RocksDbWeight::get().reads(11 as u64)) .saturating_add(RocksDbWeight::get().writes(5 as u64)) } @@ -549,12 +582,12 @@ impl WeightInfo for () { // Storage: Staking Nominators (r:1 w:1) /// The range of component `k` is `[1, 128]`. fn kick(k: u32, ) -> Weight { - Weight::from_ref_time(42_030_000 as u64) - // Standard Error: 5_873 - .saturating_add(Weight::from_ref_time(6_747_156 as u64).saturating_mul(k as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + // Minimum execution time: 43_512 nanoseconds. + Weight::from_ref_time(47_300_477 as u64) + // Standard Error: 11_609 + .saturating_add(Weight::from_ref_time(6_770_405 as u64).saturating_mul(k as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(k as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(k as u64))) } // Storage: Staking Ledger (r:1 w:0) @@ -564,16 +597,17 @@ impl WeightInfo for () { // Storage: Staking Validators (r:2 w:0) // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { - Weight::from_ref_time(70_834_000 as u64) - // Standard Error: 4_405 - .saturating_add(Weight::from_ref_time(2_583_120 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(13 as u64)) + // Minimum execution time: 74_296 nanoseconds. 
+ Weight::from_ref_time(73_201_782 as u64) + // Standard Error: 5_007 + .saturating_add(Weight::from_ref_time(2_810_370 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(12 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } @@ -581,54 +615,62 @@ impl WeightInfo for () { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill() -> Weight { - Weight::from_ref_time(63_328_000 as u64) + // Minimum execution time: 66_605 nanoseconds. + Weight::from_ref_time(67_279_000 as u64) .saturating_add(RocksDbWeight::get().reads(8 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) // Storage: Staking Payee (r:0 w:1) fn set_payee() -> Weight { - Weight::from_ref_time(17_763_000 as u64) + // Minimum execution time: 18_897 nanoseconds. + Weight::from_ref_time(19_357_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - Weight::from_ref_time(26_163_000 as u64) + // Minimum execution time: 26_509 nanoseconds. + Weight::from_ref_time(26_961_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - Weight::from_ref_time(4_842_000 as u64) + // Minimum execution time: 5_025 nanoseconds. + Weight::from_ref_time(5_240_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - Weight::from_ref_time(4_974_000 as u64) + // Minimum execution time: 5_107 nanoseconds. + Weight::from_ref_time(5_320_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - Weight::from_ref_time(5_039_000 as u64) + // Minimum execution time: 5_094 nanoseconds. + Weight::from_ref_time(5_377_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - Weight::from_ref_time(4_950_000 as u64) + // Minimum execution time: 5_219 nanoseconds. + Weight::from_ref_time(5_434_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking Invulnerables (r:0 w:1) /// The range of component `v` is `[0, 1000]`. fn set_invulnerables(v: u32, ) -> Weight { - Weight::from_ref_time(5_150_000 as u64) - // Standard Error: 30 - .saturating_add(Weight::from_ref_time(10_540 as u64).saturating_mul(v as u64)) + // Minimum execution time: 5_122 nanoseconds. 
+ Weight::from_ref_time(5_977_533 as u64) + // Standard Error: 34 + .saturating_add(Weight::from_ref_time(10_205 as u64).saturating_mul(v as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking Bonded (r:1 w:1) @@ -636,9 +678,9 @@ impl WeightInfo for () { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Ledger (r:0 w:1) @@ -646,72 +688,77 @@ impl WeightInfo for () { // Storage: Staking SpanSlash (r:0 w:2) /// The range of component `s` is `[0, 100]`. fn force_unstake(s: u32, ) -> Weight { - Weight::from_ref_time(76_282_000 as u64) - // Standard Error: 2_397 - .saturating_add(Weight::from_ref_time(1_137_934 as u64).saturating_mul(s as u64)) + // Minimum execution time: 80_216 nanoseconds. + Weight::from_ref_time(86_090_609 as u64) + // Standard Error: 2_006 + .saturating_add(Weight::from_ref_time(1_039_308 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(11 as u64)) - .saturating_add(RocksDbWeight::get().writes(11 as u64)) + .saturating_add(RocksDbWeight::get().writes(12 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } // Storage: Staking UnappliedSlashes (r:1 w:1) /// The range of component `s` is `[1, 1000]`. fn cancel_deferred_slash(s: u32, ) -> Weight { - Weight::from_ref_time(93_690_000 as u64) - // Standard Error: 43_203 - .saturating_add(Weight::from_ref_time(6_149_862 as u64).saturating_mul(s as u64)) + // Minimum execution time: 92_034 nanoseconds. + Weight::from_ref_time(896_585_370 as u64) + // Standard Error: 58_231 + .saturating_add(Weight::from_ref_time(4_908_277 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) - // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Bonded (r:1 w:0) // Storage: Staking Ledger (r:1 w:1) // Storage: Staking ErasStakersClipped (r:1 w:0) // Storage: Staking ErasRewardPoints (r:1 w:0) // Storage: Staking ErasValidatorPrefs (r:1 w:0) - // Storage: Staking Payee (r:2 w:0) - // Storage: System Account (r:2 w:2) - /// The range of component `n` is `[1, 256]`. + // Storage: Staking Payee (r:1 w:0) + // Storage: System Account (r:1 w:1) + /// The range of component `n` is `[0, 256]`. fn payout_stakers_dead_controller(n: u32, ) -> Weight { - Weight::from_ref_time(151_647_000 as u64) - // Standard Error: 8_237 - .saturating_add(Weight::from_ref_time(21_296_415 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(12 as u64)) + // Minimum execution time: 127_936 nanoseconds. 
+ Weight::from_ref_time(184_556_084 as u64) + // Standard Error: 26_981 + .saturating_add(Weight::from_ref_time(21_786_304 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(9 as u64)) .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(n as u64))) } // Storage: Staking CurrentEra (r:1 w:0) // Storage: Staking ErasValidatorReward (r:1 w:0) - // Storage: Staking Bonded (r:2 w:0) - // Storage: Staking Ledger (r:2 w:2) + // Storage: Staking Bonded (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) // Storage: Staking ErasStakersClipped (r:1 w:0) // Storage: Staking ErasRewardPoints (r:1 w:0) // Storage: Staking ErasValidatorPrefs (r:1 w:0) - // Storage: Staking Payee (r:2 w:0) - // Storage: System Account (r:2 w:2) - // Storage: Balances Locks (r:2 w:2) - /// The range of component `n` is `[1, 256]`. + // Storage: Staking Payee (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) + /// The range of component `n` is `[0, 256]`. fn payout_stakers_alive_staked(n: u32, ) -> Weight { - Weight::from_ref_time(197_310_000 as u64) - // Standard Error: 13_818 - .saturating_add(Weight::from_ref_time(30_053_043 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(15 as u64)) + // Minimum execution time: 157_778 nanoseconds. + Weight::from_ref_time(223_306_359 as u64) + // Standard Error: 27_216 + .saturating_add(Weight::from_ref_time(30_612_663 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(10 as u64)) .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(n as u64))) } // Storage: Staking Ledger (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:3 w:3) + // Storage: VoterList ListNodes (r:3 w:3) // Storage: Staking Bonded (r:1 w:0) - // Storage: VoterBagsList ListBags (r:2 w:2) + // Storage: VoterList ListBags (r:2 w:2) /// The range of component `l` is `[1, 32]`. fn rebond(l: u32, ) -> Weight { - Weight::from_ref_time(89_469_000 as u64) - // Standard Error: 1_197 - .saturating_add(Weight::from_ref_time(74_212 as u64).saturating_mul(l as u64)) + // Minimum execution time: 92_880 nanoseconds. + Weight::from_ref_time(94_434_663 as u64) + // Standard Error: 1_734 + .saturating_add(Weight::from_ref_time(34_453 as u64).saturating_mul(l as u64)) .saturating_add(RocksDbWeight::get().reads(9 as u64)) .saturating_add(RocksDbWeight::get().writes(8 as u64)) } @@ -722,25 +769,26 @@ impl WeightInfo for () { // Storage: Staking Validators (r:1 w:0) // Storage: Staking Nominators (r:1 w:1) // Storage: Staking CounterForNominators (r:1 w:1) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: Staking Payee (r:0 w:1) // Storage: Staking SpanSlash (r:0 w:1) /// The range of component `s` is `[1, 100]`. 
fn reap_stash(s: u32, ) -> Weight { - Weight::from_ref_time(88_333_000 as u64) - // Standard Error: 1_567 - .saturating_add(Weight::from_ref_time(1_101_099 as u64).saturating_mul(s as u64)) + // Minimum execution time: 92_334 nanoseconds. + Weight::from_ref_time(95_207_614 as u64) + // Standard Error: 1_822 + .saturating_add(Weight::from_ref_time(1_036_787 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(12 as u64)) - .saturating_add(RocksDbWeight::get().writes(13 as u64)) + .saturating_add(RocksDbWeight::get().writes(12 as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) } - // Storage: VoterBagsList CounterForListNodes (r:1 w:0) + // Storage: VoterList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:1 w:0) - // Storage: VoterBagsList ListBags (r:200 w:0) - // Storage: VoterBagsList ListNodes (r:101 w:0) + // Storage: VoterList ListBags (r:200 w:0) + // Storage: VoterList ListNodes (r:101 w:0) // Storage: Staking Nominators (r:101 w:0) // Storage: Staking Validators (r:2 w:0) // Storage: Staking Bonded (r:101 w:0) @@ -755,23 +803,24 @@ impl WeightInfo for () { // Storage: Staking ErasTotalStake (r:0 w:1) // Storage: Staking ErasStartSessionIndex (r:0 w:1) /// The range of component `v` is `[1, 10]`. - /// The range of component `n` is `[1, 100]`. + /// The range of component `n` is `[0, 100]`. fn new_era(v: u32, n: u32, ) -> Weight { - Weight::from_ref_time(572_530_000 as u64) - // Standard Error: 3_166_542 - .saturating_add(Weight::from_ref_time(101_354_358 as u64).saturating_mul(v as u64)) - // Standard Error: 315_238 - .saturating_add(Weight::from_ref_time(15_565_319 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(261 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(v as u64))) + // Minimum execution time: 535_169 nanoseconds. + Weight::from_ref_time(548_667_000 as u64) + // Standard Error: 1_759_252 + .saturating_add(Weight::from_ref_time(58_283_319 as u64).saturating_mul(v as u64)) + // Standard Error: 175_299 + .saturating_add(Weight::from_ref_time(13_578_512 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(207 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(v as u64))) .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(n as u64))) - .saturating_add(RocksDbWeight::get().writes(6 as u64)) + .saturating_add(RocksDbWeight::get().writes(3 as u64)) .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(v as u64))) } - // Storage: VoterBagsList CounterForListNodes (r:1 w:0) + // Storage: VoterList CounterForListNodes (r:1 w:0) // Storage: Staking SlashingSpans (r:21 w:0) - // Storage: VoterBagsList ListBags (r:200 w:0) - // Storage: VoterBagsList ListNodes (r:1500 w:0) + // Storage: VoterList ListBags (r:200 w:0) + // Storage: VoterList ListNodes (r:1500 w:0) // Storage: Staking Nominators (r:1500 w:0) // Storage: Staking Validators (r:500 w:0) // Storage: Staking Bonded (r:1500 w:0) @@ -779,24 +828,28 @@ impl WeightInfo for () { /// The range of component `v` is `[500, 1000]`. /// The range of component `n` is `[500, 1000]`. /// The range of component `s` is `[1, 20]`. 
- fn get_npos_voters(v: u32, n: u32, _s: u32, ) -> Weight { - Weight::from_ref_time(24_930_788_000 as u64) - // Standard Error: 266_386 - .saturating_add(Weight::from_ref_time(6_687_552 as u64).saturating_mul(v as u64)) - // Standard Error: 266_386 - .saturating_add(Weight::from_ref_time(6_839_134 as u64).saturating_mul(n as u64)) - .saturating_add(RocksDbWeight::get().reads(6722 as u64)) - .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(v as u64))) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) + fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { + // Minimum execution time: 25_323_129 nanoseconds. + Weight::from_ref_time(25_471_672_000 as u64) + // Standard Error: 266_391 + .saturating_add(Weight::from_ref_time(6_665_504 as u64).saturating_mul(v as u64)) + // Standard Error: 266_391 + .saturating_add(Weight::from_ref_time(6_956_606 as u64).saturating_mul(n as u64)) + .saturating_add(RocksDbWeight::get().reads(202 as u64)) + .saturating_add(RocksDbWeight::get().reads((5 as u64).saturating_mul(v as u64))) + .saturating_add(RocksDbWeight::get().reads((4 as u64).saturating_mul(n as u64))) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) } // Storage: Staking CounterForValidators (r:1 w:0) // Storage: Staking Validators (r:501 w:0) /// The range of component `v` is `[500, 1000]`. fn get_npos_targets(v: u32, ) -> Weight { - Weight::from_ref_time(4_864_727_000 as u64) - // Standard Error: 54_240 - .saturating_add(Weight::from_ref_time(3_319_738 as u64).saturating_mul(v as u64)) - .saturating_add(RocksDbWeight::get().reads(502 as u64)) + // Minimum execution time: 4_905_036 nanoseconds. + Weight::from_ref_time(78_163_554 as u64) + // Standard Error: 23_723 + .saturating_add(Weight::from_ref_time(9_784_870 as u64).saturating_mul(v as u64)) + .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(v as u64))) } // Storage: Staking MinCommission (r:0 w:1) // Storage: Staking MinValidatorBond (r:0 w:1) @@ -805,7 +858,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_set() -> Weight { - Weight::from_ref_time(10_141_000 as u64) + // Minimum execution time: 10_096 nanoseconds. + Weight::from_ref_time(10_538_000 as u64) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:0 w:1) @@ -815,7 +869,8 @@ impl WeightInfo for () { // Storage: Staking MaxNominatorsCount (r:0 w:1) // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_configs_all_remove() -> Weight { - Weight::from_ref_time(8_993_000 as u64) + // Minimum execution time: 9_045 nanoseconds. + Weight::from_ref_time(9_379_000 as u64) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking Ledger (r:1 w:0) @@ -825,18 +880,20 @@ impl WeightInfo for () { // Storage: Staking CounterForNominators (r:1 w:1) // Storage: Staking MinNominatorBond (r:1 w:0) // Storage: Staking Validators (r:1 w:0) - // Storage: VoterBagsList ListNodes (r:2 w:2) - // Storage: VoterBagsList ListBags (r:1 w:1) - // Storage: VoterBagsList CounterForListNodes (r:1 w:1) + // Storage: VoterList ListNodes (r:2 w:2) + // Storage: VoterList ListBags (r:1 w:1) + // Storage: VoterList CounterForListNodes (r:1 w:1) fn chill_other() -> Weight { - Weight::from_ref_time(79_082_000 as u64) + // Minimum execution time: 81_457 nanoseconds. 
+ Weight::from_ref_time(82_410_000 as u64) .saturating_add(RocksDbWeight::get().reads(11 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Staking MinCommission (r:1 w:0) // Storage: Staking Validators (r:1 w:1) fn force_apply_min_commission() -> Weight { - Weight::from_ref_time(19_265_000 as u64) + // Minimum execution time: 19_684 nanoseconds. + Weight::from_ref_time(20_059_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/state-trie-migration/src/weights.rs b/frame/state-trie-migration/src/weights.rs index 851f4cba077c0..7414bb9038fdd 100644 --- a/frame/state-trie-migration/src/weights.rs +++ b/frame/state-trie-migration/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_state_trie_migration //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/state-trie-migration/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -59,38 +62,46 @@ impl WeightInfo for SubstrateWeight { // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) // Storage: StateTrieMigration MigrationProcess (r:1 w:1) fn continue_migrate() -> Weight { - Weight::from_ref_time(19_019_000 as u64) + // Minimum execution time: 23_874 nanoseconds. + Weight::from_ref_time(24_127_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) fn continue_migrate_wrong_witness() -> Weight { - Weight::from_ref_time(1_874_000 as u64) + // Minimum execution time: 6_119 nanoseconds. + Weight::from_ref_time(6_325_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) } fn migrate_custom_top_success() -> Weight { - Weight::from_ref_time(16_381_000 as u64) + // Minimum execution time: 20_365 nanoseconds. + Weight::from_ref_time(20_790_000 as u64) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_top_fail() -> Weight { - Weight::from_ref_time(25_966_000 as u64) + // Minimum execution time: 38_979 nanoseconds. + Weight::from_ref_time(40_271_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } fn migrate_custom_child_success() -> Weight { - Weight::from_ref_time(16_712_000 as u64) + // Minimum execution time: 21_217 nanoseconds. 
+ Weight::from_ref_time(21_526_000 as u64) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_child_fail() -> Weight { - Weight::from_ref_time(29_885_000 as u64) + // Minimum execution time: 43_853 nanoseconds. + Weight::from_ref_time(44_693_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: unknown [0x6b6579] (r:1 w:1) + /// The range of component `v` is `[1, 4194304]`. fn process_top_key(v: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(v as u64)) + // Minimum execution time: 5_575 nanoseconds. + Weight::from_ref_time(5_719_000 as u64) + // Standard Error: 3 + .saturating_add(Weight::from_ref_time(1_404 as u64).saturating_mul(v as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -101,38 +112,46 @@ impl WeightInfo for () { // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) // Storage: StateTrieMigration MigrationProcess (r:1 w:1) fn continue_migrate() -> Weight { - Weight::from_ref_time(19_019_000 as u64) + // Minimum execution time: 23_874 nanoseconds. + Weight::from_ref_time(24_127_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: StateTrieMigration SignedMigrationMaxLimits (r:1 w:0) fn continue_migrate_wrong_witness() -> Weight { - Weight::from_ref_time(1_874_000 as u64) + // Minimum execution time: 6_119 nanoseconds. + Weight::from_ref_time(6_325_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) } fn migrate_custom_top_success() -> Weight { - Weight::from_ref_time(16_381_000 as u64) + // Minimum execution time: 20_365 nanoseconds. + Weight::from_ref_time(20_790_000 as u64) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_top_fail() -> Weight { - Weight::from_ref_time(25_966_000 as u64) + // Minimum execution time: 38_979 nanoseconds. + Weight::from_ref_time(40_271_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } fn migrate_custom_child_success() -> Weight { - Weight::from_ref_time(16_712_000 as u64) + // Minimum execution time: 21_217 nanoseconds. + Weight::from_ref_time(21_526_000 as u64) } // Storage: unknown [0x666f6f] (r:1 w:1) fn migrate_custom_child_fail() -> Weight { - Weight::from_ref_time(29_885_000 as u64) + // Minimum execution time: 43_853 nanoseconds. + Weight::from_ref_time(44_693_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: unknown [0x6b6579] (r:1 w:1) + /// The range of component `v` is `[1, 4194304]`. fn process_top_key(v: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(v as u64)) + // Minimum execution time: 5_575 nanoseconds. 
+ Weight::from_ref_time(5_719_000 as u64) + // Standard Error: 3 + .saturating_add(Weight::from_ref_time(1_404 as u64).saturating_mul(v as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/support/src/weights/block_weights.rs b/frame/support/src/weights/block_weights.rs index 240b80460aa09..5c8e1f1c86e9d 100644 --- a/frame/support/src/weights/block_weights.rs +++ b/frame/support/src/weights/block_weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -16,12 +16,13 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-24 (Y/M/D) +//! DATE: 2022-11-07 (Y/M/D) +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! //! SHORT-NAME: `block`, LONG-NAME: `BlockExecution`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` //! WEIGHT-PATH: `./frame/support/src/weights/` -//! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1`, WEIGHT-ADD: `0` +//! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: // ./target/production/substrate @@ -31,6 +32,7 @@ // --execution=wasm // --wasm-execution=compiled // --weight-path=./frame/support/src/weights/ +// --header=./HEADER-APACHE2 // --warmup=10 // --repeat=100 @@ -39,19 +41,19 @@ use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; parameter_types! { /// Time to execute an empty block. - /// Calculated by multiplying the *Average* with `1` and adding `0`. + /// Calculated by multiplying the *Average* with `1.0` and adding `0`. /// /// Stats nanoseconds: - /// Min, Max: 5_303_128, 5_507_784 - /// Average: 5_346_284 - /// Median: 5_328_139 - /// Std-Dev: 41749.5 + /// Min, Max: 351_000, 392_617 + /// Average: 358_523 + /// Median: 359_836 + /// Std-Dev: 6698.67 /// /// Percentiles nanoseconds: - /// 99th: 5_489_273 - /// 95th: 5_433_314 - /// 75th: 5_354_812 - pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(5_346_284); + /// 99th: 390_723 + /// 95th: 365_799 + /// 75th: 361_582 + pub const BlockExecutionWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(358_523); } #[cfg(test)] diff --git a/frame/support/src/weights/extrinsic_weights.rs b/frame/support/src/weights/extrinsic_weights.rs index 913dbc4e91fc1..1db2281dfe488 100644 --- a/frame/support/src/weights/extrinsic_weights.rs +++ b/frame/support/src/weights/extrinsic_weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -16,12 +16,13 @@ // limitations under the License. //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-24 (Y/M/D) +//! DATE: 2022-11-07 (Y/M/D) +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! //! SHORT-NAME: `extrinsic`, LONG-NAME: `ExtrinsicBase`, RUNTIME: `Development` //! WARMUPS: `10`, REPEAT: `100` //! 
WEIGHT-PATH: `./frame/support/src/weights/` -//! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1`, WEIGHT-ADD: `0` +//! WEIGHT-METRIC: `Average`, WEIGHT-MUL: `1.0`, WEIGHT-ADD: `0` // Executed Command: // ./target/production/substrate @@ -31,6 +32,7 @@ // --execution=wasm // --wasm-execution=compiled // --weight-path=./frame/support/src/weights/ +// --header=./HEADER-APACHE2 // --warmup=10 // --repeat=100 @@ -39,19 +41,19 @@ use sp_weights::{constants::WEIGHT_PER_NANOS, Weight}; parameter_types! { /// Time to execute a NO-OP extrinsic, for example `System::remark`. - /// Calculated by multiplying the *Average* with `1` and adding `0`. + /// Calculated by multiplying the *Average* with `1.0` and adding `0`. /// /// Stats nanoseconds: - /// Min, Max: 86_060, 86_999 - /// Average: 86_298 - /// Median: 86_248 - /// Std-Dev: 207.19 + /// Min, Max: 98_722, 101_420 + /// Average: 98_974 + /// Median: 98_951 + /// Std-Dev: 271.62 /// /// Percentiles nanoseconds: - /// 99th: 86_924 - /// 95th: 86_828 - /// 75th: 86_347 - pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(86_298); + /// 99th: 99_202 + /// 95th: 99_163 + /// 75th: 99_030 + pub const ExtrinsicBaseWeight: Weight = WEIGHT_PER_NANOS.saturating_mul(98_974); } #[cfg(test)] diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index 9080c9aad43f2..696a6a09b8f80 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,12 @@ //! Autogenerated weights for frame_system //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-06-02, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `ci3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet // --chain=dev @@ -35,6 +35,7 @@ // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/system/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -58,44 +59,52 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `b` is `[0, 3932160]`. - fn remark(_b: u32, ) -> Weight { - Weight::from_ref_time(1_000_000 as u64) + fn remark(b: u32, ) -> Weight { + // Minimum execution time: 3_951 nanoseconds. + Weight::from_ref_time(1_307_232 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(363 as u64).saturating_mul(b as u64)) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + // Minimum execution time: 14_880 nanoseconds. 
+ Weight::from_ref_time(15_173_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) + .saturating_add(Weight::from_ref_time(1_424 as u64).saturating_mul(b as u64)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) fn set_heap_pages() -> Weight { - Weight::from_ref_time(5_367_000 as u64) + // Minimum execution time: 9_819 nanoseconds. + Weight::from_ref_time(10_513_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[1, 1000]`. + /// The range of component `i` is `[0, 1000]`. fn set_storage(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(603_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 4_038 nanoseconds. + Weight::from_ref_time(4_098_000 as u64) + // Standard Error: 710 + .saturating_add(Weight::from_ref_time(620_813 as u64).saturating_mul(i as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[1, 1000]`. + /// The range of component `i` is `[0, 1000]`. fn kill_storage(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(513_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 3_972 nanoseconds. + Weight::from_ref_time(4_082_000 as u64) + // Standard Error: 884 + .saturating_add(Weight::from_ref_time(536_923 as u64).saturating_mul(i as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `p` is `[1, 1000]`. + /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_026_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 5_703 nanoseconds. + Weight::from_ref_time(5_763_000 as u64) + // Standard Error: 1_248 + .saturating_add(Weight::from_ref_time(1_126_062 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(p as u64))) } } @@ -103,44 +112,52 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { /// The range of component `b` is `[0, 3932160]`. - fn remark(_b: u32, ) -> Weight { - Weight::from_ref_time(1_000_000 as u64) + fn remark(b: u32, ) -> Weight { + // Minimum execution time: 3_951 nanoseconds. + Weight::from_ref_time(1_307_232 as u64) + // Standard Error: 0 + .saturating_add(Weight::from_ref_time(363 as u64).saturating_mul(b as u64)) } /// The range of component `b` is `[0, 3932160]`. fn remark_with_event(b: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + // Minimum execution time: 14_880 nanoseconds. + Weight::from_ref_time(15_173_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) + .saturating_add(Weight::from_ref_time(1_424 as u64).saturating_mul(b as u64)) } // Storage: System Digest (r:1 w:1) // Storage: unknown [0x3a686561707061676573] (r:0 w:1) fn set_heap_pages() -> Weight { - Weight::from_ref_time(5_367_000 as u64) + // Minimum execution time: 9_819 nanoseconds. 
+ Weight::from_ref_time(10_513_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[1, 1000]`. + /// The range of component `i` is `[0, 1000]`. fn set_storage(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(603_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 4_038 nanoseconds. + Weight::from_ref_time(4_098_000 as u64) + // Standard Error: 710 + .saturating_add(Weight::from_ref_time(620_813 as u64).saturating_mul(i as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `i` is `[1, 1000]`. + /// The range of component `i` is `[0, 1000]`. fn kill_storage(i: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(513_000 as u64).saturating_mul(i as u64)) + // Minimum execution time: 3_972 nanoseconds. + Weight::from_ref_time(4_082_000 as u64) + // Standard Error: 884 + .saturating_add(Weight::from_ref_time(536_923 as u64).saturating_mul(i as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(i as u64))) } // Storage: Skipped Metadata (r:0 w:0) - /// The range of component `p` is `[1, 1000]`. + /// The range of component `p` is `[0, 1000]`. fn kill_prefix(p: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(1_026_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 5_703 nanoseconds. + Weight::from_ref_time(5_763_000 as u64) + // Standard Error: 1_248 + .saturating_add(Weight::from_ref_time(1_126_062 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(p as u64))) } } diff --git a/frame/timestamp/src/weights.rs b/frame/timestamp/src/weights.rs index d51f7c9cfe79d..52123920977da 100644 --- a/frame/timestamp/src/weights.rs +++ b/frame/timestamp/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_timestamp //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/timestamp/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,12 +57,14 @@ impl WeightInfo for SubstrateWeight { // Storage: Timestamp Now (r:1 w:1) // Storage: Babe CurrentSlot (r:1 w:0) fn set() -> Weight { - Weight::from_ref_time(8_080_000 as u64) + // Minimum execution time: 11_331 nanoseconds. + Weight::from_ref_time(11_584_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } fn on_finalize() -> Weight { - Weight::from_ref_time(2_681_000 as u64) + // Minimum execution time: 5_280 nanoseconds. + Weight::from_ref_time(5_412_000 as u64) } } @@ -68,11 +73,13 @@ impl WeightInfo for () { // Storage: Timestamp Now (r:1 w:1) // Storage: Babe CurrentSlot (r:1 w:0) fn set() -> Weight { - Weight::from_ref_time(8_080_000 as u64) + // Minimum execution time: 11_331 nanoseconds. + Weight::from_ref_time(11_584_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } fn on_finalize() -> Weight { - Weight::from_ref_time(2_681_000 as u64) + // Minimum execution time: 5_280 nanoseconds. + Weight::from_ref_time(5_412_000 as u64) } } diff --git a/frame/tips/src/weights.rs b/frame/tips/src/weights.rs index 086b1c72b21c0..1aa3fd8fa2eb7 100644 --- a/frame/tips/src/weights.rs +++ b/frame/tips/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_tips //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/tips/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -57,38 +60,46 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:1 w:1) + /// The range of component `r` is `[0, 300]`. fn report_awesome(r: u32, ) -> Weight { - Weight::from_ref_time(30_669_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 35_458 nanoseconds. 
+ Weight::from_ref_time(36_920_009 as u64) + // Standard Error: 252 + .saturating_add(Weight::from_ref_time(1_835 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn retract_tip() -> Weight { - Weight::from_ref_time(28_768_000 as u64) + // Minimum execution time: 34_322 nanoseconds. + Weight::from_ref_time(35_292_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:0 w:1) + /// The range of component `r` is `[0, 300]`. + /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { - Weight::from_ref_time(20_385_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(r as u64)) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(166_000 as u64).saturating_mul(t as u64)) + // Minimum execution time: 26_691 nanoseconds. + Weight::from_ref_time(27_313_497 as u64) + // Standard Error: 141 + .saturating_add(Weight::from_ref_time(818 as u64).saturating_mul(r as u64)) + // Standard Error: 3_352 + .saturating_add(Weight::from_ref_time(108_557 as u64).saturating_mul(t as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Tips (r:1 w:1) + /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { - Weight::from_ref_time(12_287_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(363_000 as u64).saturating_mul(t as u64)) + // Minimum execution time: 17_464 nanoseconds. + Weight::from_ref_time(17_621_090 as u64) + // Standard Error: 3_702 + .saturating_add(Weight::from_ref_time(269_919 as u64).saturating_mul(t as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -96,19 +107,23 @@ impl WeightInfo for SubstrateWeight { // Storage: Elections Members (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) + /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { - Weight::from_ref_time(45_656_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(276_000 as u64).saturating_mul(t as u64)) + // Minimum execution time: 52_221 nanoseconds. + Weight::from_ref_time(53_168_303 as u64) + // Standard Error: 6_591 + .saturating_add(Weight::from_ref_time(243_706 as u64).saturating_mul(t as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) + /// The range of component `t` is `[1, 13]`. fn slash_tip(t: u32, ) -> Weight { - Weight::from_ref_time(18_525_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(37_000 as u64).saturating_mul(t as u64)) + // Minimum execution time: 22_911 nanoseconds. 
+ Weight::from_ref_time(23_750_488 as u64) + // Standard Error: 2_561 + .saturating_add(Weight::from_ref_time(12_282 as u64).saturating_mul(t as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -118,38 +133,46 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:1 w:1) + /// The range of component `r` is `[0, 300]`. fn report_awesome(r: u32, ) -> Weight { - Weight::from_ref_time(30_669_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(4_000 as u64).saturating_mul(r as u64)) + // Minimum execution time: 35_458 nanoseconds. + Weight::from_ref_time(36_920_009 as u64) + // Standard Error: 252 + .saturating_add(Weight::from_ref_time(1_835 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) fn retract_tip() -> Weight { - Weight::from_ref_time(28_768_000 as u64) + // Minimum execution time: 34_322 nanoseconds. + Weight::from_ref_time(35_292_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Reasons (r:1 w:1) // Storage: Tips Tips (r:0 w:1) + /// The range of component `r` is `[0, 300]`. + /// The range of component `t` is `[1, 13]`. fn tip_new(r: u32, t: u32, ) -> Weight { - Weight::from_ref_time(20_385_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(r as u64)) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(166_000 as u64).saturating_mul(t as u64)) + // Minimum execution time: 26_691 nanoseconds. + Weight::from_ref_time(27_313_497 as u64) + // Standard Error: 141 + .saturating_add(Weight::from_ref_time(818 as u64).saturating_mul(r as u64)) + // Standard Error: 3_352 + .saturating_add(Weight::from_ref_time(108_557 as u64).saturating_mul(t as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Elections Members (r:1 w:0) // Storage: Tips Tips (r:1 w:1) + /// The range of component `t` is `[1, 13]`. fn tip(t: u32, ) -> Weight { - Weight::from_ref_time(12_287_000 as u64) - // Standard Error: 6_000 - .saturating_add(Weight::from_ref_time(363_000 as u64).saturating_mul(t as u64)) + // Minimum execution time: 17_464 nanoseconds. + Weight::from_ref_time(17_621_090 as u64) + // Standard Error: 3_702 + .saturating_add(Weight::from_ref_time(269_919 as u64).saturating_mul(t as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -157,19 +180,23 @@ impl WeightInfo for () { // Storage: Elections Members (r:1 w:0) // Storage: System Account (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) + /// The range of component `t` is `[1, 13]`. fn close_tip(t: u32, ) -> Weight { - Weight::from_ref_time(45_656_000 as u64) - // Standard Error: 14_000 - .saturating_add(Weight::from_ref_time(276_000 as u64).saturating_mul(t as u64)) + // Minimum execution time: 52_221 nanoseconds. 
+ Weight::from_ref_time(53_168_303 as u64) + // Standard Error: 6_591 + .saturating_add(Weight::from_ref_time(243_706 as u64).saturating_mul(t as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Tips Tips (r:1 w:1) // Storage: Tips Reasons (r:0 w:1) + /// The range of component `t` is `[1, 13]`. fn slash_tip(t: u32, ) -> Weight { - Weight::from_ref_time(18_525_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(37_000 as u64).saturating_mul(t as u64)) + // Minimum execution time: 22_911 nanoseconds. + Weight::from_ref_time(23_750_488 as u64) + // Standard Error: 2_561 + .saturating_add(Weight::from_ref_time(12_282 as u64).saturating_mul(t as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/transaction-storage/src/weights.rs b/frame/transaction-storage/src/weights.rs index 6fd9b1b818df6..16d12aa75ab4d 100644 --- a/frame/transaction-storage/src/weights.rs +++ b/frame/transaction-storage/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_transaction_storage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/transaction-storage/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -52,28 +55,28 @@ pub trait WeightInfo { /// Weights for pallet_transaction_storage using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: TransactionStorage MaxTransactionSize (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) // Storage: TransactionStorage EntryFee (r:1 w:0) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage BlockTransactions (r:1 w:1) - // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) + /// The range of component `l` is `[1, 8388608]`. fn store(l: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(l as u64)) - .saturating_add(T::DbWeight::get().reads(6 as u64)) + // Minimum execution time: 46_730 nanoseconds. 
+ Weight::from_ref_time(46_922_000 as u64) + // Standard Error: 2 + .saturating_add(Weight::from_ref_time(5_601 as u64).saturating_mul(l as u64)) + .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: TransactionStorage Transactions (r:1 w:0) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) // Storage: TransactionStorage EntryFee (r:1 w:0) - // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage BlockTransactions (r:1 w:1) - // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn renew() -> Weight { - Weight::from_ref_time(50_978_000 as u64) - .saturating_add(T::DbWeight::get().reads(6 as u64)) + // Minimum execution time: 56_802 nanoseconds. + Weight::from_ref_time(58_670_000 as u64) + .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: TransactionStorage ProofChecked (r:1 w:1) @@ -82,7 +85,8 @@ impl WeightInfo for SubstrateWeight { // Storage: System ParentHash (r:1 w:0) // Storage: TransactionStorage Transactions (r:1 w:0) fn check_proof_max() -> Weight { - Weight::from_ref_time(106_990_000 as u64) + // Minimum execution time: 74_016 nanoseconds. + Weight::from_ref_time(94_111_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -90,28 +94,28 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - // Storage: TransactionStorage MaxTransactionSize (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) // Storage: TransactionStorage EntryFee (r:1 w:0) // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage BlockTransactions (r:1 w:1) - // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) + /// The range of component `l` is `[1, 8388608]`. fn store(l: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(5_000 as u64).saturating_mul(l as u64)) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) + // Minimum execution time: 46_730 nanoseconds. + Weight::from_ref_time(46_922_000 as u64) + // Standard Error: 2 + .saturating_add(Weight::from_ref_time(5_601 as u64).saturating_mul(l as u64)) + .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: TransactionStorage Transactions (r:1 w:0) + // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage ByteFee (r:1 w:0) // Storage: TransactionStorage EntryFee (r:1 w:0) - // Storage: unknown [0x3a65787472696e7369635f696e646578] (r:1 w:0) // Storage: TransactionStorage BlockTransactions (r:1 w:1) - // Storage: TransactionStorage MaxBlockTransactions (r:1 w:0) fn renew() -> Weight { - Weight::from_ref_time(50_978_000 as u64) - .saturating_add(RocksDbWeight::get().reads(6 as u64)) + // Minimum execution time: 56_802 nanoseconds. 
+ Weight::from_ref_time(58_670_000 as u64) + .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: TransactionStorage ProofChecked (r:1 w:1) @@ -120,7 +124,8 @@ impl WeightInfo for () { // Storage: System ParentHash (r:1 w:0) // Storage: TransactionStorage Transactions (r:1 w:0) fn check_proof_max() -> Weight { - Weight::from_ref_time(106_990_000 as u64) + // Minimum execution time: 74_016 nanoseconds. + Weight::from_ref_time(94_111_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index 37c62e0c18c52..3ee071ac700f1 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_treasury //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/treasury/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -55,39 +58,41 @@ pub trait WeightInfo { /// Weights for pallet_treasury using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Treasury ProposalCount (r:1 w:1) - // Storage: Treasury Proposals (r:0 w:1) fn spend() -> Weight { - Weight::from_ref_time(22_063_000 as u64) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + // Minimum execution time: 137 nanoseconds. + Weight::from_ref_time(153_000 as u64) } // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn propose_spend() -> Weight { - Weight::from_ref_time(26_473_000 as u64) + // Minimum execution time: 31_437 nanoseconds. + Weight::from_ref_time(32_241_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Treasury Proposals (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_proposal() -> Weight { - Weight::from_ref_time(29_955_000 as u64) + // Minimum execution time: 38_351 nanoseconds. + Weight::from_ref_time(38_828_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Treasury Proposals (r:1 w:0) // Storage: Treasury Approvals (r:1 w:1) + /// The range of component `p` is `[0, 99]`. 
fn approve_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(10_786_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 11_937 nanoseconds. + Weight::from_ref_time(15_541_763 as u64) + // Standard Error: 1_036 + .saturating_add(Weight::from_ref_time(128_326 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Treasury Approvals (r:1 w:1) fn remove_approval() -> Weight { - Weight::from_ref_time(6_647_000 as u64) + // Minimum execution time: 9_611 nanoseconds. + Weight::from_ref_time(10_012_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -95,10 +100,12 @@ impl WeightInfo for SubstrateWeight { // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Treasury Proposals (r:2 w:2) // Storage: System Account (r:4 w:4) + /// The range of component `p` is `[0, 100]`. fn on_initialize_proposals(p: u32, ) -> Weight { - Weight::from_ref_time(25_805_000 as u64) - // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(28_473_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 43_016 nanoseconds. + Weight::from_ref_time(56_538_751 as u64) + // Standard Error: 14_890 + .saturating_add(Weight::from_ref_time(26_789_120 as u64).saturating_mul(p as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(p as u64))) .saturating_add(T::DbWeight::get().writes(2 as u64)) @@ -108,39 +115,41 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - // Storage: Treasury ProposalCount (r:1 w:1) - // Storage: Treasury Proposals (r:0 w:1) fn spend() -> Weight { - Weight::from_ref_time(22_063_000 as u64) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + // Minimum execution time: 137 nanoseconds. + Weight::from_ref_time(153_000 as u64) } // Storage: Treasury ProposalCount (r:1 w:1) // Storage: Treasury Proposals (r:0 w:1) fn propose_spend() -> Weight { - Weight::from_ref_time(26_473_000 as u64) + // Minimum execution time: 31_437 nanoseconds. + Weight::from_ref_time(32_241_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Treasury Proposals (r:1 w:1) // Storage: System Account (r:1 w:1) fn reject_proposal() -> Weight { - Weight::from_ref_time(29_955_000 as u64) + // Minimum execution time: 38_351 nanoseconds. + Weight::from_ref_time(38_828_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Treasury Proposals (r:1 w:0) // Storage: Treasury Approvals (r:1 w:1) + /// The range of component `p` is `[0, 99]`. fn approve_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(10_786_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(110_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 11_937 nanoseconds. 
+ Weight::from_ref_time(15_541_763 as u64) + // Standard Error: 1_036 + .saturating_add(Weight::from_ref_time(128_326 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Treasury Approvals (r:1 w:1) fn remove_approval() -> Weight { - Weight::from_ref_time(6_647_000 as u64) + // Minimum execution time: 9_611 nanoseconds. + Weight::from_ref_time(10_012_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -148,10 +157,12 @@ impl WeightInfo for () { // Storage: Bounties BountyApprovals (r:1 w:1) // Storage: Treasury Proposals (r:2 w:2) // Storage: System Account (r:4 w:4) + /// The range of component `p` is `[0, 100]`. fn on_initialize_proposals(p: u32, ) -> Weight { - Weight::from_ref_time(25_805_000 as u64) - // Standard Error: 18_000 - .saturating_add(Weight::from_ref_time(28_473_000 as u64).saturating_mul(p as u64)) + // Minimum execution time: 43_016 nanoseconds. + Weight::from_ref_time(56_538_751 as u64) + // Standard Error: 14_890 + .saturating_add(Weight::from_ref_time(26_789_120 as u64).saturating_mul(p as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(p as u64))) .saturating_add(RocksDbWeight::get().writes(2 as u64)) diff --git a/frame/uniques/src/weights.rs b/frame/uniques/src/weights.rs index a134b68d093b6..8a8e1090bb718 100644 --- a/frame/uniques/src/weights.rs +++ b/frame/uniques/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,24 +18,24 @@ //! Autogenerated weights for pallet_uniques //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-10-20, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// /home/benchbot/cargo_target_dir/production/substrate +// ./target/production/substrate // benchmark // pallet +// --chain=dev // --steps=50 // --repeat=20 +// --pallet=pallet_uniques // --extrinsic=* // --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/substrate/.git/.artifacts/bench.json -// --pallet=pallet_uniques -// --chain=dev // --output=./frame/uniques/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -81,16 +81,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - // Minimum execution time: 32_901 nanoseconds. - Weight::from_ref_time(33_628_000 as u64) + // Minimum execution time: 35_358 nanoseconds. 
+ Weight::from_ref_time(35_935_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - // Minimum execution time: 21_633 nanoseconds. - Weight::from_ref_time(22_380_000 as u64) + // Minimum execution time: 22_767 nanoseconds. + Weight::from_ref_time(23_235_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -106,14 +106,14 @@ impl WeightInfo for SubstrateWeight { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - // Minimum execution time: 2_429_508 nanoseconds. - Weight::from_ref_time(2_446_263_000 as u64) - // Standard Error: 28_236 - .saturating_add(Weight::from_ref_time(8_477_090 as u64).saturating_mul(n as u64)) - // Standard Error: 28_236 - .saturating_add(Weight::from_ref_time(314_268 as u64).saturating_mul(m as u64)) - // Standard Error: 28_236 - .saturating_add(Weight::from_ref_time(249_624 as u64).saturating_mul(a as u64)) + // Minimum execution time: 2_453_194 nanoseconds. + Weight::from_ref_time(2_469_109_000 as u64) + // Standard Error: 27_900 + .saturating_add(Weight::from_ref_time(8_974_176 as u64).saturating_mul(n as u64)) + // Standard Error: 27_900 + .saturating_add(Weight::from_ref_time(344_842 as u64).saturating_mul(m as u64)) + // Standard Error: 27_900 + .saturating_add(Weight::from_ref_time(185_438 as u64).saturating_mul(a as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) @@ -126,8 +126,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - // Minimum execution time: 41_938 nanoseconds. - Weight::from_ref_time(42_814_000 as u64) + // Minimum execution time: 45_115 nanoseconds. + Weight::from_ref_time(45_746_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -136,8 +136,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - // Minimum execution time: 43_593 nanoseconds. - Weight::from_ref_time(44_420_000 as u64) + // Minimum execution time: 46_447 nanoseconds. + Weight::from_ref_time(46_994_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -146,8 +146,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - // Minimum execution time: 34_037 nanoseconds. - Weight::from_ref_time(34_848_000 as u64) + // Minimum execution time: 35_953 nanoseconds. + Weight::from_ref_time(36_375_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -155,10 +155,10 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - // Minimum execution time: 23_295 nanoseconds. 
- Weight::from_ref_time(23_482_000 as u64) - // Standard Error: 9_490 - .saturating_add(Weight::from_ref_time(10_899_320 as u64).saturating_mul(i as u64)) + // Minimum execution time: 24_238 nanoseconds. + Weight::from_ref_time(24_788_000 as u64) + // Standard Error: 9_232 + .saturating_add(Weight::from_ref_time(11_322_011 as u64).saturating_mul(i as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) @@ -167,30 +167,30 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - // Minimum execution time: 27_005 nanoseconds. - Weight::from_ref_time(27_344_000 as u64) + // Minimum execution time: 28_595 nanoseconds. + Weight::from_ref_time(29_280_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - // Minimum execution time: 26_815 nanoseconds. - Weight::from_ref_time(27_400_000 as u64) + // Minimum execution time: 28_581 nanoseconds. + Weight::from_ref_time(29_038_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - // Minimum execution time: 21_988 nanoseconds. - Weight::from_ref_time(22_596_000 as u64) + // Minimum execution time: 24_298 nanoseconds. + Weight::from_ref_time(24_742_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - // Minimum execution time: 21_886 nanoseconds. - Weight::from_ref_time(22_617_000 as u64) + // Minimum execution time: 24_004 nanoseconds. + Weight::from_ref_time(24_536_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -198,23 +198,23 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - // Minimum execution time: 30_241 nanoseconds. - Weight::from_ref_time(30_677_000 as u64) + // Minimum execution time: 32_599 nanoseconds. + Weight::from_ref_time(33_201_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - // Minimum execution time: 23_011 nanoseconds. - Weight::from_ref_time(23_796_000 as u64) + // Minimum execution time: 25_137 nanoseconds. + Weight::from_ref_time(25_877_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - // Minimum execution time: 25_739 nanoseconds. - Weight::from_ref_time(26_572_000 as u64) + // Minimum execution time: 27_736 nanoseconds. + Weight::from_ref_time(28_279_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -222,8 +222,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - // Minimum execution time: 48_486 nanoseconds. 
- Weight::from_ref_time(49_029_000 as u64) + // Minimum execution time: 51_195 nanoseconds. + Weight::from_ref_time(51_674_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -231,79 +231,79 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - // Minimum execution time: 47_408 nanoseconds. - Weight::from_ref_time(48_753_000 as u64) + // Minimum execution time: 50_159 nanoseconds. + Weight::from_ref_time(51_412_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - // Minimum execution time: 40_085 nanoseconds. - Weight::from_ref_time(40_625_000 as u64) + // Minimum execution time: 42_608 nanoseconds. + Weight::from_ref_time(42_880_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - // Minimum execution time: 40_546 nanoseconds. - Weight::from_ref_time(41_113_000 as u64) + // Minimum execution time: 43_239 nanoseconds. + Weight::from_ref_time(43_752_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - // Minimum execution time: 39_902 nanoseconds. - Weight::from_ref_time(40_599_000 as u64) + // Minimum execution time: 41_224 nanoseconds. + Weight::from_ref_time(41_974_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - // Minimum execution time: 37_597 nanoseconds. - Weight::from_ref_time(37_964_000 as u64) + // Minimum execution time: 40_836 nanoseconds. + Weight::from_ref_time(41_864_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - // Minimum execution time: 28_719 nanoseconds. - Weight::from_ref_time(29_213_000 as u64) + // Minimum execution time: 29_558 nanoseconds. + Weight::from_ref_time(29_948_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - // Minimum execution time: 27_608 nanoseconds. - Weight::from_ref_time(28_096_000 as u64) + // Minimum execution time: 29_694 nanoseconds. + Weight::from_ref_time(30_156_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - // Minimum execution time: 25_568 nanoseconds. - Weight::from_ref_time(26_098_000 as u64) + // Minimum execution time: 27_819 nanoseconds. 
+ Weight::from_ref_time(28_245_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - // Minimum execution time: 24_521 nanoseconds. - Weight::from_ref_time(25_062_000 as u64) + // Minimum execution time: 26_317 nanoseconds. + Weight::from_ref_time(26_893_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - // Minimum execution time: 25_337 nanoseconds. - Weight::from_ref_time(25_902_000 as u64) + // Minimum execution time: 26_546 nanoseconds. + Weight::from_ref_time(27_142_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -312,8 +312,8 @@ impl WeightInfo for SubstrateWeight { // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - // Minimum execution time: 46_736 nanoseconds. - Weight::from_ref_time(47_264_000 as u64) + // Minimum execution time: 49_238 nanoseconds. + Weight::from_ref_time(50_444_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } @@ -324,16 +324,16 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn create() -> Weight { - // Minimum execution time: 32_901 nanoseconds. - Weight::from_ref_time(33_628_000 as u64) + // Minimum execution time: 35_358 nanoseconds. + Weight::from_ref_time(35_935_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_create() -> Weight { - // Minimum execution time: 21_633 nanoseconds. - Weight::from_ref_time(22_380_000 as u64) + // Minimum execution time: 22_767 nanoseconds. + Weight::from_ref_time(23_235_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -349,14 +349,14 @@ impl WeightInfo for () { /// The range of component `m` is `[0, 1000]`. /// The range of component `a` is `[0, 1000]`. fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - // Minimum execution time: 2_429_508 nanoseconds. - Weight::from_ref_time(2_446_263_000 as u64) - // Standard Error: 28_236 - .saturating_add(Weight::from_ref_time(8_477_090 as u64).saturating_mul(n as u64)) - // Standard Error: 28_236 - .saturating_add(Weight::from_ref_time(314_268 as u64).saturating_mul(m as u64)) - // Standard Error: 28_236 - .saturating_add(Weight::from_ref_time(249_624 as u64).saturating_mul(a as u64)) + // Minimum execution time: 2_453_194 nanoseconds. 
+ Weight::from_ref_time(2_469_109_000 as u64) + // Standard Error: 27_900 + .saturating_add(Weight::from_ref_time(8_974_176 as u64).saturating_mul(n as u64)) + // Standard Error: 27_900 + .saturating_add(Weight::from_ref_time(344_842 as u64).saturating_mul(m as u64)) + // Standard Error: 27_900 + .saturating_add(Weight::from_ref_time(185_438 as u64).saturating_mul(a as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(n as u64))) .saturating_add(RocksDbWeight::get().writes(4 as u64)) @@ -369,8 +369,8 @@ impl WeightInfo for () { // Storage: Uniques CollectionMaxSupply (r:1 w:0) // Storage: Uniques Account (r:0 w:1) fn mint() -> Weight { - // Minimum execution time: 41_938 nanoseconds. - Weight::from_ref_time(42_814_000 as u64) + // Minimum execution time: 45_115 nanoseconds. + Weight::from_ref_time(45_746_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } @@ -379,8 +379,8 @@ impl WeightInfo for () { // Storage: Uniques Account (r:0 w:1) // Storage: Uniques ItemPriceOf (r:0 w:1) fn burn() -> Weight { - // Minimum execution time: 43_593 nanoseconds. - Weight::from_ref_time(44_420_000 as u64) + // Minimum execution time: 46_447 nanoseconds. + Weight::from_ref_time(46_994_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -389,8 +389,8 @@ impl WeightInfo for () { // Storage: Uniques Account (r:0 w:2) // Storage: Uniques ItemPriceOf (r:0 w:1) fn transfer() -> Weight { - // Minimum execution time: 34_037 nanoseconds. - Weight::from_ref_time(34_848_000 as u64) + // Minimum execution time: 35_953 nanoseconds. + Weight::from_ref_time(36_375_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } @@ -398,10 +398,10 @@ impl WeightInfo for () { // Storage: Uniques Asset (r:102 w:102) /// The range of component `i` is `[0, 5000]`. fn redeposit(i: u32, ) -> Weight { - // Minimum execution time: 23_295 nanoseconds. - Weight::from_ref_time(23_482_000 as u64) - // Standard Error: 9_490 - .saturating_add(Weight::from_ref_time(10_899_320 as u64).saturating_mul(i as u64)) + // Minimum execution time: 24_238 nanoseconds. + Weight::from_ref_time(24_788_000 as u64) + // Standard Error: 9_232 + .saturating_add(Weight::from_ref_time(11_322_011 as u64).saturating_mul(i as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(i as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) @@ -410,30 +410,30 @@ impl WeightInfo for () { // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn freeze() -> Weight { - // Minimum execution time: 27_005 nanoseconds. - Weight::from_ref_time(27_344_000 as u64) + // Minimum execution time: 28_595 nanoseconds. + Weight::from_ref_time(29_280_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn thaw() -> Weight { - // Minimum execution time: 26_815 nanoseconds. - Weight::from_ref_time(27_400_000 as u64) + // Minimum execution time: 28_581 nanoseconds. 
+ Weight::from_ref_time(29_038_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn freeze_collection() -> Weight { - // Minimum execution time: 21_988 nanoseconds. - Weight::from_ref_time(22_596_000 as u64) + // Minimum execution time: 24_298 nanoseconds. + Weight::from_ref_time(24_742_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) fn thaw_collection() -> Weight { - // Minimum execution time: 21_886 nanoseconds. - Weight::from_ref_time(22_617_000 as u64) + // Minimum execution time: 24_004 nanoseconds. + Weight::from_ref_time(24_536_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -441,23 +441,23 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:2) fn transfer_ownership() -> Weight { - // Minimum execution time: 30_241 nanoseconds. - Weight::from_ref_time(30_677_000 as u64) + // Minimum execution time: 32_599 nanoseconds. + Weight::from_ref_time(33_201_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Uniques Class (r:1 w:1) fn set_team() -> Weight { - // Minimum execution time: 23_011 nanoseconds. - Weight::from_ref_time(23_796_000 as u64) + // Minimum execution time: 25_137 nanoseconds. + Weight::from_ref_time(25_877_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassAccount (r:0 w:1) fn force_item_status() -> Weight { - // Minimum execution time: 25_739 nanoseconds. - Weight::from_ref_time(26_572_000 as u64) + // Minimum execution time: 27_736 nanoseconds. + Weight::from_ref_time(28_279_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -465,8 +465,8 @@ impl WeightInfo for () { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn set_attribute() -> Weight { - // Minimum execution time: 48_486 nanoseconds. - Weight::from_ref_time(49_029_000 as u64) + // Minimum execution time: 51_195 nanoseconds. + Weight::from_ref_time(51_674_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } @@ -474,79 +474,79 @@ impl WeightInfo for () { // Storage: Uniques InstanceMetadataOf (r:1 w:0) // Storage: Uniques Attribute (r:1 w:1) fn clear_attribute() -> Weight { - // Minimum execution time: 47_408 nanoseconds. - Weight::from_ref_time(48_753_000 as u64) + // Minimum execution time: 50_159 nanoseconds. + Weight::from_ref_time(51_412_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn set_metadata() -> Weight { - // Minimum execution time: 40_085 nanoseconds. - Weight::from_ref_time(40_625_000 as u64) + // Minimum execution time: 42_608 nanoseconds. 
+ Weight::from_ref_time(42_880_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques InstanceMetadataOf (r:1 w:1) fn clear_metadata() -> Weight { - // Minimum execution time: 40_546 nanoseconds. - Weight::from_ref_time(41_113_000 as u64) + // Minimum execution time: 43_239 nanoseconds. + Weight::from_ref_time(43_752_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:1) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn set_collection_metadata() -> Weight { - // Minimum execution time: 39_902 nanoseconds. - Weight::from_ref_time(40_599_000 as u64) + // Minimum execution time: 41_224 nanoseconds. + Weight::from_ref_time(41_974_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques ClassMetadataOf (r:1 w:1) fn clear_collection_metadata() -> Weight { - // Minimum execution time: 37_597 nanoseconds. - Weight::from_ref_time(37_964_000 as u64) + // Minimum execution time: 40_836 nanoseconds. + Weight::from_ref_time(41_864_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn approve_transfer() -> Weight { - // Minimum execution time: 28_719 nanoseconds. - Weight::from_ref_time(29_213_000 as u64) + // Minimum execution time: 29_558 nanoseconds. + Weight::from_ref_time(29_948_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Asset (r:1 w:1) fn cancel_approval() -> Weight { - // Minimum execution time: 27_608 nanoseconds. - Weight::from_ref_time(28_096_000 as u64) + // Minimum execution time: 29_694 nanoseconds. + Weight::from_ref_time(30_156_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques OwnershipAcceptance (r:1 w:1) fn set_accept_ownership() -> Weight { - // Minimum execution time: 25_568 nanoseconds. - Weight::from_ref_time(26_098_000 as u64) + // Minimum execution time: 27_819 nanoseconds. + Weight::from_ref_time(28_245_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques CollectionMaxSupply (r:1 w:1) // Storage: Uniques Class (r:1 w:0) fn set_collection_max_supply() -> Weight { - // Minimum execution time: 24_521 nanoseconds. - Weight::from_ref_time(25_062_000 as u64) + // Minimum execution time: 26_317 nanoseconds. + Weight::from_ref_time(26_893_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Uniques Asset (r:1 w:0) // Storage: Uniques ItemPriceOf (r:0 w:1) fn set_price() -> Weight { - // Minimum execution time: 25_337 nanoseconds. - Weight::from_ref_time(25_902_000 as u64) + // Minimum execution time: 26_546 nanoseconds. 
+ Weight::from_ref_time(27_142_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } @@ -555,8 +555,8 @@ impl WeightInfo for () { // Storage: Uniques Class (r:1 w:0) // Storage: Uniques Account (r:0 w:2) fn buy_item() -> Weight { - // Minimum execution time: 46_736 nanoseconds. - Weight::from_ref_time(47_264_000 as u64) + // Minimum execution time: 49_238 nanoseconds. + Weight::from_ref_time(50_444_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } diff --git a/frame/utility/src/weights.rs b/frame/utility/src/weights.rs index b88319f2597c9..eac94e44b8dbf 100644 --- a/frame/utility/src/weights.rs +++ b/frame/utility/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,12 +18,12 @@ //! Autogenerated weights for pallet_utility //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-06-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/production/substrate +// ./target/production/substrate // benchmark // pallet // --chain=dev @@ -35,6 +35,7 @@ // --wasm-execution=compiled // --heap-pages=4096 // --output=./frame/utility/src/weights.rs +// --header=./HEADER-APACHE2 // --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] @@ -58,27 +59,32 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { - Weight::from_ref_time(23_113_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_701_000 as u64).saturating_mul(c as u64)) + // Minimum execution time: 14_470 nanoseconds. + Weight::from_ref_time(17_443_346 as u64) + // Standard Error: 2_037 + .saturating_add(Weight::from_ref_time(3_510_555 as u64).saturating_mul(c as u64)) } fn as_derivative() -> Weight { - Weight::from_ref_time(4_182_000 as u64) + // Minimum execution time: 6_799 nanoseconds. + Weight::from_ref_time(6_976_000 as u64) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { - Weight::from_ref_time(18_682_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_794_000 as u64).saturating_mul(c as u64)) + // Minimum execution time: 14_630 nanoseconds. + Weight::from_ref_time(24_580_656 as u64) + // Standard Error: 2_202 + .saturating_add(Weight::from_ref_time(3_584_516 as u64).saturating_mul(c as u64)) } fn dispatch_as() -> Weight { - Weight::from_ref_time(12_049_000 as u64) + // Minimum execution time: 16_597 nanoseconds. + Weight::from_ref_time(16_950_000 as u64) } /// The range of component `c` is `[0, 1000]`. 
fn force_batch(c: u32, ) -> Weight { - Weight::from_ref_time(19_136_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_697_000 as u64).saturating_mul(c as u64)) + // Minimum execution time: 13_885 nanoseconds. + Weight::from_ref_time(20_147_978 as u64) + // Standard Error: 2_232 + .saturating_add(Weight::from_ref_time(3_516_969 as u64).saturating_mul(c as u64)) } } @@ -86,26 +92,31 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { /// The range of component `c` is `[0, 1000]`. fn batch(c: u32, ) -> Weight { - Weight::from_ref_time(23_113_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_701_000 as u64).saturating_mul(c as u64)) + // Minimum execution time: 14_470 nanoseconds. + Weight::from_ref_time(17_443_346 as u64) + // Standard Error: 2_037 + .saturating_add(Weight::from_ref_time(3_510_555 as u64).saturating_mul(c as u64)) } fn as_derivative() -> Weight { - Weight::from_ref_time(4_182_000 as u64) + // Minimum execution time: 6_799 nanoseconds. + Weight::from_ref_time(6_976_000 as u64) } /// The range of component `c` is `[0, 1000]`. fn batch_all(c: u32, ) -> Weight { - Weight::from_ref_time(18_682_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(2_794_000 as u64).saturating_mul(c as u64)) + // Minimum execution time: 14_630 nanoseconds. + Weight::from_ref_time(24_580_656 as u64) + // Standard Error: 2_202 + .saturating_add(Weight::from_ref_time(3_584_516 as u64).saturating_mul(c as u64)) } fn dispatch_as() -> Weight { - Weight::from_ref_time(12_049_000 as u64) + // Minimum execution time: 16_597 nanoseconds. + Weight::from_ref_time(16_950_000 as u64) } /// The range of component `c` is `[0, 1000]`. fn force_batch(c: u32, ) -> Weight { - Weight::from_ref_time(19_136_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(2_697_000 as u64).saturating_mul(c as u64)) + // Minimum execution time: 13_885 nanoseconds. + Weight::from_ref_time(20_147_978 as u64) + // Standard Error: 2_232 + .saturating_add(Weight::from_ref_time(3_516_969 as u64).saturating_mul(c as u64)) } } diff --git a/frame/vesting/src/weights.rs b/frame/vesting/src/weights.rs index 567d2fef74130..5462445414719 100644 --- a/frame/vesting/src/weights.rs +++ b/frame/vesting/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_vesting //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/vesting/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -59,95 +62,119 @@ pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_978_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(82_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(88_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 45_113 nanoseconds. + Weight::from_ref_time(44_114_539 as u64) + // Standard Error: 958 + .saturating_add(Weight::from_ref_time(56_239 as u64).saturating_mul(l as u64)) + // Standard Error: 1_704 + .saturating_add(Weight::from_ref_time(64_926 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_856_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(79_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 43_918 nanoseconds. + Weight::from_ref_time(43_452_573 as u64) + // Standard Error: 984 + .saturating_add(Weight::from_ref_time(50_162 as u64).saturating_mul(l as u64)) + // Standard Error: 1_752 + .saturating_add(Weight::from_ref_time(42_080 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. fn vest_other_locked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(33_522_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(74_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(72_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 43_603 nanoseconds. + Weight::from_ref_time(42_696_097 as u64) + // Standard Error: 996 + .saturating_add(Weight::from_ref_time(65_316 as u64).saturating_mul(l as u64)) + // Standard Error: 1_772 + .saturating_add(Weight::from_ref_time(65_862 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. 
fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_558_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(61_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 43_099 nanoseconds. + Weight::from_ref_time(42_937_914 as u64) + // Standard Error: 884 + .saturating_add(Weight::from_ref_time(52_079 as u64).saturating_mul(l as u64)) + // Standard Error: 1_573 + .saturating_add(Weight::from_ref_time(36_274 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(49_260_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(l as u64)) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 59_023 nanoseconds. + Weight::from_ref_time(59_606_862 as u64) + // Standard Error: 2_078 + .saturating_add(Weight::from_ref_time(55_335 as u64).saturating_mul(l as u64)) + // Standard Error: 3_698 + .saturating_add(Weight::from_ref_time(26_743 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(49_166_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as u64).saturating_mul(l as u64)) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(43_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 58_249 nanoseconds. + Weight::from_ref_time(59_025_976 as u64) + // Standard Error: 2_078 + .saturating_add(Weight::from_ref_time(55_736 as u64).saturating_mul(l as u64)) + // Standard Error: 3_697 + .saturating_add(Weight::from_ref_time(24_903 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().writes(4 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(34_042_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(83_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 45_279 nanoseconds. 
+ Weight::from_ref_time(44_197_440 as u64) + // Standard Error: 946 + .saturating_add(Weight::from_ref_time(62_308 as u64).saturating_mul(l as u64)) + // Standard Error: 1_747 + .saturating_add(Weight::from_ref_time(64_473 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(33_937_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 44_925 nanoseconds. + Weight::from_ref_time(44_219_676 as u64) + // Standard Error: 889 + .saturating_add(Weight::from_ref_time(60_311 as u64).saturating_mul(l as u64)) + // Standard Error: 1_641 + .saturating_add(Weight::from_ref_time(63_095 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } @@ -157,95 +184,119 @@ impl WeightInfo for SubstrateWeight { impl WeightInfo for () { // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. fn vest_locked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_978_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(82_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(88_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 45_113 nanoseconds. + Weight::from_ref_time(44_114_539 as u64) + // Standard Error: 958 + .saturating_add(Weight::from_ref_time(56_239 as u64).saturating_mul(l as u64)) + // Standard Error: 1_704 + .saturating_add(Weight::from_ref_time(64_926 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. fn vest_unlocked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_856_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(79_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 43_918 nanoseconds. + Weight::from_ref_time(43_452_573 as u64) + // Standard Error: 984 + .saturating_add(Weight::from_ref_time(50_162 as u64).saturating_mul(l as u64)) + // Standard Error: 1_752 + .saturating_add(Weight::from_ref_time(42_080 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. 
fn vest_other_locked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(33_522_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(74_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(72_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 43_603 nanoseconds. + Weight::from_ref_time(42_696_097 as u64) + // Standard Error: 996 + .saturating_add(Weight::from_ref_time(65_316 as u64).saturating_mul(l as u64)) + // Standard Error: 1_772 + .saturating_add(Weight::from_ref_time(65_862 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[1, 28]`. fn vest_other_unlocked(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(32_558_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(61_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 43_099 nanoseconds. + Weight::from_ref_time(42_937_914 as u64) + // Standard Error: 884 + .saturating_add(Weight::from_ref_time(52_079 as u64).saturating_mul(l as u64)) + // Standard Error: 1_573 + .saturating_add(Weight::from_ref_time(36_274 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[0, 27]`. fn vested_transfer(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(49_260_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(l as u64)) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(55_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 59_023 nanoseconds. + Weight::from_ref_time(59_606_862 as u64) + // Standard Error: 2_078 + .saturating_add(Weight::from_ref_time(55_335 as u64).saturating_mul(l as u64)) + // Standard Error: 3_698 + .saturating_add(Weight::from_ref_time(26_743 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: System Account (r:2 w:2) // Storage: Balances Locks (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[0, 27]`. fn force_vested_transfer(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(49_166_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(77_000 as u64).saturating_mul(l as u64)) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(43_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 58_249 nanoseconds. 
+ Weight::from_ref_time(59_025_976 as u64) + // Standard Error: 2_078 + .saturating_add(Weight::from_ref_time(55_736 as u64).saturating_mul(l as u64)) + // Standard Error: 3_697 + .saturating_add(Weight::from_ref_time(24_903 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(4 as u64)) .saturating_add(RocksDbWeight::get().writes(4 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. fn not_unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(34_042_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(83_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(80_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 45_279 nanoseconds. + Weight::from_ref_time(44_197_440 as u64) + // Standard Error: 946 + .saturating_add(Weight::from_ref_time(62_308 as u64).saturating_mul(l as u64)) + // Standard Error: 1_747 + .saturating_add(Weight::from_ref_time(64_473 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Vesting Vesting (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `l` is `[0, 49]`. + /// The range of component `s` is `[2, 28]`. fn unlocking_merge_schedules(l: u32, s: u32, ) -> Weight { - Weight::from_ref_time(33_937_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(78_000 as u64).saturating_mul(l as u64)) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(73_000 as u64).saturating_mul(s as u64)) + // Minimum execution time: 44_925 nanoseconds. + Weight::from_ref_time(44_219_676 as u64) + // Standard Error: 889 + .saturating_add(Weight::from_ref_time(60_311 as u64).saturating_mul(l as u64)) + // Standard Error: 1_641 + .saturating_add(Weight::from_ref_time(63_095 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } diff --git a/frame/whitelist/src/weights.rs b/frame/whitelist/src/weights.rs index ca474d5e55634..22d238d0fdd0f 100644 --- a/frame/whitelist/src/weights.rs +++ b/frame/whitelist/src/weights.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +18,8 @@ //! Autogenerated weights for pallet_whitelist //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-24, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-11-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm2`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: @@ -32,8 +33,10 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 // --output=./frame/whitelist/src/weights.rs +// --header=./HEADER-APACHE2 +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,35 +59,38 @@ impl WeightInfo for SubstrateWeight { // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn whitelist_call() -> Weight { - Weight::from_ref_time(20_938_000 as u64) + // Minimum execution time: 26_352 nanoseconds. + Weight::from_ref_time(26_727_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) fn remove_whitelisted_call() -> Weight { - Weight::from_ref_time(22_332_000 as u64) + // Minimum execution time: 25_536 nanoseconds. + Weight::from_ref_time(25_969_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) - // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:1 w:1) fn dispatch_whitelisted_call() -> Weight { - Weight::from_ref_time(5_989_917_000 as u64) + // Minimum execution time: 4_802_466 nanoseconds. + Weight::from_ref_time(4_820_197_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { - Weight::from_ref_time(25_325_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 29_184 nanoseconds. + Weight::from_ref_time(30_530_970 as u64) + // Standard Error: 7 + .saturating_add(Weight::from_ref_time(1_496 as u64).saturating_mul(n as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(3 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } } @@ -93,34 +99,37 @@ impl WeightInfo for () { // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) fn whitelist_call() -> Weight { - Weight::from_ref_time(20_938_000 as u64) + // Minimum execution time: 26_352 nanoseconds. + Weight::from_ref_time(26_727_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) fn remove_whitelisted_call() -> Weight { - Weight::from_ref_time(22_332_000 as u64) + // Minimum execution time: 25_536 nanoseconds. 
+ Weight::from_ref_time(25_969_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) - // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:1 w:1) fn dispatch_whitelisted_call() -> Weight { - Weight::from_ref_time(5_989_917_000 as u64) + // Minimum execution time: 4_802_466 nanoseconds. + Weight::from_ref_time(4_820_197_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Whitelist WhitelistedCall (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `n` is `[1, 10000]`. fn dispatch_whitelisted_call_with_preimage(n: u32, ) -> Weight { - Weight::from_ref_time(25_325_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(n as u64)) + // Minimum execution time: 29_184 nanoseconds. + Weight::from_ref_time(30_530_970 as u64) + // Standard Error: 7 + .saturating_add(Weight::from_ref_time(1_496 as u64).saturating_mul(n as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(3 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } } From a1dee7ecd2a157c490d189f0932b32aa12106857 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 8 Nov 2022 21:24:54 +0100 Subject: [PATCH 328/348] Remove partial key size limit from trie codec (#12566) * remove size limit from trie codec * test previous upper limit is not enforced anymore * fmt * restore test --- primitives/trie/src/lib.rs | 16 +++++++++++++++- primitives/trie/src/node_codec.rs | 2 -- primitives/trie/src/node_header.rs | 5 +---- primitives/trie/src/trie_stream.rs | 3 +-- 4 files changed, 17 insertions(+), 9 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 5634d96d58356..d036db7b1fecd 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -527,7 +527,6 @@ where /// Constants used into trie simplification codec. 
mod trie_constants { const FIRST_PREFIX: u8 = 0b_00 << 6; - pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6; @@ -987,6 +986,21 @@ mod tests { assert_eq!(first_storage_root, second_storage_root); } + #[test] + fn big_key() { + let check = |keysize: usize| { + let mut memdb = PrefixedMemoryDB::::default(); + let mut root = Default::default(); + let mut t = TrieDBMutBuilder::::new(&mut memdb, &mut root).build(); + t.insert(&vec![0x01u8; keysize][..], &[0x01u8, 0x23]).unwrap(); + std::mem::drop(t); + let t = TrieDBBuilder::::new(&memdb, &root).build(); + assert_eq!(t.get(&vec![0x01u8; keysize][..]).unwrap(), Some(vec![0x01u8, 0x23])); + }; + check(u16::MAX as usize / 2); // old limit + check(u16::MAX as usize / 2 + 1); // value over old limit still works + } + #[test] fn node_with_no_children_fail_decoding() { let branch = NodeCodec::::branch_node_nibbled( diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 0202b1c6ba7d2..f632320dd296d 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -279,8 +279,6 @@ fn partial_from_iterator_encode>( nibble_count: usize, node_kind: NodeKind, ) -> Vec { - let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); - let mut output = Vec::with_capacity(4 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE)); match node_kind { NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index c2c9510c5ac43..f3544be65b2e9 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -117,8 +117,6 @@ pub(crate) fn size_and_prefix_iterator( prefix: u8, prefix_mask: usize, ) -> impl Iterator { - let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); - let max_value = 255u8 >> prefix_mask; let l1 = sp_std::cmp::min((max_value as usize).saturating_sub(1), size); let (first_byte, mut rem) = if size == l1 { @@ -165,12 +163,11 @@ fn decode_size( return Ok(result) } result -= 1; - while result <= trie_constants::NIBBLE_SIZE_BOUND { + loop { let n = input.read_byte()? as usize; if n < 255 { return Ok(result + n + 1) } result += 255; } - Ok(trie_constants::NIBBLE_SIZE_BOUND) } diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index ca798db47b552..435e6a986722e 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -54,8 +54,7 @@ fn branch_node_bit_mask(has_children: impl Iterator) -> (u8, u8) { /// Create a leaf/branch node, encoding a number of nibbles. 
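Not part of the patch: a self-contained sketch of the open-ended size decoding that `decode_size` performs once the `NIBBLE_SIZE_BOUND` clamp above is removed — the decoder now keeps consuming saturated (255) continuation bytes until a terminating byte arrives, so a partial-key nibble count beyond the old `u16::MAX` bound round-trips instead of being clamped. The name `decode_size_sketch`, the hand-built byte stream, and the `Option`-based error handling are illustrative assumptions, not the crate's actual API.

fn decode_size_sketch(
	first: u8,
	rest: &mut impl Iterator<Item = u8>,
	prefix_mask: usize,
) -> Option<usize> {
	// With a 2-bit prefix (e.g. a leaf header), the first byte can directly encode 0..=62;
	// the saturated value 63 signals that continuation bytes follow.
	let max_value = 255u8 >> prefix_mask;
	let mut result = (first & max_value) as usize;
	if result < max_value as usize {
		return Some(result);
	}
	result -= 1;
	loop {
		let n = rest.next()? as usize;
		if n < 255 {
			// A byte below 255 terminates the varint-style run.
			return Some(result + n + 1);
		}
		result += 255;
	}
}

#[test]
fn size_beyond_the_old_bound_decodes() {
	// A nibble count above the old `NIBBLE_SIZE_BOUND` (u16::MAX) is no longer clamped.
	let big = u16::MAX as usize + 7;
	let first = 0b0111_1111u8; // saturated leaf first byte: prefix 0b01 plus payload 63
	let k = (big - 63) / 255; // number of saturated continuation bytes
	let last = ((big - 63) % 255) as u8; // final, terminating byte
	let mut cont = core::iter::repeat(255u8).take(k).chain(core::iter::once(last));
	assert_eq!(decode_size_sketch(first, &mut cont, 2), Some(big));
}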
fn fuse_nibbles_node(nibbles: &[u8], kind: NodeKind) -> impl Iterator + '_ {
-	let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibbles.len());
-
+	let size = nibbles.len();
 	let iter_start = match kind {
 		NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2),
 		NodeKind::BranchNoValue =>

From 26e53b4244e34632b401eba4281e8cd14e62087e Mon Sep 17 00:00:00 2001
From: zjb0807
Date: Wed, 9 Nov 2022 19:42:30 +1300
Subject: [PATCH 329/348] Keep the same name (#12616)

Co-authored-by: x
---
 frame/scheduler/src/lib.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs
index b5ea0deeba9a3..3a463b5808cd1 100644
--- a/frame/scheduler/src/lib.rs
+++ b/frame/scheduler/src/lib.rs
@@ -277,15 +277,15 @@ pub mod pallet {
 		/// Dispatched some task.
 		Dispatched {
 			task: TaskAddress,
-			id: Option<[u8; 32]>,
+			id: Option,
 			result: DispatchResult,
 		},
 		/// The call for the provided hash was not found so the task has been aborted.
-		CallUnavailable { task: TaskAddress, id: Option<[u8; 32]> },
+		CallUnavailable { task: TaskAddress, id: Option },
 		/// The given task was unable to be renewed since the agenda is full at that block.
-		PeriodicFailed { task: TaskAddress, id: Option<[u8; 32]> },
+		PeriodicFailed { task: TaskAddress, id: Option },
 		/// The given task can never be executed since it is overweight.
-		PermanentlyOverweight { task: TaskAddress, id: Option<[u8; 32]> },
+		PermanentlyOverweight { task: TaskAddress, id: Option },
 	}

 	#[pallet::error]

From f9ebd2aeddb47d5c503205be473f522bcc2c7d89 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Wed, 9 Nov 2022 09:22:34 +0100
Subject: [PATCH 330/348] Do not finalize parent twice (#12653)

If the parent block is already finalized, we don't need to do this
again.
---
 client/service/src/client/client.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs
index 438d0b7f77061..1d896d8acd8bf 100644
--- a/client/service/src/client/client.rs
+++ b/client/service/src/client/client.rs
@@ -656,7 +656,7 @@ where
 		// Ensure parent chain is finalized to maintain invariant that finality is called
 		// sequentially.
- if finalized && parent_exists { + if finalized && parent_exists && info.finalized_hash != parent_hash { self.apply_finality_with_block_hash( operation, parent_hash, From 846e6ea2d43dd5f40d447501138be21eb132d2e5 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 9 Nov 2022 09:23:44 +0100 Subject: [PATCH 331/348] update paritydb and remove dev deps on rocksdb (#12641) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * update paritydb and remove dev deps on rocksdb * feature rocksdb for node testing * feature decl in node-bench * revert change to rocksdb inclusion logic * Update bin/node/bench/Cargo.toml Co-authored-by: Bastian Köcher Co-authored-by: parity-processbot <> Co-authored-by: Bastian Köcher --- Cargo.lock | 45 +++++++++++---------------------------- Cargo.toml | 1 - bin/node/bench/Cargo.toml | 2 +- client/db/Cargo.toml | 2 +- 4 files changed, 15 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 48205d9bd86da..bec6b0bd976e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -601,16 +601,6 @@ dependencies = [ "digest 0.10.3", ] -[[package]] -name = "blake2-rfc" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -dependencies = [ - "arrayvec 0.4.12", - "constant_time_eq", -] - [[package]] name = "blake2b_simd" version = "1.0.0" @@ -1797,7 +1787,7 @@ checksum = "64fba5a42bd76a17cad4bfa00de168ee1cbfa06a5e8ce992ae880218c05641a9" dependencies = [ "byteorder", "dynasm", - "memmap2 0.5.0", + "memmap2", ] [[package]] @@ -4097,9 +4087,9 @@ dependencies = [ [[package]] name = "lz4" -version = "1.23.2" +version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aac20ed6991e01bf6a2e68cc73df2b389707403662a8ba89f68511fb340f724c" +checksum = "7e9e2dd86df36ce760a60f6ff6ad526f7ba1f14ba0356f8254fb6905e6494df1" dependencies = [ "libc", "lz4-sys", @@ -4107,9 +4097,9 @@ dependencies = [ [[package]] name = "lz4-sys" -version = "1.9.2" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dca79aa95d8b3226213ad454d328369853be3a1382d89532a854f4d69640acae" +checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" dependencies = [ "cc", "libc", @@ -4185,15 +4175,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "memmap2" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04e3e85b970d650e2ae6d70592474087051c11c54da7f7b4949725c5735fbcc6" -dependencies = [ - "libc", -] - [[package]] name = "memmap2" version = "0.5.0" @@ -6451,19 +6432,19 @@ dependencies = [ [[package]] name = "parity-db" -version = "0.3.16" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb474d0ed0836e185cb998a6b140ed1073d1fbf27d690ecf9ede8030289382c" +checksum = "3a7511a0bec4a336b5929999d02b560d2439c993cccf98c26481484e811adc43" dependencies = [ - "blake2-rfc", + "blake2", "crc32fast", "fs2", "hex", "libc", "log", "lz4", - "memmap2 0.2.1", - "parking_lot 0.11.2", + "memmap2", + "parking_lot 0.12.1", "rand 0.8.5", "snap", ] @@ -7694,7 +7675,7 @@ name = "sc-chain-spec" version = "4.0.0-dev" dependencies = [ "impl-trait-for-tuples", - "memmap2 0.5.0", + "memmap2", "parity-scale-codec", "sc-chain-spec-derive", "sc-network-common", @@ -11167,7 +11148,7 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "digest 0.10.3", "rand 0.8.5", "static_assertions", @@ -11615,7 +11596,7 @@ dependencies = [ "enumset", "lazy_static", "loupe", - "memmap2 0.5.0", + "memmap2", "more-asserts", "rustc-demangle", "serde", diff --git a/Cargo.toml b/Cargo.toml index d3c801fc2c7be..45aa5f9854d25 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -254,7 +254,6 @@ members = [ # This list is ordered alphabetically. [profile.dev.package] blake2 = { opt-level = 3 } -blake2-rfc = { opt-level = 3 } blake2b_simd = { opt-level = 3 } chacha20poly1305 = { opt-level = 3 } cranelift-codegen = { opt-level = 3 } diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index 54a1d4900c60b..5fb4c418e8ae8 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -38,7 +38,7 @@ fs_extra = "1" rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } -parity-db = { version = "0.3" } +parity-db = "0.4.2" sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } futures = { version = "0.3.21", features = ["thread-pool"] } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index b21038b77564f..c12bf933f6bb1 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -22,7 +22,7 @@ kvdb-memorydb = "0.12.0" kvdb-rocksdb = { version = "0.16.0", optional = true } linked-hash-map = "0.5.4" log = "0.4.17" -parity-db = "0.3.16" +parity-db = "0.4.2" parking_lot = "0.12.1" sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-state-db = { version = "0.10.0-dev", path = "../state-db" } From daf72d122d183bfe3d75eefd1bc3766794e9ed06 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Wed, 9 Nov 2022 09:50:36 +0100 Subject: [PATCH 332/348] Epoch-Changes tree pruning was lagging by one epoch (#12567) * Remove all not required nodes from the epoch-changes tree Some outdated nodes were left there because of the predicate * Test to excercise the fix * Add a fork on genesis to the test * Fix typo in comments --- client/consensus/babe/src/tests.rs | 60 ++++++------------ client/consensus/epochs/src/lib.rs | 97 +++++++++++++++++++++++++++--- 2 files changed, 110 insertions(+), 47 deletions(-) diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index ee1605f037ff4..a2886663b2c8f 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -869,12 +869,12 @@ fn importing_epoch_change_block_prunes_tree() { // Create and import the canon chain and keep track of fork blocks (A, C, D) // from the diagram above. 
- let canon_hashes = propose_and_import_blocks_wrap(BlockId::Number(0), 30); + let canon = propose_and_import_blocks_wrap(BlockId::Number(0), 30); // Create the forks - let fork_1 = propose_and_import_blocks_wrap(BlockId::Hash(canon_hashes[0]), 10); - let fork_2 = propose_and_import_blocks_wrap(BlockId::Hash(canon_hashes[12]), 15); - let fork_3 = propose_and_import_blocks_wrap(BlockId::Hash(canon_hashes[18]), 10); + let fork_1 = propose_and_import_blocks_wrap(BlockId::Hash(canon[0]), 10); + let fork_2 = propose_and_import_blocks_wrap(BlockId::Hash(canon[12]), 15); + let fork_3 = propose_and_import_blocks_wrap(BlockId::Hash(canon[18]), 10); // We should be tracking a total of 9 epochs in the fork tree assert_eq!(epoch_changes.shared_data().tree().iter().count(), 9); @@ -884,51 +884,31 @@ fn importing_epoch_change_block_prunes_tree() { // We finalize block #13 from the canon chain, so on the next epoch // change the tree should be pruned, to not contain F (#7). - client.finalize_block(canon_hashes[12], None, false).unwrap(); + client.finalize_block(canon[12], None, false).unwrap(); propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 7); - // at this point no hashes from the first fork must exist on the tree - assert!(!epoch_changes - .shared_data() - .tree() - .iter() - .map(|(h, _, _)| h) - .any(|h| fork_1.contains(h)),); + let nodes: Vec<_> = epoch_changes.shared_data().tree().iter().map(|(h, _, _)| *h).collect(); - // but the epoch changes from the other forks must still exist - assert!(epoch_changes - .shared_data() - .tree() - .iter() - .map(|(h, _, _)| h) - .any(|h| fork_2.contains(h))); + // no hashes from the first fork must exist on the tree + assert!(!nodes.iter().any(|h| fork_1.contains(h))); - assert!(epoch_changes - .shared_data() - .tree() - .iter() - .map(|(h, _, _)| h) - .any(|h| fork_3.contains(h)),); + // but the epoch changes from the other forks must still exist + assert!(nodes.iter().any(|h| fork_2.contains(h))); + assert!(nodes.iter().any(|h| fork_3.contains(h))); // finalizing block #25 from the canon chain should prune out the second fork - client.finalize_block(canon_hashes[24], None, false).unwrap(); + client.finalize_block(canon[24], None, false).unwrap(); propose_and_import_blocks_wrap(BlockId::Hash(client.chain_info().best_hash), 8); - // at this point no hashes from the second fork must exist on the tree - assert!(!epoch_changes - .shared_data() - .tree() - .iter() - .map(|(h, _, _)| h) - .any(|h| fork_2.contains(h)),); + let nodes: Vec<_> = epoch_changes.shared_data().tree().iter().map(|(h, _, _)| *h).collect(); - // while epoch changes from the last fork should still exist - assert!(epoch_changes - .shared_data() - .tree() - .iter() - .map(|(h, _, _)| h) - .any(|h| fork_3.contains(h)),); + // no hashes from the other forks must exist on the tree + assert!(!nodes.iter().any(|h| fork_2.contains(h))); + assert!(!nodes.iter().any(|h| fork_3.contains(h))); + + // Check that we contain the nodes that we care about + assert!(nodes.iter().any(|h| *h == canon[18])); + assert!(nodes.iter().any(|h| *h == canon[24])); } #[test] diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 2e0186495db5e..f8b6253ef2353 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -518,8 +518,8 @@ where let is_descendent_of = descendent_of_builder.build_is_descendent_of(None); let predicate = |epoch: &PersistedEpochHeader| match *epoch { - PersistedEpochHeader::Genesis(_, ref epoch_1) => slot 
>= epoch_1.end_slot, - PersistedEpochHeader::Regular(ref epoch_n) => slot >= epoch_n.end_slot, + PersistedEpochHeader::Genesis(ref epoch_0, _) => epoch_0.start_slot <= slot, + PersistedEpochHeader::Regular(ref epoch_n) => epoch_n.start_slot <= slot, }; // prune any epochs which could not be _live_ as of the children of the @@ -777,11 +777,6 @@ where } } - /// Return the inner fork tree. - pub fn tree(&self) -> &ForkTree> { - &self.inner - } - /// Reset to a specified pair of epochs, as if they were announced at blocks `parent_hash` and /// `hash`. pub fn reset(&mut self, parent_hash: Hash, hash: Hash, number: Number, current: E, next: E) { @@ -832,6 +827,11 @@ where self.epochs.remove(&(h, n)); }); } + + /// Return the inner fork tree (mostly useful for testing) + pub fn tree(&self) -> &ForkTree> { + &self.inner + } } /// Type alias to produce the epoch-changes tree from a block type. @@ -1114,6 +1114,89 @@ mod tests { } } + #[test] + fn prune_removes_stale_nodes() { + // +---D +-------F + // | | + // 0---A---B--(x)--C--(y)--G + // | | + // +---H +-------E + // + // Test parameters: + // - epoch duration: 100 + // + // We are going to prune the tree at: + // - 'x', a node between B and C + // - 'y', a node between C and G + + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, block) { + (b"0", _) => Ok(true), + (b"A", b) => Ok(b != b"0"), + (b"B", b) => Ok(b != b"0" && b != b"A" && b != b"D"), + (b"C", b) => Ok(b == b"F" || b == b"G" || b == b"y"), + (b"x", b) => Ok(b == b"C" || b == b"F" || b == b"G" || b == b"y"), + (b"y", b) => Ok(b == b"G"), + _ => Ok(false), + } + }; + + let mut epoch_changes = EpochChanges::new(); + + let mut import_at = |slot, hash: &Hash, number, parent_hash, parent_number| { + let make_genesis = |slot| Epoch { start_slot: slot, duration: 100 }; + // Get epoch descriptor valid for 'slot' + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, parent_hash, parent_number, slot) + .unwrap() + .unwrap(); + // Increment it + let next_epoch_desc = epoch_changes + .viable_epoch(&epoch_descriptor, &make_genesis) + .unwrap() + .increment(()); + // Assign it to hash/number + epoch_changes + .import(&is_descendent_of, *hash, number, *parent_hash, next_epoch_desc) + .unwrap(); + }; + + import_at(100, b"A", 10, b"0", 0); + import_at(200, b"B", 20, b"A", 10); + import_at(300, b"C", 30, b"B", 20); + import_at(200, b"D", 20, b"A", 10); + import_at(300, b"E", 30, b"B", 20); + import_at(400, b"F", 40, b"C", 30); + import_at(400, b"G", 40, b"C", 30); + import_at(100, b"H", 10, b"0", 0); + + let mut nodes: Vec<_> = epoch_changes.tree().iter().map(|(h, _, _)| h).collect(); + nodes.sort(); + assert_eq!(nodes, vec![b"A", b"B", b"C", b"D", b"E", b"F", b"G", b"H"]); + + // Finalize block 'x' @ number 25, slot 230 + // This should prune all nodes imported by blocks with a number < 25 that are not + // ancestors of 'x' and all nodes before the one holding the epoch information + // to which 'x' belongs to (i.e. before A). + + epoch_changes.prune_finalized(&is_descendent_of, b"x", 25, 230).unwrap(); + + let mut nodes: Vec<_> = epoch_changes.tree().iter().map(|(h, _, _)| h).collect(); + nodes.sort(); + assert_eq!(nodes, vec![b"A", b"B", b"C", b"E", b"F", b"G"]); + + // Finalize block y @ number 35, slot 330 + // This should prune all nodes imported by blocks with a number < 35 that are not + // ancestors of 'y' and all nodes before the one holding the epoch information + // to which 'y' belongs to (i.e. before B). 
+ + epoch_changes.prune_finalized(&is_descendent_of, b"y", 35, 330).unwrap(); + + let mut nodes: Vec<_> = epoch_changes.tree().iter().map(|(h, _, _)| h).collect(); + nodes.sort(); + assert_eq!(nodes, vec![b"B", b"C", b"F", b"G"]); + } + /// Test that ensures that the gap is not enabled when we import multiple genesis blocks. #[test] fn gap_is_not_enabled_when_multiple_genesis_epochs_are_imported() { From cbb48e5e503300817b6837d6597659ed35212e99 Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Wed, 9 Nov 2022 10:11:51 +0100 Subject: [PATCH 333/348] Bound Election and Staking by MaxActiveValidators (#12436) * bounding election provider with kian * multi phase implement bounded election provider * election provider blanket implementation * staking compiles * fix test for election provider support * fmt * fixing epmp tests, does not compile yet * fix epmp tests * fix staking tests * fmt * fix runtime tests * fmt * remove outdated wip tags * add enum error * sort and truncate supports * comment * error when unsupported number of election winners * compiling wip after kian's suggestions * fix TODOs * remove,fix tags * ensure validator count does not exceed maxwinners * clean up * some more clean up and todos * handle too many winners * rename parameter for mock * todo * add sort and truncate rule if there are too many winners * fmt * fail, not swallow emergency result bound not met * remove too many winners resolution as it can be guaranteed to be bounded * fix benchmark * give MaxWinners more contextual name * make ready solution generic over T * kian feedback * fix stuff * Kian's way of solvign this * comment fix * fix compile * remove use of BoundedExecution * fmt * comment out failing integrity test * cap validator count increment to max winners * dont panic * add test for bad data provider * Update frame/staking/src/pallet/impls.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * fix namespace conflict and add test for onchain max winners less than desired targets * defensive unwrap * early convert to bounded vec * fix syntax * fmt * fix doc * fix rustdoc * fmt * fix maxwinner count for benchmarking * add instant election for noelection * fmt * fix compile * pr feedbacks * always error at validator count exceeding max winners * add useful error message * pr comments * import fix * add checked_desired_targets * fmt * fmt * fix rust doc Co-authored-by: parity-processbot <> Co-authored-by: kianenigma Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- bin/node/runtime/src/lib.rs | 26 ++- frame/babe/src/mock.rs | 5 +- .../src/benchmarking.rs | 8 +- .../election-provider-multi-phase/src/lib.rs | 164 ++++++++-------- .../election-provider-multi-phase/src/mock.rs | 49 ++--- .../src/signed.rs | 48 ++++- frame/election-provider-support/src/lib.rs | 137 ++++++++------ .../election-provider-support/src/onchain.rs | 178 +++++++++--------- frame/fast-unstake/src/mock.rs | 9 +- frame/grandpa/src/mock.rs | 5 +- .../nomination-pools/benchmarking/src/mock.rs | 2 +- .../nomination-pools/test-staking/src/mock.rs | 2 +- frame/offences/benchmarking/src/mock.rs | 5 +- frame/root-offences/src/mock.rs | 5 +- frame/session/benchmarking/src/mock.rs | 5 +- frame/staking/src/lib.rs | 4 + frame/staking/src/mock.rs | 6 +- frame/staking/src/pallet/impls.rs | 79 +++++--- frame/staking/src/pallet/mod.rs | 55 +++++- frame/staking/src/tests.rs | 54 ++++++ primitives/npos-elections/src/lib.rs | 4 +- 21 files changed, 538 insertions(+), 
312 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 999b178d10c55..d94a31422be8b 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -570,7 +570,7 @@ impl pallet_staking::Config for Runtime { type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; - type GenesisElectionProvider = onchain::UnboundedExecution; + type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; // This a placeholder, to be introduced in the next PR as an instance of bags-list type TargetList = pallet_staking::UseValidatorsMap; @@ -628,7 +628,14 @@ frame_election_provider_support::generate_solution_type!( parameter_types! { pub MaxNominations: u32 = ::LIMIT as u32; - pub MaxElectingVoters: u32 = 10_000; + pub MaxElectingVoters: u32 = 40_000; + pub MaxElectableTargets: u16 = 10_000; + // OnChain values are lower. + pub MaxOnChainElectingVoters: u32 = 5000; + pub MaxOnChainElectableTargets: u16 = 1250; + // The maximum winners that can be elected by the Election pallet which is equivalent to the + // maximum active validators the staking pallet can have. + pub MaxActiveValidators: u32 = 1000; } /// The numbers configured here could always be more than the the maximum limits of staking pallet @@ -679,11 +686,9 @@ impl onchain::Config for OnChainSeqPhragmen { >; type DataProvider = ::DataProvider; type WeightInfo = frame_election_provider_support::weights::SubstrateWeight; -} - -impl onchain::BoundedConfig for OnChainSeqPhragmen { - type VotersBound = MaxElectingVoters; - type TargetsBound = ConstU32<2_000>; + type MaxWinners = ::MaxWinners; + type VotersBound = MaxOnChainElectingVoters; + type TargetsBound = MaxOnChainElectableTargets; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -726,11 +731,12 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SlashHandler = (); // burn slashes type RewardHandler = (); // nothing to do upon rewards type DataProvider = Staking; - type Fallback = onchain::BoundedExecution; - type GovernanceFallback = onchain::BoundedExecution; + type Fallback = onchain::OnChainExecution; + type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen, OffchainRandomBalancing>; type ForceOrigin = EnsureRootOrHalfCouncil; - type MaxElectableTargets = ConstU16<{ u16::MAX }>; + type MaxElectableTargets = MaxElectableTargets; + type MaxWinners = MaxActiveValidators; type MaxElectingVoters = MaxElectingVoters; type BenchmarkingConfig = ElectionProviderBenchmarkConfig; type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight; diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 46aeabe743fe2..204de8aae172e 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -178,6 +178,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); + type MaxWinners = ConstU32<100>; + type VotersBound = ConstU32<{ u32::MAX }>; + type TargetsBound = ConstU32<{ u32::MAX }>; } impl pallet_staking::Config for Test { @@ -199,7 +202,7 @@ impl pallet_staking::Config for Test { type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; - type ElectionProvider = onchain::UnboundedExecution; + type ElectionProvider = 
onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index a0fd0a9a0512f..10041f6aec07c 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -220,11 +220,7 @@ frame_benchmarking::benchmarks! { let receiver = account("receiver", 0, SEED); let initial_balance = T::Currency::minimum_balance() * 10u32.into(); T::Currency::make_free_balance_be(&receiver, initial_balance); - let ready = ReadySolution { - supports: vec![], - score: Default::default(), - compute: Default::default() - }; + let ready = Default::default(); let deposit: BalanceOf = 10u32.into(); let reward: BalanceOf = T::SignedRewardBase::get(); @@ -403,7 +399,7 @@ frame_benchmarking::benchmarks! { assert_eq!(raw_solution.solution.voter_count() as u32, a); assert_eq!(raw_solution.solution.unique_targets().len() as u32, d); }: { - assert_ok!(>::feasibility_check(raw_solution, ElectionCompute::Unsigned)); + assert!(>::feasibility_check(raw_solution, ElectionCompute::Unsigned).is_ok()); } // NOTE: this weight is not used anywhere, but the fact that it should succeed when execution in diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 3e5a6d12f575c..bc19e5143424c 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -114,8 +114,8 @@ //! If we reach the end of both phases (i.e. call to [`ElectionProvider::elect`] happens) and no //! good solution is queued, then the fallback strategy [`pallet::Config::Fallback`] is used to //! determine what needs to be done. The on-chain election is slow, and contains no balancing or -//! reduction post-processing. [`NoFallback`] does nothing and enables [`Phase::Emergency`], which -//! is a more *fail-safe* approach. +//! reduction post-processing. If [`pallet::Config::Fallback`] fails, the next phase +//! [`Phase::Emergency`] is enabled, which is a more *fail-safe* approach. //! //! ### Emergency Phase //! @@ -231,23 +231,25 @@ use codec::{Decode, Encode}; use frame_election_provider_support::{ - ElectionDataProvider, ElectionProvider, ElectionProviderBase, InstantElectionProvider, - NposSolution, + BoundedSupportsOf, ElectionDataProvider, ElectionProvider, ElectionProviderBase, + InstantElectionProvider, NposSolution, }; use frame_support::{ dispatch::DispatchClass, ensure, traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, weights::Weight, + DefaultNoBound, EqNoBound, PartialEqNoBound, }; use frame_system::{ensure_none, offchain::SendTransactionTypes}; use scale_info::TypeInfo; use sp_arithmetic::{ - traits::{Bounded, CheckedAdd, Zero}, + traits::{CheckedAdd, Zero}, UpperOf, }; use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, ElectionScore, EvaluateSupport, Supports, VoteWeight, + assignment_ratio_to_staked_normalized, BoundedSupports, ElectionScore, EvaluateSupport, + Supports, VoteWeight, }; use sp_runtime::{ transaction_validity::{ @@ -311,33 +313,6 @@ pub trait BenchmarkingConfig { const MAXIMUM_TARGETS: u32; } -/// A fallback implementation that transitions the pallet to the emergency phase. 
-pub struct NoFallback(sp_std::marker::PhantomData); - -impl ElectionProviderBase for NoFallback { - type AccountId = T::AccountId; - type BlockNumber = T::BlockNumber; - type DataProvider = T::DataProvider; - type Error = &'static str; - - fn ongoing() -> bool { - false - } -} - -impl ElectionProvider for NoFallback { - fn elect() -> Result, Self::Error> { - // Do nothing, this will enable the emergency phase. - Err("NoFallback.") - } -} - -impl InstantElectionProvider for NoFallback { - fn elect_with_bounds(_: usize, _: usize) -> Result, Self::Error> { - Err("NoFallback.") - } -} - /// Current phase of the pallet. #[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, Debug, TypeInfo)] pub enum Phase { @@ -445,13 +420,23 @@ impl Default for RawSolution { } /// A checked solution, ready to be enacted. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default, TypeInfo)] -pub struct ReadySolution { +#[derive( + PartialEqNoBound, + EqNoBound, + Clone, + Encode, + Decode, + RuntimeDebug, + DefaultNoBound, + scale_info::TypeInfo, +)] +#[scale_info(skip_type_params(T))] +pub struct ReadySolution { /// The final supports of the solution. /// /// This is target-major vector, storing each winners, total backing, and each individual /// backer. - pub supports: Supports, + pub supports: BoundedSupports, /// The score of the solution. /// /// This is needed to potentially challenge the solution. @@ -465,7 +450,6 @@ pub struct ReadySolution { /// /// These are stored together because they are often accessed together. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default, TypeInfo)] -#[codec(mel_bound())] #[scale_info(skip_type_params(T))] pub struct RoundSnapshot { /// All of the voters. @@ -561,6 +545,8 @@ pub enum FeasibilityError { InvalidRound, /// Comparison against `MinimumUntrustedScore` failed. UntrustedScoreTooLow, + /// Data Provider returned too many desired targets + TooManyDesiredTargets, } impl From for FeasibilityError { @@ -574,7 +560,10 @@ pub use pallet::*; pub mod pallet { use super::*; use frame_election_provider_support::{InstantElectionProvider, NposSolver}; - use frame_support::{pallet_prelude::*, traits::EstimateCallFee}; + use frame_support::{ + pallet_prelude::*, + traits::{DefensiveResult, EstimateCallFee}, + }; use frame_system::pallet_prelude::*; #[pallet::config] @@ -674,6 +663,13 @@ pub mod pallet { #[pallet::constant] type MaxElectableTargets: Get>; + /// The maximum number of winners that can be elected by this `ElectionProvider` + /// implementation. + /// + /// Note: This must always be greater or equal to `T::DataProvider::desired_targets()`. + #[pallet::constant] + type MaxWinners: Get; + /// Handler for the slashed deposits. type SlashHandler: OnUnbalanced>; @@ -691,6 +687,7 @@ pub mod pallet { AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, DataProvider = Self::DataProvider, + MaxWinners = Self::MaxWinners, >; /// Configuration of the governance-only fallback. @@ -701,6 +698,7 @@ pub mod pallet { AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, DataProvider = Self::DataProvider, + MaxWinners = Self::MaxWinners, >; /// OCW election solution miner algorithm implementation. 
@@ -968,9 +966,11 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); + // bound supports with T::MaxWinners + let supports = supports.try_into().map_err(|_| Error::::TooManyWinners)?; + // Note: we don't `rotate_round` at this point; the next call to // `ElectionProvider::elect` will succeed and take care of that. - let solution = ReadySolution { supports, score: Default::default(), @@ -1073,17 +1073,20 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; ensure!(Self::current_phase().is_emergency(), >::CallNotAllowed); - let maybe_max_voters = maybe_max_voters.map(|x| x as usize); - let maybe_max_targets = maybe_max_targets.map(|x| x as usize); + let supports = + T::GovernanceFallback::instant_elect(maybe_max_voters, maybe_max_targets).map_err( + |e| { + log!(error, "GovernanceFallback failed: {:?}", e); + Error::::FallbackFailed + }, + )?; - let supports = T::GovernanceFallback::elect_with_bounds( - maybe_max_voters.unwrap_or(Bounded::max_value()), - maybe_max_targets.unwrap_or(Bounded::max_value()), - ) - .map_err(|e| { - log!(error, "GovernanceFallback failed: {:?}", e); - Error::::FallbackFailed - })?; + // transform BoundedVec<_, T::GovernanceFallback::MaxWinners> into + // `BoundedVec<_, T::MaxWinners>` + let supports: BoundedVec<_, T::MaxWinners> = supports + .into_inner() + .try_into() + .defensive_map_err(|_| Error::::BoundNotMet)?; let solution = ReadySolution { supports, @@ -1154,6 +1157,10 @@ pub mod pallet { CallNotAllowed, /// The fallback failed FallbackFailed, + /// Some bound not met + BoundNotMet, + /// Submitted solution has too many winners + TooManyWinners, } #[pallet::validate_unsigned] @@ -1227,7 +1234,7 @@ pub mod pallet { /// Current best solution, signed or unsigned, queued to be returned upon `elect`. #[pallet::storage] #[pallet::getter(fn queued_solution)] - pub type QueuedSolution = StorageValue<_, ReadySolution>; + pub type QueuedSolution = StorageValue<_, ReadySolution>; /// Snapshot data of the round. /// @@ -1393,31 +1400,28 @@ impl Pallet { let targets = T::DataProvider::electable_targets(Some(target_limit)) .map_err(ElectionError::DataProvider)?; + let voters = T::DataProvider::electing_voters(Some(voter_limit)) .map_err(ElectionError::DataProvider)?; - let mut desired_targets = - T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?; - // Defensive-only. 
if targets.len() > target_limit || voters.len() > voter_limit { - debug_assert!(false, "Snapshot limit has not been respected."); return Err(ElectionError::DataProvider("Snapshot too big for submission.")) } - // If `desired_targets` > `targets.len()`, cap `desired_targets` to that level and emit a - // warning - let max_len = targets - .len() - .try_into() - .map_err(|_| ElectionError::DataProvider("Failed to convert usize"))?; - if desired_targets > max_len { + let mut desired_targets = + T::DataProvider::desired_targets().map_err(ElectionError::DataProvider)?; + + // If `desired_targets` > `targets.len()`, cap `desired_targets` to that + // level and emit a warning + let max_desired_targets: u32 = (targets.len() as u32).min(T::MaxWinners::get()); + if desired_targets > max_desired_targets { log!( warn, "desired_targets: {} > targets.len(): {}, capping desired_targets", desired_targets, - max_len + max_desired_targets ); - desired_targets = max_len; + desired_targets = max_desired_targets; } Ok((targets, voters, desired_targets)) @@ -1466,7 +1470,7 @@ impl Pallet { pub fn feasibility_check( raw_solution: RawSolution>, compute: ElectionCompute, - ) -> Result, FeasibilityError> { + ) -> Result, FeasibilityError> { let RawSolution { solution, score, round } = raw_solution; // First, check round. @@ -1479,6 +1483,11 @@ impl Pallet { Self::desired_targets().ok_or(FeasibilityError::SnapshotUnavailable)?; ensure!(winners.len() as u32 == desired_targets, FeasibilityError::WrongWinnerCount); + // Fail early if targets requested by data provider exceed maximum winners supported. + ensure!( + desired_targets <= ::MaxWinners::get(), + FeasibilityError::TooManyDesiredTargets + ); // Ensure that the solution's score can pass absolute min-score. let submitted_score = raw_solution.score; @@ -1539,6 +1548,8 @@ impl Pallet { let known_score = supports.evaluate(); ensure!(known_score == score, FeasibilityError::InvalidScore); + // Size of winners in miner solution is equal to `desired_targets` <= `MaxWinners`. + let supports = supports.try_into().expect("checked desired_targets <= MaxWinners; qed"); Ok(ReadySolution { supports, compute, score }) } @@ -1558,7 +1569,7 @@ impl Pallet { Self::kill_snapshot(); } - fn do_elect() -> Result, ElectionError> { + fn do_elect() -> Result, ElectionError> { // We have to unconditionally try finalizing the signed phase here. 
There are only two // possibilities: // @@ -1570,13 +1581,15 @@ impl Pallet { >::take() .ok_or(ElectionError::::NothingQueued) .or_else(|_| { - ::elect() - .map(|supports| ReadySolution { - supports, - score: Default::default(), - compute: ElectionCompute::Fallback, - }) + T::Fallback::instant_elect(None, None) .map_err(|fe| ElectionError::Fallback(fe)) + .and_then(|supports| { + Ok(ReadySolution { + supports, + score: Default::default(), + compute: ElectionCompute::Fallback, + }) + }) }) .map(|ReadySolution { compute, score, supports }| { Self::deposit_event(Event::ElectionFinalized { compute, score }); @@ -1609,18 +1622,19 @@ impl ElectionProviderBase for Pallet { type AccountId = T::AccountId; type BlockNumber = T::BlockNumber; type Error = ElectionError; + type MaxWinners = T::MaxWinners; type DataProvider = T::DataProvider; +} +impl ElectionProvider for Pallet { fn ongoing() -> bool { match Self::current_phase() { Phase::Off => false, _ => true, } } -} -impl ElectionProvider for Pallet { - fn elect() -> Result, Self::Error> { + fn elect() -> Result, Self::Error> { match Self::do_elect() { Ok(supports) => { // All went okay, record the weight, put sign to be Off, clean snapshot, etc. @@ -1636,6 +1650,7 @@ impl ElectionProvider for Pallet { } } } + /// convert a DispatchError to a custom InvalidTransaction with the inner code being the error /// number. pub fn dispatch_error_to_invalid(error: DispatchError) -> InvalidTransaction { @@ -1854,7 +1869,6 @@ mod tests { }, Phase, }; - use frame_election_provider_support::ElectionProvider; use frame_support::{assert_noop, assert_ok}; use sp_npos_elections::{BalancingConfig, Support}; diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index f645886d78899..8ab7e5bbf733d 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -19,7 +19,7 @@ use super::*; use crate::{self as multi_phase, unsigned::MinerConfig}; use frame_election_provider_support::{ data_provider, - onchain::{self, UnboundedExecution}, + onchain::{self}, ElectionDataProvider, NposSolution, SequentialPhragmen, }; pub use frame_support::{assert_noop, assert_ok, pallet_prelude::GetDefault}; @@ -297,6 +297,7 @@ parameter_types! 
{ pub static MockWeightInfo: MockedWeightInfo = MockedWeightInfo::Real; pub static MaxElectingVoters: VoterIndex = u32::max_value(); pub static MaxElectableTargets: TargetIndex = TargetIndex::max_value(); + pub static MaxWinners: u32 = 200; pub static EpochLength: u64 = 30; pub static OnChainFallback: bool = true; @@ -308,6 +309,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen, Balancing>; type DataProvider = StakingMock; type WeightInfo = (); + type MaxWinners = MaxWinners; + type VotersBound = ConstU32<{ u32::MAX }>; + type TargetsBound = ConstU32<{ u32::MAX }>; } pub struct MockFallback; @@ -316,30 +320,19 @@ impl ElectionProviderBase for MockFallback { type BlockNumber = u64; type Error = &'static str; type DataProvider = StakingMock; - - fn ongoing() -> bool { - false - } -} -impl ElectionProvider for MockFallback { - fn elect() -> Result, Self::Error> { - Self::elect_with_bounds(Bounded::max_value(), Bounded::max_value()) - } + type MaxWinners = MaxWinners; } impl InstantElectionProvider for MockFallback { - fn elect_with_bounds( - max_voters: usize, - max_targets: usize, - ) -> Result, Self::Error> { + fn instant_elect( + max_voters: Option, + max_targets: Option, + ) -> Result, Self::Error> { if OnChainFallback::get() { - onchain::UnboundedExecution::::elect_with_bounds( - max_voters, - max_targets, - ) - .map_err(|_| "onchain::UnboundedExecution failed.") + onchain::OnChainExecution::::instant_elect(max_voters, max_targets) + .map_err(|_| "onchain::OnChainExecution failed.") } else { - super::NoFallback::::elect_with_bounds(max_voters, max_targets) + Err("NoFallback.") } } } @@ -404,10 +397,12 @@ impl crate::Config for Runtime { type WeightInfo = (); type BenchmarkingConfig = TestBenchmarkingConfig; type Fallback = MockFallback; - type GovernanceFallback = UnboundedExecution; + type GovernanceFallback = + frame_election_provider_support::onchain::OnChainExecution; type ForceOrigin = frame_system::EnsureRoot; type MaxElectingVoters = MaxElectingVoters; type MaxElectableTargets = MaxElectableTargets; + type MaxWinners = MaxWinners; type MinerConfig = Self; type Solver = SequentialPhragmen, Balancing>; } @@ -424,6 +419,8 @@ pub type Extrinsic = sp_runtime::testing::TestXt; parameter_types! 
{ pub MaxNominations: u32 = ::LIMIT as u32; + // only used in testing to manipulate mock behaviour + pub static DataProviderAllowBadData: bool = false; } #[derive(Default)] @@ -438,7 +435,9 @@ impl ElectionDataProvider for StakingMock { fn electable_targets(maybe_max_len: Option) -> data_provider::Result> { let targets = Targets::get(); - if maybe_max_len.map_or(false, |max_len| targets.len() > max_len) { + if !DataProviderAllowBadData::get() && + maybe_max_len.map_or(false, |max_len| targets.len() > max_len) + { return Err("Targets too big") } @@ -449,8 +448,10 @@ impl ElectionDataProvider for StakingMock { maybe_max_len: Option, ) -> data_provider::Result>> { let mut voters = Voters::get(); - if let Some(max_len) = maybe_max_len { - voters.truncate(max_len) + if !DataProviderAllowBadData::get() { + if let Some(max_len) = maybe_max_len { + voters.truncate(max_len) + } } Ok(voters) diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index cda87dd6c839a..9d629ad77fd79 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -462,7 +462,7 @@ impl Pallet { /// /// Infallible pub fn finalize_signed_phase_accept_solution( - ready_solution: ReadySolution, + ready_solution: ReadySolution, who: &T::AccountId, deposit: BalanceOf, call_fee: BalanceOf, @@ -537,7 +537,7 @@ impl Pallet { #[cfg(test)] mod tests { use super::*; - use crate::{mock::*, ElectionCompute, Error, Event, Perbill, Phase}; + use crate::{mock::*, ElectionCompute, ElectionError, Error, Event, Perbill, Phase}; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; #[test] @@ -557,6 +557,50 @@ mod tests { }) } + #[test] + fn data_provider_should_respect_target_limits() { + ExtBuilder::default().build_and_execute(|| { + // given a reduced expectation of maximum electable targets + MaxElectableTargets::set(2); + // and a data provider that does not respect limits + DataProviderAllowBadData::set(true); + + assert_noop!( + MultiPhase::create_snapshot(), + ElectionError::DataProvider("Snapshot too big for submission."), + ); + }) + } + + #[test] + fn data_provider_should_respect_voter_limits() { + ExtBuilder::default().build_and_execute(|| { + // given a reduced expectation of maximum electing voters + MaxElectingVoters::set(2); + // and a data provider that does not respect limits + DataProviderAllowBadData::set(true); + + assert_noop!( + MultiPhase::create_snapshot(), + ElectionError::DataProvider("Snapshot too big for submission."), + ); + }) + } + + #[test] + fn desired_targets_greater_than_max_winners() { + ExtBuilder::default().build_and_execute(|| { + // given desired_targets bigger than MaxWinners + DesiredTargets::set(4); + MaxWinners::set(3); + + let (_, _, actual_desired_targets) = MultiPhase::create_snapshot_external().unwrap(); + + // snapshot is created with min of desired_targets and MaxWinners + assert_eq!(actual_desired_targets, 3); + }) + } + #[test] fn should_pay_deposit() { ExtBuilder::default().build_and_execute(|| { diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 5ee65e102bd06..38924a18e2f54 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -82,6 +82,7 @@ //! # use frame_election_provider_support::{*, data_provider}; //! # use sp_npos_elections::{Support, Assignment}; //! # use frame_support::traits::ConstU32; +//! # use frame_support::bounded_vec; //! //! 
type AccountId = u64; //! type Balance = u64; @@ -137,15 +138,16 @@ //! type BlockNumber = BlockNumber; //! type Error = &'static str; //! type DataProvider = T::DataProvider; -//! fn ongoing() -> bool { false } -//! +//! type MaxWinners = ConstU32<{ u32::MAX }>; +//! //! } //! //! impl ElectionProvider for GenericElectionProvider { -//! fn elect() -> Result, Self::Error> { +//! fn ongoing() -> bool { false } +//! fn elect() -> Result, Self::Error> { //! Self::DataProvider::electable_targets(None) //! .map_err(|_| "failed to elect") -//! .map(|t| vec![(t[0], Support::default())]) +//! .map(|t| bounded_vec![(t[0], Support::default())]) //! } //! } //! } @@ -354,11 +356,7 @@ pub trait ElectionDataProvider { fn clear() {} } -/// Base trait for [`ElectionProvider`] and [`BoundedElectionProvider`]. It is -/// meant to be used only with an extension trait that adds an election -/// functionality. -/// -/// Data can be bounded or unbounded and is fetched from [`Self::DataProvider`]. +/// Base trait for types that can provide election pub trait ElectionProviderBase { /// The account identifier type. type AccountId; @@ -369,90 +367,109 @@ pub trait ElectionProviderBase { /// The error type that is returned by the provider. type Error: Debug; + /// The upper bound on election winners that can be returned. + /// + /// # WARNING + /// + /// when communicating with the data provider, one must ensure that + /// `DataProvider::desired_targets` returns a value less than this bound. An + /// implementation can chose to either return an error and/or sort and + /// truncate the output to meet this bound. + type MaxWinners: Get; + /// The data provider of the election. type DataProvider: ElectionDataProvider< AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, >; - /// Indicate if this election provider is currently ongoing an asynchronous election or not. - fn ongoing() -> bool; + /// checked call to `Self::DataProvider::desired_targets()` ensuring the value never exceeds + /// [`Self::MaxWinners`]. + fn desired_targets_checked() -> data_provider::Result { + match Self::DataProvider::desired_targets() { + Ok(desired_targets) => + if desired_targets <= Self::MaxWinners::get() { + Ok(desired_targets) + } else { + Err("desired_targets should not be greater than MaxWinners") + }, + Err(e) => Err(e), + } + } } /// Elect a new set of winners, bounded by `MaxWinners`. /// -/// Returns a result in bounded, target major format, namely as -/// *BoundedVec<(AccountId, Vec), MaxWinners>*. -pub trait BoundedElectionProvider: ElectionProviderBase { - /// The upper bound on election winners. - type MaxWinners: Get; +/// It must always use [`ElectionProviderBase::DataProvider`] to fetch the data it needs. +/// +/// This election provider that could function asynchronously. This implies that this election might +/// needs data ahead of time (ergo, receives no arguments to `elect`), and might be `ongoing` at +/// times. +pub trait ElectionProvider: ElectionProviderBase { + /// Indicate if this election provider is currently ongoing an asynchronous election or not. + fn ongoing() -> bool; + /// Performs the election. This should be implemented as a self-weighing function. The /// implementor should register its appropriate weight at the end of execution with the /// system pallet directly. - fn elect() -> Result, Self::Error>; -} - -/// Same a [`BoundedElectionProvider`], but no bounds are imposed on the number -/// of winners. 
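Stated outside of any pallet, the guard that the new `desired_targets_checked` helper introduces is just a comparison against the `MaxWinners` bound. A minimal illustrative sketch over plain `u32`s (the function name is hypothetical, not part of the patch):

/// Sketch of the `desired_targets_checked` guard: the value reported by the data
/// provider is only accepted while it fits within the provider's `MaxWinners`.
fn check_desired_targets(desired_targets: u32, max_winners: u32) -> Result<u32, &'static str> {
    if desired_targets <= max_winners {
        Ok(desired_targets)
    } else {
        Err("desired_targets should not be greater than MaxWinners")
    }
}

#[test]
fn desired_targets_guard() {
    assert_eq!(check_desired_targets(2, 10), Ok(2));
    assert!(check_desired_targets(11, 10).is_err());
}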
-/// -/// The result is returned in a target major format, namely as -///*Vec<(AccountId, Vec)>*. -pub trait ElectionProvider: ElectionProviderBase { - /// Performs the election. This should be implemented as a self-weighing - /// function, similar to [`BoundedElectionProvider::elect()`]. - fn elect() -> Result, Self::Error>; + fn elect() -> Result, Self::Error>; } -/// A sub-trait of the [`ElectionProvider`] for cases where we need to be sure -/// an election needs to happen instantly, not asynchronously. -/// -/// The same `DataProvider` is assumed to be used. +/// A (almost) marker trait that signifies an election provider as working synchronously. i.e. being +/// *instant*. /// -/// Consequently, allows for control over the amount of data that is being -/// fetched from the [`ElectionProviderBase::DataProvider`]. -pub trait InstantElectionProvider: ElectionProvider { - /// Elect a new set of winners, but unlike [`ElectionProvider::elect`] which cannot enforce - /// bounds, this trait method can enforce bounds on the amount of data provided by the - /// `DataProvider`. - /// - /// An implementing type, if itself bounded, should choose the minimum of the two bounds to - /// choose the final value of `max_voters` and `max_targets`. In other words, an implementation - /// should guarantee that `max_voter` and `max_targets` provided to this method are absolutely - /// respected. - fn elect_with_bounds( - max_voters: usize, - max_targets: usize, - ) -> Result, Self::Error>; +/// This must still use the same data provider as with [`ElectionProviderBase::DataProvider`]. +/// However, it can optionally overwrite the amount of voters and targets that are fetched from the +/// data provider at runtime via `forced_input_voters_bound` and `forced_input_target_bound`. +pub trait InstantElectionProvider: ElectionProviderBase { + fn instant_elect( + forced_input_voters_bound: Option, + forced_input_target_bound: Option, + ) -> Result, Self::Error>; } -/// An election provider to be used only for testing. -#[cfg(feature = "std")] +/// An election provider that does nothing whatsoever. pub struct NoElection(sp_std::marker::PhantomData); -#[cfg(feature = "std")] -impl ElectionProviderBase - for NoElection<(AccountId, BlockNumber, DataProvider)> +impl ElectionProviderBase + for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> where DataProvider: ElectionDataProvider, + MaxWinners: Get, { type AccountId = AccountId; type BlockNumber = BlockNumber; type Error = &'static str; + type MaxWinners = MaxWinners; type DataProvider = DataProvider; +} +impl ElectionProvider + for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> +where + DataProvider: ElectionDataProvider, + MaxWinners: Get, +{ fn ongoing() -> bool { false } + + fn elect() -> Result, Self::Error> { + Err("`NoElection` cannot do anything.") + } } -#[cfg(feature = "std")] -impl ElectionProvider - for NoElection<(AccountId, BlockNumber, DataProvider)> +impl InstantElectionProvider + for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> where DataProvider: ElectionDataProvider, + MaxWinners: Get, { - fn elect() -> Result, Self::Error> { - Err(" cannot do anything.") + fn instant_elect( + _: Option, + _: Option, + ) -> Result, Self::Error> { + Err("`NoElection` cannot do anything.") } } @@ -650,3 +667,9 @@ pub type Voter = (AccountId, VoteWeight, BoundedVec = Voter<::AccountId, ::MaxVotesPerVoter>; + +/// Same as `BoundedSupports` but parameterized by a `ElectionProviderBase`. 
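The new `instant_elect` signature leaves it to the implementation to reconcile its statically configured bounds with the caller-supplied ones; the onchain implementation later in this patch keeps whichever is stricter. A minimal sketch of that reconciliation, with illustrative names and values:

/// Sketch: combine a configured bound with an optional bound handed to `instant_elect`,
/// keeping the smaller of the two. `None` means the caller imposes no extra restriction.
fn effective_bound(configured: u32, forced: Option<u32>) -> usize {
    configured.min(forced.unwrap_or(u32::MAX)) as usize
}

#[test]
fn stricter_bound_wins() {
    assert_eq!(effective_bound(600, None), 600);
    assert_eq!(effective_bound(600, Some(100)), 100);
    assert_eq!(effective_bound(600, Some(10_000)), 600);
}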
+pub type BoundedSupportsOf = BoundedSupports< + ::AccountId, + ::MaxWinners, +>; diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index 88aa6ca7267a0..483c402fe249c 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -20,11 +20,13 @@ //! careful when using it onchain. use crate::{ - Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase, InstantElectionProvider, - NposSolver, WeightInfo, + BoundedSupportsOf, Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase, + InstantElectionProvider, NposSolver, WeightInfo, }; use frame_support::{dispatch::DispatchClass, traits::Get}; -use sp_npos_elections::*; +use sp_npos_elections::{ + assignment_ratio_to_staked_normalized, to_supports, BoundedSupports, ElectionResult, VoteWeight, +}; use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData, prelude::*}; /// Errors of the on-chain election. @@ -34,6 +36,9 @@ pub enum Error { NposElections(sp_npos_elections::Error), /// Errors from the data provider. DataProvider(&'static str), + /// Configurational error caused by `desired_targets` requested by data provider exceeding + /// `MaxWinners`. + TooManyWinners, } impl From for Error { @@ -44,65 +49,71 @@ impl From for Error { /// A simple on-chain implementation of the election provider trait. /// -/// This will accept voting data on the fly and produce the results immediately. -/// -/// The [`ElectionProvider`] implementation of this type does not impose any dynamic limits on the -/// number of voters and targets that are fetched. This could potentially make this unsuitable for -/// execution onchain. One could, however, impose bounds on it by using `BoundedExecution` using the -/// `MaxVoters` and `MaxTargets` bonds in the `BoundedConfig` trait. -/// -/// On the other hand, the [`InstantElectionProvider`] implementation does limit these inputs -/// dynamically. If you use `elect_with_bounds` along with `InstantElectionProvider`, the bound that -/// would be used is the minimum of the dynamic bounds given as arguments to `elect_with_bounds` and -/// the trait bounds (`MaxVoters` and `MaxTargets`). +/// This implements both `ElectionProvider` and `InstantElectionProvider`. /// -/// Please use `BoundedExecution` at all times except at genesis or for testing, with thoughtful -/// bounds in order to bound the potential execution time. Limit the use `UnboundedExecution` at -/// genesis or for testing, as it does not bound the inputs. However, this can be used with -/// `[InstantElectionProvider::elect_with_bounds`] that dynamically imposes limits. -pub struct BoundedExecution(PhantomData); +/// This type has some utilities to make it safe. Nonetheless, it should be used with utmost care. A +/// thoughtful value must be set as [`Config::VotersBound`] and [`Config::TargetsBound`] to ensure +/// the size of the input is sensible. +pub struct OnChainExecution(PhantomData); -/// An unbounded variant of [`BoundedExecution`]. -/// -/// ### Warning -/// -/// This can be very expensive to run frequently on-chain. Use with care. -pub struct UnboundedExecution(PhantomData); +#[deprecated(note = "use OnChainExecution, which is bounded by default")] +pub type BoundedExecution = OnChainExecution; /// Configuration trait for an onchain election execution. pub trait Config { /// Needed for weight registration. type System: frame_system::Config; + /// `NposSolver` that should be used, an example would be `PhragMMS`. 
type Solver: NposSolver< AccountId = ::AccountId, Error = sp_npos_elections::Error, >; + /// Something that provides the data for election. type DataProvider: ElectionDataProvider< AccountId = ::AccountId, BlockNumber = ::BlockNumber, >; + /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; -} -pub trait BoundedConfig: Config { - /// Bounds the number of voters. + /// Upper bound on maximum winners from electable targets. + /// + /// As noted in the documentation of [`ElectionProviderBase::MaxWinners`], this value should + /// always be more than `DataProvider::desired_target`. + type MaxWinners: Get; + + /// Bounds the number of voters, when calling into [`Config::DataProvider`]. It might be + /// overwritten in the `InstantElectionProvider` impl. type VotersBound: Get; - /// Bounds the number of targets. + + /// Bounds the number of targets, when calling into [`Config::DataProvider`]. It might be + /// overwritten in the `InstantElectionProvider` impl. type TargetsBound: Get; } -fn elect_with( +/// Same as `BoundedSupportsOf` but for `onchain::Config`. +pub type OnChainBoundedSupportsOf = BoundedSupports< + <::System as frame_system::Config>::AccountId, + ::MaxWinners, +>; + +fn elect_with_input_bounds( maybe_max_voters: Option, maybe_max_targets: Option, -) -> Result::AccountId>, Error> { +) -> Result, Error> { let voters = T::DataProvider::electing_voters(maybe_max_voters).map_err(Error::DataProvider)?; let targets = T::DataProvider::electable_targets(maybe_max_targets).map_err(Error::DataProvider)?; let desired_targets = T::DataProvider::desired_targets().map_err(Error::DataProvider)?; + if desired_targets > T::MaxWinners::get() { + // early exit + return Err(Error::TooManyWinners) + } + let voters_len = voters.len() as u32; let targets_len = targets.len() as u32; @@ -130,69 +141,43 @@ fn elect_with( DispatchClass::Mandatory, ); - Ok(to_supports(&staked)) -} - -impl ElectionProvider for UnboundedExecution { - fn elect() -> Result, Self::Error> { - // This should not be called if not in `std` mode (and therefore neither in genesis nor in - // testing) - if cfg!(not(feature = "std")) { - frame_support::log::error!( - "Please use `InstantElectionProvider` instead to provide bounds on election if not in \ - genesis or testing mode" - ); - } + // defensive: Since npos solver returns a result always bounded by `desired_targets`, this is + // never expected to happen as long as npos solver does what is expected for it to do. 
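The fallible conversion this defensive comment refers to (just below in the hunk) is an ordinary `Vec` to `BoundedVec` conversion whose failure is mapped to `Error::TooManyWinners`. A self-contained sketch of the same pattern, assuming an illustrative bound of 2 and a string error instead of the pallet error type:

use frame_support::{traits::ConstU32, BoundedVec};

/// Sketch: an unbounded `Vec` only converts into a `BoundedVec` if it fits the bound;
/// otherwise the caller maps the failure to an error such as `TooManyWinners`.
fn bound_supports(unbounded: Vec<u32>) -> Result<BoundedVec<u32, ConstU32<2>>, &'static str> {
    unbounded.try_into().map_err(|_| "too many winners")
}

#[test]
fn oversized_results_are_rejected() {
    assert!(bound_supports(vec![1, 2]).is_ok());
    assert!(bound_supports(vec![1, 2, 3]).is_err());
}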
+ let supports: OnChainBoundedSupportsOf = + to_supports(&staked).try_into().map_err(|_| Error::TooManyWinners)?; - elect_with::(None, None) - } + Ok(supports) } -impl ElectionProviderBase for UnboundedExecution { +impl ElectionProviderBase for OnChainExecution { type AccountId = ::AccountId; type BlockNumber = ::BlockNumber; type Error = Error; + type MaxWinners = T::MaxWinners; type DataProvider = T::DataProvider; - - fn ongoing() -> bool { - false - } } -impl InstantElectionProvider for UnboundedExecution { - fn elect_with_bounds( - max_voters: usize, - max_targets: usize, - ) -> Result, Self::Error> { - elect_with::(Some(max_voters), Some(max_targets)) +impl InstantElectionProvider for OnChainExecution { + fn instant_elect( + forced_input_voters_bound: Option, + forced_input_target_bound: Option, + ) -> Result, Self::Error> { + elect_with_input_bounds::( + Some(T::VotersBound::get().min(forced_input_voters_bound.unwrap_or(u32::MAX)) as usize), + Some(T::TargetsBound::get().min(forced_input_target_bound.unwrap_or(u32::MAX)) as usize), + ) } } -impl ElectionProviderBase for BoundedExecution { - type AccountId = ::AccountId; - type BlockNumber = ::BlockNumber; - type Error = Error; - type DataProvider = T::DataProvider; - +impl ElectionProvider for OnChainExecution { fn ongoing() -> bool { false } -} - -impl ElectionProvider for BoundedExecution { - fn elect() -> Result, Self::Error> { - elect_with::(Some(T::VotersBound::get() as usize), Some(T::TargetsBound::get() as usize)) - } -} -impl InstantElectionProvider for BoundedExecution { - fn elect_with_bounds( - max_voters: usize, - max_targets: usize, - ) -> Result, Self::Error> { - elect_with::( - Some(max_voters.min(T::VotersBound::get() as usize)), - Some(max_targets.min(T::TargetsBound::get() as usize)), + fn elect() -> Result, Self::Error> { + elect_with_input_bounds::( + Some(T::VotersBound::get() as usize), + Some(T::TargetsBound::get() as usize), ) } } @@ -200,8 +185,8 @@ impl InstantElectionProvider for BoundedExecution { #[cfg(test)] mod tests { use super::*; - use crate::{PhragMMS, SequentialPhragmen}; - use frame_support::traits::ConstU32; + use crate::{ElectionProvider, PhragMMS, SequentialPhragmen}; + use frame_support::{assert_noop, parameter_types, traits::ConstU32}; use sp_npos_elections::Support; use sp_runtime::Perbill; type AccountId = u64; @@ -251,14 +236,17 @@ mod tests { struct PhragmenParams; struct PhragMMSParams; + parameter_types! 
{ + pub static MaxWinners: u32 = 10; + pub static DesiredTargets: u32 = 2; + } + impl Config for PhragmenParams { type System = Runtime; type Solver = SequentialPhragmen; type DataProvider = mock_data_provider::DataProvider; type WeightInfo = (); - } - - impl BoundedConfig for PhragmenParams { + type MaxWinners = MaxWinners; type VotersBound = ConstU32<600>; type TargetsBound = ConstU32<400>; } @@ -268,9 +256,7 @@ mod tests { type Solver = PhragMMS; type DataProvider = mock_data_provider::DataProvider; type WeightInfo = (); - } - - impl BoundedConfig for PhragMMSParams { + type MaxWinners = MaxWinners; type VotersBound = ConstU32<600>; type TargetsBound = ConstU32<400>; } @@ -299,7 +285,7 @@ mod tests { } fn desired_targets() -> data_provider::Result { - Ok(2) + Ok(DesiredTargets::get()) } fn next_election_prediction(_: BlockNumber) -> BlockNumber { @@ -312,7 +298,7 @@ mod tests { fn onchain_seq_phragmen_works() { sp_io::TestExternalities::new_empty().execute_with(|| { assert_eq!( - BoundedExecution::::elect().unwrap(), + as ElectionProvider>::elect().unwrap(), vec![ (10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }), (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }) @@ -321,11 +307,25 @@ mod tests { }) } + #[test] + fn too_many_winners_when_desired_targets_exceed_max_winners() { + sp_io::TestExternalities::new_empty().execute_with(|| { + // given desired targets larger than max winners + DesiredTargets::set(10); + MaxWinners::set(9); + + assert_noop!( + as ElectionProvider>::elect(), + Error::TooManyWinners, + ); + }) + } + #[test] fn onchain_phragmms_works() { sp_io::TestExternalities::new_empty().execute_with(|| { assert_eq!( - BoundedExecution::::elect().unwrap(), + as ElectionProvider>::elect().unwrap(), vec![ (10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }), (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }) diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index bac2d9aa9c8e4..d66f4ba5663d9 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -107,22 +107,23 @@ parameter_types! 
{ pub static BondingDuration: u32 = 3; pub static CurrentEra: u32 = 0; pub static Ongoing: bool = false; + pub static MaxWinners: u32 = 100; } pub struct MockElection; impl frame_election_provider_support::ElectionProviderBase for MockElection { type AccountId = AccountId; type BlockNumber = BlockNumber; + type MaxWinners = MaxWinners; type DataProvider = Staking; type Error = (); +} +impl frame_election_provider_support::ElectionProvider for MockElection { fn ongoing() -> bool { Ongoing::get() } -} - -impl frame_election_provider_support::ElectionProvider for MockElection { - fn elect() -> Result, Self::Error> { + fn elect() -> Result, Self::Error> { Err(()) } } diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 573a74d2bfae8..1a97b1345fe5d 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -182,6 +182,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); + type MaxWinners = ConstU32<100>; + type VotersBound = ConstU32<{ u32::MAX }>; + type TargetsBound = ConstU32<{ u32::MAX }>; } impl pallet_staking::Config for Test { @@ -203,7 +206,7 @@ impl pallet_staking::Config for Test { type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; - type ElectionProvider = onchain::UnboundedExecution; + type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; diff --git a/frame/nomination-pools/benchmarking/src/mock.rs b/frame/nomination-pools/benchmarking/src/mock.rs index db01989f2b563..6959aa9783ee5 100644 --- a/frame/nomination-pools/benchmarking/src/mock.rs +++ b/frame/nomination-pools/benchmarking/src/mock.rs @@ -110,7 +110,7 @@ impl pallet_staking::Config for Runtime { type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = - frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking)>; + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; diff --git a/frame/nomination-pools/test-staking/src/mock.rs b/frame/nomination-pools/test-staking/src/mock.rs index 5758b884e348d..568dec7b3a340 100644 --- a/frame/nomination-pools/test-staking/src/mock.rs +++ b/frame/nomination-pools/test-staking/src/mock.rs @@ -124,7 +124,7 @@ impl pallet_staking::Config for Runtime { type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = (); type ElectionProvider = - frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking)>; + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index c4c1d2aca8d07..e022d81c5b5bd 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -156,6 +156,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); + type MaxWinners = ConstU32<100>; 
+ type VotersBound = ConstU32<{ u32::MAX }>; + type TargetsBound = ConstU32<{ u32::MAX }>; } impl pallet_staking::Config for Test { @@ -177,7 +180,7 @@ impl pallet_staking::Config for Test { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = (); - type ElectionProvider = onchain::UnboundedExecution; + type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; type TargetList = pallet_staking::UseValidatorsMap; diff --git a/frame/root-offences/src/mock.rs b/frame/root-offences/src/mock.rs index 3f0a26afc1358..65bfcad4b26fc 100644 --- a/frame/root-offences/src/mock.rs +++ b/frame/root-offences/src/mock.rs @@ -145,6 +145,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); + type MaxWinners = ConstU32<100>; + type VotersBound = ConstU32<{ u32::MAX }>; + type TargetsBound = ConstU32<{ u32::MAX }>; } pub struct OnStakerSlashMock(core::marker::PhantomData); @@ -188,7 +191,7 @@ impl pallet_staking::Config for Test { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; - type ElectionProvider = onchain::UnboundedExecution; + type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index f86ddfeedc08b..2db7eb385111c 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -150,6 +150,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); + type MaxWinners = ConstU32<100>; + type VotersBound = ConstU32<{ u32::MAX }>; + type TargetsBound = ConstU32<{ u32::MAX }>; } impl pallet_staking::Config for Test { @@ -171,7 +174,7 @@ impl pallet_staking::Config for Test { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = (); - type ElectionProvider = onchain::UnboundedExecution; + type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; type MaxUnlockingChunks = ConstU32<32>; type HistoryDepth = ConstU32<84>; diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index a0144463540be..0f5b8e0123ab6 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -333,6 +333,10 @@ macro_rules! log { }; } +/// Maximum number of winners (aka. active validators), as defined in the election provider of this +/// pallet. +pub type MaxWinnersOf = <::ElectionProvider as frame_election_provider_support::ElectionProviderBase>::MaxWinners; + /// Counter for the number of "reward" points earned by a given validator. pub type RewardPoint = u32; diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 24e5aa97160ee..16e4e5ddd7aa2 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -238,6 +238,7 @@ parameter_types! 
{ pub static MaxUnlockingChunks: u32 = 32; pub static RewardOnUnbalanceWasCalled: bool = false; pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); + pub static MaxWinners: u32 = 100; } type VoterBagsListInstance = pallet_bags_list::Instance1; @@ -256,6 +257,9 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); + type MaxWinners = MaxWinners; + type VotersBound = ConstU32<{ u32::MAX }>; + type TargetsBound = ConstU32<{ u32::MAX }>; } pub struct MockReward {} @@ -295,7 +299,7 @@ impl crate::pallet::pallet::Config for Test { type NextNewSession = Session; type MaxNominatorRewardedPerValidator = ConstU32<64>; type OffendingValidatorsThreshold = OffendingValidatorsThreshold; - type ElectionProvider = onchain::UnboundedExecution; + type ElectionProvider = onchain::OnChainExecution; type GenesisElectionProvider = Self::ElectionProvider; // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. type VoterList = VoterBagsList; diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index a9e9899b9761a..9be01dd823104 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -18,15 +18,15 @@ //! Implementations for the Staking FRAME Pallet. use frame_election_provider_support::{ - data_provider, ElectionDataProvider, ElectionProvider, ElectionProviderBase, ScoreProvider, - SortedListProvider, Supports, VoteWeight, VoterOf, + data_provider, BoundedSupportsOf, ElectionDataProvider, ElectionProvider, ScoreProvider, + SortedListProvider, VoteWeight, VoterOf, }; use frame_support::{ dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ Currency, CurrencyToVote, Defensive, DefensiveResult, EstimateNextNewSession, Get, - Imbalance, LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons, + Imbalance, LockableCurrency, OnUnbalanced, TryCollect, UnixTime, WithdrawReasons, }, weights::Weight, }; @@ -44,7 +44,7 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*}; use crate::{ log, slashing, weights::WeightInfo, ActiveEraInfo, BalanceOf, EraPayout, Exposure, ExposureOf, - Forcing, IndividualExposure, Nominations, PositiveImbalanceOf, RewardDestination, + Forcing, IndividualExposure, MaxWinnersOf, Nominations, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, }; @@ -267,7 +267,10 @@ impl Pallet { } /// Plan a new session potentially trigger a new era. - fn new_session(session_index: SessionIndex, is_genesis: bool) -> Option> { + fn new_session( + session_index: SessionIndex, + is_genesis: bool, + ) -> Option>> { if let Some(current_era) = Self::current_era() { // Initial era has been set. let current_era_start_session_index = Self::eras_start_session_index(current_era) @@ -426,8 +429,11 @@ impl Pallet { /// Returns the new validator set. pub fn trigger_new_era( start_session_index: SessionIndex, - exposures: Vec<(T::AccountId, Exposure>)>, - ) -> Vec { + exposures: BoundedVec< + (T::AccountId, Exposure>), + MaxWinnersOf, + >, + ) -> BoundedVec> { // Increment or set current era. 
let new_planned_era = CurrentEra::::mutate(|s| { *s = Some(s.map(|s| s + 1).unwrap_or(0)); @@ -453,19 +459,26 @@ impl Pallet { pub(crate) fn try_trigger_new_era( start_session_index: SessionIndex, is_genesis: bool, - ) -> Option> { - let election_result = if is_genesis { - T::GenesisElectionProvider::elect().map_err(|e| { + ) -> Option>> { + let election_result: BoundedVec<_, MaxWinnersOf> = if is_genesis { + let result = ::elect().map_err(|e| { log!(warn, "genesis election provider failed due to {:?}", e); Self::deposit_event(Event::StakingElectionFailed); - }) + }); + + result + .ok()? + .into_inner() + .try_into() + // both bounds checked in integrity test to be equal + .defensive_unwrap_or_default() } else { - T::ElectionProvider::elect().map_err(|e| { + let result = ::elect().map_err(|e| { log!(warn, "election provider failed due to {:?}", e); Self::deposit_event(Event::StakingElectionFailed); - }) - } - .ok()?; + }); + result.ok()? + }; let exposures = Self::collect_exposures(election_result); if (exposures.len() as u32) < Self::minimum_validator_count().max(1) { @@ -502,10 +515,19 @@ impl Pallet { /// /// Store staking information for the new planned era pub fn store_stakers_info( - exposures: Vec<(T::AccountId, Exposure>)>, + exposures: BoundedVec< + (T::AccountId, Exposure>), + MaxWinnersOf, + >, new_planned_era: EraIndex, - ) -> Vec { - let elected_stashes = exposures.iter().cloned().map(|(x, _)| x).collect::>(); + ) -> BoundedVec> { + let elected_stashes: BoundedVec<_, MaxWinnersOf> = exposures + .iter() + .cloned() + .map(|(x, _)| x) + .collect::>() + .try_into() + .expect("since we only map through exposures, size of elected_stashes is always same as exposures; qed"); // Populate stakers, exposures, and the snapshot of validator prefs. let mut total_stake: BalanceOf = Zero::zero(); @@ -543,11 +565,11 @@ impl Pallet { elected_stashes } - /// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a + /// Consume a set of [`BoundedSupports`] from [`sp_npos_elections`] and collect them into a /// [`Exposure`]. fn collect_exposures( - supports: Supports, - ) -> Vec<(T::AccountId, Exposure>)> { + supports: BoundedSupportsOf, + ) -> BoundedVec<(T::AccountId, Exposure>), MaxWinnersOf> { let total_issuance = T::Currency::total_issuance(); let to_currency = |e: frame_election_provider_support::ExtendedBalance| { T::CurrencyToVote::to_currency(e, total_issuance) @@ -576,7 +598,8 @@ impl Pallet { let exposure = Exposure { own, others, total }; (validator, exposure) }) - .collect::)>>() + .try_collect() + .expect("we only map through support vector which cannot change the size; qed") } /// Remove all associated data of a stash account from the staking system. 
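`collect_exposures` now leans on `frame_support::traits::TryCollect` to gather an iterator directly into a `BoundedVec`, and `try_trigger_new_era` similarly re-bounds the genesis result via `try_into`. A minimal sketch of the `try_collect` behaviour, assuming an illustrative bound of 3:

use frame_support::{traits::{ConstU32, TryCollect}, BoundedVec};

#[test]
fn try_collect_into_bounded_vec() {
    // Collecting succeeds while the iterator fits the bound...
    let ok: Result<BoundedVec<u32, ConstU32<3>>, _> = (0..3u32).try_collect();
    assert!(ok.is_ok());

    // ...and fails once it would overflow, which is why `collect_exposures` may `expect`
    // when the mapping cannot change the input length.
    let too_long: Result<BoundedVec<u32, ConstU32<3>>, _> = (0..4u32).try_collect();
    assert!(too_long.is_err());
}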
@@ -1085,12 +1108,12 @@ impl pallet_session::SessionManager for Pallet { fn new_session(new_index: SessionIndex) -> Option> { log!(trace, "planning new session {}", new_index); CurrentPlannedSession::::put(new_index); - Self::new_session(new_index, false) + Self::new_session(new_index, false).map(|v| v.into_inner()) } fn new_session_genesis(new_index: SessionIndex) -> Option> { log!(trace, "planning new session {} at genesis", new_index); CurrentPlannedSession::::put(new_index); - Self::new_session(new_index, true) + Self::new_session(new_index, true).map(|v| v.into_inner()) } fn start_session(start_index: SessionIndex) { log!(trace, "starting session {}", start_index); @@ -1500,7 +1523,7 @@ impl StakingInterface for Pallet { } fn election_ongoing() -> bool { - ::ongoing() + T::ElectionProvider::ongoing() } fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { @@ -1626,6 +1649,12 @@ impl Pallet { Nominators::::count() + Validators::::count(), "wrong external count" ); + + ensure!( + ValidatorCount::::get() <= + ::MaxWinners::get(), + "validator count exceeded election max winners" + ); Ok(()) } diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index df90873ab2e59..8fddba2150370 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -17,7 +17,9 @@ //! Staking FRAME Pallet. -use frame_election_provider_support::{SortedListProvider, VoteWeight}; +use frame_election_provider_support::{ + ElectionProvider, ElectionProviderBase, SortedListProvider, VoteWeight, +}; use frame_support::{ dispatch::Codec, pallet_prelude::*, @@ -32,7 +34,7 @@ use frame_support::{ use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; use sp_runtime::{ traits::{CheckedSub, SaturatedConversion, StaticLookup, Zero}, - Perbill, Percent, + ArithmeticError, Perbill, Percent, }; use sp_staking::{EraIndex, SessionIndex}; use sp_std::prelude::*; @@ -107,7 +109,7 @@ pub mod pallet { type CurrencyToVote: CurrencyToVote>; /// Something that provides the election functionality. - type ElectionProvider: frame_election_provider_support::ElectionProvider< + type ElectionProvider: ElectionProvider< AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, // we only accept an election provider that has staking as data provider. @@ -115,7 +117,7 @@ pub mod pallet { >; /// Something that provides the election functionality at genesis. - type GenesisElectionProvider: frame_election_provider_support::ElectionProvider< + type GenesisElectionProvider: ElectionProvider< AccountId = Self::AccountId, BlockNumber = Self::BlockNumber, DataProvider = Pallet, @@ -646,6 +648,10 @@ pub mod pallet { ), _ => Ok(()), }); + assert!( + ValidatorCount::::get() <= + ::MaxWinners::get() + ); } // all voters are reported to the `VoterList`. @@ -743,8 +749,8 @@ pub mod pallet { /// There are too many nominators in the system. Governance needs to adjust the staking /// settings to keep things safe for the runtime. TooManyNominators, - /// There are too many validators in the system. Governance needs to adjust the staking - /// settings to keep things safe for the runtime. + /// There are too many validator candidates in the system. Governance needs to adjust the + /// staking settings to keep things safe for the runtime. TooManyValidators, /// Commission is too low. Must be at least `MinCommission`. CommissionTooLow, @@ -782,6 +788,12 @@ pub mod pallet { // and that MaxNominations is always greater than 1, since we count on this. 
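The validator-count dispatchables further below replace unchecked mutation with checked arithmetic whose result is then validated against `ElectionProviderBase::MaxWinners`. The core of `scale_validator_count`'s arithmetic, sketched standalone with a hypothetical helper name:

use sp_runtime::Percent;

/// Sketch: grow the current count by `factor` of itself without wrapping on overflow.
/// The caller must still check the result against `MaxWinners` before storing it.
fn scaled_validator_count(old: u32, factor: Percent) -> Option<u32> {
    old.checked_add(factor.mul_floor(old))
}

#[test]
fn scaling_matches_the_dispatchable() {
    // `Percent` saturates at 100%, so "+200%" at most doubles the count, as the
    // staking test added below also demonstrates (20 -> 40).
    assert_eq!(scaled_validator_count(20, Percent::from_percent(200)), Some(40));
    assert_eq!(scaled_validator_count(u32::MAX, Percent::from_percent(100)), None);
}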
assert!(!T::MaxNominations::get().is_zero()); + // ensure election results are always bounded with the same value + assert!( + ::MaxWinners::get() == + ::MaxWinners::get() + ); + sp_std::if_std! { sp_io::TestExternalities::new_empty().execute_with(|| assert!( @@ -1264,11 +1276,18 @@ pub mod pallet { #[pallet::compact] new: u32, ) -> DispatchResult { ensure_root(origin)?; + // ensure new validator count does not exceed maximum winners + // support by election provider. + ensure!( + new <= ::MaxWinners::get(), + Error::::TooManyValidators + ); ValidatorCount::::put(new); Ok(()) } - /// Increments the ideal number of validators. + /// Increments the ideal number of validators upto maximum of + /// `ElectionProviderBase::MaxWinners`. /// /// The dispatch origin must be Root. /// @@ -1281,11 +1300,19 @@ pub mod pallet { #[pallet::compact] additional: u32, ) -> DispatchResult { ensure_root(origin)?; - ValidatorCount::::mutate(|n| *n += additional); + let old = ValidatorCount::::get(); + let new = old.checked_add(additional).ok_or(ArithmeticError::Overflow)?; + ensure!( + new <= ::MaxWinners::get(), + Error::::TooManyValidators + ); + + ValidatorCount::::put(new); Ok(()) } - /// Scale up the ideal number of validators by a factor. + /// Scale up the ideal number of validators by a factor upto maximum of + /// `ElectionProviderBase::MaxWinners`. /// /// The dispatch origin must be Root. /// @@ -1295,7 +1322,15 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_validator_count())] pub fn scale_validator_count(origin: OriginFor, factor: Percent) -> DispatchResult { ensure_root(origin)?; - ValidatorCount::::mutate(|n| *n += factor * *n); + let old = ValidatorCount::::get(); + let new = old.checked_add(factor.mul_floor(old)).ok_or(ArithmeticError::Overflow)?; + + ensure!( + new <= ::MaxWinners::get(), + Error::::TooManyValidators + ); + + ValidatorCount::::put(new); Ok(()) } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 4812c105c0d80..6609b9087637d 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -5624,3 +5624,57 @@ fn reducing_max_unlocking_chunks_abrupt() { MaxUnlockingChunks::set(2); }) } + +#[test] +fn cannot_set_unsupported_validator_count() { + ExtBuilder::default().build_and_execute(|| { + MaxWinners::set(50); + // set validator count works + assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 30)); + assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 50)); + // setting validator count above 100 does not work + assert_noop!( + Staking::set_validator_count(RuntimeOrigin::root(), 51), + Error::::TooManyValidators, + ); + }) +} + +#[test] +fn increase_validator_count_errors() { + ExtBuilder::default().build_and_execute(|| { + MaxWinners::set(50); + assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 40)); + + // increase works + assert_ok!(Staking::increase_validator_count(RuntimeOrigin::root(), 6)); + assert_eq!(ValidatorCount::::get(), 46); + + // errors + assert_noop!( + Staking::increase_validator_count(RuntimeOrigin::root(), 5), + Error::::TooManyValidators, + ); + }) +} + +#[test] +fn scale_validator_count_errors() { + ExtBuilder::default().build_and_execute(|| { + MaxWinners::set(50); + assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 20)); + + // scale value works + assert_ok!(Staking::scale_validator_count( + RuntimeOrigin::root(), + Percent::from_percent(200) + )); + assert_eq!(ValidatorCount::::get(), 40); + + // errors + assert_noop!( + 
Staking::scale_validator_count(RuntimeOrigin::root(), Percent::from_percent(126)), + Error::::TooManyValidators, + ); + }) +} diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 514ded67ad38b..d0c9ed18caddc 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -450,10 +450,10 @@ impl Default for Support { /// The main advantage of this is that it is encodable. pub type Supports = Vec<(A, Support)>; -/// Same as `Supports` bounded by `MaxWinners`. +/// Same as `Supports` but bounded by `B`. /// /// To note, the inner `Support` is still unbounded. -pub type BoundedSupports = BoundedVec<(A, Support), MaxWinners>; +pub type BoundedSupports = BoundedVec<(A, Support), B>; /// Linkage from a winner to their [`Support`]. /// From c460f4be081eda7cacdf30feeb65f26a44a3a1b0 Mon Sep 17 00:00:00 2001 From: Qinxuan Chen Date: Wed, 9 Nov 2022 19:46:00 +0800 Subject: [PATCH 334/348] Update some dependencies to prune duplicated crates with different version (#12560) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * sc-client-babe/sp-arithmetic-fuzzer: update num-bigint and num-rational to v0.4 * update lru 0.7.5 ==> v0.8.1 * pallet-example-offchain-worker: update lite-json v0.1.3 ==> v0.2.0 * update hyper 0.14.16 ==> 0.14.20, num-fromat 0.4.0 ==> 0.4.3 * pallet-mmr: update ckb-merkle-mountain-range v0.3.2 ==> v0.5.2 * update handlebars v4.2.2 ==> v4.3.5 * `runtime_cache_size` must always be at least 1 Signed-off-by: koushiro * default cache size with .min(1) Signed-off-by: koushiro * update hyper 0.14.20 ==> 0.14.22 Signed-off-by: koushiro * update lru 0.8.0 ==> 0.8.1 Signed-off-by: koushiro * Apply suggestions from code review * Apply suggestions from code review * Fix Cargo.lock Signed-off-by: koushiro Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher --- Cargo.lock | 364 +++++++----------- client/consensus/babe/Cargo.toml | 9 +- client/consensus/babe/src/authorship.rs | 2 +- client/consensus/babe/src/tests.rs | 6 +- client/executor/Cargo.toml | 2 +- client/executor/src/wasm_runtime.rs | 11 +- client/network-gossip/Cargo.toml | 2 +- client/network-gossip/src/state_machine.rs | 8 +- client/network/Cargo.toml | 2 +- client/network/src/protocol.rs | 14 +- client/network/sync/Cargo.toml | 6 +- .../network/sync/src/block_request_handler.rs | 5 +- .../network/sync/src/state_request_handler.rs | 5 +- frame/examples/offchain-worker/Cargo.toml | 2 +- frame/merkle-mountain-range/Cargo.toml | 2 +- frame/merkle-mountain-range/src/mmr/mod.rs | 4 +- primitives/arithmetic/fuzzer/Cargo.toml | 2 +- primitives/blockchain/Cargo.toml | 2 +- primitives/blockchain/src/header_metadata.rs | 6 +- primitives/trie/Cargo.toml | 2 +- primitives/trie/src/cache/shared_cache.rs | 2 +- utils/frame/generate-bags/Cargo.toml | 2 +- 22 files changed, 191 insertions(+), 269 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bec6b0bd976e0..d65411de0125c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,7 +42,7 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", "opaque-debug 0.3.0", @@ -124,15 +124,6 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" -[[package]] -name = "arrayvec" -version = 
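One knock-on effect of the `lru` 0.7 to 0.8 bump called out in this commit message is that cache capacities are now `NonZeroUsize`, which is why a configured `runtime_cache_size` has to be clamped to at least 1. A hedged sketch of that construction pattern (illustrative key/value types and function name, not the actual `sc-executor` code):

use std::num::NonZeroUsize;
use lru::LruCache;

/// Sketch: with lru 0.8 the capacity is a `NonZeroUsize`, so a user-supplied size of
/// zero must be bumped to one before the cache can be built.
fn new_runtime_cache(configured: usize) -> LruCache<u32, String> {
    let capacity = NonZeroUsize::new(configured.max(1)).expect("max(1) is never zero; qed");
    LruCache::new(capacity)
}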
"0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - [[package]] name = "arrayvec" version = "0.5.2" @@ -267,7 +258,7 @@ checksum = "cf2c06e30a24e8c78a3987d07f0930edf76ef35e027e7bdb063fccafdad1f60c" dependencies = [ "async-io", "blocking", - "cfg-if 1.0.0", + "cfg-if", "event-listener", "futures-lite", "libc", @@ -402,7 +393,7 @@ checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object 0.27.1", @@ -632,7 +623,7 @@ dependencies = [ "arrayref", "arrayvec 0.7.2", "cc", - "cfg-if 1.0.0", + "cfg-if", "constant_time_eq", ] @@ -851,12 +842,6 @@ dependencies = [ "smallvec", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -875,7 +860,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", "zeroize", @@ -945,11 +930,11 @@ dependencies = [ [[package]] name = "ckb-merkle-mountain-range" -version = "0.3.2" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f061f97d64fd1822664bdfb722f7ae5469a97b77567390f7442be5b5dc82a5b" +checksum = "56ccb671c5921be8a84686e6212ca184cb1d7c51cadcdbfcbd1cc3f042f5dfb8" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", ] [[package]] @@ -1093,7 +1078,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44919ecaf6f99e8e737bc239408931c9a01e9a6c74814fee8242dd2506b65390" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "glob", ] @@ -1276,7 +1261,7 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1323,7 +1308,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] @@ -1333,7 +1318,7 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] @@ -1344,7 +1329,7 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", "lazy_static", "memoffset", @@ -1357,7 +1342,7 @@ version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "lazy_static", ] @@ -1671,7 +1656,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" 
dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "dirs-sys-next", ] @@ -1710,7 +1695,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" dependencies = [ "byteorder", - "quick-error 1.2.3", + "quick-error", ] [[package]] @@ -2048,7 +2033,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0408e2626025178a6a7f7ffc05a25bc47103229f19c113755de7bf63816290c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall", "winapi", @@ -2095,7 +2080,7 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crc32fast", "libc", "libz-sys", @@ -2150,7 +2135,7 @@ dependencies = [ "linregress", "log", "parity-scale-codec", - "paste 1.0.6", + "paste", "rusty-fork", "scale-info", "serde", @@ -2294,7 +2279,7 @@ version = "15.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "parity-scale-codec", "scale-info", "serde", @@ -2315,7 +2300,7 @@ dependencies = [ "once_cell", "parity-scale-codec", "parity-util-mem", - "paste 1.0.6", + "paste", "pretty_assertions", "scale-info", "serde", @@ -2676,7 +2661,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", @@ -2689,7 +2674,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.10.0+wasi-snapshot-preview1", ] @@ -2809,16 +2794,16 @@ checksum = "62aca2aba2d62b4a7f5b33f3712cb1b0692779a56fb510499d5c0aa594daeaf3" [[package]] name = "handlebars" -version = "4.2.2" +version = "4.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d6a30320f094710245150395bc763ad23128d6a1ebbad7594dc4164b62c56b" +checksum = "433e4ab33f1213cdc25b5fa45c76881240cfe79284cf2b395e8b9e312a30a2fd" dependencies = [ "log", "pest", "pest_derive", - "quick-error 2.0.0", "serde", "serde_json", + "thiserror", ] [[package]] @@ -2945,7 +2930,7 @@ checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa 1.0.4", ] [[package]] @@ -2961,9 +2946,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.5.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -2979,9 +2964,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.16" +version = "0.14.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" +checksum = "abfba89e19b959ca163c7752ba59d737c1ceea53a5d31a149c805446fc958064" dependencies = [ "bytes", 
"futures-channel", @@ -2992,7 +2977,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 0.4.8", + "itoa 1.0.4", "pin-project-lite 0.2.6", "socket2", "tokio", @@ -3107,7 +3092,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -3166,9 +3151,9 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "jobserver" @@ -3338,7 +3323,7 @@ version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3636d281d46c3b64182eb3a0a42b7b483191a2ecc3f05301fa67403f7c9bc949" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa", "elliptic-curve", "sha2 0.10.2", @@ -3542,7 +3527,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] @@ -3646,7 +3631,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "lru 0.8.1", + "lru", "prost", "prost-build", "prost-codec", @@ -4002,20 +3987,20 @@ checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d" [[package]] name = "lite-json" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0460d985423a026b4d9b828a7c6eed1bcf606f476322f3f9b507529686a61715" +checksum = "cd0e787ffe1153141a0f6f6d759fdf1cc34b1226e088444523812fd412a5cca2" dependencies = [ "lite-parser", ] [[package]] name = "lite-parser" -version = "0.1.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c50092e40e0ccd1bf2015a10333fde0502ff95b832b0895dc1ca0d7ac6c52f6" +checksum = "c3d5f9dc37c52d889a21fd701983d02bb6a84f852c5140a6c80ef4557f7dc29e" dependencies = [ - "paste 0.1.18", + "paste", ] [[package]] @@ -4033,7 +4018,7 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "value-bag", ] @@ -4058,15 +4043,6 @@ dependencies = [ "syn", ] -[[package]] -name = "lru" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32613e41de4c47ab04970c348ca7ae7382cf116625755af070b008a15516a889" -dependencies = [ - "hashbrown 0.11.2", -] - [[package]] name = "lru" version = "0.8.1" @@ -4114,12 +4090,6 @@ dependencies = [ "libc", ] -[[package]] -name = "maplit" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" - [[package]] name = "match_cfg" version = "0.1.0" @@ -4256,7 +4226,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2be9a9090bc1cac2930688fa9478092a64c6a92ddc6ae0692d46b37d9cab709" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "downcast", "fragile", "lazy_static", @@ -4271,7 +4241,7 @@ version = "0.11.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "86d702a0530a0141cf4ed147cf5ec7be6f2c187d4e37fcbefc39cf34116bfe8f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "proc-macro2", "quote", "syn", @@ -4373,7 +4343,7 @@ dependencies = [ "matrixmultiply", "nalgebra-macros", "num-complex", - "num-rational 0.4.0", + "num-rational", "num-traits", "rand 0.8.5", "rand_distr", @@ -4435,7 +4405,7 @@ checksum = "25af9cf0dc55498b7bd94a1508af7a78706aa0ab715a73c5169273e03c84845e" dependencies = [ "anyhow", "byteorder", - "paste 1.0.6", + "paste", "thiserror", ] @@ -4475,7 +4445,7 @@ checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6" dependencies = [ "bitflags", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "memoffset", ] @@ -4487,7 +4457,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" dependencies = [ "bitflags", - "cfg-if 1.0.0", + "cfg-if", "libc", ] @@ -4828,12 +4798,6 @@ dependencies = [ "tempfile", ] -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - [[package]] name = "nohash-hasher" version = "0.2.0" @@ -4857,17 +4821,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - [[package]] name = "num-bigint" version = "0.4.3" @@ -4890,12 +4843,12 @@ dependencies = [ [[package]] name = "num-format" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465" +checksum = "54b862ff8df690cf089058c98b183676a7ed0f974cc08b426800093227cbff3b" dependencies = [ - "arrayvec 0.4.12", - "itoa 0.4.8", + "arrayvec 0.7.2", + "itoa 1.0.4", ] [[package]] @@ -4910,24 +4863,12 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-bigint 0.2.6", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ "autocfg", - "num-bigint 0.4.3", + "num-bigint", "num-integer", "num-traits", ] @@ -6488,7 +6429,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d32c34f4f5ca7f9196001c0aba5a1f9a5a12382c8944b8b0f90233282d1e8f8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "hashbrown 0.12.3", "impl-trait-for-tuples", "parity-util-mem-derive", @@ -6548,7 +6489,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" dependencies = [ - "cfg-if 
1.0.0", + "cfg-if", "instant", "libc", "redox_syscall", @@ -6562,7 +6503,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall", "smallvec", @@ -6571,28 +6512,9 @@ dependencies = [ [[package]] name = "paste" -version = "0.1.18" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" -dependencies = [ - "paste-impl", - "proc-macro-hack", -] - -[[package]] -name = "paste" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" - -[[package]] -name = "paste-impl" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" -dependencies = [ - "proc-macro-hack", -] +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" [[package]] name = "pbkdf2" @@ -6626,18 +6548,19 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pest" -version = "2.1.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" dependencies = [ + "thiserror", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.1.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" +checksum = "60b75706b9642ebcb34dab3bc7750f811609a0eb1dd8b88c2d15bf628c1c65b2" dependencies = [ "pest", "pest_generator", @@ -6645,9 +6568,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.1.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" +checksum = "f4f9272122f5979a6511a749af9db9bfc810393f63119970d7085fed1c4ea0db" dependencies = [ "pest", "pest_meta", @@ -6658,13 +6581,13 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.1.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" +checksum = "4c8717927f9b79515e565a64fe46c38b8cd0427e64c40680b14a7365ab09ac8d" dependencies = [ - "maplit", + "once_cell", "pest", - "sha-1 0.8.2", + "sha1", ] [[package]] @@ -6767,14 +6690,15 @@ dependencies = [ [[package]] name = "polling" -version = "2.0.2" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" +checksum = "ab4609a838d88b73d8238967b60dd115cc08d38e2bbaf51ee1e4b695f89122e2" dependencies = [ - "cfg-if 0.1.10", + "autocfg", + "cfg-if", "libc", "log", - "wepoll-sys", + "wepoll-ffi", "winapi", ] @@ -6795,7 +6719,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "opaque-debug 0.3.0", 
"universal-hash", @@ -6896,12 +6820,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - [[package]] name = "proc-macro2" version = "1.0.46" @@ -6917,7 +6835,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fnv", "lazy_static", "memchr", @@ -6932,7 +6850,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c473049631c233933d6286c88bbb7be30e62ec534cf99a9ae0079211f7fa603" dependencies = [ "dtoa", - "itoa 1.0.1", + "itoa 1.0.4", "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] @@ -7049,12 +6967,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ac73b1112776fc109b2e61909bc46c7e1bf0d7f690ffb1676553acce16d5cda" - [[package]] name = "quickcheck" version = "1.0.3" @@ -7366,7 +7278,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ "hostname", - "quick-error 1.2.3", + "quick-error", ] [[package]] @@ -7551,7 +7463,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error 1.2.3", + "quick-error", "tempfile", ] @@ -7867,12 +7779,11 @@ dependencies = [ "futures", "log", "merlin", - "num-bigint 0.2.6", - "num-rational 0.2.4", + "num-bigint", + "num-rational", "num-traits", "parity-scale-codec", "parking_lot 0.12.1", - "rand 0.7.3", "rand_chacha 0.2.2", "sc-block-builder", "sc-client-api", @@ -8050,11 +7961,11 @@ dependencies = [ "criterion", "env_logger", "lazy_static", - "lru 0.7.5", + "lru", "num_cpus", "parity-scale-codec", "parking_lot 0.12.1", - "paste 1.0.6", + "paste", "regex", "sc-executor-common", "sc-executor-wasmi", @@ -8117,13 +8028,13 @@ dependencies = [ name = "sc-executor-wasmtime" version = "0.10.0-dev" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "log", "once_cell", "parity-scale-codec", "parity-wasm", - "paste 1.0.6", + "paste", "rustix", "sc-allocator", "sc-executor-common", @@ -8262,7 +8173,7 @@ dependencies = [ "linked-hash-map", "linked_hash_set", "log", - "lru 0.7.5", + "lru", "parity-scale-codec", "parking_lot 0.12.1", "pin-project", @@ -8356,7 +8267,7 @@ dependencies = [ "futures-timer", "libp2p", "log", - "lru 0.7.5", + "lru", "quickcheck", "sc-network-common", "sc-peerset", @@ -8396,7 +8307,7 @@ dependencies = [ "futures", "libp2p", "log", - "lru 0.7.5", + "lru", "mockall", "parity-scale-codec", "prost", @@ -8625,7 +8536,7 @@ dependencies = [ name = "sc-runtime-test" version = "2.0.0" dependencies = [ - "paste 1.0.6", + "paste", "sp-core", "sp-io", "sp-runtime", @@ -8920,7 +8831,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8980cafbe98a7ee7a9cc16b32ebce542c77883f512d83fbf2ddc8f6a85ea74c9" dependencies = [ "bitvec", - "cfg-if 1.0.0", + "cfg-if", "derive_more", "parity-scale-codec", 
"scale-info-derive", @@ -9137,7 +9048,7 @@ version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ - "itoa 1.0.1", + "itoa 1.0.4", "ryu", "serde", ] @@ -9151,18 +9062,6 @@ dependencies = [ "serde", ] -[[package]] -name = "sha-1" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - [[package]] name = "sha-1" version = "0.9.4" @@ -9170,12 +9069,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpuid-bool", "digest 0.9.0", "opaque-debug 0.3.0", ] +[[package]] +name = "sha1" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "006769ba83e921b3085caa8334186b00cf92b4cb1a6cf4632fbccc8eff5c7549" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.3", +] + [[package]] name = "sha2" version = "0.8.2" @@ -9195,7 +9105,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug 0.3.0", @@ -9207,7 +9117,7 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.3", ] @@ -9275,7 +9185,7 @@ dependencies = [ "approx", "num-complex", "num-traits", - "paste 1.0.6", + "paste", ] [[package]] @@ -9342,7 +9252,7 @@ dependencies = [ "httparse", "log", "rand 0.8.5", - "sha-1 0.9.4", + "sha-1", ] [[package]] @@ -9442,7 +9352,7 @@ name = "sp-arithmetic-fuzzer" version = "2.0.0" dependencies = [ "honggfuzz", - "num-bigint 0.2.6", + "num-bigint", "primitive-types", "sp-arithmetic", ] @@ -9487,7 +9397,7 @@ version = "4.0.0-dev" dependencies = [ "futures", "log", - "lru 0.7.5", + "lru", "parity-scale-codec", "parking_lot 0.12.1", "sp-api", @@ -9865,7 +9775,7 @@ dependencies = [ "log", "parity-scale-codec", "parity-util-mem", - "paste 1.0.6", + "paste", "rand 0.7.3", "scale-info", "serde", @@ -10128,7 +10038,7 @@ dependencies = [ "hash-db", "hashbrown 0.12.3", "lazy_static", - "lru 0.7.5", + "lru", "memory-db", "nohash-hasher", "parity-scale-codec", @@ -10472,7 +10382,7 @@ version = "2.0.0" dependencies = [ "beefy-merkle-tree", "beefy-primitives", - "cfg-if 1.0.0", + "cfg-if", "frame-support", "frame-system", "frame-system-rpc-runtime-api", @@ -10661,7 +10571,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "libc", "redox_syscall", @@ -10908,7 +10818,7 @@ version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite 0.2.6", "tracing-attributes", @@ -11051,7 +10961,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", - "cfg-if 1.0.0", + "cfg-if", "data-encoding", "enum-as-inner", "futures-channel", @@ -11074,7 +10984,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "futures-util", "ipconfig", "lazy_static", @@ -11148,7 +11058,7 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "digest 0.10.3", "rand 0.8.5", "static_assertions", @@ -11363,7 +11273,7 @@ version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e68338db6becec24d3c7977b5bf8a48be992c934b5d07177e3931f5dc9b076c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] @@ -11388,7 +11298,7 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -11494,7 +11404,7 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f727a39e7161f7438ddb8eafe571b67c576a8c2fb459f666d9053b5bba4afdea" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "indexmap", "js-sys", "loupe", @@ -11614,7 +11524,7 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccd7fdc60e252a795c849b3f78a81a134783051407e7e279c10b7019139ef8dc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "enum-iterator", "enumset", "leb128", @@ -11639,7 +11549,7 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcff0cd2c01a8de6009fd863b14ea883132a468a24f2d2ee59dc34453d3a31b5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "enum-iterator", "enumset", "leb128", @@ -11686,7 +11596,7 @@ checksum = "afdc46158517c2769f9938bc222a7d41b3bb330824196279d8aa2d667cd40641" dependencies = [ "backtrace", "cc", - "cfg-if 1.0.0", + "cfg-if", "enum-iterator", "indexmap", "libc", @@ -11730,7 +11640,7 @@ dependencies = [ "downcast-rs", "libm", "memory_units", - "num-rational 0.4.0", + "num-rational", "num-traits", ] @@ -11757,13 +11667,13 @@ checksum = "8a10dc9784d8c3a33c970e3939180424955f08af2e7f20368ec02685a0e8f065" dependencies = [ "anyhow", "bincode", - "cfg-if 1.0.0", + "cfg-if", "indexmap", "libc", "log", "object 0.29.0", "once_cell", - "paste 1.0.6", + "paste", "psm", "rayon", "serde", @@ -11783,7 +11693,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee4dbdc6daf68528cad1275ac91e3f51848ce9824385facc94c759f529decdf8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -11855,7 +11765,7 @@ dependencies = [ "addr2line", "anyhow", "bincode", - "cfg-if 1.0.0", + "cfg-if", "cpp_demangle", "gimli 0.26.1", "log", @@ -11890,14 +11800,14 @@ checksum = "ae79e0515160bd5abee5df50a16c4eb8db9f71b530fc988ae1d9ce34dcb8dd01" dependencies = [ "anyhow", "cc", - "cfg-if 1.0.0", + "cfg-if", "indexmap", "libc", "log", "mach", "memfd", "memoffset", - "paste 1.0.6", + "paste", "rand 0.8.5", "rustix", "thiserror", @@ -11967,10 +11877,10 @@ dependencies 
= [ ] [[package]] -name = "wepoll-sys" -version = "3.0.1" +name = "wepoll-ffi" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" +checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" dependencies = [ "cc", ] diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 54da2d6e49e39..6eefc60552388 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -15,17 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-trait = "0.1.57" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ - "derive", -] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.21" log = "0.4.17" merlin = "2.0" -num-bigint = "0.2.3" -num-rational = "0.2.2" +num-bigint = "0.4.3" +num-rational = "0.4.1" num-traits = "0.2.8" parking_lot = "0.12.1" -rand = "0.7.2" schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0" diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 896bfaeda1dc9..b39153faa6d1a 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -85,7 +85,7 @@ pub(super) fn calculate_primary_threshold( qed.", ); - ((BigUint::one() << 128) * numer / denom).to_u128().expect( + ((BigUint::one() << 128usize) * numer / denom).to_u128().expect( "returns None if the underlying value cannot be represented with 128 bits; \ we start with 2^128 which is one more than can be represented with 128 bits; \ we multiple by p which is defined in [0, 1); \ diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index a2886663b2c8f..8bef1b38b929d 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -22,8 +22,10 @@ use super::*; use authorship::claim_slot; use futures::executor::block_on; use log::debug; -use rand::RngCore; -use rand_chacha::{rand_core::SeedableRng, ChaChaRng}; +use rand_chacha::{ + rand_core::{RngCore, SeedableRng}, + ChaChaRng, +}; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client_api::{backend::TransactionFor, BlockchainEvents, Finalizer}; use sc_consensus::{BoxBlockImport, BoxJustificationImport}; diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index bda2992392707..1a6c281c77ff5 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] lazy_static = "1.4.0" -lru = "0.7.5" +lru = "0.8.1" parking_lot = "0.12.1" tracing = "0.1.29" wasmi = "0.13" diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 1dee739c50f9e..991802340db61 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -32,6 +32,7 @@ use sc_executor_common::{ use sp_core::traits::{Externalities, FetchRuntimeCode, RuntimeCode}; use sp_version::RuntimeVersion; use std::{ + num::NonZeroUsize, panic::AssertUnwindSafe, path::{Path, PathBuf}, sync::Arc, @@ -179,17 +180,15 @@ impl RuntimeCache { /// for caching. /// /// `runtime_cache_size` specifies the number of different runtimes versions preserved in an - /// in-memory cache. + /// in-memory cache, must always be at least 1. 
pub fn new( max_runtime_instances: usize, cache_path: Option, runtime_cache_size: u8, ) -> RuntimeCache { - RuntimeCache { - runtimes: Mutex::new(LruCache::new(runtime_cache_size.into())), - max_runtime_instances, - cache_path, - } + let cap = + NonZeroUsize::new(runtime_cache_size.max(1) as usize).expect("cache size is not zero"); + RuntimeCache { runtimes: Mutex::new(LruCache::new(cap)), max_runtime_instances, cache_path } } /// Prepares a WASM module instance and executes given function for it. diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index e84013fbfe436..95c281456396d 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -19,7 +19,7 @@ futures = "0.3.21" futures-timer = "3.0.1" libp2p = { version = "0.49.0", default-features = false } log = "0.4.17" -lru = "0.7.5" +lru = "0.8.1" tracing = "0.1.29" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 600383cf5f70d..001f2c6136a00 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -24,7 +24,7 @@ use lru::LruCache; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network_common::protocol::{role::ObservedRole, ProtocolName}; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; -use std::{collections::HashMap, iter, sync::Arc, time, time::Instant}; +use std::{collections::HashMap, iter, num::NonZeroUsize, sync::Arc, time, time::Instant}; // FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 // NOTE: The current value is adjusted based on largest production network deployment (Kusama) and @@ -180,7 +180,11 @@ impl ConsensusGossip { ConsensusGossip { peers: HashMap::new(), messages: Default::default(), - known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE), + known_messages: { + let cap = NonZeroUsize::new(KNOWN_MESSAGES_CACHE_SIZE) + .expect("cache capacity is not zero"); + LruCache::new(cap) + }, protocol, validator, next_broadcast: Instant::now() + REBROADCAST_INTERVAL, diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 81b684af433d2..4637a2a5105e5 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -30,7 +30,7 @@ libp2p = { version = "0.49.0", features = ["async-std", "dns", "identify", "kad" linked_hash_set = "0.1.3" linked-hash-map = "0.5.4" log = "0.4.17" -lru = "0.7.5" +lru = "0.8.1" parking_lot = "0.12.1" pin-project = "1.0.12" prost = "0.11" diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index a04dba79f7fd2..63d060f423773 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -31,6 +31,7 @@ use libp2p::{ Multiaddr, PeerId, }; use log::{debug, error, info, log, trace, warn, Level}; +use lru::LruCache; use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; @@ -200,7 +201,7 @@ pub struct Protocol { /// The `PeerId`'s of all boot nodes. boot_node_ids: HashSet, /// A cache for the data that was associated to a block announcement. 
- block_announce_data_cache: lru::LruCache>, + block_announce_data_cache: LruCache>, } #[derive(Debug)] @@ -356,10 +357,13 @@ where ) }; - let block_announce_data_cache = lru::LruCache::new( - network_config.default_peers_set.in_peers as usize + - network_config.default_peers_set.out_peers as usize, - ); + let cache_capacity = NonZeroUsize::new( + (network_config.default_peers_set.in_peers as usize + + network_config.default_peers_set.out_peers as usize) + .max(1), + ) + .expect("cache capacity is not zero"); + let block_announce_data_cache = LruCache::new(cache_capacity); let protocol = Self { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 441b02fb22b8a..bcd6cf10275fe 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -18,13 +18,11 @@ prost-build = "0.11" [dependencies] array-bytes = "4.1" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ - "derive", -] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.21" libp2p = "0.49.0" log = "0.4.17" -lru = "0.7.5" +lru = "0.8.1" mockall = "0.11.2" prost = "0.11" smallvec = "1.8.0" diff --git a/client/network/sync/src/block_request_handler.rs b/client/network/sync/src/block_request_handler.rs index 467d898489b61..b5f8b6b73bce9 100644 --- a/client/network/sync/src/block_request_handler.rs +++ b/client/network/sync/src/block_request_handler.rs @@ -41,6 +41,7 @@ use sp_runtime::{ use std::{ cmp::min, hash::{Hash, Hasher}, + num::NonZeroUsize, sync::Arc, time::Duration, }; @@ -164,7 +165,9 @@ where ); protocol_config.inbound_queue = Some(tx); - let seen_requests = LruCache::new(num_peer_hint * 2); + let capacity = + NonZeroUsize::new(num_peer_hint.max(1) * 2).expect("cache capacity is not zero"); + let seen_requests = LruCache::new(capacity); (Self { client, request_receiver, seen_requests }, protocol_config) } diff --git a/client/network/sync/src/state_request_handler.rs b/client/network/sync/src/state_request_handler.rs index 441400ef439b7..98f1ed34d0581 100644 --- a/client/network/sync/src/state_request_handler.rs +++ b/client/network/sync/src/state_request_handler.rs @@ -35,6 +35,7 @@ use sc_network_common::{ use sp_runtime::traits::Block as BlockT; use std::{ hash::{Hash, Hasher}, + num::NonZeroUsize, sync::Arc, time::Duration, }; @@ -144,7 +145,9 @@ where ); protocol_config.inbound_queue = Some(tx); - let seen_requests = LruCache::new(num_peer_hint * 2); + let capacity = + NonZeroUsize::new(num_peer_hint.max(1) * 2).expect("cache capacity is not zero"); + let seen_requests = LruCache::new(capacity); (Self { client, request_receiver, seen_requests }, protocol_config) } diff --git a/frame/examples/offchain-worker/Cargo.toml b/frame/examples/offchain-worker/Cargo.toml index e63b82757c030..bc5c0ac036021 100644 --- a/frame/examples/offchain-worker/Cargo.toml +++ b/frame/examples/offchain-worker/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -lite-json = { version = "0.1", default-features = false } +lite-json = { version = "0.2.0", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } diff --git a/frame/merkle-mountain-range/Cargo.toml 
b/frame/merkle-mountain-range/Cargo.toml index 91ec19d731094..9a3ee517e7d42 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.3.2", default-features = false } +mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } frame-benchmarking = { version = "4.0.0-dev", default-features = false, optional = true, path = "../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/merkle-mountain-range/src/mmr/mod.rs b/frame/merkle-mountain-range/src/mmr/mod.rs index 04fdfa199e72b..19fb7b34382bd 100644 --- a/frame/merkle-mountain-range/src/mmr/mod.rs +++ b/frame/merkle-mountain-range/src/mmr/mod.rs @@ -36,10 +36,10 @@ pub struct Hasher(sp_std::marker::PhantomData<(H, L)>); impl mmr_lib::Merge for Hasher { type Item = Node; - fn merge(left: &Self::Item, right: &Self::Item) -> Self::Item { + fn merge(left: &Self::Item, right: &Self::Item) -> mmr_lib::Result { let mut concat = left.hash().as_ref().to_vec(); concat.extend_from_slice(right.hash().as_ref()); - Node::Hash(::hash(&concat)) + Ok(Node::Hash(::hash(&concat))) } } diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index b1ffa746b6b63..f39e59034dcd0 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] honggfuzz = "0.5.49" -num-bigint = "0.2" +num-bigint = "0.4.3" primitive-types = "0.12.0" sp-arithmetic = { version = "5.0.0", path = ".." } diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 4d4718b4038d4..454bc4d7c1103 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } futures = "0.3.21" log = "0.4.17" -lru = "0.7.5" +lru = "0.8.1" parking_lot = "0.12.1" thiserror = "1.0.30" sp-api = { version = "4.0.0-dev", path = "../api" } diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs index 0ced264d5c786..1db37b47e4d44 100644 --- a/primitives/blockchain/src/header_metadata.rs +++ b/primitives/blockchain/src/header_metadata.rs @@ -21,6 +21,7 @@ use lru::LruCache; use parking_lot::RwLock; use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One}; +use std::num::NonZeroUsize; /// Set to the expected max difference between `best` and `finalized` blocks at sync. const LRU_CACHE_SIZE: usize = 5_000; @@ -239,14 +240,15 @@ pub struct HeaderMetadataCache { impl HeaderMetadataCache { /// Creates a new LRU header metadata cache with `capacity`. 
- pub fn new(capacity: usize) -> Self { + pub fn new(capacity: NonZeroUsize) -> Self { HeaderMetadataCache { cache: RwLock::new(LruCache::new(capacity)) } } } impl Default for HeaderMetadataCache { fn default() -> Self { - HeaderMetadataCache { cache: RwLock::new(LruCache::new(LRU_CACHE_SIZE)) } + let cap = NonZeroUsize::new(LRU_CACHE_SIZE).expect("cache capacity is not zero"); + HeaderMetadataCache { cache: RwLock::new(LruCache::new(cap)) } } } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 2636648f40387..211e071c073af 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hashbrown = { version = "0.12.3", optional = true } hash-db = { version = "0.15.2", default-features = false } lazy_static = { version = "1.4.0", optional = true } -lru = { version = "0.7.5", optional = true } +lru = { version = "0.8.1", optional = true } memory-db = { version = "0.30.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } diff --git a/primitives/trie/src/cache/shared_cache.rs b/primitives/trie/src/cache/shared_cache.rs index abac8c9f946ca..9d4d36b83a28a 100644 --- a/primitives/trie/src/cache/shared_cache.rs +++ b/primitives/trie/src/cache/shared_cache.rs @@ -35,7 +35,7 @@ lazy_static::lazy_static! { } /// No hashing [`LruCache`]. -type NoHashingLruCache = lru::LruCache>; +type NoHashingLruCache = LruCache>; /// The shared node cache. /// diff --git a/utils/frame/generate-bags/Cargo.toml b/utils/frame/generate-bags/Cargo.toml index 18f668485114b..b8ad97cc6b6fa 100644 --- a/utils/frame/generate-bags/Cargo.toml +++ b/utils/frame/generate-bags/Cargo.toml @@ -22,4 +22,4 @@ sp-io = { version = "6.0.0", path = "../../../primitives/io" } # third party chrono = { version = "0.4.19" } git2 = { version = "0.14.2", default-features = false } -num-format = { version = "0.4.0" } +num-format = "0.4.3" From a6da808575fe403ab2bd7f6cd009896cdc6fd71a Mon Sep 17 00:00:00 2001 From: Robert Hambrock Date: Wed, 9 Nov 2022 15:41:32 +0100 Subject: [PATCH 335/348] Consolidate and deduplicate MMR API methods (#12530) * histor. batch proof: make best block arg optional * correct testing range * make generate_batch_proof stub for historical * merge generate_{historical_}batch_proof functions * merge generate_{batch_}proof functions * merge verify_{batch_}proof functions * merge verify_{batch_}proof_stateless functions * remove {Leaf}Proof Not utilized by API anymore, so superfluous. Removal consistent with prior changes to just use "batch" proof API. * rename BatchProof->Proof no need to qualify if only one universal proof type. * cleanup * expose verify_proof rpc api * document verify_proof * expose verify_proof_stateless rpc api * add optional BlockHash to mmr_root rpc api * fixup! expose verify_proof rpc api * fix documentation phrasing Co-authored-by: Adrian Catangiu * documentation grammar Co-authored-by: Adrian Catangiu * define mmr error msgs together with error enum Co-authored-by: Serban Iorga * fixup! define mmr error msgs together with error enum * map decoding errors to CallError::InvalidParams Co-authored-by: Serban Iorga * fixup! 
map decoding errors to CallError::InvalidParams Co-authored-by: Adrian Catangiu Co-authored-by: parity-processbot <> Co-authored-by: Serban Iorga --- Cargo.lock | 2 + bin/node/runtime/src/lib.rs | 58 +--- client/beefy/src/tests.rs | 36 +-- frame/beefy-mmr/primitives/src/lib.rs | 2 +- frame/merkle-mountain-range/rpc/Cargo.toml | 1 + frame/merkle-mountain-range/rpc/src/lib.rs | 287 +++++++++--------- frame/merkle-mountain-range/src/lib.rs | 50 ++- frame/merkle-mountain-range/src/mmr/mmr.rs | 16 +- .../merkle-mountain-range/src/mmr/storage.rs | 2 +- frame/merkle-mountain-range/src/tests.rs | 82 +++-- primitives/beefy/src/mmr.rs | 2 +- primitives/merkle-mountain-range/Cargo.toml | 1 + primitives/merkle-mountain-range/src/lib.rs | 97 ++---- 13 files changed, 249 insertions(+), 387 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d65411de0125c..a959bc42def38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5667,6 +5667,7 @@ dependencies = [ name = "pallet-mmr-rpc" version = "3.0.0" dependencies = [ + "anyhow", "jsonrpsee", "parity-scale-codec", "serde", @@ -9707,6 +9708,7 @@ dependencies = [ "sp-debug-derive", "sp-runtime", "sp-std", + "thiserror", ] [[package]] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d94a31422be8b..270386be2bbd3 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2055,59 +2055,15 @@ impl_runtime_apis! { mmr::Hash, BlockNumber, > for Runtime { - fn generate_proof(block_number: BlockNumber) - -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> - { - Mmr::generate_batch_proof(vec![block_number]).and_then(|(leaves, proof)| - Ok(( - mmr::EncodableOpaqueLeaf::from_leaf(&leaves[0]), - mmr::BatchProof::into_single_leaf_proof(proof)? - )) - ) - } - - fn verify_proof(leaf: mmr::EncodableOpaqueLeaf, proof: mmr::Proof) - -> Result<(), mmr::Error> - { - let leaf: mmr::Leaf = leaf - .into_opaque_leaf() - .try_decode() - .ok_or(mmr::Error::Verify)?; - Mmr::verify_leaves(vec![leaf], mmr::Proof::into_batch_proof(proof)) - } - - fn verify_proof_stateless( - root: mmr::Hash, - leaf: mmr::EncodableOpaqueLeaf, - proof: mmr::Proof - ) -> Result<(), mmr::Error> { - let node = mmr::DataOrHash::Data(leaf.into_opaque_leaf()); - pallet_mmr::verify_leaves_proof::(root, vec![node], mmr::Proof::into_batch_proof(proof)) - } - fn mmr_root() -> Result { Ok(Mmr::mmr_root()) } - fn generate_batch_proof( - block_numbers: Vec, - ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { - Mmr::generate_batch_proof(block_numbers).map(|(leaves, proof)| { - ( - leaves - .into_iter() - .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) - .collect(), - proof, - ) - }) - } - - fn generate_historical_batch_proof( + fn generate_proof( block_numbers: Vec, - best_known_block_number: BlockNumber, - ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { - Mmr::generate_historical_batch_proof(block_numbers, best_known_block_number).map( + best_known_block_number: Option, + ) -> Result<(Vec, mmr::Proof), mmr::Error> { + Mmr::generate_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( leaves @@ -2120,7 +2076,7 @@ impl_runtime_apis! { ) } - fn verify_batch_proof(leaves: Vec, proof: mmr::BatchProof) + fn verify_proof(leaves: Vec, proof: mmr::Proof) -> Result<(), mmr::Error> { let leaves = leaves.into_iter().map(|leaf| @@ -2130,10 +2086,10 @@ impl_runtime_apis! 
{ Mmr::verify_leaves(leaves, proof) } - fn verify_batch_proof_stateless( + fn verify_proof_stateless( root: mmr::Hash, leaves: Vec, - proof: mmr::BatchProof + proof: mmr::Proof ) -> Result<(), mmr::Error> { let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); pallet_mmr::verify_leaves_proof::(root, nodes, proof) diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 2f0306209071e..1d5da4aaefba3 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -43,7 +43,7 @@ use beefy_primitives::{ KEY_TYPE as BeefyKeyType, }; use sc_network::{config::RequestResponseConfig, ProtocolName}; -use sp_mmr_primitives::{BatchProof, EncodableOpaqueLeaf, Error as MmrError, MmrApi, Proof}; +use sp_mmr_primitives::{EncodableOpaqueLeaf, Error as MmrError, MmrApi, Proof}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_consensus::BlockOrigin; @@ -246,47 +246,25 @@ macro_rules! create_test_api { } impl MmrApi> for RuntimeApi { - fn generate_proof(_block_number: u64) - -> Result<(EncodableOpaqueLeaf, Proof), MmrError> { - unimplemented!() - } - - fn verify_proof(_leaf: EncodableOpaqueLeaf, _proof: Proof) - -> Result<(), MmrError> { - unimplemented!() - } - - fn verify_proof_stateless( - _root: MmrRootHash, - _leaf: EncodableOpaqueLeaf, - _proof: Proof - ) -> Result<(), MmrError> { - unimplemented!() - } - fn mmr_root() -> Result { Ok($mmr_root) } - fn generate_batch_proof(_block_numbers: Vec) -> Result<(Vec, BatchProof), MmrError> { - unimplemented!() - } - - fn generate_historical_batch_proof( + fn generate_proof( _block_numbers: Vec, - _best_known_block_number: u64 - ) -> Result<(Vec, BatchProof), MmrError> { + _best_known_block_number: Option + ) -> Result<(Vec, Proof), MmrError> { unimplemented!() } - fn verify_batch_proof(_leaves: Vec, _proof: BatchProof) -> Result<(), MmrError> { + fn verify_proof(_leaves: Vec, _proof: Proof) -> Result<(), MmrError> { unimplemented!() } - fn verify_batch_proof_stateless( + fn verify_proof_stateless( _root: MmrRootHash, _leaves: Vec, - _proof: BatchProof + _proof: Proof ) -> Result<(), MmrError> { unimplemented!() } diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs index f56be8bcafe5b..f88fb89acaaab 100644 --- a/frame/beefy-mmr/primitives/src/lib.rs +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -29,7 +29,7 @@ //! Inner nodes are created by concatenating child hashes and hashing again. The implementation //! does not perform any sorting of the input data (leaves) nor when inner nodes are created. //! -//! If the number of leaves is not even, last leave (hash of) is promoted to the upper layer. +//! If the number of leaves is not even, last leaf (hash of) is promoted to the upper layer. 
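Editor's note: the one-word doc fix above describes the merkelization rule of the beefy-mmr binary merkle tree: sibling hashes are concatenated and re-hashed to form the parent node, and when a layer has an odd number of entries the last hash is carried up unchanged. A sketch of a single layer step, with a made-up `next_layer` helper and a toy hash closure standing in for the real hasher (e.g. `Keccak256`); this is not the crate's actual implementation:

```rust
// Sketch only: one round of the rule described above.
fn next_layer(layer: &[[u8; 32]], hash: impl Fn(&[u8]) -> [u8; 32]) -> Vec<[u8; 32]> {
    let mut upper = Vec::with_capacity((layer.len() + 1) / 2);
    for pair in layer.chunks(2) {
        match pair {
            [left, right] => {
                // inner node: hash of the concatenated child hashes
                let mut concat = left.to_vec();
                concat.extend_from_slice(right);
                upper.push(hash(&concat));
            },
            // odd element out: promoted to the upper layer as-is
            [odd] => upper.push(*odd),
            _ => unreachable!("chunks(2) yields one or two elements"),
        }
    }
    upper
}

fn main() {
    // toy, non-cryptographic stand-in for the real hasher
    let fake_hash = |data: &[u8]| {
        let mut out = [0u8; 32];
        for (i, b) in data.iter().enumerate() {
            out[i % 32] ^= *b;
        }
        out
    };
    let leaves = [[1u8; 32], [2u8; 32], [3u8; 32]];
    let upper = next_layer(&leaves, fake_hash);
    assert_eq!(upper.len(), 2);
    assert_eq!(upper[1], [3u8; 32]); // the odd leaf is promoted unchanged
}
```

As the doc comment above states, the real tree applies no sorting to the input leaves; it only pairs, concatenates, and re-hashes.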
pub use sp_runtime::traits::Keccak256; use sp_runtime::{app_crypto::sp_core, sp_std, traits::Hash as HashT}; diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml index 3da6678a39bf2..eb2e1e8b53d9e 100644 --- a/frame/merkle-mountain-range/rpc/Cargo.toml +++ b/frame/merkle-mountain-range/rpc/Cargo.toml @@ -21,6 +21,7 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-mmr-primitives = { version = "4.0.0-dev", path = "../../../primitives/merkle-mountain-range" } sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } +anyhow = "1" [dev-dependencies] serde_json = "1.0.85" diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index ffc7ac2da56bf..8476d82f3e70d 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -22,7 +22,7 @@ use std::{marker::PhantomData, sync::Arc}; -use codec::{Codec, Encode}; +use codec::{Codec, Decode, Encode}; use jsonrpsee::{ core::{async_trait, RpcResult}, proc_macros::rpc, @@ -33,58 +33,33 @@ use serde::{Deserialize, Serialize}; use sp_api::{NumberFor, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::Bytes; -use sp_mmr_primitives::{BatchProof, Error as MmrError, Proof}; +use sp_mmr_primitives::{Error as MmrError, Proof}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; pub use sp_mmr_primitives::MmrApi as MmrRuntimeApi; const RUNTIME_ERROR: i32 = 8000; const MMR_ERROR: i32 = 8010; -const LEAF_NOT_FOUND_ERROR: i32 = MMR_ERROR + 1; -const GENERATE_PROOF_ERROR: i32 = MMR_ERROR + 2; - -/// Retrieved MMR leaf and its proof. -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] -#[serde(rename_all = "camelCase")] -pub struct LeafProof { - /// Block hash the proof was generated for. - pub block_hash: BlockHash, - /// SCALE-encoded leaf data. - pub leaf: Bytes, - /// SCALE-encoded proof data. See [sp_mmr_primitives::Proof]. - pub proof: Bytes, -} - -impl LeafProof { - /// Create new `LeafProof` from given concrete `leaf` and `proof`. - pub fn new(block_hash: BlockHash, leaf: Leaf, proof: Proof) -> Self - where - Leaf: Encode, - MmrHash: Encode, - { - Self { block_hash, leaf: Bytes(leaf.encode()), proof: Bytes(proof.encode()) } - } -} /// Retrieved MMR leaves and their proof. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] #[serde(rename_all = "camelCase")] -pub struct LeafBatchProof { +pub struct LeavesProof { /// Block hash the proof was generated for. pub block_hash: BlockHash, /// SCALE-encoded vector of `LeafData`. pub leaves: Bytes, - /// SCALE-encoded proof data. See [sp_mmr_primitives::BatchProof]. + /// SCALE-encoded proof data. See [sp_mmr_primitives::Proof]. pub proof: Bytes, } -impl LeafBatchProof { - /// Create new `LeafBatchProof` from a given vector of `Leaf` and a - /// [sp_mmr_primitives::BatchProof]. +impl LeavesProof { + /// Create new `LeavesProof` from a given vector of `Leaf` and a + /// [sp_mmr_primitives::Proof]. pub fn new( block_hash: BlockHash, leaves: Vec, - proof: BatchProof, + proof: Proof, ) -> Self where Leaf: Encode, @@ -96,63 +71,59 @@ impl LeafBatchProof { /// MMR RPC methods. #[rpc(client, server)] -pub trait MmrApi { - /// Generate MMR proof for given block number. - /// - /// This method calls into a runtime with MMR pallet included and attempts to generate - /// MMR proof for a block with a specified `block_number`. 
- /// Optionally, a block hash at which the runtime should be queried can be specified. - /// - /// Returns the (full) leaf itself and a proof for this leaf (compact encoding, i.e. hash of - /// the leaf). Both parameters are SCALE-encoded. - #[method(name = "mmr_generateProof")] - fn generate_proof( - &self, - block_number: BlockNumber, - at: Option, - ) -> RpcResult>; +pub trait MmrApi { + /// Get the MMR root hash for the current best block. + #[method(name = "mmr_root")] + fn mmr_root(&self, at: Option) -> RpcResult; - /// Generate MMR proof for the given block numbers. + /// Generate an MMR proof for the given `block_numbers`. /// /// This method calls into a runtime with MMR pallet included and attempts to generate - /// MMR proof for a set of blocks with the specific `block_numbers`. - /// Optionally, a block hash at which the runtime should be queried can be specified. + /// an MMR proof for the set of blocks that have the given `block_numbers` with the MMR root at + /// `best_known_block_number`. `best_known_block_number` must be larger than all the + /// `block_numbers` for the function to succeed. + /// + /// Optionally via `at`, a block hash at which the runtime should be queried can be specified. + /// Optionally via `best_known_block_number`, the proof can be generated using the MMR's state + /// at a specific best block. Note that if `best_known_block_number` is provided, then also + /// specifying the block hash via `at` isn't super-useful here, unless you're generating proof + /// using non-finalized blocks where there are several competing forks. That's because MMR state + /// will be fixed to the state with `best_known_block_number`, which already points to + /// some historical block. /// - /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of + /// Returns the (full) leaves and a proof for these leaves (compact encoding, i.e. hash of /// the leaves). Both parameters are SCALE-encoded. /// The order of entries in the `leaves` field of the returned struct /// is the same as the order of the entries in `block_numbers` supplied - #[method(name = "mmr_generateBatchProof")] - fn generate_batch_proof( + #[method(name = "mmr_generateProof")] + fn generate_proof( &self, block_numbers: Vec, + best_known_block_number: Option, at: Option, - ) -> RpcResult>; + ) -> RpcResult>; - /// Generate a MMR proof for the given `block_numbers` given the `best_known_block_number`. + /// Verify an MMR `proof`. /// - /// This method calls into a runtime with MMR pallet included and attempts to generate - /// a MMR proof for the set of blocks that have the given `block_numbers` with MMR given the - /// `best_known_block_number`. `best_known_block_number` must be larger than all the - /// `block_numbers` for the function to succeed. + /// This method calls into a runtime with MMR pallet included and attempts to verify + /// an MMR proof. /// - /// Optionally, a block hash at which the runtime should be queried can be specified. - /// Note that specifying the block hash isn't super-useful here, unless you're generating - /// proof using non-finalized blocks where there are several competing forks. That's because - /// MMR state will be fixed to the state with `best_known_block_number`, which already points to - /// some historical block. + /// Returns `true` if the proof is valid, else returns the verification error. 
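Editor's note: a sketch of what a positional-parameter call to the new `mmr_generateProof` method might look like on the wire, inferred from the `#[method(...)]` name and argument order in the trait above; the block numbers and best-known block are made up, and the trailing `at` block hash is left as `null`:

```rust
use serde_json::json;

fn main() {
    // Illustrative JSON-RPC request body only; the parameter order
    // (block_numbers, best_known_block_number, at) follows the trait above.
    let generate = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "mmr_generateProof",
        "params": [[1, 5, 6], 7, null]
    });
    println!("{generate}");
}
```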
+ #[method(name = "mmr_verifyProof")] + fn verify_proof(&self, proof: LeavesProof) -> RpcResult; + + /// Verify an MMR `proof` statelessly given an `mmr_root`. /// - /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of - /// the leaves). Both parameters are SCALE-encoded. - /// The order of entries in the `leaves` field of the returned struct - /// is the same as the order of the entries in `block_numbers` supplied - #[method(name = "mmr_generateHistoricalBatchProof")] - fn generate_historical_batch_proof( + /// This method calls into a runtime with MMR pallet included and attempts to verify + /// an MMR proof against a provided MMR root. + /// + /// Returns `true` if the proof is valid, else returns the verification error. + #[method(name = "mmr_verifyProofStateless")] + fn verify_proof_stateless( &self, - block_numbers: Vec, - best_known_block_number: BlockNumber, - at: Option, - ) -> RpcResult>; + mmr_root: MmrHash, + proof: LeavesProof, + ) -> RpcResult; } /// MMR RPC methods. @@ -169,7 +140,7 @@ impl Mmr { } #[async_trait] -impl MmrApiServer<::Hash, NumberFor> +impl MmrApiServer<::Hash, NumberFor, MmrHash> for Mmr where Block: BlockT, @@ -177,89 +148,102 @@ where Client::Api: MmrRuntimeApi>, MmrHash: Codec + Send + Sync + 'static, { - fn generate_proof( - &self, - block_number: NumberFor, - at: Option<::Hash>, - ) -> RpcResult> { + fn mmr_root(&self, at: Option<::Hash>) -> RpcResult { + let block_hash = at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. + self.client.info().best_hash); let api = self.client.runtime_api(); - let block_hash = at.unwrap_or_else(|| self.client.info().best_hash); - - let (leaf, proof) = api - .generate_proof_with_context( - &BlockId::hash(block_hash), - sp_core::ExecutionContext::OffchainCall(None), - block_number, - ) + let mmr_root = api + .mmr_root(&BlockId::Hash(block_hash)) .map_err(runtime_error_into_rpc_error)? .map_err(mmr_error_into_rpc_error)?; - - Ok(LeafProof::new(block_hash, leaf, proof)) + Ok(mmr_root) } - fn generate_batch_proof( + fn generate_proof( &self, block_numbers: Vec>, + best_known_block_number: Option>, at: Option<::Hash>, - ) -> RpcResult::Hash>> { + ) -> RpcResult::Hash>> { let api = self.client.runtime_api(); let block_hash = at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. self.client.info().best_hash); let (leaves, proof) = api - .generate_batch_proof_with_context( + .generate_proof_with_context( &BlockId::hash(block_hash), sp_core::ExecutionContext::OffchainCall(None), block_numbers, + best_known_block_number, ) .map_err(runtime_error_into_rpc_error)? .map_err(mmr_error_into_rpc_error)?; - Ok(LeafBatchProof::new(block_hash, leaves, proof)) + Ok(LeavesProof::new(block_hash, leaves, proof)) + } + + fn verify_proof(&self, proof: LeavesProof<::Hash>) -> RpcResult { + let api = self.client.runtime_api(); + + let leaves = Decode::decode(&mut &proof.leaves.0[..]) + .map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?; + + let decoded_proof = Decode::decode(&mut &proof.proof.0[..]) + .map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?; + + api.verify_proof_with_context( + &BlockId::hash(proof.block_hash), + sp_core::ExecutionContext::OffchainCall(None), + leaves, + decoded_proof, + ) + .map_err(runtime_error_into_rpc_error)? 
+ .map_err(mmr_error_into_rpc_error)?; + + Ok(true) } - fn generate_historical_batch_proof( + fn verify_proof_stateless( &self, - block_numbers: Vec>, - best_known_block_number: NumberFor, - at: Option<::Hash>, - ) -> RpcResult::Hash>> { + mmr_root: MmrHash, + proof: LeavesProof<::Hash>, + ) -> RpcResult { let api = self.client.runtime_api(); - let block_hash = at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash); - let (leaves, proof) = api - .generate_historical_batch_proof_with_context( - &BlockId::hash(block_hash), - sp_core::ExecutionContext::OffchainCall(None), - block_numbers, - best_known_block_number, - ) - .map_err(runtime_error_into_rpc_error)? - .map_err(mmr_error_into_rpc_error)?; + let leaves = Decode::decode(&mut &proof.leaves.0[..]) + .map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?; - Ok(LeafBatchProof::new(block_hash, leaves, proof)) + let decoded_proof = Decode::decode(&mut &proof.proof.0[..]) + .map_err(|e| CallError::InvalidParams(anyhow::Error::new(e)))?; + + api.verify_proof_stateless( + &BlockId::hash(proof.block_hash), + mmr_root, + leaves, + decoded_proof, + ) + .map_err(runtime_error_into_rpc_error)? + .map_err(mmr_error_into_rpc_error)?; + + Ok(true) } } -/// Converts a mmr-specific error into a [`CallError`]. +/// Converts an mmr-specific error into a [`CallError`]. fn mmr_error_into_rpc_error(err: MmrError) -> CallError { - let data = format!("{:?}", err); - match err { - MmrError::LeafNotFound => CallError::Custom(ErrorObject::owned( - LEAF_NOT_FOUND_ERROR, - "Leaf was not found", - Some(data), - )), - MmrError::GenerateProof => CallError::Custom(ErrorObject::owned( - GENERATE_PROOF_ERROR, - "Error while generating the proof", - Some(data), - )), - _ => CallError::Custom(ErrorObject::owned(MMR_ERROR, "Unexpected MMR error", Some(data))), - } + let error_code = MMR_ERROR + + match err { + MmrError::LeafNotFound => 1, + MmrError::GenerateProof => 2, + MmrError::Verify => 3, + MmrError::BlockNumToLeafIndex => 4, + MmrError::InvalidBestKnownBlock => 5, + _ => 0, + }; + + CallError::Custom(ErrorObject::owned(error_code, err.to_string(), Some(format!("{:?}", err)))) } /// Converts a runtime trap into a [`CallError`]. 
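Editor's note: with the mapping above, every MMR RPC failure shares the `MMR_ERROR` base (8010) plus a per-variant offset instead of bespoke messages. A hypothetical client-side helper (name made up) that maps a received code back to the error kind, with offsets copied from the `match` above:

```rust
// Sketch only: reverse mapping of the error codes produced by
// `mmr_error_into_rpc_error` (base 8010 plus the per-variant offset).
fn mmr_error_kind(code: i32) -> &'static str {
    match code - 8010 {
        1 => "LeafNotFound",
        2 => "GenerateProof",
        3 => "Verify",
        4 => "BlockNumToLeafIndex",
        5 => "InvalidBestKnownBlock",
        _ => "unspecified MMR error",
    }
}

fn main() {
    assert_eq!(mmr_error_kind(8011), "LeafNotFound");
    assert_eq!(mmr_error_kind(8015), "InvalidBestKnownBlock");
}
```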
@@ -281,12 +265,12 @@ mod tests { // given let leaf = vec![1_u8, 2, 3, 4]; let proof = Proof { - leaf_index: 1, + leaf_indices: vec![1], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], }; - let leaf_proof = LeafProof::new(H256::repeat_byte(0), leaf, proof); + let leaf_proof = LeavesProof::new(H256::repeat_byte(0), vec![leaf], proof); // when let actual = serde_json::to_string(&leaf_proof).unwrap(); @@ -294,21 +278,22 @@ mod tests { // then assert_eq!( actual, - r#"{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","leaf":"0x1001020304","proof":"0x010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"}"# + r#"{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","leaves":"0x041001020304","proof":"0x04010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"}"# ); } #[test] - fn should_serialize_leaf_batch_proof() { + fn should_serialize_leaves_proof() { // given - let leaf = vec![1_u8, 2, 3, 4]; - let proof = BatchProof { - leaf_indices: vec![1], + let leaf_a = vec![1_u8, 2, 3, 4]; + let leaf_b = vec![2_u8, 2, 3, 4]; + let proof = Proof { + leaf_indices: vec![1, 2], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], }; - let leaf_proof = LeafBatchProof::new(H256::repeat_byte(0), vec![leaf], proof); + let leaf_proof = LeavesProof::new(H256::repeat_byte(0), vec![leaf_a, leaf_b], proof); // when let actual = serde_json::to_string(&leaf_proof).unwrap(); @@ -316,19 +301,19 @@ mod tests { // then assert_eq!( actual, - r#"{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","leaves":"0x041001020304","proof":"0x04010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"}"# + r#"{"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000","leaves":"0x0810010203041002020304","proof":"0x080100000000000000020000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202"}"# ); } #[test] fn should_deserialize_leaf_proof() { // given - let expected = LeafProof { + let expected = LeavesProof { block_hash: H256::repeat_byte(0), - leaf: Bytes(vec![1_u8, 2, 3, 4].encode()), + leaves: Bytes(vec![vec![1_u8, 2, 3, 4]].encode()), proof: Bytes( Proof { - leaf_index: 1, + leaf_indices: vec![1], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], } @@ -337,10 +322,10 @@ mod tests { }; // when - let actual: LeafProof = serde_json::from_str(r#"{ + let actual: LeavesProof = serde_json::from_str(r#"{ "blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "leaf":"0x1001020304", - "proof":"0x010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202" + "leaves":"0x041001020304", + "proof":"0x04010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202" }"#).unwrap(); // then @@ -348,14 +333,14 @@ mod tests { } #[test] - fn should_deserialize_leaf_batch_proof() { + fn should_deserialize_leaves_proof() { // 
given - let expected = LeafBatchProof { + let expected = LeavesProof { block_hash: H256::repeat_byte(0), - leaves: Bytes(vec![vec![1_u8, 2, 3, 4]].encode()), + leaves: Bytes(vec![vec![1_u8, 2, 3, 4], vec![2_u8, 2, 3, 4]].encode()), proof: Bytes( - BatchProof { - leaf_indices: vec![1], + Proof { + leaf_indices: vec![1, 2], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], } @@ -364,10 +349,10 @@ mod tests { }; // when - let actual: LeafBatchProof = serde_json::from_str(r#"{ + let actual: LeavesProof = serde_json::from_str(r#"{ "blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "leaves":"0x041001020304", - "proof":"0x04010000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202" + "leaves":"0x0810010203041002020304", + "proof":"0x080100000000000000020000000000000009000000000000000801010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202" }"#).unwrap(); // then diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 92ab8702b65c0..a2d42417ae5dc 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -22,7 +22,7 @@ //! Details on Merkle Mountain Ranges (MMRs) can be found here: //! //! -//! The MMR pallet constructs a MMR from leaf data obtained on every block from +//! The MMR pallet constructs an MMR from leaf data obtained on every block from //! `LeafDataProvider`. MMR nodes are stored both in: //! - on-chain storage - hashes only; not full leaf content) //! - off-chain storage - via Indexing API we push full leaf content (and all internal nodes as @@ -50,7 +50,7 @@ //! //! Secondary use case is to archive historical data, but still be able to retrieve them on-demand //! if needed. For instance if parent block hashes are stored in the MMR it's possible at any point -//! in time to provide a MMR proof about some past block hash, while this data can be safely pruned +//! in time to provide an MMR proof about some past block hash, while this data can be safely pruned //! from on-chain storage. //! //! NOTE This pallet is experimental and not proven to work in production. @@ -103,7 +103,7 @@ pub trait WeightInfo { fn on_initialize(peaks: NodeIndex) -> Weight; } -/// A MMR specific to the pallet. +/// An MMR specific to the pallet. type ModuleMmr = mmr::Mmr>; /// Leaf data. @@ -287,15 +287,15 @@ pub mod pallet { /// Stateless MMR proof verification for batch of leaves. /// -/// This function can be used to verify received MMR [primitives::BatchProof] (`proof`) +/// This function can be used to verify received MMR [primitives::Proof] (`proof`) /// for given leaves set (`leaves`) against a known MMR root hash (`root`). /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the -/// [primitives::BatchProof]. +/// [primitives::Proof]. pub fn verify_leaves_proof( root: H::Output, leaves: Vec>, - proof: primitives::BatchProof, + proof: primitives::Proof, ) -> Result<(), primitives::Error> where H: traits::Hash, @@ -381,37 +381,23 @@ impl, I: 'static> Pallet { Ok(leaf_idx) } - /// Generate a MMR proof for the given `block_numbers`. + /// Generate an MMR proof for the given `block_numbers`. 
+ /// If `best_known_block_number = Some(n)`, this generates a historical proof for + /// the chain with head at height `n`. + /// Else it generates a proof for the MMR at the current block height. /// /// Note this method can only be used from an off-chain context /// (Offchain Worker or Runtime API call), since it requires /// all the leaves to be present. /// It may return an error or panic if used incorrectly. - pub fn generate_batch_proof( + pub fn generate_proof( block_numbers: Vec, - ) -> Result< - (Vec>, primitives::BatchProof<>::Hash>), - primitives::Error, - > { - Self::generate_historical_batch_proof( - block_numbers, - >::block_number(), - ) - } + best_known_block_number: Option, + ) -> Result<(Vec>, primitives::Proof<>::Hash>), primitives::Error> { + // check whether best_known_block_number provided, else use current best block + let best_known_block_number = + best_known_block_number.unwrap_or_else(|| >::block_number()); - /// Generate a MMR proof for the given `block_numbers` given the `best_known_block_number`. - /// - /// Note this method can only be used from an off-chain context - /// (Offchain Worker or Runtime API call), since it requires - /// all the leaves to be present. - /// It may return an error or panic if used incorrectly. - pub fn generate_historical_batch_proof( - block_numbers: Vec, - best_known_block_number: T::BlockNumber, - ) -> Result< - (Vec>, primitives::BatchProof<>::Hash>), - primitives::Error, - > { let leaves_count = Self::block_num_to_leaf_index(best_known_block_number)?.saturating_add(1); @@ -424,7 +410,7 @@ impl, I: 'static> Pallet { .collect::, _>>()?; let mmr: ModuleMmr = mmr::Mmr::new(leaves_count); - mmr.generate_batch_proof(leaf_indices) + mmr.generate_proof(leaf_indices) } /// Return the on-chain MMR root hash. @@ -440,7 +426,7 @@ impl, I: 'static> Pallet { /// or the proof is invalid. pub fn verify_leaves( leaves: Vec>, - proof: primitives::BatchProof<>::Hash>, + proof: primitives::Proof<>::Hash>, ) -> Result<(), primitives::Error> { if proof.leaf_count > Self::mmr_leaves() || proof.leaf_count == 0 || diff --git a/frame/merkle-mountain-range/src/mmr/mmr.rs b/frame/merkle-mountain-range/src/mmr/mmr.rs index 44e684c1bdcac..1f5a5bdae380b 100644 --- a/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -29,11 +29,11 @@ use sp_std::prelude::*; /// Stateless verification of the proof for a batch of leaves. /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the -/// [primitives::BatchProof] +/// [primitives::Proof] pub fn verify_leaves_proof( root: H::Output, leaves: Vec>, - proof: primitives::BatchProof, + proof: primitives::Proof, ) -> Result where H: sp_runtime::traits::Hash, @@ -60,7 +60,7 @@ where .map_err(|e| Error::Verify.log_debug(e)) } -/// A wrapper around a MMR library to expose limited functionality. +/// A wrapper around an MMR library to expose limited functionality. /// /// Available functions depend on the storage kind ([Runtime](crate::mmr::storage::RuntimeStorage) /// vs [Off-chain](crate::mmr::storage::OffchainStorage)). @@ -91,11 +91,11 @@ where /// Verify proof for a set of leaves. 
/// Note, the leaves should be sorted such that corresponding leaves and leaf indices have /// the same position in both the `leaves` vector and the `leaf_indices` vector contained in the - /// [primitives::BatchProof] + /// [primitives::Proof] pub fn verify_leaves_proof( &self, leaves: Vec, - proof: primitives::BatchProof<>::Hash>, + proof: primitives::Proof<>::Hash>, ) -> Result { let p = mmr_lib::MerkleProof::, Hasher, L>>::new( self.mmr.mmr_size(), @@ -163,10 +163,10 @@ where /// /// Proof generation requires all the nodes (or their hashes) to be available in the storage. /// (i.e. you can't run the function in the pruned storage). - pub fn generate_batch_proof( + pub fn generate_proof( &self, leaf_indices: Vec, - ) -> Result<(Vec, primitives::BatchProof<>::Hash>), Error> { + ) -> Result<(Vec, primitives::Proof<>::Hash>), Error> { let positions = leaf_indices .iter() .map(|index| mmr_lib::leaf_index_to_pos(*index)) @@ -184,7 +184,7 @@ where self.mmr .gen_proof(positions) .map_err(|e| Error::GenerateProof.log_error(e)) - .map(|p| primitives::BatchProof { + .map(|p| primitives::Proof { leaf_indices, leaf_count, items: p.proof_items().iter().map(|x| x.hash()).collect(), diff --git a/frame/merkle-mountain-range/src/mmr/storage.rs b/frame/merkle-mountain-range/src/mmr/storage.rs index 870ce81226bd2..d16ca8cf1e5c8 100644 --- a/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/frame/merkle-mountain-range/src/mmr/storage.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! A MMR storage implementations. +//! An MMR storage implementation. use codec::Encode; use frame_support::log::{debug, error, trace}; diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index dbbdc12c8e1d5..e47f1b3b2e63a 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -27,7 +27,7 @@ use sp_core::{ offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, H256, }; -use sp_mmr_primitives::{BatchProof, Compact}; +use sp_mmr_primitives::{Compact, Proof}; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { frame_system::GenesisConfig::default().build_storage::().unwrap().into() @@ -236,18 +236,18 @@ fn should_generate_proofs_correctly() { // when generate proofs for all leaves. 
let proofs = (1_u64..=best_block_number) .into_iter() - .map(|block_num| crate::Pallet::::generate_batch_proof(vec![block_num]).unwrap()) + .map(|block_num| crate::Pallet::::generate_proof(vec![block_num], None).unwrap()) .collect::>(); // when generate historical proofs for all leaves let historical_proofs = (1_u64..best_block_number) .into_iter() .map(|block_num| { let mut proofs = vec![]; - for leaves_count in block_num..=num_blocks { + for historical_best_block in block_num..=num_blocks { proofs.push( - crate::Pallet::::generate_historical_batch_proof( + crate::Pallet::::generate_proof( vec![block_num], - leaves_count, + Some(historical_best_block), ) .unwrap(), ) @@ -261,7 +261,7 @@ fn should_generate_proofs_correctly() { proofs[0], ( vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], - BatchProof { + Proof { leaf_indices: vec![0], leaf_count: 7, items: vec![ @@ -276,7 +276,7 @@ fn should_generate_proofs_correctly() { historical_proofs[0][0], ( vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], - BatchProof { leaf_indices: vec![0], leaf_count: 1, items: vec![] } + Proof { leaf_indices: vec![0], leaf_count: 1, items: vec![] } ) ); @@ -292,7 +292,7 @@ fn should_generate_proofs_correctly() { proofs[2], ( vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - BatchProof { + Proof { leaf_indices: vec![2], leaf_count: 7, items: vec![ @@ -312,7 +312,7 @@ fn should_generate_proofs_correctly() { historical_proofs[2][0], ( vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - BatchProof { + Proof { leaf_indices: vec![2], leaf_count: 3, items: vec![hex( @@ -332,7 +332,7 @@ fn should_generate_proofs_correctly() { historical_proofs[2][2], ( vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - BatchProof { + Proof { leaf_indices: vec![2], leaf_count: 5, items: vec![ @@ -350,7 +350,7 @@ fn should_generate_proofs_correctly() { ( // NOTE: the leaf index is equivalent to the block number(in this case 5) - 1 vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], - BatchProof { + Proof { leaf_indices: vec![4], leaf_count: 7, items: vec![ @@ -365,7 +365,7 @@ fn should_generate_proofs_correctly() { historical_proofs[4][0], ( vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], - BatchProof { + Proof { leaf_indices: vec![4], leaf_count: 5, items: vec![hex( @@ -380,7 +380,7 @@ fn should_generate_proofs_correctly() { proofs[6], ( vec![Compact::new(((6, H256::repeat_byte(7)).into(), LeafData::new(7).into(),))], - BatchProof { + Proof { leaf_indices: vec![6], leaf_count: 7, items: vec![ @@ -407,11 +407,11 @@ fn should_generate_batch_proof_correctly() { register_offchain_ext(&mut ext); ext.execute_with(|| { // when generate proofs for a batch of leaves - let (.., proof) = crate::Pallet::::generate_batch_proof(vec![1, 5, 6]).unwrap(); + let (.., proof) = crate::Pallet::::generate_proof(vec![1, 5, 6], None).unwrap(); // then assert_eq!( proof, - BatchProof { + Proof { // the leaf indices are equivalent to the above specified block numbers - 1. 
leaf_indices: vec![0, 4, 5], leaf_count: 7, @@ -425,11 +425,11 @@ fn should_generate_batch_proof_correctly() { // when generate historical proofs for a batch of leaves let (.., historical_proof) = - crate::Pallet::::generate_historical_batch_proof(vec![1, 5, 6], 6).unwrap(); + crate::Pallet::::generate_proof(vec![1, 5, 6], Some(6)).unwrap(); // then assert_eq!( historical_proof, - BatchProof { + Proof { leaf_indices: vec![0, 4, 5], leaf_count: 6, items: vec![ @@ -441,7 +441,7 @@ fn should_generate_batch_proof_correctly() { // when generate historical proofs for a batch of leaves let (.., historical_proof) = - crate::Pallet::::generate_historical_batch_proof(vec![1, 5, 6], 7).unwrap(); + crate::Pallet::::generate_proof(vec![1, 5, 6], None).unwrap(); // then assert_eq!(historical_proof, proof); }); @@ -462,15 +462,15 @@ fn should_verify() { register_offchain_ext(&mut ext); let (leaves, proof5) = ext.execute_with(|| { // when - crate::Pallet::::generate_batch_proof(vec![5]).unwrap() + crate::Pallet::::generate_proof(vec![5], None).unwrap() }); let (simple_historical_leaves, simple_historical_proof5) = ext.execute_with(|| { // when - crate::Pallet::::generate_historical_batch_proof(vec![5], 6).unwrap() + crate::Pallet::::generate_proof(vec![5], Some(6)).unwrap() }); let (advanced_historical_leaves, advanced_historical_proof5) = ext.execute_with(|| { // when - crate::Pallet::::generate_historical_batch_proof(vec![5], 7).unwrap() + crate::Pallet::::generate_proof(vec![5], Some(7)).unwrap() }); ext.execute_with(|| { @@ -502,22 +502,18 @@ fn should_verify_batch_proofs() { blocks_to_add: usize, ) { let (leaves, proof) = ext.execute_with(|| { - crate::Pallet::::generate_batch_proof(block_numbers.to_vec()).unwrap() + crate::Pallet::::generate_proof(block_numbers.to_vec(), None).unwrap() }); - let mmr_size = ext.execute_with(|| crate::Pallet::::mmr_leaves()); - let min_mmr_size = block_numbers.iter().max().unwrap() + 1; + let max_block_number = ext.execute_with(|| frame_system::Pallet::::block_number()); + let min_block_number = block_numbers.iter().max().unwrap(); - // generate historical proofs for all possible mmr sizes, - // lower bound being index of highest leaf to be proven - let historical_proofs = (min_mmr_size..=mmr_size) - .map(|mmr_size| { + // generate all possible historical proofs for the given blocks + let historical_proofs = (*min_block_number..=max_block_number) + .map(|best_block| { ext.execute_with(|| { - crate::Pallet::::generate_historical_batch_proof( - block_numbers.to_vec(), - mmr_size, - ) - .unwrap() + crate::Pallet::::generate_proof(block_numbers.to_vec(), Some(best_block)) + .unwrap() }) }) .collect::>(); @@ -602,11 +598,11 @@ fn verification_should_be_stateless() { register_offchain_ext(&mut ext); let (leaves, proof5) = ext.execute_with(|| { // when - crate::Pallet::::generate_batch_proof(vec![5]).unwrap() + crate::Pallet::::generate_proof(vec![5], None).unwrap() }); let (_, historical_proof5) = ext.execute_with(|| { // when - crate::Pallet::::generate_historical_batch_proof(vec![5], 6).unwrap() + crate::Pallet::::generate_proof(vec![5], Some(6)).unwrap() }); // Verify proof without relying on any on-chain data. 
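// A rough sketch of the unified API these tests exercise (illustrative fragment only: the
// test-externalities setup is omitted and `Test` stands for the crate's mock runtime):
// Proof against the current best block -- this replaces the old `generate_batch_proof`:
let (_leaves, _proof) = crate::Pallet::<Test>::generate_proof(vec![1, 5, 6], None).unwrap();
// Proof against the MMR as it stood at block 6 -- replaces `generate_historical_batch_proof`:
let (_leaves, _proof) = crate::Pallet::<Test>::generate_proof(vec![1, 5, 6], Some(6)).unwrap();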
@@ -650,11 +646,11 @@ fn should_verify_batch_proof_statelessly() { register_offchain_ext(&mut ext); let (leaves, proof) = ext.execute_with(|| { // when - crate::Pallet::::generate_batch_proof(vec![1, 4, 5]).unwrap() + crate::Pallet::::generate_proof(vec![1, 4, 5], None).unwrap() }); let (historical_leaves, historical_proof) = ext.execute_with(|| { // when - crate::Pallet::::generate_historical_batch_proof(vec![1, 4, 5], 6).unwrap() + crate::Pallet::::generate_proof(vec![1, 4, 5], Some(6)).unwrap() }); // Verify proof without relying on any on-chain data. @@ -694,7 +690,7 @@ fn should_verify_on_the_next_block_since_there_is_no_pruning_yet() { ext.execute_with(|| { // when - let (leaves, proof5) = crate::Pallet::::generate_batch_proof(vec![5]).unwrap(); + let (leaves, proof5) = crate::Pallet::::generate_proof(vec![5], None).unwrap(); new_block(); // then @@ -928,7 +924,7 @@ fn should_verify_canonicalized() { // Generate proofs for some blocks. let (leaves, proofs) = - ext.execute_with(|| crate::Pallet::::generate_batch_proof(vec![1, 4, 5, 7]).unwrap()); + ext.execute_with(|| crate::Pallet::::generate_proof(vec![1, 4, 5, 7], None).unwrap()); // Verify all previously generated proofs. ext.execute_with(|| { assert_eq!(crate::Pallet::::verify_leaves(leaves, proofs), Ok(())); @@ -936,7 +932,7 @@ fn should_verify_canonicalized() { // Generate proofs for some new blocks. let (leaves, proofs) = ext.execute_with(|| { - crate::Pallet::::generate_batch_proof(vec![block_hash_size + 7]).unwrap() + crate::Pallet::::generate_proof(vec![block_hash_size + 7], None).unwrap() }); // Add some more blocks then verify all previously generated proofs. ext.execute_with(|| { @@ -960,19 +956,19 @@ fn does_not_panic_when_generating_historical_proofs() { ext.execute_with(|| { // when leaf index is invalid assert_eq!( - crate::Pallet::::generate_historical_batch_proof(vec![10], 7), + crate::Pallet::::generate_proof(vec![10], None), Err(Error::BlockNumToLeafIndex), ); // when leaves count is invalid assert_eq!( - crate::Pallet::::generate_historical_batch_proof(vec![3], 100), + crate::Pallet::::generate_proof(vec![3], Some(100)), Err(Error::BlockNumToLeafIndex), ); // when both leaf index and leaves count are invalid assert_eq!( - crate::Pallet::::generate_historical_batch_proof(vec![10], 100), + crate::Pallet::::generate_proof(vec![10], Some(100)), Err(Error::BlockNumToLeafIndex), ); }); diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs index 78ceef0b6b396..549d2edbdf287 100644 --- a/primitives/beefy/src/mmr.rs +++ b/primitives/beefy/src/mmr.rs @@ -64,7 +64,7 @@ pub struct MmrLeaf { pub leaf_extra: ExtraData, } -/// A MMR leaf versioning scheme. +/// An MMR leaf versioning scheme. 
/// /// Version is a single byte that constist of two components: /// - `major` - 3 bits diff --git a/primitives/merkle-mountain-range/Cargo.toml b/primitives/merkle-mountain-range/Cargo.toml index 0be53132f3eec..e857974ba898c 100644 --- a/primitives/merkle-mountain-range/Cargo.toml +++ b/primitives/merkle-mountain-range/Cargo.toml @@ -21,6 +21,7 @@ sp-core = { version = "6.0.0", default-features = false, path = "../core" } sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } +thiserror = "1.0" [dev-dependencies] array-bytes = "4.1" diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 06bc1f4bffe84..d46cb73c3c5e8 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -23,9 +23,9 @@ use scale_info::TypeInfo; use sp_debug_derive::RuntimeDebug; use sp_runtime::traits; +use sp_std::fmt; #[cfg(not(feature = "std"))] use sp_std::prelude::Vec; -use sp_std::{fmt, vec}; /// A type to describe node position in the MMR (node index). pub type NodeIndex = u64; @@ -69,17 +69,6 @@ impl OnNewRoot for () { fn on_new_root(_root: &Hash) {} } -/// A MMR proof data for one of the leaves. -#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] -pub struct Proof { - /// The index of the leaf the proof is for. - pub leaf_index: LeafIndex, - /// Number of leaves in MMR, when the proof was generated. - pub leaf_count: NodeIndex, - /// Proof elements (hashes of siblings of inner nodes on the path to the leaf). - pub items: Vec, -} - /// A full leaf content stored in the offchain-db. pub trait FullLeaf: Clone + PartialEq + fmt::Debug { /// Encode the leaf either in its full or compact form. @@ -352,9 +341,9 @@ impl_leaf_data_for_tuple!(A:0, B:1, C:2); impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3); impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); -/// A MMR proof data for a group of leaves. +/// An MMR proof data for a group of leaves. #[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] -pub struct BatchProof { +pub struct Proof { /// The indices of the leaves the proof is for. pub leaf_indices: Vec, /// Number of leaves in MMR, when the proof was generated. @@ -363,49 +352,39 @@ pub struct BatchProof { pub items: Vec, } -impl BatchProof { - /// Converts batch proof to single leaf proof - pub fn into_single_leaf_proof(proof: BatchProof) -> Result, Error> { - Ok(Proof { - leaf_index: *proof.leaf_indices.get(0).ok_or(Error::InvalidLeafIndex)?, - leaf_count: proof.leaf_count, - items: proof.items, - }) - } -} - -impl Proof { - /// Converts a single leaf proof into a batch proof - pub fn into_batch_proof(proof: Proof) -> BatchProof { - BatchProof { - leaf_indices: vec![proof.leaf_index], - leaf_count: proof.leaf_count, - items: proof.items, - } - } -} /// Merkle Mountain Range operation error. +#[cfg_attr(feature = "std", derive(thiserror::Error))] #[derive(RuntimeDebug, codec::Encode, codec::Decode, PartialEq, Eq)] pub enum Error { /// Error during translation of a block number into a leaf index. + #[cfg_attr(feature = "std", error("Error translation block number into leaf index"))] BlockNumToLeafIndex, /// Error while pushing new node. + #[cfg_attr(feature = "std", error("Error pushing new node"))] Push, /// Error getting the new root. 
+ #[cfg_attr(feature = "std", error("Error getting new root"))] GetRoot, - /// Error commiting changes. + /// Error committing changes. + #[cfg_attr(feature = "std", error("Error committing changes"))] Commit, /// Error during proof generation. + #[cfg_attr(feature = "std", error("Error generating proof"))] GenerateProof, /// Proof verification error. + #[cfg_attr(feature = "std", error("Invalid proof"))] Verify, /// Leaf not found in the storage. + #[cfg_attr(feature = "std", error("Leaf was not found"))] LeafNotFound, /// Mmr Pallet not included in runtime + #[cfg_attr(feature = "std", error("MMR pallet not included in runtime"))] PalletNotIncluded, /// Cannot find the requested leaf index + #[cfg_attr(feature = "std", error("Requested leaf index invalid"))] InvalidLeafIndex, /// The provided best know block number is invalid. + #[cfg_attr(feature = "std", error("Provided best known block number invalid"))] InvalidBestKnownBlock, } @@ -437,53 +416,31 @@ impl Error { sp_api::decl_runtime_apis! { /// API to interact with MMR pallet. pub trait MmrApi { - /// Generate MMR proof for a block with a specified `block_number`. - fn generate_proof(block_number: BlockNumber) -> Result<(EncodableOpaqueLeaf, Proof), Error>; - - /// Verify MMR proof against on-chain MMR. - /// - /// Note this function will use on-chain MMR root hash and check if the proof - /// matches the hash. - /// See [Self::verify_proof_stateless] for a stateless verifier. - fn verify_proof(leaf: EncodableOpaqueLeaf, proof: Proof) -> Result<(), Error>; - - /// Verify MMR proof against given root hash. - /// - /// Note this function does not require any on-chain storage - the - /// proof is verified against given MMR root hash. - /// - /// The leaf data is expected to be encoded in its compact form. - fn verify_proof_stateless(root: Hash, leaf: EncodableOpaqueLeaf, proof: Proof) - -> Result<(), Error>; - /// Return the on-chain MMR root hash. fn mmr_root() -> Result; - /// Generate MMR proof for a series of blocks with the specified block numbers. - fn generate_batch_proof(block_numbers: Vec) -> Result<(Vec, BatchProof), Error>; - - /// Generate MMR proof for a series of `block_numbers`, given the `best_known_block_number`. - fn generate_historical_batch_proof( + /// Generate MMR proof for a series of block numbers. If `best_known_block_number = Some(n)`, + /// use historical MMR state at given block height `n`. Else, use current MMR state. + fn generate_proof( block_numbers: Vec, - best_known_block_number: BlockNumber - ) -> Result<(Vec, BatchProof), Error>; + best_known_block_number: Option + ) -> Result<(Vec, Proof), Error>; /// Verify MMR proof against on-chain MMR for a batch of leaves. /// - /// Note this function will use on-chain MMR root hash and check if the proof - /// matches the hash. + /// Note this function will use on-chain MMR root hash and check if the proof matches the hash. /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the - /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [BatchProof] - fn verify_batch_proof(leaves: Vec, proof: BatchProof) -> Result<(), Error>; + /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [Proof] + fn verify_proof(leaves: Vec, proof: Proof) -> Result<(), Error>; - /// Verify MMR proof against given root hash or a batch of leaves. + /// Verify MMR proof against given root hash for a batch of leaves. 
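///
/// (Illustrative flow, not mandated by the API: obtain `(leaves, proof)` from
/// `generate_proof(block_numbers, None)`, learn the root out-of-band -- e.g. from `mmr_root()`
/// or a BEEFY commitment -- and check the pair here.)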
/// /// Note this function does not require any on-chain storage - the /// proof is verified against given MMR root hash. /// /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the - /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [BatchProof] - fn verify_batch_proof_stateless(root: Hash, leaves: Vec, proof: BatchProof) + /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [Proof] + fn verify_proof_stateless(root: Hash, leaves: Vec, proof: Proof) -> Result<(), Error>; } } @@ -508,7 +465,7 @@ mod tests { fn should_encode_decode_proof() { // given let proof: TestProof = Proof { - leaf_index: 5, + leaf_indices: vec![5], leaf_count: 10, items: vec![ hex("c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd"), From 970fe0fa35b88c7a8fa7a20ba3b60ac6b55699a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Nov 2022 19:31:41 +0000 Subject: [PATCH 336/348] Bump ss58-registry from 1.29.0 to 1.34.0 (#12659) Bumps [ss58-registry](https://github.com/paritytech/ss58-registry) from 1.29.0 to 1.34.0. - [Release notes](https://github.com/paritytech/ss58-registry/releases) - [Changelog](https://github.com/paritytech/ss58-registry/blob/main/CHANGELOG.md) - [Commits](https://github.com/paritytech/ss58-registry/compare/v1.29.0...v1.34.0) --- updated-dependencies: - dependency-name: ss58-registry dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- primitives/core/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a959bc42def38..af300dd049d9c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10129,9 +10129,9 @@ dependencies = [ [[package]] name = "ss58-registry" -version = "1.29.0" +version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0837b5d62f42082c9d56cd946495ae273a3c68083b637b9153341d5e465146d" +checksum = "37a9821878e1f13aba383aa40a86fb1b33c7265774ec91e32563cb1dd1577496" dependencies = [ "Inflector", "num-format", diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 12cc9e2f0b60f..7196598056fe5 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -57,7 +57,7 @@ schnorrkel = { version = "0.9.1", features = [ libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } merlin = { version = "2.0", default-features = false, optional = true } secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } -ss58-registry = { version = "1.29.0", default-features = false } +ss58-registry = { version = "1.34.0", default-features = false } sp-core-hashing = { version = "4.0.0", path = "./hashing", default-features = false, optional = true } sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../runtime-interface" } From 087ec5a5b2c9a960fe0f8dc658ab5b21fe8199b3 Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Wed, 9 Nov 2022 20:59:40 +0100 Subject: [PATCH 337/348] Add `CreateOrigin` to Assets Pallet (#12586) * add CreateOrigin to Assets pallet * fix asset-tx-payment test * 
use AccountId > u64 in test * Update frame/assets/src/benchmarking.rs Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> Co-authored-by: Jegor Sidorenko <5252494+jsidorenko@users.noreply.github.com> --- bin/node/runtime/src/lib.rs | 1 + frame/assets/src/benchmarking.rs | 8 +++--- frame/assets/src/lib.rs | 27 ++++++++++++------- frame/assets/src/mock.rs | 3 ++- .../asset-tx-payment/src/tests.rs | 3 ++- frame/uniques/src/lib.rs | 2 +- 6 files changed, 29 insertions(+), 15 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 270386be2bbd3..eab40634ff241 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1438,6 +1438,7 @@ impl pallet_assets::Config for Runtime { type Balance = u128; type AssetId = u32; type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = EnsureRoot; type AssetDeposit = AssetDeposit; type AssetAccountDeposit = ConstU128; diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index ca891c2f42e4a..2bde2b0c98945 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -150,12 +150,14 @@ fn assert_event, I: 'static>(generic_event: >::Runti benchmarks_instance_pallet! { create { - let caller: T::AccountId = whitelisted_caller(); + let asset_id = Default::default(); + let origin = T::CreateOrigin::successful_origin(&asset_id); + let caller = T::CreateOrigin::ensure_origin(origin, &asset_id).unwrap(); let caller_lookup = T::Lookup::unlookup(caller.clone()); T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); - }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, 1u32.into()) + }: _(SystemOrigin::Signed(caller.clone()), asset_id, caller_lookup, 1u32.into()) verify { - assert_last_event::(Event::Created { asset_id: Default::default(), creator: caller.clone(), owner: caller }.into()); + assert_last_event::(Event::Created { asset_id, creator: caller.clone(), owner: caller }.into()); } force_create { diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 28743f917d323..7e7d68fa6c7dd 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -36,15 +36,15 @@ //! //! ### Terminology //! -//! * **Admin**: An account ID uniquely privileged to be able to unfreeze (thaw) an account and it's +//! * **Admin**: An account ID uniquely privileged to be able to unfreeze (thaw) an account and its //! assets, as well as forcibly transfer a particular class of assets between arbitrary accounts //! and reduce the balance of a particular class of assets of arbitrary accounts. //! * **Asset issuance/minting**: The creation of a new asset, whose total supply will belong to the -//! account that issues the asset. This is a privileged operation. +//! account designated as the beneficiary of the asset. This is a privileged operation. //! * **Asset transfer**: The reduction of the balance of an asset of one account with the //! corresponding increase in the balance of another. -//! * **Asset destruction**: The process of reduce the balance of an asset of one account. This is a -//! privileged operation. +//! * **Asset destruction**: The process of reducing the balance of an asset of one account. This is +//! a privileged operation. //! * **Fungible asset**: An asset whose units are interchangeable. //! * **Issuer**: An account ID uniquely privileged to be able to mint a particular class of assets. //! 
* **Freezer**: An account ID uniquely privileged to be able to freeze an account from @@ -63,12 +63,12 @@ //! //! The assets system in Substrate is designed to make the following possible: //! -//! * Issue a new assets in a permissioned or permissionless way, if permissionless, then with a +//! * Issue new assets in a permissioned or permissionless way, if permissionless, then with a //! deposit required. //! * Allow accounts to be delegated the ability to transfer assets without otherwise existing //! on-chain (*approvals*). //! * Move assets between accounts. -//! * Update the asset's total supply. +//! * Update an asset class's total supply. //! * Allow administrative activities by specially privileged accounts including freezing account //! balances and minting/burning assets. //! @@ -92,6 +92,7 @@ //! * `force_cancel_approval`: Rescind a previous approval. //! //! ### Privileged Functions +//! //! * `destroy`: Destroys an entire asset class; called by the asset class's Owner. //! * `mint`: Increases the asset balance of an account; called by the asset class's Issuer. //! * `burn`: Decreases the asset balance of an account; called by the asset class's Admin. @@ -156,7 +157,7 @@ use frame_support::{ traits::{ tokens::{fungibles, DepositConsequence, WithdrawConsequence}, BalanceStatus::Reserved, - Currency, ReservableCurrency, StoredMap, + Currency, EnsureOriginWithArg, ReservableCurrency, StoredMap, }, }; use frame_system::Config as SystemConfig; @@ -206,6 +207,14 @@ pub mod pallet { /// The currency mechanism. type Currency: ReservableCurrency; + /// Standard asset class creation is only allowed if the origin attempting it and the + /// asset class are in this set. + type CreateOrigin: EnsureOriginWithArg< + Self::RuntimeOrigin, + Self::AssetId, + Success = Self::AccountId, + >; + /// The origin which may forcibly create or destroy an asset or otherwise alter privileged /// attributes. type ForceOrigin: EnsureOrigin; @@ -485,7 +494,7 @@ pub mod pallet { /// /// This new asset class has no assets initially and its owner is the origin. /// - /// The origin must be Signed and the sender must have sufficient funds free. + /// The origin must conform to the configured `CreateOrigin` and have sufficient funds free. /// /// Funds of sender are reserved by `AssetDeposit`. 
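///
/// As a rough, illustrative sketch (generic parameters abridged), a permissionless runtime can
/// satisfy this with the `AsEnsureOriginWithArg` adapter used elsewhere in this change, e.g.
/// `type CreateOrigin = AsEnsureOriginWithArg<frame_system::EnsureSigned<AccountId>>;`, while a
/// permissioned deployment would point it at a tighter origin check instead.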
/// @@ -507,7 +516,7 @@ pub mod pallet { admin: AccountIdLookupOf, min_balance: T::Balance, ) -> DispatchResult { - let owner = ensure_signed(origin)?; + let owner = T::CreateOrigin::ensure_origin(origin, &id)?; let admin = T::Lookup::lookup(admin)?; ensure!(!Asset::::contains_key(id), Error::::InUse); diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 1f105eb6b4fbc..21fb52c9cd931 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -22,7 +22,7 @@ use crate as pallet_assets; use frame_support::{ construct_runtime, parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild}, + traits::{AsEnsureOriginWithArg, ConstU32, ConstU64, GenesisBuild}, }; use sp_core::H256; use sp_runtime::{ @@ -89,6 +89,7 @@ impl Config for Test { type Balance = u64; type AssetId = u32; type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = frame_system::EnsureRoot; type AssetDeposit = ConstU64<1>; type AssetAccountDeposit = ConstU64<10>; diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index e775f3aa92990..cfed1c33c9b24 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -21,7 +21,7 @@ use frame_support::{ dispatch::{DispatchClass, DispatchInfo, PostDispatchInfo}, pallet_prelude::*, parameter_types, - traits::{fungibles::Mutate, ConstU32, ConstU64, ConstU8, FindAuthor}, + traits::{fungibles::Mutate, AsEnsureOriginWithArg, ConstU32, ConstU64, ConstU8, FindAuthor}, weights::{Weight, WeightToFee as WeightToFeeT}, ConsensusEngineId, }; @@ -157,6 +157,7 @@ impl pallet_assets::Config for Runtime { type Balance = Balance; type AssetId = u32; type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; type ForceOrigin = EnsureRoot; type AssetDeposit = ConstU64<2>; type AssetAccountDeposit = ConstU64<2>; diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index d519eedd2787f..185d8fc0c8edd 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -437,7 +437,7 @@ pub mod pallet { /// /// This new collection has no items initially and its owner is the origin. /// - /// The origin must be Signed and the sender must have sufficient funds free. + /// The origin must conform to the configured `CreateOrigin` and have sufficient funds free. /// /// `ItemDeposit` funds of sender are reserved. 
/// From 2badd7203b915ae80499544056b3dc5496ccd80e Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Wed, 9 Nov 2022 21:30:50 +0000 Subject: [PATCH 338/348] `sp-runtime`: make `parity-util-mem` dependency optional (#12657) * `sp-runtime`: make `parity-util-mem` dependency optional * Use default-features = false for sp-runtime in sp-keyring * Remove parity-util-mem from sp-core * Cargo.lock * Restore default-features for keyring dependency --- Cargo.lock | 1 - primitives/core/Cargo.toml | 2 -- primitives/runtime/Cargo.toml | 2 +- primitives/runtime/src/transaction_validity.rs | 5 ++--- 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index af300dd049d9c..9fc220e099a5c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9525,7 +9525,6 @@ dependencies = [ "merlin", "num-traits", "parity-scale-codec", - "parity-util-mem", "parking_lot 0.12.1", "primitive-types", "rand 0.7.3", diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 7196598056fe5..3e8bac51289b8 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -40,7 +40,6 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } sp-storage = { version = "6.0.0", default-features = false, path = "../storage" } sp-externalities = { version = "0.12.0", optional = true, path = "../externalities" } -parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } futures = { version = "0.3.21", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { version = "1.0.30", optional = true } @@ -78,7 +77,6 @@ bench = false [features] default = ["std"] std = [ - "parity-util-mem/std", "merlin?/std", "full_crypto", "log/std", diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 01594ed69e312..8d7b5b2b93354 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -19,7 +19,7 @@ either = { version = "1.5", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } -parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"], optional = true } paste = "1.0" rand = { version = "0.7.2", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/primitives/runtime/src/transaction_validity.rs b/primitives/runtime/src/transaction_validity.rs index 7cc8b70df9f96..4646808b8c8e3 100644 --- a/primitives/runtime/src/transaction_validity.rs +++ b/primitives/runtime/src/transaction_validity.rs @@ -226,9 +226,8 @@ impl From for TransactionValidity { /// Depending on the source we might apply different validation schemes. /// For instance we can disallow specific kinds of transactions if they were not produced /// by our local node (for instance off-chain workers). -#[derive( - Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, parity_util_mem::MallocSizeOf, -)] +#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(parity_util_mem::MallocSizeOf))] pub enum TransactionSource { /// Transaction is already included in block. 
/// From abb217d85f4eb53e176518648ae223b4ca644f70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 9 Nov 2022 23:05:13 +0100 Subject: [PATCH 339/348] GrandpaJustification: Feature gate `Debug` (#12664) The grandpa crate is deriving `Debug` only when the `std` feature is enabled. `RuntimeDebug` can be forced to derive `Debug` also in `no_std` and that doesn't work together. So, we should feature gate `Debug` on `no_std`. --- primitives/finality-grandpa/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index 2adfce1bea241..f1584dc673228 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -129,7 +129,8 @@ pub type CompactCommit
= grandpa::CompactCommit< /// /// This is meant to be stored in the db and passed around the network to other /// nodes, and are used by syncing nodes to prove authority set handoffs. -#[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] +#[derive(Clone, Encode, Decode, PartialEq, Eq)] +#[cfg_attr(feature = "std", derive(Debug))] pub struct GrandpaJustification { pub round: u64, pub commit: Commit
, From 78f8f708781da6fd340ef2a1fc288060a35295ad Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 10 Nov 2022 02:34:00 +0000 Subject: [PATCH 340/348] More testing and fuzzing and docs for pools (#12624) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * move pools fuzzing to hongfuzz * merge more small fixes * fix all tests * Update frame/nomination-pools/fuzzer/src/call.rs Co-authored-by: Gonçalo Pestana * remove transactional * fmt * fix CI * fmt * fix build again * fix CI Co-authored-by: Gonçalo Pestana --- Cargo.lock | 33 +- Cargo.toml | 1 + frame/balances/src/lib.rs | 2 +- frame/nomination-pools/Cargo.toml | 7 +- frame/nomination-pools/fuzzer/Cargo.toml | 33 ++ frame/nomination-pools/fuzzer/src/call.rs | 353 ++++++++++++++++++++ frame/nomination-pools/src/lib.rs | 167 +++++----- frame/nomination-pools/src/mock.rs | 23 +- frame/nomination-pools/src/tests.rs | 379 +++------------------- 9 files changed, 562 insertions(+), 436 deletions(-) create mode 100644 frame/nomination-pools/fuzzer/Cargo.toml create mode 100644 frame/nomination-pools/fuzzer/src/call.rs diff --git a/Cargo.lock b/Cargo.lock index 9fc220e099a5c..a13768a64a4ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2902,13 +2902,14 @@ dependencies = [ [[package]] name = "honggfuzz" -version = "0.5.54" +version = "0.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bea09577d948a98a5f59b7c891e274c4fb35ad52f67782b3d0cb53b9c05301f1" +checksum = "848e9c511092e0daa0a35a63e8e6e475a3e8f870741448b9f6028d69b142f18e" dependencies = [ "arbitrary", "lazy_static", - "memmap", + "memmap2", + "rustc_version 0.4.0", ] [[package]] @@ -4135,16 +4136,6 @@ dependencies = [ "rustix", ] -[[package]] -name = "memmap" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "memmap2" version = "0.5.0" @@ -5735,7 +5726,6 @@ dependencies = [ "log", "pallet-balances", "parity-scale-codec", - "rand 0.8.5", "scale-info", "sp-core", "sp-io", @@ -5769,6 +5759,21 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-nomination-pools-fuzzer" +version = "2.0.0" +dependencies = [ + "frame-support", + "frame-system", + "honggfuzz", + "log", + "pallet-nomination-pools", + "rand 0.8.5", + "sp-io", + "sp-runtime", + "sp-tracing", +] + [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 45aa5f9854d25..4ba2663cdb409 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,6 +116,7 @@ members = [ "frame/preimage", "frame/proxy", "frame/nomination-pools", + "frame/nomination-pools/fuzzer", "frame/nomination-pools/benchmarking", "frame/nomination-pools/test-staking", "frame/nomination-pools/runtime-api", diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 67976cbaf06ac..d3085152eba6c 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -477,7 +477,7 @@ pub mod pallet { VestingBalance, /// Account liquidity restrictions prevent withdrawal LiquidityRestrictions, - /// Balance too low to send value + /// Balance too low to send value. 
InsufficientBalance, /// Value too low to create account due to existential deposit ExistentialDeposit, diff --git a/frame/nomination-pools/Cargo.toml b/frame/nomination-pools/Cargo.toml index 459ee5f440a12..2db0b234b726d 100644 --- a/frame/nomination-pools/Cargo.toml +++ b/frame/nomination-pools/Cargo.toml @@ -26,13 +26,17 @@ sp-core = { version = "6.0.0", default-features = false, path = "../../primitive sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } log = { version = "0.4.0", default-features = false } +# Optional: usef for testing and/or fuzzing +pallet-balances = { version = "4.0.0-dev", path = "../balances", optional = true } +sp-tracing = { version = "5.0.0", path = "../../primitives/tracing", optional = true } + [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } -rand = { version = "0.8.5", features = ["small_rng"] } [features] default = ["std"] +fuzzing = ["pallet-balances", "sp-tracing"] std = [ "codec/std", "scale-info/std", @@ -51,4 +55,3 @@ runtime-benchmarks = [ try-runtime = [ "frame-support/try-runtime" ] -fuzz-test = [] diff --git a/frame/nomination-pools/fuzzer/Cargo.toml b/frame/nomination-pools/fuzzer/Cargo.toml new file mode 100644 index 0000000000000..7dde8733e3f60 --- /dev/null +++ b/frame/nomination-pools/fuzzer/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "pallet-nomination-pools-fuzzer" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Fuzzer for fixed point arithmetic primitives." +documentation = "https://docs.rs/sp-arithmetic-fuzzer" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +honggfuzz = "0.5.54" + +pallet-nomination-pools = { path = "..", features = ["fuzzing"] } + +frame-system = { path = "../../system" } +frame-support = { path = "../../support" } + +sp-runtime = { path = "../../../primitives/runtime" } +sp-io = { path = "../../../primitives/io" } +sp-tracing = { path = "../../../primitives/tracing" } + +rand = { version = "0.8.5", features = ["small_rng"] } +log = "0.4.17" + +[[bin]] +name = "call" +path = "src/call.rs" diff --git a/frame/nomination-pools/fuzzer/src/call.rs b/frame/nomination-pools/fuzzer/src/call.rs new file mode 100644 index 0000000000000..b07903609e8ab --- /dev/null +++ b/frame/nomination-pools/fuzzer/src/call.rs @@ -0,0 +1,353 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Running +//! Running this fuzzer can be done with `cargo hfuzz run call`. `honggfuzz` CLI +//! options can be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. +//! +//! # Debugging a panic +//! 
Once a panic is found, it can be debugged with +//! `cargo hfuzz run-debug per_thing_rational hfuzz_workspace/call/*.fuzz`. + +use frame_support::{ + assert_ok, + traits::{Currency, GetCallName, UnfilteredDispatchable}, +}; +use honggfuzz::fuzz; +use pallet_nomination_pools::{ + log, + mock::*, + pallet as pools, + pallet::{BondedPools, Call as PoolsCall, Event as PoolsEvents, PoolMembers}, + BondExtra, BondedPool, LastPoolId, MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, + MinCreateBond, MinJoinBond, PoolId, +}; +use rand::{seq::SliceRandom, Rng}; +use sp_runtime::{assert_eq_error_rate, Perquintill}; + +const ERA: BlockNumber = 1000; +const MAX_ED_MULTIPLE: Balance = 10_000; +const MIN_ED_MULTIPLE: Balance = 10; + +// not quite elegant, just to make it available in random_signed_origin. +const REWARD_AGENT_ACCOUNT: AccountId = 42; + +/// Grab random accounts, either known ones, or new ones. +fn random_signed_origin(rng: &mut R) -> (RuntimeOrigin, AccountId) { + let count = PoolMembers::::count(); + if rng.gen::() && count > 0 { + // take an existing account. + let skip = rng.gen_range(0..count as usize); + + // this is tricky: the account might be our reward agent, which we never want to be + // randomly chosen here. Try another one, or, if it is only our agent, return a random + // one nonetheless. + let candidate = PoolMembers::::iter_keys().skip(skip).take(1).next().unwrap(); + let acc = + if candidate == REWARD_AGENT_ACCOUNT { rng.gen::() } else { candidate }; + + (RuntimeOrigin::signed(acc), acc) + } else { + // create a new account + let acc = rng.gen::(); + (RuntimeOrigin::signed(acc), acc) + } +} + +fn random_ed_multiple(rng: &mut R) -> Balance { + let multiple = rng.gen_range(MIN_ED_MULTIPLE..MAX_ED_MULTIPLE); + ExistentialDeposit::get() * multiple +} + +fn fund_account(rng: &mut R, account: &AccountId) { + let target_amount = random_ed_multiple(rng); + if let Some(top_up) = target_amount.checked_sub(Balances::free_balance(account)) { + let _ = Balances::deposit_creating(account, top_up); + } + assert!(Balances::free_balance(account) >= target_amount); +} + +fn random_existing_pool(mut rng: &mut R) -> Option { + BondedPools::::iter_keys().collect::>().choose(&mut rng).map(|x| *x) +} + +fn random_call(mut rng: &mut R) -> (pools::Call, RuntimeOrigin) { + let op = rng.gen::(); + let mut op_count = as GetCallName>::get_call_names().len(); + // Exclude set_state, set_metadata, set_configs, update_roles and chill. 
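// What remains are the eight user-facing calls matched below, `join` through `nominate`.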
+ op_count -= 5; + + match op % op_count { + 0 => { + // join + let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); + let (origin, who) = random_signed_origin(&mut rng); + fund_account(&mut rng, &who); + let amount = random_ed_multiple(&mut rng); + (PoolsCall::::join { amount, pool_id }, origin) + }, + 1 => { + // bond_extra + let (origin, who) = random_signed_origin(&mut rng); + let extra = if rng.gen::() { + BondExtra::Rewards + } else { + fund_account(&mut rng, &who); + let amount = random_ed_multiple(&mut rng); + BondExtra::FreeBalance(amount) + }; + (PoolsCall::::bond_extra { extra }, origin) + }, + 2 => { + // claim_payout + let (origin, _) = random_signed_origin(&mut rng); + (PoolsCall::::claim_payout {}, origin) + }, + 3 => { + // unbond + let (origin, who) = random_signed_origin(&mut rng); + let amount = random_ed_multiple(&mut rng); + (PoolsCall::::unbond { member_account: who, unbonding_points: amount }, origin) + }, + 4 => { + // pool_withdraw_unbonded + let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); + let (origin, _) = random_signed_origin(&mut rng); + (PoolsCall::::pool_withdraw_unbonded { pool_id, num_slashing_spans: 0 }, origin) + }, + 5 => { + // withdraw_unbonded + let (origin, who) = random_signed_origin(&mut rng); + ( + PoolsCall::::withdraw_unbonded { member_account: who, num_slashing_spans: 0 }, + origin, + ) + }, + 6 => { + // create + let (origin, who) = random_signed_origin(&mut rng); + let amount = random_ed_multiple(&mut rng); + fund_account(&mut rng, &who); + let root = who; + let state_toggler = who; + let nominator = who; + (PoolsCall::::create { amount, root, state_toggler, nominator }, origin) + }, + 7 => { + // nominate + let (origin, _) = random_signed_origin(&mut rng); + let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); + let validators = Default::default(); + (PoolsCall::::nominate { pool_id, validators }, origin) + }, + _ => unreachable!(), + } +} + +#[derive(Default)] +struct RewardAgent { + who: AccountId, + pool_id: Option, + expected_reward: Balance, +} + +// TODO: inject some slashes into the game. +impl RewardAgent { + fn new(who: AccountId) -> Self { + Self { who, ..Default::default() } + } + + fn join(&mut self) { + if self.pool_id.is_some() { + return + } + let pool_id = LastPoolId::::get(); + let amount = 10 * ExistentialDeposit::get(); + let origin = RuntimeOrigin::signed(self.who); + let _ = Balances::deposit_creating(&self.who, 10 * amount); + self.pool_id = Some(pool_id); + log::info!(target: "reward-agent", "🤖 reward agent joining in {} with {}", pool_id, amount); + assert_ok!(PoolsCall::join:: { amount, pool_id }.dispatch_bypass_filter(origin)); + } + + fn claim_payout(&mut self) { + // 10 era later, we claim our payout. We expect our income to be roughly what we + // calculated. 
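// Nothing to assert if the agent has not managed to join a pool yet.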
+ if !PoolMembers::::contains_key(&self.who) { + log!(warn, "reward agent is not in the pool yet, cannot claim"); + return + } + let pre = Balances::free_balance(&42); + let origin = RuntimeOrigin::signed(42); + assert_ok!(PoolsCall::::claim_payout {}.dispatch_bypass_filter(origin)); + let post = Balances::free_balance(&42); + + let income = post - pre; + log::info!( + target: "reward-agent", "🤖 CLAIM: actual: {}, expected: {}", + income, + self.expected_reward, + ); + assert_eq_error_rate!(income, self.expected_reward, 10); + self.expected_reward = 0; + } +} + +fn main() { + let mut reward_agent = RewardAgent::new(REWARD_AGENT_ACCOUNT); + sp_tracing::try_init_simple(); + let mut ext = sp_io::TestExternalities::new_empty(); + let mut events_histogram = Vec::<(PoolsEvents, u32)>::default(); + let mut iteration = 0 as BlockNumber; + let mut ok = 0; + let mut err = 0; + + let dot: Balance = (10 as Balance).pow(10); + ExistentialDeposit::set(dot); + BondingDuration::set(8); + + ext.execute_with(|| { + MaxPoolMembers::::set(Some(10_000)); + MaxPoolMembersPerPool::::set(Some(1000)); + MaxPools::::set(Some(1_000)); + + MinCreateBond::::set(10 * ExistentialDeposit::get()); + MinJoinBond::::set(5 * ExistentialDeposit::get()); + System::set_block_number(1); + }); + + loop { + fuzz!(|seed: [u8; 32]| { + use ::rand::{rngs::SmallRng, SeedableRng}; + let mut rng = SmallRng::from_seed(seed); + + ext.execute_with(|| { + let (call, origin) = random_call(&mut rng); + let outcome = call.clone().dispatch_bypass_filter(origin.clone()); + iteration += 1; + match outcome { + Ok(_) => ok += 1, + Err(_) => err += 1, + }; + + log!( + trace, + "iteration {}, call {:?}, origin {:?}, outcome: {:?}, so far {} ok {} err", + iteration, + call, + origin, + outcome, + ok, + err, + ); + + // possibly join the reward_agent + if iteration > ERA / 2 && BondedPools::::count() > 0 { + reward_agent.join(); + } + // and possibly roughly every 4 era, trigger payout for the agent. Doing this more + // frequent is also harmless. + if rng.gen_range(0..(4 * ERA)) == 0 { + reward_agent.claim_payout(); + } + + // execute sanity checks at a fixed interval, possibly on every block. + if iteration % + (std::env::var("SANITY_CHECK_INTERVAL") + .ok() + .and_then(|x| x.parse::().ok())) + .unwrap_or(1) == 0 + { + log!(info, "running sanity checks at {}", iteration); + Pools::do_try_state(u8::MAX).unwrap(); + } + + // collect and reset events. + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let pallet_nomination_pools::mock::RuntimeEvent::Pools(inner) = e { + Some(inner) + } else { + None + } + }) + .for_each(|e| { + if let Some((_, c)) = events_histogram + .iter_mut() + .find(|(x, _)| std::mem::discriminant(x) == std::mem::discriminant(&e)) + { + *c += 1; + } else { + events_histogram.push((e, 1)) + } + }); + System::reset_events(); + + // trigger an era change, and check the status of the reward agent. + if iteration % ERA == 0 { + CurrentEra::mutate(|c| *c += 1); + BondedPools::::iter().for_each(|(id, _)| { + let amount = random_ed_multiple(&mut rng); + let _ = + Balances::deposit_creating(&Pools::create_reward_account(id), amount); + // if we just paid out the reward agent, let's calculate how much we expect + // our reward agent to have earned. 
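// The expectation is pro-rata: the agent's points over the pool's total points, applied to
// whatever was just deposited into the pool's reward account.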
+ if reward_agent.pool_id.map_or(false, |mid| mid == id) { + let all_points = BondedPool::::get(id).map(|p| p.points).unwrap(); + let member_points = + PoolMembers::::get(reward_agent.who).map(|m| m.points).unwrap(); + let agent_share = Perquintill::from_rational(member_points, all_points); + log::info!( + target: "reward-agent", + "🤖 REWARD = amount = {:?}, ratio: {:?}, share {:?}", + amount, + agent_share, + agent_share * amount, + ); + reward_agent.expected_reward += agent_share * amount; + } + }); + + log!( + info, + "iteration {}, {} pools, {} members, {} ok {} err, events = {:?}", + iteration, + BondedPools::::count(), + PoolMembers::::count(), + ok, + err, + events_histogram + .iter() + .map(|(x, c)| ( + format!("{:?}", x) + .split(" ") + .map(|x| x.to_string()) + .collect::>() + .first() + .cloned() + .unwrap(), + c, + )) + .collect::>(), + ); + } + }) + }) + } +} diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 3661a7f70b48a..9ca9539b3dca8 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -24,9 +24,10 @@ //! //! * [Key terms](#key-terms) //! * [Usage](#usage) +//! * [Implementor's Guide](#implementors-guide) //! * [Design](#design) //! -//! ## Key terms +//! ## Key Terms //! //! * pool id: A unique identifier of each pool. Set to u32. //! * bonded pool: Tracks the distribution of actively staked funds. See [`BondedPool`] and @@ -88,7 +89,11 @@ //! in the aforementioned range of eras will be affected by the slash. A member is slashed pro-rata //! based on its stake relative to the total slash amount. //! -//! For design docs see the [slashing](#slashing) section. +//! Slashing does not change any single member's balance. Instead, the slash will only reduce the +//! balance associated with a particular pool. But, we never change the total *points* of a pool +//! because of slashing. Therefore, when a slash happens, the ratio of points to balance changes in +//! a pool. In other words, the value of one point, which is initially 1-to-1 against a unit of +//! balance, is now less than one balance because of the slash. //! //! ### Administration //! @@ -96,6 +101,10 @@ //! user must call [`Call::nominate`] to start nominating. [`Call::nominate`] can be called at //! anytime to update validator selection. //! +//! Similar to [`Call::nominate`], [`Call::chill`] will chill to pool in the staking system, and +//! [`Call::pool_withdraw_unbonded`] will withdraw any unbonding chunks of the pool bonded account. +//! The latter call is permissionless and can be called by anyone at any time. +//! //! To help facilitate pool administration the pool has one of three states (see [`PoolState`]): //! //! * Open: Anyone can join the pool and no members can be permissionlessly removed. @@ -121,10 +130,52 @@ //! //! 1. First, all members need to fully unbond and withdraw. If the pool state is set to //! `Destroying`, this can happen permissionlessly. -//! 2. The depositor itself fully unbonds and withdraws. Note that at this point, based on the -//! requirements of the staking system, the pool's bonded account's stake might not be able to ge -//! below a certain threshold as a nominator. At this point, the pool should `chill` itself to -//! allow the depositor to leave. +//! 2. The depositor itself fully unbonds and withdraws. +//! +//! > Note that at this point, based on the requirements of the staking system, the pool's bonded +//! > account's stake might not be able to ge below a certain threshold as a nominator. 
At this +//! > point, the pool should `chill` itself to allow the depositor to leave. See [`Call::chill`]. +//! +//! ## Implementor's Guide +//! +//! Some notes and common mistakes that wallets/apps wishing to implement this pallet should be +//! aware of: +//! +//! +//! ### Pool Members +//! +//! * In general, whenever a pool member changes their total point, the chain will automatically +//! claim all their pending rewards for them. This is not optional, and MUST happen for the reward +//! calculation to remain correct (see the documentation of `bond` as an example). So, make sure +//! you are warning your users about it. They might be surprised if they see that they bonded an +//! extra 100 DOTs, and now suddenly their 5.23 DOTs in pending reward is gone. It is not gone, it +//! has been paid out to you! +//! * Joining a pool implies transferring funds to the pool account. So it might be (based on which +//! wallet that you are using) that you no longer see the funds that are moved to the pool in your +//! “free balance” section. Make sure the user is aware of this, and not surprised by seeing this. +//! Also, the transfer that happens here is configured to to never accidentally destroy the sender +//! account. So to join a Pool, your sender account must remain alive with 1 DOT left in it. This +//! means, with 1 DOT as existential deposit, and 1 DOT as minimum to join a pool, you need at +//! least 2 DOT to join a pool. Consequently, if you are suggesting members to join a pool with +//! “Maximum possible value”, you must subtract 1 DOT to remain in the sender account to not +//! accidentally kill it. +//! * Points and balance are not the same! Any pool member, at any point in time, can have points in +//! either the bonded pool or any of the unbonding pools. The crucial fact is that in any of these +//! pools, the ratio of point to balance is different and might not be 1. Each pool starts with a +//! ratio of 1, but as time goes on, for reasons such as slashing, the ratio gets broken. Over +//! time, 100 points in a bonded pool can be worth 90 DOTs. Make sure you are either representing +//! points as points (not as DOTs), or even better, always display both: “You have x points in +//! pool y which is worth z DOTs”. See here and here for examples of how to calculate point to +//! balance ratio of each pool (it is almost trivial ;)) +//! +//! ### Pool Management +//! +//! * The pool will be seen from the perspective of the rest of the system as a single nominator. +//! Ergo, This nominator must always respect the `staking.minNominatorBond` limit. Similar to a +//! normal nominator, who has to first `chill` before fully unbonding, the pool must also do the +//! same. The pool’s bonded account will be fully unbonded only when the depositor wants to leave +//! and dismantle the pool. All that said, the message is: the depositor can only leave the chain +//! when they chill the pool first. //! //! ## Design //! 
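To make the point-to-balance bookkeeping described above concrete, here is a minimal,
self-contained sketch (an illustration only, not pallet code; inside the pallet the conversion is
done by `BondedPool::points_to_balance`):

// Each point is worth `pool_balance / pool_points`; a freshly created pool starts at 1:1.
fn points_to_balance(pool_balance: u128, pool_points: u128, member_points: u128) -> u128 {
    member_points * pool_balance / pool_points
}

fn main() {
    // 100 units were bonded for 100 points, then a slash cut the pool's bonded balance to 90.
    // The points are untouched, so one point is now worth less than one unit of balance.
    let (pool_points, pool_balance_after_slash) = (100_u128, 90_u128);
    // A member holding 10 points can therefore redeem at most 9 units.
    assert_eq!(points_to_balance(pool_balance_after_slash, pool_points, 10), 9);
}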
@@ -277,14 +328,13 @@ use frame_support::{ Currency, Defensive, DefensiveOption, DefensiveResult, DefensiveSaturating, ExistenceRequirement, Get, }, - transactional, CloneNoBound, DefaultNoBound, RuntimeDebugNoBound, + DefaultNoBound, }; use scale_info::TypeInfo; use sp_core::U256; use sp_runtime::{ traits::{ - AccountIdConversion, Bounded, CheckedAdd, CheckedSub, Convert, Saturating, StaticLookup, - Zero, + AccountIdConversion, CheckedAdd, CheckedSub, Convert, Saturating, StaticLookup, Zero, }, FixedPointNumber, }; @@ -299,14 +349,14 @@ pub const LOG_TARGET: &'static str = "runtime::nomination-pools"; macro_rules! log { ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { log::$level!( - target: crate::LOG_TARGET, + target: $crate::LOG_TARGET, concat!("[{:?}] 🏊‍♂️ ", $patter), >::block_number() $(, $values)* ) }; } -#[cfg(test)] -mod mock; +#[cfg(any(test, feature = "fuzzing"))] +pub mod mock; #[cfg(test)] mod tests; @@ -322,8 +372,6 @@ pub type BalanceOf = /// Type used for unique identifier of each pool. pub type PoolId = u32; -type UnbondingPoolsWithEra = BoundedBTreeMap, TotalUnbondingPools>; - type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; pub const POINTS_TO_BALANCE_INIT_RATIO: u32 = 1; @@ -398,16 +446,12 @@ impl PoolMember { // // rc * 10^20 / 10^18 = rc * 100 // - // meaning that as long as reward_counter's value is less than 1/100th of its max capacity - // (u128::MAX_VALUE), `checked_mul_int` won't saturate. - // - // given the nature of reward counter being 'pending_rewards / pool_total_point', the only - // (unrealistic) way that super high values can be achieved is for a pool to suddenly - // receive massive rewards with a very very small amount of stake. In all normal pools, as - // the points increase, so does the rewards. Moreover, as long as rewards are not - // accumulated for astronomically large durations, - // `current_reward_counter.defensive_saturating_sub(self.last_recorded_reward_counter)` - // won't be extremely big. + // the implementation of `multiply_by_rational_with_rounding` shows that it will only fail + // if the final division is not enough to fit in u128. In other words, if `rc * 100` is more + // than u128::max. Given that RC is interpreted as reward per unit of point, and unit of + // point is equal to balance (normally), and rewards are usually a proportion of the points + // in the pool, the likelihood of rc reaching near u128::MAX is near impossible. + (current_reward_counter.defensive_saturating_sub(self.last_recorded_reward_counter)) .checked_mul_int(self.active_points()) .ok_or(Error::::OverflowRisk) @@ -417,7 +461,7 @@ impl PoolMember { /// /// This is derived from the ratio of points in the pool to which the member belongs to. /// Might return different values based on the pool state for the same member and points. - fn active_stake(&self) -> BalanceOf { + fn active_balance(&self) -> BalanceOf { if let Some(pool) = BondedPool::::get(self.pool_id).defensive() { pool.points_to_balance(self.points) } else { @@ -594,7 +638,7 @@ impl BondedPool { } /// Get [`Self`] from storage. Returns `None` if no entry for `pool_account` exists. - fn get(id: PoolId) -> Option { + pub fn get(id: PoolId) -> Option { BondedPools::::try_get(id).ok().map(|inner| Self { id, inner }) } @@ -734,7 +778,7 @@ impl BondedPool { /// Whether or not the pool is ok to be in `PoolSate::Open`. If this returns an `Err`, then the /// pool is unrecoverable and should be in the destroying state. 
- fn ok_to_be_open(&self, new_funds: BalanceOf) -> Result<(), DispatchError> { + fn ok_to_be_open(&self) -> Result<(), DispatchError> { ensure!(!self.is_destroying(), Error::::CanNotChangeState); let bonded_balance = @@ -755,12 +799,6 @@ impl BondedPool { points_to_balance_ratio_floor < max_points_to_balance.into(), Error::::OverflowRisk ); - // while restricting the balance to `max_points_to_balance` of max total issuance, - let next_bonded_balance = bonded_balance.saturating_add(new_funds); - ensure!( - next_bonded_balance < BalanceOf::::max_value().div(max_points_to_balance.into()), - Error::::OverflowRisk - ); // then we can be decently confident the bonding pool points will not overflow // `BalanceOf`. Note that these are just heuristics. @@ -769,9 +807,9 @@ impl BondedPool { } /// Check that the pool can accept a member with `new_funds`. - fn ok_to_join(&self, new_funds: BalanceOf) -> Result<(), DispatchError> { + fn ok_to_join(&self) -> Result<(), DispatchError> { ensure!(self.state == PoolState::Open, Error::::NotOpen); - self.ok_to_be_open(new_funds)?; + self.ok_to_be_open()?; Ok(()) } @@ -791,7 +829,7 @@ impl BondedPool { target_member.active_points().saturating_sub(unbonding_points); let mut target_member_after_unbond = (*target_member).clone(); target_member_after_unbond.points = new_depositor_points; - target_member_after_unbond.active_stake() + target_member_after_unbond.active_balance() }; // any partial unbonding is only ever allowed if this unbond is permissioned. @@ -1073,7 +1111,7 @@ pub struct SubPools { /// older then `current_era - TotalUnbondingPools`. no_era: UnbondPool, /// Map of era in which a pool becomes unbonded in => unbond pools. - with_era: UnbondingPoolsWithEra, + with_era: BoundedBTreeMap, TotalUnbondingPools>, } impl SubPools { @@ -1105,7 +1143,7 @@ impl SubPools { } /// The sum of all unbonding balance, regardless of whether they are actually unlocked or not. - #[cfg(any(feature = "try-runtime", test, debug_assertions))] + #[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] fn sum_unbonding_balance(&self) -> BalanceOf { self.no_era.balance.saturating_add( self.with_era @@ -1122,8 +1160,8 @@ pub struct TotalUnbondingPools(PhantomData); impl Get for TotalUnbondingPools { fn get() -> u32 { // NOTE: this may be dangerous in the scenario bonding_duration gets decreased because - // we would no longer be able to decode `UnbondingPoolsWithEra`, which uses - // `TotalUnbondingPools` as the bound + // we would no longer be able to decode `BoundedBTreeMap::, + // TotalUnbondingPools>`, which uses `TotalUnbondingPools` as the bound T::Staking::bonding_duration() + T::PostUnbondingPoolsWindow::get() } } @@ -1469,7 +1507,6 @@ pub mod pallet { /// `existential deposit + amount` in their account. 
/// * Only a pool with [`PoolState::Open`] can be joined #[pallet::weight(T::WeightInfo::join())] - #[transactional] pub fn join( origin: OriginFor, #[pallet::compact] amount: BalanceOf, @@ -1482,7 +1519,7 @@ pub mod pallet { ensure!(!PoolMembers::::contains_key(&who), Error::::AccountBelongsToOtherPool); let mut bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; - bonded_pool.ok_to_join(amount)?; + bonded_pool.ok_to_join()?; let mut reward_pool = RewardPools::::get(pool_id) .defensive_ok_or::>(DefensiveError::RewardPoolNotFound.into())?; @@ -1530,7 +1567,6 @@ pub mod pallet { T::WeightInfo::bond_extra_transfer() .max(T::WeightInfo::bond_extra_reward()) )] - #[transactional] pub fn bond_extra(origin: OriginFor, extra: BondExtra>) -> DispatchResult { let who = ensure_signed(origin)?; let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&who)?; @@ -1538,10 +1574,6 @@ pub mod pallet { // payout related stuff: we must claim the payouts, and updated recorded payout data // before updating the bonded pool points, similar to that of `join` transaction. reward_pool.update_records(bonded_pool.id, bonded_pool.points)?; - // TODO: optimize this to not touch the free balance of `who ` at all in benchmarks. - // Currently, bonding rewards is like a batch. In the same PR, also make this function - // take a boolean argument that make it either 100% pure (no storage update), or make it - // also emit event and do the transfer. #11671 let claimed = Self::do_reward_payout(&who, &mut member, &mut bonded_pool, &mut reward_pool)?; @@ -1552,8 +1584,9 @@ pub mod pallet { (bonded_pool.try_bond_funds(&who, claimed, BondType::Later)?, claimed), }; - bonded_pool.ok_to_be_open(bonded)?; - member.points = member.points.saturating_add(points_issued); + bonded_pool.ok_to_be_open()?; + member.points = + member.points.checked_add(&points_issued).ok_or(Error::::OverflowRisk)?; Self::deposit_event(Event::::Bonded { member: who.clone(), @@ -1573,7 +1606,6 @@ pub mod pallet { /// The member will earn rewards pro rata based on the members stake vs the sum of the /// members in the pools stake. Rewards do not "expire". #[pallet::weight(T::WeightInfo::claim_payout())] - #[transactional] pub fn claim_payout(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&who)?; @@ -1613,7 +1645,6 @@ pub mod pallet { /// there are too many unlocking chunks, the result of this call will likely be the /// `NoMoreChunks` error from the staking system. #[pallet::weight(T::WeightInfo::unbond())] - #[transactional] pub fn unbond( origin: OriginFor, member_account: AccountIdLookupOf, @@ -1689,7 +1720,6 @@ pub mod pallet { /// would probably see an error like `NoMoreChunks` emitted from the staking system when /// they attempt to unbond. 
#[pallet::weight(T::WeightInfo::pool_withdraw_unbonded(*num_slashing_spans))] - #[transactional] pub fn pool_withdraw_unbonded( origin: OriginFor, pool_id: PoolId, @@ -1726,7 +1756,6 @@ pub mod pallet { #[pallet::weight( T::WeightInfo::withdraw_unbonded_kill(*num_slashing_spans) )] - #[transactional] pub fn withdraw_unbonded( origin: OriginFor, member_account: AccountIdLookupOf, @@ -1749,7 +1778,7 @@ pub mod pallet { let withdrawn_points = member.withdraw_unlocked(current_era); ensure!(!withdrawn_points.is_empty(), Error::::CannotWithdrawAny); - // Before calculate the `balance_to_unbond`, with call withdraw unbonded to ensure the + // Before calculating the `balance_to_unbond`, we call withdraw unbonded to ensure the // `transferrable_balance` is correct. let stash_killed = T::Staking::withdraw_unbonded(bonded_pool.bonded_account(), num_slashing_spans)?; @@ -1778,13 +1807,13 @@ pub mod pallet { accumulator.saturating_add(sub_pools.no_era.dissolve(*unlocked_points)) } }) - // A call to this function may cause the pool's stash to get dusted. If this happens - // before the last member has withdrawn, then all subsequent withdraws will be 0. - // However the unbond pools do no get updated to reflect this. In the aforementioned - // scenario, this check ensures we don't try to withdraw funds that don't exist. - // This check is also defensive in cases where the unbond pool does not update its - // balance (e.g. a bug in the slashing hook.) We gracefully proceed in order to - // ensure members can leave the pool and it can be destroyed. + // A call to this transaction may cause the pool's stash to get dusted. If this + // happens before the last member has withdrawn, then all subsequent withdraws will + // be 0. However the unbond pools do no get updated to reflect this. In the + // aforementioned scenario, this check ensures we don't try to withdraw funds that + // don't exist. This check is also defensive in cases where the unbond pool does not + // update its balance (e.g. a bug in the slashing hook.) We gracefully proceed in + // order to ensure members can leave the pool and it can be destroyed. .min(bonded_pool.transferrable_balance()); T::Currency::transfer( @@ -1846,7 +1875,6 @@ pub mod pallet { /// In addition to `amount`, the caller will transfer the existential deposit; so the caller /// needs at have at least `amount + existential_deposit` transferrable. #[pallet::weight(T::WeightInfo::create())] - #[transactional] pub fn create( origin: OriginFor, #[pallet::compact] amount: BalanceOf, @@ -1871,7 +1899,6 @@ pub mod pallet { /// same as `create` with the inclusion of /// * `pool_id` - `A valid PoolId. #[pallet::weight(T::WeightInfo::create())] - #[transactional] pub fn create_with_pool_id( origin: OriginFor, #[pallet::compact] amount: BalanceOf, @@ -1896,7 +1923,6 @@ pub mod pallet { /// This directly forward the call to the staking pallet, on behalf of the pool bonded /// account. #[pallet::weight(T::WeightInfo::nominate(validators.len() as u32))] - #[transactional] pub fn nominate( origin: OriginFor, pool_id: PoolId, @@ -1919,7 +1945,6 @@ pub mod pallet { /// 2. if the pool conditions to be open are NOT met (as described by `ok_to_be_open`), and /// then the state of the pool can be permissionlessly changed to `Destroying`. 
#[pallet::weight(T::WeightInfo::set_state())] - #[transactional] pub fn set_state( origin: OriginFor, pool_id: PoolId, @@ -1931,9 +1956,7 @@ pub mod pallet { if bonded_pool.can_toggle_state(&who) { bonded_pool.set_state(state); - } else if bonded_pool.ok_to_be_open(Zero::zero()).is_err() && - state == PoolState::Destroying - { + } else if bonded_pool.ok_to_be_open().is_err() && state == PoolState::Destroying { // If the pool has bad properties, then anyone can set it as destroying bonded_pool.set_state(PoolState::Destroying); } else { @@ -1950,7 +1973,6 @@ pub mod pallet { /// The dispatch origin of this call must be signed by the state toggler, or the root role /// of the pool. #[pallet::weight(T::WeightInfo::set_metadata(metadata.len() as u32))] - #[transactional] pub fn set_metadata( origin: OriginFor, pool_id: PoolId, @@ -1982,7 +2004,6 @@ pub mod pallet { /// * `max_members` - Set [`MaxPoolMembers`]. /// * `max_members_per_pool` - Set [`MaxPoolMembersPerPool`]. #[pallet::weight(T::WeightInfo::set_configs())] - #[transactional] pub fn set_configs( origin: OriginFor, min_join_bond: ConfigOp>, @@ -2019,7 +2040,6 @@ pub mod pallet { /// It emits an event, notifying UIs of the role change. This event is quite relevant to /// most pool members and they should be informed of changes to pool roles. #[pallet::weight(T::WeightInfo::update_roles())] - #[transactional] pub fn update_roles( origin: OriginFor, pool_id: PoolId, @@ -2072,7 +2092,6 @@ pub mod pallet { /// This directly forward the call to the staking pallet, on behalf of the pool bonded /// account. #[pallet::weight(T::WeightInfo::chill())] - #[transactional] pub fn chill(origin: OriginFor, pool_id: PoolId) -> DispatchResult { let who = ensure_signed(origin)?; let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; @@ -2297,6 +2316,8 @@ impl Pallet { &bonded_pool.reward_account(), &member_account, pending_rewards, + // defensive: the depositor has put existential deposit into the pool and it stays + // untouched, reward account shall not die. ExistenceRequirement::AllowDeath, )?; @@ -2414,7 +2435,7 @@ impl Pallet { /// To cater for tests that want to escape parts of these checks, this function is split into /// multiple `level`s, where the higher the level, the more checks we performs. So, /// `try_state(255)` is the strongest sanity check, and `0` performs no checks. - #[cfg(any(feature = "try-runtime", test, debug_assertions))] + #[cfg(any(feature = "try-runtime", feature = "fuzzing", test, debug_assertions))] pub fn do_try_state(level: u8) -> Result<(), &'static str> { if level.is_zero() { return Ok(()) diff --git a/frame/nomination-pools/src/mock.rs b/frame/nomination-pools/src/mock.rs index e1af456e31fd4..99d521df3241b 100644 --- a/frame/nomination-pools/src/mock.rs +++ b/frame/nomination-pools/src/mock.rs @@ -259,44 +259,45 @@ impl Default for ExtBuilder { } } +#[cfg_attr(feature = "fuzzing", allow(dead_code))] impl ExtBuilder { // Add members to pool 0. 
- pub(crate) fn add_members(mut self, members: Vec<(AccountId, Balance)>) -> Self { + pub fn add_members(mut self, members: Vec<(AccountId, Balance)>) -> Self { self.members = members; self } - pub(crate) fn ed(self, ed: Balance) -> Self { + pub fn ed(self, ed: Balance) -> Self { ExistentialDeposit::set(ed); self } - pub(crate) fn min_bond(self, min: Balance) -> Self { + pub fn min_bond(self, min: Balance) -> Self { StakingMinBond::set(min); self } - pub(crate) fn min_join_bond(self, min: Balance) -> Self { + pub fn min_join_bond(self, min: Balance) -> Self { MinJoinBondConfig::set(min); self } - pub(crate) fn with_check(self, level: u8) -> Self { + pub fn with_check(self, level: u8) -> Self { CheckLevel::set(level); self } - pub(crate) fn max_members(mut self, max: Option) -> Self { + pub fn max_members(mut self, max: Option) -> Self { self.max_members = max; self } - pub(crate) fn max_members_per_pool(mut self, max: Option) -> Self { + pub fn max_members_per_pool(mut self, max: Option) -> Self { self.max_members_per_pool = max; self } - pub(crate) fn build(self) -> sp_io::TestExternalities { + pub fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -339,7 +340,7 @@ impl ExtBuilder { } } -pub(crate) fn unsafe_set_state(pool_id: PoolId, state: PoolState) { +pub fn unsafe_set_state(pool_id: PoolId, state: PoolState) { BondedPools::::try_mutate(pool_id, |maybe_bonded_pool| { maybe_bonded_pool.as_mut().ok_or(()).map(|bonded_pool| { bonded_pool.state = state; @@ -354,7 +355,7 @@ parameter_types! { } /// All events of this pallet. -pub(crate) fn pool_events_since_last_call() -> Vec> { +pub fn pool_events_since_last_call() -> Vec> { let events = System::events() .into_iter() .map(|r| r.event) @@ -366,7 +367,7 @@ pub(crate) fn pool_events_since_last_call() -> Vec> { } /// All events of the `Balances` pallet. -pub(crate) fn balances_events_since_last_call() -> Vec> { +pub fn balances_events_since_last_call() -> Vec> { let events = System::events() .into_iter() .map(|r| r.event) diff --git a/frame/nomination-pools/src/tests.rs b/frame/nomination-pools/src/tests.rs index aadd4871d737e..7d5d418bbf2c8 100644 --- a/frame/nomination-pools/src/tests.rs +++ b/frame/nomination-pools/src/tests.rs @@ -25,7 +25,7 @@ macro_rules! unbonding_pools_with_era { ($($k:expr => $v:expr),* $(,)?) 
=> {{ use sp_std::iter::{Iterator, IntoIterator}; let not_bounded: BTreeMap<_, _> = Iterator::collect(IntoIterator::into_iter([$(($k, $v),)*])); - UnbondingPoolsWithEra::try_from(not_bounded).unwrap() + BoundedBTreeMap::, TotalUnbondingPools>::try_from(not_bounded).unwrap() }}; } @@ -213,31 +213,30 @@ mod bonded_pool { // Simulate a 100% slashed pool StakingMock::set_bonded_balance(pool.bonded_account(), 0); - assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); + assert_noop!(pool.ok_to_join(), Error::::OverflowRisk); // Simulate a slashed pool at `MaxPointsToBalance` + 1 slashed pool StakingMock::set_bonded_balance( pool.bonded_account(), max_points_to_balance.saturating_add(1).into(), ); - assert_ok!(pool.ok_to_join(0)); + assert_ok!(pool.ok_to_join()); // Simulate a slashed pool at `MaxPointsToBalance` StakingMock::set_bonded_balance(pool.bonded_account(), max_points_to_balance); - assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); + assert_noop!(pool.ok_to_join(), Error::::OverflowRisk); StakingMock::set_bonded_balance( pool.bonded_account(), Balance::MAX / max_points_to_balance, ); - // New bonded balance would be over threshold of Balance type - assert_noop!(pool.ok_to_join(0), Error::::OverflowRisk); + // and a sanity check StakingMock::set_bonded_balance( pool.bonded_account(), Balance::MAX / max_points_to_balance - 1, ); - assert_ok!(pool.ok_to_join(0)); + assert_ok!(pool.ok_to_join()); }); } } @@ -437,7 +436,7 @@ mod join { roles: DEFAULT_ROLES, }, }; - ExtBuilder::default().build_and_execute(|| { + ExtBuilder::default().with_check(0).build_and_execute(|| { // Given Balances::make_free_balance_be(&11, ExistentialDeposit::get() + 2); assert!(!PoolMembers::::contains_key(&11)); @@ -545,7 +544,7 @@ mod join { // Balance needs to be gt Balance::MAX / `MaxPointsToBalance` assert_noop!( Pools::join(RuntimeOrigin::signed(11), 5, 123), - Error::::OverflowRisk + pallet_balances::Error::::InsufficientBalance, ); StakingMock::set_bonded_balance(Pools::create_bonded_account(1), max_points_to_balance); @@ -4283,7 +4282,7 @@ mod set_state { fn set_state_works() { ExtBuilder::default().build_and_execute(|| { // Given - assert_ok!(BondedPool::::get(1).unwrap().ok_to_be_open(0)); + assert_ok!(BondedPool::::get(1).unwrap().ok_to_be_open()); // Only the root and state toggler can change the state when the pool is ok to be open. assert_noop!( @@ -4831,6 +4830,41 @@ mod reward_counter_precision { }) } + #[test] + fn massive_reward_in_small_pool() { + let tiny_bond = 1000 * DOT; + ExtBuilder::default().ed(DOT).min_bond(tiny_bond).build_and_execute(|| { + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id: 1 }, + Event::Bonded { member: 10, pool_id: 1, bonded: 10000000000000, joined: true } + ] + ); + + Balances::make_free_balance_be(&20, tiny_bond); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), tiny_bond / 2, 1)); + + // Suddenly, add a shit ton of rewards. + assert_ok!( + Balances::mutate_account(&default_reward_account(), |a| a.free += inflation(1)) + ); + + // now claim. 
+ assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(20))); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Bonded { member: 20, pool_id: 1, bonded: 5000000000000, joined: true }, + Event::PaidOut { member: 10, pool_id: 1, payout: 7333333333333333333 }, + Event::PaidOut { member: 20, pool_id: 1, payout: 3666666666666666666 } + ] + ); + }) + } + #[test] fn reward_counter_calc_wont_fail_in_normal_polkadot_future() { // create a pool that has roughly half of the polkadot issuance in 10 years. @@ -5066,328 +5100,3 @@ mod reward_counter_precision { }); } } - -// NOTE: run this with debug_assertions, but in release mode. -#[cfg(feature = "fuzz-test")] -mod fuzz_test { - use super::*; - use crate::pallet::{Call as PoolsCall, Event as PoolsEvents}; - use frame_support::traits::UnfilteredDispatchable; - use rand::{seq::SliceRandom, thread_rng, Rng}; - use sp_runtime::{assert_eq_error_rate, Perquintill}; - - const ERA: BlockNumber = 1000; - const MAX_ED_MULTIPLE: Balance = 10_000; - const MIN_ED_MULTIPLE: Balance = 10; - - // not quite elegant, just to make it available in random_signed_origin. - const REWARD_AGENT_ACCOUNT: AccountId = 42; - - /// Grab random accounts, either known ones, or new ones. - fn random_signed_origin(rng: &mut R) -> (RuntimeOrigin, AccountId) { - let count = PoolMembers::::count(); - if rng.gen::() && count > 0 { - // take an existing account. - let skip = rng.gen_range(0..count as usize); - - // this is tricky: the account might be our reward agent, which we never want to be - // randomly chosen here. Try another one, or, if it is only our agent, return a random - // one nonetheless. - let candidate = PoolMembers::::iter_keys().skip(skip).take(1).next().unwrap(); - let acc = - if candidate == REWARD_AGENT_ACCOUNT { rng.gen::() } else { candidate }; - - (RuntimeOrigin::signed(acc), acc) - } else { - // create a new account - let acc = rng.gen::(); - (RuntimeOrigin::signed(acc), acc) - } - } - - fn random_ed_multiple(rng: &mut R) -> Balance { - let multiple = rng.gen_range(MIN_ED_MULTIPLE..MAX_ED_MULTIPLE); - ExistentialDeposit::get() * multiple - } - - fn fund_account(rng: &mut R, account: &AccountId) { - let target_amount = random_ed_multiple(rng); - if let Some(top_up) = target_amount.checked_sub(Balances::free_balance(account)) { - let _ = Balances::deposit_creating(account, top_up); - } - assert!(Balances::free_balance(account) >= target_amount); - } - - fn random_existing_pool(mut rng: &mut R) -> Option { - BondedPools::::iter_keys().collect::>().choose(&mut rng).map(|x| *x) - } - - fn random_call(mut rng: &mut R) -> (crate::pallet::Call, RuntimeOrigin) { - let op = rng.gen::(); - let mut op_count = - as frame_support::dispatch::GetCallName>::get_call_names() - .len(); - // Exclude set_state, set_metadata, set_configs, update_roles and chill. 
- op_count -= 5; - - match op % op_count { - 0 => { - // join - let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); - let (origin, who) = random_signed_origin(&mut rng); - fund_account(&mut rng, &who); - let amount = random_ed_multiple(&mut rng); - (PoolsCall::::join { amount, pool_id }, origin) - }, - 1 => { - // bond_extra - let (origin, who) = random_signed_origin(&mut rng); - let extra = if rng.gen::() { - BondExtra::Rewards - } else { - fund_account(&mut rng, &who); - let amount = random_ed_multiple(&mut rng); - BondExtra::FreeBalance(amount) - }; - (PoolsCall::::bond_extra { extra }, origin) - }, - 2 => { - // claim_payout - let (origin, _) = random_signed_origin(&mut rng); - (PoolsCall::::claim_payout {}, origin) - }, - 3 => { - // unbond - let (origin, who) = random_signed_origin(&mut rng); - let amount = random_ed_multiple(&mut rng); - (PoolsCall::::unbond { member_account: who, unbonding_points: amount }, origin) - }, - 4 => { - // pool_withdraw_unbonded - let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); - let (origin, _) = random_signed_origin(&mut rng); - (PoolsCall::::pool_withdraw_unbonded { pool_id, num_slashing_spans: 0 }, origin) - }, - 5 => { - // withdraw_unbonded - let (origin, who) = random_signed_origin(&mut rng); - ( - PoolsCall::::withdraw_unbonded { - member_account: who, - num_slashing_spans: 0, - }, - origin, - ) - }, - 6 => { - // create - let (origin, who) = random_signed_origin(&mut rng); - let amount = random_ed_multiple(&mut rng); - fund_account(&mut rng, &who); - let root = who.clone(); - let state_toggler = who.clone(); - let nominator = who.clone(); - (PoolsCall::::create { amount, root, state_toggler, nominator }, origin) - }, - 7 => { - // nominate - let (origin, _) = random_signed_origin(&mut rng); - let pool_id = random_existing_pool(&mut rng).unwrap_or_default(); - let validators = Default::default(); - (PoolsCall::::nominate { pool_id, validators }, origin) - }, - _ => unreachable!(), - } - } - - #[derive(Default)] - struct RewardAgent { - who: AccountId, - pool_id: Option, - expected_reward: Balance, - } - - // TODO: inject some slashes into the game. - impl RewardAgent { - fn new(who: AccountId) -> Self { - Self { who, ..Default::default() } - } - - fn join(&mut self) { - if self.pool_id.is_some() { - return - } - let pool_id = LastPoolId::::get(); - let amount = 10 * ExistentialDeposit::get(); - let origin = RuntimeOrigin::signed(self.who); - let _ = Balances::deposit_creating(&self.who, 10 * amount); - self.pool_id = Some(pool_id); - log::info!(target: "reward-agent", "🤖 reward agent joining in {} with {}", pool_id, amount); - assert_ok!(PoolsCall::join:: { amount, pool_id }.dispatch_bypass_filter(origin)); - } - - fn claim_payout(&mut self) { - // 10 era later, we claim our payout. We expect our income to be roughly what we - // calculated. 
- if !PoolMembers::::contains_key(&self.who) { - log!(warn, "reward agent is not in the pool yet, cannot claim"); - return - } - let pre = Balances::free_balance(&42); - let origin = RuntimeOrigin::signed(42); - assert_ok!(PoolsCall::::claim_payout {}.dispatch_bypass_filter(origin)); - let post = Balances::free_balance(&42); - - let income = post - pre; - log::info!( - target: "reward-agent", "🤖 CLAIM: actual: {}, expected: {}", - income, - self.expected_reward, - ); - assert_eq_error_rate!(income, self.expected_reward, 10); - self.expected_reward = 0; - } - } - - #[test] - fn fuzz_test() { - let mut reward_agent = RewardAgent::new(42); - sp_tracing::try_init_simple(); - // NOTE: use this to get predictable (non)randomness: - // use::{rngs::SmallRng, SeedableRng}; - // let mut rng = SmallRng::from_seed([0u8; 32]); - let mut rng = thread_rng(); - let mut ext = sp_io::TestExternalities::new_empty(); - // NOTE: sadly events don't fulfill the requirements of hashmap or btreemap. - let mut events_histogram = Vec::<(PoolsEvents, u32)>::default(); - let mut iteration = 0 as BlockNumber; - let mut ok = 0; - let mut err = 0; - - ext.execute_with(|| { - MaxPoolMembers::::set(Some(10_000)); - MaxPoolMembersPerPool::::set(Some(1000)); - MaxPools::::set(Some(1_000)); - - MinCreateBond::::set(10 * ExistentialDeposit::get()); - MinJoinBond::::set(5 * ExistentialDeposit::get()); - System::set_block_number(1); - }); - - ExistentialDeposit::set(10u128.pow(12u32)); - BondingDuration::set(8); - - loop { - ext.execute_with(|| { - iteration += 1; - let (call, origin) = random_call(&mut rng); - let outcome = call.clone().dispatch_bypass_filter(origin.clone()); - - match outcome { - Ok(_) => ok += 1, - Err(_) => err += 1, - }; - - log!( - debug, - "iteration {}, call {:?}, origin {:?}, outcome: {:?}, so far {} ok {} err", - iteration, - call, - origin, - outcome, - ok, - err, - ); - - // possibly join the reward_agent - if iteration > ERA / 2 && BondedPools::::count() > 0 { - reward_agent.join(); - } - // and possibly roughly every 4 era, trigger payout for the agent. Doing this more - // frequent is also harmless. - if rng.gen_range(0..(4 * ERA)) == 0 { - reward_agent.claim_payout(); - } - - // execute sanity checks at a fixed interval, possibly on every block. - if iteration % - (std::env::var("SANITY_CHECK_INTERVAL") - .ok() - .and_then(|x| x.parse::().ok())) - .unwrap_or(1) == 0 - { - log!(info, "running sanity checks at {}", iteration); - Pools::do_try_state(u8::MAX).unwrap(); - } - - // collect and reset events. - System::events() - .into_iter() - .map(|r| r.event) - .filter_map( - |e| if let mock::Event::Pools(inner) = e { Some(inner) } else { None }, - ) - .for_each(|e| { - if let Some((_, c)) = events_histogram - .iter_mut() - .find(|(x, _)| std::mem::discriminant(x) == std::mem::discriminant(&e)) - { - *c += 1; - } else { - events_histogram.push((e, 1)) - } - }); - System::reset_events(); - - // trigger an era change, and check the status of the reward agent. - if iteration % ERA == 0 { - CurrentEra::mutate(|c| *c += 1); - BondedPools::::iter().for_each(|(id, _)| { - let amount = random_ed_multiple(&mut rng); - let _ = - Balances::deposit_creating(&Pools::create_reward_account(id), amount); - // if we just paid out the reward agent, let's calculate how much we expect - // our reward agent to have earned. 
- if reward_agent.pool_id.map_or(false, |mid| mid == id) { - let all_points = BondedPool::::get(id).map(|p| p.points).unwrap(); - let member_points = - PoolMembers::::get(reward_agent.who).map(|m| m.points).unwrap(); - let agent_share = Perquintill::from_rational(member_points, all_points); - log::info!( - target: "reward-agent", - "🤖 REWARD = amount = {:?}, ratio: {:?}, share {:?}", - amount, - agent_share, - agent_share * amount, - ); - reward_agent.expected_reward += agent_share * amount; - } - }); - - log!( - info, - "iteration {}, {} pools, {} members, {} ok {} err, events = {:?}", - iteration, - BondedPools::::count(), - PoolMembers::::count(), - ok, - err, - events_histogram - .iter() - .map(|(x, c)| ( - format!("{:?}", x) - .split(" ") - .map(|x| x.to_string()) - .collect::>() - .first() - .cloned() - .unwrap(), - c, - )) - .collect::>(), - ); - } - }); - } - } -} From 38a955ba4a0c967659e6c944c56c664fce0f8f23 Mon Sep 17 00:00:00 2001 From: Koute Date: Thu, 10 Nov 2022 21:23:56 +0900 Subject: [PATCH 341/348] Remove `sp_tasks::spawn` API and related code + host functions (#12639) * Remove `sp_tasks::spawn` API and related code * Remove `RuntimeTasks::{spawn, join}` host functions * remove unused * Remove a few more tests that I forgot to remove Co-authored-by: Shawn Tabrizi --- Cargo.lock | 30 --- Cargo.toml | 2 - client/executor/Cargo.toml | 1 - client/executor/runtime-test/Cargo.toml | 2 - client/executor/runtime-test/src/lib.rs | 37 --- client/executor/src/integration_tests/mod.rs | 27 -- client/executor/src/native_executor.rs | 167 +----------- client/executor/wasmtime/src/runtime.rs | 3 - frame/examples/parallel/Cargo.toml | 38 --- frame/examples/parallel/README.md | 7 - frame/examples/parallel/src/lib.rs | 148 ----------- frame/examples/parallel/src/tests.rs | 146 ----------- primitives/core/src/traits.rs | 6 - primitives/io/src/lib.rs | 35 +-- primitives/tasks/Cargo.toml | 36 --- primitives/tasks/README.md | 3 - primitives/tasks/src/async_externalities.rs | 212 --------------- primitives/tasks/src/lib.rs | 257 ------------------- 18 files changed, 10 insertions(+), 1147 deletions(-) delete mode 100644 frame/examples/parallel/Cargo.toml delete mode 100644 frame/examples/parallel/README.md delete mode 100644 frame/examples/parallel/src/lib.rs delete mode 100644 frame/examples/parallel/src/tests.rs delete mode 100644 primitives/tasks/Cargo.toml delete mode 100644 primitives/tasks/README.md delete mode 100644 primitives/tasks/src/async_externalities.rs delete mode 100644 primitives/tasks/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index a13768a64a4ab..f0471d019c1a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5461,21 +5461,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-example-parallel" -version = "3.0.0-dev" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-tasks", -] - [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" @@ -7988,7 +7973,6 @@ dependencies = [ "sp-runtime", "sp-runtime-interface", "sp-state-machine", - "sp-tasks", "sp-trie", "sp-version", "sp-wasm-interface", @@ -8548,7 +8532,6 @@ dependencies = [ "sp-runtime", "sp-sandbox", "sp-std", - "sp-tasks", "substrate-wasm-builder", ] @@ -9960,19 +9943,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "sp-tasks" -version = "4.0.0-dev" -dependencies = [ - "log", - "parity-scale-codec", - "sp-core", - "sp-externalities", - "sp-io", - "sp-runtime-interface", - "sp-std", -] - [[package]] 
name = "sp-test-primitives" version = "2.0.0" diff --git a/Cargo.toml b/Cargo.toml index 4ba2663cdb409..ab8fbd816b004 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -98,7 +98,6 @@ members = [ "frame/election-provider-support/solution-type/fuzzer", "frame/examples/basic", "frame/examples/offchain-worker", - "frame/examples/parallel", "frame/executive", "frame/gilt", "frame/grandpa", @@ -205,7 +204,6 @@ members = [ "primitives/state-machine", "primitives/std", "primitives/storage", - "primitives/tasks", "primitives/test-primitives", "primitives/timestamp", "primitives/tracing", diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 1a6c281c77ff5..e48c27dfc998e 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -31,7 +31,6 @@ sp-externalities = { version = "0.12.0", path = "../../primitives/externalities" sp-io = { version = "6.0.0", path = "../../primitives/io" } sp-panic-handler = { version = "4.0.0", path = "../../primitives/panic-handler" } sp-runtime-interface = { version = "6.0.0", path = "../../primitives/runtime-interface" } -sp-tasks = { version = "4.0.0-dev", path = "../../primitives/tasks" } sp-trie = { version = "6.0.0", path = "../../primitives/trie" } sp-version = { version = "5.0.0", path = "../../primitives/version" } sp-wasm-interface = { version = "6.0.0", path = "../../primitives/wasm-interface" } diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 7a7848700c4c1..f90b2e1439a77 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -19,7 +19,6 @@ sp-io = { version = "6.0.0", default-features = false, features = ["improved_pan sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/sandbox" } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } -sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/tasks" } [build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } @@ -32,5 +31,4 @@ std = [ "sp-runtime/std", "sp-sandbox/std", "sp-std/std", - "sp-tasks/std", ] diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 5a741f51c3899..0424ad418617b 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -318,24 +318,6 @@ sp_core::wasm_export_functions! { message_slice.copy_from_slice(test_message); } - fn test_spawn() { - let data = vec![1u8, 2u8]; - let data_new = sp_tasks::spawn(tasks::incrementer, data).join(); - - assert_eq!(data_new, vec![2u8, 3u8]); - } - - fn test_nested_spawn() { - let data = vec![7u8, 13u8]; - let data_new = sp_tasks::spawn(tasks::parallel_incrementer, data).join(); - - assert_eq!(data_new, vec![10u8, 16u8]); - } - - fn test_panic_in_spawned() { - sp_tasks::spawn(tasks::panicker, vec![]).join(); - } - fn test_return_i8() -> i8 { -66 } @@ -358,25 +340,6 @@ sp_core::wasm_export_functions! 
{ } } -#[cfg(not(feature = "std"))] -mod tasks { - use sp_std::prelude::*; - - pub fn incrementer(data: Vec) -> Vec { - data.into_iter().map(|v| v + 1).collect() - } - - pub fn panicker(_: Vec) -> Vec { - panic!() - } - - pub fn parallel_incrementer(data: Vec) -> Vec { - let first = data.into_iter().map(|v| v + 2).collect::>(); - let second = sp_tasks::spawn(incrementer, first).join(); - second - } -} - /// A macro to define a test entrypoint for each available sandbox executor. macro_rules! wasm_export_sandbox_test_functions { ( diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 9ffb7f502f5c6..3217f9f96ca79 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -770,33 +770,6 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(len, 2); } -test_wasm_execution!(spawning_runtime_instance_should_work); -fn spawning_runtime_instance_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - - call_in_wasm("test_spawn", &[], wasm_method, &mut ext).unwrap(); -} - -test_wasm_execution!(spawning_runtime_instance_nested_should_work); -fn spawning_runtime_instance_nested_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - - call_in_wasm("test_nested_spawn", &[], wasm_method, &mut ext).unwrap(); -} - -test_wasm_execution!(panic_in_spawned_instance_panics_on_joining_its_result); -fn panic_in_spawned_instance_panics_on_joining_its_result(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - - let error_result = - call_in_wasm("test_panic_in_spawned", &[], wasm_method, &mut ext).unwrap_err(); - - assert!(error_result.to_string().contains("Spawned task")); -} - test_wasm_execution!(memory_is_cleared_between_invocations); fn memory_is_cleared_between_invocations(wasm_method: WasmExecutionMethod) { // This is based on the code generated by compiling a runtime *without* diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index b164b427e306d..0eabffb8c87df 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -23,24 +23,18 @@ use crate::{ }; use std::{ - collections::HashMap, marker::PhantomData, panic::{AssertUnwindSafe, UnwindSafe}, path::PathBuf, - sync::{ - atomic::{AtomicU64, Ordering}, - mpsc, Arc, - }, + sync::Arc, }; use codec::Encode; use sc_executor_common::{ runtime_blob::RuntimeBlob, - wasm_runtime::{AllocationStats, InvokeMethod, WasmInstance, WasmModule}, + wasm_runtime::{AllocationStats, WasmInstance, WasmModule}, }; -use sp_core::traits::{CodeExecutor, Externalities, RuntimeCode, RuntimeSpawn, RuntimeSpawnExt}; -use sp_externalities::ExternalitiesExt as _; -use sp_tasks::new_async_externalities; +use sp_core::traits::{CodeExecutor, Externalities, RuntimeCode}; use sp_version::{GetNativeVersion, NativeVersion, RuntimeVersion}; use sp_wasm_interface::{ExtendedHostFunctions, HostFunctions}; @@ -277,11 +271,9 @@ where let mut instance = AssertUnwindSafe(instance); let mut ext = AssertUnwindSafe(ext); - let module = AssertUnwindSafe(module); let mut allocation_stats_out = AssertUnwindSafe(allocation_stats_out); with_externalities_safe(&mut **ext, move || { - preregister_builtin_ext(module.clone()); let (result, allocation_stats) = 
instance.call_with_allocation_stats(export_name.into(), call_data); **allocation_stats_out = allocation_stats; @@ -349,16 +341,10 @@ where "Executing function", ); - let result = self.with_instance( - runtime_code, - ext, - |module, mut instance, _onchain_version, mut ext| { - with_externalities_safe(&mut **ext, move || { - preregister_builtin_ext(module.clone()); - instance.call_export(method, data) - }) - }, - ); + let result = + self.with_instance(runtime_code, ext, |_, mut instance, _onchain_version, mut ext| { + with_externalities_safe(&mut **ext, move || instance.call_export(method, data)) + }); (result, false) } } @@ -451,138 +437,6 @@ impl GetNativeVersion for NativeElseWasmExecutor } } -/// Helper inner struct to implement `RuntimeSpawn` extension. -pub struct RuntimeInstanceSpawn { - module: Arc, - tasks: parking_lot::Mutex>>>, - counter: AtomicU64, - scheduler: Box, -} - -impl RuntimeSpawn for RuntimeInstanceSpawn { - fn spawn_call(&self, dispatcher_ref: u32, func: u32, data: Vec) -> u64 { - let new_handle = self.counter.fetch_add(1, Ordering::Relaxed); - - let (sender, receiver) = mpsc::channel(); - self.tasks.lock().insert(new_handle, receiver); - - let module = self.module.clone(); - let scheduler = self.scheduler.clone(); - self.scheduler.spawn( - "executor-extra-runtime-instance", - None, - Box::pin(async move { - let module = AssertUnwindSafe(module); - - let async_ext = match new_async_externalities(scheduler.clone()) { - Ok(val) => val, - Err(e) => { - tracing::error!( - target: "executor", - error = %e, - "Failed to setup externalities for async context.", - ); - - // This will drop sender and receiver end will panic - return - }, - }; - - let mut async_ext = match async_ext.with_runtime_spawn(Box::new( - RuntimeInstanceSpawn::new(module.clone(), scheduler), - )) { - Ok(val) => val, - Err(e) => { - tracing::error!( - target: "executor", - error = %e, - "Failed to setup runtime extension for async externalities", - ); - - // This will drop sender and receiver end will panic - return - }, - }; - - let result = with_externalities_safe(&mut async_ext, move || { - // FIXME: Should be refactored to shared "instance factory". - // Instantiating wasm here every time is suboptimal at the moment, shared - // pool of instances should be used. - // - // https://github.com/paritytech/substrate/issues/7354 - let mut instance = match module.new_instance() { - Ok(instance) => instance, - Err(error) => { - panic!("failed to create new instance from module: {}", error) - }, - }; - - match instance - .call(InvokeMethod::TableWithWrapper { dispatcher_ref, func }, &data[..]) - { - Ok(result) => result, - Err(error) => panic!("failed to invoke instance: {}", error), - } - }); - - match result { - Ok(output) => { - let _ = sender.send(output); - }, - Err(error) => { - // If execution is panicked, the `join` in the original runtime code will - // panic as well, since the sender is dropped without sending anything. 
- tracing::error!(error = %error, "Call error in spawned task"); - }, - } - }), - ); - - new_handle - } - - fn join(&self, handle: u64) -> Vec { - let receiver = self.tasks.lock().remove(&handle).expect("No task for the handle"); - receiver.recv().expect("Spawned task panicked for the handle") - } -} - -impl RuntimeInstanceSpawn { - pub fn new( - module: Arc, - scheduler: Box, - ) -> Self { - Self { module, scheduler, counter: 0.into(), tasks: HashMap::new().into() } - } - - fn with_externalities_and_module( - module: Arc, - mut ext: &mut dyn Externalities, - ) -> Option { - ext.extension::() - .map(move |task_ext| Self::new(module, task_ext.clone())) - } -} - -/// Pre-registers the built-in extensions to the currently effective externalities. -/// -/// Meant to be called each time before calling into the runtime. -fn preregister_builtin_ext(module: Arc) { - sp_externalities::with_externalities(move |mut ext| { - if let Some(runtime_spawn) = - RuntimeInstanceSpawn::with_externalities_and_module(module, ext) - { - if let Err(e) = ext.register_extension(RuntimeSpawnExt(Box::new(runtime_spawn))) { - tracing::trace!( - target: "executor", - error = ?e, - "Failed to register `RuntimeSpawnExt` instance on externalities", - ) - } - } - }); -} - impl CodeExecutor for NativeElseWasmExecutor { type Error = Error; @@ -604,7 +458,7 @@ impl CodeExecutor for NativeElseWasmExecut let result = self.wasm.with_instance( runtime_code, ext, - |module, mut instance, onchain_version, mut ext| { + |_, mut instance, onchain_version, mut ext| { let onchain_version = onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; @@ -632,10 +486,7 @@ impl CodeExecutor for NativeElseWasmExecut ); } - with_externalities_safe(&mut **ext, move || { - preregister_builtin_ext(module.clone()); - instance.call_export(method, data) - }) + with_externalities_safe(&mut **ext, move || instance.call_export(method, data)) } }, ); diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index e3509351022bc..5bca899648c34 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -395,9 +395,6 @@ fn common_config(semantics: &Semantics) -> std::result::Result"] -edition = "2021" -license = "Unlicense" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "FRAME example pallet using runtime worker threads" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } -frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } -frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } -sp-core = { version = "6.0.0", default-features = false, path = "../../../primitives/core" } -sp-io = { version = "6.0.0", default-features = false, path = "../../../primitives/io" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } -sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/tasks" } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "frame-system/std", - "scale-info/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - 
"sp-std/std", - "sp-tasks/std", -] -try-runtime = ["frame-support/try-runtime"] diff --git a/frame/examples/parallel/README.md b/frame/examples/parallel/README.md deleted file mode 100644 index 44b39a41507db..0000000000000 --- a/frame/examples/parallel/README.md +++ /dev/null @@ -1,7 +0,0 @@ - -# Parallel Tasks Example Pallet - -This example pallet demonstrates parallelizing validation of the enlisted participants (see -`enlist_participants` dispatch). - -**This pallet serves as an example and is not meant to be used in production.** diff --git a/frame/examples/parallel/src/lib.rs b/frame/examples/parallel/src/lib.rs deleted file mode 100644 index 3432a79638664..0000000000000 --- a/frame/examples/parallel/src/lib.rs +++ /dev/null @@ -1,148 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Parallel Tasks Example Pallet -//! -//! This example pallet demonstrates parallelizing validation of the enlisted participants -//! (see `enlist_participants` dispatch). -//! -//! **This pallet serves as an example and is not meant to be used in production.** - -#![cfg_attr(not(feature = "std"), no_std)] - -use sp_runtime::RuntimeDebug; - -use codec::{Decode, Encode}; -use sp_std::vec::Vec; - -#[cfg(test)] -mod tests; - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] - pub struct Pallet(_); - - /// A public part of the pallet. - #[pallet::call] - impl Pallet { - /// Get the new event running. - #[pallet::weight(0)] - pub fn run_event(origin: OriginFor, id: Vec) -> DispatchResultWithPostInfo { - let _ = ensure_signed(origin)?; - >::kill(); - >::mutate(move |event_id| *event_id = id); - Ok(().into()) - } - - /// Submit list of participants to the current event. - /// - /// The example utilizes parallel execution by checking half of the - /// signatures in spawned task. - #[pallet::weight(0)] - pub fn enlist_participants( - origin: OriginFor, - participants: Vec, - ) -> DispatchResultWithPostInfo { - let _ = ensure_signed(origin)?; - - if validate_participants_parallel(&>::get(), &participants[..]) { - for participant in participants { - >::append(participant.account); - } - } - Ok(().into()) - } - } - - /// A vector of current participants - /// - /// To enlist someone to participate, signed payload should be - /// sent to `enlist`. - #[pallet::storage] - #[pallet::getter(fn participants)] - pub(super) type Participants = StorageValue<_, Vec>, ValueQuery>; - - /// Current event id to enlist participants to. 
- #[pallet::storage] - #[pallet::getter(fn get_current_event_id)] - pub(super) type CurrentEventId = StorageValue<_, Vec, ValueQuery>; -} - -/// Request to enlist participant. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, scale_info::TypeInfo)] -pub struct EnlistedParticipant { - pub account: Vec, - pub signature: Vec, -} - -impl EnlistedParticipant { - fn verify(&self, event_id: &[u8]) -> bool { - use sp_core::ByteArray; - use sp_runtime::traits::Verify; - - match sp_core::sr25519::Signature::try_from(&self.signature[..]) { - Ok(signature) => match sp_core::sr25519::Public::from_slice(self.account.as_ref()) { - Err(()) => false, - Ok(signer) => signature.verify(event_id, &signer), - }, - _ => false, - } - } -} - -fn validate_participants_parallel(event_id: &[u8], participants: &[EnlistedParticipant]) -> bool { - fn spawn_verify(data: Vec) -> Vec { - let stream = &mut &data[..]; - let event_id = Vec::::decode(stream).expect("Failed to decode"); - let participants = Vec::::decode(stream).expect("Failed to decode"); - - for participant in participants { - if !participant.verify(&event_id) { - return false.encode() - } - } - true.encode() - } - - let mut async_payload = Vec::new(); - event_id.encode_to(&mut async_payload); - participants[..participants.len() / 2].encode_to(&mut async_payload); - - let handle = sp_tasks::spawn(spawn_verify, async_payload); - let mut result = true; - - for participant in &participants[participants.len() / 2 + 1..] { - if !participant.verify(event_id) { - result = false; - break - } - } - - bool::decode(&mut &handle.join()[..]).expect("Failed to decode result") && result -} diff --git a/frame/examples/parallel/src/tests.rs b/frame/examples/parallel/src/tests.rs deleted file mode 100644 index fdef24a39ae36..0000000000000 --- a/frame/examples/parallel/src/tests.rs +++ /dev/null @@ -1,146 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{self as pallet_example_parallel, *}; - -use frame_support::parameter_types; -use sp_core::H256; -use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; -type Block = frame_system::mocking::MockBlock; - -frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Example: pallet_example_parallel::{Pallet, Call, Storage}, - } -); - -parameter_types! 
{ - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for Test { - type BaseCallFilter = frame_support::traits::Everything; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - type PalletInfo = PalletInfo; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = sp_core::sr25519::Public; - type Lookup = IdentityLookup; - type Header = Header; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = frame_support::traits::ConstU64<250>; - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type Version = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl Config for Test {} - -fn test_pub(n: u8) -> sp_core::sr25519::Public { - sp_core::sr25519::Public::from_raw([n; 32]) -} - -fn test_origin(n: u8) -> RuntimeOrigin { - RuntimeOrigin::signed(test_pub(n)) -} - -#[test] -fn it_can_enlist() { - use sp_core::Pair; - - sp_io::TestExternalities::default().execute_with(|| { - let (pair1, _) = sp_core::sr25519::Pair::generate(); - let (pair2, _) = sp_core::sr25519::Pair::generate(); - - let event_name = b"test"; - - Example::run_event(test_origin(1), event_name.to_vec()).expect("Failed to enlist"); - - let participants = vec![ - EnlistedParticipant { - account: pair1.public().to_vec(), - signature: AsRef::<[u8]>::as_ref(&pair1.sign(event_name)).to_vec(), - }, - EnlistedParticipant { - account: pair2.public().to_vec(), - signature: AsRef::<[u8]>::as_ref(&pair2.sign(event_name)).to_vec(), - }, - ]; - - Example::enlist_participants(RuntimeOrigin::signed(test_pub(1)), participants) - .expect("Failed to enlist"); - - assert_eq!(Example::participants().len(), 2); - }); -} - -#[test] -fn one_wrong_will_not_enlist_anyone() { - use sp_core::Pair; - - sp_io::TestExternalities::default().execute_with(|| { - let (pair1, _) = sp_core::sr25519::Pair::generate(); - let (pair2, _) = sp_core::sr25519::Pair::generate(); - let (pair3, _) = sp_core::sr25519::Pair::generate(); - - let event_name = b"test"; - - Example::run_event(test_origin(1), event_name.to_vec()).expect("Failed to enlist"); - - let participants = vec![ - EnlistedParticipant { - account: pair1.public().to_vec(), - signature: AsRef::<[u8]>::as_ref(&pair1.sign(event_name)).to_vec(), - }, - EnlistedParticipant { - account: pair2.public().to_vec(), - signature: AsRef::<[u8]>::as_ref(&pair2.sign(event_name)).to_vec(), - }, - // signing wrong event - EnlistedParticipant { - account: pair3.public().to_vec(), - signature: AsRef::<[u8]>::as_ref(&pair3.sign(&[])).to_vec(), - }, - ]; - - Example::enlist_participants(test_origin(1), participants).expect("Failed to enlist"); - - assert_eq!(Example::participants().len(), 0); - }); -} diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index c5149cc48c074..c4b7f20f7e9a0 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -179,12 +179,6 @@ pub trait RuntimeSpawn: Send { fn join(&self, handle: u64) -> Vec; } -#[cfg(feature = "std")] -sp_externalities::decl_extension! { - /// Extension that supports spawning extra runtime instances in externalities. - pub struct RuntimeSpawnExt(Box); -} - /// Something that can spawn tasks (blocking and non-blocking) with an assigned name /// and optional group. 
#[dyn_clonable::clonable] diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 7942bafcc2a1b..33516bb0397f3 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -40,7 +40,7 @@ use sp_core::{ hexdisplay::HexDisplay, offchain::{OffchainDbExt, OffchainWorkerExt, TransactionPoolExt}, storage::ChildInfo, - traits::{RuntimeSpawnExt, TaskExecutorExt}, + traits::TaskExecutorExt, }; #[cfg(feature = "std")] use sp_keystore::{KeystoreExt, SyncCryptoStore}; @@ -1657,38 +1657,6 @@ pub trait Sandbox { } } -/// Wasm host functions for managing tasks. -/// -/// This should not be used directly. Use `sp_tasks` for running parallel tasks instead. -#[runtime_interface(wasm_only)] -pub trait RuntimeTasks { - /// Wasm host function for spawning task. - /// - /// This should not be used directly. Use `sp_tasks::spawn` instead. - fn spawn(dispatcher_ref: u32, entry: u32, payload: Vec) -> u64 { - sp_externalities::with_externalities(|mut ext| { - let runtime_spawn = ext - .extension::() - .expect("Cannot spawn without dynamic runtime dispatcher (RuntimeSpawnExt)"); - runtime_spawn.spawn_call(dispatcher_ref, entry, payload) - }) - .expect("`RuntimeTasks::spawn`: called outside of externalities context") - } - - /// Wasm host function for joining a task. - /// - /// This should not be used directly. Use `join` of `sp_tasks::spawn` result instead. - fn join(handle: u64) -> Vec { - sp_externalities::with_externalities(|mut ext| { - let runtime_spawn = ext - .extension::() - .expect("Cannot join without dynamic runtime dispatcher (RuntimeSpawnExt)"); - runtime_spawn.join(handle) - }) - .expect("`RuntimeTasks::join`: called outside of externalities context") - } -} - /// Allocator used by Substrate when executing the Wasm runtime. #[cfg(all(target_arch = "wasm32", not(feature = "std")))] struct WasmAllocator; @@ -1767,7 +1735,6 @@ pub type SubstrateHostFunctions = ( sandbox::HostFunctions, crate::trie::HostFunctions, offchain_index::HostFunctions, - runtime_tasks::HostFunctions, transaction_index::HostFunctions, ); diff --git a/primitives/tasks/Cargo.toml b/primitives/tasks/Cargo.toml deleted file mode 100644 index c37a8a66f94df..0000000000000 --- a/primitives/tasks/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "sp-tasks" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "Runtime asynchronous, pure computational tasks" -documentation = "https://docs.rs/sp-tasks" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -log = { version = "0.4.17", optional = true } -sp-core = { version = "6.0.0", default-features = false, path = "../core" } -sp-externalities = { version = "0.12.0", optional = true, path = "../externalities" } -sp-io = { version = "6.0.0", default-features = false, path = "../io" } -sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../runtime-interface" } -sp-std = { version = "4.0.0", default-features = false, path = "../std" } - -[dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } - -[features] -default = ["std"] -std = [ - "log", - "sp-core/std", - "sp-externalities", - "sp-io/std", - "sp-runtime-interface/std", - "sp-std/std", -] diff --git a/primitives/tasks/README.md b/primitives/tasks/README.md deleted file mode 100644 index 
1235e1bd933d4..0000000000000 --- a/primitives/tasks/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Runtime asynchronous, pure computational tasks. - -License: Apache-2.0 \ No newline at end of file diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs deleted file mode 100644 index 008955a714b21..0000000000000 --- a/primitives/tasks/src/async_externalities.rs +++ /dev/null @@ -1,212 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Async externalities. - -use sp_core::{ - storage::{ChildInfo, StateVersion, TrackedStorageKey}, - traits::{Externalities, RuntimeSpawn, RuntimeSpawnExt, SpawnNamed, TaskExecutorExt}, -}; -use sp_externalities::{Extensions, ExternalitiesExt as _, MultiRemovalResults}; -use std::any::{Any, TypeId}; - -/// Simple state-less externalities for use in async context. -/// -/// Will panic if anything is accessing the storage. -#[derive(Debug)] -pub struct AsyncExternalities { - extensions: Extensions, -} - -/// New Async externalities. -pub fn new_async_externalities( - scheduler: Box, -) -> Result { - let mut res = AsyncExternalities { extensions: Default::default() }; - let mut ext = &mut res as &mut dyn Externalities; - ext.register_extension::(TaskExecutorExt(scheduler.clone())) - .map_err(|_| "Failed to register task executor extension.")?; - - Ok(res) -} - -impl AsyncExternalities { - /// Extend async externalities with the ability to spawn wasm instances. 
- pub fn with_runtime_spawn( - mut self, - runtime_ext: Box, - ) -> Result { - let mut ext = &mut self as &mut dyn Externalities; - ext.register_extension::(RuntimeSpawnExt(runtime_ext)) - .map_err(|_| "Failed to register task executor extension.")?; - - Ok(self) - } -} - -type StorageKey = Vec; - -type StorageValue = Vec; - -impl Externalities for AsyncExternalities { - fn set_offchain_storage(&mut self, _key: &[u8], _value: Option<&[u8]>) { - panic!("`set_offchain_storage`: should not be used in async externalities!") - } - - fn storage(&self, _key: &[u8]) -> Option { - panic!("`storage`: should not be used in async externalities!") - } - - fn storage_hash(&self, _key: &[u8]) -> Option> { - panic!("`storage_hash`: should not be used in async externalities!") - } - - fn child_storage(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option { - panic!("`child_storage`: should not be used in async externalities!") - } - - fn child_storage_hash(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option> { - panic!("`child_storage_hash`: should not be used in async externalities!") - } - - fn next_storage_key(&self, _key: &[u8]) -> Option { - panic!("`next_storage_key`: should not be used in async externalities!") - } - - fn next_child_storage_key(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option { - panic!("`next_child_storage_key`: should not be used in async externalities!") - } - - fn place_storage(&mut self, _key: StorageKey, _maybe_value: Option) { - panic!("`place_storage`: should not be used in async externalities!") - } - - fn place_child_storage( - &mut self, - _child_info: &ChildInfo, - _key: StorageKey, - _value: Option, - ) { - panic!("`place_child_storage`: should not be used in async externalities!") - } - - fn kill_child_storage( - &mut self, - _child_info: &ChildInfo, - _maybe_limit: Option, - _maybe_cursor: Option<&[u8]>, - ) -> MultiRemovalResults { - panic!("`kill_child_storage`: should not be used in async externalities!") - } - - fn clear_prefix( - &mut self, - _prefix: &[u8], - _maybe_limit: Option, - _maybe_cursor: Option<&[u8]>, - ) -> MultiRemovalResults { - panic!("`clear_prefix`: should not be used in async externalities!") - } - - fn clear_child_prefix( - &mut self, - _child_info: &ChildInfo, - _prefix: &[u8], - _maybe_limit: Option, - _maybe_cursor: Option<&[u8]>, - ) -> MultiRemovalResults { - panic!("`clear_child_prefix`: should not be used in async externalities!") - } - - fn storage_append(&mut self, _key: Vec, _value: Vec) { - panic!("`storage_append`: should not be used in async externalities!") - } - - fn storage_root(&mut self, _state_version: StateVersion) -> Vec { - panic!("`storage_root`: should not be used in async externalities!") - } - - fn child_storage_root( - &mut self, - _child_info: &ChildInfo, - _state_version: StateVersion, - ) -> Vec { - panic!("`child_storage_root`: should not be used in async externalities!") - } - - fn storage_start_transaction(&mut self) { - unimplemented!("Transactions are not supported by AsyncExternalities"); - } - - fn storage_rollback_transaction(&mut self) -> Result<(), ()> { - unimplemented!("Transactions are not supported by AsyncExternalities"); - } - - fn storage_commit_transaction(&mut self) -> Result<(), ()> { - unimplemented!("Transactions are not supported by AsyncExternalities"); - } - - fn wipe(&mut self) {} - - fn commit(&mut self) {} - - fn read_write_count(&self) -> (u32, u32, u32, u32) { - unimplemented!("read_write_count is not supported in AsyncExternalities") - } - - fn reset_read_write_count(&mut 
self) { - unimplemented!("reset_read_write_count is not supported in AsyncExternalities") - } - - fn get_whitelist(&self) -> Vec { - unimplemented!("get_whitelist is not supported in AsyncExternalities") - } - - fn set_whitelist(&mut self, _: Vec) { - unimplemented!("set_whitelist is not supported in AsyncExternalities") - } - - fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { - unimplemented!("get_read_and_written_keys is not supported in AsyncExternalities") - } -} - -impl sp_externalities::ExtensionStore for AsyncExternalities { - fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { - self.extensions.get_mut(type_id) - } - - fn register_extension_with_type_id( - &mut self, - type_id: TypeId, - extension: Box, - ) -> Result<(), sp_externalities::Error> { - self.extensions.register_with_type_id(type_id, extension) - } - - fn deregister_extension_by_type_id( - &mut self, - type_id: TypeId, - ) -> Result<(), sp_externalities::Error> { - if self.extensions.deregister(type_id) { - Ok(()) - } else { - Err(sp_externalities::Error::ExtensionIsNotRegistered(type_id)) - } - } -} diff --git a/primitives/tasks/src/lib.rs b/primitives/tasks/src/lib.rs deleted file mode 100644 index 3711fa71a2fab..0000000000000 --- a/primitives/tasks/src/lib.rs +++ /dev/null @@ -1,257 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Runtime tasks. -//! -//! Contains runtime-usable functions for spawning parallel purely computational tasks. -//! -//! NOTE: This is experimental API. -//! NOTE: When using in actual runtime, make sure you don't produce unbounded parallelism. -//! So this is bad example to use it: -//! ```rust -//! fn my_parallel_computator(data: Vec) -> Vec { -//! unimplemented!() -//! } -//! fn test(dynamic_variable: i32) { -//! for _ in 0..dynamic_variable { sp_tasks::spawn(my_parallel_computator, vec![]); } -//! } -//! ``` -//! -//! While this is a good example: -//! ```rust -//! use codec::Encode; -//! static STATIC_VARIABLE: i32 = 4; -//! -//! fn my_parallel_computator(data: Vec) -> Vec { -//! unimplemented!() -//! } -//! -//! fn test(computation_payload: Vec) { -//! let parallel_tasks = (0..STATIC_VARIABLE).map(|idx| -//! sp_tasks::spawn(my_parallel_computator, computation_payload.chunks(10).nth(idx as _).encode()) -//! ); -//! } -//! ``` -//! -//! When allowing unbounded parallelism, malicious transactions can exploit it and partition -//! network consensus based on how much resources nodes have. - -#![cfg_attr(not(feature = "std"), no_std)] - -#[cfg(feature = "std")] -mod async_externalities; - -#[cfg(feature = "std")] -pub use async_externalities::{new_async_externalities, AsyncExternalities}; - -#[cfg(feature = "std")] -mod inner { - use sp_core::traits::TaskExecutorExt; - use sp_externalities::ExternalitiesExt as _; - use std::{panic::AssertUnwindSafe, sync::mpsc}; - - /// Task handle (wasm). 
- /// - /// This can be `join`-ed to get (blocking) the result of - /// the spawned task execution. - #[must_use] - pub struct DataJoinHandle { - receiver: mpsc::Receiver>, - } - - impl DataJoinHandle { - /// Join handle returned by `spawn` function - pub fn join(self) -> Vec { - self.receiver - .recv() - .expect("Spawned runtime task terminated before sending result.") - } - } - - /// Spawn new runtime task (native). - pub fn spawn(entry_point: fn(Vec) -> Vec, data: Vec) -> DataJoinHandle { - let scheduler = sp_externalities::with_externalities(|mut ext| { - ext.extension::() - .expect("No task executor associated with the current context!") - .clone() - }) - .expect("Spawn called outside of externalities context!"); - - let (sender, receiver) = mpsc::channel(); - let extra_scheduler = scheduler.clone(); - scheduler.spawn( - "parallel-runtime-spawn", - Some("substrate-runtime"), - Box::pin(async move { - let result = match crate::new_async_externalities(extra_scheduler) { - Ok(mut ext) => { - let mut ext = AssertUnwindSafe(&mut ext); - match std::panic::catch_unwind(move || { - sp_externalities::set_and_run_with_externalities( - &mut **ext, - move || entry_point(data), - ) - }) { - Ok(result) => result, - Err(panic) => { - log::error!( - target: "runtime", - "Spawned task panicked: {:?}", - panic, - ); - - // This will drop sender without sending anything. - return - }, - } - }, - Err(e) => { - log::error!( - target: "runtime", - "Unable to run async task: {}", - e, - ); - - return - }, - }; - - let _ = sender.send(result); - }), - ); - - DataJoinHandle { receiver } - } -} - -#[cfg(not(feature = "std"))] -mod inner { - use core::mem; - use sp_std::prelude::*; - - /// Dispatch wrapper for wasm blob. - /// - /// Serves as trampoline to call any rust function with (Vec) -> Vec compiled - /// into the runtime. - /// - /// Function item should be provided with `func_ref`. Argument for the call - /// will be generated from bytes at `payload_ptr` with `payload_len`. - /// - /// NOTE: Since this dynamic dispatch function and the invoked function are compiled with - /// the same compiler, there should be no problem with ABI incompatibility. - extern "C" fn dispatch_wrapper( - func_ref: *const u8, - payload_ptr: *mut u8, - payload_len: u32, - ) -> u64 { - let payload_len = payload_len as usize; - let output = unsafe { - let payload = Vec::from_raw_parts(payload_ptr, payload_len, payload_len); - let ptr: fn(Vec) -> Vec = mem::transmute(func_ref); - (ptr)(payload) - }; - sp_runtime_interface::pack_ptr_and_len(output.as_ptr() as usize as _, output.len() as _) - } - - /// Spawn new runtime task (wasm). - pub fn spawn(entry_point: fn(Vec) -> Vec, payload: Vec) -> DataJoinHandle { - let func_ptr: usize = unsafe { mem::transmute(entry_point) }; - - let handle = - sp_io::runtime_tasks::spawn(dispatch_wrapper as usize as _, func_ptr as u32, payload); - DataJoinHandle { handle } - } - - /// Task handle (wasm). - /// - /// This can be `join`-ed to get (blocking) the result of - /// the spawned task execution. 
- #[must_use] - pub struct DataJoinHandle { - handle: u64, - } - - impl DataJoinHandle { - /// Join handle returned by `spawn` function - pub fn join(self) -> Vec { - sp_io::runtime_tasks::join(self.handle) - } - } -} - -pub use inner::{spawn, DataJoinHandle}; - -#[cfg(test)] -mod tests { - - use super::*; - - fn async_runner(mut data: Vec) -> Vec { - data.sort(); - data - } - - fn async_panicker(_data: Vec) -> Vec { - panic!("panic in async panicker!") - } - - #[test] - fn basic() { - sp_io::TestExternalities::default().execute_with(|| { - let a1 = spawn(async_runner, vec![5, 2, 1]).join(); - assert_eq!(a1, vec![1, 2, 5]); - }) - } - - #[test] - fn panicking() { - let res = sp_io::TestExternalities::default().execute_with_safe(|| { - spawn(async_panicker, vec![5, 2, 1]).join(); - }); - - assert!(res.unwrap_err().contains("Closure panicked")); - } - - #[test] - fn many_joins() { - sp_io::TestExternalities::default() - .execute_with_safe(|| { - // converges to 1 only after 1000+ steps - let mut running_val = 9780657630u64; - let mut data = vec![]; - let handles = (0..1024) - .map(|_| { - running_val = if running_val % 2 == 0 { - running_val / 2 - } else { - 3 * running_val + 1 - }; - data.push(running_val as u8); - (spawn(async_runner, data.clone()), data.clone()) - }) - .collect::>(); - - for (handle, mut data) in handles { - let result = handle.join(); - data.sort(); - - assert_eq!(result, data); - } - }) - .expect("Failed to run with externalities"); - } -} From e7cb51b325c19012cc5c269105c43717093f45b0 Mon Sep 17 00:00:00 2001 From: Jun Jiang Date: Fri, 11 Nov 2022 03:31:32 +0800 Subject: [PATCH 342/348] Contracts pallet: Bump Runtime API (#12677) --- frame/contracts/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index b2eb0439d6ced..52fb0190ba3a9 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -1199,6 +1199,7 @@ where sp_api::decl_runtime_apis! { /// The API used to dry-run contract interactions. + #[api_version(2)] pub trait ContractsApi where AccountId: Codec, Balance: Codec, From 112468e1faeed85c04c41cda42861b23698e1f2b Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 10 Nov 2022 22:14:15 +0100 Subject: [PATCH 343/348] Fix typo (#12680) --- primitives/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 96706dd919650..3752e31cbeeb0 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -234,7 +234,7 @@ impl BuildStorage for () { /// Consensus engine unique ID. pub type ConsensusEngineId = [u8; 4]; -/// Signature verify that can work with any known signature types.. +/// Signature verify that can work with any known signature types. 
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Eq, PartialEq, Clone, Encode, Decode, MaxEncodedLen, RuntimeDebug, TypeInfo)] pub enum MultiSignature { From 7763a32b4dc7ce95e2f883b97a808d03d126ae7a Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 11 Nov 2022 12:26:47 +0100 Subject: [PATCH 344/348] Move `WeightCounter` to `sp-weights` (#12603) * Move WeightCounter to sp_weights Signed-off-by: Oliver Tale-Yazdi * Rename to WeightMeter and test Signed-off-by: Oliver Tale-Yazdi * Fix pallet-scheduler for new usage Signed-off-by: Oliver Tale-Yazdi * Update primitives/weights/src/weight_meter.rs Co-authored-by: David * More tests for can_accrue Signed-off-by: Oliver Tale-Yazdi * Clippy Signed-off-by: Oliver Tale-Yazdi * Remove defensive_accrue and fixup consumed_ratio I dont think there is a good use-case for defensive_accrue without saturation. Only in tests maybe, will remove for now until we have a use-case. Signed-off-by: Oliver Tale-Yazdi * Test Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi Co-authored-by: David Co-authored-by: Gavin Wood --- frame/scheduler/src/benchmarking.rs | 20 ++- frame/scheduler/src/lib.rs | 34 +---- primitives/weights/src/lib.rs | 4 + primitives/weights/src/weight_meter.rs | 176 +++++++++++++++++++++++++ 4 files changed, 195 insertions(+), 39 deletions(-) create mode 100644 primitives/weights/src/weight_meter.rs diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index aaa30fd88ffda..e621c913b2386 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -122,17 +122,13 @@ fn make_origin(signed: bool) -> ::PalletsOrigin { } } -fn dummy_counter() -> WeightCounter { - WeightCounter { used: Weight::zero(), limit: Weight::MAX } -} - benchmarks! { // `service_agendas` when no work is done. service_agendas_base { let now = T::BlockNumber::from(BLOCK_NUMBER); IncompleteSince::::put(now - One::one()); }: { - Scheduler::::service_agendas(&mut dummy_counter(), now, 0); + Scheduler::::service_agendas(&mut WeightMeter::max_limit(), now, 0); } verify { assert_eq!(IncompleteSince::::get(), Some(now - One::one())); } @@ -144,7 +140,7 @@ benchmarks! { fill_schedule::(now, s)?; let mut executed = 0; }: { - Scheduler::::service_agenda(&mut dummy_counter(), &mut executed, now, now, 0); + Scheduler::::service_agenda(&mut WeightMeter::max_limit(), &mut executed, now, now, 0); } verify { assert_eq!(executed, 0); } @@ -155,7 +151,7 @@ benchmarks! { let now = BLOCK_NUMBER.into(); let task = make_task::(false, false, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. - let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + let mut counter = WeightMeter::from_limit(Weight::zero()); }: { let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); } verify { @@ -169,7 +165,7 @@ benchmarks! { let now = BLOCK_NUMBER.into(); let task = make_task::(false, false, false, Some(s), 0); // prevent any tasks from actually being executed as we only want the surrounding weight. - let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + let mut counter = WeightMeter::from_limit(Weight::zero()); }: { let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); } verify { @@ -181,7 +177,7 @@ benchmarks! 
{ let now = BLOCK_NUMBER.into(); let task = make_task::(false, true, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. - let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + let mut counter = WeightMeter::from_limit(Weight::zero()); }: { let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); } verify { @@ -193,7 +189,7 @@ benchmarks! { let now = BLOCK_NUMBER.into(); let task = make_task::(true, false, false, None, 0); // prevent any tasks from actually being executed as we only want the surrounding weight. - let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + let mut counter = WeightMeter::from_limit(Weight::zero()); }: { let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); } verify { @@ -201,7 +197,7 @@ benchmarks! { // `execute_dispatch` when the origin is `Signed`, not counting the dispatable's weight. execute_dispatch_signed { - let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::MAX }; + let mut counter = WeightMeter::max_limit(); let origin = make_origin::(true); let call = T::Preimages::realize(&make_call::(None)).unwrap().0; }: { @@ -212,7 +208,7 @@ benchmarks! { // `execute_dispatch` when the origin is not `Signed`, not counting the dispatable's weight. execute_dispatch_unsigned { - let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::MAX }; + let mut counter = WeightMeter::max_limit(); let origin = make_origin::(false); let call = T::Preimages::realize(&make_call::(None)).unwrap().0; }: { diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 3a463b5808cd1..78533540be98f 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -70,7 +70,7 @@ use frame_support::{ Bounded, CallerTrait, EnsureOrigin, Get, Hash as PreimageHash, IsType, OriginTrait, PalletInfoAccess, PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage, }, - weights::Weight, + weights::{Weight, WeightMeter}, }; use frame_system::{self as system}; pub use pallet::*; @@ -143,25 +143,6 @@ pub type ScheduledOf = Scheduled< ::AccountId, >; -struct WeightCounter { - used: Weight, - limit: Weight, -} -impl WeightCounter { - fn check_accrue(&mut self, w: Weight) -> bool { - let test = self.used.saturating_add(w); - if test.any_gt(self.limit) { - false - } else { - self.used = test; - true - } - } - fn can_accrue(&mut self, w: Weight) -> bool { - self.used.saturating_add(w).all_lte(self.limit) - } -} - pub(crate) trait MarginalWeightInfo: WeightInfo { fn service_task(maybe_lookup_len: Option, named: bool, periodic: bool) -> Weight { let base = Self::service_task_base(); @@ -306,10 +287,9 @@ pub mod pallet { impl Hooks> for Pallet { /// Execute the scheduled calls fn on_initialize(now: T::BlockNumber) -> Weight { - let mut weight_counter = - WeightCounter { used: Weight::zero(), limit: T::MaximumWeight::get() }; + let mut weight_counter = WeightMeter::from_limit(T::MaximumWeight::get()); Self::service_agendas(&mut weight_counter, now, u32::max_value()); - weight_counter.used + weight_counter.consumed } } @@ -933,7 +913,7 @@ use ServiceTaskError::*; impl Pallet { /// Service up to `max` agendas queue starting from earliest incompletely executed agenda. 
- fn service_agendas(weight: &mut WeightCounter, now: T::BlockNumber, max: u32) { + fn service_agendas(weight: &mut WeightMeter, now: T::BlockNumber, max: u32) { if !weight.check_accrue(T::WeightInfo::service_agendas_base()) { return } @@ -961,7 +941,7 @@ impl Pallet { /// Returns `true` if the agenda was fully completed, `false` if it should be revisited at a /// later block. fn service_agenda( - weight: &mut WeightCounter, + weight: &mut WeightMeter, executed: &mut u32, now: T::BlockNumber, when: T::BlockNumber, @@ -1030,7 +1010,7 @@ impl Pallet { /// - realizing the task's call which can include a preimage lookup. /// - Rescheduling the task for execution in a later agenda if periodic. fn service_task( - weight: &mut WeightCounter, + weight: &mut WeightMeter, now: T::BlockNumber, when: T::BlockNumber, agenda_index: u32, @@ -1110,7 +1090,7 @@ impl Pallet { /// NOTE: Only the weight for this function will be counted (origin lookup, dispatch and the /// call itself). fn execute_dispatch( - weight: &mut WeightCounter, + weight: &mut WeightMeter, origin: T::PalletsOrigin, call: ::RuntimeCall, ) -> Result { diff --git a/primitives/weights/src/lib.rs b/primitives/weights/src/lib.rs index 954fea91e28dc..af9e730fbfefd 100644 --- a/primitives/weights/src/lib.rs +++ b/primitives/weights/src/lib.rs @@ -26,6 +26,9 @@ #![cfg_attr(not(feature = "std"), no_std)] +extern crate self as sp_weights; + +mod weight_meter; mod weight_v2; use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; @@ -40,6 +43,7 @@ use sp_arithmetic::{ use sp_core::Get; use sp_debug_derive::RuntimeDebug; +pub use weight_meter::*; pub use weight_v2::*; pub mod constants { diff --git a/primitives/weights/src/weight_meter.rs b/primitives/weights/src/weight_meter.rs new file mode 100644 index 0000000000000..d03e72968bb09 --- /dev/null +++ b/primitives/weights/src/weight_meter.rs @@ -0,0 +1,176 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Contains the `WeightMeter` primitive to meter weight usage. + +use super::Weight; + +use sp_arithmetic::Perbill; + +/// Meters consumed weight and a hard limit for the maximal consumable weight. +/// +/// Can be used to check if enough weight for an operation is available before committing to it. +/// +/// # Example +/// +/// ```rust +/// use sp_weights::{Weight, WeightMeter}; +/// +/// // The weight is limited to (10, 0). +/// let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 0)); +/// // There is enough weight remaining for an operation with (5, 0) weight. +/// assert!(meter.check_accrue(Weight::from_parts(5, 0))); +/// // There is not enough weight remaining for an operation with (6, 0) weight. +/// assert!(!meter.check_accrue(Weight::from_parts(6, 0))); +/// ``` +#[derive(Debug, Clone)] +pub struct WeightMeter { + /// The already consumed weight. + pub consumed: Weight, + + /// The maximal consumable weight. 
+ pub limit: Weight, +} + +impl WeightMeter { + /// Creates [`Self`] from a limit for the maximal consumable weight. + pub fn from_limit(limit: Weight) -> Self { + Self { consumed: Weight::zero(), limit } + } + + /// Creates [`Self`] with the maximal possible limit for the consumable weight. + pub fn max_limit() -> Self { + Self::from_limit(Weight::MAX) + } + + /// The remaining weight that can still be consumed. + pub fn remaining(&self) -> Weight { + self.limit.saturating_sub(self.consumed) + } + + /// The ratio of consumed weight to the limit. + /// + /// Calculates one ratio per component and returns the largest. + pub fn consumed_ratio(&self) -> Perbill { + let time = Perbill::from_rational(self.consumed.ref_time(), self.limit.ref_time()); + let pov = Perbill::from_rational(self.consumed.proof_size(), self.limit.proof_size()); + time.max(pov) + } + + /// Consume the given weight after checking that it can be consumed. Otherwise do nothing. + pub fn check_accrue(&mut self, w: Weight) -> bool { + self.consumed.checked_add(&w).map_or(false, |test| { + if test.any_gt(self.limit) { + false + } else { + self.consumed = test; + true + } + }) + } + + /// Check if the given weight can be consumed. + pub fn can_accrue(&self, w: Weight) -> bool { + self.consumed.checked_add(&w).map_or(false, |t| t.all_lte(self.limit)) + } +} + +#[cfg(test)] +mod tests { + use crate::*; + + #[test] + fn weight_meter_remaining_works() { + let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 20)); + + assert!(meter.check_accrue(Weight::from_parts(5, 0))); + assert_eq!(meter.consumed, Weight::from_parts(5, 0)); + assert_eq!(meter.remaining(), Weight::from_parts(5, 20)); + + assert!(meter.check_accrue(Weight::from_parts(2, 10))); + assert_eq!(meter.consumed, Weight::from_parts(7, 10)); + assert_eq!(meter.remaining(), Weight::from_parts(3, 10)); + + assert!(meter.check_accrue(Weight::from_parts(3, 10))); + assert_eq!(meter.consumed, Weight::from_parts(10, 20)); + assert_eq!(meter.remaining(), Weight::from_parts(0, 0)); + } + + #[test] + fn weight_meter_can_accrue_works() { + let meter = WeightMeter::from_limit(Weight::from_parts(1, 1)); + + assert!(meter.can_accrue(Weight::from_parts(0, 0))); + assert!(meter.can_accrue(Weight::from_parts(1, 1))); + assert!(!meter.can_accrue(Weight::from_parts(0, 2))); + assert!(!meter.can_accrue(Weight::from_parts(2, 0))); + assert!(!meter.can_accrue(Weight::from_parts(2, 2))); + } + + #[test] + fn weight_meter_check_accrue_works() { + let mut meter = WeightMeter::from_limit(Weight::from_parts(2, 2)); + + assert!(meter.check_accrue(Weight::from_parts(0, 0))); + assert!(meter.check_accrue(Weight::from_parts(1, 1))); + assert!(!meter.check_accrue(Weight::from_parts(0, 2))); + assert!(!meter.check_accrue(Weight::from_parts(2, 0))); + assert!(!meter.check_accrue(Weight::from_parts(2, 2))); + assert!(meter.check_accrue(Weight::from_parts(0, 1))); + assert!(meter.check_accrue(Weight::from_parts(1, 0))); + } + + #[test] + fn weight_meter_check_and_can_accrue_works() { + let mut meter = WeightMeter::max_limit(); + + assert!(meter.can_accrue(Weight::from_parts(u64::MAX, 0))); + assert!(meter.check_accrue(Weight::from_parts(u64::MAX, 0))); + + assert!(meter.can_accrue(Weight::from_parts(0, u64::MAX))); + assert!(meter.check_accrue(Weight::from_parts(0, u64::MAX))); + + assert!(!meter.can_accrue(Weight::from_parts(0, 1))); + assert!(!meter.check_accrue(Weight::from_parts(0, 1))); + + assert!(!meter.can_accrue(Weight::from_parts(1, 0))); + 
assert!(!meter.check_accrue(Weight::from_parts(1, 0))); + + assert!(meter.can_accrue(Weight::zero())); + assert!(meter.check_accrue(Weight::zero())); + } + + #[test] + fn consumed_ratio_works() { + let mut meter = WeightMeter::from_limit(Weight::from_parts(10, 20)); + + assert!(meter.check_accrue(Weight::from_parts(5, 0))); + assert_eq!(meter.consumed_ratio(), Perbill::from_percent(50)); + assert!(meter.check_accrue(Weight::from_parts(0, 12))); + assert_eq!(meter.consumed_ratio(), Perbill::from_percent(60)); + + assert!(meter.check_accrue(Weight::from_parts(2, 0))); + assert_eq!(meter.consumed_ratio(), Perbill::from_percent(70)); + assert!(meter.check_accrue(Weight::from_parts(0, 4))); + assert_eq!(meter.consumed_ratio(), Perbill::from_percent(80)); + + assert!(meter.check_accrue(Weight::from_parts(3, 0))); + assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); + assert!(meter.check_accrue(Weight::from_parts(0, 4))); + assert_eq!(meter.consumed_ratio(), Perbill::from_percent(100)); + } +} From b042ebdd1fa37010a24668a8342dad0a66b4fdf0 Mon Sep 17 00:00:00 2001 From: Squirrel Date: Fri, 11 Nov 2022 14:22:17 +0000 Subject: [PATCH 345/348] Allow other pallets to check asset ids. (#12666) * Make it easier for other pallets to check asset ids. * Avoid boxing * cargo fmt --- frame/assets/src/impl_fungibles.rs | 11 ++++++++ frame/assets/src/lib.rs | 1 + frame/assets/src/tests.rs | 26 ++++++++++++++++--- frame/support/src/traits/tokens/fungibles.rs | 2 ++ .../src/traits/tokens/fungibles/enumerable.rs | 26 +++++++++++++++++++ 5 files changed, 63 insertions(+), 3 deletions(-) create mode 100644 frame/support/src/traits/tokens/fungibles/enumerable.rs diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs index 842ee5c102c1d..5cddf23680ac2 100644 --- a/frame/assets/src/impl_fungibles.rs +++ b/frame/assets/src/impl_fungibles.rs @@ -283,3 +283,14 @@ impl, I: 'static> fungibles::roles::Inspect<::Ac Asset::::get(asset).map(|x| x.freezer) } } + +impl, I: 'static> fungibles::InspectEnumerable for Pallet { + type AssetsIterator = KeyPrefixIterator<>::AssetId>; + + /// Returns an iterator of the assets in existence. + /// + /// NOTE: iterating this list invokes a storage read per item. 
+ fn asset_ids() -> Self::AssetsIterator { + Asset::::iter_keys() + } +} diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 7e7d68fa6c7dd..017db07194d09 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -154,6 +154,7 @@ use frame_support::{ dispatch::{DispatchError, DispatchResult}, ensure, pallet_prelude::DispatchResultWithPostInfo, + storage::KeyPrefixIterator, traits::{ tokens::{fungibles, DepositConsequence, WithdrawConsequence}, BalanceStatus::Reserved, diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index 48cfad45a49fc..65e8b66705290 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -19,10 +19,19 @@ use super::*; use crate::{mock::*, Error}; -use frame_support::{assert_noop, assert_ok, traits::Currency}; +use frame_support::{ + assert_noop, assert_ok, + traits::{fungibles::InspectEnumerable, Currency}, +}; use pallet_balances::Error as BalancesError; use sp_runtime::{traits::ConvertInto, TokenError}; +fn asset_ids() -> Vec { + let mut s: Vec<_> = Assets::asset_ids().collect(); + s.sort(); + s +} + #[test] fn basic_minting_should_work() { new_test_ext().execute_with(|| { @@ -31,6 +40,7 @@ fn basic_minting_should_work() { assert_eq!(Assets::balance(0, 1), 100); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 2, 100)); assert_eq!(Assets::balance(0, 2), 100); + assert_eq!(asset_ids(), vec![0, 999]); }); } @@ -48,6 +58,7 @@ fn minting_too_many_insufficient_assets_fails() { Balances::make_free_balance_be(&2, 1); assert_ok!(Assets::transfer(RuntimeOrigin::signed(1), 0, 2, 100)); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 2, 1, 100)); + assert_eq!(asset_ids(), vec![0, 1, 2, 999]); }); } @@ -137,6 +148,7 @@ fn refunding_calls_died_hook() { assert_eq!(Asset::::get(0).unwrap().accounts, 0); assert_eq!(hooks(), vec![Hook::Died(0, 1)]); + assert_eq!(asset_ids(), vec![0, 999]); }); } @@ -162,6 +174,7 @@ fn approval_lifecycle_works() { assert_eq!(Assets::balance(0, 1), 60); assert_eq!(Assets::balance(0, 3), 40); assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(asset_ids(), vec![0, 999]); }); } @@ -352,12 +365,14 @@ fn destroy_with_bad_witness_should_not_work() { assert_ok!(Assets::force_create(RuntimeOrigin::root(), 0, 1, true, 1)); let mut w = Asset::::get(0).unwrap().destroy_witness(); assert_ok!(Assets::mint(RuntimeOrigin::signed(1), 0, 10, 100)); + assert_eq!(asset_ids(), vec![0, 999]); // witness too low assert_noop!(Assets::destroy(RuntimeOrigin::signed(1), 0, w), Error::::BadWitness); // witness too high is okay though w.accounts += 2; w.sufficients += 2; assert_ok!(Assets::destroy(RuntimeOrigin::signed(1), 0, w)); + assert_eq!(asset_ids(), vec![999]); }); } @@ -371,10 +386,12 @@ fn destroy_should_refund_approvals() { assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 3, 50)); assert_ok!(Assets::approve_transfer(RuntimeOrigin::signed(1), 0, 4, 50)); assert_eq!(Balances::reserved_balance(&1), 3); + assert_eq!(asset_ids(), vec![0, 999]); let w = Asset::::get(0).unwrap().destroy_witness(); assert_ok!(Assets::destroy(RuntimeOrigin::signed(1), 0, w)); assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(asset_ids(), vec![999]); // all approvals are removed assert!(Approvals::::iter().count().is_zero()) @@ -406,6 +423,7 @@ fn non_providing_should_work() { Balances::make_free_balance_be(&2, 100); assert_ok!(Assets::transfer(RuntimeOrigin::signed(0), 0, 1, 25)); assert_ok!(Assets::force_transfer(RuntimeOrigin::signed(1), 0, 0, 2, 25)); + assert_eq!(asset_ids(), vec![0, 999]); 
}); } @@ -498,6 +516,7 @@ fn transferring_enough_to_kill_source_when_keep_alive_should_fail() { assert_eq!(Assets::balance(0, 1), 10); assert_eq!(Assets::balance(0, 2), 90); assert!(hooks().is_empty()); + assert_eq!(asset_ids(), vec![0, 999]); }); } @@ -582,6 +601,7 @@ fn transfer_owner_should_work() { Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); assert_ok!(Assets::create(RuntimeOrigin::signed(1), 0, 1, 1)); + assert_eq!(asset_ids(), vec![0, 999]); assert_eq!(Balances::reserved_balance(&1), 1); @@ -1012,7 +1032,7 @@ fn balance_conversion_should_work() { assert_ok!(Assets::force_create(RuntimeOrigin::root(), id, 1, true, 10)); let not_sufficient = 23; assert_ok!(Assets::force_create(RuntimeOrigin::root(), not_sufficient, 1, false, 10)); - + assert_eq!(asset_ids(), vec![23, 42, 999]); assert_eq!( BalanceToAssetBalance::::to_asset_balance(100, 1234), Err(ConversionError::AssetMissing) @@ -1035,7 +1055,7 @@ fn balance_conversion_should_work() { #[test] fn assets_from_genesis_should_exist() { new_test_ext().execute_with(|| { - assert!(Asset::::contains_key(999)); + assert_eq!(asset_ids(), vec![999]); assert!(Metadata::::contains_key(999)); assert_eq!(Assets::balance(999, 1), 100); assert_eq!(Assets::total_supply(999), 100); diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs index e4108b7f80a98..9bdd5a10d7944 100644 --- a/frame/support/src/traits/tokens/fungibles.rs +++ b/frame/support/src/traits/tokens/fungibles.rs @@ -27,6 +27,8 @@ use sp_std::vec::Vec; pub mod approvals; mod balanced; +pub mod enumerable; +pub use enumerable::InspectEnumerable; pub mod metadata; pub use balanced::{Balanced, Unbalanced}; mod imbalance; diff --git a/frame/support/src/traits/tokens/fungibles/enumerable.rs b/frame/support/src/traits/tokens/fungibles/enumerable.rs new file mode 100644 index 0000000000000..151d15b3684a5 --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/enumerable.rs @@ -0,0 +1,26 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::traits::fungibles::Inspect; + +/// Interface for enumerating assets in existence or owned by a given account. +pub trait InspectEnumerable: Inspect { + type AssetsIterator; + + /// Returns an iterator of the collections in existence. 
+ fn asset_ids() -> Self::AssetsIterator; +} From 67e3f9b99cd10530e45d203db7df0e340307f0b0 Mon Sep 17 00:00:00 2001 From: yjh Date: Fri, 11 Nov 2022 22:25:14 +0800 Subject: [PATCH 346/348] derive type info for some grandpa types (#12683) --- primitives/finality-grandpa/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index f1584dc673228..f92fee4f12963 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -129,7 +129,7 @@ pub type CompactCommit
= grandpa::CompactCommit< /// /// This is meant to be stored in the db and passed around the network to other /// nodes, and are used by syncing nodes to prove authority set handoffs. -#[derive(Clone, Encode, Decode, PartialEq, Eq)] +#[derive(Clone, Encode, Decode, PartialEq, Eq, TypeInfo)] #[cfg_attr(feature = "std", derive(Debug))] pub struct GrandpaJustification { pub round: u64, @@ -139,7 +139,7 @@ pub struct GrandpaJustification { /// A scheduled change of authority set. #[cfg_attr(feature = "std", derive(Serialize))] -#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ScheduledChange { /// The new authorities after the change, along with their respective weights. pub next_authorities: AuthorityList, From e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 11 Nov 2022 16:22:26 +0100 Subject: [PATCH 347/348] Safe TreeRoute constructor (#12691) * Safe TreeRoute constructor * Remove test duplicate * Better tree route error info --- client/db/src/lib.rs | 8 ++++ .../transaction-pool/src/enactment_state.rs | 42 +++++++++++-------- primitives/blockchain/src/header_metadata.rs | 14 +++++-- 3 files changed, 43 insertions(+), 21 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 3bbff1625f2f9..305db2284b2ed 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -2796,6 +2796,14 @@ pub(crate) mod tests { let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); let b2 = insert_header(&backend, 2, b1, None, Default::default()); + { + let tree_route = tree_route(blockchain, a1, a1).unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert!(tree_route.retracted().is_empty()); + assert!(tree_route.enacted().is_empty()); + } + { let tree_route = tree_route(blockchain, a3, b2).unwrap(); diff --git a/client/transaction-pool/src/enactment_state.rs b/client/transaction-pool/src/enactment_state.rs index b347de824fa12..6aac98641cf85 100644 --- a/client/transaction-pool/src/enactment_state.rs +++ b/client/transaction-pool/src/enactment_state.rs @@ -231,14 +231,19 @@ mod enactment_state_tests { } }; - Ok(TreeRoute::new(vec, pivot)) + TreeRoute::new(vec, pivot) } mod mock_tree_route_tests { use super::*; /// asserts that tree routes are equal - fn assert_treeroute_eq(expected: TreeRoute, result: TreeRoute) { + fn assert_treeroute_eq( + expected: Result, String>, + result: Result, String>, + ) { + let expected = expected.unwrap(); + let result = result.unwrap(); assert_eq!(result.common_block().hash, expected.common_block().hash); assert_eq!(result.enacted().len(), expected.enacted().len()); assert_eq!(result.retracted().len(), expected.retracted().len()); @@ -255,65 +260,66 @@ mod enactment_state_tests { } // some tests for mock tree_route function + #[test] fn tree_route_mock_test_01() { - let result = tree_route(b1().hash, a().hash).expect("tree route exists"); + let result = tree_route(b1().hash, a().hash); let expected = TreeRoute::new(vec![b1(), a()], 1); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_02() { - let result = tree_route(a().hash, b1().hash).expect("tree route exists"); + let result = tree_route(a().hash, b1().hash); let expected = TreeRoute::new(vec![a(), b1()], 0); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_03() { - let result = tree_route(a().hash, c2().hash).expect("tree route exists"); + let result = tree_route(a().hash, c2().hash); 
let expected = TreeRoute::new(vec![a(), b2(), c2()], 0); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_04() { - let result = tree_route(e2().hash, a().hash).expect("tree route exists"); + let result = tree_route(e2().hash, a().hash); let expected = TreeRoute::new(vec![e2(), d2(), c2(), b2(), a()], 4); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_05() { - let result = tree_route(d1().hash, b1().hash).expect("tree route exists"); + let result = tree_route(d1().hash, b1().hash); let expected = TreeRoute::new(vec![d1(), c1(), b1()], 2); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_06() { - let result = tree_route(d2().hash, b2().hash).expect("tree route exists"); + let result = tree_route(d2().hash, b2().hash); let expected = TreeRoute::new(vec![d2(), c2(), b2()], 2); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_07() { - let result = tree_route(b1().hash, d1().hash).expect("tree route exists"); + let result = tree_route(b1().hash, d1().hash); let expected = TreeRoute::new(vec![b1(), c1(), d1()], 0); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_08() { - let result = tree_route(b2().hash, d2().hash).expect("tree route exists"); + let result = tree_route(b2().hash, d2().hash); let expected = TreeRoute::new(vec![b2(), c2(), d2()], 0); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_09() { - let result = tree_route(e2().hash, e1().hash).expect("tree route exists"); + let result = tree_route(e2().hash, e1().hash); let expected = TreeRoute::new(vec![e2(), d2(), c2(), b2(), a(), b1(), c1(), d1(), e1()], 4); assert_treeroute_eq(result, expected); @@ -321,49 +327,49 @@ mod enactment_state_tests { #[test] fn tree_route_mock_test_10() { - let result = tree_route(e1().hash, e2().hash).expect("tree route exists"); + let result = tree_route(e1().hash, e2().hash); let expected = TreeRoute::new(vec![e1(), d1(), c1(), b1(), a(), b2(), c2(), d2(), e2()], 4); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_11() { - let result = tree_route(b1().hash, c2().hash).expect("tree route exists"); + let result = tree_route(b1().hash, c2().hash); let expected = TreeRoute::new(vec![b1(), a(), b2(), c2()], 1); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_12() { - let result = tree_route(d2().hash, b1().hash).expect("tree route exists"); + let result = tree_route(d2().hash, b1().hash); let expected = TreeRoute::new(vec![d2(), c2(), b2(), a(), b1()], 3); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_13() { - let result = tree_route(c2().hash, e1().hash).expect("tree route exists"); + let result = tree_route(c2().hash, e1().hash); let expected = TreeRoute::new(vec![c2(), b2(), a(), b1(), c1(), d1(), e1()], 2); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_14() { - let result = tree_route(b1().hash, b1().hash).expect("tree route exists"); + let result = tree_route(b1().hash, b1().hash); let expected = TreeRoute::new(vec![b1()], 0); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_15() { - let result = tree_route(b2().hash, b2().hash).expect("tree route exists"); + let result = tree_route(b2().hash, b2().hash); let expected = TreeRoute::new(vec![b2()], 0); assert_treeroute_eq(result, expected); } #[test] fn tree_route_mock_test_16() { - let result = tree_route(a().hash, a().hash).expect("tree route exists"); + let result = 
tree_route(a().hash, a().hash); let expected = TreeRoute::new(vec![a()], 0); assert_treeroute_eq(result, expected); } diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs index 1db37b47e4d44..87ac44459987e 100644 --- a/primitives/blockchain/src/header_metadata.rs +++ b/primitives/blockchain/src/header_metadata.rs @@ -179,9 +179,17 @@ pub struct TreeRoute { impl TreeRoute { /// Creates a new `TreeRoute`. /// - /// It is required that `pivot >= route.len()`, otherwise it may panics. - pub fn new(route: Vec>, pivot: usize) -> Self { - TreeRoute { route, pivot } + /// To preserve the structure safety invariats it is required that `pivot < route.len()`. + pub fn new(route: Vec>, pivot: usize) -> Result { + if pivot < route.len() { + Ok(TreeRoute { route, pivot }) + } else { + Err(format!( + "TreeRoute pivot ({}) should be less than route length ({})", + pivot, + route.len() + )) + } } /// Get a slice of all retracted blocks in reverse order (towards common ancestor). From 3e71d606b7d1e91d9c1701c0a443530eefca1a39 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Sun, 13 Nov 2022 19:48:11 +0100 Subject: [PATCH 348/348] New `root_testing` pallet (#12451) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Move fill_block to RootOffences * docs * new pallet * new line * fix * Update frame/root-testing/src/lib.rs Co-authored-by: Bastian Köcher * Update frame/root-testing/src/lib.rs Co-authored-by: Bastian Köcher * Update bin/node/runtime/src/lib.rs Co-authored-by: Bastian Köcher * Update frame/root-testing/src/lib.rs Co-authored-by: Bastian Köcher * Update frame/root-testing/src/lib.rs Co-authored-by: Bastian Köcher * Update frame/root-testing/src/lib.rs Co-authored-by: Bastian Köcher * fixes * problem solved * revert * fix dependency * hopefully making the CI happy * ... 
* dummy call * remove dummy * fix warning Co-authored-by: Bastian Köcher --- Cargo.lock | 17 +++++++++-- Cargo.toml | 1 + bin/node/executor/Cargo.toml | 1 + bin/node/executor/tests/fees.rs | 6 ++-- bin/node/runtime/Cargo.toml | 3 ++ bin/node/runtime/src/lib.rs | 3 ++ frame/root-offences/Cargo.toml | 7 ++--- frame/root-offences/README.md | 2 +- frame/root-offences/src/lib.rs | 2 +- frame/root-testing/Cargo.toml | 34 +++++++++++++++++++++ frame/root-testing/README.md | 5 +++ frame/root-testing/src/lib.rs | 54 +++++++++++++++++++++++++++++++++ frame/system/src/lib.rs | 20 +----------- frame/system/src/mock.rs | 2 +- frame/utility/Cargo.toml | 1 + frame/utility/src/tests.rs | 9 ++++-- 16 files changed, 134 insertions(+), 33 deletions(-) create mode 100644 frame/root-testing/Cargo.toml create mode 100644 frame/root-testing/README.md create mode 100644 frame/root-testing/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index f0471d019c1a2..beb234518041e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3401,6 +3401,7 @@ dependencies = [ "pallet-recovery", "pallet-referenda", "pallet-remark", + "pallet-root-testing", "pallet-scheduler", "pallet-session", "pallet-session-benchmarking", @@ -4589,6 +4590,7 @@ dependencies = [ "pallet-balances", "pallet-contracts", "pallet-im-online", + "pallet-root-testing", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", @@ -5957,13 +5959,12 @@ dependencies = [ [[package]] name = "pallet-root-offences" -version = "1.0.0" +version = "1.0.0-dev" dependencies = [ "frame-election-provider-support", "frame-support", "frame-system", "pallet-balances", - "pallet-offences", "pallet-session", "pallet-staking", "pallet-staking-reward-curve", @@ -5977,6 +5978,17 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-root-testing" +version = "1.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-runtime", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -6317,6 +6329,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", + "pallet-root-testing", "pallet-collective", "pallet-timestamp", "parity-scale-codec", diff --git a/Cargo.toml b/Cargo.toml index ab8fbd816b004..956c106e0dc2d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,6 +135,7 @@ members = [ "frame/state-trie-migration", "frame/sudo", "frame/root-offences", + "frame/root-testing", "frame/support", "frame/support/procedural", "frame/support/procedural/tools", diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index 651e4657dde32..681eb79f0d224 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -38,6 +38,7 @@ pallet-sudo = { version = "4.0.0-dev", path = "../../../frame/sudo" } pallet-timestamp = { version = "4.0.0-dev", path = "../../../frame/timestamp" } pallet-treasury = { version = "4.0.0-dev", path = "../../../frame/treasury" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } +pallet-root-testing = { version = "1.0.0-dev", path = "../../../frame/root-testing" } sp-application-crypto = { version = "6.0.0", path = "../../../primitives/application-crypto" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } sp-externalities = { version = "0.12.0", path = "../../../primitives/externalities" } diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index 6932cb2cea867..3c696d595040b 100644 --- a/bin/node/executor/tests/fees.rs +++ 
b/bin/node/executor/tests/fees.rs @@ -60,9 +60,9 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), function: RuntimeCall::Sudo(pallet_sudo::Call::sudo { - call: Box::new(RuntimeCall::System(frame_system::Call::fill_block { - ratio: Perbill::from_percent(60), - })), + call: Box::new(RuntimeCall::RootTesting( + pallet_root_testing::Call::fill_block { ratio: Perbill::from_percent(60) }, + )), }), }, ], diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 39364961d57e2..c45d468c59616 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -89,6 +89,7 @@ pallet-ranked-collective = { version = "4.0.0-dev", default-features = false, pa pallet-recovery = { version = "4.0.0-dev", default-features = false, path = "../../../frame/recovery" } pallet-referenda = { version = "4.0.0-dev", default-features = false, path = "../../../frame/referenda" } pallet-remark = { version = "4.0.0-dev", default-features = false, path = "../../../frame/remark" } +pallet-root-testing = { version = "1.0.0-dev", default-features = false, path = "../../../frame/root-testing" } pallet-session = { version = "4.0.0-dev", features = [ "historical" ], path = "../../../frame/session", default-features = false } pallet-session-benchmarking = { version = "4.0.0-dev", path = "../../../frame/session/benchmarking", default-features = false, optional = true } pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking" } @@ -192,6 +193,7 @@ std = [ "pallet-ranked-collective/std", "pallet-referenda/std", "pallet-remark/std", + "pallet-root-testing/std", "pallet-recovery/std", "pallet-uniques/std", "pallet-vesting/std", @@ -292,6 +294,7 @@ try-runtime = [ "pallet-recovery/try-runtime", "pallet-referenda/try-runtime", "pallet-remark/try-runtime", + "pallet-root-testing/try-runtime", "pallet-session/try-runtime", "pallet-staking/try-runtime", "pallet-state-trie-migration/try-runtime", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index eab40634ff241..6c3a46e529727 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -909,6 +909,8 @@ impl pallet_remark::Config for Runtime { type RuntimeEvent = RuntimeEvent; } +impl pallet_root_testing::Config for Runtime {} + parameter_types! 
{ pub const LaunchPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; pub const VotingPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; @@ -1670,6 +1672,7 @@ construct_runtime!( ChildBounties: pallet_child_bounties, Referenda: pallet_referenda, Remark: pallet_remark, + RootTesting: pallet_root_testing, ConvictionVoting: pallet_conviction_voting, Whitelist: pallet_whitelist, AllianceMotion: pallet_collective::, diff --git a/frame/root-offences/Cargo.toml b/frame/root-offences/Cargo.toml index ea6a6527848aa..a205fc4aa6ca7 100644 --- a/frame/root-offences/Cargo.toml +++ b/frame/root-offences/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "pallet-root-offences" -version = "1.0.0" +version = "1.0.0-dev" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME root offences pallet" +readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -17,11 +18,10 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" pallet-session = { version = "4.0.0-dev", features = [ "historical" ], path = "../../frame/session", default-features = false } pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../frame/staking" } -pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../frame/offences" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } [dev-dependencies] @@ -45,7 +45,6 @@ std = [ "frame-system/std", "pallet-session/std", "pallet-staking/std", - "pallet-offences/std", "scale-info/std", "sp-runtime/std", ] diff --git a/frame/root-offences/README.md b/frame/root-offences/README.md index a2c5261b6985a..c582158721816 100644 --- a/frame/root-offences/README.md +++ b/frame/root-offences/README.md @@ -1,4 +1,4 @@ -# Sudo Offences Pallet +# Root Offences Pallet Pallet that allows the root to create an offence. diff --git a/frame/root-offences/src/lib.rs b/frame/root-offences/src/lib.rs index b4b549627f3fa..298fe0078a6a6 100644 --- a/frame/root-offences/src/lib.rs +++ b/frame/root-offences/src/lib.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Sudo Offences Pallet +//! # Root Offences Pallet //! Pallet that allows the root to create an offence. //! //! NOTE: This pallet should be used for testing purposes. 
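A minimal sketch of exercising the relocated `fill_block` call from a pallet test; the runtime changes above route this same call through `RuntimeCall::RootTesting`, and the call itself is gated on a root origin. The mock runtime, the `new_test_ext()` helper and the `u64` account id below are illustrative assumptions, not part of this patch:

```rust
// Sketch only: assumes a test runtime whose `construct_runtime!` names the pallet
// `RootTesting` (as in the runtime diff above) and a hypothetical `new_test_ext()`
// externalities helper such as the ones used in the pallet test suites.
use frame_support::{assert_noop, assert_ok};
use sp_runtime::{DispatchError, Perbill};

#[test]
fn fill_block_requires_root() {
    new_test_ext().execute_with(|| {
        let ratio = Perbill::from_percent(60);
        // A signed origin is rejected: the dispatch is gated by `ensure_root`.
        assert_noop!(
            RootTesting::fill_block(RuntimeOrigin::signed(1), ratio),
            DispatchError::BadOrigin,
        );
        // Root may fill the block up to the given ratio of the maximum block weight.
        assert_ok!(RootTesting::fill_block(RuntimeOrigin::root(), ratio));
    });
}
```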
diff --git a/frame/root-testing/Cargo.toml b/frame/root-testing/Cargo.toml new file mode 100644 index 0000000000000..c625d640bc289 --- /dev/null +++ b/frame/root-testing/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "pallet-root-testing" +version = "1.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME root testing pallet" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } + +[dev-dependencies] + +[features] +try-runtime = ["frame-support/try-runtime"] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-runtime/std", +] diff --git a/frame/root-testing/README.md b/frame/root-testing/README.md new file mode 100644 index 0000000000000..637430445a22f --- /dev/null +++ b/frame/root-testing/README.md @@ -0,0 +1,5 @@ +# Root Testing Pallet + +Pallet that contains extrinsics that can be usefull in testing. + +NOTE: This pallet should only be used for testing purposes and should not be used in production runtimes! \ No newline at end of file diff --git a/frame/root-testing/src/lib.rs b/frame/root-testing/src/lib.rs new file mode 100644 index 0000000000000..25d66cfac202d --- /dev/null +++ b/frame/root-testing/src/lib.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Root Testing Pallet +//! +//! Pallet that contains extrinsics that can be usefull in testing. +//! +//! NOTE: This pallet should only be used for testing purposes and should not be used in production +//! runtimes! + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::dispatch::DispatchResult; +use sp_runtime::Perbill; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::call] + impl Pallet { + /// A dispatch that will fill the block weight up to the given ratio. 
+ #[pallet::weight(*_ratio * T::BlockWeights::get().max_block)] + pub fn fill_block(origin: OriginFor, _ratio: Perbill) -> DispatchResult { + ensure_root(origin)?; + Ok(()) + } + } +} diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 7577d0dc6b158..477ebb97fbd95 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -75,7 +75,7 @@ use sp_runtime::{ CheckEqual, Dispatchable, Hash, Lookup, LookupError, MaybeDisplay, MaybeMallocSizeOf, MaybeSerializeDeserialize, Member, One, Saturating, SimpleBitOps, StaticLookup, Zero, }, - DispatchError, Perbill, RuntimeDebug, + DispatchError, RuntimeDebug, }; #[cfg(any(feature = "std", test))] use sp_std::map; @@ -197,7 +197,6 @@ impl, MaxOverflow: Get> ConsumerLimits for (MaxNormal, pub mod pallet { use crate::{self as frame_system, pallet_prelude::*, *}; use frame_support::pallet_prelude::*; - use sp_runtime::DispatchErrorWithPostInfo; /// System configuration trait. Implemented by runtime. #[pallet::config] @@ -370,23 +369,6 @@ pub mod pallet { #[pallet::call] impl Pallet { - /// A dispatch that will fill the block weight up to the given ratio. - // TODO: This should only be available for testing, rather than in general usage, but - // that's not possible at present (since it's within the pallet macro). - #[pallet::weight(*_ratio * T::BlockWeights::get().max_block)] - pub fn fill_block(origin: OriginFor, _ratio: Perbill) -> DispatchResultWithPostInfo { - match ensure_root(origin) { - Ok(_) => Ok(().into()), - Err(_) => { - // roughly same as a 4 byte remark since perbill is u32. - Err(DispatchErrorWithPostInfo { - post_info: Some(T::SystemWeightInfo::remark(4u32)).into(), - error: DispatchError::BadOrigin, - }) - }, - } - } - /// Make some on-chain remark. /// /// # diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index d31a1b08667e5..fb230f66a94f7 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -24,7 +24,7 @@ use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, + BuildStorage, Perbill, }; type UncheckedExtrinsic = mocking::MockUncheckedExtrinsic; diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index ac4f52c6bb9f3..f49348338394e 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -25,6 +25,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../../primitives [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-root-testing = { version = "1.0.0-dev", path = "../root-testing" } pallet-collective = { version = "4.0.0-dev", path = "../collective" } pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index c374f5ae21099..848fc374619b7 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -133,6 +133,7 @@ frame_support::construct_runtime!( System: frame_system::{Pallet, Call, Config, Storage, Event}, Timestamp: pallet_timestamp::{Call, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + RootTesting: pallet_root_testing::{Pallet, Call, Storage}, Council: pallet_collective::, Utility: utility::{Pallet, Call, Event}, Example: example::{Pallet, Call}, @@ -183,6 +184,8 @@ impl pallet_balances::Config for Test { type WeightInfo = (); } +impl pallet_root_testing::Config for Test {} + impl pallet_timestamp::Config for Test { type Moment = u64; type 
OnTimestampSet = (); @@ -247,6 +250,7 @@ type UtilityCall = crate::Call; use frame_system::Call as SystemCall; use pallet_balances::{Call as BalancesCall, Error as BalancesError}; +use pallet_root_testing::Call as RootTestingCall; use pallet_timestamp::Call as TimestampCall; pub fn new_test_ext() -> sp_io::TestExternalities { @@ -469,8 +473,9 @@ fn batch_early_exit_works() { fn batch_weight_calculation_doesnt_overflow() { use sp_runtime::Perbill; new_test_ext().execute_with(|| { - let big_call = - RuntimeCall::System(SystemCall::fill_block { ratio: Perbill::from_percent(50) }); + let big_call = RuntimeCall::RootTesting(RootTestingCall::fill_block { + ratio: Perbill::from_percent(50), + }); assert_eq!(big_call.get_dispatch_info().weight, Weight::MAX / 2); // 3 * 50% saturates to 100%
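Taken together with the new pallet above, a minimal usage sketch of the relocated `fill_block` call, assuming a mock runtime like the one in frame/utility/src/tests.rs (with `RootTesting: pallet_root_testing` declared in `construct_runtime!`, `impl pallet_root_testing::Config for Test {}`, and that file's existing imports):

use pallet_root_testing::Call as RootTestingCall;
use sp_runtime::Perbill;

new_test_ext().execute_with(|| {
	// The declared weight is `ratio * T::BlockWeights::get().max_block`, i.e. half of
	// `Weight::MAX` in this mock.
	let call = RuntimeCall::RootTesting(RootTestingCall::fill_block {
		ratio: Perbill::from_percent(50),
	});
	assert_eq!(call.get_dispatch_info().weight, Weight::MAX / 2);

	// Dispatching requires the root origin; `fill_block` is a no-op apart from the
	// weight it charges.
	assert_ok!(RootTesting::fill_block(RuntimeOrigin::root(), Perbill::from_percent(50)));
});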

(slot, epoch_data); expected_author.and_then(|p| { @@ -382,7 +382,7 @@ where body: Vec, storage_changes: StorageChanges<>::Transaction, B>, public: Self::Claim, - _epoch: Self::EpochData, + _epoch: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index aef4785b7bb81..109e5aade02a7 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -729,7 +729,6 @@ where BS: BackoffAuthoringBlocksStrategy> + Sync, Error: std::error::Error + Send + From + From + 'static, { - type EpochData = ViableEpochDescriptor, Epoch>; type Claim = (PreDigest, AuthorityId); type SyncOracle = SO; type JustificationSyncLink = L; @@ -737,6 +736,7 @@ where Pin> + Send + 'static>>; type Proposer = E::Proposer; type BlockImport = I; + type AuxData = ViableEpochDescriptor, Epoch>; fn logging_target(&self) -> &'static str { "babe" @@ -746,11 +746,7 @@ where &mut self.block_import } - fn epoch_data( - &self, - parent: &B::Header, - slot: Slot, - ) -> Result { + fn aux_data(&self, parent: &B::Header, slot: Slot) -> Result { self.epoch_changes .shared_data() .epoch_descriptor_for_child_of( @@ -763,7 +759,7 @@ where .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } - fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + fn authorities_len(&self, epoch_descriptor: &Self::AuxData) -> Option { self.epoch_changes .shared_data() .viable_epoch(epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) @@ -823,7 +819,7 @@ where body: Vec, storage_changes: StorageChanges<>::Transaction, B>, (_, public): Self::Claim, - epoch_descriptor: Self::EpochData, + epoch_descriptor: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 7c5d5d4a73bc1..6225bbbda1745 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -101,8 +101,8 @@ pub trait SimpleSlotWorker { /// Data associated with a slot claim. type Claim: Send + Sync + 'static; - /// Epoch data necessary for authoring. - type EpochData: Send + Sync + 'static; + /// Auxiliary data necessary for authoring. + type AuxData: Send + Sync + 'static; /// The logging target to use when logging messages. fn logging_target(&self) -> &'static str; @@ -110,29 +110,28 @@ pub trait SimpleSlotWorker { /// A handle to a `BlockImport`. fn block_import(&mut self) -> &mut Self::BlockImport; - /// Returns the epoch data necessary for authoring. For time-dependent epochs, - /// use the provided slot number as a canonical source of time. - fn epoch_data( + /// Returns the auxiliary data necessary for authoring. + fn aux_data( &self, header: &B::Header, slot: Slot, - ) -> Result; + ) -> Result; - /// Returns the number of authorities given the epoch data. + /// Returns the number of authorities. /// None indicate that the authorities information is incomplete. - fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option; + fn authorities_len(&self, aux_data: &Self::AuxData) -> Option; /// Tries to claim the given slot, returning an object with claim data if successful. async fn claim_slot( &self, header: &B::Header, slot: Slot, - epoch_data: &Self::EpochData, + aux_data: &Self::AuxData, ) -> Option; /// Notifies the given slot. Similar to `claim_slot`, but will be called no matter whether we /// need to author blocks or not. 
- fn notify_slot(&self, _header: &B::Header, _slot: Slot, _epoch_data: &Self::EpochData) {} + fn notify_slot(&self, _header: &B::Header, _slot: Slot, _aux_data: &Self::AuxData) {} /// Return the pre digest data to include in a block authored with the given claim. fn pre_digest_data(&self, slot: Slot, claim: &Self::Claim) -> Vec; @@ -145,7 +144,7 @@ pub trait SimpleSlotWorker { body: Vec, storage_changes: StorageChanges<>::Transaction, B>, public: Self::Claim, - epoch: Self::EpochData, + epoch: Self::AuxData, ) -> Result< sc_consensus::BlockImportParams>::Transaction>, sp_consensus::Error, @@ -268,12 +267,12 @@ pub trait SimpleSlotWorker { Delay::new(proposing_remaining_duration) }; - let epoch_data = match self.epoch_data(&slot_info.chain_head, slot) { - Ok(epoch_data) => epoch_data, + let aux_data = match self.aux_data(&slot_info.chain_head, slot) { + Ok(aux_data) => aux_data, Err(err) => { warn!( target: logging_target, - "Unable to fetch epoch data at block {:?}: {}", + "Unable to fetch auxiliary data for block {:?}: {}", slot_info.chain_head.hash(), err, ); @@ -290,9 +289,9 @@ pub trait SimpleSlotWorker { }, }; - self.notify_slot(&slot_info.chain_head, slot, &epoch_data); + self.notify_slot(&slot_info.chain_head, slot, &aux_data); - let authorities_len = self.authorities_len(&epoch_data); + let authorities_len = self.authorities_len(&aux_data); if !self.force_authoring() && self.sync_oracle().is_offline() && @@ -309,7 +308,7 @@ pub trait SimpleSlotWorker { return None } - let claim = self.claim_slot(&slot_info.chain_head, slot, &epoch_data).await?; + let claim = self.claim_slot(&slot_info.chain_head, slot, &aux_data).await?; if self.should_backoff(slot, &slot_info.chain_head) { return None @@ -351,7 +350,7 @@ pub trait SimpleSlotWorker { body.clone(), proposal.storage_changes, claim, - epoch_data, + aux_data, ) .await { From 2a6c314cdce2b7813fbe2af2d21388ff5ededcbe Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Tue, 27 Sep 2022 13:44:20 +0200 Subject: [PATCH 180/348] Pallet staking events to named enum (#12342) * Pallet staking events to named enum * fmt * update np staking tests * update remaining events * update benchmarks * Update frame/nomination-pools/test-staking/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/staking/src/pallet/mod.rs * Update frame/staking/src/pallet/mod.rs * Update frame/staking/src/lib.rs * Update frame/staking/src/pallet/impls.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: parity-processbot <> Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> --- .../nomination-pools/test-staking/src/lib.rs | 109 ++++++++++++------ frame/offences/benchmarking/src/lib.rs | 4 +- frame/staking/src/lib.rs | 4 +- frame/staking/src/pallet/impls.rs | 26 +++-- frame/staking/src/pallet/mod.rs | 60 +++++----- frame/staking/src/slashing.rs | 5 +- frame/staking/src/tests.rs | 55 +++++---- 7 files changed, 165 insertions(+), 98 deletions(-) diff --git a/frame/nomination-pools/test-staking/src/lib.rs b/frame/nomination-pools/test-staking/src/lib.rs index 7d848e98174b4..00e0e40ce33b0 100644 --- a/frame/nomination-pools/test-staking/src/lib.rs +++ b/frame/nomination-pools/test-staking/src/lib.rs @@ -41,7 +41,10 @@ fn pool_lifecycle_e2e() { // have the pool nominate. 
assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3])); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 50),]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -56,7 +59,10 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, 10), StakingEvent::Bonded(POOL1_BONDED, 10),] + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 }, + ] ); assert_eq!( pool_events_since_last_call(), @@ -87,8 +93,8 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10), + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, ] ); assert_eq!( @@ -131,7 +137,7 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn(POOL1_BONDED, 20),] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 20 },] ); assert_eq!( pool_events_since_last_call(), @@ -155,7 +161,10 @@ fn pool_lifecycle_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Chilled(POOL1_BONDED), StakingEvent::Unbonded(POOL1_BONDED, 50),] + vec![ + StakingEvent::Chilled { stash: POOL1_BONDED }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 50 }, + ] ); assert_eq!( pool_events_since_last_call(), @@ -169,7 +178,7 @@ fn pool_lifecycle_e2e() { // pools is fully destroyed now. assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn(POOL1_BONDED, 50),] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 50 },] ); assert_eq!( pool_events_since_last_call(), @@ -193,7 +202,10 @@ fn pool_slash_e2e() { assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -210,7 +222,10 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, 20), StakingEvent::Bonded(POOL1_BONDED, 20)] + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 } + ] ); assert_eq!( pool_events_since_last_call(), @@ -230,8 +245,8 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10) + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 } ] ); assert_eq!( @@ -253,9 +268,9 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10), - StakingEvent::Unbonded(POOL1_BONDED, 10), + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, ] ); @@ -278,7 +293,10 @@ fn pool_slash_e2e() { 2, // slash era 2, affects chunks at era 5 onwards. 
); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -302,7 +320,10 @@ fn pool_slash_e2e() { unbonding_eras: bounded_btree_map!(5 => 10, 6 => 5) } ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Unbonded(POOL1_BONDED, 5)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }] + ); assert_eq!( pool_events_since_last_call(), vec![PoolsEvent::Unbonded { member: 21, pool_id: 1, balance: 5, points: 5, era: 6 }] @@ -327,7 +348,7 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), // a 10 (un-slashed) + 10/2 (slashed) balance from 10 has also been unlocked - vec![StakingEvent::Withdrawn(POOL1_BONDED, 15 + 10 + 15)] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 15 + 10 + 15 }] ); // now, finally, we can unbond the depositor further than their current limit. @@ -336,7 +357,7 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, 10)] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }] ); assert_eq!( pool_events_since_last_call(), @@ -361,7 +382,7 @@ fn pool_slash_e2e() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Withdrawn(POOL1_BONDED, 10)] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 10 }] ); assert_eq!( pool_events_since_last_call(), @@ -388,7 +409,10 @@ fn pool_slash_proportional() { assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); assert_eq!(LastPoolId::::get(), 1); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -406,9 +430,9 @@ fn pool_slash_proportional() { assert_eq!( staking_events_since_last_call(), vec![ - StakingEvent::Bonded(POOL1_BONDED, bond), - StakingEvent::Bonded(POOL1_BONDED, bond), - StakingEvent::Bonded(POOL1_BONDED, bond), + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }, ] ); assert_eq!( @@ -428,7 +452,7 @@ fn pool_slash_proportional() { assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] ); assert_eq!( pool_events_since_last_call(), @@ -445,7 +469,7 @@ fn pool_slash_proportional() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(21), 21, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] ); assert_eq!( pool_events_since_last_call(), @@ -462,7 +486,7 @@ fn pool_slash_proportional() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond),] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond },] ); assert_eq!( pool_events_since_last_call(), @@ -486,7 +510,10 @@ fn pool_slash_proportional() { 100, ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]); + assert_eq!( + 
staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -517,7 +544,10 @@ fn pool_slash_non_proportional_only_bonded_pool() { // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -531,7 +561,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, bond)] + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -543,7 +573,7 @@ fn pool_slash_non_proportional_only_bonded_pool() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond)] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -567,7 +597,10 @@ fn pool_slash_non_proportional_only_bonded_pool() { 100, ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 30)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }] + ); assert_eq!( pool_events_since_last_call(), vec![PoolsEvent::PoolSlashed { pool_id: 1, balance: 10 }] @@ -590,7 +623,10 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { // create the pool, we know this has id 1. assert_ok!(Pools::create(RuntimeOrigin::signed(10), 40, 10, 10, 10)); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Bonded(POOL1_BONDED, 40)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); assert_eq!( pool_events_since_last_call(), vec![ @@ -604,7 +640,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { assert_ok!(Pools::join(RuntimeOrigin::signed(20), bond, 1)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Bonded(POOL1_BONDED, bond)] + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -616,7 +652,7 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { assert_ok!(Pools::unbond(RuntimeOrigin::signed(20), 20, bond)); assert_eq!( staking_events_since_last_call(), - vec![StakingEvent::Unbonded(POOL1_BONDED, bond)] + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: bond }] ); assert_eq!( pool_events_since_last_call(), @@ -640,7 +676,10 @@ fn pool_slash_non_proportional_bonded_pool_and_chunks() { 100, ); - assert_eq!(staking_events_since_last_call(), vec![StakingEvent::Slashed(POOL1_BONDED, 50)]); + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 50 }] + ); assert_eq!( pool_events_since_last_call(), vec![ diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index c9498214eade4..555ec42882ee1 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -309,13 +309,13 @@ benchmarks! 
{ let reward_amount = slash_amount.saturating_mul(1 + n) / 2; let reward = reward_amount / r; let slash = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) + ::RuntimeEvent::from(StakingEvent::::Slashed{staker: id, amount: BalanceOf::::from(slash_amount)}) ); let balance_slash = |id| core::iter::once( ::RuntimeEvent::from(pallet_balances::Event::::Slashed{who: id, amount: slash_amount.into()}) ); let chill = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Chilled(id)) + ::RuntimeEvent::from(StakingEvent::::Chilled{stash: id}) ); let balance_deposit = |id, amount: u32| ::RuntimeEvent::from(pallet_balances::Event::::Deposit{who: id, amount: amount.into()}); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index df568d6b596ba..eb30671d35a57 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -953,7 +953,9 @@ where if bonded_eras.first().filter(|(_, start)| offence_session >= *start).is_some() { R::report_offence(reporters, offence) } else { - >::deposit_event(Event::::OldSlashingReportDiscarded(offence_session)); + >::deposit_event(Event::::OldSlashingReportDiscarded { + session_index: offence_session, + }); Ok(()) } } diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 399f50aaed865..6da27da362b53 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -181,14 +181,20 @@ impl Pallet { let validator_exposure_part = Perbill::from_rational(exposure.own, exposure.total); let validator_staking_payout = validator_exposure_part * validator_leftover_payout; - Self::deposit_event(Event::::PayoutStarted(era, ledger.stash.clone())); + Self::deposit_event(Event::::PayoutStarted { + era_index: era, + validator_stash: ledger.stash.clone(), + }); let mut total_imbalance = PositiveImbalanceOf::::zero(); // We can now make total validator payout: if let Some(imbalance) = Self::make_payout(&ledger.stash, validator_staking_payout + validator_commission_payout) { - Self::deposit_event(Event::::Rewarded(ledger.stash, imbalance.peek())); + Self::deposit_event(Event::::Rewarded { + stash: ledger.stash, + amount: imbalance.peek(), + }); total_imbalance.subsume(imbalance); } @@ -208,7 +214,8 @@ impl Pallet { if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { // Note: this logic does not count payouts for `RewardDestination::None`. 
nominator_payout_count += 1; - let e = Event::::Rewarded(nominator.who.clone(), imbalance.peek()); + let e = + Event::::Rewarded { stash: nominator.who.clone(), amount: imbalance.peek() }; Self::deposit_event(e); total_imbalance.subsume(imbalance); } @@ -232,7 +239,7 @@ impl Pallet { let chilled_as_validator = Self::do_remove_validator(stash); let chilled_as_nominator = Self::do_remove_nominator(stash); if chilled_as_validator || chilled_as_nominator { - Self::deposit_event(Event::::Chilled(stash.clone())); + Self::deposit_event(Event::::Chilled { stash: stash.clone() }); } } @@ -391,13 +398,18 @@ impl Pallet { let era_duration = (now_as_millis_u64 - active_era_start).saturated_into::(); let staked = Self::eras_total_stake(&active_era.index); let issuance = T::Currency::total_issuance(); - let (validator_payout, rest) = T::EraPayout::era_payout(staked, issuance, era_duration); + let (validator_payout, remainder) = + T::EraPayout::era_payout(staked, issuance, era_duration); - Self::deposit_event(Event::::EraPaid(active_era.index, validator_payout, rest)); + Self::deposit_event(Event::::EraPaid { + era_index: active_era.index, + validator_payout, + remainder, + }); // Set ending era reward. >::insert(&active_era.index, validator_payout); - T::RewardRemainder::on_unbalanced(T::Currency::issue(rest)); + T::RewardRemainder::on_unbalanced(T::Currency::issue(remainder)); // Clear offending validators. >::kill(); diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 4db3870c62d8b..6e97697736223 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -653,39 +653,36 @@ pub mod pallet { pub enum Event { /// The era payout has been set; the first balance is the validator-payout; the second is /// the remainder from the maximum amount of reward. - /// \[era_index, validator_payout, remainder\] - EraPaid(EraIndex, BalanceOf, BalanceOf), - /// The nominator has been rewarded by this amount. \[stash, amount\] - Rewarded(T::AccountId, BalanceOf), + EraPaid { era_index: EraIndex, validator_payout: BalanceOf, remainder: BalanceOf }, + /// The nominator has been rewarded by this amount. + Rewarded { stash: T::AccountId, amount: BalanceOf }, /// One staker (and potentially its nominators) has been slashed by the given amount. - /// \[staker, amount\] - Slashed(T::AccountId, BalanceOf), + Slashed { staker: T::AccountId, amount: BalanceOf }, /// An old slashing report from a prior era was discarded because it could - /// not be processed. \[session_index\] - OldSlashingReportDiscarded(SessionIndex), + /// not be processed. + OldSlashingReportDiscarded { session_index: SessionIndex }, /// A new set of stakers was elected. StakersElected, /// An account has bonded this amount. \[stash, amount\] /// /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, /// it will not be emitted for staking rewards when they are added to stake. - Bonded(T::AccountId, BalanceOf), - /// An account has unbonded this amount. \[stash, amount\] - Unbonded(T::AccountId, BalanceOf), + Bonded { stash: T::AccountId, amount: BalanceOf }, + /// An account has unbonded this amount. + Unbonded { stash: T::AccountId, amount: BalanceOf }, /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance` - /// from the unlocking queue. \[stash, amount\] - Withdrawn(T::AccountId, BalanceOf), - /// A nominator has been kicked from a validator. 
\[nominator, stash\] - Kicked(T::AccountId, T::AccountId), + /// from the unlocking queue. + Withdrawn { stash: T::AccountId, amount: BalanceOf }, + /// A nominator has been kicked from a validator. + Kicked { nominator: T::AccountId, stash: T::AccountId }, /// The election failed. No new era is planned. StakingElectionFailed, /// An account has stopped participating as either a validator or nominator. - /// \[stash\] - Chilled(T::AccountId), - /// The stakers' rewards are getting paid. \[era_index, validator_stash\] - PayoutStarted(EraIndex, T::AccountId), + Chilled { stash: T::AccountId }, + /// The stakers' rewards are getting paid. + PayoutStarted { era_index: EraIndex, validator_stash: T::AccountId }, /// A validator has set their preferences. - ValidatorPrefsSet(T::AccountId, ValidatorPrefs), + ValidatorPrefsSet { stash: T::AccountId, prefs: ValidatorPrefs }, } #[pallet::error] @@ -850,7 +847,7 @@ pub mod pallet { let stash_balance = T::Currency::free_balance(&stash); let value = value.min(stash_balance); - Self::deposit_event(Event::::Bonded(stash.clone(), value)); + Self::deposit_event(Event::::Bonded { stash: stash.clone(), amount: value }); let item = StakingLedger { stash, total: value, @@ -911,7 +908,7 @@ pub mod pallet { T::VoterList::on_update(&stash, Self::weight_of(&ledger.stash)).defensive(); } - Self::deposit_event(Event::::Bonded(stash, extra)); + Self::deposit_event(Event::::Bonded { stash, amount: extra }); } Ok(()) } @@ -994,7 +991,7 @@ pub mod pallet { .defensive(); } - Self::deposit_event(Event::::Unbonded(ledger.stash, value)); + Self::deposit_event(Event::::Unbonded { stash: ledger.stash, amount: value }); } Ok(()) } @@ -1050,7 +1047,7 @@ pub mod pallet { if ledger.total < old_total { // Already checked that this won't overflow by entry condition. let value = old_total - ledger.total; - Self::deposit_event(Event::::Withdrawn(stash, value)); + Self::deposit_event(Event::::Withdrawn { stash, amount: value }); } Ok(post_info_weight.into()) @@ -1088,7 +1085,7 @@ pub mod pallet { Self::do_remove_nominator(stash); Self::do_add_validator(stash, prefs.clone()); - Self::deposit_event(Event::::ValidatorPrefsSet(ledger.stash, prefs)); + Self::deposit_event(Event::::ValidatorPrefsSet { stash: ledger.stash, prefs }); Ok(()) } @@ -1471,7 +1468,10 @@ pub mod pallet { // Last check: the new active amount of ledger must be more than ED. ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); - Self::deposit_event(Event::::Bonded(ledger.stash.clone(), rebonded_value)); + Self::deposit_event(Event::::Bonded { + stash: ledger.stash.clone(), + amount: rebonded_value, + }); // NOTE: ledger must be updated prior to calling `Self::weight_of`. 
Self::update_ledger(&controller, &ledger); @@ -1546,10 +1546,10 @@ pub mod pallet { if let Some(ref mut nom) = maybe_nom { if let Some(pos) = nom.targets.iter().position(|v| v == stash) { nom.targets.swap_remove(pos); - Self::deposit_event(Event::::Kicked( - nom_stash.clone(), - stash.clone(), - )); + Self::deposit_event(Event::::Kicked { + nominator: nom_stash.clone(), + stash: stash.clone(), + }); } } }); diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index f3272a25fab5c..a1900136d64fd 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -626,7 +626,10 @@ pub fn do_slash( >::update_ledger(&controller, &ledger); // trigger the event - >::deposit_event(super::Event::::Slashed(stash.clone(), value)); + >::deposit_event(super::Event::::Slashed { + staker: stash.clone(), + amount: value, + }); } } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 6798a78030f9e..8ec98da99ecb1 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -303,7 +303,11 @@ fn rewards_should_work() { assert_eq!(mock::RewardRemainderUnbalanced::get(), maximum_payout - total_payout_0,); assert_eq!( *mock::staking_events().last().unwrap(), - Event::EraPaid(0, total_payout_0, maximum_payout - total_payout_0) + Event::EraPaid { + era_index: 0, + validator_payout: total_payout_0, + remainder: maximum_payout - total_payout_0 + } ); mock::make_all_reward_payment(0); @@ -341,7 +345,11 @@ fn rewards_should_work() { ); assert_eq!( *mock::staking_events().last().unwrap(), - Event::EraPaid(1, total_payout_1, maximum_payout - total_payout_1) + Event::EraPaid { + era_index: 1, + validator_payout: total_payout_1, + remainder: maximum_payout - total_payout_1 + } ); mock::make_all_reward_payment(1); @@ -1645,7 +1653,7 @@ fn rebond_emits_right_value_in_event() { }) ); // Event emitted should be correct - assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 100)); + assert_eq!(*staking_events().last().unwrap(), Event::Bonded { stash: 11, amount: 100 }); // Re-bond way more than available Staking::rebond(RuntimeOrigin::signed(10), 100_000).unwrap(); @@ -1660,7 +1668,7 @@ fn rebond_emits_right_value_in_event() { }) ); // Event emitted should be correct, only 800 - assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 800)); + assert_eq!(*staking_events().last().unwrap(), Event::Bonded { stash: 11, amount: 800 }); }); } @@ -2870,9 +2878,9 @@ fn deferred_slashes_are_deferred() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(3, 11075, 33225), - Event::Slashed(11, 100), - Event::Slashed(101, 12) + Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 } ] ); }) @@ -2901,9 +2909,9 @@ fn retroactive_deferred_slashes_two_eras_before() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(3, 7100, 21300), - Event::Slashed(11, 100), - Event::Slashed(101, 12) + Event::EraPaid { era_index: 3, validator_payout: 7100, remainder: 21300 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 }, ] ); }) @@ -2934,7 +2942,10 @@ fn retroactive_deferred_slashes_one_before() { mock::start_active_era(4); assert_eq!( staking_events_since_last_call(), - vec![Event::StakersElected, Event::EraPaid(3, 11075, 33225)] + vec![ + Event::StakersElected, + Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 } + ] ); 
assert_eq!(Staking::ledger(10).unwrap().total, 1000); @@ -2944,9 +2955,9 @@ fn retroactive_deferred_slashes_one_before() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(4, 11075, 33225), - Event::Slashed(11, 100), - Event::Slashed(101, 12) + Event::EraPaid { era_index: 4, validator_payout: 11075, remainder: 33225 }, + Event::Slashed { staker: 11, amount: 100 }, + Event::Slashed { staker: 101, amount: 12 } ] ); @@ -3090,9 +3101,9 @@ fn remove_deferred() { staking_events_since_last_call(), vec![ Event::StakersElected, - Event::EraPaid(3, 11075, 33225), - Event::Slashed(11, 50), - Event::Slashed(101, 7) + Event::EraPaid { era_index: 3, validator_payout: 11075, remainder: 33225 }, + Event::Slashed { staker: 11, amount: 50 }, + Event::Slashed { staker: 101, amount: 7 } ] ); @@ -4057,7 +4068,7 @@ fn offences_weight_calculated_correctly() { &one_offender, &[Perbill::from_percent(50)], 0, - DisableStrategy::WhenSlashed + DisableStrategy::WhenSlashed{} ), one_offence_unapplied_weight ); @@ -4955,10 +4966,10 @@ fn min_commission_works() { // event emitted should be correct assert_eq!( *staking_events().last().unwrap(), - Event::ValidatorPrefsSet( - 11, - ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } - ) + Event::ValidatorPrefsSet { + stash: 11, + prefs: ValidatorPrefs { commission: Perbill::from_percent(5), blocked: false } + } ); assert_ok!(Staking::set_staking_configs( From 74daaf1eb23686991a40c6cc361940421322472b Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Tue, 27 Sep 2022 17:44:16 +0200 Subject: [PATCH 181/348] [fix] Bound staking ledger correctly with MaxUnlockingChunks from configuration (#12343) * used maxunlockingchunks from config * mhl MaxUnlockingChunks * no migration needed * changes as per requested * fmt * fix tests * fix benchmark * warning in the doc for abrupt changes in the config * less unnecessary details in the test * fix tests Co-authored-by: mrisholukamba Co-authored-by: parity-processbot <> --- frame/staking/src/benchmarking.rs | 4 +- frame/staking/src/lib.rs | 7 +--- frame/staking/src/mock.rs | 3 +- frame/staking/src/pallet/mod.rs | 27 +++++++++----- frame/staking/src/tests.rs | 61 +++++++++++++++++++++++++++++-- 5 files changed, 80 insertions(+), 22 deletions(-) diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 1ea05bba3b579..c7e6936ac75d8 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -613,7 +613,7 @@ benchmarks! { } rebond { - let l in 1 .. MaxUnlockingChunks::get() as u32; + let l in 1 .. T::MaxUnlockingChunks::get() as u32; // clean up any existing state. clear_validators_and_nominators::(); @@ -764,7 +764,7 @@ benchmarks! { #[extra] do_slash { - let l in 1 .. MaxUnlockingChunks::get() as u32; + let l in 1 .. 
T::MaxUnlockingChunks::get() as u32; let (stash, controller) = create_stash_controller::(0, 100, Default::default())?; let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); let unlock_chunk = UnlockChunk::> { diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index eb30671d35a57..a0144463540be 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -301,7 +301,6 @@ mod pallet; use codec::{Decode, Encode, HasCompact, MaxEncodedLen}; use frame_support::{ - parameter_types, traits::{Currency, Defensive, Get}, weights::Weight, BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, @@ -349,10 +348,6 @@ type NegativeImbalanceOf = <::Currency as Currency< type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -parameter_types! { - pub MaxUnlockingChunks: u32 = 32; -} - /// Information regarding the active era (era in used in session). #[derive(Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ActiveEraInfo { @@ -465,7 +460,7 @@ pub struct StakingLedger { /// Any balance that is becoming free, which may eventually be transferred out of the stash /// (assuming it doesn't get slashed first). It is assumed that this will be treated as a first /// in, first out queue where the new (higher value) eras get pushed on the back. - pub unlocking: BoundedVec>, MaxUnlockingChunks>, + pub unlocking: BoundedVec>, T::MaxUnlockingChunks>, /// List of eras for which the stakers behind a validator have claimed rewards. Only updated /// for validators. pub claimed_rewards: BoundedVec, diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 385087f9bec41..3a9351ef4a271 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -237,6 +237,7 @@ parameter_types! { pub static BagThresholds: &'static [sp_npos_elections::VoteWeight] = &THRESHOLDS; pub static MaxNominations: u32 = 16; pub static HistoryDepth: u32 = 80; + pub static MaxUnlockingChunks: u32 = 32; pub static RewardOnUnbalanceWasCalled: bool = false; pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); } @@ -301,7 +302,7 @@ impl crate::pallet::pallet::Config for Test { // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. type VoterList = VoterBagsList; type TargetList = UseValidatorsMap; - type MaxUnlockingChunks = ConstU32<32>; + type MaxUnlockingChunks = MaxUnlockingChunks; type HistoryDepth = HistoryDepth; type OnStakerSlash = OnStakerSlashMock; type BenchmarkingConfig = TestBenchmarkingConfig; diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 6e97697736223..560c3b6ed830c 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -43,9 +43,9 @@ pub use impls::*; use crate::{ slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, Exposure, Forcing, MaxUnlockingChunks, NegativeImbalanceOf, Nominations, - PositiveImbalanceOf, Releases, RewardDestination, SessionInterface, StakingLedger, - UnappliedSlash, UnlockChunk, ValidatorPrefs, + EraRewardPoints, Exposure, Forcing, NegativeImbalanceOf, Nominations, PositiveImbalanceOf, + Releases, RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, + ValidatorPrefs, }; const STAKING_ID: LockIdentifier = *b"staking "; @@ -142,8 +142,9 @@ pub mod pallet { /// /// Note: `HistoryDepth` is used as the upper bound for the `BoundedVec` /// item `StakingLedger.claimed_rewards`. 
Setting this value lower than - /// the existing value can lead to inconsistencies and will need to be - /// handled properly in a migration. + /// the existing value can lead to inconsistencies in the + /// `StakingLedger` and will need to be handled properly in a migration. + /// The test `reducing_history_depth_abrupt` shows this effect. #[pallet::constant] type HistoryDepth: Get; @@ -237,8 +238,16 @@ pub mod pallet { /// VALIDATOR. type TargetList: SortedListProvider>; - /// The maximum number of `unlocking` chunks a [`StakingLedger`] can have. Effectively - /// determines how many unique eras a staker may be unbonding in. + /// The maximum number of `unlocking` chunks a [`StakingLedger`] can + /// have. Effectively determines how many unique eras a staker may be + /// unbonding in. + /// + /// Note: `MaxUnlockingChunks` is used as the upper bound for the + /// `BoundedVec` item `StakingLedger.unlocking`. Setting this value + /// lower than the existing value can lead to inconsistencies in the + /// `StakingLedger` and will need to be handled properly in a runtime + /// migration. The test `reducing_max_unlocking_chunks_abrupt` shows + /// this effect. #[pallet::constant] type MaxUnlockingChunks: Get; @@ -940,7 +949,7 @@ pub mod pallet { let controller = ensure_signed(origin)?; let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; ensure!( - ledger.unlocking.len() < MaxUnlockingChunks::get() as usize, + ledger.unlocking.len() < T::MaxUnlockingChunks::get() as usize, Error::::NoMoreChunks, ); @@ -1454,7 +1463,7 @@ pub mod pallet { /// - Bounded by `MaxUnlockingChunks`. /// - Storage changes: Can't increase storage, only decrease it. /// # - #[pallet::weight(T::WeightInfo::rebond(MaxUnlockingChunks::get() as u32))] + #[pallet::weight(T::WeightInfo::rebond(T::MaxUnlockingChunks::get() as u32))] pub fn rebond( origin: OriginFor, #[pallet::compact] value: BalanceOf, diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 8ec98da99ecb1..4812c105c0d80 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -17,7 +17,7 @@ //! Tests for the module. -use super::{ConfigOp, Event, MaxUnlockingChunks, *}; +use super::{ConfigOp, Event, *}; use frame_election_provider_support::{ElectionProvider, SortedListProvider, Support}; use frame_support::{ assert_noop, assert_ok, assert_storage_noop, bounded_vec, @@ -1354,7 +1354,8 @@ fn too_many_unbond_calls_should_not_work() { ExtBuilder::default().build_and_execute(|| { let mut current_era = 0; // locked at era MaxUnlockingChunks - 1 until 3 - for i in 0..MaxUnlockingChunks::get() - 1 { + + for i in 0..<::MaxUnlockingChunks as Get>::get() - 1 { // There is only 1 chunk per era, so we need to be in a new era to create a chunk. current_era = i as u32; mock::start_active_era(current_era); @@ -1369,7 +1370,7 @@ fn too_many_unbond_calls_should_not_work() { assert_ok!(Staking::unbond(RuntimeOrigin::signed(10), 1)); assert_eq!( Staking::ledger(&10).unwrap().unlocking.len(), - MaxUnlockingChunks::get() as usize + <::MaxUnlockingChunks as Get>::get() as usize ); // can't do more. 
assert_noop!(Staking::unbond(RuntimeOrigin::signed(10), 1), Error::::NoMoreChunks); @@ -5494,7 +5495,7 @@ fn pre_bonding_era_cannot_be_claimed() { } #[test] -fn reducing_history_depth_without_migration() { +fn reducing_history_depth_abrupt() { // Verifies initial conditions of mock ExtBuilder::default().nominate(false).build_and_execute(|| { let original_history_depth = HistoryDepth::get(); @@ -5571,3 +5572,55 @@ fn reducing_history_depth_without_migration() { HistoryDepth::set(original_history_depth); }); } + +#[test] +fn reducing_max_unlocking_chunks_abrupt() { + // Concern is on validators only + // By Default 11, 10 are stash and ctrl and 21,20 + ExtBuilder::default().build_and_execute(|| { + // given a staker at era=10 and MaxUnlockChunks set to 2 + MaxUnlockingChunks::set(2); + start_active_era(10); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 4, 300, RewardDestination::Staked)); + assert!(matches!(Staking::ledger(4), Some(_))); + + // when staker unbonds + assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 20)); + + // then an unlocking chunk is added at `current_era + bonding_duration` + // => 10 + 3 = 13 + let expected_unlocking: BoundedVec, MaxUnlockingChunks> = + bounded_vec![UnlockChunk { value: 20 as Balance, era: 13 as EraIndex }]; + assert!(matches!(Staking::ledger(4), + Some(StakingLedger { + unlocking, + .. + }) if unlocking==expected_unlocking)); + + // when staker unbonds at next era + start_active_era(11); + assert_ok!(Staking::unbond(RuntimeOrigin::signed(4), 50)); + // then another unlock chunk is added + let expected_unlocking: BoundedVec, MaxUnlockingChunks> = + bounded_vec![UnlockChunk { value: 20, era: 13 }, UnlockChunk { value: 50, era: 14 }]; + assert!(matches!(Staking::ledger(4), + Some(StakingLedger { + unlocking, + .. 
+ }) if unlocking==expected_unlocking)); + + // when staker unbonds further + start_active_era(12); + // then further unbonding not possible + assert_noop!(Staking::unbond(RuntimeOrigin::signed(4), 20), Error::::NoMoreChunks); + + // when max unlocking chunks is reduced abruptly to a low value + MaxUnlockingChunks::set(1); + // then unbond, rebond ops are blocked with ledger in corrupt state + assert_noop!(Staking::unbond(RuntimeOrigin::signed(4), 20), Error::::NotController); + assert_noop!(Staking::rebond(RuntimeOrigin::signed(4), 100), Error::::NotController); + + // reset the ledger corruption + MaxUnlockingChunks::set(2); + }) +} From 94b9646177430adb74d7e4737c98ba333f91c451 Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Tue, 27 Sep 2022 19:31:12 +0200 Subject: [PATCH 182/348] [Feature] Add deposit to fast-unstake (#12366) * [Feature] Add deposit to fast-unstake * disable on ErasToCheckPerBlock == 0 * removed signed ext * remove obsolete import * remove some obsolete stuff * fix some comments * fixed all the comments * remove obsolete imports * fix some tests * CallNotAllowed tests * Update frame/fast-unstake/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * fix tests * fix deregister + tests * more fixes * make sure we go above existential deposit * fixed the last test * some nit fixes * fix node * fix bench * last bench fix * Update frame/fast-unstake/src/lib.rs * ".git/.scripts/fmt.sh" 1 Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: command-bot <> --- bin/node/runtime/src/lib.rs | 3 +- frame/fast-unstake/src/benchmarking.rs | 8 +- frame/fast-unstake/src/lib.rs | 105 +++++--- frame/fast-unstake/src/mock.rs | 19 +- frame/fast-unstake/src/tests.rs | 333 ++++++++++++++++--------- frame/fast-unstake/src/types.rs | 83 +----- 6 files changed, 313 insertions(+), 238 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 8ed5f1c847f5e..aa1a525bf095c 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -581,8 +581,9 @@ impl pallet_staking::Config for Runtime { impl pallet_fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SlashPerEra = ConstU128<{ DOLLARS }>; type ControlOrigin = frame_system::EnsureRoot; + type Deposit = ConstU128<{ DOLLARS }>; + type DepositCurrency = Balances; type WeightInfo = (); } diff --git a/frame/fast-unstake/src/benchmarking.rs b/frame/fast-unstake/src/benchmarking.rs index 5690d5ce6f29f..8770cc6b64c0d 100644 --- a/frame/fast-unstake/src/benchmarking.rs +++ b/frame/fast-unstake/src/benchmarking.rs @@ -110,18 +110,18 @@ fn on_idle_full_block() { benchmarks! { // on_idle, we we don't check anyone, but fully unbond and move them to another pool. on_idle_unstake { + ErasToCheckPerBlock::::put(1); let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), )); - ErasToCheckPerBlock::::put(1); // run on_idle once. This will check era 0. assert_eq!(Head::::get(), None); on_idle_full_block::(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap() }) + Some(UnstakeRequest { stash: who.clone(), checked: vec![0].try_into().unwrap(), deposit: T::Deposit::get() }) ); } : { @@ -162,7 +162,7 @@ benchmarks! 
{ let checked: frame_support::BoundedVec<_, _> = (1..=u).rev().collect::>().try_into().unwrap(); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: who.clone(), checked }) + Some(UnstakeRequest { stash: who.clone(), checked, deposit: T::Deposit::get() }) ); assert!(matches!( fast_unstake_events::().last(), @@ -171,6 +171,7 @@ benchmarks! { } register_fast_unstake { + ErasToCheckPerBlock::::put(1); let who = create_unexposed_nominator::(); whitelist_account!(who); assert_eq!(Queue::::count(), 0); @@ -182,6 +183,7 @@ benchmarks! { } deregister { + ErasToCheckPerBlock::::put(1); let who = create_unexposed_nominator::(); assert_ok!(FastUnstake::::register_fast_unstake( RawOrigin::Signed(who.clone()).into(), diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index 7fbac8560ea6c..ed26d6b436e1d 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -81,7 +81,10 @@ pub mod pallet { use super::*; use crate::types::*; use frame_election_provider_support::ElectionProvider; - use frame_support::pallet_prelude::*; + use frame_support::{ + pallet_prelude::*, + traits::{Defensive, ReservableCurrency}, + }; use frame_system::{pallet_prelude::*, RawOrigin}; use pallet_staking::Pallet as Staking; use sp_runtime::{ @@ -90,7 +93,6 @@ pub mod pallet { }; use sp_staking::EraIndex; use sp_std::{prelude::*, vec::Vec}; - pub use types::PreventStakingOpsIfUnbonding; pub use weights::WeightInfo; #[derive(scale_info::TypeInfo, codec::Encode, codec::Decode, codec::MaxEncodedLen)] @@ -113,10 +115,12 @@ pub mod pallet { + IsType<::RuntimeEvent> + TryInto>; - /// The amount of balance slashed per each era that was wastefully checked. - /// - /// A reasonable value could be `runtime_weight_to_fee(weight_per_era_check)`. - type SlashPerEra: Get>; + /// The currency used for deposits. + type DepositCurrency: ReservableCurrency>; + + /// Deposit to take for unstaking, to make sure we're able to slash the it in order to cover + /// the costs of resources on unsuccessful unstake. + type Deposit: Get>; /// The origin that can control this pallet. type ControlOrigin: frame_support::traits::EnsureOrigin; @@ -128,13 +132,13 @@ pub mod pallet { /// The current "head of the queue" being unstaked. #[pallet::storage] pub type Head = - StorageValue<_, UnstakeRequest>, OptionQuery>; + StorageValue<_, UnstakeRequest, BalanceOf>, OptionQuery>; /// The map of all accounts wishing to be unstaked. /// - /// Keeps track of `AccountId` wishing to unstake. + /// Keeps track of `AccountId` wishing to unstake and it's corresponding deposit. #[pallet::storage] - pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, ()>; + pub type Queue = CountedStorageMap<_, Twox64Concat, T::AccountId, BalanceOf>; /// Number of eras to check per block. /// @@ -177,6 +181,8 @@ pub mod pallet { NotQueued, /// The provided un-staker is already in Head, and cannot deregister. AlreadyHead, + /// The call is not allowed at this point because the pallet is not active. 
+ CallNotAllowed, } #[pallet::hooks] @@ -214,6 +220,8 @@ pub mod pallet { pub fn register_fast_unstake(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; + ensure!(ErasToCheckPerBlock::::get() != 0, >::CallNotAllowed); + let ledger = pallet_staking::Ledger::::get(&ctrl).ok_or(Error::::NotController)?; ensure!(!Queue::::contains_key(&ledger.stash), Error::::AlreadyQueued); @@ -231,8 +239,10 @@ pub mod pallet { Staking::::chill(RawOrigin::Signed(ctrl.clone()).into())?; Staking::::unbond(RawOrigin::Signed(ctrl).into(), ledger.total)?; + T::DepositCurrency::reserve(&ledger.stash, T::Deposit::get())?; + // enqueue them. - Queue::::insert(ledger.stash, ()); + Queue::::insert(ledger.stash, T::Deposit::get()); Ok(()) } @@ -246,6 +256,9 @@ pub mod pallet { #[pallet::weight(::WeightInfo::deregister())] pub fn deregister(origin: OriginFor) -> DispatchResult { let ctrl = ensure_signed(origin)?; + + ensure!(ErasToCheckPerBlock::::get() != 0, >::CallNotAllowed); + let stash = pallet_staking::Ledger::::get(&ctrl) .map(|l| l.stash) .ok_or(Error::::NotController)?; @@ -254,7 +267,17 @@ pub mod pallet { Head::::get().map_or(true, |UnstakeRequest { stash, .. }| stash != stash), Error::::AlreadyHead ); - Queue::::remove(stash); + let deposit = Queue::::take(stash.clone()); + + if let Some(deposit) = deposit.defensive() { + let remaining = T::DepositCurrency::unreserve(&stash, deposit); + if !remaining.is_zero() { + frame_support::defensive!("`not enough balance to unreserve`"); + ErasToCheckPerBlock::::put(0); + Self::deposit_event(Event::::InternalError) + } + } + Ok(()) } @@ -315,18 +338,23 @@ pub mod pallet { return T::DbWeight::get().reads(2) } - let UnstakeRequest { stash, mut checked } = match Head::::take().or_else(|| { - // NOTE: there is no order guarantees in `Queue`. - Queue::::drain() - .map(|(stash, _)| UnstakeRequest { stash, checked: Default::default() }) - .next() - }) { - None => { - // There's no `Head` and nothing in the `Queue`, nothing to do here. - return T::DbWeight::get().reads(4) - }, - Some(head) => head, - }; + let UnstakeRequest { stash, mut checked, deposit } = + match Head::::take().or_else(|| { + // NOTE: there is no order guarantees in `Queue`. + Queue::::drain() + .map(|(stash, deposit)| UnstakeRequest { + stash, + deposit, + checked: Default::default(), + }) + .next() + }) { + None => { + // There's no `Head` and nothing in the `Queue`, nothing to do here. + return T::DbWeight::get().reads(4) + }, + Some(head) => head, + }; log!( debug, @@ -381,9 +409,16 @@ pub mod pallet { num_slashing_spans, ); - log!(info, "unstaked {:?}, outcome: {:?}", stash, result); + let remaining = T::DepositCurrency::unreserve(&stash, deposit); + if !remaining.is_zero() { + frame_support::defensive!("`not enough balance to unreserve`"); + ErasToCheckPerBlock::::put(0); + Self::deposit_event(Event::::InternalError) + } else { + log!(info, "unstaked {:?}, outcome: {:?}", stash, result); + Self::deposit_event(Event::::Unstaked { stash, result }); + } - Self::deposit_event(Event::::Unstaked { stash, result }); ::WeightInfo::on_idle_unstake() } else { // eras remaining to be checked. @@ -406,22 +441,18 @@ pub mod pallet { // the last 28 eras, have registered yourself to be unstaked, midway being checked, // you are exposed. 
if is_exposed { - let amount = T::SlashPerEra::get() - .saturating_mul(eras_checked.saturating_add(checked.len() as u32).into()); - pallet_staking::slashing::do_slash::( - &stash, - amount, - &mut Default::default(), - &mut Default::default(), - current_era, - ); - log!(info, "slashed {:?} by {:?}", stash, amount); - Self::deposit_event(Event::::Slashed { stash, amount }); + T::DepositCurrency::slash_reserved(&stash, deposit); + log!(info, "slashed {:?} by {:?}", stash, deposit); + Self::deposit_event(Event::::Slashed { stash, amount: deposit }); } else { // Not exposed in these eras. match checked.try_extend(unchecked_eras_to_check.clone().into_iter()) { Ok(_) => { - Head::::put(UnstakeRequest { stash: stash.clone(), checked }); + Head::::put(UnstakeRequest { + stash: stash.clone(), + checked, + deposit, + }); Self::deposit_event(Event::::Checking { stash, eras: unchecked_eras_to_check, diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index 62f343709e245..4c4c5f9ff26fd 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -164,12 +164,13 @@ impl Convert for U256ToBalance { } parameter_types! { - pub static SlashPerEra: u32 = 100; + pub static DepositAmount: u128 = 7; } impl fast_unstake::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type SlashPerEra = SlashPerEra; + type Deposit = DepositAmount; + type DepositCurrency = Balances; type ControlOrigin = frame_system::EnsureRoot; type WeightInfo = (); } @@ -213,11 +214,11 @@ impl Default for ExtBuilder { fn default() -> Self { Self { exposed_nominators: vec![ - (1, 2, 100), - (3, 4, 100), - (5, 6, 100), - (7, 8, 100), - (9, 10, 100), + (1, 2, 7 + 100), + (3, 4, 7 + 100), + (5, 6, 7 + 100), + (7, 8, 7 + 100), + (9, 10, 7 + 100), ], } } @@ -270,8 +271,8 @@ impl ExtBuilder { .into_iter() .map(|(_, ctrl, balance)| (ctrl, balance * 2)), ) - .chain(validators_range.clone().map(|x| (x, 100))) - .chain(nominators_range.clone().map(|x| (x, 100))) + .chain(validators_range.clone().map(|x| (x, 7 + 100))) + .chain(nominators_range.clone().map(|x| (x, 7 + 100))) .collect::>(), } .assimilate_storage(&mut storage); diff --git a/frame/fast-unstake/src/tests.rs b/frame/fast-unstake/src/tests.rs index 5586443ce797c..6e617fd992028 100644 --- a/frame/fast-unstake/src/tests.rs +++ b/frame/fast-unstake/src/tests.rs @@ -35,6 +35,7 @@ fn test_setup_works() { #[test] fn register_works() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller account registers for fast unstake. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Ensure stash is in the queue. @@ -42,9 +43,38 @@ fn register_works() { }); } +#[test] +fn register_insufficient_funds_fails() { + use pallet_balances::Error as BalancesError; + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + ::DepositCurrency::make_free_balance_be(&1, 3); + + // Controller account registers for fast unstake. + assert_noop!( + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), + BalancesError::::InsufficientBalance, + ); + + // Ensure stash is in the queue. 
+ assert_eq!(Queue::::get(1), None); + }); +} + +#[test] +fn register_disabled_fails() { + ExtBuilder::default().build_and_execute(|| { + assert_noop!( + FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), + Error::::CallNotAllowed + ); + }); +} + #[test] fn cannot_register_if_not_bonded() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Mint accounts 1 and 2 with 200 tokens. for _ in 1..2 { let _ = Balances::make_free_balance_be(&1, 200); @@ -60,8 +90,9 @@ fn cannot_register_if_not_bonded() { #[test] fn cannot_register_if_in_queue() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Insert some Queue item - Queue::::insert(1, ()); + Queue::::insert(1, 10); // Cannot re-register, already in queue assert_noop!( FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), @@ -73,8 +104,13 @@ fn cannot_register_if_in_queue() { #[test] fn cannot_register_if_head() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Insert some Head item for stash - Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); + Head::::put(UnstakeRequest { + stash: 1, + checked: bounded_vec![], + deposit: DepositAmount::get(), + }); // Controller attempts to regsiter assert_noop!( FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2)), @@ -86,6 +122,7 @@ fn cannot_register_if_head() { #[test] fn cannot_register_if_has_unlocking_chunks() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Start unbonding half of staked tokens assert_ok!(Staking::unbond(RuntimeOrigin::signed(2), 50_u128)); // Cannot register for fast unstake with unlock chunks active @@ -99,18 +136,37 @@ fn cannot_register_if_has_unlocking_chunks() { #[test] fn deregister_works() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + // Controller account registers for fast unstake. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + assert_eq!(::DepositCurrency::reserved_balance(&1), DepositAmount::get()); + // Controller then changes mind and deregisters. assert_ok!(FastUnstake::deregister(RuntimeOrigin::signed(2))); + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + // Ensure stash no longer exists in the queue. assert_eq!(Queue::::get(1), None); }); } +#[test] +fn deregister_disabled_fails() { + ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); + ErasToCheckPerBlock::::put(0); + assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::CallNotAllowed); + }); +} + #[test] fn cannot_deregister_if_not_controller() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller account registers for fast unstake. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Stash tries to deregister. 
@@ -121,6 +177,7 @@ fn cannot_deregister_if_not_controller() { #[test] fn cannot_deregister_if_not_queued() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller tries to deregister without first registering assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::NotQueued); }); @@ -129,10 +186,15 @@ fn cannot_deregister_if_not_queued() { #[test] fn cannot_deregister_already_head() { ExtBuilder::default().build_and_execute(|| { + ErasToCheckPerBlock::::put(1); // Controller attempts to register, should fail assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); // Insert some Head item for stash. - Head::::put(UnstakeRequest { stash: 1, checked: bounded_vec![] }); + Head::::put(UnstakeRequest { + stash: 1, + checked: bounded_vec![], + deposit: DepositAmount::get(), + }); // Controller attempts to deregister assert_noop!(FastUnstake::deregister(RuntimeOrigin::signed(2)), Error::::AlreadyHead); }); @@ -165,14 +227,14 @@ mod on_idle { // set up Queue item assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // call on_idle with no remaining weight FastUnstake::on_idle(System::block_number(), Weight::from_ref_time(0)); // assert nothing changed in Queue and Head assert_eq!(Head::::get(), None); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); }); } @@ -185,7 +247,7 @@ mod on_idle { // given assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); assert_eq!(Queue::::count(), 1); assert_eq!(Head::::get(), None); @@ -204,7 +266,11 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); // when: another 1 era. @@ -220,7 +286,11 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); // when: then 5 eras, we only need 2 more. 
@@ -242,7 +312,11 @@ mod on_idle { ); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); // when: not enough weight to unstake: @@ -254,7 +328,11 @@ mod on_idle { assert_eq!(fast_unstake_events_since_last_call(), vec![]); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); // when: enough weight to get over at least one iteration: then we are unblocked and can @@ -285,12 +363,16 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // given + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(6))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(8))); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(10))); + assert_eq!(::DepositCurrency::reserved_balance(&1), DepositAmount::get()); + assert_eq!(Queue::::count(), 5); assert_eq!(Head::::get(), None); @@ -300,7 +382,11 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); assert_eq!(Queue::::count(), 4); @@ -317,10 +403,16 @@ mod on_idle { // then assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 5, checked: bounded_vec![3, 2, 1, 0] }), + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 5, + checked: bounded_vec![3, 2, 1, 0] + }), ); assert_eq!(Queue::::count(), 3); + assert_eq!(::DepositCurrency::reserved_balance(&1), 0); + assert_eq!( fast_unstake_events_since_last_call(), vec![ @@ -340,9 +432,9 @@ mod on_idle { // register multi accounts for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(4))); - assert_eq!(Queue::::get(3), Some(())); + assert_eq!(Queue::::get(3), Some(DepositAmount::get())); // assert 2 queue items are in Queue & None in Head to start with assert_eq!(Queue::::count(), 2); @@ -391,7 +483,7 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // process on idle next_block(true); @@ -402,7 +494,11 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); @@ -425,9 +521,11 @@ mod on_idle { ErasToCheckPerBlock::::put(BondingDuration::get() + 1); CurrentEra::::put(BondingDuration::get()); + Balances::make_free_balance_be(&2, 100); + // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // process on idle next_block(true); @@ 
-438,7 +536,11 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); @@ -464,7 +566,7 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); // process on idle next_block(true); @@ -475,28 +577,44 @@ mod on_idle { // assert head item present assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); @@ -529,30 +647,46 @@ mod on_idle { // register for fast unstake assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - assert_eq!(Queue::::get(1), Some(())); + assert_eq!(Queue::::get(1), Some(DepositAmount::get())); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 1, 0] + }) ); // when: a new era happens right before one is free. @@ -567,6 +701,7 @@ mod on_idle { stash: 1, // note era 0 is pruned to keep the vector length sane. 
checked: bounded_vec![3, 2, 1, 4], + deposit: DepositAmount::get(), }) ); @@ -602,13 +737,21 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); // when @@ -618,13 +761,21 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2] + }) ); // then we register a new era. @@ -636,14 +787,22 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 4] + }) ); // progress to end next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3, 2, 4, 1] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 1, + checked: bounded_vec![3, 2, 4, 1] + }) ); // but notice that we don't care about era 0 instead anymore! we're done. @@ -669,7 +828,6 @@ mod on_idle { fn exposed_nominator_cannot_unstake() { ExtBuilder::default().build_and_execute(|| { ErasToCheckPerBlock::::put(1); - SlashPerEra::set(7); CurrentEra::::put(BondingDuration::get()); // create an exposed nominator in era 1 @@ -686,6 +844,7 @@ mod on_idle { )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(exposed), vec![exposed])); + Balances::make_free_balance_be(&exposed, 100_000); // register the exposed one. assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(exposed))); @@ -693,23 +852,30 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: exposed, + checked: bounded_vec![3] + }) ); next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: exposed, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!(Head::::get(), None); assert_eq!( fast_unstake_events_since_last_call(), - // we slash them by 21, since we checked 3 eras in total (3, 2, 1). vec![ Event::Checking { stash: exposed, eras: vec![3] }, Event::Checking { stash: exposed, eras: vec![2] }, - Event::Slashed { stash: exposed, amount: 3 * 7 } + Event::Slashed { stash: exposed, amount: DepositAmount::get() } ] ); }); @@ -721,7 +887,6 @@ mod on_idle { // same as the previous check, but we check 2 eras per block, and we make the exposed be // exposed in era 0, so that it is detected halfway in a check era. 
ErasToCheckPerBlock::::put(2); - SlashPerEra::set(7); CurrentEra::::put(BondingDuration::get()); // create an exposed nominator in era 1 @@ -729,7 +894,7 @@ mod on_idle { pallet_staking::ErasStakers::::mutate(0, VALIDATORS_PER_ERA, |expo| { expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); }); - Balances::make_free_balance_be(&exposed, 100); + Balances::make_free_balance_be(&exposed, DepositAmount::get() + 100); assert_ok!(Staking::bond( RuntimeOrigin::signed(exposed), exposed, @@ -745,17 +910,21 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: exposed, checked: bounded_vec![3, 2] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: exposed, + checked: bounded_vec![3, 2] + }) ); next_block(true); assert_eq!(Head::::get(), None); assert_eq!( fast_unstake_events_since_last_call(), - // we slash them by 28, since we checked 4 eras in total. + // we slash them vec![ Event::Checking { stash: exposed, eras: vec![3, 2] }, - Event::Slashed { stash: exposed, amount: 4 * 7 } + Event::Slashed { stash: exposed, amount: DepositAmount::get() } ] ); }); @@ -786,7 +955,7 @@ mod on_idle { assert_eq!( fast_unstake_events_since_last_call(), - vec![Event::Slashed { stash: 100, amount: 100 }] + vec![Event::Slashed { stash: 100, amount: DepositAmount::get() }] ); }); } @@ -798,7 +967,7 @@ mod on_idle { CurrentEra::::put(BondingDuration::get()); // create a new validator that 100% not exposed. - Balances::make_free_balance_be(&42, 100); + Balances::make_free_balance_be(&42, 100 + DepositAmount::get()); assert_ok!(Staking::bond(RuntimeOrigin::signed(42), 42, 10, RewardDestination::Staked)); assert_ok!(Staking::validate(RuntimeOrigin::signed(42), Default::default())); @@ -809,7 +978,11 @@ mod on_idle { next_block(true); assert_eq!( Head::::get(), - Some(UnstakeRequest { stash: 42, checked: bounded_vec![3, 2, 1, 0] }) + Some(UnstakeRequest { + deposit: DepositAmount::get(), + stash: 42, + checked: bounded_vec![3, 2, 1, 0] + }) ); next_block(true); assert_eq!(Head::::get(), None); @@ -824,69 +997,3 @@ mod on_idle { }); } } - -mod signed_extension { - use super::*; - use sp_runtime::traits::SignedExtension; - - const STAKING_CALL: crate::mock::RuntimeCall = - crate::mock::RuntimeCall::Staking(pallet_staking::Call::::chill {}); - - #[test] - fn does_nothing_if_not_queued() { - ExtBuilder::default().build_and_execute(|| { - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) - .is_ok()); - }) - } - - #[test] - fn prevents_queued() { - ExtBuilder::default().build_and_execute(|| { - // given: stash for 2 is 1. - // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - - // then - // stash can't. - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - - // controller can't. - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&2, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - }) - } - - #[test] - fn prevents_head_stash() { - ExtBuilder::default().build_and_execute(|| { - // given: stash for 2 is 1. 
- // when - assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(2))); - - ErasToCheckPerBlock::::put(1); - CurrentEra::::put(BondingDuration::get()); - next_block(true); - - assert_eq!( - Head::::get(), - Some(UnstakeRequest { stash: 1, checked: bounded_vec![3] }) - ); - - // then - // stash can't - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&2, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - - // controller can't - assert!(PreventStakingOpsIfUnbonding::::new() - .pre_dispatch(&1, &STAKING_CALL, &Default::default(), Default::default()) - .is_err()); - }) - } -} diff --git a/frame/fast-unstake/src/types.rs b/frame/fast-unstake/src/types.rs index 2ddb8dca27e9e..08b9ab4326eb2 100644 --- a/frame/fast-unstake/src/types.rs +++ b/frame/fast-unstake/src/types.rs @@ -17,14 +17,12 @@ //! Types used in the Fast Unstake pallet. -use crate::*; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - traits::{Currency, Get, IsSubType}, + traits::{Currency, Get}, BoundedVec, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; -use sp_runtime::transaction_validity::{InvalidTransaction, TransactionValidityError}; use sp_staking::EraIndex; use sp_std::{fmt::Debug, prelude::*}; @@ -36,80 +34,15 @@ pub type BalanceOf = <::Currency as Currency< #[derive( Encode, Decode, EqNoBound, PartialEqNoBound, Clone, TypeInfo, RuntimeDebugNoBound, MaxEncodedLen, )] -pub struct UnstakeRequest> { +pub struct UnstakeRequest< + AccountId: Eq + PartialEq + Debug, + MaxChecked: Get, + Balance: PartialEq + Debug, +> { /// Their stash account. pub(crate) stash: AccountId, /// The list of eras for which they have been checked. pub(crate) checked: BoundedVec, -} - -#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo, RuntimeDebugNoBound)] -#[scale_info(skip_type_params(T))] -pub struct PreventStakingOpsIfUnbonding(sp_std::marker::PhantomData); - -impl PreventStakingOpsIfUnbonding { - pub fn new() -> Self { - Self(Default::default()) - } -} - -impl sp_runtime::traits::SignedExtension - for PreventStakingOpsIfUnbonding -where - ::RuntimeCall: IsSubType>, -{ - type AccountId = T::AccountId; - type Call = ::RuntimeCall; - type AdditionalSigned = (); - type Pre = (); - const IDENTIFIER: &'static str = "PreventStakingOpsIfUnbonding"; - - fn additional_signed(&self) -> Result { - Ok(()) - } - - fn pre_dispatch( - self, - // NOTE: we want to prevent this stash-controller pair from doing anything in the - // staking system as long as they are registered here. - stash_or_controller: &Self::AccountId, - call: &Self::Call, - _info: &sp_runtime::traits::DispatchInfoOf, - _len: usize, - ) -> Result { - // we don't check this in the tx-pool as it requires a storage read. - if >>::is_sub_type(call).is_some() { - let check_stash = |stash: &T::AccountId| { - if Queue::::contains_key(&stash) || - Head::::get().map_or(false, |u| &u.stash == stash) - { - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) - } else { - Ok(()) - } - }; - match ( - // mapped from controller. - pallet_staking::Ledger::::get(&stash_or_controller), - // mapped from stash. - pallet_staking::Bonded::::get(&stash_or_controller), - ) { - (Some(ledger), None) => { - // it is a controller. - check_stash(&ledger.stash) - }, - (_, Some(_)) => { - // it's a stash. - let stash = stash_or_controller; - check_stash(stash) - }, - (None, None) => { - // They are not a staker -- let them execute. 
- Ok(()) - }, - } - } else { - Ok(()) - } - } + /// Deposit to be slashed if the unstake was unsuccessful. + pub(crate) deposit: Balance, } From 2ee4cb47fa01ad6c7d6c94acf3370fd26470b388 Mon Sep 17 00:00:00 2001 From: Liu-Cheng Xu Date: Wed, 28 Sep 2022 04:14:01 +0800 Subject: [PATCH 183/348] Add missing CountedStorageMap in pallet::storage error info (#12356) --- frame/support/procedural/src/pallet/parse/storage.rs | 4 ++-- .../test/tests/pallet_ui/storage_not_storage_type.stderr | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 321c4dd5d4914..b16ff05803d98 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -555,8 +555,8 @@ fn process_generics( found => { let msg = format!( "Invalid pallet::storage, expected ident: `StorageValue` or \ - `StorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, \ - found `{}`.", + `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` \ + in order to expand metadata, found `{}`.", found, ); return Err(syn::Error::new(segment.ident.span(), msg)) diff --git a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr index 4fd59183282d0..223e9cfa3e9f8 100644 --- a/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr +++ b/frame/support/test/tests/pallet_ui/storage_not_storage_type.stderr @@ -1,4 +1,4 @@ -error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, found `u8`. +error: Invalid pallet::storage, expected ident: `StorageValue` or `StorageMap` or `CountedStorageMap` or `StorageDoubleMap` or `StorageNMap` in order to expand metadata, found `u8`. 
--> $DIR/storage_not_storage_type.rs:19:16 | 19 | type Foo = u8; From 17c07af0b953b84dbe89341294e98e586f9b4591 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Wed, 28 Sep 2022 18:21:53 +0800 Subject: [PATCH 184/348] Add storage size component to weights (#12277) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add storage size component to weights * Rename storage_size to proof_size * Update primitives/weights/src/weight_v2.rs Co-authored-by: Oliver Tale-Yazdi * Fixes * cargo fmt * Implement custom Decode and CompactAs * Add missing import * Fixes * Remove CompactAs implementation * Properly migrate from 1D weight * Remove #[pallet::compact] from Weight parameters * More #[pallet::compact] removals * Add unit tests * Set appropriate default block proof size * cargo fmt * Remove nonsensical weight constant * Test only for the reference time weight in frame_system::limits * Only check for reference time weight on idle * Use destructuring syntax * Update test expectations * Fixes * Fixes * Fixes * Correctly migrate from 1D weights * cargo fmt * Migrate using extra extrinsics instead of custom Decode * Fixes * Silence dispatch call warnings that were previously allowed * Fix gas_left test * Use OldWeight instead of u64 * Fixes * Only check for reference time weight in election provider * Fix test expectations * Fix test expectations * Use only reference time weight in grandpa test * Use only reference time weight in examples test * Use only reference time weight in examples test * Fix test expectations Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Alexander Theißen --- frame/alliance/src/lib.rs | 62 ++++- frame/babe/src/tests.rs | 3 +- frame/collective/src/lib.rs | 69 +++++- frame/contracts/src/lib.rs | 179 ++++++++++++++- frame/contracts/src/wasm/mod.rs | 15 +- .../election-provider-multi-phase/src/lib.rs | 9 +- .../src/unsigned.rs | 9 +- frame/examples/basic/src/tests.rs | 6 +- frame/executive/src/lib.rs | 7 +- frame/grandpa/src/tests.rs | 3 +- .../procedural/src/pallet/expand/call.rs | 20 ++ .../procedural/src/pallet/parse/call.rs | 3 + ...age_ensure_span_are_ok_on_wrong_gen.stderr | 6 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- frame/system/src/limits.rs | 21 +- frame/transaction-payment/src/types.rs | 5 +- primitives/weights/src/lib.rs | 21 +- primitives/weights/src/weight_v2.rs | 211 ++++++++++++------ 20 files changed, 531 insertions(+), 128 deletions(-) diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 2ef6718538122..24111b44ced9e 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -120,7 +120,7 @@ use frame_support::{ ChangeMembers, Currency, Get, InitializeMembers, IsSubType, OnUnbalanced, ReservableCurrency, }, - weights::Weight, + weights::{OldWeight, Weight}, }; use pallet_identity::IdentityField; @@ -620,25 +620,22 @@ pub mod pallet { .max(T::WeightInfo::close_early_disapproved(x, y, p2)) .max(T::WeightInfo::close_approved(b, x, y, p2)) .max(T::WeightInfo::close_disapproved(x, y, p2)) - .saturating_add(p1) + .saturating_add(p1.into()) })] - pub fn close( + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to use `close`")] + pub fn close_old_weight( origin: OriginFor, proposal_hash: T::Hash, #[pallet::compact] index: ProposalIndex, - #[pallet::compact] proposal_weight_bound: Weight, + #[pallet::compact] proposal_weight_bound: 
OldWeight, #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { + let proposal_weight_bound: Weight = proposal_weight_bound.into(); let who = ensure_signed(origin)?; ensure!(Self::has_voting_rights(&who), Error::::NoVotingRights); - let info = T::ProposalProvider::close_proposal( - proposal_hash, - index, - proposal_weight_bound, - length_bound, - )?; - Ok(info.into()) + Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) } /// Initialize the Alliance, onboard founders, fellows, and allies. @@ -985,6 +982,34 @@ pub mod pallet { Self::deposit_event(Event::UnscrupulousItemRemoved { items }); Ok(()) } + + /// Close a vote that is either approved, disapproved, or whose voting period has ended. + /// + /// Requires the sender to be a founder or fellow. + #[pallet::weight({ + let b = *length_bound; + let x = T::MaxFounders::get(); + let y = T::MaxFellows::get(); + let p1 = *proposal_weight_bound; + let p2 = T::MaxProposals::get(); + T::WeightInfo::close_early_approved(b, x, y, p2) + .max(T::WeightInfo::close_early_disapproved(x, y, p2)) + .max(T::WeightInfo::close_approved(b, x, y, p2)) + .max(T::WeightInfo::close_disapproved(x, y, p2)) + .saturating_add(p1) + })] + pub fn close( + origin: OriginFor, + proposal_hash: T::Hash, + #[pallet::compact] index: ProposalIndex, + proposal_weight_bound: Weight, + #[pallet::compact] length_bound: u32, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!(Self::has_voting_rights(&who), Error::::NoVotingRights); + + Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) + } } } @@ -1197,4 +1222,19 @@ impl, I: 'static> Pallet { } res } + + fn do_close( + proposal_hash: T::Hash, + index: ProposalIndex, + proposal_weight_bound: Weight, + length_bound: u32, + ) -> DispatchResultWithPostInfo { + let info = T::ProposalProvider::close_proposal( + proposal_hash, + index, + proposal_weight_bound, + length_bound, + )?; + Ok(info.into()) + } } diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 8d2a9b326cd0f..d4132e6378540 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -852,7 +852,8 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.all_gt(Weight::zero())); + // TODO: account for proof size weight + assert!(info.weight.ref_time() > 0); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. 
diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index ae68ae2fe3e16..06d5b1fab78e7 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -57,7 +57,7 @@ use frame_support::{ traits::{ Backing, ChangeMembers, EnsureOrigin, Get, GetBacking, InitializeMembers, StorageVersion, }, - weights::Weight, + weights::{OldWeight, Weight}, }; #[cfg(test)] @@ -620,17 +620,20 @@ pub mod pallet { .max(T::WeightInfo::close_early_disapproved(m, p2)) .max(T::WeightInfo::close_approved(b, m, p2)) .max(T::WeightInfo::close_disapproved(m, p2)) - .saturating_add(p1) + .saturating_add(p1.into()) }, DispatchClass::Operational ))] - pub fn close( + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `close`")] + pub fn close_old_weight( origin: OriginFor, proposal_hash: T::Hash, #[pallet::compact] index: ProposalIndex, - #[pallet::compact] proposal_weight_bound: Weight, + #[pallet::compact] proposal_weight_bound: OldWeight, #[pallet::compact] length_bound: u32, ) -> DispatchResultWithPostInfo { + let proposal_weight_bound: Weight = proposal_weight_bound.into(); let _ = ensure_signed(origin)?; Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) @@ -659,6 +662,64 @@ pub mod pallet { let proposal_count = Self::do_disapprove_proposal(proposal_hash); Ok(Some(T::WeightInfo::disapprove_proposal(proposal_count)).into()) } + + /// Close a vote that is either approved, disapproved or whose voting period has ended. + /// + /// May be called by any signed account in order to finish voting and close the proposal. + /// + /// If called before the end of the voting period it will only close the vote if it is + /// has enough votes to be approved or disapproved. + /// + /// If called after the end of the voting period abstentions are counted as rejections + /// unless there is a prime member set and the prime member cast an approval. + /// + /// If the close operation completes successfully with disapproval, the transaction fee will + /// be waived. Otherwise execution of the approved operation will be charged to the caller. + /// + /// + `proposal_weight_bound`: The maximum amount of weight consumed by executing the closed + /// proposal. + /// + `length_bound`: The upper bound for the length of the proposal in storage. Checked via + /// `storage::read` so it is `size_of::() == 4` larger than the pure length. + /// + /// # + /// ## Weight + /// - `O(B + M + P1 + P2)` where: + /// - `B` is `proposal` size in bytes (length-fee-bounded) + /// - `M` is members-count (code- and governance-bounded) + /// - `P1` is the complexity of `proposal` preimage. 
+ /// - `P2` is proposal-count (code-bounded) + /// - DB: + /// - 2 storage reads (`Members`: codec `O(M)`, `Prime`: codec `O(1)`) + /// - 3 mutations (`Voting`: codec `O(M)`, `ProposalOf`: codec `O(B)`, `Proposals`: codec + /// `O(P2)`) + /// - any mutations done while executing `proposal` (`P1`) + /// - up to 3 events + /// # + #[pallet::weight(( + { + let b = *length_bound; + let m = T::MaxMembers::get(); + let p1 = *proposal_weight_bound; + let p2 = T::MaxProposals::get(); + T::WeightInfo::close_early_approved(b, m, p2) + .max(T::WeightInfo::close_early_disapproved(m, p2)) + .max(T::WeightInfo::close_approved(b, m, p2)) + .max(T::WeightInfo::close_disapproved(m, p2)) + .saturating_add(p1) + }, + DispatchClass::Operational + ))] + pub fn close( + origin: OriginFor, + proposal_hash: T::Hash, + #[pallet::compact] index: ProposalIndex, + proposal_weight_bound: Weight, + #[pallet::compact] length_bound: u32, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + + Self::do_close(proposal_hash, index, proposal_weight_bound, length_bound) + } } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index fc44e4507ca00..f9a1c8decf042 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -113,7 +113,7 @@ use frame_support::{ tokens::fungible::Inspect, ConstU32, Contains, Currency, Get, Randomness, ReservableCurrency, Time, }, - weights::Weight, + weights::{OldWeight, Weight}, BoundedVec, WeakBoundedVec, }; use frame_system::{limits::BlockWeights, Pallet as System}; @@ -429,15 +429,18 @@ pub mod pallet { /// * If the account is a regular account, any value will be transferred. /// * If no account exists and the call value is not less than `existential_deposit`, /// a regular account will be created and any value will be transferred. - #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] - pub fn call( + #[pallet::weight(T::WeightInfo::call().saturating_add((*gas_limit).into()))] + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `call`")] + pub fn call_old_weight( origin: OriginFor, dest: AccountIdLookupOf, #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: Weight, + #[pallet::compact] gas_limit: OldWeight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, data: Vec, ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; let mut output = Self::internal_call( @@ -485,17 +488,22 @@ pub mod pallet { /// - The `deploy` function is executed in the context of the newly-created account. #[pallet::weight( T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) - .saturating_add(*gas_limit) + .saturating_add((*gas_limit).into()) )] - pub fn instantiate_with_code( + #[allow(deprecated)] + #[deprecated( + note = "1D weight is used in this extrinsic, please migrate to `instantiate_with_code`" + )] + pub fn instantiate_with_code_old_weight( origin: OriginFor, #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: Weight, + #[pallet::compact] gas_limit: OldWeight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, code: Vec, data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); let origin = ensure_signed(origin)?; let code_len = code.len() as u32; let salt_len = salt.len() as u32; @@ -526,17 +534,20 @@ pub mod pallet { /// code deployment step. 
Instead, the `code_hash` of an on-chain deployed wasm binary /// must be supplied. #[pallet::weight( - T::WeightInfo::instantiate(salt.len() as u32).saturating_add(*gas_limit) + T::WeightInfo::instantiate(salt.len() as u32).saturating_add((*gas_limit).into()) )] - pub fn instantiate( + #[allow(deprecated)] + #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `instantiate`")] + pub fn instantiate_old_weight( origin: OriginFor, #[pallet::compact] value: BalanceOf, - #[pallet::compact] gas_limit: Weight, + #[pallet::compact] gas_limit: OldWeight, storage_deposit_limit: Option< as codec::HasCompact>::Type>, code_hash: CodeHash, data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); let origin = ensure_signed(origin)?; let salt_len = salt.len() as u32; let mut output = Self::internal_instantiate( @@ -639,6 +650,154 @@ pub mod pallet { Ok(()) }) } + + /// Makes a call to an account, optionally transferring some balance. + /// + /// # Parameters + /// + /// * `dest`: Address of the contract to call. + /// * `value`: The balance to transfer from the `origin` to `dest`. + /// * `gas_limit`: The gas limit enforced when executing the constructor. + /// * `storage_deposit_limit`: The maximum amount of balance that can be charged from the + /// caller to pay for the storage consumed. + /// * `data`: The input data to pass to the contract. + /// + /// * If the account is a smart-contract account, the associated code will be + /// executed and any value will be transferred. + /// * If the account is a regular account, any value will be transferred. + /// * If no account exists and the call value is not less than `existential_deposit`, + /// a regular account will be created and any value will be transferred. + #[pallet::weight(T::WeightInfo::call().saturating_add(*gas_limit))] + pub fn call( + origin: OriginFor, + dest: AccountIdLookupOf, + #[pallet::compact] value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + data: Vec, + ) -> DispatchResultWithPostInfo { + let gas_limit: Weight = gas_limit.into(); + let origin = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + let mut output = Self::internal_call( + origin, + dest, + value, + gas_limit, + storage_deposit_limit.map(Into::into), + data, + None, + ); + if let Ok(retval) = &output.result { + if retval.did_revert() { + output.result = Err(>::ContractReverted.into()); + } + } + output.gas_meter.into_dispatch_result(output.result, T::WeightInfo::call()) + } + + /// Instantiates a new contract from the supplied `code` optionally transferring + /// some balance. + /// + /// This dispatchable has the same effect as calling [`Self::upload_code`] + + /// [`Self::instantiate`]. Bundling them together provides efficiency gains. Please + /// also check the documentation of [`Self::upload_code`]. + /// + /// # Parameters + /// + /// * `value`: The balance to transfer from the `origin` to the newly created contract. + /// * `gas_limit`: The gas limit enforced when executing the constructor. + /// * `storage_deposit_limit`: The maximum amount of balance that can be charged/reserved + /// from the caller to pay for the storage consumed. + /// * `code`: The contract code to deploy in raw bytes. + /// * `data`: The input data to pass to the contract constructor. + /// * `salt`: Used for the address derivation. See [`Pallet::contract_address`]. 
+ /// + /// Instantiation is executed as follows: + /// + /// - The supplied `code` is instrumented, deployed, and a `code_hash` is created for that + /// code. + /// - If the `code_hash` already exists on the chain the underlying `code` will be shared. + /// - The destination address is computed based on the sender, code_hash and the salt. + /// - The smart-contract account is created at the computed address. + /// - The `value` is transferred to the new account. + /// - The `deploy` function is executed in the context of the newly-created account. + #[pallet::weight( + T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) + .saturating_add(*gas_limit) + )] + pub fn instantiate_with_code( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + code: Vec, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let code_len = code.len() as u32; + let salt_len = salt.len() as u32; + let mut output = Self::internal_instantiate( + origin, + value, + gas_limit, + storage_deposit_limit.map(Into::into), + Code::Upload(Bytes(code)), + data, + salt, + None, + ); + if let Ok(retval) = &output.result { + if retval.1.did_revert() { + output.result = Err(>::ContractReverted.into()); + } + } + output.gas_meter.into_dispatch_result( + output.result.map(|(_address, result)| result), + T::WeightInfo::instantiate_with_code(code_len, salt_len), + ) + } + + /// Instantiates a contract from a previously deployed wasm binary. + /// + /// This function is identical to [`Self::instantiate_with_code`] but without the + /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary + /// must be supplied. 
+ #[pallet::weight( + T::WeightInfo::instantiate(salt.len() as u32).saturating_add(*gas_limit) + )] + pub fn instantiate( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + gas_limit: Weight, + storage_deposit_limit: Option< as codec::HasCompact>::Type>, + code_hash: CodeHash, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let salt_len = salt.len() as u32; + let mut output = Self::internal_instantiate( + origin, + value, + gas_limit, + storage_deposit_limit.map(Into::into), + Code::Existing(code_hash), + data, + salt, + None, + ); + if let Ok(retval) = &output.result { + if retval.1.did_revert() { + output.result = Err(>::ContractReverted.into()); + } + } + output.gas_meter.into_dispatch_result( + output.result.map(|(_address, output)| output), + T::WeightInfo::instantiate(salt_len), + ) + } } #[pallet::event] diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 126a37e9401ec..d8b4cd245356e 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -274,7 +274,11 @@ mod tests { BalanceOf, CodeHash, Error, Pallet as Contracts, }; use assert_matches::assert_matches; - use frame_support::{assert_ok, dispatch::DispatchResultWithPostInfo, weights::Weight}; + use frame_support::{ + assert_ok, + dispatch::DispatchResultWithPostInfo, + weights::{OldWeight, Weight}, + }; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; use sp_core::{Bytes, H256}; @@ -1545,10 +1549,11 @@ mod tests { let output = execute(CODE_GAS_LEFT, vec![], &mut ext).unwrap(); - let gas_left = Weight::decode(&mut &*output.data).unwrap(); + let OldWeight(gas_left) = OldWeight::decode(&mut &*output.data).unwrap(); let actual_left = ext.gas_meter.gas_left(); - assert!(gas_left.all_lt(gas_limit), "gas_left must be less than initial"); - assert!(gas_left.all_gt(actual_left), "gas_left must be greater than final"); + // TODO: account for proof size weight + assert!(gas_left < gas_limit.ref_time(), "gas_left must be less than initial"); + assert!(gas_left > actual_left.ref_time(), "gas_left must be greater than final"); } const CODE_VALUE_TRANSFERRED: &str = r#" @@ -1946,7 +1951,7 @@ mod tests { )] ); - assert!(mock_ext.gas_meter.gas_left().all_gt(Weight::zero())); + assert!(mock_ext.gas_meter.gas_left().ref_time() > 0); } const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 05353e5a3ac61..bba8139f38f44 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1008,8 +1008,10 @@ pub mod pallet { // unlikely to ever return an error: if phase is signed, snapshot will exist. 
let size = Self::snapshot_metadata().ok_or(Error::::MissingSnapshotMetadata)?; + // TODO: account for proof size weight ensure!( - Self::solution_weight_of(&raw_solution, size).all_lt(T::SignedMaxWeight::get()), + Self::solution_weight_of(&raw_solution, size).ref_time() < + T::SignedMaxWeight::get().ref_time(), Error::::SignedTooMuchWeight, ); @@ -2336,8 +2338,9 @@ mod tests { }; let mut active = 1; - while weight_with(active) - .all_lte(::BlockWeights::get().max_block) || + // TODO: account for proof size weight + while weight_with(active).ref_time() <= + ::BlockWeights::get().max_block.ref_time() || active == all_voters { active += 1; diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 833f80c90d13e..281ac37421174 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -638,7 +638,8 @@ impl Miner { }; let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { - if current_weight.all_lt(max_weight) { + // TODO: account for proof size weight + if current_weight.ref_time() < max_weight.ref_time() { let next_voters = voters.checked_add(step); match next_voters { Some(voters) if voters < max_voters => Ok(voters), @@ -673,7 +674,8 @@ impl Miner { // Time to finish. We might have reduced less than expected due to rounding error. Increase // one last time if we have any room left, the reduce until we are sure we are below limit. - while voters < max_voters && weight_with(voters + 1).all_lt(max_weight) { + // TODO: account for proof size weight + while voters < max_voters && weight_with(voters + 1).ref_time() < max_weight.ref_time() { voters += 1; } while voters.checked_sub(1).is_some() && weight_with(voters).any_gt(max_weight) { @@ -681,8 +683,9 @@ impl Miner { } let final_decision = voters.min(size.voters); + // TODO: account for proof size weight debug_assert!( - weight_with(final_decision).all_lte(max_weight), + weight_with(final_decision).ref_time() <= max_weight.ref_time(), "weight_with({}) <= {}", final_decision, max_weight, diff --git a/frame/examples/basic/src/tests.rs b/frame/examples/basic/src/tests.rs index db4787eaa0faa..97fbddfbc41e0 100644 --- a/frame/examples/basic/src/tests.rs +++ b/frame/examples/basic/src/tests.rs @@ -191,11 +191,13 @@ fn weights_work() { let default_call = pallet_example_basic::Call::::accumulate_dummy { increase_by: 10 }; let info1 = default_call.get_dispatch_info(); // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - assert!(info1.weight.all_gt(Weight::zero())); + // TODO: account for proof size weight + assert!(info1.weight.ref_time() > 0); // `set_dummy` is simpler than `accumulate_dummy`, and the weight // should be less. 
let custom_call = pallet_example_basic::Call::::set_dummy { new_value: 20 }; let info2 = custom_call.get_dispatch_info(); - assert!(info1.weight.all_gt(info2.weight)); + // TODO: account for proof size weight + assert!(info1.weight.ref_time() > info2.weight.ref_time()); } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index a41c82da5757c..014c7a2bc02a6 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -459,7 +459,8 @@ where let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); - if remaining_weight.all_gt(Weight::zero()) { + // TODO: account for proof size weight + if remaining_weight.ref_time() > 0 { let used_weight = >::on_idle( block_number, remaining_weight, @@ -938,13 +939,13 @@ mod tests { block_import_works_inner( new_test_ext_v0(1), array_bytes::hex_n_into_unchecked( - "1039e1a4bd0cf5deefe65f313577e70169c41c7773d6acf31ca8d671397559f5", + "0d786e24c1f9e6ce237806a22c005bbbc7dee4edd6692b6c5442843d164392de", ), ); block_import_works_inner( new_test_ext(1), array_bytes::hex_n_into_unchecked( - "75e7d8f360d375bbe91bcf8019c01ab6362448b4a89e3b329717eb9d910340e5", + "348485a4ab856467b440167e45f99b491385e8528e09b0e51f85f814a3021c93", ), ); } diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 775eda58c03e0..5d2ebdf29cb6b 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -856,7 +856,8 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.all_gt(Weight::zero())); + // TODO: account for proof size weight + assert!(info.weight.ref_time() > 0); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 18d5adee63ad6..39d16109aa8fa 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -16,6 +16,7 @@ // limitations under the License. use crate::{pallet::Def, COUNTER}; +use quote::ToTokens; use syn::spanned::Spanned; /// @@ -158,6 +159,24 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { }); } + // Extracts #[allow] attributes, necessary so that we don't run into compiler warnings + let maybe_allow_attrs = methods + .iter() + .map(|method| { + method + .attrs + .iter() + .find(|attr| { + if let Ok(syn::Meta::List(syn::MetaList { path, .. })) = attr.parse_meta() { + path.segments.last().map(|seg| seg.ident == "allow").unwrap_or(false) + } else { + false + } + }) + .map_or(proc_macro2::TokenStream::new(), |attr| attr.to_token_stream()) + }) + .collect::>(); + quote::quote_spanned!(span => #[doc(hidden)] pub mod __substrate_call_check { @@ -289,6 +308,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::sp_tracing::enter_span!( #frame_support::sp_tracing::trace_span!(stringify!(#fn_name)) ); + #maybe_allow_attrs <#pallet_ident<#type_use_gen>>::#fn_name(origin, #( #args_name, )* ) .map(Into::into).map_err(Into::into) }, diff --git a/frame/support/procedural/src/pallet/parse/call.rs b/frame/support/procedural/src/pallet/parse/call.rs index 336e08c3d39b7..f7b2c9544d831 100644 --- a/frame/support/procedural/src/pallet/parse/call.rs +++ b/frame/support/procedural/src/pallet/parse/call.rs @@ -61,6 +61,8 @@ pub struct CallVariantDef { pub call_index: u8, /// Docs, used for metadata. 
pub docs: Vec, + /// Attributes annotated at the top of the dispatchable function. + pub attrs: Vec, } /// Attributes for functions in call impl block. @@ -287,6 +289,7 @@ impl CallDef { call_index: final_index, args, docs, + attrs: method.attrs.clone(), }); } else { let msg = "Invalid pallet::call, only method accepted"; diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 5d159ec961c7f..b0716d569409c 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 159 others + and 160 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 4671855431b27..926dc92530659 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 159 others + and 160 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 278 others + and 279 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index d9cd20711403d..563190a06f76f 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 76 others + and 77 others = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 9a4e8d740cb2c..c10005223b674 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 76 others + and 77 others = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index e182eb626424d..cfc1d261baa01 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -207,7 +207,7 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { - Self::with_sensible_defaults(1u32 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) + Self::with_sensible_defaults(1u64 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) } } @@ -224,6 +224,7 @@ impl BlockWeights { } let mut error = ValidationErrors::default(); + // TODO: account for proof size weight in the assertions below for class in DispatchClass::all() { let weights = self.per_class.get(*class); let max_for_class = or_max(weights.max_total); @@ -232,18 +233,16 @@ impl BlockWeights { // Make sure that if total is set it's greater than base_block && // base_for_class error_assert!( - (max_for_class.all_gt(self.base_block) && 
max_for_class.all_gt(base_for_class)) - || max_for_class == Weight::zero(), + (max_for_class.ref_time() > self.base_block.ref_time() && max_for_class.ref_time() > base_for_class.ref_time()) + || max_for_class.ref_time() == 0, &mut error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", class, max_for_class, self.base_block, base_for_class, ); // Max extrinsic can't be greater than max_for_class. error_assert!( - weights - .max_extrinsic - .unwrap_or(Weight::zero()) - .all_lte(max_for_class.saturating_sub(base_for_class)), + weights.max_extrinsic.unwrap_or(Weight::zero()).ref_time() <= + max_for_class.saturating_sub(base_for_class).ref_time(), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, @@ -252,14 +251,14 @@ impl BlockWeights { ); // Max extrinsic should not be 0 error_assert!( - weights.max_extrinsic.unwrap_or_else(Weight::max_value).all_gt(Weight::zero()), + weights.max_extrinsic.unwrap_or_else(Weight::max_value).ref_time() > 0, &mut error, "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. error_assert!( - reserved.all_gt(base_for_class) || reserved == Weight::zero(), + reserved.ref_time() > base_for_class.ref_time() || reserved.ref_time() == 0, &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, @@ -268,7 +267,7 @@ impl BlockWeights { ); // Make sure max block is greater than max_total if it's set. error_assert!( - self.max_block.all_gte(weights.max_total.unwrap_or(Weight::zero())), + self.max_block.ref_time() >= weights.max_total.unwrap_or(Weight::zero()).ref_time(), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, @@ -277,7 +276,7 @@ impl BlockWeights { ); // Make sure we can fit at least one extrinsic. 
error_assert!( - self.max_block.all_gt(base_for_class + self.base_block), + self.max_block.ref_time() > (base_for_class + self.base_block).ref_time(), &mut error, "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", class, diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs index 1f41ba7b0b72e..fff41ef6937f5 100644 --- a/frame/transaction-payment/src/types.rs +++ b/frame/transaction-payment/src/types.rs @@ -140,7 +140,8 @@ mod tests { partial_fee: 1_000_000_u64, }; - let json_str = r#"{"weight":{"ref_time":5},"class":"normal","partialFee":"1000000"}"#; + let json_str = + r#"{"weight":{"ref_time":5,"proof_size":0},"class":"normal","partialFee":"1000000"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); @@ -157,7 +158,7 @@ mod tests { partial_fee: u128::max_value(), }; - let json_str = r#"{"weight":{"ref_time":5},"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; + let json_str = r#"{"weight":{"ref_time":5,"proof_size":0},"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; assert_eq!(serde_json::to_string(&info).unwrap(), json_str); assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); diff --git a/primitives/weights/src/lib.rs b/primitives/weights/src/lib.rs index d260f73d41268..e1ac7fcd4e892 100644 --- a/primitives/weights/src/lib.rs +++ b/primitives/weights/src/lib.rs @@ -30,7 +30,7 @@ extern crate self as sp_weights; mod weight_v2; -use codec::{Decode, Encode}; +use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -53,6 +53,25 @@ pub mod constants { pub const WEIGHT_PER_NANOS: Weight = Weight::from_ref_time(1_000); } +/// The old weight type. +/// +/// NOTE: This type exists purely for compatibility purposes! Use [`weight_v2::Weight`] in all other +/// cases. +#[derive( + Decode, + Encode, + CompactAs, + PartialEq, + Eq, + Clone, + Copy, + RuntimeDebug, + Default, + MaxEncodedLen, + TypeInfo, +)] +pub struct OldWeight(pub u64); + /// The weight of database operations that the runtime can invoke. /// /// NOTE: This is currently only measured in computational time, and will probably diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index af0f469ebaaeb..a8eaf79a28711 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use core::ops::{Add, AddAssign, Div, Mul, Sub, SubAssign}; use sp_arithmetic::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; use sp_debug_derive::RuntimeDebug; @@ -23,22 +23,22 @@ use sp_debug_derive::RuntimeDebug; use super::*; #[derive( - Encode, - Decode, - MaxEncodedLen, - TypeInfo, - Eq, - PartialEq, - Copy, - Clone, - RuntimeDebug, - Default, - CompactAs, + Encode, Decode, MaxEncodedLen, TypeInfo, Eq, PartialEq, Copy, Clone, RuntimeDebug, Default, )] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct Weight { + #[codec(compact)] /// The weight of computational time used based on some reference hardware. ref_time: u64, + #[codec(compact)] + /// The weight of storage space used by proof of validity. 
+ proof_size: u64, +} + +impl From for Weight { + fn from(old: OldWeight) -> Self { + Weight::from_ref_time(old.0) + } } impl Weight { @@ -48,71 +48,118 @@ impl Weight { self } + /// Set the storage size part of the weight. + pub const fn set_proof_size(mut self, c: u64) -> Self { + self.proof_size = c; + self + } + /// Return the reference time part of the weight. pub const fn ref_time(&self) -> u64 { self.ref_time } - /// Return a mutable reference time part of the weight. + /// Return the storage size part of the weight. + pub const fn proof_size(&self) -> u64 { + self.proof_size + } + + /// Return a mutable reference to the reference time part of the weight. pub fn ref_time_mut(&mut self) -> &mut u64 { &mut self.ref_time } - pub const MAX: Self = Self { ref_time: u64::MAX }; + /// Return a mutable reference to the storage size part of the weight. + pub fn proof_size_mut(&mut self) -> &mut u64 { + &mut self.proof_size + } + + pub const MAX: Self = Self { ref_time: u64::MAX, proof_size: u64::MAX }; /// Get the conservative min of `self` and `other` weight. pub fn min(&self, other: Self) -> Self { - Self { ref_time: self.ref_time.min(other.ref_time) } + Self { + ref_time: self.ref_time.min(other.ref_time), + proof_size: self.proof_size.min(other.proof_size), + } } /// Get the aggressive max of `self` and `other` weight. pub fn max(&self, other: Self) -> Self { - Self { ref_time: self.ref_time.max(other.ref_time) } + Self { + ref_time: self.ref_time.max(other.ref_time), + proof_size: self.proof_size.max(other.proof_size), + } } /// Try to add some `other` weight while upholding the `limit`. pub fn try_add(&self, other: &Self, limit: &Self) -> Option { let total = self.checked_add(other)?; - if total.ref_time > limit.ref_time { + if total.any_gt(*limit) { None } else { Some(total) } } - /// Construct [`Weight`] with reference time weight. + /// Construct [`Weight`] with reference time weight and 0 storage size weight. pub const fn from_ref_time(ref_time: u64) -> Self { - Self { ref_time } + Self { ref_time, proof_size: 0 } + } + + /// Construct [`Weight`] with storage size weight and 0 reference time weight. + pub const fn from_proof_size(proof_size: u64) -> Self { + Self { ref_time: 0, proof_size } + } + + /// Construct [`Weight`] with weight components, namely reference time and storage size weights. + pub const fn from_components(ref_time: u64, proof_size: u64) -> Self { + Self { ref_time, proof_size } } /// Saturating [`Weight`] addition. Computes `self + rhs`, saturating at the numeric bounds of /// all fields instead of overflowing. pub const fn saturating_add(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time.saturating_add(rhs.ref_time) } + Self { + ref_time: self.ref_time.saturating_add(rhs.ref_time), + proof_size: self.proof_size.saturating_add(rhs.proof_size), + } } /// Saturating [`Weight`] subtraction. Computes `self - rhs`, saturating at the numeric bounds /// of all fields instead of overflowing. pub const fn saturating_sub(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time.saturating_sub(rhs.ref_time) } + Self { + ref_time: self.ref_time.saturating_sub(rhs.ref_time), + proof_size: self.proof_size.saturating_sub(rhs.proof_size), + } } /// Saturating [`Weight`] scalar multiplication. Computes `self.field * scalar` for all fields, /// saturating at the numeric bounds of all fields instead of overflowing. 
pub const fn saturating_mul(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time.saturating_mul(scalar) } + Self { + ref_time: self.ref_time.saturating_mul(scalar), + proof_size: self.proof_size.saturating_mul(scalar), + } } /// Saturating [`Weight`] scalar division. Computes `self.field / scalar` for all fields, /// saturating at the numeric bounds of all fields instead of overflowing. pub const fn saturating_div(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time.saturating_div(scalar) } + Self { + ref_time: self.ref_time.saturating_div(scalar), + proof_size: self.proof_size.saturating_div(scalar), + } } /// Saturating [`Weight`] scalar exponentiation. Computes `self.field.pow(exp)` for all fields, /// saturating at the numeric bounds of all fields instead of overflowing. pub const fn saturating_pow(self, exp: u32) -> Self { - Self { ref_time: self.ref_time.saturating_pow(exp) } + Self { + ref_time: self.ref_time.saturating_pow(exp), + proof_size: self.proof_size.saturating_pow(exp), + } } /// Increment [`Weight`] by `amount` via saturating addition. @@ -122,124 +169,144 @@ impl Weight { /// Checked [`Weight`] addition. Computes `self + rhs`, returning `None` if overflow occurred. pub const fn checked_add(&self, rhs: &Self) -> Option { - match self.ref_time.checked_add(rhs.ref_time) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_add(rhs.ref_time) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_add(rhs.proof_size) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Checked [`Weight`] subtraction. Computes `self - rhs`, returning `None` if overflow /// occurred. pub const fn checked_sub(&self, rhs: &Self) -> Option { - match self.ref_time.checked_sub(rhs.ref_time) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_sub(rhs.ref_time) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_sub(rhs.proof_size) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Checked [`Weight`] scalar multiplication. Computes `self.field * scalar` for each field, /// returning `None` if overflow occurred. pub const fn checked_mul(self, scalar: u64) -> Option { - match self.ref_time.checked_mul(scalar) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_mul(scalar) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_mul(scalar) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Checked [`Weight`] scalar division. Computes `self.field / scalar` for each field, returning /// `None` if overflow occurred. pub const fn checked_div(self, scalar: u64) -> Option { - match self.ref_time.checked_div(scalar) { - Some(ref_time) => Some(Self { ref_time }), - None => None, - } + let ref_time = match self.ref_time.checked_div(scalar) { + Some(t) => t, + None => return None, + }; + let proof_size = match self.proof_size.checked_div(scalar) { + Some(s) => s, + None => return None, + }; + Some(Self { ref_time, proof_size }) } /// Return a [`Weight`] where all fields are zero. pub const fn zero() -> Self { - Self { ref_time: 0 } + Self { ref_time: 0, proof_size: 0 } } /// Constant version of Add with u64. /// /// Is only overflow safe when evaluated at compile-time. 
pub const fn add(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time + scalar } + Self { ref_time: self.ref_time + scalar, proof_size: self.proof_size + scalar } } /// Constant version of Sub with u64. /// /// Is only overflow safe when evaluated at compile-time. pub const fn sub(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time - scalar } + Self { ref_time: self.ref_time - scalar, proof_size: self.proof_size - scalar } } /// Constant version of Div with u64. /// /// Is only overflow safe when evaluated at compile-time. pub const fn div(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time / scalar } + Self { ref_time: self.ref_time / scalar, proof_size: self.proof_size / scalar } } /// Constant version of Mul with u64. /// /// Is only overflow safe when evaluated at compile-time. pub const fn mul(self, scalar: u64) -> Self { - Self { ref_time: self.ref_time * scalar } + Self { ref_time: self.ref_time * scalar, proof_size: self.proof_size * scalar } } /// Returns true if any of `self`'s constituent weights is strictly greater than that of the /// `other`'s, otherwise returns false. pub const fn any_gt(self, other: Self) -> bool { - self.ref_time > other.ref_time + self.ref_time > other.ref_time || self.proof_size > other.proof_size } /// Returns true if all of `self`'s constituent weights is strictly greater than that of the /// `other`'s, otherwise returns false. pub const fn all_gt(self, other: Self) -> bool { - self.ref_time > other.ref_time + self.ref_time > other.ref_time && self.proof_size > other.proof_size } /// Returns true if any of `self`'s constituent weights is strictly less than that of the /// `other`'s, otherwise returns false. pub const fn any_lt(self, other: Self) -> bool { - self.ref_time < other.ref_time + self.ref_time < other.ref_time || self.proof_size < other.proof_size } /// Returns true if all of `self`'s constituent weights is strictly less than that of the /// `other`'s, otherwise returns false. pub const fn all_lt(self, other: Self) -> bool { - self.ref_time < other.ref_time + self.ref_time < other.ref_time && self.proof_size < other.proof_size } /// Returns true if any of `self`'s constituent weights is greater than or equal to that of the /// `other`'s, otherwise returns false. pub const fn any_gte(self, other: Self) -> bool { - self.ref_time >= other.ref_time + self.ref_time >= other.ref_time || self.proof_size >= other.proof_size } /// Returns true if all of `self`'s constituent weights is greater than or equal to that of the /// `other`'s, otherwise returns false. pub const fn all_gte(self, other: Self) -> bool { - self.ref_time >= other.ref_time + self.ref_time >= other.ref_time && self.proof_size >= other.proof_size } /// Returns true if any of `self`'s constituent weights is less than or equal to that of the /// `other`'s, otherwise returns false. pub const fn any_lte(self, other: Self) -> bool { - self.ref_time <= other.ref_time + self.ref_time <= other.ref_time || self.proof_size <= other.proof_size } /// Returns true if all of `self`'s constituent weights is less than or equal to that of the /// `other`'s, otherwise returns false. pub const fn all_lte(self, other: Self) -> bool { - self.ref_time <= other.ref_time + self.ref_time <= other.ref_time && self.proof_size <= other.proof_size } /// Returns true if any of `self`'s constituent weights is equal to that of the `other`'s, /// otherwise returns false. 
pub const fn any_eq(self, other: Self) -> bool { - self.ref_time == other.ref_time + self.ref_time == other.ref_time || self.proof_size == other.proof_size } // NOTE: `all_eq` does not exist, as it's simply the `eq` method from the `PartialEq` trait. @@ -258,14 +325,20 @@ impl Zero for Weight { impl Add for Weight { type Output = Self; fn add(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time + rhs.ref_time } + Self { + ref_time: self.ref_time + rhs.ref_time, + proof_size: self.proof_size + rhs.proof_size, + } } } impl Sub for Weight { type Output = Self; fn sub(self, rhs: Self) -> Self { - Self { ref_time: self.ref_time - rhs.ref_time } + Self { + ref_time: self.ref_time - rhs.ref_time, + proof_size: self.proof_size - rhs.proof_size, + } } } @@ -275,7 +348,7 @@ where { type Output = Self; fn mul(self, b: T) -> Self { - Self { ref_time: b * self.ref_time } + Self { ref_time: b * self.ref_time, proof_size: b * self.proof_size } } } @@ -285,7 +358,10 @@ macro_rules! weight_mul_per_impl { impl Mul for $t { type Output = Weight; fn mul(self, b: Weight) -> Weight { - Weight { ref_time: self * b.ref_time } + Weight { + ref_time: self * b.ref_time, + proof_size: self * b.proof_size, + } } } )* @@ -305,7 +381,10 @@ macro_rules! weight_mul_primitive_impl { impl Mul for $t { type Output = Weight; fn mul(self, b: Weight) -> Weight { - Weight { ref_time: u64::from(self) * b.ref_time } + Weight { + ref_time: u64::from(self) * b.ref_time, + proof_size: u64::from(self) * b.proof_size, + } } } )* @@ -320,7 +399,7 @@ where { type Output = Self; fn div(self, b: T) -> Self { - Self { ref_time: self.ref_time / b } + Self { ref_time: self.ref_time / b, proof_size: self.proof_size / b } } } @@ -338,7 +417,7 @@ impl CheckedSub for Weight { impl core::fmt::Display for Weight { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "Weight(ref_time: {})", self.ref_time) + write!(f, "Weight(ref_time: {}, proof_size: {})", self.ref_time, self.proof_size) } } @@ -353,12 +432,18 @@ impl Bounded for Weight { impl AddAssign for Weight { fn add_assign(&mut self, other: Self) { - *self = Self { ref_time: self.ref_time + other.ref_time }; + *self = Self { + ref_time: self.ref_time + other.ref_time, + proof_size: self.proof_size + other.proof_size, + }; } } impl SubAssign for Weight { fn sub_assign(&mut self, other: Self) { - *self = Self { ref_time: self.ref_time - other.ref_time }; + *self = Self { + ref_time: self.ref_time - other.ref_time, + proof_size: self.proof_size - other.proof_size, + }; } } From 01a905e304f2b6b2c1caf4c12b622edb12b265fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 28 Sep 2022 14:37:03 +0200 Subject: [PATCH 185/348] pallet-utility: Only disallow the `None` origin (#12351) --- frame/utility/src/lib.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 819314f3d8454..9ae89097a9bc3 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -63,7 +63,7 @@ use frame_support::{ }; use sp_core::TypeId; use sp_io::hashing::blake2_256; -use sp_runtime::traits::{Dispatchable, TrailingZeroInput}; +use sp_runtime::traits::{BadOrigin, Dispatchable, TrailingZeroInput}; use sp_std::prelude::*; pub use weights::WeightInfo; @@ -203,7 +203,12 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_signed_or_root(origin.clone())?.is_none(); + // Do not allow the `None` 
origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()) + } + + let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); @@ -319,7 +324,12 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_signed_or_root(origin.clone())?.is_none(); + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()) + } + + let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); @@ -426,7 +436,12 @@ pub mod pallet { origin: OriginFor, calls: Vec<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - let is_root = ensure_signed_or_root(origin.clone())?.is_none(); + // Do not allow the `None` origin. + if ensure_none(origin.clone()).is_ok() { + return Err(BadOrigin.into()) + } + + let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::::TooManyCalls); From 1b1a5e12c0e391c7ed4e3ffa332eb2fe928d257f Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 28 Sep 2022 14:43:04 +0200 Subject: [PATCH 186/348] Fix staking migration (#12373) Causing issues on Kusama... Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- frame/staking/src/migrations.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/frame/staking/src/migrations.rs b/frame/staking/src/migrations.rs index 8f37ae30dd056..f2ccb4f8b096f 100644 --- a/frame/staking/src/migrations.rs +++ b/frame/staking/src/migrations.rs @@ -40,10 +40,14 @@ pub mod v12 { "Expected v11 before upgrading to v12" ); - frame_support::ensure!( - T::HistoryDepth::get() == HistoryDepth::::get(), - "Provided value of HistoryDepth should be same as the existing storage value" - ); + if HistoryDepth::::exists() { + frame_support::ensure!( + T::HistoryDepth::get() == HistoryDepth::::get(), + "Provided value of HistoryDepth should be same as the existing storage value" + ); + } else { + log::info!("No HistoryDepth in storage; nothing to remove"); + } Ok(Default::default()) } From 0ec4373d9c1252b60f0a3512fd910b1d48af385a Mon Sep 17 00:00:00 2001 From: Koute Date: Thu, 29 Sep 2022 04:38:12 +0900 Subject: [PATCH 187/348] Support running the pallet benchmarks analysis without running the benchmarks (#12361) * Support running the pallet benchmarks analysis without running the benchmarks * Rename `override-results` to `json-input` and update the help comment * ".git/.scripts/fmt.sh" 1 Co-authored-by: command-bot <> --- frame/benchmarking/src/utils.rs | 20 ++++-- .../benchmarking-cli/src/pallet/command.rs | 67 +++++++++++++++++-- .../frame/benchmarking-cli/src/pallet/mod.rs | 10 ++- 3 files changed, 82 insertions(+), 15 deletions(-) diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index b483208e3ef69..753e8c1c684ee 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -23,14 +23,14 @@ use frame_support::{ traits::StorageInfo, }; #[cfg(feature = "std")] -use serde::Serialize; +use serde::{Deserialize, Serialize}; use sp_io::hashing::blake2_256; use sp_runtime::traits::TrailingZeroInput; use sp_std::{prelude::Box, vec::Vec}; use sp_storage::TrackedStorageKey; /// An alphabet of possible parameters to use for benchmarking. 
-#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)] #[allow(missing_docs)] #[allow(non_camel_case_types)] @@ -71,7 +71,7 @@ impl std::fmt::Display for BenchmarkParameter { } /// The results of a single of benchmark. -#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Clone, PartialEq, Debug)] pub struct BenchmarkBatch { /// The pallet containing this benchmark. @@ -89,7 +89,7 @@ pub struct BenchmarkBatch { // TODO: could probably make API cleaner here. /// The results of a single of benchmark, where time and db results are separated. -#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Clone, PartialEq, Debug)] pub struct BenchmarkBatchSplitResults { /// The pallet containing this benchmark. @@ -110,7 +110,7 @@ pub struct BenchmarkBatchSplitResults { /// Result from running benchmarks on a FRAME pallet. /// Contains duration of the function call in nanoseconds along with the benchmark parameters /// used for that benchmark result. -#[cfg_attr(feature = "std", derive(Serialize))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] pub struct BenchmarkResult { pub components: Vec<(BenchmarkParameter, u32)>, @@ -121,7 +121,7 @@ pub struct BenchmarkResult { pub writes: u32, pub repeat_writes: u32, pub proof_size: u32, - #[cfg_attr(feature = "std", serde(skip_serializing))] + #[cfg_attr(feature = "std", serde(skip))] pub keys: Vec<(Vec, u32, u32, bool)>, } @@ -141,6 +141,14 @@ mod serde_as_str { let s = std::str::from_utf8(value).map_err(serde::ser::Error::custom)?; serializer.collect_str(s) } + + pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: serde::de::Deserializer<'de>, + { + let s: &str = serde::de::Deserialize::deserialize(deserializer)?; + Ok(s.into()) + } } /// Possible errors returned from the benchmarking pipeline. diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 6870ec386d23d..72592617c52ac 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -134,6 +134,20 @@ impl PalletCmd { }; } + if let Some(json_input) = &self.json_input { + let raw_data = match std::fs::read(json_input) { + Ok(raw_data) => raw_data, + Err(error) => + return Err(format!("Failed to read {:?}: {}", json_input, error).into()), + }; + let batches: Vec = match serde_json::from_slice(&raw_data) { + Ok(batches) => batches, + Err(error) => + return Err(format!("Failed to deserialize {:?}: {}", json_input, error).into()), + }; + return self.output_from_results(&batches) + } + let spec = config.chain_spec; let strategy = self.execution.unwrap_or(ExecutionStrategy::Native); let pallet = self.pallet.clone().unwrap_or_default(); @@ -396,8 +410,16 @@ impl PalletCmd { // Combine all of the benchmark results, so that benchmarks of the same pallet/function // are together. 
- let batches: Vec = combine_batches(batches, batches_db); + let batches = combine_batches(batches, batches_db); + self.output(&batches, &storage_info, &component_ranges) + } + fn output( + &self, + batches: &[BenchmarkBatchSplitResults], + storage_info: &[StorageInfo], + component_ranges: &HashMap<(Vec, Vec), Vec>, + ) -> Result<()> { // Jsonify the result and write it to a file or stdout if desired. if !self.jsonify(&batches)? { // Print the summary only if `jsonify` did not write to stdout. @@ -412,10 +434,45 @@ impl PalletCmd { Ok(()) } + fn output_from_results(&self, batches: &[BenchmarkBatchSplitResults]) -> Result<()> { + let mut component_ranges = + HashMap::<(Vec, Vec), HashMap>::new(); + for batch in batches { + let range = component_ranges + .entry((batch.pallet.clone(), batch.benchmark.clone())) + .or_default(); + for result in &batch.time_results { + for (param, value) in &result.components { + let name = param.to_string(); + let (ref mut min, ref mut max) = range.entry(name).or_insert((*value, *value)); + if *value < *min { + *min = *value; + } + if *value > *max { + *max = *value; + } + } + } + } + + let component_ranges: HashMap<_, _> = component_ranges + .into_iter() + .map(|(key, ranges)| { + let ranges = ranges + .into_iter() + .map(|(name, (min, max))| ComponentRange { name, min, max }) + .collect(); + (key, ranges) + }) + .collect(); + + self.output(batches, &[], &component_ranges) + } + /// Jsonifies the passed batches and writes them to stdout or into a file. /// Can be configured via `--json` and `--json-file`. /// Returns whether it wrote to stdout. - fn jsonify(&self, batches: &Vec) -> Result { + fn jsonify(&self, batches: &[BenchmarkBatchSplitResults]) -> Result { if self.json_output || self.json_file.is_some() { let json = serde_json::to_string_pretty(&batches) .map_err(|e| format!("Serializing into JSON: {:?}", e))?; @@ -432,11 +489,7 @@ impl PalletCmd { } /// Prints the results as human-readable summary without raw timing data. - fn print_summary( - &self, - batches: &Vec, - storage_info: &Vec, - ) { + fn print_summary(&self, batches: &[BenchmarkBatchSplitResults], storage_info: &[StorageInfo]) { for batch in batches.iter() { // Print benchmark metadata println!( diff --git a/utils/frame/benchmarking-cli/src/pallet/mod.rs b/utils/frame/benchmarking-cli/src/pallet/mod.rs index b8c1f7b905c0c..0e698c4e73910 100644 --- a/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -35,11 +35,11 @@ fn parse_pallet_name(pallet: &str) -> String { #[derive(Debug, clap::Parser)] pub struct PalletCmd { /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[clap(short, long, parse(from_str = parse_pallet_name), required_unless_present = "list")] + #[clap(short, long, parse(from_str = parse_pallet_name), required_unless_present_any = ["list", "json-input"])] pub pallet: Option, /// Select an extrinsic inside the pallet to benchmark, or `*` for all. - #[clap(short, long, required_unless_present = "list")] + #[clap(short, long, required_unless_present_any = ["list", "json-input"])] pub extrinsic: Option, /// Select how many samples we should take across the variable components. @@ -166,4 +166,10 @@ pub struct PalletCmd { /// template for that purpose. #[clap(long)] pub no_storage_info: bool, + + /// A path to a `.json` file with existing benchmark results generated with `--json` or + /// `--json-file`. 
When specified the benchmarks are not actually executed, and the data for + /// the analysis is read from this file. + #[clap(long)] + pub json_input: Option, } From d66adfabd7911bf01ab01ec96ec4228307a03e07 Mon Sep 17 00:00:00 2001 From: Daniel Shiposha Date: Wed, 28 Sep 2022 22:00:33 +0200 Subject: [PATCH 188/348] fix: typo in AllPalletsWithSystem deprecated msg (#12379) --- frame/support/procedural/src/construct_runtime/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index e20cb61b7aec1..73d0d54343eb9 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -422,7 +422,7 @@ fn decl_all_pallets<'a>( /// All pallets included in the runtime as a nested tuple of types in reversed order. /// Excludes the System pallet. #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] + `AllPalletsWithSystem or AllPalletsWithoutSystem`")] pub type AllPalletsWithoutSystemReversed = ( #(#names,)* ); } }); @@ -433,7 +433,7 @@ fn decl_all_pallets<'a>( #attr /// All pallets included in the runtime as a nested tuple of types in reversed order. #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] + `AllPalletsWithSystem or AllPalletsWithoutSystem`")] pub type AllPalletsWithSystemReversed = ( #(#names,)* ); } }); @@ -447,7 +447,7 @@ fn decl_all_pallets<'a>( /// All pallets included in the runtime as a nested tuple of types in reversed order. /// With the system pallet first. #[deprecated(note = "Using reverse pallet orders is deprecated. 
use only \ - `AllPalletWithSystem or AllPalletsWithoutSystem`")] + `AllPalletsWithSystem or AllPalletsWithoutSystem`")] pub type AllPalletsReversedWithSystemFirst = ( #(#names,)* ); } }); From 96de768061b182934b2d824b2fe76effb5b4db85 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Wed, 28 Sep 2022 22:04:14 +0200 Subject: [PATCH 189/348] New Pallet: Root offences (#11943) * root-offences pallet * fix errors * cleaned up a bit * remove unwrap() * new pallet is getting compiled * remove unnecessary type annotations * remove more unnecessary type annotations * addidtional cleaning * commit * cleaned up * fix in logic * add event * removed Clone trait from AccountId * test module * remove unused imports * fmt * fix * separate into functions, still messy * test * first test * fmt * cleaned up a bit * separate into mock.rs and tests.rs * basic docs for now * pallet_staking GenesisiConfig * fix * added start_session * passing tests * impl GenesisConfig for pallet_session * updated event * Update frame/root-offences/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/root-offences/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * remove * Update frame/root-offences/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * docs * Update frame/root-offences/README.md Co-authored-by: Andronik * Update frame/root-offences/Cargo.toml Co-authored-by: Andronik * license header Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Andronik --- Cargo.lock | 22 ++ Cargo.toml | 1 + frame/nomination-pools/src/lib.rs | 2 +- frame/root-offences/Cargo.toml | 51 +++++ frame/root-offences/README.md | 5 + frame/root-offences/src/lib.rs | 131 +++++++++++ frame/root-offences/src/mock.rs | 356 ++++++++++++++++++++++++++++++ frame/root-offences/src/tests.rs | 94 ++++++++ 8 files changed, 661 insertions(+), 1 deletion(-) create mode 100644 frame/root-offences/Cargo.toml create mode 100644 frame/root-offences/README.md create mode 100644 frame/root-offences/src/lib.rs create mode 100644 frame/root-offences/src/mock.rs create mode 100644 frame/root-offences/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index a9a0eef551179..de50d4ec27105 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6197,6 +6197,28 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-root-offences" +version = "1.0.0" +dependencies = [ + "frame-election-provider-support", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-offences", + "pallet-session", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 018355df6c9fd..25f12a2c9fd3f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,6 +135,7 @@ members = [ "frame/staking/reward-fn", "frame/state-trie-migration", "frame/sudo", + "frame/root-offences", "frame/support", "frame/support/procedural", "frame/support/procedural/tools", diff --git a/frame/nomination-pools/src/lib.rs b/frame/nomination-pools/src/lib.rs index 28d10ce573401..9e77adaeee677 100644 --- a/frame/nomination-pools/src/lib.rs +++ b/frame/nomination-pools/src/lib.rs @@ -2523,7 +2523,7 @@ impl Pallet { impl OnStakerSlash> for Pallet { fn on_slash( pool_account: &T::AccountId, - // Bonded 
balance is always read directly from staking, therefore we need not update + // Bonded balance is always read directly from staking, therefore we don't need to update // anything here. slashed_bonded: BalanceOf, slashed_unlocking: &BTreeMap>, diff --git a/frame/root-offences/Cargo.toml b/frame/root-offences/Cargo.toml new file mode 100644 index 0000000000000..ea6a6527848aa --- /dev/null +++ b/frame/root-offences/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "pallet-root-offences" +version = "1.0.0" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME root offences pallet" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } + +pallet-session = { version = "4.0.0-dev", features = [ "historical" ], path = "../../frame/session", default-features = false } +pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../frame/staking" } +pallet-offences = { version = "4.0.0-dev", default-features = false, path = "../../frame/offences" } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } +sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } + +[dev-dependencies] +pallet-balances = { version = "4.0.0-dev", path = "../balances" } +pallet-timestamp = { version = "4.0.0-dev", path = "../timestamp" } +pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../staking/reward-curve" } + +sp-core = { version = "6.0.0", path = "../../primitives/core" } +sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } +sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } + +frame-election-provider-support = { version = "4.0.0-dev", path = "../election-provider-support" } + +[features] +runtime-benchmarks = [] +try-runtime = ["frame-support/try-runtime"] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "pallet-session/std", + "pallet-staking/std", + "pallet-offences/std", + "scale-info/std", + "sp-runtime/std", +] diff --git a/frame/root-offences/README.md b/frame/root-offences/README.md new file mode 100644 index 0000000000000..a2c5261b6985a --- /dev/null +++ b/frame/root-offences/README.md @@ -0,0 +1,5 @@ +# Sudo Offences Pallet + +Pallet that allows the root to create an offence. + +NOTE: This pallet should only be used for testing purposes. \ No newline at end of file diff --git a/frame/root-offences/src/lib.rs b/frame/root-offences/src/lib.rs new file mode 100644 index 0000000000000..b4b549627f3fa --- /dev/null +++ b/frame/root-offences/src/lib.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Sudo Offences Pallet +//! Pallet that allows the root to create an offence. +//! +//! NOTE: This pallet should be used for testing purposes. + +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +use pallet_session::historical::IdentificationTuple; +use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; +use sp_runtime::Perbill; +use sp_staking::offence::{DisableStrategy, OnOffenceHandler}; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: + frame_system::Config + + pallet_staking::Config + + pallet_session::Config::AccountId> + + pallet_session::historical::Config< + FullIdentification = Exposure< + ::AccountId, + BalanceOf, + >, + FullIdentificationOf = ExposureOf, + > + { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// An offence was created by root. + OffenceCreated { offenders: Vec<(T::AccountId, Perbill)> }, + } + + #[pallet::error] + pub enum Error { + /// Failed to get the active era from the staking pallet. + FailedToGetActiveEra, + } + + type OffenceDetails = sp_staking::offence::OffenceDetails< + ::AccountId, + IdentificationTuple, + >; + + #[pallet::call] + impl Pallet { + /// Allows the `root`, for example sudo to create an offence. + #[pallet::weight(T::DbWeight::get().reads(2))] + pub fn create_offence( + origin: OriginFor, + offenders: Vec<(T::AccountId, Perbill)>, + ) -> DispatchResult { + ensure_root(origin)?; + + let slash_fraction = + offenders.clone().into_iter().map(|(_, fraction)| fraction).collect::>(); + let offence_details = Self::get_offence_details(offenders.clone())?; + + Self::submit_offence(&offence_details, &slash_fraction); + Self::deposit_event(Event::OffenceCreated { offenders }); + Ok(()) + } + } + + impl Pallet { + /// Returns a vector of offenders that are going to be slashed. + fn get_offence_details( + offenders: Vec<(T::AccountId, Perbill)>, + ) -> Result>, DispatchError> { + let now = Staking::::active_era() + .map(|e| e.index) + .ok_or(Error::::FailedToGetActiveEra)?; + + Ok(offenders + .clone() + .into_iter() + .map(|(o, _)| OffenceDetails:: { + offender: (o.clone(), Staking::::eras_stakers(now, o)), + reporters: vec![], + }) + .collect()) + } + + /// Submits the offence by calling the `on_offence` function. 
+ fn submit_offence(offenders: &[OffenceDetails], slash_fraction: &[Perbill]) { + let session_index = as frame_support::traits::ValidatorSet>::session_index(); + + as OnOffenceHandler< + T::AccountId, + IdentificationTuple, + Weight, + >>::on_offence(&offenders, &slash_fraction, session_index, DisableStrategy::WhenSlashed); + } + } +} diff --git a/frame/root-offences/src/mock.rs b/frame/root-offences/src/mock.rs new file mode 100644 index 0000000000000..3f0a26afc1358 --- /dev/null +++ b/frame/root-offences/src/mock.rs @@ -0,0 +1,356 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate as root_offences; + +use frame_election_provider_support::{onchain, SequentialPhragmen}; +use frame_support::{ + parameter_types, + traits::{ConstU32, ConstU64, GenesisBuild, Hooks, OneSessionHandler}, +}; +use pallet_staking::StakerStatus; +use sp_core::H256; +use sp_runtime::{ + curve::PiecewiseLinear, + testing::{Header, UintAuthorityId}, + traits::{BlakeTwo256, IdentityLookup, Zero}, +}; +use sp_staking::{EraIndex, SessionIndex}; +use sp_std::collections::btree_map::BTreeMap; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; +type AccountId = u64; +type Balance = u64; +type BlockNumber = u64; + +pub const INIT_TIMESTAMP: u64 = 30_000; +pub const BLOCK_TIME: u64 = 1000; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Staking: pallet_staking::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + RootOffences: root_offences::{Pallet, Call, Storage, Event}, + Historical: pallet_session::historical::{Pallet, Storage}, + } +); + +/// Another session handler struct to test on_disabled. +pub struct OtherSessionHandler; +impl OneSessionHandler for OtherSessionHandler { + type Key = UintAuthorityId; + + fn on_genesis_session<'a, I: 'a>(_: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_new_session<'a, I: 'a>(_: bool, _: I, _: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_disabled(_validator_index: u32) {} +} + +impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { + type Public = UintAuthorityId; +} + +parameter_types! 
{ + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type RuntimeCall = RuntimeCall; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); +} + +pallet_staking_reward_curve::build! { + const REWARD_CURVE: PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000u64, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} + +pub struct OnChainSeqPhragmen; +impl onchain::Config for OnChainSeqPhragmen { + type System = Test; + type Solver = SequentialPhragmen; + type DataProvider = Staking; + type WeightInfo = (); +} + +pub struct OnStakerSlashMock(core::marker::PhantomData); +impl sp_staking::OnStakerSlash for OnStakerSlashMock { + fn on_slash( + _pool_account: &AccountId, + slashed_bonded: Balance, + slashed_chunks: &BTreeMap, + ) { + LedgerSlashPerEra::set((slashed_bonded, slashed_chunks.clone())); + } +} + +parameter_types! 
{ + pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; + pub static Offset: BlockNumber = 0; + pub const Period: BlockNumber = 1; + pub static SessionsPerEra: SessionIndex = 3; + pub static SlashDeferDuration: EraIndex = 0; + pub const BondingDuration: EraIndex = 3; + pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); +} + +impl pallet_staking::Config for Test { + type MaxNominations = ConstU32<16>; + type Currency = Balances; + type CurrencyBalance = ::Balance; + type UnixTime = Timestamp; + type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; + type RewardRemainder = (); + type RuntimeEvent = RuntimeEvent; + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type SlashDeferDuration = SlashDeferDuration; + type SlashCancelOrigin = frame_system::EnsureRoot; + type BondingDuration = BondingDuration; + type SessionInterface = Self; + type EraPayout = pallet_staking::ConvertCurve; + type NextNewSession = Session; + type MaxNominatorRewardedPerValidator = ConstU32<64>; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; + type ElectionProvider = onchain::UnboundedExecution; + type GenesisElectionProvider = Self::ElectionProvider; + type TargetList = pallet_staking::UseValidatorsMap; + type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = ConstU32<84>; + type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type OnStakerSlash = OnStakerSlashMock; + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + type WeightInfo = (); +} + +impl pallet_session::historical::Config for Test { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; +} + +sp_runtime::impl_opaque_keys! 
{ + pub struct SessionKeys { + pub other: OtherSessionHandler, + } +} + +impl pallet_session::Config for Test { + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type Keys = SessionKeys; + type ShouldEndSession = pallet_session::PeriodicSessions; + type SessionHandler = (OtherSessionHandler,); + type RuntimeEvent = RuntimeEvent; + type ValidatorId = AccountId; + type ValidatorIdOf = pallet_staking::StashOf; + type NextSessionRotation = pallet_session::PeriodicSessions; + type WeightInfo = (); +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<5>; + type WeightInfo = (); +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; +} + +pub struct ExtBuilder { + validator_count: u32, + minimum_validator_count: u32, + invulnerables: Vec, + balance_factor: Balance, +} + +impl Default for ExtBuilder { + fn default() -> Self { + Self { + validator_count: 2, + minimum_validator_count: 0, + invulnerables: vec![], + balance_factor: 1, + } + } +} + +impl ExtBuilder { + fn build(self) -> sp_io::TestExternalities { + let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + //controllers + (10, self.balance_factor * 50), + (20, self.balance_factor * 50), + (30, self.balance_factor * 50), + (40, self.balance_factor * 50), + // stashes + (11, self.balance_factor * 1000), + (21, self.balance_factor * 1000), + (31, self.balance_factor * 500), + (41, self.balance_factor * 1000), + ], + } + .assimilate_storage(&mut storage) + .unwrap(); + + let stakers = vec![ + // (stash, ctrl, stake, status) + // these two will be elected in the default test where we elect 2. + (11, 10, 1000, StakerStatus::::Validator), + (21, 20, 1000, StakerStatus::::Validator), + // a loser validator + (31, 30, 500, StakerStatus::::Validator), + // an idle validator + (41, 40, 1000, StakerStatus::::Idle), + ]; + + let _ = pallet_staking::GenesisConfig:: { + stakers: stakers.clone(), + ..Default::default() + }; + + let _ = pallet_staking::GenesisConfig:: { + stakers: stakers.clone(), + validator_count: self.validator_count, + minimum_validator_count: self.minimum_validator_count, + invulnerables: self.invulnerables, + slash_reward_fraction: Perbill::from_percent(10), + ..Default::default() + } + .assimilate_storage(&mut storage); + + let _ = pallet_session::GenesisConfig:: { + keys: stakers + .into_iter() + .map(|(id, ..)| (id, id, SessionKeys { other: id.into() })) + .collect(), + } + .assimilate_storage(&mut storage); + + storage.into() + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + let mut ext = self.build(); + ext.execute_with(test); + } +} + +/// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`. +pub(crate) fn start_session(session_index: SessionIndex) { + let end: u64 = if Offset::get().is_zero() { + (session_index as u64) * Period::get() + } else { + Offset::get() + (session_index.saturating_sub(1) as u64) * Period::get() + }; + run_to_block(end); + // session must have progressed properly. + assert_eq!( + Session::current_index(), + session_index, + "current session index = {}, expected = {}", + Session::current_index(), + session_index, + ); +} + +/// Progress to the given block, triggering session and era changes as we progress. 
+/// +/// This will finalize the previous block, initialize up to the given block, essentially simulating +/// a block import/propose process where we first initialize the block, then execute some stuff (not +/// in the function), and then finalize the block. +pub(crate) fn run_to_block(n: BlockNumber) { + Staking::on_finalize(System::block_number()); + for b in (System::block_number() + 1)..=n { + System::set_block_number(b); + Session::on_initialize(b); + >::on_initialize(b); + Timestamp::set_timestamp(System::block_number() * BLOCK_TIME + INIT_TIMESTAMP); + if b != n { + Staking::on_finalize(System::block_number()); + } + } +} + +pub(crate) fn active_era() -> EraIndex { + Staking::active_era().unwrap().index +} diff --git a/frame/root-offences/src/tests.rs b/frame/root-offences/src/tests.rs new file mode 100644 index 0000000000000..a8b7d0a6d6aca --- /dev/null +++ b/frame/root-offences/src/tests.rs @@ -0,0 +1,94 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use frame_support::{assert_err, assert_ok}; +use mock::{active_era, start_session, Balances, ExtBuilder, RootOffences, RuntimeOrigin, System}; + +#[test] +fn create_offence_fails_given_signed_origin() { + use sp_runtime::traits::BadOrigin; + ExtBuilder::default().build_and_execute(|| { + let offenders = (&[]).to_vec(); + assert_err!(RootOffences::create_offence(RuntimeOrigin::signed(1), offenders), BadOrigin); + }) +} + +#[test] +fn create_offence_works_given_root_origin() { + ExtBuilder::default().build_and_execute(|| { + start_session(1); + + assert_eq!(active_era(), 0); + + assert_eq!(Balances::free_balance(11), 1000); + + let offenders = [(11, Perbill::from_percent(50))].to_vec(); + assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); + + System::assert_last_event(Event::OffenceCreated { offenders }.into()); + // the slash should be applied right away. + assert_eq!(Balances::free_balance(11), 500); + + // the other validator should keep his balance, because we only created + // an offences for the first validator. + assert_eq!(Balances::free_balance(21), 1000); + }) +} + +#[test] +fn create_offence_wont_slash_non_active_validators() { + ExtBuilder::default().build_and_execute(|| { + start_session(1); + + assert_eq!(active_era(), 0); + + // 31 is not an active validator. + assert_eq!(Balances::free_balance(31), 500); + + let offenders = [(31, Perbill::from_percent(20)), (11, Perbill::from_percent(20))].to_vec(); + assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); + + System::assert_last_event(Event::OffenceCreated { offenders }.into()); + + // so 31 didn't get slashed. + assert_eq!(Balances::free_balance(31), 500); + + // but 11 is an active validator so he got slashed. 
+ assert_eq!(Balances::free_balance(11), 800); + }) +} + +#[test] +fn create_offence_wont_slash_idle() { + ExtBuilder::default().build_and_execute(|| { + start_session(1); + + assert_eq!(active_era(), 0); + + // 41 is idle. + assert_eq!(Balances::free_balance(41), 1000); + + let offenders = [(41, Perbill::from_percent(50))].to_vec(); + assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); + + System::assert_last_event(Event::OffenceCreated { offenders }.into()); + + // 41 didn't get slashed. + assert_eq!(Balances::free_balance(41), 1000); + }) +} From e7f994d1e797420f252dd24714b029071ccbc46c Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Wed, 28 Sep 2022 22:52:16 +0200 Subject: [PATCH 190/348] bounding staking: `BoundedElectionProvider` trait (#12362) * add a bounded election provider trait * extract common trait election provider base * fmt * only bound the outer support vector * fix tests * docs * fix rust docs * fmt * fix rustdocs * docs * improve docs * small doc change --- .../election-provider-multi-phase/src/lib.rs | 16 ++-- .../election-provider-multi-phase/src/mock.rs | 5 +- frame/election-provider-support/src/lib.rs | 83 ++++++++++++------- .../election-provider-support/src/onchain.rs | 27 +++--- frame/fast-unstake/src/lib.rs | 5 +- frame/fast-unstake/src/mock.rs | 4 +- primitives/npos-elections/src/lib.rs | 14 ++-- 7 files changed, 99 insertions(+), 55 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index bba8139f38f44..649aec30c58b3 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -231,7 +231,8 @@ use codec::{Decode, Encode}; use frame_election_provider_support::{ - ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolution, + ElectionDataProvider, ElectionProvider, ElectionProviderBase, InstantElectionProvider, + NposSolution, }; use frame_support::{ dispatch::DispatchClass, @@ -289,7 +290,7 @@ pub type SolutionTargetIndexOf = as NposSolution>::TargetIndex pub type SolutionAccuracyOf = ::MinerConfig> as NposSolution>::Accuracy; /// The fallback election type. -pub type FallbackErrorOf = <::Fallback as ElectionProvider>::Error; +pub type FallbackErrorOf = <::Fallback as ElectionProviderBase>::Error; /// Configuration for the benchmarks of the pallet. pub trait BenchmarkingConfig { @@ -312,7 +313,7 @@ pub trait BenchmarkingConfig { /// A fallback implementation that transitions the pallet to the emergency phase. pub struct NoFallback(sp_std::marker::PhantomData); -impl ElectionProvider for NoFallback { +impl ElectionProviderBase for NoFallback { type AccountId = T::AccountId; type BlockNumber = T::BlockNumber; type DataProvider = T::DataProvider; @@ -321,7 +322,9 @@ impl ElectionProvider for NoFallback { fn ongoing() -> bool { false } +} +impl ElectionProvider for NoFallback { fn elect() -> Result, Self::Error> { // Do nothing, this will enable the emergency phase. 
Err("NoFallback.") @@ -1563,7 +1566,7 @@ impl Pallet { >::take() .ok_or(ElectionError::::NothingQueued) .or_else(|_| { - T::Fallback::elect() + ::elect() .map(|supports| ReadySolution { supports, score: Default::default(), @@ -1598,7 +1601,7 @@ impl Pallet { } } -impl ElectionProvider for Pallet { +impl ElectionProviderBase for Pallet { type AccountId = T::AccountId; type BlockNumber = T::BlockNumber; type Error = ElectionError; @@ -1610,7 +1613,9 @@ impl ElectionProvider for Pallet { _ => true, } } +} +impl ElectionProvider for Pallet { fn elect() -> Result, Self::Error> { match Self::do_elect() { Ok(supports) => { @@ -1627,7 +1632,6 @@ impl ElectionProvider for Pallet { } } } - /// convert a DispatchError to a custom InvalidTransaction with the inner code being the error /// number. pub fn dispatch_error_to_invalid(error: DispatchError) -> InvalidTransaction { diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 34aa2e1bbfc58..c1c53a3980676 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -297,7 +297,7 @@ impl onchain::Config for OnChainSeqPhragmen { } pub struct MockFallback; -impl ElectionProvider for MockFallback { +impl ElectionProviderBase for MockFallback { type AccountId = AccountId; type BlockNumber = u64; type Error = &'static str; @@ -306,7 +306,8 @@ impl ElectionProvider for MockFallback { fn ongoing() -> bool { false } - +} +impl ElectionProvider for MockFallback { fn elect() -> Result, Self::Error> { Self::elect_with_bounds(Bounded::max_value(), Bounded::max_value()) } diff --git a/frame/election-provider-support/src/lib.rs b/frame/election-provider-support/src/lib.rs index 0bf62bd8c35cd..5ee65e102bd06 100644 --- a/frame/election-provider-support/src/lib.rs +++ b/frame/election-provider-support/src/lib.rs @@ -20,10 +20,11 @@ //! This crate provides two traits that could interact to enable extensible election functionality //! within FRAME pallets. //! -//! Something that will provide the functionality of election will implement [`ElectionProvider`], -//! whilst needing an associated [`ElectionProvider::DataProvider`], which needs to be fulfilled by -//! an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* the receiver -//! of the election, resulting in a diagram as below: +//! Something that will provide the functionality of election will implement +//! [`ElectionProvider`] and its parent-trait [`ElectionProviderBase`], whilst needing an +//! associated [`ElectionProviderBase::DataProvider`], which needs to be +//! fulfilled by an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* +//! the receiver of the election, resulting in a diagram as below: //! //! ```ignore //! ElectionDataProvider @@ -131,12 +132,16 @@ //! type DataProvider: ElectionDataProvider; //! } //! -//! impl ElectionProvider for GenericElectionProvider { +//! impl ElectionProviderBase for GenericElectionProvider { //! type AccountId = AccountId; //! type BlockNumber = BlockNumber; //! type Error = &'static str; //! type DataProvider = T::DataProvider; //! fn ongoing() -> bool { false } +//! +//! } +//! +//! impl ElectionProvider for GenericElectionProvider { //! fn elect() -> Result, Self::Error> { //! Self::DataProvider::electable_targets(None) //! 
.map_err(|_| "failed to elect") @@ -177,8 +182,8 @@ pub use frame_support::{traits::Get, weights::Weight, BoundedVec, RuntimeDebug}; /// Re-export some type as they are used in the interface. pub use sp_arithmetic::PerThing; pub use sp_npos_elections::{ - Assignment, BalancingConfig, ElectionResult, Error, ExtendedBalance, IdentifierT, PerThing128, - Support, Supports, VoteWeight, + Assignment, BalancingConfig, BoundedSupports, ElectionResult, Error, ExtendedBalance, + IdentifierT, PerThing128, Support, Supports, VoteWeight, }; pub use traits::NposSolution; @@ -349,12 +354,12 @@ pub trait ElectionDataProvider { fn clear() {} } -/// Something that can compute the result of an election and pass it back to the caller. +/// Base trait for [`ElectionProvider`] and [`BoundedElectionProvider`]. It is +/// meant to be used only with an extension trait that adds an election +/// functionality. /// -/// This trait only provides an interface to _request_ an election, i.e. -/// [`ElectionProvider::elect`]. That data required for the election need to be passed to the -/// implemented of this trait through [`ElectionProvider::DataProvider`]. -pub trait ElectionProvider { +/// Data can be bounded or unbounded and is fetched from [`Self::DataProvider`]. +pub trait ElectionProviderBase { /// The account identifier type. type AccountId; @@ -372,24 +377,39 @@ pub trait ElectionProvider { /// Indicate if this election provider is currently ongoing an asynchronous election or not. fn ongoing() -> bool; +} - /// Elect a new set of winners, without specifying any bounds on the amount of data fetched from - /// [`Self::DataProvider`]. An implementation could nonetheless impose its own custom limits. - /// - /// The result is returned in a target major format, namely as *vector of supports*. - /// - /// This should be implemented as a self-weighing function. The implementor should register its - /// appropriate weight at the end of execution with the system pallet directly. +/// Elect a new set of winners, bounded by `MaxWinners`. +/// +/// Returns a result in bounded, target major format, namely as +/// *BoundedVec<(AccountId, Vec), MaxWinners>*. +pub trait BoundedElectionProvider: ElectionProviderBase { + /// The upper bound on election winners. + type MaxWinners: Get; + /// Performs the election. This should be implemented as a self-weighing function. The + /// implementor should register its appropriate weight at the end of execution with the + /// system pallet directly. + fn elect() -> Result, Self::Error>; +} + +/// Same a [`BoundedElectionProvider`], but no bounds are imposed on the number +/// of winners. +/// +/// The result is returned in a target major format, namely as +///*Vec<(AccountId, Vec)>*. +pub trait ElectionProvider: ElectionProviderBase { + /// Performs the election. This should be implemented as a self-weighing + /// function, similar to [`BoundedElectionProvider::elect()`]. fn elect() -> Result, Self::Error>; } -/// A sub-trait of the [`ElectionProvider`] for cases where we need to be sure an election needs to -/// happen instantly, not asynchronously. +/// A sub-trait of the [`ElectionProvider`] for cases where we need to be sure +/// an election needs to happen instantly, not asynchronously. /// /// The same `DataProvider` is assumed to be used. /// -/// Consequently, allows for control over the amount of data that is being fetched from the -/// [`ElectionProvider::DataProvider`]. 
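// The practical difference between the two `elect()` flavours defined above:
// `BoundedElectionProvider::elect()` returns `BoundedSupports<AccountId, MaxWinners>`
// (a `BoundedVec` capped at `MaxWinners` entries), whereas `ElectionProvider::elect()`
// keeps returning the plain `Supports<AccountId>` vector. A hypothetical bounded
// implementor, purely for illustration:
//
//   impl BoundedElectionProvider for MyBoundedProvider {
//       type MaxWinners = frame_support::traits::ConstU32<100>;
//       fn elect() -> Result<BoundedSupports<Self::AccountId, Self::MaxWinners>, Self::Error> {
//           Ok(Default::default()) // empty, and therefore trivially within the bound
//       }
//   }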
+/// Consequently, allows for control over the amount of data that is being +/// fetched from the [`ElectionProviderBase::DataProvider`]. pub trait InstantElectionProvider: ElectionProvider { /// Elect a new set of winners, but unlike [`ElectionProvider::elect`] which cannot enforce /// bounds, this trait method can enforce bounds on the amount of data provided by the @@ -410,7 +430,7 @@ pub trait InstantElectionProvider: ElectionProvider { pub struct NoElection(sp_std::marker::PhantomData); #[cfg(feature = "std")] -impl ElectionProvider +impl ElectionProviderBase for NoElection<(AccountId, BlockNumber, DataProvider)> where DataProvider: ElectionDataProvider, @@ -420,15 +440,22 @@ where type Error = &'static str; type DataProvider = DataProvider; - fn elect() -> Result, Self::Error> { - Err(" cannot do anything.") - } - fn ongoing() -> bool { false } } +#[cfg(feature = "std")] +impl ElectionProvider + for NoElection<(AccountId, BlockNumber, DataProvider)> +where + DataProvider: ElectionDataProvider, +{ + fn elect() -> Result, Self::Error> { + Err(" cannot do anything.") + } +} + /// A utility trait for something to implement `ElectionDataProvider` in a sensible way. /// /// This is generic over `AccountId` and it can represent a validator, a nominator, or any other diff --git a/frame/election-provider-support/src/onchain.rs b/frame/election-provider-support/src/onchain.rs index 10c3519d03df6..88aa6ca7267a0 100644 --- a/frame/election-provider-support/src/onchain.rs +++ b/frame/election-provider-support/src/onchain.rs @@ -20,7 +20,8 @@ //! careful when using it onchain. use crate::{ - Debug, ElectionDataProvider, ElectionProvider, InstantElectionProvider, NposSolver, WeightInfo, + Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase, InstantElectionProvider, + NposSolver, WeightInfo, }; use frame_support::{dispatch::DispatchClass, traits::Get}; use sp_npos_elections::*; @@ -133,15 +134,6 @@ fn elect_with( } impl ElectionProvider for UnboundedExecution { - type AccountId = ::AccountId; - type BlockNumber = ::BlockNumber; - type Error = Error; - type DataProvider = T::DataProvider; - - fn ongoing() -> bool { - false - } - fn elect() -> Result, Self::Error> { // This should not be called if not in `std` mode (and therefore neither in genesis nor in // testing) @@ -156,6 +148,17 @@ impl ElectionProvider for UnboundedExecution { } } +impl ElectionProviderBase for UnboundedExecution { + type AccountId = ::AccountId; + type BlockNumber = ::BlockNumber; + type Error = Error; + type DataProvider = T::DataProvider; + + fn ongoing() -> bool { + false + } +} + impl InstantElectionProvider for UnboundedExecution { fn elect_with_bounds( max_voters: usize, @@ -165,7 +168,7 @@ impl InstantElectionProvider for UnboundedExecution { } } -impl ElectionProvider for BoundedExecution { +impl ElectionProviderBase for BoundedExecution { type AccountId = ::AccountId; type BlockNumber = ::BlockNumber; type Error = Error; @@ -174,7 +177,9 @@ impl ElectionProvider for BoundedExecution { fn ongoing() -> bool { false } +} +impl ElectionProvider for BoundedExecution { fn elect() -> Result, Self::Error> { elect_with::(Some(T::VotersBound::get() as usize), Some(T::TargetsBound::get() as usize)) } diff --git a/frame/fast-unstake/src/lib.rs b/frame/fast-unstake/src/lib.rs index ed26d6b436e1d..8fdb7a79dd537 100644 --- a/frame/fast-unstake/src/lib.rs +++ b/frame/fast-unstake/src/lib.rs @@ -80,7 +80,7 @@ macro_rules! 
log { pub mod pallet { use super::*; use crate::types::*; - use frame_election_provider_support::ElectionProvider; + use frame_election_provider_support::ElectionProviderBase; use frame_support::{ pallet_prelude::*, traits::{Defensive, ReservableCurrency}, @@ -330,7 +330,8 @@ pub mod pallet { } } - if ::ElectionProvider::ongoing() { + if <::ElectionProvider as ElectionProviderBase>::ongoing() + { // NOTE: we assume `ongoing` does not consume any weight. // there is an ongoing election -- we better not do anything. Imagine someone is not // exposed anywhere in the last era, and the snapshot for the election is already diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index 4c4c5f9ff26fd..dc2c694d52956 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -104,7 +104,7 @@ parameter_types! { } pub struct MockElection; -impl frame_election_provider_support::ElectionProvider for MockElection { +impl frame_election_provider_support::ElectionProviderBase for MockElection { type AccountId = AccountId; type BlockNumber = BlockNumber; type DataProvider = Staking; @@ -113,7 +113,9 @@ impl frame_election_provider_support::ElectionProvider for MockElection { fn ongoing() -> bool { Ongoing::get() } +} +impl frame_election_provider_support::ElectionProvider for MockElection { fn elect() -> Result, Self::Error> { Err(()) } diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index dd2a9bf198f8d..514ded67ad38b 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -74,17 +74,16 @@ #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; use sp_arithmetic::{traits::Zero, Normalizable, PerThing, Rational128, ThresholdOrd}; -use sp_core::RuntimeDebug; +use sp_core::{bounded::BoundedVec, RuntimeDebug}; use sp_std::{ cell::RefCell, cmp::Ordering, collections::btree_map::BTreeMap, prelude::*, rc::Rc, vec, }; -use codec::{Decode, Encode, MaxEncodedLen}; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; - #[cfg(test)] mod mock; #[cfg(test)] @@ -451,6 +450,11 @@ impl Default for Support { /// The main advantage of this is that it is encodable. pub type Supports = Vec<(A, Support)>; +/// Same as `Supports` bounded by `MaxWinners`. +/// +/// To note, the inner `Support` is still unbounded. +pub type BoundedSupports = BoundedVec<(A, Support), MaxWinners>; + /// Linkage from a winner to their [`Support`]. /// /// This is more helpful than a normal [`Supports`] as it allows faster error checking. From 427fd09bcb193c1e79dec85b1e207c718b686c35 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Thu, 29 Sep 2022 09:28:22 +0300 Subject: [PATCH 191/348] BEEFY: impl TypeInfo for SignedCommitment (#12382) --- primitives/beefy/src/commitment.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs index 4880d4b69ab01..0e22c8d56d937 100644 --- a/primitives/beefy/src/commitment.rs +++ b/primitives/beefy/src/commitment.rs @@ -16,6 +16,7 @@ // limitations under the License. use codec::{Decode, Encode, Error, Input}; +use scale_info::TypeInfo; use sp_std::{cmp, prelude::*}; use crate::ValidatorSetId; @@ -39,7 +40,7 @@ pub mod known_payload_ids { /// Identifiers MUST be sorted by the [`BeefyPayloadId`] to allow efficient lookup of expected /// value. 
Duplicated identifiers are disallowed. It's okay for different implementations to only /// support a subset of possible values. -#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash)] +#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash, TypeInfo)] pub struct Payload(Vec<(BeefyPayloadId, Vec)>); impl Payload { @@ -80,7 +81,7 @@ impl Payload { /// height [block_number](Commitment::block_number). /// GRANDPA validators collect signatures on commitments and a stream of such signed commitments /// (see [SignedCommitment]) forms the BEEFY protocol. -#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] +#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, TypeInfo)] pub struct Commitment { /// A collection of payloads to be signed, see [`Payload`] for details. /// @@ -138,7 +139,7 @@ where /// Note that SCALE-encoding of the structure is optimized for size efficiency over the wire, /// please take a look at custom [`Encode`] and [`Decode`] implementations and /// `CompactSignedCommitment` struct. -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, TypeInfo)] pub struct SignedCommitment { /// The commitment signatures are collected for. pub commitment: Commitment, From 61b9a4d1a8a9bf39c1d89a8dd02f82785c10860c Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 29 Sep 2022 23:48:10 +0800 Subject: [PATCH 192/348] Properly set the max proof size weight on defaults and tests (#12383) * Properly set the max proof size weight on defaults and tests * cargo fmt * Set proper max proof size for contracts pallet tests * Properly set max proof size for node * Properly set max proof size for frame system mock * Update test expectations * Update test expectations * Properly set max proof size for balances mock * Update test expectations * Update test expectations * Properly set max proof size for democracy mock * Properly set max proof size for scheduler mock * Properly set max proof size for fast unstake mock * Properly set max proof size for tx payment mock * Properly set max proof size for elections phragmen mock * Properly set max proof size for node template --- bin/node-template/runtime/src/lib.rs | 7 +- bin/node/runtime/src/impls.rs | 2 +- bin/node/runtime/src/lib.rs | 4 +- frame/balances/src/tests_composite.rs | 4 +- frame/balances/src/tests_local.rs | 4 +- frame/balances/src/tests_reentrancy.rs | 4 +- frame/contracts/src/tests.rs | 14 +- frame/democracy/src/tests.rs | 4 +- .../election-provider-multi-phase/src/lib.rs | 9 +- .../election-provider-multi-phase/src/mock.rs | 7 +- .../src/signed.rs | 9 +- .../src/unsigned.rs | 292 +++++++++++++++--- frame/elections-phragmen/src/lib.rs | 4 +- frame/executive/src/lib.rs | 5 +- frame/fast-unstake/src/mock.rs | 4 +- frame/grandpa/src/tests.rs | 3 +- frame/scheduler/src/mock.rs | 4 +- frame/system/src/extensions/check_weight.rs | 20 +- frame/system/src/limits.rs | 24 +- frame/system/src/mock.rs | 2 +- .../asset-tx-payment/src/tests.rs | 2 +- frame/transaction-payment/src/lib.rs | 2 +- 22 files changed, 323 insertions(+), 107 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index f801068b10fda..1d0e18d31bf80 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -139,8 +139,11 @@ parameter_types! { pub const BlockHashCount: BlockNumber = 2400; pub const Version: RuntimeVersion = VERSION; /// We allow for 2 seconds of compute with a 6 second average block time. 
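// `Weight` is two-dimensional (`ref_time` plus `proof_size`), and `2 * WEIGHT_PER_SECOND`
// only pins down the `ref_time` half. The replacement below, like the analogous edits
// throughout this commit, explicitly raises the proof-size component to `u64::MAX`,
// i.e. "don't limit proof size yet". A small sketch of what that builder call does:
//
//   let w = (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX);
//   assert_eq!(w.ref_time(), 2 * WEIGHT_PER_SECOND.ref_time());
//   assert_eq!(w.proof_size(), u64::MAX);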
- pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults(2u64 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::with_sensible_defaults( + (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + NORMAL_DISPATCH_RATIO, + ); pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); pub const SS58Prefix: u8 = 42; diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index fb2f3cec65290..0f9ed6e275196 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -224,7 +224,7 @@ mod multiplier_tests { fn multiplier_can_grow_from_zero() { // if the min is too small, then this will not change, and we are doomed forever. // the weight is 1/100th bigger than target. - run_with_system_weight(target() * 101 / 100, || { + run_with_system_weight(target().set_ref_time(target().ref_time() * 101 / 100), || { let next = runtime_multiplier_update(min_multiplier()); assert!(next > min_multiplier(), "{:?} !>= {:?}", next, min_multiplier()); }) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index aa1a525bf095c..5e4fdb4748d15 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -170,8 +170,8 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for 2 seconds of compute with a 6 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.saturating_mul(2); +/// We allow for 2 seconds of compute with a 6 second average block time, with maximum proof size. +const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND.saturating_mul(2).set_proof_size(u64::MAX); parameter_types! { pub const BlockHashCount: BlockNumber = 2400; diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 1e38d611773d4..f8a8fdd1851d4 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -47,7 +47,9 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index e080eafb66067..152a5da37410f 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -48,7 +48,9 @@ frame_support::construct_runtime!( parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index fa2eb0e488e7d..90363140000e8 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -51,7 +51,9 @@ frame_support::construct_runtime!( parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 0; } impl frame_system::Config for Test { diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index a56e4f5564845..e5893c3dbd112 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -279,7 +279,9 @@ impl RegisteredChainExtension for TempStorageExtension { parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max( + (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + ); pub static ExistentialDeposit: u64 = 1; } impl frame_system::Config for Test { @@ -413,7 +415,7 @@ pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000); +pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000).set_proof_size(u64::MAX); pub struct ExtBuilder { existential_deposit: u64, @@ -628,7 +630,7 @@ fn deposit_event_max_value_limit() { RuntimeOrigin::signed(ALICE), addr.clone(), 0, - GAS_LIMIT * 2, // we are copying a huge buffer, + GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer, None, ::Schedule::get().limits.payload_len.encode(), )); @@ -769,7 +771,7 @@ fn storage_max_value_limit() { RuntimeOrigin::signed(ALICE), addr.clone(), 0, - GAS_LIMIT * 2, // we are copying a huge buffer + GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() * 2), // we are copying a huge buffer None, ::Schedule::get().limits.payload_len.encode(), )); @@ -2543,7 +2545,7 @@ fn gas_estimation_nested_call_fixed_limit() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required), + Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), Some(result.storage_deposit.charge_or_zero()), input, false, @@ -2613,7 +2615,7 @@ fn gas_estimation_call_runtime() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required), + Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), None, call.encode(), false, diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 17b35ee3c38cd..03d7216fd5aaa 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -78,7 +78,9 @@ impl Contains for BaseFilter { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(1_000_000)); + frame_system::limits::BlockWeights::simple_max( + Weight::from_ref_time(1_000_000).set_proof_size(u64::MAX), + ); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 649aec30c58b3..fb17bd25ea541 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1011,10 +1011,8 @@ pub mod pallet { // unlikely to ever return an error: if phase is signed, snapshot will exist. let size = Self::snapshot_metadata().ok_or(Error::::MissingSnapshotMetadata)?; - // TODO: account for proof size weight ensure!( - Self::solution_weight_of(&raw_solution, size).ref_time() < - T::SignedMaxWeight::get().ref_time(), + Self::solution_weight_of(&raw_solution, size).all_lt(T::SignedMaxWeight::get()), Error::::SignedTooMuchWeight, ); @@ -2342,9 +2340,8 @@ mod tests { }; let mut active = 1; - // TODO: account for proof size weight - while weight_with(active).ref_time() <= - ::BlockWeights::get().max_block.ref_time() || + while weight_with(active) + .all_lte(::BlockWeights::get().max_block) || active == all_voters { active += 1; diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index c1c53a3980676..d3082be0cf750 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -26,7 +26,7 @@ pub use frame_support::{assert_noop, assert_ok, pallet_prelude::GetDefault}; use frame_support::{ bounded_vec, parameter_types, traits::{ConstU32, Hooks}, - weights::Weight, + weights::{constants, Weight}, BoundedVec, }; use multi_phase::unsigned::{IndexAssignmentOf, VoterOf}; @@ -227,7 +227,10 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults(2u64 * frame_support::weights::constants::WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); + ::with_sensible_defaults( + Weight::from_components(2u64 * constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + NORMAL_DISPATCH_RATIO, + ); } impl pallet_balances::Config for Runtime { diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 1cf071e6796f1..2e01d99be0a42 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -957,7 +957,7 @@ mod tests { #[test] fn cannot_consume_too_much_future_weight() { ExtBuilder::default() - .signed_weight(Weight::from_ref_time(40)) + .signed_weight(Weight::from_ref_time(40).set_proof_size(u64::MAX)) .mock_weight_info(MockedWeightInfo::Basic) .build_and_execute(|| { roll_to(15); @@ -973,11 +973,14 @@ mod tests { // default solution will have 5 edges (5 * 5 + 10) assert_eq!(solution_weight, Weight::from_ref_time(35)); assert_eq!(raw.solution.voter_count(), 5); - assert_eq!(::SignedMaxWeight::get(), Weight::from_ref_time(40)); + assert_eq!( + ::SignedMaxWeight::get(), + Weight::from_ref_time(40).set_proof_size(u64::MAX) + ); assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(raw.clone()))); - ::set(Weight::from_ref_time(30)); + ::set(Weight::from_ref_time(30).set_proof_size(u64::MAX)); // note: resubmitting the same solution is technically okay as long as the queue has // space. diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 281ac37421174..025ff832bb08a 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -638,8 +638,7 @@ impl Miner { }; let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { - // TODO: account for proof size weight - if current_weight.ref_time() < max_weight.ref_time() { + if current_weight.all_lt(max_weight) { let next_voters = voters.checked_add(step); match next_voters { Some(voters) if voters < max_voters => Ok(voters), @@ -674,8 +673,7 @@ impl Miner { // Time to finish. We might have reduced less than expected due to rounding error. Increase // one last time if we have any room left, the reduce until we are sure we are below limit. 
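// With two-dimensional weights, the `ref_time()`-only comparisons removed in this
// hunk are replaced with component-wise helpers: `all_lt` / `all_lte` require *both*
// `ref_time` and `proof_size` to satisfy the comparison, while `any_gt` fires if
// *either* component does. A small illustration of the intended semantics:
//
//   let a = Weight::from_components(10, 0);
//   let b = Weight::from_components(20, 0);
//   assert!(!a.all_lt(b));  // proof_size (0 vs 0) is not strictly smaller
//   assert!(a.all_lte(b));
//   assert!(b.any_gt(a));   // ref_time 20 > 10 is enough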
- // TODO: account for proof size weight - while voters < max_voters && weight_with(voters + 1).ref_time() < max_weight.ref_time() { + while voters < max_voters && weight_with(voters + 1).all_lt(max_weight) { voters += 1; } while voters.checked_sub(1).is_some() && weight_with(voters).any_gt(max_weight) { @@ -683,9 +681,8 @@ impl Miner { } let final_decision = voters.min(size.voters); - // TODO: account for proof size weight debug_assert!( - weight_with(final_decision).ref_time() <= max_weight.ref_time(), + weight_with(final_decision).all_lte(max_weight), "weight_with({}) <= {}", final_decision, max_weight, @@ -703,151 +700,346 @@ mod max_weight { fn find_max_voter_binary_search_works() { let w = SolutionOrSnapshotSize { voters: 10, targets: 0 }; MockWeightInfo::set(crate::mock::MockedWeightInfo::Complex); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::zero()), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::zero().set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(999).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1990)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1990).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1999).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2000).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2001).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2010).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2990)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2990).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2999).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(3000).set_proof_size(u64::MAX) + ), 3 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + Miner::::maximum_voter_for_weight( + 0, + w, + 
Weight::from_ref_time(3333).set_proof_size(u64::MAX) + ), 3 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(5500)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(5500).set_proof_size(u64::MAX) + ), 5 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(7777)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(7777).set_proof_size(u64::MAX) + ), 7 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(9999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(9999).set_proof_size(u64::MAX) + ), 9 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(10_000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(10_000).set_proof_size(u64::MAX) + ), 10 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(10_999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(10_999).set_proof_size(u64::MAX) + ), 10 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(11_000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(11_000).set_proof_size(u64::MAX) + ), 10 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(22_000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(22_000).set_proof_size(u64::MAX) + ), 10 ); let w = SolutionOrSnapshotSize { voters: 1, targets: 0 }; - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(0)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(0).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(999).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1990)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1990).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1999).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2010).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - 
Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(3333).set_proof_size(u64::MAX) + ), 1 ); let w = SolutionOrSnapshotSize { voters: 2, targets: 0 }; - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(0)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1)), 0); - assert_eq!(Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(999)), 0); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(0).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(999).set_proof_size(u64::MAX) + ), + 0 + ); + assert_eq!( + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1000).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1001).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(1999)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(1999).set_proof_size(u64::MAX) + ), 1 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2000)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2000).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2001)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2001).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(2010)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(2010).set_proof_size(u64::MAX) + ), 2 ); assert_eq!( - Miner::::maximum_voter_for_weight(0, w, Weight::from_ref_time(3333)), + Miner::::maximum_voter_for_weight( + 0, + w, + Weight::from_ref_time(3333).set_proof_size(u64::MAX) + ), 2 ); } @@ -1131,7 +1323,7 @@ mod tests { #[test] fn miner_trims_weight() { ExtBuilder::default() - .miner_weight(Weight::from_ref_time(100)) + .miner_weight(Weight::from_ref_time(100).set_proof_size(u64::MAX)) .mock_weight_info(crate::mock::MockedWeightInfo::Basic) .build_and_execute(|| { roll_to(25); @@ -1149,7 +1341,7 @@ mod tests { assert_eq!(raw.solution.voter_count(), 5); // now reduce the max weight - ::set(Weight::from_ref_time(25)); + ::set(Weight::from_ref_time(25).set_proof_size(u64::MAX)); let (raw, witness) = MultiPhase::mine_solution().unwrap(); let solution_weight = ::solution_weight( diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 0616087d975e8..165a8fcab429b 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -1174,7 +1174,9 @@ mod tests { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(frame_support::weights::Weight::from_ref_time(1024)); + frame_system::limits::BlockWeights::simple_max( + frame_support::weights::Weight::from_ref_time(1024).set_proof_size(u64::MAX), + ); } impl frame_system::Config for Test { diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 014c7a2bc02a6..b7884efccf685 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -459,8 +459,7 @@ where let max_weight = >::get().max_block; let remaining_weight = max_weight.saturating_sub(weight.total()); - // TODO: account for proof size weight - if remaining_weight.ref_time() > 0 { + if remaining_weight.all_gt(Weight::zero()) { let used_weight = >::on_idle( block_number, remaining_weight, @@ -768,7 +767,7 @@ mod tests { frame_system::limits::BlockWeights::builder() .base_block(Weight::from_ref_time(10)) .for_class(DispatchClass::all(), |weights| weights.base_extrinsic = Weight::from_ref_time(5)) - .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_ref_time(1024).into()) + .for_class(DispatchClass::non_mandatory(), |weights| weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into()) .build_or_panic(); pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 10, diff --git a/frame/fast-unstake/src/mock.rs b/frame/fast-unstake/src/mock.rs index dc2c694d52956..71fc2d4ba905a 100644 --- a/frame/fast-unstake/src/mock.rs +++ b/frame/fast-unstake/src/mock.rs @@ -32,7 +32,9 @@ pub type T = Runtime; parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(2u64 * WEIGHT_PER_SECOND); + frame_system::limits::BlockWeights::simple_max( + (2u64 * WEIGHT_PER_SECOND).set_proof_size(u64::MAX), + ); } impl frame_system::Config for Runtime { diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 5d2ebdf29cb6b..626decd12821e 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -856,8 +856,7 @@ fn valid_equivocation_reports_dont_pay_fees() { .get_dispatch_info(); // it should have non-zero weight and the fee has to be paid. - // TODO: account for proof size weight - assert!(info.weight.ref_time() > 0); + assert!(info.weight.any_gt(Weight::zero())); assert_eq!(info.pays_fee, Pays::Yes); // report the equivocation. diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 6f6667590a6c3..6aaad13e48183 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -118,7 +118,9 @@ impl Contains for BaseFilter { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(Weight::from_ref_time(2_000_000_000_000)); + frame_system::limits::BlockWeights::simple_max( + Weight::from_ref_time(2_000_000_000_000).set_proof_size(u64::MAX), + ); } impl system::Config for Test { type BaseCallFilter = BaseFilter; diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 15a88913cd337..5c3b80f59bfa8 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -310,7 +310,7 @@ mod tests { check(|max, len| { assert_ok!(CheckWeight::::do_pre_dispatch(max, len)); assert_eq!(System::block_weight().total(), Weight::MAX); - assert!(System::block_weight().total().all_gt(block_weight_limit())); + assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); }); check(|max, len| { assert_ok!(CheckWeight::::do_validate(max, len)); @@ -367,7 +367,7 @@ mod tests { new_test_ext().execute_with(|| { System::register_extra_weight_unchecked(Weight::MAX, DispatchClass::Normal); assert_eq!(System::block_weight().total(), Weight::MAX); - assert!(System::block_weight().total().all_gt(block_weight_limit())); + assert!(System::block_weight().total().ref_time() > block_weight_limit().ref_time()); }); } @@ -392,8 +392,8 @@ mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); assert_ok!(CheckWeight::::do_pre_dispatch(&rest_operational, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); - assert_eq!(System::block_weight().total(), block_weight_limit()); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); + assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); // Checking single extrinsic should not take current block weight into account. 
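// (On the `set_proof_size(0)` adjustment a few lines up: the mock extrinsics in these
// tests only register `ref_time`, so the accumulated `System::block_weight()` keeps a
// zero proof-size component, while `block_weight_limit()` now carries `u64::MAX`.
// Zeroing the limit's proof size keeps the assertion, in effect, a pure `ref_time`
// comparison, which is what it was checking before this change.)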
assert_eq!(CheckWeight::::check_extrinsic_weight(&rest_operational), Ok(())); }); @@ -417,8 +417,8 @@ mod tests { // Extra 20 here from block execution + base extrinsic weight assert_eq!(System::block_weight().total(), Weight::from_ref_time(266)); assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); - assert_eq!(System::block_weight().total(), block_weight_limit()); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); + assert_eq!(System::block_weight().total(), block_weight_limit().set_proof_size(0)); }); } @@ -669,7 +669,7 @@ mod tests { assert_ok!(CheckWeight::::do_pre_dispatch(&max_normal, len)); assert_eq!(System::block_weight().total(), Weight::from_ref_time(768)); assert_ok!(CheckWeight::::do_pre_dispatch(&mandatory, len)); - assert_eq!(block_weight_limit(), Weight::from_ref_time(1024)); + assert_eq!(block_weight_limit(), Weight::from_ref_time(1024).set_proof_size(u64::MAX)); assert_eq!(System::block_weight().total(), Weight::from_ref_time(1024 + 768)); assert_eq!(CheckWeight::::check_extrinsic_weight(&mandatory), Ok(())); }); @@ -682,11 +682,11 @@ mod tests { .base_block(Weight::zero()) .for_class(DispatchClass::non_mandatory(), |w| { w.base_extrinsic = Weight::zero(); - w.max_total = Some(Weight::from_ref_time(20)); + w.max_total = Some(Weight::from_ref_time(20).set_proof_size(u64::MAX)); }) .for_class(DispatchClass::Mandatory, |w| { w.base_extrinsic = Weight::zero(); - w.reserved = Some(Weight::from_ref_time(5)); + w.reserved = Some(Weight::from_ref_time(5).set_proof_size(u64::MAX)); w.max_total = None; }) .build_or_panic(); @@ -695,7 +695,7 @@ mod tests { DispatchClass::Operational => Weight::from_ref_time(10), DispatchClass::Mandatory => Weight::zero(), }); - assert_eq!(maximum_weight.max_block, all_weight.total()); + assert_eq!(maximum_weight.max_block, all_weight.total().set_proof_size(u64::MAX)); // fits into reserved let mandatory1 = DispatchInfo { diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index cfc1d261baa01..07ad240afe159 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -207,7 +207,10 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { - Self::with_sensible_defaults(1u64 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) + Self::with_sensible_defaults( + Weight::from_components(constants::WEIGHT_PER_SECOND.ref_time(), u64::MAX), + DEFAULT_NORMAL_RATIO, + ) } } @@ -224,7 +227,6 @@ impl BlockWeights { } let mut error = ValidationErrors::default(); - // TODO: account for proof size weight in the assertions below for class in DispatchClass::all() { let weights = self.per_class.get(*class); let max_for_class = or_max(weights.max_total); @@ -233,16 +235,18 @@ impl BlockWeights { // Make sure that if total is set it's greater than base_block && // base_for_class error_assert!( - (max_for_class.ref_time() > self.base_block.ref_time() && max_for_class.ref_time() > base_for_class.ref_time()) - || max_for_class.ref_time() == 0, + (max_for_class.all_gt(self.base_block) && max_for_class.all_gt(base_for_class)) + || max_for_class == Weight::zero(), &mut error, "[{:?}] {:?} (total) has to be greater than {:?} (base block) & {:?} (base extrinsic)", class, max_for_class, self.base_block, base_for_class, ); // Max extrinsic can't be greater than max_for_class. 
error_assert!( - weights.max_extrinsic.unwrap_or(Weight::zero()).ref_time() <= - max_for_class.saturating_sub(base_for_class).ref_time(), + weights + .max_extrinsic + .unwrap_or(Weight::zero()) + .all_lte(max_for_class.saturating_sub(base_for_class)), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", class, @@ -251,14 +255,14 @@ impl BlockWeights { ); // Max extrinsic should not be 0 error_assert!( - weights.max_extrinsic.unwrap_or_else(Weight::max_value).ref_time() > 0, + weights.max_extrinsic.unwrap_or_else(Weight::max_value).all_gt(Weight::zero()), &mut error, "[{:?}] {:?} (max_extrinsic) must not be 0. Check base cost and average initialization cost.", class, weights.max_extrinsic, ); // Make sure that if reserved is set it's greater than base_for_class. error_assert!( - reserved.ref_time() > base_for_class.ref_time() || reserved.ref_time() == 0, + reserved.all_gt(base_for_class) || reserved == Weight::zero(), &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", class, @@ -267,7 +271,7 @@ impl BlockWeights { ); // Make sure max block is greater than max_total if it's set. error_assert!( - self.max_block.ref_time() >= weights.max_total.unwrap_or(Weight::zero()).ref_time(), + self.max_block.all_gte(weights.max_total.unwrap_or(Weight::zero())), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", class, @@ -276,7 +280,7 @@ impl BlockWeights { ); // Make sure we can fit at least one extrinsic. error_assert!( - self.max_block.ref_time() > (base_for_class + self.base_block).ref_time(), + self.max_block.all_gt(base_for_class + self.base_block), &mut error, "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", class, diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index b6fc121612050..d31a1b08667e5 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -41,7 +41,7 @@ frame_support::construct_runtime!( ); const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -const MAX_BLOCK_WEIGHT: Weight = Weight::from_ref_time(1024); +const MAX_BLOCK_WEIGHT: Weight = Weight::from_ref_time(1024).set_proof_size(u64::MAX); parameter_types! 
{ pub Version: RuntimeVersion = RuntimeVersion { diff --git a/frame/transaction-payment/asset-tx-payment/src/tests.rs b/frame/transaction-payment/asset-tx-payment/src/tests.rs index cdf7d17898145..e775f3aa92990 100644 --- a/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -71,7 +71,7 @@ impl Get for BlockWeights { weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = Weight::from_ref_time(1024).into(); + weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into(); }) .build_or_panic() } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 1ad6a2b3b3b6f..80297d1a0d362 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -889,7 +889,7 @@ mod tests { weights.base_extrinsic = ExtrinsicBaseWeight::get().into(); }) .for_class(DispatchClass::non_mandatory(), |weights| { - weights.max_total = Weight::from_ref_time(1024).into(); + weights.max_total = Weight::from_ref_time(1024).set_proof_size(u64::MAX).into(); }) .build_or_panic() } From c2026ca6e9b2a24d8ae1a05c5b3784ffa0748946 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Fri, 30 Sep 2022 11:14:13 +0800 Subject: [PATCH 193/348] Carry over where clauses defined in Config to Call and Hook (#12388) --- frame/support/procedural/src/pallet/expand/call.rs | 2 +- frame/support/procedural/src/pallet/expand/hooks.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 39d16109aa8fa..6b166e6726d38 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -32,7 +32,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { (span, where_clause, methods, docs) }, - None => (def.item.span(), None, Vec::new(), Vec::new()), + None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()), }; let frame_support = &def.frame_support; let frame_system = &def.frame_system; diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs index 48d4aec436d40..d8d009cf3c940 100644 --- a/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -26,7 +26,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let has_runtime_upgrade = hooks.has_runtime_upgrade; (where_clause, span, has_runtime_upgrade) }, - None => (None, def.pallet_struct.attr_span, false), + None => (def.config.where_clause.clone(), def.pallet_struct.attr_span, false), }; let frame_support = &def.frame_support; From dbb72f3fd98253b72c0090375b738b9d00995090 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 30 Sep 2022 12:06:46 +0200 Subject: [PATCH 194/348] unsafe_pruning flag removed (#12385) --- client/cli/src/config.rs | 11 ----------- client/cli/src/params/import_params.rs | 12 ------------ 2 files changed, 23 deletions(-) diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index fad2ec7bc4a93..77689708a231f 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -659,17 +659,6 @@ pub trait CliConfiguration: Sized { } } - if self.import_params().map_or(false, |p| { - #[allow(deprecated)] - p.unsafe_pruning - }) 
{ - // according to https://github.com/substrate/issues/8103; - warn!( - "WARNING: \"--unsafe-pruning\" CLI-flag is deprecated and has no effect. \ - In future builds it will be removed, and providing this flag will lead to an error." - ); - } - Ok(()) } } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index c851050838965..3cd9fd83bd31b 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -41,18 +41,6 @@ pub struct ImportParams { #[clap(flatten)] pub database_params: DatabaseParams, - /// THIS IS A DEPRECATED CLI-ARGUMENT. - /// - /// It has been preserved in order to not break the compatibility with the existing scripts. - /// Enabling this option will lead to a runtime warning. - /// In future this option will be removed completely, thus specifying it will lead to a start - /// up error. - /// - /// Details: - #[clap(long)] - #[deprecated = "According to https://github.com/paritytech/substrate/issues/8103"] - pub unsafe_pruning: bool, - /// Method for executing Wasm runtime code. #[clap( long = "wasm-execution", From 952030cfa6f11be6aef938e5359064c4cf6b30a9 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 30 Sep 2022 13:46:48 +0300 Subject: [PATCH 195/348] pallet-mmr: generate historical proofs (#12324) * BEEFY: generate historical proofs Signed-off-by: Serban Iorga * Update frame/merkle-mountain-range/rpc/src/lib.rs Co-authored-by: Adrian Catangiu * Update primitives/merkle-mountain-range/src/lib.rs Co-authored-by: Adrian Catangiu * Update frame/merkle-mountain-range/src/lib.rs Co-authored-by: Adrian Catangiu * cargo fmt * fix off-by-one in leaves powerset generation * test all possible mmr sizes for historical proofs * remove now redundant simple_historical_proof * cargo fmt Signed-off-by: Serban Iorga Co-authored-by: Adrian Catangiu Co-authored-by: Robert Hambrock --- bin/node/runtime/src/lib.rs | 39 ++- client/beefy/src/tests.rs | 7 + frame/merkle-mountain-range/rpc/src/lib.rs | 49 ++++ frame/merkle-mountain-range/src/lib.rs | 22 +- frame/merkle-mountain-range/src/tests.rs | 276 +++++++++++++++++++- primitives/merkle-mountain-range/src/lib.rs | 11 +- 6 files changed, 379 insertions(+), 25 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 5e4fdb4748d15..4fa4049e22682 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -2011,10 +2011,7 @@ impl_runtime_apis! { } } - impl pallet_mmr::primitives::MmrApi< - Block, - mmr::Hash, - > for Runtime { + impl pallet_mmr::primitives::MmrApi for Runtime { fn generate_proof(leaf_index: pallet_mmr::primitives::LeafIndex) -> Result<(mmr::EncodableOpaqueLeaf, mmr::Proof), mmr::Error> { @@ -2049,11 +2046,35 @@ impl_runtime_apis! 
{ Ok(Mmr::mmr_root()) } - fn generate_batch_proof(leaf_indices: Vec) - -> Result<(Vec, mmr::BatchProof), mmr::Error> - { - Mmr::generate_batch_proof(leaf_indices) - .map(|(leaves, proof)| (leaves.into_iter().map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)).collect(), proof)) + fn generate_batch_proof( + leaf_indices: Vec, + ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { + Mmr::generate_batch_proof(leaf_indices).map(|(leaves, proof)| { + ( + leaves + .into_iter() + .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) + .collect(), + proof, + ) + }) + } + + fn generate_historical_batch_proof( + leaf_indices: Vec, + leaves_count: pallet_mmr::primitives::LeafIndex, + ) -> Result<(Vec, mmr::BatchProof), mmr::Error> { + Mmr::generate_historical_batch_proof(leaf_indices, leaves_count).map( + |(leaves, proof)| { + ( + leaves + .into_iter() + .map(|leaf| mmr::EncodableOpaqueLeaf::from_leaf(&leaf)) + .collect(), + proof, + ) + }, + ) } fn verify_batch_proof(leaves: Vec, proof: mmr::BatchProof) diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 26c85592ecb85..3e49f4e05cc91 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -277,6 +277,13 @@ macro_rules! create_test_api { unimplemented!() } + fn generate_historical_batch_proof( + _leaf_indices: Vec, + _leaves_count: LeafIndex + ) -> Result<(Vec, BatchProof), MmrError> { + unimplemented!() + } + fn verify_batch_proof(_leaves: Vec, _proof: BatchProof) -> Result<(), MmrError> { unimplemented!() } diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index 75032d40f492a..e939ff8ae7cd0 100644 --- a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -128,6 +128,31 @@ pub trait MmrApi { leaf_indices: Vec, at: Option, ) -> RpcResult>; + + /// Generate a MMR proof for the given `leaf_indices` of the MMR that had `leaves_count` leaves. + /// + /// This method calls into a runtime with MMR pallet included and attempts to generate + /// a MMR proof for the set of leaves at the given `leaf_indices` with MMR fixed to the state + /// with exactly `leaves_count` leaves. `leaves_count` must be larger than all `leaf_indices` + /// for the function to succeed. + /// + /// Optionally, a block hash at which the runtime should be queried can be specified. + /// Note that specifying the block hash isn't super-useful here, unless you're generating + /// proof using non-finalized blocks where there are several competing forks. That's because + /// MMR state will be fixed to the state with `leaves_count`, which already points to some + /// historical block. + /// + /// Returns the leaves and a proof for these leaves (compact encoding, i.e. hash of + /// the leaves). Both parameters are SCALE-encoded. + /// The order of entries in the `leaves` field of the returned struct + /// is the same as the order of the entries in `leaf_indices` supplied + #[method(name = "mmr_generateHistoricalBatchProof")] + fn generate_historical_batch_proof( + &self, + leaf_indices: Vec, + leaves_count: LeafIndex, + at: Option, + ) -> RpcResult>; } /// MMR RPC methods. @@ -192,6 +217,30 @@ where Ok(LeafBatchProof::new(block_hash, leaves, proof)) } + + fn generate_historical_batch_proof( + &self, + leaf_indices: Vec, + leaves_count: LeafIndex, + at: Option<::Hash>, + ) -> RpcResult::Hash>> { + let api = self.client.runtime_api(); + let block_hash = at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. 
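// (Note that the queried block matters much less for historical proofs than for
// `generate_batch_proof`: the proof is pinned to the MMR state with `leaves_count`
// leaves, so a verifier has to check it against the root of that historical MMR,
// e.g. one carried in an earlier BEEFY commitment, rather than the latest on-chain
// root.)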
+ self.client.info().best_hash); + + let (leaves, proof) = api + .generate_historical_batch_proof_with_context( + &BlockId::hash(block_hash), + sp_core::ExecutionContext::OffchainCall(None), + leaf_indices, + leaves_count, + ) + .map_err(runtime_error_into_rpc_error)? + .map_err(mmr_error_into_rpc_error)?; + + Ok(LeafBatchProof::new(block_hash, leaves, proof)) + } } /// Converts a mmr-specific error into a [`CallError`]. diff --git a/frame/merkle-mountain-range/src/lib.rs b/frame/merkle-mountain-range/src/lib.rs index 9f989847af0f9..8b4f2b60bc198 100644 --- a/frame/merkle-mountain-range/src/lib.rs +++ b/frame/merkle-mountain-range/src/lib.rs @@ -330,7 +330,27 @@ impl, I: 'static> Pallet { (Vec>, primitives::BatchProof<>::Hash>), primitives::Error, > { - let mmr: ModuleMmr = mmr::Mmr::new(Self::mmr_leaves()); + Self::generate_historical_batch_proof(leaf_indices, Self::mmr_leaves()) + } + + /// Generate a MMR proof for the given `leaf_indices` for the MMR of `leaves_count` size. + /// + /// Note this method can only be used from an off-chain context + /// (Offchain Worker or Runtime API call), since it requires + /// all the leaves to be present. + /// It may return an error or panic if used incorrectly. + pub fn generate_historical_batch_proof( + leaf_indices: Vec, + leaves_count: LeafIndex, + ) -> Result< + (Vec>, primitives::BatchProof<>::Hash>), + primitives::Error, + > { + if leaves_count > Self::mmr_leaves() { + return Err(Error::InvalidLeavesCount) + } + + let mmr: ModuleMmr = mmr::Mmr::new(leaves_count); mmr.generate_batch_proof(leaf_indices) } diff --git a/frame/merkle-mountain-range/src/tests.rs b/frame/merkle-mountain-range/src/tests.rs index d6886f90a5da7..bcb775ba02819 100644 --- a/frame/merkle-mountain-range/src/tests.rs +++ b/frame/merkle-mountain-range/src/tests.rs @@ -227,7 +227,8 @@ fn should_generate_proofs_correctly() { let _ = env_logger::try_init(); let mut ext = new_test_ext(); // given - ext.execute_with(|| add_blocks(7)); + let num_blocks: u64 = 7; + ext.execute_with(|| add_blocks(num_blocks as usize)); ext.persist_offchain_overlay(); // Try to generate proofs now. 
This requires the offchain extensions to be present @@ -241,6 +242,23 @@ fn should_generate_proofs_correctly() { crate::Pallet::::generate_batch_proof(vec![leaf_index]).unwrap() }) .collect::>(); + // when generate historical proofs for all leaves + let historical_proofs = (0_u64..crate::NumberOfLeaves::::get()) + .into_iter() + .map(|leaf_index| { + let mut proofs = vec![]; + for leaves_count in leaf_index + 1..=num_blocks { + proofs.push( + crate::Pallet::::generate_historical_batch_proof( + vec![leaf_index], + leaves_count, + ) + .unwrap(), + ) + } + proofs + }) + .collect::>(); // then assert_eq!( @@ -258,6 +276,79 @@ fn should_generate_proofs_correctly() { } ) ); + assert_eq!( + historical_proofs[0][0], + ( + vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], + BatchProof { leaf_indices: vec![0], leaf_count: 1, items: vec![] } + ) + ); + + // D + // / \ + // / \ + // A B C + // / \ / \ / \ + // 1 2 3 4 5 6 7 + // + // we're proving 3 => we need { 4, A, C++7 } + assert_eq!( + proofs[2], + ( + vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], + BatchProof { + leaf_indices: vec![2], + leaf_count: 7, + items: vec![ + hex("1b14c1dc7d3e4def11acdf31be0584f4b85c3673f1ff72a3af467b69a3b0d9d0"), + hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), + hex("dca421199bdcc55bb773c6b6967e8d16675de69062b52285ca63685241fdf626"), + ], + } + ) + ); + // A + // / \ + // 1 2 3 + // + // we're proving 3 => we need { A } + assert_eq!( + historical_proofs[2][0], + ( + vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], + BatchProof { + leaf_indices: vec![2], + leaf_count: 3, + items: vec![hex( + "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" + ),], + } + ) + ); + // D + // / \ + // / \ + // A B + // / \ / \ + // 1 2 3 4 5 + // we're proving 3 => we need { 4, A, 5 } + assert_eq!( + historical_proofs[2][2], + ( + vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], + BatchProof { + leaf_indices: vec![2], + leaf_count: 5, + items: vec![ + hex("1b14c1dc7d3e4def11acdf31be0584f4b85c3673f1ff72a3af467b69a3b0d9d0"), + hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), + hex("3b031d22e24f1126c8f7d2f394b663f9b960ed7abbedb7152e17ce16112656d0") + ], + } + ) + ); + assert_eq!(historical_proofs[2][4], proofs[2]); + assert_eq!( proofs[4], ( @@ -273,6 +364,21 @@ fn should_generate_proofs_correctly() { } ) ); + assert_eq!( + historical_proofs[4][0], + ( + vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], + BatchProof { + leaf_indices: vec![4], + leaf_count: 5, + items: vec![hex( + "ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252" + ),], + } + ) + ); + assert_eq!(historical_proofs[4][2], proofs[4]); + assert_eq!( proofs[6], ( @@ -287,6 +393,7 @@ fn should_generate_proofs_correctly() { } ) ); + assert_eq!(historical_proofs[6][0], proofs[6]); }); } @@ -302,9 +409,8 @@ fn should_generate_batch_proof_correctly() { // to retrieve full leaf data. 
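	// A historical proof generated with `leaves_count` equal to the current number of
	// leaves should be identical to the regular batch proof; the assertions below check
	// both this equivalence and a proof pinned to an earlier, smaller MMR size.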
register_offchain_ext(&mut ext); ext.execute_with(|| { - // when generate proofs for all leaves + // when generate proofs for a batch of leaves let (.., proof) = crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap(); - // then assert_eq!( proof, @@ -318,6 +424,28 @@ fn should_generate_batch_proof_correctly() { ], } ); + + // when generate historical proofs for a batch of leaves + let (.., historical_proof) = + crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 6).unwrap(); + // then + assert_eq!( + historical_proof, + BatchProof { + leaf_indices: vec![0, 4, 5], + leaf_count: 6, + items: vec![ + hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705"), + hex("cb24f4614ad5b2a5430344c99545b421d9af83c46fd632d70a332200884b4d46"), + ], + } + ); + + // when generate historical proofs for a batch of leaves + let (.., historical_proof) = + crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 7).unwrap(); + // then + assert_eq!(historical_proof, proof); }); } @@ -338,11 +466,33 @@ fn should_verify() { // when crate::Pallet::::generate_batch_proof(vec![5]).unwrap() }); + let (simple_historical_leaves, simple_historical_proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![5], 6).unwrap() + }); + let (advanced_historical_leaves, advanced_historical_proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![5], 7).unwrap() + }); ext.execute_with(|| { add_blocks(7); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof5), Ok(())); + assert_eq!( + crate::Pallet::::verify_leaves( + simple_historical_leaves, + simple_historical_proof5 + ), + Ok(()) + ); + assert_eq!( + crate::Pallet::::verify_leaves( + advanced_historical_leaves, + advanced_historical_proof5 + ), + Ok(()) + ); }); } @@ -350,16 +500,40 @@ fn should_verify() { fn should_verify_batch_proofs() { fn generate_and_verify_batch_proof( ext: &mut sp_io::TestExternalities, - leaves: &Vec, + leaf_indices: &Vec, blocks_to_add: usize, ) { - let (leaves, proof) = ext - .execute_with(|| crate::Pallet::::generate_batch_proof(leaves.to_vec()).unwrap()); + let (leaves, proof) = ext.execute_with(|| { + crate::Pallet::::generate_batch_proof(leaf_indices.to_vec()).unwrap() + }); + + let mmr_size = ext.execute_with(|| crate::Pallet::::mmr_leaves()); + let min_mmr_size = leaf_indices.iter().max().unwrap() + 1; + + // generate historical proofs for all possible mmr sizes, + // lower bound being index of highest leaf to be proven + let historical_proofs = (min_mmr_size..=mmr_size) + .map(|mmr_size| { + ext.execute_with(|| { + crate::Pallet::::generate_historical_batch_proof( + leaf_indices.to_vec(), + mmr_size, + ) + .unwrap() + }) + }) + .collect::>(); ext.execute_with(|| { add_blocks(blocks_to_add); // then assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); + historical_proofs.iter().for_each(|(leaves, proof)| { + assert_eq!( + crate::Pallet::::verify_leaves(leaves.clone(), proof.clone()), + Ok(()) + ); + }); }) } @@ -378,7 +552,7 @@ fn should_verify_batch_proofs() { ext.persist_offchain_overlay(); // generate powerset (skipping empty set) of all possible leaf combinations for mmr size n - let leaves_set: Vec> = (0..n).into_iter().powerset().skip(1).collect(); + let leaves_set: Vec> = (0..=n).into_iter().powerset().skip(1).collect(); leaves_set.iter().for_each(|leaves_subset| { generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); @@ -393,7 +567,7 @@ fn should_verify_batch_proofs() { 
ext.persist_offchain_overlay(); // generate all possible 2-leaf combinations for mmr size n - let leaves_set: Vec> = (0..n).into_iter().combinations(2).collect(); + let leaves_set: Vec> = (0..=n).into_iter().combinations(2).collect(); leaves_set.iter().for_each(|leaves_subset| { generate_and_verify_batch_proof(&mut ext, leaves_subset, 0); @@ -414,7 +588,13 @@ fn verification_should_be_stateless() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| add_blocks(7)); + let (root_6, root_7) = ext.execute_with(|| { + add_blocks(6); + let root_6 = crate::Pallet::::mmr_root_hash(); + add_blocks(1); + let root_7 = crate::Pallet::::mmr_root_hash(); + (root_6, root_7) + }); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -424,12 +604,27 @@ fn verification_should_be_stateless() { // when crate::Pallet::::generate_batch_proof(vec![5]).unwrap() }); - let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); + let (_, historical_proof5) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![5], 6).unwrap() + }); // Verify proof without relying on any on-chain data. let leaf = crate::primitives::DataOrHash::Data(leaves[0].clone()); assert_eq!( - crate::verify_leaves_proof::<::Hashing, _>(root, vec![leaf], proof5), + crate::verify_leaves_proof::<::Hashing, _>( + root_7, + vec![leaf.clone()], + proof5 + ), + Ok(()) + ); + assert_eq!( + crate::verify_leaves_proof::<::Hashing, _>( + root_6, + vec![leaf], + historical_proof5 + ), Ok(()) ); } @@ -441,7 +636,13 @@ fn should_verify_batch_proof_statelessly() { // Start off with chain initialisation and storing indexing data off-chain // (MMR Leafs) let mut ext = new_test_ext(); - ext.execute_with(|| add_blocks(7)); + let (root_6, root_7) = ext.execute_with(|| { + add_blocks(6); + let root_6 = crate::Pallet::::mmr_root_hash(); + add_blocks(1); + let root_7 = crate::Pallet::::mmr_root_hash(); + (root_6, root_7) + }); ext.persist_offchain_overlay(); // Try to generate proof now. This requires the offchain extensions to be present @@ -451,12 +652,15 @@ fn should_verify_batch_proof_statelessly() { // when crate::Pallet::::generate_batch_proof(vec![0, 4, 5]).unwrap() }); - let root = ext.execute_with(|| crate::Pallet::::mmr_root_hash()); + let (historical_leaves, historical_proof) = ext.execute_with(|| { + // when + crate::Pallet::::generate_historical_batch_proof(vec![0, 4, 5], 6).unwrap() + }); // Verify proof without relying on any on-chain data. assert_eq!( crate::verify_leaves_proof::<::Hashing, _>( - root, + root_7, leaves .into_iter() .map(|leaf| crate::primitives::DataOrHash::Data(leaf)) @@ -465,6 +669,17 @@ fn should_verify_batch_proof_statelessly() { ), Ok(()) ); + assert_eq!( + crate::verify_leaves_proof::<::Hashing, _>( + root_6, + historical_leaves + .into_iter() + .map(|leaf| crate::primitives::DataOrHash::Data(leaf)) + .collect(), + historical_proof + ), + Ok(()) + ); } #[test] @@ -721,3 +936,36 @@ fn should_verify_canonicalized() { assert_eq!(crate::Pallet::::verify_leaves(leaves, proofs), Ok(())); }); } + +#[test] +fn does_not_panic_when_generating_historical_proofs() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + + // given 7 blocks (7 MMR leaves) + ext.execute_with(|| add_blocks(7)); + ext.persist_offchain_overlay(); + + // Try to generate historical proof with invalid arguments. 
This requires the offchain + // extensions to be present to retrieve full leaf data. + register_offchain_ext(&mut ext); + ext.execute_with(|| { + // when leaf index is invalid + assert_eq!( + crate::Pallet::::generate_historical_batch_proof(vec![10], 7), + Err(Error::LeafNotFound), + ); + + // when leaves count is invalid + assert_eq!( + crate::Pallet::::generate_historical_batch_proof(vec![3], 100), + Err(Error::InvalidLeavesCount), + ); + + // when both leaf index and leaves count are invalid + assert_eq!( + crate::Pallet::::generate_historical_batch_proof(vec![10], 100), + Err(Error::InvalidLeavesCount), + ); + }); +} diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index 29a7e3d1a6fb6..c40a594739ec1 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -402,6 +402,8 @@ pub enum Error { PalletNotIncluded, /// Cannot find the requested leaf index InvalidLeafIndex, + /// The provided leaves count is larger than the actual leaves count. + InvalidLeavesCount, } impl Error { @@ -455,7 +457,14 @@ sp_api::decl_runtime_apis! { fn mmr_root() -> Result; /// Generate MMR proof for a series of leaves under given indices. - fn generate_batch_proof(leaf_indices: Vec) -> Result<(Vec, BatchProof), Error>; + fn generate_batch_proof(leaf_indices: Vec) + -> Result<(Vec, BatchProof), Error>; + + /// Generate MMR proof for a series of leaves under given indices, using MMR at given `leaves_count` size. + fn generate_historical_batch_proof( + leaf_indices: Vec, + leaves_count: LeafIndex + ) -> Result<(Vec, BatchProof), Error>; /// Verify MMR proof against on-chain MMR for a batch of leaves. /// From 37664fe5b3513eb996225f016eceaf74963b8133 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Sun, 2 Oct 2022 17:16:45 +0200 Subject: [PATCH 196/348] Remove contracts RPCs (#12358) * Remove contracts RPCs * Remove serde as RPC serialization is no longer needed * Rename folder to match crate name * Compile fix * Remove Byte wrapper --- Cargo.lock | 26 +- Cargo.toml | 4 +- bin/node/rpc/Cargo.toml | 1 - bin/node/rpc/src/lib.rs | 3 - bin/node/runtime/Cargo.toml | 6 +- bin/node/runtime/src/lib.rs | 2 +- frame/contracts/Cargo.toml | 2 +- .../{common => primitives}/Cargo.toml | 8 - .../{common => primitives}/README.md | 0 .../{common => primitives}/src/lib.rs | 91 +-- frame/contracts/rpc/Cargo.toml | 30 - frame/contracts/rpc/README.md | 3 - frame/contracts/rpc/runtime-api/README.md | 7 - frame/contracts/rpc/src/lib.rs | 524 ------------------ .../{rpc => }/runtime-api/Cargo.toml | 12 +- frame/contracts/runtime-api/README.md | 7 + .../{rpc => }/runtime-api/src/lib.rs | 6 +- frame/contracts/src/exec.rs | 27 +- frame/contracts/src/lib.rs | 8 +- frame/contracts/src/tests.rs | 15 +- frame/contracts/src/wasm/mod.rs | 103 ++-- frame/contracts/src/wasm/runtime.rs | 10 +- 22 files changed, 103 insertions(+), 792 deletions(-) rename frame/contracts/{common => primitives}/Cargo.toml (70%) rename frame/contracts/{common => primitives}/README.md (100%) rename frame/contracts/{common => primitives}/src/lib.rs (74%) delete mode 100644 frame/contracts/rpc/Cargo.toml delete mode 100644 frame/contracts/rpc/README.md delete mode 100644 frame/contracts/rpc/runtime-api/README.md delete mode 100644 frame/contracts/rpc/src/lib.rs rename frame/contracts/{rpc => }/runtime-api/Cargo.toml (78%) create mode 100644 frame/contracts/runtime-api/README.md rename frame/contracts/{rpc => }/runtime-api/src/lib.rs (94%) diff 
--git a/Cargo.lock b/Cargo.lock index de50d4ec27105..723a09ee9a39f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3371,7 +3371,7 @@ dependencies = [ "pallet-collective", "pallet-contracts", "pallet-contracts-primitives", - "pallet-contracts-rpc-runtime-api", + "pallet-contracts-runtime-api", "pallet-conviction-voting", "pallet-democracy", "pallet-election-provider-multi-phase", @@ -4825,7 +4825,6 @@ version = "3.0.0-dev" dependencies = [ "jsonrpsee", "node-primitives", - "pallet-contracts-rpc", "pallet-mmr-rpc", "pallet-transaction-payment-rpc", "sc-chain-spec", @@ -5530,10 +5529,6 @@ version = "6.0.0" dependencies = [ "bitflags", "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-rpc", "sp-runtime", "sp-std", ] @@ -5548,24 +5543,7 @@ dependencies = [ ] [[package]] -name = "pallet-contracts-rpc" -version = "4.0.0-dev" -dependencies = [ - "jsonrpsee", - "pallet-contracts-primitives", - "pallet-contracts-rpc-runtime-api", - "parity-scale-codec", - "serde", - "serde_json", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-rpc", - "sp-runtime", -] - -[[package]] -name = "pallet-contracts-rpc-runtime-api" +name = "pallet-contracts-runtime-api" version = "4.0.0-dev" dependencies = [ "pallet-contracts-primitives", diff --git a/Cargo.toml b/Cargo.toml index 25f12a2c9fd3f..02bc6aede8669 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,8 +86,8 @@ members = [ "frame/child-bounties", "frame/collective", "frame/contracts", - "frame/contracts/rpc", - "frame/contracts/rpc/runtime-api", + "frame/contracts/primitives", + "frame/contracts/runtime-api", "frame/conviction-voting", "frame/democracy", "frame/fast-unstake", diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 0b69ae27010fa..1f93feabf2f1e 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -14,7 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.15.1", features = ["server"] } node-primitives = { version = "2.0.0", path = "../primitives" } -pallet-contracts-rpc = { version = "4.0.0-dev", path = "../../../frame/contracts/rpc/" } pallet-mmr-rpc = { version = "3.0.0", path = "../../../frame/merkle-mountain-range/rpc/" } pallet-transaction-payment-rpc = { version = "4.0.0-dev", path = "../../../frame/transaction-payment/rpc/" } sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 0e6b04087fa63..94e01619c6e63 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -108,7 +108,6 @@ where + Send + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_contracts_rpc::ContractsRuntimeApi, C::Api: pallet_mmr_rpc::MmrRuntimeApi::Hash>, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, @@ -118,7 +117,6 @@ where B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend>, { - use pallet_contracts_rpc::{Contracts, ContractsApiServer}; use pallet_mmr_rpc::{Mmr, MmrApiServer}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use sc_consensus_babe_rpc::{Babe, BabeApiServer}; @@ -150,7 +148,6 @@ where // Making synchronous calls in light client freezes the browser currently, // more context: https://github.com/paritytech/substrate/pull/3480 // These RPCs should use an asynchronous caller instead. 
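	// The dedicated Contracts RPC extension is dropped below. Contract dry-runs
	// (call, instantiate, upload_code, get_storage) are instead served by the
	// `pallet_contracts_runtime_api::ContractsApi` runtime API, which clients can
	// reach through a generic runtime-API call (e.g. the `state_call` RPC) rather
	// than the removed `contracts_*` endpoints.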
- io.merge(Contracts::new(client.clone()).into_rpc())?; io.merge(Mmr::new(client.clone()).into_rpc())?; io.merge(TransactionPayment::new(client.clone()).into_rpc())?; io.merge( diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index e722024231651..ac3afc19da50f 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -61,8 +61,8 @@ pallet-bounties = { version = "4.0.0-dev", default-features = false, path = "../ pallet-child-bounties = { version = "4.0.0-dev", default-features = false, path = "../../../frame/child-bounties" } pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../../../frame/collective" } pallet-contracts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../../frame/contracts/common/" } -pallet-contracts-rpc-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../../frame/contracts/primitives/" } +pallet-contracts-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/runtime-api/" } pallet-conviction-voting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/conviction-voting" } pallet-democracy = { version = "4.0.0-dev", default-features = false, path = "../../../frame/democracy" } pallet-election-provider-multi-phase = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-multi-phase" } @@ -139,7 +139,7 @@ std = [ "pallet-collective/std", "pallet-contracts/std", "pallet-contracts-primitives/std", - "pallet-contracts-rpc-runtime-api/std", + "pallet-contracts-runtime-api/std", "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-elections-phragmen/std", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4fa4049e22682..f0c68b5b225cd 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1942,7 +1942,7 @@ impl_runtime_apis! 
{ } } - impl pallet_contracts_rpc_runtime_api::ContractsApi< + impl pallet_contracts_runtime_api::ContractsApi< Block, AccountId, Balance, BlockNumber, Hash, > for Runtime diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 30fbad680ebe5..7c3b677e06436 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -36,7 +36,7 @@ rand_pcg = { version = "0.3", optional = true } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "common" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "primitives" } pallet-contracts-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/primitives/Cargo.toml similarity index 70% rename from frame/contracts/common/Cargo.toml rename to frame/contracts/primitives/Cargo.toml index 49d7973ab155f..64e332007350b 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/primitives/Cargo.toml @@ -15,23 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bitflags = "1.0" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.0.0", default-features = false, features = ["derive"] } -serde = { version = "1", features = ["derive"], optional = true } # Substrate Dependencies (This crate should not rely on frame) -sp-core = { version = "6.0.0", path = "../../../primitives/core", default-features = false } sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } -sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc", optional = true } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] std = [ "codec/std", - "scale-info/std", - "sp-core/std", "sp-runtime/std", "sp-std/std", - "sp-rpc", - "serde", ] diff --git a/frame/contracts/common/README.md b/frame/contracts/primitives/README.md similarity index 100% rename from frame/contracts/common/README.md rename to frame/contracts/primitives/README.md diff --git a/frame/contracts/common/src/lib.rs b/frame/contracts/primitives/src/lib.rs similarity index 74% rename from frame/contracts/common/src/lib.rs rename to frame/contracts/primitives/src/lib.rs index f810725afcd36..5daf875ac2651 100644 --- a/frame/contracts/common/src/lib.rs +++ b/frame/contracts/primitives/src/lib.rs @@ -21,32 +21,16 @@ use bitflags::bitflags; use codec::{Decode, Encode}; -use sp_core::Bytes; use sp_runtime::{ traits::{Saturating, Zero}, DispatchError, RuntimeDebug, }; use sp_std::prelude::*; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; - -#[cfg(feature = "std")] -use sp_rpc::number::NumberOrHex; - /// Result type of a `bare_call` or `bare_instantiate` call. /// /// It contains the execution result together with some auxiliary information. 
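// With the RPC layer gone, these result types only cross the runtime-API boundary
// as SCALE-encoded values, so the serde/JSON derives and helper modules are removed
// from this file.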
#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr( - feature = "std", - serde( - rename_all = "camelCase", - bound(serialize = "R: Serialize, Balance: Copy + Into"), - bound(deserialize = "R: Deserialize<'de>, Balance: TryFrom") - ) -)] pub struct ContractResult { /// How much gas was consumed during execution. pub gas_consumed: u64, @@ -80,7 +64,6 @@ pub struct ContractResult { /// /// The debug message is never generated during on-chain execution. It is reserved for /// RPC calls. - #[cfg_attr(feature = "std", serde(with = "as_string"))] pub debug_message: Vec, /// The execution result of the wasm code. pub result: R, @@ -113,8 +96,6 @@ pub enum ContractAccessError { bitflags! { /// Flags used by a contract to customize exit behaviour. #[derive(Encode, Decode)] - #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] - #[cfg_attr(feature = "std", serde(rename_all = "camelCase", transparent))] pub struct ReturnFlags: u32 { /// If this bit is set all changes made by the contract execution are rolled back. const REVERT = 0x0000_0001; @@ -123,13 +104,11 @@ bitflags! { /// Output of a contract call or instantiation which ran to completion. #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct ExecReturnValue { /// Flags passed along by `seal_return`. Empty when `seal_return` was never called. pub flags: ReturnFlags, /// Buffer passed along by `seal_return`. Empty when `seal_return` was never called. - pub data: Bytes, + pub data: Vec, } impl ExecReturnValue { @@ -141,8 +120,6 @@ impl ExecReturnValue { /// The result of a successful contract instantiation. #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct InstantiateReturnValue { /// The output of the called constructor. pub result: ExecReturnValue, @@ -152,63 +129,40 @@ pub struct InstantiateReturnValue { /// The result of succesfully uploading a contract. #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr( - feature = "std", - serde( - rename_all = "camelCase", - bound(serialize = "CodeHash: Serialize, Balance: Copy + Into"), - bound(deserialize = "CodeHash: Deserialize<'de>, Balance: TryFrom") - ) -)] pub struct CodeUploadReturnValue { /// The key under which the new code is stored. pub code_hash: CodeHash, /// The deposit that was reserved at the caller. Is zero when the code already existed. - #[cfg_attr(feature = "std", serde(with = "as_hex"))] pub deposit: Balance, } /// Reference to an existing code hash or a new wasm module. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub enum Code { /// A wasm module as raw bytes. - Upload(Bytes), + Upload(Vec), /// The code hash of an on-chain wasm blob. Existing(Hash), } impl>, Hash> From for Code { fn from(from: T) -> Self { - Code::Upload(Bytes(from.into())) + Code::Upload(from.into()) } } /// The amount of balance that was either charged or refunded in order to pay for storage. 
#[derive(Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, Clone)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -#[cfg_attr( - feature = "std", - serde( - rename_all = "camelCase", - bound(serialize = "Balance: Copy + Into"), - bound(deserialize = "Balance: TryFrom") - ) -)] pub enum StorageDeposit { /// The transaction reduced storage consumption. /// /// This means that the specified amount of balance was transferred from the involved /// contracts to the call origin. - #[cfg_attr(feature = "std", serde(with = "as_hex"))] Refund(Balance), /// The transaction increased overall storage usage. /// /// This means that the specified amount of balance was transferred from the call origin /// to the contracts involved. - #[cfg_attr(feature = "std", serde(with = "as_hex"))] Charge(Balance), } @@ -295,42 +249,3 @@ where } } } - -#[cfg(feature = "std")] -mod as_string { - use super::*; - use serde::{ser::Error, Deserializer, Serializer}; - - pub fn serialize(bytes: &Vec, serializer: S) -> Result { - std::str::from_utf8(bytes) - .map_err(|e| S::Error::custom(format!("Debug buffer contains invalid UTF8: {}", e)))? - .serialize(serializer) - } - - pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result, D::Error> { - Ok(String::deserialize(deserializer)?.into_bytes()) - } -} - -#[cfg(feature = "std")] -mod as_hex { - use super::*; - use serde::{de::Error as _, Deserializer, Serializer}; - - pub fn serialize(balance: &Balance, serializer: S) -> Result - where - S: Serializer, - Balance: Copy + Into, - { - Into::::into(*balance).serialize(serializer) - } - - pub fn deserialize<'de, D, Balance>(deserializer: D) -> Result - where - D: Deserializer<'de>, - Balance: TryFrom, - { - Balance::try_from(NumberOrHex::deserialize(deserializer)?) - .map_err(|_| D::Error::custom("Cannot decode NumberOrHex to Balance")) - } -} diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml deleted file mode 100644 index 7876c7cba40d0..0000000000000 --- a/frame/contracts/rpc/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "pallet-contracts-rpc" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "Node-specific RPC methods for interaction with contracts." -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } -serde = { version = "1", features = ["derive"] } - -# Substrate Dependencies -pallet-contracts-primitives = { version = "6.0.0", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } -sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } -sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sp-core = { version = "6.0.0", path = "../../../primitives/core" } -sp-rpc = { version = "6.0.0", path = "../../../primitives/rpc" } -sp-runtime = { version = "6.0.0", path = "../../../primitives/runtime" } - -[dev-dependencies] -serde_json = "1" diff --git a/frame/contracts/rpc/README.md b/frame/contracts/rpc/README.md deleted file mode 100644 index be6df237bf60d..0000000000000 --- a/frame/contracts/rpc/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Node-specific RPC methods for interaction with contracts. 
- -License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/runtime-api/README.md b/frame/contracts/rpc/runtime-api/README.md deleted file mode 100644 index d57f29a93bd1d..0000000000000 --- a/frame/contracts/rpc/runtime-api/README.md +++ /dev/null @@ -1,7 +0,0 @@ -Runtime API definition required by Contracts RPC extensions. - -This API should be imported and implemented by the runtime, -of a node that wants to use the custom RPC extension -adding Contracts access methods. - -License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs deleted file mode 100644 index 1df7a5753f77e..0000000000000 --- a/frame/contracts/rpc/src/lib.rs +++ /dev/null @@ -1,524 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Node-specific RPC methods for interaction with contracts. - -#![warn(unused_crate_dependencies)] - -use std::{marker::PhantomData, sync::Arc}; - -use codec::Codec; -use jsonrpsee::{ - core::{async_trait, Error as JsonRpseeError, RpcResult}, - proc_macros::rpc, - types::error::{CallError, ErrorCode, ErrorObject}, -}; -use pallet_contracts_primitives::{ - Code, CodeUploadResult, ContractExecResult, ContractInstantiateResult, -}; -use serde::{Deserialize, Serialize}; -use sp_api::ProvideRuntimeApi; -use sp_blockchain::HeaderBackend; -use sp_core::Bytes; -use sp_rpc::number::NumberOrHex; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, -}; - -pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi; - -const RUNTIME_ERROR: i32 = 1; -const CONTRACT_DOESNT_EXIST: i32 = 2; -const KEY_DECODING_FAILED: i32 = 3; - -pub type Weight = u64; - -/// A rough estimate of how much gas a decent hardware consumes per second, -/// using native execution. -/// This value is used to set the upper bound for maximal contract calls to -/// prevent blocking the RPC for too long. -/// -/// As 1 gas is equal to 1 weight we base this on the conducted benchmarks which -/// determined runtime weights: -/// -const GAS_PER_SECOND: Weight = 1_000_000_000_000; - -/// The maximum amount of weight that the call and instantiate rpcs are allowed to consume. -/// This puts a ceiling on the weight limit that is supplied to the rpc as an argument. -const GAS_LIMIT: Weight = 5 * GAS_PER_SECOND; - -/// A private newtype for converting `ContractAccessError` into an RPC error. 
-struct ContractAccessError(pallet_contracts_primitives::ContractAccessError); - -impl From for JsonRpseeError { - fn from(e: ContractAccessError) -> Self { - use pallet_contracts_primitives::ContractAccessError::*; - match e.0 { - DoesntExist => CallError::Custom(ErrorObject::owned( - CONTRACT_DOESNT_EXIST, - "The specified contract doesn't exist.", - None::<()>, - )) - .into(), - KeyDecodingFailed => CallError::Custom(ErrorObject::owned( - KEY_DECODING_FAILED, - "Failed to decode the specified storage key.", - None::<()>, - )) - .into(), - } - } -} - -/// A struct that encodes RPC parameters required for a call to a smart-contract. -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct CallRequest { - origin: AccountId, - dest: AccountId, - value: NumberOrHex, - gas_limit: NumberOrHex, - storage_deposit_limit: Option, - input_data: Bytes, -} - -/// A struct that encodes RPC parameters required to instantiate a new smart-contract. -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct InstantiateRequest { - origin: AccountId, - value: NumberOrHex, - gas_limit: NumberOrHex, - storage_deposit_limit: Option, - code: Code, - data: Bytes, - salt: Bytes, -} - -/// A struct that encodes RPC parameters required for a call to upload a new code. -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct CodeUploadRequest { - origin: AccountId, - code: Bytes, - storage_deposit_limit: Option, -} - -/// Contracts RPC methods. -#[rpc(client, server)] -pub trait ContractsApi -where - Balance: Copy + TryFrom + Into, -{ - /// Executes a call to a contract. - /// - /// This call is performed locally without submitting any transactions. Thus executing this - /// won't change any state. Nonetheless, the calling state-changing contracts is still possible. - /// - /// This method is useful for calling getter-like methods on contracts or to dry-run a - /// a contract call in order to determine the `gas_limit`. - #[method(name = "contracts_call")] - fn call( - &self, - call_request: CallRequest, - at: Option, - ) -> RpcResult>; - - /// Instantiate a new contract. - /// - /// This instantiate is performed locally without submitting any transactions. Thus the contract - /// is not actually created. - /// - /// This method is useful for UIs to dry-run contract instantiations. - #[method(name = "contracts_instantiate")] - fn instantiate( - &self, - instantiate_request: InstantiateRequest, - at: Option, - ) -> RpcResult>; - - /// Upload new code without instantiating a contract from it. - /// - /// This upload is performed locally without submitting any transactions. Thus executing this - /// won't change any state. - /// - /// This method is useful for UIs to dry-run code upload. - #[method(name = "contracts_upload_code")] - fn upload_code( - &self, - upload_request: CodeUploadRequest, - at: Option, - ) -> RpcResult>; - - /// Returns the value under a specified storage `key` in a contract given by `address` param, - /// or `None` if it is not set. - #[method(name = "contracts_getStorage")] - fn get_storage( - &self, - address: AccountId, - key: Bytes, - at: Option, - ) -> RpcResult>; -} - -/// Contracts RPC methods. -pub struct Contracts { - client: Arc, - _marker: PhantomData, -} - -impl Contracts { - /// Create new `Contracts` with the given reference to the client. 
- pub fn new(client: Arc) -> Self { - Self { client, _marker: Default::default() } - } -} - -#[async_trait] -impl - ContractsApiServer< - ::Hash, - <::Header as HeaderT>::Number, - AccountId, - Balance, - Hash, - > for Contracts -where - Block: BlockT, - Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - Client::Api: ContractsRuntimeApi< - Block, - AccountId, - Balance, - <::Header as HeaderT>::Number, - Hash, - >, - AccountId: Codec, - Balance: Codec + Copy + TryFrom + Into, - Hash: Codec, -{ - fn call( - &self, - call_request: CallRequest, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); - - let CallRequest { origin, dest, value, gas_limit, storage_deposit_limit, input_data } = - call_request; - - let value: Balance = decode_hex(value, "balance")?; - let gas_limit: u64 = decode_hex(gas_limit, "weight")?; - let storage_deposit_limit: Option = - storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; - limit_gas(gas_limit)?; - - api.call(&at, origin, dest, value, gas_limit, storage_deposit_limit, input_data.to_vec()) - .map_err(runtime_error_into_rpc_err) - } - - fn instantiate( - &self, - instantiate_request: InstantiateRequest, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); - - let InstantiateRequest { - origin, - value, - gas_limit, - storage_deposit_limit, - code, - data, - salt, - } = instantiate_request; - - let value: Balance = decode_hex(value, "balance")?; - let gas_limit: u64 = decode_hex(gas_limit, "weight")?; - let storage_deposit_limit: Option = - storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; - limit_gas(gas_limit)?; - - api.instantiate( - &at, - origin, - value, - gas_limit, - storage_deposit_limit, - code, - data.to_vec(), - salt.to_vec(), - ) - .map_err(runtime_error_into_rpc_err) - } - - fn upload_code( - &self, - upload_request: CodeUploadRequest, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); - - let CodeUploadRequest { origin, code, storage_deposit_limit } = upload_request; - - let storage_deposit_limit: Option = - storage_deposit_limit.map(|l| decode_hex(l, "balance")).transpose()?; - - api.upload_code(&at, origin, code.to_vec(), storage_deposit_limit) - .map_err(runtime_error_into_rpc_err) - } - - fn get_storage( - &self, - address: AccountId, - key: Bytes, - at: Option<::Hash>, - ) -> RpcResult> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); - let result = api - .get_storage(&at, address, key.to_vec()) - .map_err(runtime_error_into_rpc_err)? - .map_err(ContractAccessError)? - .map(Bytes); - - Ok(result) - } -} - -/// Converts a runtime trap into an RPC error. 
-fn runtime_error_into_rpc_err(err: impl std::fmt::Debug) -> JsonRpseeError { - CallError::Custom(ErrorObject::owned( - RUNTIME_ERROR, - "Runtime error", - Some(format!("{:?}", err)), - )) - .into() -} - -fn decode_hex>(from: H, name: &str) -> RpcResult { - from.try_into().map_err(|_| { - JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( - ErrorCode::InvalidParams.code(), - format!("{:?} does not fit into the {} type", from, name), - None::<()>, - ))) - }) -} - -fn limit_gas(gas_limit: Weight) -> RpcResult<()> { - if gas_limit > GAS_LIMIT { - Err(JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( - ErrorCode::InvalidParams.code(), - format!( - "Requested gas limit is greater than maximum allowed: {} > {}", - gas_limit, GAS_LIMIT - ), - None::<()>, - )))) - } else { - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use pallet_contracts_primitives::{ContractExecResult, ContractInstantiateResult}; - use sp_core::U256; - - fn trim(json: &str) -> String { - json.chars().filter(|c| !c.is_whitespace()).collect() - } - - #[test] - fn call_request_should_serialize_deserialize_properly() { - type Req = CallRequest; - let req: Req = serde_json::from_str( - r#" - { - "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "dest": "5DRakbLVnjVrW6niwLfHGW24EeCEvDAFGEXrtaYS5M4ynoom", - "value": "0x112210f4B16c1cb1", - "gasLimit": 1000000000000, - "storageDepositLimit": 5000, - "inputData": "0x8c97db39" - } - "#, - ) - .unwrap(); - assert_eq!(req.gas_limit.into_u256(), U256::from(0xe8d4a51000u64)); - assert_eq!(req.storage_deposit_limit.map(|l| l.into_u256()), Some(5000.into())); - assert_eq!(req.value.into_u256(), U256::from(1234567890987654321u128)); - } - - #[test] - fn instantiate_request_should_serialize_deserialize_properly() { - type Req = InstantiateRequest; - let req: Req = serde_json::from_str( - r#" - { - "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "value": "0x88", - "gasLimit": 42, - "code": { "existing": "0x1122" }, - "data": "0x4299", - "salt": "0x9988" - } - "#, - ) - .unwrap(); - - assert_eq!(req.origin, "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); - assert_eq!(req.value.into_u256(), 0x88.into()); - assert_eq!(req.gas_limit.into_u256(), 42.into()); - assert_eq!(req.storage_deposit_limit, None); - assert_eq!(&*req.data, [0x42, 0x99].as_ref()); - assert_eq!(&*req.salt, [0x99, 0x88].as_ref()); - let code = match req.code { - Code::Existing(hash) => hash, - _ => panic!("json encoded an existing hash"), - }; - assert_eq!(&code, "0x1122"); - } - - #[test] - fn code_upload_request_should_serialize_deserialize_properly() { - type Req = CodeUploadRequest; - let req: Req = serde_json::from_str( - r#" - { - "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", - "code": "0x8c97db39", - "storageDepositLimit": 5000 - } - "#, - ) - .unwrap(); - assert_eq!(req.origin, "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); - assert_eq!(&*req.code, [0x8c, 0x97, 0xdb, 0x39].as_ref()); - assert_eq!(req.storage_deposit_limit.map(|l| l.into_u256()), Some(5000.into())); - } - - #[test] - fn call_result_should_serialize_deserialize_properly() { - fn test(expected: &str) { - let res: ContractExecResult = serde_json::from_str(expected).unwrap(); - let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, trim(expected).as_str()); - } - test( - r#"{ - "gasConsumed": 5000, - "gasRequired": 8000, - "storageDeposit": {"charge": 42000}, - "debugMessage": "HelloWorld", - "result": { - "Ok": { - "flags": 5, - "data": "0x1234" - } - } - 
}"#, - ); - test( - r#"{ - "gasConsumed": 3400, - "gasRequired": 5200, - "storageDeposit": {"refund": 12000}, - "debugMessage": "HelloWorld", - "result": { - "Err": "BadOrigin" - } - }"#, - ); - } - - #[test] - fn instantiate_result_should_serialize_deserialize_properly() { - fn test(expected: &str) { - let res: ContractInstantiateResult = - serde_json::from_str(expected).unwrap(); - let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, trim(expected).as_str()); - } - test( - r#"{ - "gasConsumed": 5000, - "gasRequired": 8000, - "storageDeposit": {"refund": 12000}, - "debugMessage": "HelloWorld", - "result": { - "Ok": { - "result": { - "flags": 5, - "data": "0x1234" - }, - "accountId": "5CiPP" - } - } - }"#, - ); - test( - r#"{ - "gasConsumed": 3400, - "gasRequired": 5200, - "storageDeposit": {"charge": 0}, - "debugMessage": "HelloWorld", - "result": { - "Err": "BadOrigin" - } - }"#, - ); - } - - #[test] - fn code_upload_result_should_serialize_deserialize_properly() { - fn test(expected: &str) { - let res: CodeUploadResult = serde_json::from_str(expected).unwrap(); - let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, trim(expected).as_str()); - } - test( - r#"{ - "Ok": { - "codeHash": 4711, - "deposit": 99 - } - }"#, - ); - test( - r#"{ - "Err": "BadOrigin" - }"#, - ); - } -} diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/runtime-api/Cargo.toml similarity index 78% rename from frame/contracts/rpc/runtime-api/Cargo.toml rename to frame/contracts/runtime-api/Cargo.toml index bd07d577ec272..05b0e05d4c568 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/runtime-api/Cargo.toml @@ -1,12 +1,12 @@ [package] -name = "pallet-contracts-rpc-runtime-api" +name = "pallet-contracts-runtime-api" version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2021" license = "Apache-2.0" homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" -description = "Runtime API definition required by Contracts RPC extensions." 
+description = "Runtime API definition used to provide dry-run capabilities" readme = "README.md" [package.metadata.docs.rs] @@ -17,10 +17,10 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } # Substrate Dependencies -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../common" } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../../primitives/api" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../../../primitives/runtime" } -sp-std = { version = "4.0.0", default-features = false, path = "../../../../primitives/std" } +pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../primitives" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } [features] default = ["std"] diff --git a/frame/contracts/runtime-api/README.md b/frame/contracts/runtime-api/README.md new file mode 100644 index 0000000000000..fed285b23b2ac --- /dev/null +++ b/frame/contracts/runtime-api/README.md @@ -0,0 +1,7 @@ +Runtime API definition used to provide dry-run capabilities + +This API should be imported and implemented by the runtime, +of a node that wants to provide clients with dry-run +capabilities. + +License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/rpc/runtime-api/src/lib.rs b/frame/contracts/runtime-api/src/lib.rs similarity index 94% rename from frame/contracts/rpc/runtime-api/src/lib.rs rename to frame/contracts/runtime-api/src/lib.rs index 9765b37057c7b..79fd20c8c0163 100644 --- a/frame/contracts/rpc/runtime-api/src/lib.rs +++ b/frame/contracts/runtime-api/src/lib.rs @@ -15,11 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Runtime API definition required by Contracts RPC extensions. +//! Runtime API definition used to provide dry-run capabilities. //! //! This API should be imported and implemented by the runtime, -//! of a node that wants to use the custom RPC extension -//! adding Contracts access methods. +//! of a node that wants to provide clients with dry-run +//! capabilities. 
#![cfg_attr(not(feature = "std"), no_std)] diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 6260dd41de707..bf35410d0bd4b 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -1384,7 +1384,6 @@ mod tests { use frame_system::{EventRecord, Phase}; use pallet_contracts_primitives::ReturnFlags; use pretty_assertions::assert_eq; - use sp_core::Bytes; use sp_runtime::{traits::Hash, DispatchError}; use std::{ cell::RefCell, @@ -1517,7 +1516,7 @@ mod tests { } fn exec_success() -> ExecResult { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) } fn exec_trapped() -> ExecResult { @@ -1586,7 +1585,7 @@ mod tests { let success_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { @@ -1621,13 +1620,13 @@ mod tests { let success_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); let delegate_ch = MockLoader::insert(Call, move |ctx, _| { assert_eq!(ctx.ext.value_transferred(), value); let _ = ctx.ext.delegate_call(success_ch, Vec::new())?; - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { @@ -1662,7 +1661,7 @@ mod tests { let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(Vec::new()) }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Vec::new() }) }); ExtBuilder::default().build().execute_with(|| { @@ -1715,7 +1714,7 @@ mod tests { let origin = ALICE; let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }) }); ExtBuilder::default().build().execute_with(|| { @@ -1736,7 +1735,7 @@ mod tests { let output = result.unwrap(); assert!(!output.did_revert()); - assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); + assert_eq!(output.data, vec![1, 2, 3, 4]); }); } @@ -1747,7 +1746,7 @@ mod tests { let origin = ALICE; let dest = BOB; let return_ch = MockLoader::insert(Call, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![1, 2, 3, 4]) }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1, 2, 3, 4] }) }); ExtBuilder::default().build().execute_with(|| { @@ -1768,7 +1767,7 @@ mod tests { let output = result.unwrap(); assert!(output.did_revert()); - assert_eq!(output.data, Bytes(vec![1, 2, 3, 4])); + assert_eq!(output.data, vec![1, 2, 3, 4]); }); } @@ -2115,7 +2114,7 @@ mod tests { #[test] fn instantiation_work_with_success_output() { let dummy_ch = MockLoader::insert(Constructor, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![80, 65, 83, 83]) }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] }) }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -2140,7 +2139,7 @@ mod tests { &[], None, ), - Ok((address, 
ref output)) if output.data == Bytes(vec![80, 65, 83, 83]) => address + Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address ); // Check that the newly created account has the expected code hash and @@ -2159,7 +2158,7 @@ mod tests { #[test] fn instantiation_fails_with_failing_output() { let dummy_ch = MockLoader::insert(Constructor, |_, _| { - Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![70, 65, 73, 76]) }) + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] }) }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -2184,7 +2183,7 @@ mod tests { &[], None, ), - Ok((address, ref output)) if output.data == Bytes(vec![70, 65, 73, 76]) => address + Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address ); // Check that the account has not been created. diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index f9a1c8decf042..3aeb8742705c2 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -123,7 +123,7 @@ use pallet_contracts_primitives::{ StorageDeposit, }; use scale_info::TypeInfo; -use sp_core::{crypto::UncheckedFrom, Bytes}; +use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup}; use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; @@ -512,7 +512,7 @@ pub mod pallet { value, gas_limit, storage_deposit_limit.map(Into::into), - Code::Upload(Bytes(code)), + Code::Upload(code), data, salt, None, @@ -743,7 +743,7 @@ pub mod pallet { value, gas_limit, storage_deposit_limit.map(Into::into), - Code::Upload(Bytes(code)), + Code::Upload(code), data, salt, None, @@ -1234,7 +1234,7 @@ where let try_exec = || { let schedule = T::Schedule::get(); let (extra_deposit, executable) = match code { - Code::Upload(Bytes(binary)) => { + Code::Upload(binary) => { let executable = PrefabWasmModule::from_code(binary, &schedule, origin.clone()) .map_err(|(err, msg)| { debug_message.as_mut().map(|buffer| buffer.extend(msg.as_bytes())); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index e5893c3dbd112..b4a8f8f4c834f 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -44,7 +44,6 @@ use frame_support::{ }; use frame_system::{self as system, EventRecord, Phase}; use pretty_assertions::{assert_eq, assert_ne}; -use sp_core::Bytes; use sp_io::hashing::blake2_256; use sp_keystore::{testing::KeyStore, KeystoreExt}; use sp_runtime::{ @@ -1722,7 +1721,7 @@ fn chain_extension_works() { let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, input.clone(), false); assert_eq!(TestExtension::last_seen_buffer(), input); - assert_eq!(result.result.unwrap().data, Bytes(input)); + assert_eq!(result.result.unwrap().data, input); // 1 = treat inputs as integer primitives and store the supplied integers Contracts::bare_call( @@ -1787,7 +1786,7 @@ fn chain_extension_works() { .result .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, Bytes(vec![42, 99])); + assert_eq!(result.data, vec![42, 99]); // diverging to second chain extension that sets flags to 0x1 and returns a fixed buffer // We set the MSB part to 1 (instead of 0) which routes the request into the second @@ -1804,7 +1803,7 @@ fn chain_extension_works() { .result .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, Bytes(vec![0x4B, 0x1D])); + assert_eq!(result.data, vec![0x4B, 0x1D]); // Diverging to third chain extension that is 
disabled // We set the MSB part to 2 (instead of 0) which routes the request into the third extension @@ -2672,7 +2671,7 @@ fn ecdsa_recover() { .result .unwrap(); assert!(!result.did_revert()); - assert_eq!(result.data.as_ref(), &EXPECTED_COMPRESSED_PUBLIC_KEY); + assert_eq!(result.data, EXPECTED_COMPRESSED_PUBLIC_KEY); }) } @@ -3503,7 +3502,7 @@ fn contract_reverted() { .result .unwrap(); assert_eq!(result.result.flags, flags); - assert_eq!(result.result.data.0, buffer); + assert_eq!(result.result.data, buffer); assert!(!>::contains_key(result.account_id)); // Pass empty flags and therefore successfully instantiate the contract for later use. @@ -3539,7 +3538,7 @@ fn contract_reverted() { .result .unwrap(); assert_eq!(result.flags, flags); - assert_eq!(result.data.0, buffer); + assert_eq!(result.data, buffer); }); } @@ -3559,7 +3558,7 @@ fn code_rejected_error_works() { 0, GAS_LIMIT, None, - Code::Upload(Bytes(wasm)), + Code::Upload(wasm), vec![], vec![], true, diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index d8b4cd245356e..b341ae3bd155d 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -281,7 +281,7 @@ mod tests { }; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; - use sp_core::{Bytes, H256}; + use sp_core::H256; use sp_runtime::DispatchError; use std::{ borrow::BorrowMut, @@ -341,8 +341,8 @@ mod tests { } /// The call is mocked and just returns this hardcoded value. - fn call_return_data() -> Bytes { - Bytes(vec![0xDE, 0xAD, 0xBE, 0xEF]) + fn call_return_data() -> Vec { + vec![0xDE, 0xAD, 0xBE, 0xEF] } impl Default for MockExt { @@ -404,7 +404,7 @@ mod tests { }); Ok(( Contracts::::contract_address(&ALICE, &code_hash, salt), - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }, + ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }, )) } fn set_code_hash(&mut self, hash: CodeHash) -> Result<(), DispatchError> { @@ -804,7 +804,7 @@ mod tests { let mut mock_ext = MockExt::default(); let input = vec![0xff, 0x2a, 0x99, 0x88]; let result = execute(CODE, input.clone(), &mut mock_ext).unwrap(); - assert_eq!(result.data.0, input); + assert_eq!(result.data, input); assert_eq!( &mock_ext.calls, &[CallEntry { to: ALICE, value: 0x2a, data: input, allows_reentry: true }] @@ -907,15 +907,15 @@ mod tests { // value does not exist -> sentinel value returned let result = execute(CODE, [3u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); // value did exist -> success let result = execute(CODE, [1u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1,); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1,); // value did exist -> success (zero sized type) let result = execute(CODE, [2u8; 32].encode(), &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0,); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0,); } #[test] @@ -977,13 +977,13 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); // value exists let input = (64, [1u8; 
64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // true as u32 returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1); // getter does not remove the value from storage assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]); @@ -991,7 +991,7 @@ mod tests { let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // true as u32 returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); // getter does not remove the value from storage assert_eq!(ext.storage.get(&[2u8; 19].to_vec()).unwrap(), &([] as [u8; 0])); } @@ -1234,7 +1234,7 @@ mod tests { let output = execute(CODE_ECDSA_TO_ETH_ADDRESS, vec![], MockExt::default()).unwrap(); assert_eq!( output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes([0x02; 20].to_vec()) } + ExecReturnValue { flags: ReturnFlags::empty(), data: [0x02; 20].to_vec() } ); } @@ -1311,7 +1311,7 @@ mod tests { assert_eq!( output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes([0x22; 32].to_vec()) } + ExecReturnValue { flags: ReturnFlags::empty(), data: [0x22; 32].to_vec() } ); } @@ -1630,10 +1630,7 @@ mod tests { fn return_from_start_fn() { let output = execute(CODE_RETURN_FROM_START_FN, vec![], MockExt::default()).unwrap(); - assert_eq!( - output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) } - ); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }); } const CODE_TIMESTAMP_NOW: &str = r#" @@ -1902,15 +1899,13 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: Bytes( - ( - array_bytes::hex2array_unchecked::<32>( - "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" - ), - 42u64, - ) - .encode() - ), + data: ( + array_bytes::hex2array_unchecked::<32>( + "000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F" + ), + 42u64, + ) + .encode() }, ); } @@ -2124,7 +2119,7 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: Bytes(array_bytes::hex2bytes_unchecked("445566778899")), + data: array_bytes::hex2bytes_unchecked("445566778899"), } ); assert!(!output.did_revert()); @@ -2143,7 +2138,7 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::REVERT, - data: Bytes(array_bytes::hex2bytes_unchecked("5566778899")), + data: array_bytes::hex2bytes_unchecked("5566778899"), } ); assert!(output.did_revert()); @@ -2306,7 +2301,7 @@ mod tests { let result = execute(CODE_CALL_RUNTIME, call.encode(), &mut ext).unwrap(); assert_eq!(*ext.runtime_calls.borrow(), vec![call]); // 0 = ReturnCode::Success - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); } #[test] @@ -2371,19 +2366,19 @@ mod tests { // value did not exist before -> sentinel returned let input = ([1u8; 32], [42u8, 48]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]); // value do exist -> length of old value returned let input = ([1u8; 32], [0u8; 0]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - 
assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 2); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 2); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]); // value do exist -> length of old value returned (test for zero sized val) let input = ([1u8; 32], [99u8]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]); } @@ -2442,19 +2437,19 @@ mod tests { // value did not exist before -> sentinel returned let input = (32, [1u8; 32], [42u8, 48]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[42u8, 48]); // value do exist -> length of old value returned let input = (32, [1u8; 32], [0u8; 0]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 2); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 2); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[0u8; 0]); // value do exist -> length of old value returned (test for zero sized val) let input = (32, [1u8; 32], [99u8]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); assert_eq!(ext.storage.get(&[1u8; 32].to_vec()).unwrap(), &[99u8]); } @@ -2527,7 +2522,7 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::KeyNotFound as u32 ); @@ -2535,21 +2530,21 @@ mod tests { let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]); - assert_eq!(&result.data.0[4..], &[42u8]); + assert_eq!(&result.data[4..], &[42u8]); // value exists (test for 0 sized) let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), Some(&vec![])); - assert_eq!(&result.data.0[4..], &([] as [u8; 0])); + assert_eq!(&result.data[4..], &([] as [u8; 0])); } #[test] @@ -2611,14 +2606,14 @@ mod tests { let input = (32, [3u8; 32]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[3u8; 32].to_vec()), None); // value did exist let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // length returned - 
assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 1); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 1); // value cleared assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); @@ -2626,14 +2621,14 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // sentinel returned - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), crate::SENTINEL); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), crate::SENTINEL); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); // value exists let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); // length returned (test for 0 sized) - assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); + assert_eq!(u32::from_le_bytes(result.data.try_into().unwrap()), 0); // value cleared assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); } @@ -2710,7 +2705,7 @@ mod tests { let input = (63, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::KeyNotFound as u32 ); @@ -2718,21 +2713,21 @@ mod tests { let input = (64, [1u8; 64]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); - assert_eq!(&result.data.0[4..], &[42u8]); + assert_eq!(&result.data[4..], &[42u8]); // value did exist -> length returned (test for 0 sized) let input = (19, [2u8; 19]).encode(); let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( - u32::from_le_bytes(result.data.0[0..4].try_into().unwrap()), + u32::from_le_bytes(result.data[0..4].try_into().unwrap()), ReturnCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); - assert_eq!(&result.data.0[4..], &[0u8; 0]); + assert_eq!(&result.data[4..], &[0u8; 0]); } #[test] @@ -2769,10 +2764,7 @@ mod tests { let output = execute(CODE_IS_CONTRACT, vec![], MockExt::default()).unwrap(); // The mock ext just always returns 1u32 (`true`). 
- assert_eq!( - output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(1u32.encode()) }, - ); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: 1u32.encode() },); } #[test] @@ -2906,10 +2898,7 @@ mod tests { let output = execute(CODE_CALLER_IS_ORIGIN, vec![], MockExt::default()).unwrap(); // The mock ext just always returns 0u32 (`false`) - assert_eq!( - output, - ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(0u32.encode()) }, - ); + assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: 0u32.encode() },); } #[test] diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index edd413aa45bf0..3296492994071 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -30,7 +30,7 @@ use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pallet_contracts_proc_macro::define_env; -use sp_core::{crypto::UncheckedFrom, Bytes}; +use sp_core::crypto::UncheckedFrom; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; use sp_runtime::traits::{Bounded, Zero}; use sp_sandbox::SandboxMemory; @@ -483,10 +483,10 @@ where TrapReason::Return(ReturnData { flags, data }) => { let flags = ReturnFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?; - Ok(ExecReturnValue { flags, data: Bytes(data) }) + Ok(ExecReturnValue { flags, data }) }, TrapReason::Termination => - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), TrapReason::SupervisorError(error) => return Err(error.into()), } } @@ -494,7 +494,7 @@ where // Check the exact type of the error. match sandbox_result { // No traps were generated. Proceed normally. - Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), + Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }), // `Error::Module` is returned only if instantiation or linking failed (i.e. // wasm binary tried to import a function that is not provided by the host). // This shouldn't happen because validation process ought to reject such binaries. 
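With the `Bytes` newtype gone from this point onward, `ExecReturnValue::data` is a plain `Vec<u8>`, so callers drop the `.0` projection and the `Bytes(..)` constructor entirely. A minimal caller-side sketch using only items visible in this patch (`ExecReturnValue`, `ReturnFlags`, `did_revert()`); the helper function is illustrative, not part of the pallet:

use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags};

// Hypothetical helper: did the contract succeed and return exactly `expected`?
// Before this patch the comparison read `result.data.0 == expected` (or used
// `Bytes(expected.to_vec())`); now the inner `Vec<u8>` is compared directly.
fn returned_expected(result: &ExecReturnValue, expected: &[u8]) -> bool {
    !result.did_revert() && result.data.as_slice() == expected
}

fn main() {
    let ok = ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] };
    assert!(returned_expected(&ok, b"PASS"));

    let reverted = ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] };
    assert!(!returned_expected(&reverted, b"FAIL"));
}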
@@ -879,7 +879,7 @@ where if let Ok(return_value) = call_outcome { return Err(TrapReason::Return(ReturnData { flags: return_value.flags.bits(), - data: return_value.data.0, + data: return_value.data, })) } } From 9472af8e2af41b47a471c084035bf8aecf61d8da Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 3 Oct 2022 16:00:57 +0300 Subject: [PATCH 197/348] Beefy on-demand justifications as a custom RequestResponse protocol (#12124) * client/beefy: create communication module and move gossip there * client/beefy: move beefy_protocol_name module to communication * client/beefy: move notification module under communication * client/beefy: add incoming request_response protocol handler * client/beefy: keep track of connected peers and their progress * client/beefy: add logic for generating Justif requests * client/beefy: cancel outdated on-demand justification requests * try Andre's suggestion for JustificationEngine * justif engine add justifs validation * client/beefy: impl OnDemandJustificationsEngine async next() * move beefy proto name test * client/beefy: initialize OnDemandJustificationsEngine * client/tests: allow for custom req-resp protocols * client/beefy: on-demand-justif: implement simple peer selection strategy * client/beefy: fix voter initialization Fix corner case where voter gets a single burst of finality notifications just when it starts. The notification stream was consumed by "wait_for_pallet" logic, then main loop would subscribe to finality notifications, but by that time some notifications might've been lost. Fix this by subscribing the main loop to notifications before waiting for pallet to become available. Share the same stream with the main loop so that notifications for blocks before pallet available are ignored, while _all_ notifications after pallet available are processed. Add regression test for this. 
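The ordering described in this fix is the whole point: create the finality-notification subscription first, consume it while waiting for the pallet, and keep feeding the very same stream to the main loop so nothing emitted in between is dropped. A schematic sketch of that pattern only; `Notification`, the stream parameter, and the pallet check are stand-ins, not the actual client API:

use futures::{Stream, StreamExt};

// Stand-in notification type; the real voter consumes the client's finality notifications.
struct Notification { block_number: u64 }

async fn run_voter<S>(mut notifications: S, beefy_pallet_available: impl Fn(&Notification) -> bool)
where
    S: Stream<Item = Notification> + Unpin,
{
    // The subscription behind `notifications` was created *before* this function ran,
    // so nothing emitted while we wait below can be lost.
    // Phase 1: drain the shared stream until the pallet is detected; earlier blocks are ignored.
    while let Some(n) = notifications.next().await {
        if beefy_pallet_available(&n) {
            break
        }
    }
    // Phase 2: keep consuming the *same* stream, so every notification issued after the
    // pallet became available is processed by the voter's main loop.
    while let Some(n) = notifications.next().await {
        let _ = n.block_number; // vote / finality handling would go here
    }
}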
Signed-off-by: acatangiu * client/beefy: make sure justif requests are always out for mandatory blocks * client/beefy: add test for on-demand justifications sync * client/beefy: tweak main loop event processing order * client/beefy: run on-demand-justif-handler under same async task as voter * client/beefy: add test for known-peers * client/beefy: reorg request-response module * client/beefy: add issue references for future work todos * client/beefy: consolidate on-demand-justifications engine state machine Signed-off-by: acatangiu * client/beefy: fix for polkadot companion * client/beefy: implement review suggestions * cargo fmt and clippy * fix merge damage * fix rust-doc * fix merge damage * fix merge damage * client/beefy: add test for justif proto name Signed-off-by: acatangiu --- client/beefy/rpc/src/lib.rs | 6 +- .../beefy/src/{ => communication}/gossip.rs | 20 +- client/beefy/src/communication/mod.rs | 118 +++++++ .../src/{ => communication}/notification.rs | 0 client/beefy/src/communication/peers.rs | 131 ++++++++ .../incoming_requests_handler.rs | 193 ++++++++++++ .../src/communication/request_response/mod.rs | 101 ++++++ .../outgoing_requests_engine.rs | 245 +++++++++++++++ client/beefy/src/import.rs | 2 +- client/beefy/src/lib.rs | 131 ++++---- client/beefy/src/round.rs | 20 +- client/beefy/src/tests.rs | 287 ++++++++++++------ client/beefy/src/worker.rs | 166 +++++++--- client/network/test/src/lib.rs | 7 +- 14 files changed, 1208 insertions(+), 219 deletions(-) rename client/beefy/src/{ => communication}/gossip.rs (94%) create mode 100644 client/beefy/src/communication/mod.rs rename client/beefy/src/{ => communication}/notification.rs (100%) create mode 100644 client/beefy/src/communication/peers.rs create mode 100644 client/beefy/src/communication/request_response/incoming_requests_handler.rs create mode 100644 client/beefy/src/communication/request_response/mod.rs create mode 100644 client/beefy/src/communication/request_response/outgoing_requests_engine.rs diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 3be182ceb8f39..0af474116e6d0 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -35,7 +35,9 @@ use jsonrpsee::{ }; use log::warn; -use beefy_gadget::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}; +use beefy_gadget::communication::notification::{ + BeefyBestBlockStream, BeefyVersionedFinalityProofStream, +}; mod notification; @@ -165,8 +167,8 @@ mod tests { use super::*; use beefy_gadget::{ + communication::notification::BeefyVersionedFinalityProofSender, justification::BeefyVersionedFinalityProof, - notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofSender}, }; use beefy_primitives::{known_payload_ids, Payload, SignedCommitment}; use codec::{Decode, Encode}; diff --git a/client/beefy/src/gossip.rs b/client/beefy/src/communication/gossip.rs similarity index 94% rename from client/beefy/src/gossip.rs rename to client/beefy/src/communication/gossip.rs index 02d5efe9e0e58..6c41a2e48932a 100644 --- a/client/beefy/src/gossip.rs +++ b/client/beefy/src/communication/gossip.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::{collections::BTreeMap, time::Duration}; +use std::{collections::BTreeMap, sync::Arc, time::Duration}; use sc_network::PeerId; use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorContext}; @@ -28,13 +28,12 @@ use log::{debug, trace}; use parking_lot::{Mutex, RwLock}; use wasm_timer::Instant; +use crate::{communication::peers::KnownPeers, keystore::BeefyKeystore}; use beefy_primitives::{ crypto::{Public, Signature}, VoteMessage, }; -use crate::keystore::BeefyKeystore; - // Timeout for rebroadcasting messages. const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); @@ -103,17 +102,19 @@ where topic: B::Hash, known_votes: RwLock>, next_rebroadcast: Mutex, + known_peers: Arc>>, } impl GossipValidator where B: Block, { - pub fn new() -> GossipValidator { + pub fn new(known_peers: Arc>>) -> GossipValidator { GossipValidator { topic: topic::(), known_votes: RwLock::new(KnownVotes::new()), next_rebroadcast: Mutex::new(Instant::now() + REBROADCAST_AFTER), + known_peers, } } @@ -165,6 +166,7 @@ where if BeefyKeystore::verify(&msg.id, &msg.signature, &msg.commitment.encode()) { self.known_votes.write().add_known(&round, msg_hash); + self.known_peers.lock().note_vote_for(*sender, round); return ValidationResult::ProcessAndKeep(self.topic) } else { // TODO: report peer @@ -271,7 +273,7 @@ mod tests { #[test] fn note_and_drop_round_works() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); gv.note_round(1u64); @@ -298,7 +300,7 @@ mod tests { #[test] fn note_same_round_twice() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); gv.note_round(3u64); gv.note_round(7u64); @@ -355,7 +357,7 @@ mod tests { #[test] fn should_avoid_verifying_signatures_twice() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); let sender = sc_network::PeerId::random(); let mut context = TestContext; @@ -391,7 +393,7 @@ mod tests { #[test] fn messages_allowed_and_expired() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); let sender = sc_network::PeerId::random(); let topic = Default::default(); let intent = MessageIntent::Broadcast; @@ -434,7 +436,7 @@ mod tests { #[test] fn messages_rebroadcast() { - let gv = GossipValidator::::new(); + let gv = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); let sender = sc_network::PeerId::random(); let topic = Default::default(); diff --git a/client/beefy/src/communication/mod.rs b/client/beefy/src/communication/mod.rs new file mode 100644 index 0000000000000..93646677c0ecd --- /dev/null +++ b/client/beefy/src/communication/mod.rs @@ -0,0 +1,118 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Communication streams for the BEEFY networking protocols. + +pub mod notification; +pub mod request_response; + +pub(crate) mod gossip; +pub(crate) mod peers; + +pub(crate) mod beefy_protocol_name { + use array_bytes::bytes2hex; + use sc_network::ProtocolName; + + /// BEEFY votes gossip protocol name suffix. + const GOSSIP_NAME: &str = "/beefy/1"; + /// BEEFY justifications protocol name suffix. + const JUSTIFICATIONS_NAME: &str = "/beefy/justifications/1"; + + /// Old names for the gossip protocol, used for backward compatibility. + pub(super) const LEGACY_NAMES: [&str; 1] = ["/paritytech/beefy/1"]; + + /// Name of the votes gossip protocol used by BEEFY. + /// + /// Must be registered towards the networking in order for BEEFY voter to properly function. + pub fn gossip_protocol_name>( + genesis_hash: Hash, + fork_id: Option<&str>, + ) -> ProtocolName { + let genesis_hash = genesis_hash.as_ref(); + if let Some(fork_id) = fork_id { + format!("/{}/{}{}", bytes2hex("", genesis_hash), fork_id, GOSSIP_NAME).into() + } else { + format!("/{}{}", bytes2hex("", genesis_hash), GOSSIP_NAME).into() + } + } + + /// Name of the BEEFY justifications request-response protocol. + pub fn justifications_protocol_name>( + genesis_hash: Hash, + fork_id: Option<&str>, + ) -> ProtocolName { + let genesis_hash = genesis_hash.as_ref(); + if let Some(fork_id) = fork_id { + format!("/{}/{}{}", bytes2hex("", genesis_hash), fork_id, JUSTIFICATIONS_NAME).into() + } else { + format!("/{}{}", bytes2hex("", genesis_hash), JUSTIFICATIONS_NAME).into() + } + } +} + +/// Returns the configuration value to put in +/// [`sc_network::config::NetworkConfiguration::extra_sets`]. +/// For standard protocol name see [`beefy_protocol_name::gossip_protocol_name`]. +pub fn beefy_peers_set_config( + gossip_protocol_name: sc_network::ProtocolName, +) -> sc_network_common::config::NonDefaultSetConfig { + let mut cfg = + sc_network_common::config::NonDefaultSetConfig::new(gossip_protocol_name, 1024 * 1024); + + cfg.allow_non_reserved(25, 25); + cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); + cfg +} + +#[cfg(test)] +mod tests { + use super::*; + + use sp_core::H256; + + #[test] + fn beefy_protocols_names() { + use beefy_protocol_name::{gossip_protocol_name, justifications_protocol_name}; + // Create protocol name using random genesis hash. + let genesis_hash = H256::random(); + let genesis_hex = array_bytes::bytes2hex("", genesis_hash.as_ref()); + + let expected_gossip_name = format!("/{}/beefy/1", genesis_hex); + let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); + assert_eq!(gossip_proto_name.to_string(), expected_gossip_name); + + let expected_justif_name = format!("/{}/beefy/justifications/1", genesis_hex); + let justif_proto_name = justifications_protocol_name(&genesis_hash, None); + assert_eq!(justif_proto_name.to_string(), expected_justif_name); + + // Create protocol name using hardcoded genesis hash. Verify exact representation. 
+ let genesis_hash = [ + 50, 4, 60, 123, 58, 106, 216, 246, 194, 188, 139, 193, 33, 212, 202, 171, 9, 55, 123, + 94, 8, 43, 12, 251, 187, 57, 173, 19, 188, 74, 205, 147, + ]; + let genesis_hex = "32043c7b3a6ad8f6c2bc8bc121d4caab09377b5e082b0cfbbb39ad13bc4acd93"; + + let expected_gossip_name = format!("/{}/beefy/1", genesis_hex); + let gossip_proto_name = gossip_protocol_name(&genesis_hash, None); + assert_eq!(gossip_proto_name.to_string(), expected_gossip_name); + + let expected_justif_name = format!("/{}/beefy/justifications/1", genesis_hex); + let justif_proto_name = justifications_protocol_name(&genesis_hash, None); + assert_eq!(justif_proto_name.to_string(), expected_justif_name); + } +} diff --git a/client/beefy/src/notification.rs b/client/beefy/src/communication/notification.rs similarity index 100% rename from client/beefy/src/notification.rs rename to client/beefy/src/communication/notification.rs diff --git a/client/beefy/src/communication/peers.rs b/client/beefy/src/communication/peers.rs new file mode 100644 index 0000000000000..0e20a0f4e0ff6 --- /dev/null +++ b/client/beefy/src/communication/peers.rs @@ -0,0 +1,131 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Logic for keeping track of BEEFY peers. + +// TODO (issue #12296): replace this naive peer tracking with generic one that infers data +// from multiple network protocols. + +use sc_network::PeerId; +use sp_runtime::traits::{Block, NumberFor, Zero}; +use std::collections::{HashMap, VecDeque}; + +struct PeerData { + last_voted_on: NumberFor, +} + +impl Default for PeerData { + fn default() -> Self { + PeerData { last_voted_on: Zero::zero() } + } +} + +/// Keep a simple map of connected peers +/// and the most recent voting round they participated in. +pub struct KnownPeers { + live: HashMap>, +} + +impl KnownPeers { + pub fn new() -> Self { + Self { live: HashMap::new() } + } + + /// Add new connected `peer`. + pub fn add_new(&mut self, peer: PeerId) { + self.live.entry(peer).or_default(); + } + + /// Note vote round number for `peer`. + pub fn note_vote_for(&mut self, peer: PeerId, round: NumberFor) { + let data = self.live.entry(peer).or_default(); + data.last_voted_on = round.max(data.last_voted_on); + } + + /// Remove connected `peer`. + pub fn remove(&mut self, peer: &PeerId) { + self.live.remove(peer); + } + + /// Return _filtered and cloned_ list of peers that have voted on `block` or higher. + pub fn at_least_at_block(&self, block: NumberFor) -> VecDeque { + self.live + .iter() + .filter_map(|(k, v)| (v.last_voted_on >= block).then_some(k)) + .cloned() + .collect() + } + + /// Answer whether `peer` is part of `KnownPeers` set. 
+ pub fn contains(&self, peer: &PeerId) -> bool { + self.live.contains_key(peer) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_track_known_peers_progress() { + let (alice, bob, charlie) = (PeerId::random(), PeerId::random(), PeerId::random()); + let mut peers = KnownPeers::::new(); + assert!(peers.live.is_empty()); + + // Alice and Bob new connected peers. + peers.add_new(alice); + peers.add_new(bob); + // 'Tracked' Bob seen voting for 5. + peers.note_vote_for(bob, 5); + // Previously unseen Charlie now seen voting for 10. + peers.note_vote_for(charlie, 10); + + assert_eq!(peers.live.len(), 3); + assert!(peers.contains(&alice)); + assert!(peers.contains(&bob)); + assert!(peers.contains(&charlie)); + + // Get peers at block >= 5 + let at_5 = peers.at_least_at_block(5); + // Should be Bob and Charlie + assert_eq!(at_5.len(), 2); + assert!(at_5.contains(&bob)); + assert!(at_5.contains(&charlie)); + + // 'Tracked' Alice seen voting for 10. + peers.note_vote_for(alice, 10); + + // Get peers at block >= 9 + let at_9 = peers.at_least_at_block(9); + // Should be Charlie and Alice + assert_eq!(at_9.len(), 2); + assert!(at_9.contains(&charlie)); + assert!(at_9.contains(&alice)); + + // Remove Alice + peers.remove(&alice); + assert_eq!(peers.live.len(), 2); + assert!(!peers.contains(&alice)); + + // Get peers at block >= 9 + let at_9 = peers.at_least_at_block(9); + // Now should be just Charlie + assert_eq!(at_9.len(), 1); + assert!(at_9.contains(&charlie)); + } +} diff --git a/client/beefy/src/communication/request_response/incoming_requests_handler.rs b/client/beefy/src/communication/request_response/incoming_requests_handler.rs new file mode 100644 index 0000000000000..c0910a60fba3b --- /dev/null +++ b/client/beefy/src/communication/request_response/incoming_requests_handler.rs @@ -0,0 +1,193 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Helper for handling (i.e. answering) BEEFY justifications requests from a remote peer. + +use beefy_primitives::BEEFY_ENGINE_ID; +use codec::Decode; +use futures::{ + channel::{mpsc, oneshot}, + StreamExt, +}; +use log::{debug, trace}; +use sc_client_api::BlockBackend; +use sc_network::{config as netconfig, config::RequestResponseConfig, PeerId, ReputationChange}; +use sc_network_common::protocol::ProtocolName; +use sp_runtime::{generic::BlockId, traits::Block}; +use std::{marker::PhantomData, sync::Arc}; + +use crate::communication::request_response::{ + on_demand_justifications_protocol_config, Error, JustificationRequest, +}; + +/// A request coming in, including a sender for sending responses. +#[derive(Debug)] +pub(crate) struct IncomingRequest { + /// `PeerId` of sending peer. + pub peer: PeerId, + /// The sent request. + pub payload: JustificationRequest, + /// Sender for sending response back. 
+ pub pending_response: oneshot::Sender, +} + +impl IncomingRequest { + /// Create new `IncomingRequest`. + pub fn new( + peer: PeerId, + payload: JustificationRequest, + pending_response: oneshot::Sender, + ) -> Self { + Self { peer, payload, pending_response } + } + + /// Try building from raw network request. + /// + /// This function will fail if the request cannot be decoded and will apply passed in + /// reputation changes in that case. + /// + /// Params: + /// - The raw request to decode + /// - Reputation changes to apply for the peer in case decoding fails. + pub fn try_from_raw( + raw: netconfig::IncomingRequest, + reputation_changes: Vec, + ) -> Result { + let netconfig::IncomingRequest { payload, peer, pending_response } = raw; + let payload = match JustificationRequest::decode(&mut payload.as_ref()) { + Ok(payload) => payload, + Err(err) => { + let response = netconfig::OutgoingResponse { + result: Err(()), + reputation_changes, + sent_feedback: None, + }; + if let Err(_) = pending_response.send(response) { + return Err(Error::DecodingErrorNoReputationChange(peer, err)) + } + return Err(Error::DecodingError(peer, err)) + }, + }; + Ok(Self::new(peer, payload, pending_response)) + } +} + +/// Receiver for incoming BEEFY justifications requests. +/// +/// Takes care of decoding and handling of invalid encoded requests. +pub(crate) struct IncomingRequestReceiver { + raw: mpsc::Receiver, +} + +impl IncomingRequestReceiver { + pub fn new(inner: mpsc::Receiver) -> Self { + Self { raw: inner } + } + + /// Try to receive the next incoming request. + /// + /// Any received request will be decoded, on decoding errors the provided reputation changes + /// will be applied and an error will be reported. + pub async fn recv(&mut self, reputation_changes: F) -> Result, Error> + where + B: Block, + F: FnOnce() -> Vec, + { + let req = match self.raw.next().await { + None => return Err(Error::RequestChannelExhausted), + Some(raw) => IncomingRequest::::try_from_raw(raw, reputation_changes())?, + }; + Ok(req) + } +} + +/// Handler for incoming BEEFY justifications requests from a remote peer. +pub struct BeefyJustifsRequestHandler { + pub(crate) request_receiver: IncomingRequestReceiver, + pub(crate) justif_protocol_name: ProtocolName, + pub(crate) client: Arc, + pub(crate) _block: PhantomData, +} + +impl BeefyJustifsRequestHandler +where + B: Block, + Client: BlockBackend + Send + Sync, +{ + /// Create a new [`BeefyJustifsRequestHandler`]. + pub fn new>( + genesis_hash: Hash, + fork_id: Option<&str>, + client: Arc, + ) -> (Self, RequestResponseConfig) { + let (request_receiver, config) = + on_demand_justifications_protocol_config(genesis_hash, fork_id); + let justif_protocol_name = config.name.clone(); + + (Self { request_receiver, justif_protocol_name, client, _block: PhantomData }, config) + } + + /// Network request-response protocol name used by this handler. + pub fn protocol_name(&self) -> ProtocolName { + self.justif_protocol_name.clone() + } + + // Sends back justification response if justification found in client backend. + fn handle_request(&self, request: IncomingRequest) -> Result<(), Error> { + // TODO (issue #12293): validate `request` and change peer reputation for invalid requests. + + let maybe_encoded_proof = self + .client + .justifications(&BlockId::Number(request.payload.begin)) + .map_err(Error::Client)? + .and_then(|justifs| justifs.get(BEEFY_ENGINE_ID).cloned()) + // No BEEFY justification present. 
+ .ok_or(()); + + request + .pending_response + .send(netconfig::OutgoingResponse { + result: maybe_encoded_proof, + reputation_changes: Vec::new(), + sent_feedback: None, + }) + .map_err(|_| Error::SendResponse) + } + + /// Run [`BeefyJustifsRequestHandler`]. + pub async fn run(mut self) { + trace!(target: "beefy::sync", "🥩 Running BeefyJustifsRequestHandler"); + + while let Ok(request) = self.request_receiver.recv(|| vec![]).await { + let peer = request.peer; + match self.handle_request(request) { + Ok(()) => { + debug!( + target: "beefy::sync", + "🥩 Handled BEEFY justification request from {:?}.", peer + ) + }, + Err(e) => { + // TODO (issue #12293): apply reputation changes here based on error type. + debug!( + target: "beefy::sync", + "🥩 Failed to handle BEEFY justification request from {:?}: {}", peer, e, + ) + }, + } + } + } +} diff --git a/client/beefy/src/communication/request_response/mod.rs b/client/beefy/src/communication/request_response/mod.rs new file mode 100644 index 0000000000000..c83bb9d57e91b --- /dev/null +++ b/client/beefy/src/communication/request_response/mod.rs @@ -0,0 +1,101 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Request/response protocol for syncing BEEFY justifications. + +mod incoming_requests_handler; +pub(crate) mod outgoing_requests_engine; + +pub use incoming_requests_handler::BeefyJustifsRequestHandler; + +use futures::channel::mpsc; +use std::time::Duration; + +use codec::{Decode, Encode, Error as CodecError}; +use sc_network::{config::RequestResponseConfig, PeerId}; +use sp_runtime::traits::{Block, NumberFor}; + +use crate::communication::beefy_protocol_name::justifications_protocol_name; +use incoming_requests_handler::IncomingRequestReceiver; + +// 10 seems reasonable, considering justifs are explicitly requested only +// for mandatory blocks, by nodes that are syncing/catching-up. +const JUSTIF_CHANNEL_SIZE: usize = 10; + +const MAX_RESPONSE_SIZE: u64 = 1024 * 1024; +const JUSTIF_REQUEST_TIMEOUT: Duration = Duration::from_secs(3); + +/// Get the configuration for the BEEFY justifications Request/response protocol. +/// +/// Returns a receiver for messages received on this protocol and the requested +/// `ProtocolConfig`. +/// +/// Consider using [`BeefyJustifsRequestHandler`] instead of this low-level function. 
+pub(crate) fn on_demand_justifications_protocol_config>( + genesis_hash: Hash, + fork_id: Option<&str>, +) -> (IncomingRequestReceiver, RequestResponseConfig) { + let name = justifications_protocol_name(genesis_hash, fork_id); + let fallback_names = vec![]; + let (tx, rx) = mpsc::channel(JUSTIF_CHANNEL_SIZE); + let rx = IncomingRequestReceiver::new(rx); + let cfg = RequestResponseConfig { + name, + fallback_names, + max_request_size: 32, + max_response_size: MAX_RESPONSE_SIZE, + // We are connected to all validators: + request_timeout: JUSTIF_REQUEST_TIMEOUT, + inbound_queue: Some(tx), + }; + (rx, cfg) +} + +/// BEEFY justification request. +#[derive(Debug, Clone, Encode, Decode)] +pub struct JustificationRequest { + /// Start collecting proofs from this block. + pub begin: NumberFor, +} + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error(transparent)] + Client(#[from] sp_blockchain::Error), + + #[error(transparent)] + RuntimeApi(#[from] sp_api::ApiError), + + /// Decoding failed, we were able to change the peer's reputation accordingly. + #[error("Decoding request failed for peer {0}.")] + DecodingError(PeerId, #[source] CodecError), + + /// Decoding failed, but sending reputation change failed. + #[error("Decoding request failed for peer {0}, and changing reputation failed.")] + DecodingErrorNoReputationChange(PeerId, #[source] CodecError), + + /// Incoming request stream exhausted. Should only happen on shutdown. + #[error("Incoming request channel got closed.")] + RequestChannelExhausted, + + #[error("Failed to send response.")] + SendResponse, + + #[error("Received invalid response.")] + InvalidResponse, +} diff --git a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs new file mode 100644 index 0000000000000..e22958e19cd2e --- /dev/null +++ b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Generating request logic for request/response protocol for syncing BEEFY justifications. 
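At its core the engine implemented below is a retry loop over a snapshot of likely peers: take the peers known to have voted at or beyond the target block, ask them one at a time, and stop at the first response that decodes and verifies as a valid finality proof. A stripped-down, synchronous sketch of just that selection/retry logic; `fetch` and `verify` are stand-ins for the network request and `decode_and_verify_finality_proof`, not the actual functions:

use std::collections::VecDeque;

// Stand-in peer identifier; the real engine uses `sc_network::PeerId`.
type Peer = u64;

// Illustrative only: try candidate peers in order until one yields a verifiable proof.
fn request_with_retry<Proof>(
    mut candidates: VecDeque<Peer>,
    fetch: impl Fn(Peer) -> Option<Vec<u8>>, // network request; `None` models failure/hang-up
    verify: impl Fn(&[u8]) -> Option<Proof>, // decoding + signature/threshold verification
) -> Option<Proof> {
    while let Some(peer) = candidates.pop_front() {
        if let Some(encoded) = fetch(peer) {
            if let Some(proof) = verify(&encoded) {
                return Some(proof)
            }
        }
        // Missing or invalid response: fall through and try the next candidate peer.
    }
    None
}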
+ +use beefy_primitives::{crypto::AuthorityId, BeefyApi, ValidatorSet}; +use codec::Encode; +use futures::{ + channel::{oneshot, oneshot::Canceled}, + stream::{self, StreamExt}, +}; +use log::{debug, error, warn}; +use parking_lot::Mutex; +use sc_network::{PeerId, ProtocolName}; +use sc_network_common::{ + request_responses::{IfDisconnected, RequestFailure}, + service::NetworkRequest, +}; +use sp_api::ProvideRuntimeApi; +use sp_runtime::{ + generic::BlockId, + traits::{Block, NumberFor}, +}; +use std::{collections::VecDeque, result::Result, sync::Arc}; + +use crate::{ + communication::request_response::{Error, JustificationRequest}, + justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, + KnownPeers, +}; + +/// Response type received from network. +type Response = Result, RequestFailure>; +/// Used to receive a response from the network. +type ResponseReceiver = oneshot::Receiver; + +enum State { + Idle(stream::Pending>), + AwaitingResponse(PeerId, NumberFor, stream::Once), +} + +pub struct OnDemandJustificationsEngine { + network: Arc, + runtime: Arc, + protocol_name: ProtocolName, + + live_peers: Arc>>, + peers_cache: VecDeque, + + state: State, +} + +impl OnDemandJustificationsEngine +where + B: Block, + R: ProvideRuntimeApi, + R::Api: BeefyApi, +{ + pub fn new( + network: Arc, + runtime: Arc, + protocol_name: ProtocolName, + live_peers: Arc>>, + ) -> Self { + Self { + network, + runtime, + protocol_name, + live_peers, + peers_cache: VecDeque::new(), + state: State::Idle(stream::pending()), + } + } + + fn reset_peers_cache_for_block(&mut self, block: NumberFor) { + // TODO (issue #12296): replace peer selection with generic one that involves all protocols. + self.peers_cache = self.live_peers.lock().at_least_at_block(block); + } + + fn try_next_peer(&mut self) -> Option { + // TODO (issue #12296): replace peer selection with generic one that involves all protocols. + let live = self.live_peers.lock(); + while let Some(peer) = self.peers_cache.pop_front() { + if live.contains(&peer) { + return Some(peer) + } + } + None + } + + fn request_from_peer(&mut self, peer: PeerId, block: NumberFor) { + debug!(target: "beefy::sync", "🥩 requesting justif #{:?} from peer {:?}", block, peer); + + let payload = JustificationRequest:: { begin: block }.encode(); + + let (tx, rx) = oneshot::channel(); + + self.network.start_request( + peer, + self.protocol_name.clone(), + payload, + tx, + IfDisconnected::ImmediateError, + ); + + self.state = State::AwaitingResponse(peer, block, stream::once(rx)); + } + + /// If no other request is in progress, start new justification request for `block`. + pub fn request(&mut self, block: NumberFor) { + // ignore new requests while there's already one pending + match &self.state { + State::AwaitingResponse(_, _, _) => return, + State::Idle(_) => (), + } + self.reset_peers_cache_for_block(block); + + // Start the requests engine - each unsuccessful received response will automatically + // trigger a new request to the next peer in the `peers_cache` until there are none left. + if let Some(peer) = self.try_next_peer() { + self.request_from_peer(peer, block); + } else { + debug!(target: "beefy::sync", "🥩 no good peers to request justif #{:?} from", block); + } + } + + /// Cancel any pending request for block numbers smaller or equal to `block`. 
+ pub fn cancel_requests_older_than(&mut self, block: NumberFor) { + match &self.state { + State::AwaitingResponse(_, number, _) if *number <= block => { + debug!( + target: "beefy::sync", + "🥩 cancel pending request for justification #{:?}", + number + ); + self.state = State::Idle(stream::pending()); + }, + _ => (), + } + } + + fn process_response( + &mut self, + peer: PeerId, + block: NumberFor, + validator_set: &ValidatorSet, + response: Result, + ) -> Result, Error> { + response + .map_err(|e| { + debug!( + target: "beefy::sync", + "🥩 for on demand justification #{:?}, peer {:?} hung up: {:?}", + block, peer, e + ); + Error::InvalidResponse + })? + .map_err(|e| { + debug!( + target: "beefy::sync", + "🥩 for on demand justification #{:?}, peer {:?} error: {:?}", + block, peer, e + ); + Error::InvalidResponse + }) + .and_then(|encoded| { + decode_and_verify_finality_proof::(&encoded[..], block, &validator_set).map_err( + |e| { + debug!( + target: "beefy::sync", + "🥩 for on demand justification #{:?}, peer {:?} responded with invalid proof: {:?}", + block, peer, e + ); + Error::InvalidResponse + }, + ) + }) + } + + pub async fn next(&mut self) -> Option> { + let (peer, block, resp) = match &mut self.state { + State::Idle(pending) => { + let _ = pending.next().await; + // This never happens since 'stream::pending' never generates any items. + return None + }, + State::AwaitingResponse(peer, block, receiver) => { + let resp = receiver.next().await?; + (*peer, *block, resp) + }, + }; + // We received the awaited response. Our 'stream::once()' receiver will never generate any + // other response, meaning we're done with current state. Move the engine to `State::Idle`. + self.state = State::Idle(stream::pending()); + + let block_id = BlockId::number(block); + let validator_set = self + .runtime + .runtime_api() + .validator_set(&block_id) + .map_err(|e| { + error!(target: "beefy::sync", "🥩 Runtime API error {:?} in on-demand justif engine.", e); + e + }) + .ok()? + .or_else(|| { + error!(target: "beefy::sync", "🥩 BEEFY pallet not available for block {:?}.", block); + None + })?; + + self.process_response(peer, block, &validator_set, resp) + .map_err(|_| { + // No valid justification received, try next peer in our set. + if let Some(peer) = self.try_next_peer() { + self.request_from_peer(peer, block); + } else { + warn!(target: "beefy::sync", "🥩 ran out of peers to request justif #{:?} from", block); + } + }) + .map(|proof| { + debug!( + target: "beefy::sync", + "🥩 received valid on-demand justif #{:?} from {:?}", + block, peer + ); + proof + }) + .ok() + } +} diff --git a/client/beefy/src/import.rs b/client/beefy/src/import.rs index db4d8bfba7450..89a4517334189 100644 --- a/client/beefy/src/import.rs +++ b/client/beefy/src/import.rs @@ -33,8 +33,8 @@ use sc_client_api::backend::Backend; use sc_consensus::{BlockCheckParams, BlockImport, BlockImportParams, ImportResult}; use crate::{ + communication::notification::BeefyVersionedFinalityProofSender, justification::{decode_and_verify_finality_proof, BeefyVersionedFinalityProof}, - notification::BeefyVersionedFinalityProofSender, }; /// A block-import handler for BEEFY. diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index ad527b2929585..7407f101e99a5 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -17,10 +17,12 @@ // along with this program. If not, see . 
use beefy_primitives::{BeefyApi, MmrRootHash}; +use parking_lot::Mutex; use prometheus::Registry; -use sc_client_api::{Backend, BlockchainEvents, Finalizer}; +use sc_client_api::{Backend, BlockBackend, BlockchainEvents, Finalizer}; use sc_consensus::BlockImport; use sc_network::ProtocolName; +use sc_network_common::service::NetworkRequest; use sc_network_gossip::Network as GossipNetwork; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; @@ -28,68 +30,38 @@ use sp_consensus::{Error as ConsensusError, SyncOracle}; use sp_keystore::SyncCryptoStorePtr; use sp_mmr_primitives::MmrApi; use sp_runtime::traits::Block; -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; mod error; -mod gossip; mod keystore; mod metrics; mod round; mod worker; +pub mod communication; pub mod import; pub mod justification; -pub mod notification; #[cfg(test)] mod tests; use crate::{ - import::BeefyBlockImport, - notification::{ - BeefyBestBlockSender, BeefyBestBlockStream, BeefyVersionedFinalityProofSender, - BeefyVersionedFinalityProofStream, + communication::{ + notification::{ + BeefyBestBlockSender, BeefyBestBlockStream, BeefyVersionedFinalityProofSender, + BeefyVersionedFinalityProofStream, + }, + peers::KnownPeers, + request_response::{ + outgoing_requests_engine::OnDemandJustificationsEngine, BeefyJustifsRequestHandler, + }, }, + import::BeefyBlockImport, }; -pub use beefy_protocol_name::standard_name as protocol_standard_name; - -pub(crate) mod beefy_protocol_name { - use sc_chain_spec::ChainSpec; - use sc_network::ProtocolName; - - const NAME: &str = "/beefy/1"; - /// Old names for the notifications protocol, used for backward compatibility. - pub(crate) const LEGACY_NAMES: [&str; 1] = ["/paritytech/beefy/1"]; - - /// Name of the notifications protocol used by BEEFY. - /// - /// Must be registered towards the networking in order for BEEFY to properly function. - pub fn standard_name>( - genesis_hash: &Hash, - chain_spec: &Box, - ) -> ProtocolName { - let genesis_hash = genesis_hash.as_ref(); - let chain_prefix = match chain_spec.fork_id() { - Some(fork_id) => format!("/{}/{}", array_bytes::bytes2hex("", genesis_hash), fork_id), - None => format!("/{}", array_bytes::bytes2hex("", genesis_hash)), - }; - format!("{}{}", chain_prefix, NAME).into() - } -} - -/// Returns the configuration value to put in -/// [`sc_network::config::NetworkConfiguration::extra_sets`]. -/// For standard protocol name see [`beefy_protocol_name::standard_name`]. -pub fn beefy_peers_set_config( - protocol_name: ProtocolName, -) -> sc_network_common::config::NonDefaultSetConfig { - let mut cfg = sc_network_common::config::NonDefaultSetConfig::new(protocol_name, 1024 * 1024); - - cfg.allow_non_reserved(25, 25); - cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); - cfg -} +pub use communication::beefy_protocol_name::{ + gossip_protocol_name, justifications_protocol_name as justifs_protocol_name, +}; /// A convenience BEEFY client trait that defines all the type bounds a BEEFY client /// has to satisfy. Ideally that should actually be a trait alias. 
Unfortunately as @@ -159,13 +131,13 @@ where { // Voter -> RPC links let (to_rpc_justif_sender, from_voter_justif_stream) = - notification::BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); let (to_rpc_best_block_sender, from_voter_best_beefy_stream) = - notification::BeefyBestBlockStream::::channel(); + BeefyBestBlockStream::::channel(); // BlockImport -> Voter links let (to_voter_justif_sender, from_block_import_justif_stream) = - notification::BeefyVersionedFinalityProofStream::::channel(); + BeefyVersionedFinalityProofStream::::channel(); // BlockImport let import = @@ -180,6 +152,24 @@ where (import, voter_links, rpc_links) } +/// BEEFY gadget network parameters. +pub struct BeefyNetworkParams +where + B: Block, + N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, +{ + /// Network implementing gossip, requests and sync-oracle. + pub network: Arc, + /// Chain specific BEEFY gossip protocol name. See + /// [`communication::beefy_protocol_name::gossip_protocol_name`]. + pub gossip_protocol_name: ProtocolName, + /// Chain specific BEEFY on-demand justifications protocol name. See + /// [`communication::beefy_protocol_name::justifications_protocol_name`]. + pub justifications_protocol_name: ProtocolName, + + pub _phantom: PhantomData, +} + /// BEEFY gadget initialization parameters. pub struct BeefyParams where @@ -188,7 +178,7 @@ where C: Client, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, - N: GossipNetwork + Clone + SyncOracle + Send + Sync + 'static, + N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, { /// BEEFY client pub client: Arc, @@ -198,16 +188,16 @@ where pub runtime: Arc, /// Local key store pub key_store: Option, - /// Gossip network - pub network: N, + /// BEEFY voter network params + pub network_params: BeefyNetworkParams, /// Minimal delta between blocks, BEEFY should vote for pub min_block_delta: u32, /// Prometheus metric registry pub prometheus_registry: Option, - /// Chain specific GRANDPA protocol name. See [`beefy_protocol_name::standard_name`]. - pub protocol_name: ProtocolName, /// Links between the block importer, the background voter and the RPC layer. pub links: BeefyVoterLinks, + /// Handler for incoming BEEFY justifications requests from a remote peer. + pub on_demand_justifications_handler: BeefyJustifsRequestHandler, } /// Start the BEEFY gadget. @@ -217,32 +207,43 @@ pub async fn start_beefy_gadget(beefy_params: BeefyParams, - C: Client, + C: Client + BlockBackend, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, - N: GossipNetwork + Clone + SyncOracle + Send + Sync + 'static, + N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, { let BeefyParams { client, backend, runtime, key_store, - network, + network_params, min_block_delta, prometheus_registry, - protocol_name, links, + on_demand_justifications_handler, } = beefy_params; - let sync_oracle = network.clone(); - let gossip_validator = Arc::new(gossip::GossipValidator::new()); + let BeefyNetworkParams { network, gossip_protocol_name, justifications_protocol_name, .. 
} = + network_params; + + let known_peers = Arc::new(Mutex::new(KnownPeers::new())); + let gossip_validator = + Arc::new(communication::gossip::GossipValidator::new(known_peers.clone())); let gossip_engine = sc_network_gossip::GossipEngine::new( - network, - protocol_name, + network.clone(), + gossip_protocol_name, gossip_validator.clone(), None, ); + let on_demand_justifications = OnDemandJustificationsEngine::new( + network.clone(), + runtime.clone(), + justifications_protocol_name, + known_peers.clone(), + ); + let metrics = prometheus_registry.as_ref().map(metrics::Metrics::register).and_then( |result| match result { @@ -261,10 +262,12 @@ where client, backend, runtime, - sync_oracle, + network, key_store: key_store.into(), + known_peers, gossip_engine, gossip_validator, + on_demand_justifications, links, metrics, min_block_delta, @@ -272,5 +275,5 @@ where let worker = worker::BeefyWorker::<_, _, _, _, _>::new(worker_params); - worker.run().await + futures::future::join(worker.run(), on_demand_justifications_handler.run()).await; } diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs index c96613eb38a95..45d346ccd85eb 100644 --- a/client/beefy/src/round.rs +++ b/client/beefy/src/round.rs @@ -33,7 +33,7 @@ use sp_runtime::traits::{Block, NumberFor}; /// whether the local `self` validator has voted/signed. /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). -#[derive(Default)] +#[derive(Debug, Default)] struct RoundTracker { self_vote: bool, votes: HashMap, @@ -69,6 +69,7 @@ pub fn threshold(authorities: usize) -> usize { /// Only round numbers > `best_done` are of interest, all others are considered stale. /// /// Does not do any validation on votes or signatures, layers above need to handle that (gossip). 
+#[derive(Debug)] pub(crate) struct Rounds { rounds: BTreeMap<(Payload, NumberFor), RoundTracker>, session_start: NumberFor, @@ -135,7 +136,7 @@ where } } - pub(crate) fn try_conclude( + pub(crate) fn should_conclude( &mut self, round: &(P, NumberFor), ) -> Option>> { @@ -148,7 +149,6 @@ where if done { let signatures = self.rounds.remove(round)?.votes; - self.conclude(round.1); Some( self.validators() .iter() @@ -279,7 +279,7 @@ mod tests { true )); // round not concluded - assert!(rounds.try_conclude(&round).is_none()); + assert!(rounds.should_conclude(&round).is_none()); // self vote already present, should not self vote assert!(!rounds.should_self_vote(&round)); @@ -296,7 +296,7 @@ mod tests { (Keyring::Dave.public(), Keyring::Dave.sign(b"I am committed")), false )); - assert!(rounds.try_conclude(&round).is_none()); + assert!(rounds.should_conclude(&round).is_none()); // add 2nd good vote assert!(rounds.add_vote( @@ -305,7 +305,7 @@ mod tests { false )); // round not concluded - assert!(rounds.try_conclude(&round).is_none()); + assert!(rounds.should_conclude(&round).is_none()); // add 3rd good vote assert!(rounds.add_vote( @@ -314,7 +314,8 @@ mod tests { false )); // round concluded - assert!(rounds.try_conclude(&round).is_some()); + assert!(rounds.should_conclude(&round).is_some()); + rounds.conclude(round.1); // Eve is a validator, but round was concluded, adding vote disallowed assert!(!rounds.add_vote( @@ -432,11 +433,12 @@ mod tests { assert_eq!(3, rounds.rounds.len()); // conclude unknown round - assert!(rounds.try_conclude(&(H256::from_low_u64_le(5), 5)).is_none()); + assert!(rounds.should_conclude(&(H256::from_low_u64_le(5), 5)).is_none()); assert_eq!(3, rounds.rounds.len()); // conclude round 2 - let signatures = rounds.try_conclude(&(H256::from_low_u64_le(2), 2)).unwrap(); + let signatures = rounds.should_conclude(&(H256::from_low_u64_le(2), 2)).unwrap(); + rounds.conclude(2); assert_eq!(1, rounds.rounds.len()); assert_eq!( diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 3e49f4e05cc91..8057bd7cab7a5 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -21,10 +21,9 @@ use futures::{future, stream::FuturesUnordered, Future, StreamExt}; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; -use std::{collections::HashMap, sync::Arc, task::Poll}; +use std::{collections::HashMap, marker::PhantomData, sync::Arc, task::Poll}; use tokio::{runtime::Runtime, time::Duration}; -use sc_chain_spec::{ChainSpec, GenericChainSpec}; use sc_client_api::HeaderBackend; use sc_consensus::{ BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, @@ -33,7 +32,7 @@ use sc_consensus::{ use sc_keystore::LocalKeystore; use sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, PassThroughVerifier, Peer, PeersClient, - TestNetFactory, + PeersFullClient, TestNetFactory, }; use sc_utils::notification::NotificationReceiver; @@ -42,6 +41,7 @@ use beefy_primitives::{ BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, VersionedFinalityProof, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, }; +use sc_network::{config::RequestResponseConfig, ProtocolName}; use sp_mmr_primitives::{ BatchProof, EncodableOpaqueLeaf, Error as MmrError, LeafIndex, MmrApi, Proof, }; @@ -60,11 +60,21 @@ use sp_runtime::{ use substrate_test_runtime_client::{runtime::Header, ClientExt}; use crate::{ - beefy_block_import_and_links, beefy_protocol_name, justification::*, - keystore::tests::Keyring as BeefyKeyring, BeefyRPCLinks, BeefyVoterLinks, + 
beefy_block_import_and_links, + communication::request_response::{ + on_demand_justifications_protocol_config, BeefyJustifsRequestHandler, + }, + gossip_protocol_name, + justification::*, + keystore::tests::Keyring as BeefyKeyring, + BeefyRPCLinks, BeefyVoterLinks, }; -pub(crate) const BEEFY_PROTOCOL_NAME: &'static str = "/beefy/1"; +const GENESIS_HASH: H256 = H256::zero(); +fn beefy_gossip_proto_name() -> ProtocolName { + gossip_protocol_name(GENESIS_HASH, None) +} + const GOOD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0xbf); const BAD_MMR_ROOT: MmrRootHash = MmrRootHash::repeat_byte(0x42); @@ -89,35 +99,12 @@ impl BuildStorage for Genesis { } } -#[test] -fn beefy_protocol_name() { - let chain_spec = GenericChainSpec::::from_json_bytes( - &include_bytes!("../../chain-spec/res/chain_spec.json")[..], - ) - .unwrap() - .cloned_box(); - - // Create protocol name using random genesis hash. - let genesis_hash = H256::random(); - let expected = format!("/{}/beefy/1", array_bytes::bytes2hex("", genesis_hash.as_ref())); - let proto_name = beefy_protocol_name::standard_name(&genesis_hash, &chain_spec); - assert_eq!(proto_name.to_string(), expected); - - // Create protocol name using hardcoded genesis hash. Verify exact representation. - let genesis_hash = [ - 50, 4, 60, 123, 58, 106, 216, 246, 194, 188, 139, 193, 33, 212, 202, 171, 9, 55, 123, 94, - 8, 43, 12, 251, 187, 57, 173, 19, 188, 74, 205, 147, - ]; - let expected = - "/32043c7b3a6ad8f6c2bc8bc121d4caab09377b5e082b0cfbbb39ad13bc4acd93/beefy/1".to_string(); - let proto_name = beefy_protocol_name::standard_name(&genesis_hash, &chain_spec); - assert_eq!(proto_name.to_string(), expected); -} - #[derive(Default)] pub(crate) struct PeerData { pub(crate) beefy_rpc_links: Mutex>>, pub(crate) beefy_voter_links: Mutex>>, + pub(crate) beefy_justif_req_handler: + Mutex>>, } #[derive(Default)] @@ -126,23 +113,34 @@ pub(crate) struct BeefyTestNet { } impl BeefyTestNet { - pub(crate) fn new(n_authority: usize, n_full: usize) -> Self { - let mut net = BeefyTestNet { peers: Vec::with_capacity(n_authority + n_full) }; - for _ in 0..n_authority { - net.add_authority_peer(); - } - for _ in 0..n_full { - net.add_full_peer(); + pub(crate) fn new(n_authority: usize) -> Self { + let mut net = BeefyTestNet { peers: Vec::with_capacity(n_authority) }; + + for i in 0..n_authority { + let (rx, cfg) = on_demand_justifications_protocol_config(GENESIS_HASH, None); + let justif_protocol_name = cfg.name.clone(); + + net.add_authority_peer(vec![cfg]); + + let client = net.peers[i].client().as_client(); + let justif_handler = BeefyJustifsRequestHandler { + request_receiver: rx, + justif_protocol_name, + client, + _block: PhantomData, + }; + *net.peers[i].data.beefy_justif_req_handler.lock() = Some(justif_handler); } net } - pub(crate) fn add_authority_peer(&mut self) { + pub(crate) fn add_authority_peer(&mut self, req_resp_cfgs: Vec) { self.add_full_peer_with_config(FullPeerConfig { - notifications_protocols: vec![BEEFY_PROTOCOL_NAME.into()], + notifications_protocols: vec![beefy_gossip_proto_name()], + request_response_protocols: req_resp_cfgs, is_authority: true, ..Default::default() - }) + }); } pub(crate) fn generate_blocks_and_sync( @@ -198,6 +196,7 @@ impl TestNetFactory for BeefyTestNet { let peer_data = PeerData { beefy_rpc_links: Mutex::new(Some(rpc_links)), beefy_voter_links: Mutex::new(Some(voter_links)), + ..Default::default() }; (BlockImportAdapter::new(block_import), None, peer_data) } @@ -215,11 +214,8 @@ impl TestNetFactory for BeefyTestNet { } fn 
add_full_peer(&mut self) { - self.add_full_peer_with_config(FullPeerConfig { - notifications_protocols: vec![BEEFY_PROTOCOL_NAME.into()], - is_authority: false, - ..Default::default() - }) + // `add_authority_peer()` used instead. + unimplemented!() } } @@ -354,7 +350,7 @@ where API: ProvideRuntimeApi + Default + Sync + Send, API::Api: BeefyApi + MmrApi, { - let voters = FuturesUnordered::new(); + let tasks = FuturesUnordered::new(); for (peer_id, key, api) in peers.into_iter() { let peer = &net.peers[peer_id]; @@ -362,31 +358,40 @@ where let keystore = create_beefy_keystore(*key); let (_, _, peer_data) = net.make_block_import(peer.client().clone()); - let PeerData { beefy_rpc_links, beefy_voter_links } = peer_data; + let PeerData { beefy_rpc_links, beefy_voter_links, .. } = peer_data; let beefy_voter_links = beefy_voter_links.lock().take(); *peer.data.beefy_rpc_links.lock() = beefy_rpc_links.lock().take(); *peer.data.beefy_voter_links.lock() = beefy_voter_links.clone(); + let on_demand_justif_handler = peer.data.beefy_justif_req_handler.lock().take().unwrap(); + + let network_params = crate::BeefyNetworkParams { + network: peer.network_service().clone(), + gossip_protocol_name: beefy_gossip_proto_name(), + justifications_protocol_name: on_demand_justif_handler.protocol_name(), + _phantom: PhantomData, + }; + let beefy_params = crate::BeefyParams { client: peer.client().as_client(), backend: peer.client().as_backend(), runtime: api.clone(), key_store: Some(keystore), - network: peer.network_service().clone(), + network_params, links: beefy_voter_links.unwrap(), min_block_delta, prometheus_registry: None, - protocol_name: BEEFY_PROTOCOL_NAME.into(), + on_demand_justifications_handler: on_demand_justif_handler, }; - let gadget = crate::start_beefy_gadget::<_, _, _, _, _>(beefy_params); + let task = crate::start_beefy_gadget::<_, _, _, _, _>(beefy_params); fn assert_send(_: &T) {} - assert_send(&gadget); - voters.push(gadget); + assert_send(&task); + tasks.push(task); } - voters.for_each(|_| async move {}) + tasks.for_each(|_| async move {}) } fn block_until(future: impl Future + Unpin, net: &Arc>, runtime: &mut Runtime) { @@ -404,18 +409,19 @@ fn run_for(duration: Duration, net: &Arc>, runtime: &mut Run pub(crate) fn get_beefy_streams( net: &mut BeefyTestNet, - peers: &[BeefyKeyring], + // peer index and key + peers: impl Iterator, ) -> (Vec>, Vec>>) { let mut best_block_streams = Vec::new(); let mut versioned_finality_proof_streams = Vec::new(); - for peer_id in 0..peers.len() { - let beefy_rpc_links = net.peer(peer_id).data.beefy_rpc_links.lock().clone().unwrap(); + peers.for_each(|(index, _)| { + let beefy_rpc_links = net.peer(index).data.beefy_rpc_links.lock().clone().unwrap(); let BeefyRPCLinks { from_voter_justif_stream, from_voter_best_beefy_stream } = beefy_rpc_links; best_block_streams.push(from_voter_best_beefy_stream.subscribe()); versioned_finality_proof_streams.push(from_voter_justif_stream.subscribe()); - } + }); (best_block_streams, versioned_finality_proof_streams) } @@ -493,18 +499,24 @@ fn streams_empty_after_timeout( fn finalize_block_and_wait_for_beefy( net: &Arc>, - peers: &[BeefyKeyring], + // peer index and key + peers: impl Iterator + Clone, runtime: &mut Runtime, finalize_targets: &[u64], expected_beefy: &[u64], ) { - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); for block in finalize_targets { let finalize = 
BlockId::number(*block); - for i in 0..peers.len() { - net.lock().peer(i).client().as_client().finalize_block(finalize, None).unwrap(); - } + peers.clone().for_each(|(index, _)| { + net.lock() + .peer(index) + .client() + .as_client() + .finalize_block(finalize, None) + .unwrap(); + }) } if expected_beefy.is_empty() { @@ -524,12 +536,12 @@ fn beefy_finalizing_blocks() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 10; let min_block_delta = 4; - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); @@ -542,17 +554,18 @@ fn beefy_finalizing_blocks() { // Minimum BEEFY block delta is 4. + let peers = peers.into_iter().enumerate(); // finalize block #5 -> BEEFY should finalize #1 (mandatory) and #5 from diff-power-of-two rule. - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[5], &[1, 5]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[5], &[1, 5]); // GRANDPA finalize #10 -> BEEFY finalize #10 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[10]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[10], &[10]); // GRANDPA finalize #18 -> BEEFY finalize #14, then #18 (diff-power-of-two rule) - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[18], &[14, 18]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[18], &[14, 18]); // GRANDPA finalize #20 -> BEEFY finalize #20 (mandatory) - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[20], &[20]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[20], &[20]); // GRANDPA finalize #21 -> BEEFY finalize nothing (yet) because min delta is 4 finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[21], &[]); @@ -563,12 +576,12 @@ fn lagging_validators() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 30; let min_block_delta = 1; - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); @@ -578,13 +591,20 @@ fn lagging_validators() { let net = Arc::new(Mutex::new(net)); + let peers = peers.into_iter().enumerate(); // finalize block #15 -> BEEFY should finalize #1 (mandatory) and #9, #13, #14, #15 from // diff-power-of-two rule. 
- finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[15], &[1, 9, 13, 14, 15]); + finalize_block_and_wait_for_beefy( + &net, + peers.clone(), + &mut runtime, + &[15], + &[1, 9, 13, 14, 15], + ); // Alice finalizes #25, Bob lags behind let finalize = BlockId::number(25); - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY let timeout = Some(Duration::from_millis(250)); @@ -592,21 +612,21 @@ fn lagging_validators() { streams_empty_after_timeout(versioned_finality_proof, &net, &mut runtime, None); // Bob catches up and also finalizes #25 - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); // expected beefy finalizes block #17 from diff-power-of-two wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[23, 24, 25]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[23, 24, 25]); // Both finalize #30 (mandatory session) and #32 -> BEEFY finalize #30 (mandatory), #31, #32 - finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[30, 32], &[30, 31, 32]); + finalize_block_and_wait_for_beefy(&net, peers.clone(), &mut runtime, &[30, 32], &[30, 31, 32]); // Verify that session-boundary votes get buffered by client and only processed once // session-boundary block is GRANDPA-finalized (this guarantees authenticity for the new session // validator set). 
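// NOTE (illustrative aside, not part of this patch): the "diff-power-of-two rule"
// referenced in the test comments above is implemented by the voter in `worker.rs` and is
// not part of this hunk. A simplified sketch -- an assumption that merely reproduces the
// block targets expected by these tests, ignoring the mandatory/session-start special
// case -- could look like:
fn vote_target(best_grandpa: u64, best_beefy: u64, min_block_delta: u32) -> u64 {
    // Aim roughly halfway between the BEEFY-finalized and GRANDPA-finalized heads,
    // rounded up to a power of two, but never step less than `min_block_delta`.
    let diff = (best_grandpa.saturating_sub(best_beefy) + 1) / 2;
    let step = (diff as u32).next_power_of_two().max(min_block_delta) as u64;
    (best_beefy + step).min(best_grandpa)
}

#[test]
fn vote_target_matches_test_expectations() {
    // `beefy_finalizing_blocks` above uses `min_block_delta = 4`:
    assert_eq!(vote_target(5, 1, 4), 5);    // GRANDPA #5  -> BEEFY #5
    assert_eq!(vote_target(18, 10, 4), 14); // GRANDPA #18 -> BEEFY #14 ...
    assert_eq!(vote_target(18, 14, 4), 18); // ... then #18
    // `lagging_validators` above uses `min_block_delta = 1`:
    assert_eq!(vote_target(15, 1, 1), 9);   // GRANDPA #15 -> BEEFY #9, #13, #14, #15
}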
// Alice finalizes session-boundary mandatory block #60, Bob lags behind - let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); + let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers.clone()); let finalize = BlockId::number(60); net.lock().peer(0).client().as_client().finalize_block(finalize, None).unwrap(); // verify nothing gets finalized by BEEFY @@ -617,7 +637,7 @@ fn lagging_validators() { // Bob catches up and also finalizes #60 (and should have buffered Alice's vote on #60) let (best_blocks, versioned_finality_proof) = get_beefy_streams(&mut net.lock(), peers); net.lock().peer(1).client().as_client().finalize_block(finalize, None).unwrap(); - // verify beefy skips intermediary votes, and successfully finalizes mandatory block #40 + // verify beefy skips intermediary votes, and successfully finalizes mandatory block #60 wait_for_best_beefy_blocks(best_blocks, &net, &mut runtime, &[60]); wait_for_beefy_signed_commitments(versioned_finality_proof, &net, &mut runtime, &[60]); } @@ -627,13 +647,12 @@ fn correct_beefy_payload() { sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); - let peers = - &[BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 20; let min_block_delta = 2; - let mut net = BeefyTestNet::new(4, 0); + let mut net = BeefyTestNet::new(4); // Alice, Bob, Charlie will vote on good payloads let good_api = Arc::new(four_validators::TestApi {}); @@ -649,15 +668,16 @@ fn correct_beefy_payload() { let bad_peers = vec![(3, &BeefyKeyring::Dave, bad_api)]; runtime.spawn(initialize_beefy(&mut net, bad_peers, min_block_delta)); - // push 10 blocks + // push 12 blocks net.generate_blocks_and_sync(12, session_len, &validator_set, false); let net = Arc::new(Mutex::new(net)); + let peers = peers.into_iter().enumerate(); // with 3 good voters and 1 bad one, consensus should happen and best blocks produced. finalize_block_and_wait_for_beefy(&net, peers, &mut runtime, &[10], &[1, 9]); let (best_blocks, versioned_finality_proof) = - get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); + get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); // now 2 good validators and 1 bad one are voting net.lock() @@ -686,7 +706,7 @@ fn correct_beefy_payload() { // 3rd good validator catches up and votes as well let (best_blocks, versioned_finality_proof) = - get_beefy_streams(&mut net.lock(), &[BeefyKeyring::Alice]); + get_beefy_streams(&mut net.lock(), [(0, BeefyKeyring::Alice)].into_iter()); net.lock() .peer(2) .client() @@ -707,11 +727,11 @@ fn beefy_importing_blocks() { sp_tracing::try_init_simple(); - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let client = net.peer(0).client().clone(); let (mut block_import, _, peer_data) = net.make_block_import(client.clone()); - let PeerData { beefy_rpc_links: _, beefy_voter_links } = peer_data; + let PeerData { beefy_voter_links, .. } = peer_data; let justif_stream = beefy_voter_links.lock().take().unwrap().from_block_import_justif_stream; let params = |block: Block, justifications: Option| { @@ -826,18 +846,18 @@ fn voter_initialization() { // after waiting for BEEFY pallet availability. 
let mut runtime = Runtime::new().unwrap(); - let peers = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(peers), 0).unwrap(); + let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; + let validator_set = ValidatorSet::new(make_beefy_ids(&peers), 0).unwrap(); let session_len = 5; // Should vote on all mandatory blocks no matter the `min_block_delta`. let min_block_delta = 10; - let mut net = BeefyTestNet::new(2, 0); + let mut net = BeefyTestNet::new(2); let api = Arc::new(two_validators::TestApi {}); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); runtime.spawn(initialize_beefy(&mut net, beefy_peers, min_block_delta)); - // push 30 blocks + // push 26 blocks net.generate_blocks_and_sync(26, session_len, &validator_set, false); let net = Arc::new(Mutex::new(net)); @@ -846,9 +866,90 @@ fn voter_initialization() { // Expect voters to pick up all of them and BEEFY-finalize the mandatory blocks of each session. finalize_block_and_wait_for_beefy( &net, - peers, + peers.into_iter().enumerate(), &mut runtime, &[1, 6, 10, 17, 24, 26], &[1, 5, 10, 15, 20, 25], ); } + +#[test] +fn on_demand_beefy_justification_sync() { + sp_tracing::try_init_simple(); + + let mut runtime = Runtime::new().unwrap(); + let all_peers = + [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; + let validator_set = ValidatorSet::new(make_beefy_ids(&all_peers), 0).unwrap(); + let session_len = 5; + let min_block_delta = 5; + + let mut net = BeefyTestNet::new(4); + + // Alice, Bob, Charlie start first and make progress through voting. + let api = Arc::new(four_validators::TestApi {}); + let fast_peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie]; + let voting_peers = + fast_peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); + runtime.spawn(initialize_beefy(&mut net, voting_peers, min_block_delta)); + + // Dave will start late and have to catch up using on-demand justification requests (since + // in this test there is no block import queue to automatically import justifications). + let dave = vec![(3, &BeefyKeyring::Dave, api)]; + // Instantiate but don't run Dave, yet. + let dave_task = initialize_beefy(&mut net, dave, min_block_delta); + let dave_index = 3; + + // push 30 blocks + net.generate_blocks_and_sync(30, session_len, &validator_set, false); + + let fast_peers = fast_peers.into_iter().enumerate(); + let net = Arc::new(Mutex::new(net)); + // With 3 active voters and one inactive, consensus should happen and blocks BEEFY-finalized. + // Need to finalize at least one block in each session, choose randomly. + finalize_block_and_wait_for_beefy( + &net, + fast_peers.clone(), + &mut runtime, + &[1, 6, 10, 17, 24], + &[1, 5, 10, 15, 20], + ); + + // Spawn Dave, he's now way behind voting and can only catch up through on-demand justif sync. + runtime.spawn(dave_task); + // give Dave a chance to spawn and init. + run_for(Duration::from_millis(400), &net, &mut runtime); + + let (dave_best_blocks, _) = + get_beefy_streams(&mut net.lock(), [(dave_index, BeefyKeyring::Dave)].into_iter()); + net.lock() + .peer(dave_index) + .client() + .as_client() + .finalize_block(BlockId::number(1), None) + .unwrap(); + // Give Dave task some cpu cycles to process the finality notification, + run_for(Duration::from_millis(100), &net, &mut runtime); + // freshly spun up Dave now needs to listen for gossip to figure out the state of his peers. 
+ + // Have the other peers do some gossip so Dave finds out about their progress. + finalize_block_and_wait_for_beefy(&net, fast_peers, &mut runtime, &[25], &[25]); + + // Now verify Dave successfully finalized #1 (through on-demand justification request). + wait_for_best_beefy_blocks(dave_best_blocks, &net, &mut runtime, &[1]); + + // Give Dave all tasks some cpu cycles to burn through their events queues, + run_for(Duration::from_millis(100), &net, &mut runtime); + // then verify Dave catches up through on-demand justification requests. + finalize_block_and_wait_for_beefy( + &net, + [(dave_index, BeefyKeyring::Dave)].into_iter(), + &mut runtime, + &[6, 10, 17, 24, 26], + &[5, 10, 15, 20, 25], + ); + + let all_peers = all_peers.into_iter().enumerate(); + // Now that Dave has caught up, sanity check voting works for all of them. + finalize_block_and_wait_for_beefy(&net, all_peers, &mut runtime, &[30], &[30]); +} diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 6e8c89d804984..832b43315515f 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -24,10 +24,15 @@ use std::{ }; use codec::{Codec, Decode, Encode}; -use futures::{stream::Fuse, StreamExt}; +use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, log_enabled, trace, warn}; +use parking_lot::Mutex; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; +use sc_network_common::{ + protocol::event::Event as NetEvent, + service::{NetworkEventStream, NetworkRequest}, +}; use sc_network_gossip::GossipEngine; use sp_api::{BlockId, ProvideRuntimeApi}; @@ -48,14 +53,17 @@ use beefy_primitives::{ }; use crate::{ + communication::{ + gossip::{topic, GossipValidator}, + request_response::outgoing_requests_engine::OnDemandJustificationsEngine, + }, error::Error, - gossip::{topic, GossipValidator}, justification::BeefyVersionedFinalityProof, keystore::BeefyKeystore, metric_inc, metric_set, metrics::Metrics, round::Rounds, - BeefyVoterLinks, Client, + BeefyVoterLinks, Client, KnownPeers, }; enum RoundAction { @@ -113,6 +121,17 @@ impl VoterOracle { } } + /// Return current pending mandatory block, if any. + pub fn mandatory_pending(&self) -> Option> { + self.sessions.front().and_then(|round| { + if round.mandatory_done() { + None + } else { + Some(round.session_start()) + } + }) + } + /// Return `(A, B)` tuple representing inclusive [A, B] interval of votes to accept. pub fn accepted_interval( &self, @@ -175,29 +194,35 @@ impl VoterOracle { } } -pub(crate) struct WorkerParams { +pub(crate) struct WorkerParams { pub client: Arc, pub backend: Arc, pub runtime: Arc, - pub sync_oracle: SO, + pub network: N, pub key_store: BeefyKeystore, + pub known_peers: Arc>>, pub gossip_engine: GossipEngine, pub gossip_validator: Arc>, + pub on_demand_justifications: OnDemandJustificationsEngine, pub links: BeefyVoterLinks, pub metrics: Option, pub min_block_delta: u32, } /// A BEEFY worker plays the BEEFY protocol -pub(crate) struct BeefyWorker { +pub(crate) struct BeefyWorker { // utilities client: Arc, backend: Arc, runtime: Arc, - sync_oracle: SO, + network: N, key_store: BeefyKeystore, + + // communication + known_peers: Arc>>, gossip_engine: GossipEngine, gossip_validator: Arc>, + on_demand_justifications: OnDemandJustificationsEngine, // channels /// Links between the block importer, the background voter and the RPC layer. 
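// NOTE (illustrative aside, not part of this patch): the worker now shares an
// `Arc<Mutex<KnownPeers<B>>>` with the gossip validator and the on-demand justifications
// engine, but `KnownPeers` itself is defined elsewhere and not shown in this diff. A
// minimal sketch of what such a peer-tracking helper could look like (non-generic here
// for brevity; the names below are assumptions):
use std::collections::HashSet;

use sc_network::PeerId;

pub struct KnownPeers {
    live: HashSet<PeerId>,
}

impl KnownPeers {
    pub fn new() -> Self {
        Self { live: HashSet::new() }
    }

    /// Called on `SyncConnected` network events.
    pub fn add_new(&mut self, peer: PeerId) {
        self.live.insert(peer);
    }

    /// Called on `SyncDisconnected` network events.
    pub fn remove(&mut self, peer: &PeerId) {
        self.live.remove(peer);
    }

    /// Used by the justifications engine when picking a peer to query.
    pub fn contains(&self, peer: &PeerId) -> bool {
        self.live.contains(peer)
    }
}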
@@ -218,14 +243,14 @@ pub(crate) struct BeefyWorker { voting_oracle: VoterOracle, } -impl BeefyWorker +impl BeefyWorker where B: Block + Codec, BE: Backend, C: Client, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, - SO: SyncOracle + Send + Sync + Clone + 'static, + N: NetworkEventStream + NetworkRequest + SyncOracle + Send + Sync + Clone + 'static, { /// Return a new BEEFY worker instance. /// @@ -233,15 +258,17 @@ where /// BEEFY pallet has been deployed on-chain. /// /// The BEEFY pallet is needed in order to keep track of the BEEFY authority set. - pub(crate) fn new(worker_params: WorkerParams) -> Self { + pub(crate) fn new(worker_params: WorkerParams) -> Self { let WorkerParams { client, backend, runtime, key_store, - sync_oracle, + network, gossip_engine, gossip_validator, + on_demand_justifications, + known_peers, links, metrics, min_block_delta, @@ -256,10 +283,12 @@ where client: client.clone(), backend, runtime, - sync_oracle, + network, + known_peers, key_store, gossip_engine, gossip_validator, + on_demand_justifications, links, metrics, best_grandpa_block_header: last_finalized_header, @@ -366,8 +395,6 @@ where { if let Some(new_validator_set) = find_authorities_change::(&header) { self.init_session_at(new_validator_set, *header.number()); - // TODO (grandpa-bridge-gadget/issues/20): when adding SYNC protocol, - // fire up a request for justification for this mandatory block here. } } } @@ -408,7 +435,10 @@ where let block_num = signed_commitment.commitment.block_number; let best_grandpa = *self.best_grandpa_block_header.number(); match self.voting_oracle.triage_round(block_num, best_grandpa)? { - RoundAction::Process => self.finalize(justification)?, + RoundAction::Process => { + debug!(target: "beefy", "🥩 Process justification for round: {:?}.", block_num); + self.finalize(justification)? + }, RoundAction::Enqueue => { debug!(target: "beefy", "🥩 Buffer justification for round: {:?}.", block_num); self.pending_justifications.entry(block_num).or_insert(justification); @@ -429,7 +459,7 @@ where let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?; if rounds.add_vote(&round, vote, self_vote) { - if let Some(signatures) = rounds.try_conclude(&round) { + if let Some(signatures) = rounds.should_conclude(&round) { self.gossip_validator.conclude_round(round.1); let block_num = round.1; @@ -474,6 +504,8 @@ where self.best_beefy_block = Some(block_num); metric_set!(self, beefy_best_block, block_num); + self.on_demand_justifications.cancel_requests_older_than(block_num); + if let Err(e) = self.backend.append_justification( BlockId::Number(block_num), (BEEFY_ENGINE_ID, finality_proof.clone().encode()), @@ -735,7 +767,7 @@ where let at = BlockId::hash(notif.header.hash()); if let Some(active) = self.runtime.runtime_api().validator_set(&at).ok().flatten() { self.initialize_voter(¬if.header, active); - if !self.sync_oracle.is_major_syncing() { + if !self.network.is_major_syncing() { if let Err(err) = self.try_to_vote() { debug!(target: "beefy", "🥩 {}", err); } @@ -768,6 +800,7 @@ where self.wait_for_runtime_pallet(&mut finality_notifications).await; trace!(target: "beefy", "🥩 BEEFY pallet available, starting voter."); + let mut network_events = self.network.event_stream("network-gossip").fuse(); let mut votes = Box::pin( self.gossip_engine .messages_for(topic::()) @@ -788,15 +821,38 @@ where // The branches below only change 'state', actual voting happen afterwards, // based on the new resulting 'state'. futures::select_biased! 
{ + // Use `select_biased!` to prioritize order below. + // Make sure to pump gossip engine. + _ = gossip_engine => { + error!(target: "beefy", "🥩 Gossip engine has terminated, closing worker."); + return; + }, + // Keep track of connected peers. + net_event = network_events.next() => { + if let Some(net_event) = net_event { + self.handle_network_event(net_event); + } else { + error!(target: "beefy", "🥩 Network events stream terminated, closing worker."); + return; + } + }, + // Process finality notifications first since these drive the voter. notification = finality_notifications.next() => { if let Some(notification) = notification { self.handle_finality_notification(¬ification); } else { + error!(target: "beefy", "🥩 Finality stream terminated, closing worker."); return; } }, - // TODO: when adding SYNC protocol, join the on-demand justifications stream to - // this one, and handle them both here. + // Process incoming justifications as these can make some in-flight votes obsolete. + justif = self.on_demand_justifications.next().fuse() => { + if let Some(justif) = justif { + if let Err(err) = self.triage_incoming_justif(justif) { + debug!(target: "beefy", "🥩 {}", err); + } + } + }, justif = block_import_justif.next() => { if let Some(justif) = justif { // Block import justifications have already been verified to be valid @@ -805,9 +861,11 @@ where debug!(target: "beefy", "🥩 {}", err); } } else { + error!(target: "beefy", "🥩 Block import stream terminated, closing worker."); return; } }, + // Finally process incoming votes. vote = votes.next() => { if let Some(vote) = vote { // Votes have already been verified to be valid by the gossip validator. @@ -815,13 +873,10 @@ where debug!(target: "beefy", "🥩 {}", err); } } else { + error!(target: "beefy", "🥩 Votes gossiping stream terminated, closing worker."); return; } }, - _ = gossip_engine => { - error!(target: "beefy", "🥩 Gossip engine has terminated."); - return; - } } // Handle pending justifications and/or votes for now GRANDPA finalized blocks. @@ -829,8 +884,14 @@ where debug!(target: "beefy", "🥩 {}", err); } - // Don't bother voting during major sync. - if !self.sync_oracle.is_major_syncing() { + // Don't bother voting or requesting justifications during major sync. + if !self.network.is_major_syncing() { + // If the current target is a mandatory block, + // make sure there's also an on-demand justification request out for it. + if let Some(block) = self.voting_oracle.mandatory_pending() { + // This only starts new request if there isn't already an active one. + self.on_demand_justifications.request(block); + } // There were external events, 'state' is changed, author a vote if needed/possible. if let Err(err) = self.try_to_vote() { debug!(target: "beefy", "🥩 {}", err); @@ -840,6 +901,20 @@ where } } } + + /// Update known peers based on network events. + fn handle_network_event(&mut self, event: NetEvent) { + match event { + NetEvent::SyncConnected { remote } => { + self.known_peers.lock().add_new(remote); + }, + NetEvent::SyncDisconnected { remote } => { + self.known_peers.lock().remove(&remote); + }, + // We don't care about other events. + _ => (), + } + } } /// Extract the MMR root hash from a digest in the given header, if it exists. 
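// NOTE (illustrative aside, not part of this patch): the body of the digest lookup
// introduced by the doc comment above sits outside this hunk. A sketch of how a BEEFY
// `ConsensusLog::MmrRoot` entry is typically pulled out of a header digest (assumed
// shape, not necessarily the exact upstream body):
use beefy_primitives::{crypto::AuthorityId, ConsensusLog, MmrRootHash, BEEFY_ENGINE_ID};
use sp_runtime::{
    generic::OpaqueDigestItemId,
    traits::{Block, Header},
};

fn find_mmr_root_digest<B: Block>(header: &B::Header) -> Option<MmrRootHash> {
    let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID);
    // Scan the digest logs and decode the first BEEFY consensus log carrying an MMR root.
    header.digest().convert_first(|log| {
        log.try_to::<ConsensusLog<AuthorityId>>(id).and_then(|log| match log {
            ConsensusLog::MmrRoot(root) => Some(root),
            _ => None,
        })
    })
}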
@@ -932,11 +1007,11 @@ where pub(crate) mod tests { use super::*; use crate::{ + communication::notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, keystore::tests::Keyring, - notification::{BeefyBestBlockStream, BeefyVersionedFinalityProofStream}, tests::{ create_beefy_keystore, get_beefy_streams, make_beefy_ids, two_validators::TestApi, - BeefyPeer, BeefyTestNet, BEEFY_PROTOCOL_NAME, + BeefyPeer, BeefyTestNet, }, BeefyRPCLinks, }; @@ -979,21 +1054,29 @@ pub(crate) mod tests { let api = Arc::new(TestApi {}); let network = peer.network_service().clone(); - let sync_oracle = network.clone(); - let gossip_validator = Arc::new(crate::gossip::GossipValidator::new()); + let known_peers = Arc::new(Mutex::new(KnownPeers::new())); + let gossip_validator = Arc::new(GossipValidator::new(known_peers.clone())); let gossip_engine = - GossipEngine::new(network, BEEFY_PROTOCOL_NAME, gossip_validator.clone(), None); + GossipEngine::new(network.clone(), "/beefy/1", gossip_validator.clone(), None); + let on_demand_justifications = OnDemandJustificationsEngine::new( + network.clone(), + api.clone(), + "/beefy/justifs/1".into(), + known_peers.clone(), + ); let worker_params = crate::worker::WorkerParams { client: peer.client().as_client(), backend: peer.client().as_backend(), runtime: api, key_store: Some(keystore).into(), + known_peers, links, gossip_engine, gossip_validator, min_block_delta, metrics: None, - sync_oracle, + network, + on_demand_justifications, }; BeefyWorker::<_, _, _, _, _>::new(worker_params) } @@ -1245,7 +1328,7 @@ pub(crate) mod tests { fn keystore_vs_validator_set() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); // keystore doesn't contain other keys than validators' @@ -1266,13 +1349,15 @@ pub(crate) mod tests { #[test] fn should_finalize_correctly() { - let keys = &[Keyring::Alice]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let keys = [Keyring::Alice]; + let validator_set = ValidatorSet::new(make_beefy_ids(&keys), 0).unwrap(); + let mut net = BeefyTestNet::new(1); let backend = net.peer(0).client().as_backend(); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); - let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); + let keys = keys.iter().cloned().enumerate(); + let (mut best_block_streams, mut finality_proofs) = + get_beefy_streams(&mut net, keys.clone()); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); let mut finality_proof = finality_proofs.drain(..).next().unwrap(); @@ -1294,7 +1379,8 @@ pub(crate) mod tests { })); // unknown hash for block #1 - let (mut best_block_streams, mut finality_proofs) = get_beefy_streams(&mut net, keys); + let (mut best_block_streams, mut finality_proofs) = + get_beefy_streams(&mut net, keys.clone()); let mut best_block_stream = best_block_streams.drain(..).next().unwrap(); let mut finality_proof = finality_proofs.drain(..).next().unwrap(); let justif = create_finality_proof(1); @@ -1355,7 +1441,7 @@ pub(crate) mod tests { fn should_init_session() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(&net.peer(0), 
&keys[0], 1); assert!(worker.voting_oracle.sessions.is_empty()); @@ -1389,7 +1475,7 @@ pub(crate) mod tests { fn should_triage_votes_and_process_later() { let keys = &[Keyring::Alice, Keyring::Bob]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 0).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let mut net = BeefyTestNet::new(1); let mut worker = create_beefy_worker(&net.peer(0), &keys[0], 1); fn new_vote( @@ -1450,7 +1536,7 @@ pub(crate) mod tests { fn should_initialize_correct_voter() { let keys = &[Keyring::Alice]; let validator_set = ValidatorSet::new(make_beefy_ids(keys), 1).unwrap(); - let mut net = BeefyTestNet::new(1, 0); + let mut net = BeefyTestNet::new(1); let backend = net.peer(0).client().as_backend(); // push 15 blocks with `AuthorityChange` digests every 10 blocks diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 2f6b788e368b3..9d5abf98ceff0 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -48,7 +48,7 @@ use sc_consensus::{ Verifier, }; use sc_network::{ - config::{NetworkConfiguration, Role, SyncMode}, + config::{NetworkConfiguration, RequestResponseConfig, Role, SyncMode}, Multiaddr, NetworkService, NetworkWorker, }; use sc_network_common::{ @@ -688,6 +688,8 @@ pub struct FullPeerConfig { pub block_announce_validator: Option + Send + Sync>>, /// List of notification protocols that the network must support. pub notifications_protocols: Vec, + /// List of request-response protocols that the network must support. + pub request_response_protocols: Vec, /// The indices of the peers the peer should be connected to. /// /// If `None`, it will be connected to all other peers. @@ -790,6 +792,9 @@ where network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; + network_config + .request_response_protocols + .extend(config.request_response_protocols); network_config.extra_sets = config .notifications_protocols .into_iter() From 25795506052363e8b5795eb3526e61ef2a27d89a Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 3 Oct 2022 15:17:59 +0200 Subject: [PATCH 198/348] Fix `Weight::is_zero` (#12396) * Fix Weight::is_zero Signed-off-by: Oliver Tale-Yazdi * Add test Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- primitives/weights/src/weight_v2.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/primitives/weights/src/weight_v2.rs b/primitives/weights/src/weight_v2.rs index a8eaf79a28711..8596a782c1fa7 100644 --- a/primitives/weights/src/weight_v2.rs +++ b/primitives/weights/src/weight_v2.rs @@ -318,7 +318,7 @@ impl Zero for Weight { } fn is_zero(&self) -> bool { - self.ref_time == 0 + self == &Self::zero() } } @@ -447,3 +447,16 @@ impl SubAssign for Weight { }; } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn is_zero_works() { + assert!(Weight::zero().is_zero()); + assert!(!Weight::from_components(1, 0).is_zero()); + assert!(!Weight::from_components(0, 1).is_zero()); + assert!(!Weight::MAX.is_zero()); + } +} From 1b23ec9f6d7880b358072b97d0030d3352cb20aa Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Mon, 3 Oct 2022 23:50:00 +0800 Subject: [PATCH 199/348] Remove unnecessary Clone trait bounds on CountedStorageMap (#12402) * Remove unnecessary Clone trait bounds on CountedStorageMap * cargo fmt --- .../support/src/storage/types/counted_map.rs | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 
deletions(-) diff --git a/frame/support/src/storage/types/counted_map.rs b/frame/support/src/storage/types/counted_map.rs index c4027acfe7232..8c19434767f49 100644 --- a/frame/support/src/storage/types/counted_map.rs +++ b/frame/support/src/storage/types/counted_map.rs @@ -143,10 +143,7 @@ where } /// Store a value to be associated with the given key from the map. - pub fn insert + Clone, ValArg: EncodeLike>( - key: KeyArg, - val: ValArg, - ) { + pub fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg) { if !::Map::contains_key(Ref::from(&key)) { CounterFor::::mutate(|value| value.saturating_inc()); } @@ -154,7 +151,7 @@ where } /// Remove the value under a key. - pub fn remove + Clone>(key: KeyArg) { + pub fn remove>(key: KeyArg) { if ::Map::contains_key(Ref::from(&key)) { CounterFor::::mutate(|value| value.saturating_dec()); } @@ -162,7 +159,7 @@ where } /// Mutate the value under a key. - pub fn mutate + Clone, R, F: FnOnce(&mut QueryKind::Query) -> R>( + pub fn mutate, R, F: FnOnce(&mut QueryKind::Query) -> R>( key: KeyArg, f: F, ) -> R { @@ -173,7 +170,7 @@ where /// Mutate the item, only if an `Ok` value is returned. pub fn try_mutate(key: KeyArg, f: F) -> Result where - KeyArg: EncodeLike + Clone, + KeyArg: EncodeLike, F: FnOnce(&mut QueryKind::Query) -> Result, { Self::try_mutate_exists(key, |option_value_ref| { @@ -187,7 +184,7 @@ where } /// Mutate the value under a key. Deletes the item if mutated to a `None`. - pub fn mutate_exists + Clone, R, F: FnOnce(&mut Option) -> R>( + pub fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( key: KeyArg, f: F, ) -> R { @@ -200,7 +197,7 @@ where /// or if the storage item does not exist (`None`), independent of the `QueryType`. pub fn try_mutate_exists(key: KeyArg, f: F) -> Result where - KeyArg: EncodeLike + Clone, + KeyArg: EncodeLike, F: FnOnce(&mut Option) -> Result, { ::Map::try_mutate_exists(key, |option_value| { @@ -222,7 +219,7 @@ where } /// Take the value under a key. - pub fn take + Clone>(key: KeyArg) -> QueryKind::Query { + pub fn take>(key: KeyArg) -> QueryKind::Query { let removed_value = ::Map::mutate_exists(key, |value| value.take()); if removed_value.is_some() { CounterFor::::mutate(|value| value.saturating_dec()); @@ -240,7 +237,7 @@ where /// `[item]`. Any default value set for the storage item will be ignored on overwrite. pub fn append(key: EncodeLikeKey, item: EncodeLikeItem) where - EncodeLikeKey: EncodeLike + Clone, + EncodeLikeKey: EncodeLike, Item: Encode, EncodeLikeItem: EncodeLike, Value: StorageAppend, @@ -355,7 +352,7 @@ where /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. 
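// NOTE (illustrative aside, not part of this patch): the dropped `Clone` bounds were only
// ever needed so a key could be used twice. As the hunks above show, the map already
// passes `Ref::from(&key)` to the underlying storage, and `codec::Ref` lets a *borrowed*
// key stand in wherever `EncodeLike<K>` is expected. A standalone sketch of that pattern
// (the helper names below are hypothetical):
use codec::{Encode, EncodeLike, Ref};

fn encode_key<K: EncodeLike<u32>>(key: K) -> Vec<u8> {
    key.encode()
}

fn use_key_twice<K: EncodeLike<u32>>(key: K) -> (Vec<u8>, Vec<u8>) {
    // First use borrows the key through `Ref`, second use consumes it -- no `Clone` needed.
    let first = encode_key(Ref::from(&key));
    let second = encode_key(key);
    (first, second)
}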
pub fn try_append(key: KArg, item: EncodeLikeItem) -> Result<(), ()> where - KArg: EncodeLike + Clone, + KArg: EncodeLike, Item: Encode, EncodeLikeItem: EncodeLike, Value: StorageTryAppend, From 6d7f76b5de00d4d4fdc55596abe86beb7d55f0b3 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 4 Oct 2022 09:58:05 +0300 Subject: [PATCH 200/348] docs/CODEOWNERS: add @acatangiu as MMR owner (#12406) --- docs/CODEOWNERS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 0b9e6e7783058..cf2067d19450d 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -36,11 +36,13 @@ /client/consensus/pow/ @sorpaas /primitives/consensus/pow/ @sorpaas -# BEEFY +# BEEFY, MMR /client/beefy/ @acatangiu /frame/beefy/ @acatangiu /frame/beefy-mmr/ @acatangiu +/frame/merkle-mountain-range/ @acatangiu /primitives/beefy/ @acatangiu +/primitives/merkle-mountain-range/ @acatangiu # Contracts /frame/contracts/ @athei From 594d71afca8e70ed84297b01472bb1250d89ebd1 Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 4 Oct 2022 17:01:50 +0900 Subject: [PATCH 201/348] Add @koute to `docs/CODEOWNERS` and update stale paths (#12408) --- docs/CODEOWNERS | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index cf2067d19450d..133ba7b094d43 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -24,8 +24,19 @@ /.gitlab-ci.yml @paritytech/ci # Sandboxing capability of Substrate Runtime -/primitives/sr-sandbox/ @pepyakin -/primitives/core/src/sandbox.rs @pepyakin +/primitives/sandbox/ @pepyakin @koute + +# WASM executor, low-level client <-> WASM interface and other WASM-related code +/client/executor/ @koute +/client/allocator/ @koute +/primitives/wasm-interface/ @koute +/primitives/runtime-interface/ @koute +/primitives/panic-handler/ @koute +/utils/wasm-builder/ @koute + +# Systems-related bits and bobs on the client side +/client/sysinfo/ @koute +/client/tracing/ @koute # GRANDPA, BABE, consensus stuff /frame/babe/ @andresilva From e77cbe39c4d1bfd978bb03c686fc9f60d86a3d06 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Tue, 4 Oct 2022 11:47:13 +0300 Subject: [PATCH 202/348] BEEFY: Simplify hashing for pallet-beefy-mmr (#12393) * beefy-mmr: reuse sp_runtime::traits::Keccak256 * beefy-mmr: use sp_runtime::traits:Hash for generating merkle proofs * beefy-mmr: use sp_runtime::traits:Hash for validating merkle proofs * beefy-mmr: remove primitives::Hasher and primitives::Hash * fixes * beefy-mmr: reduce the number of generic parameters for merkle_root() * fix * compute upper Vec capacity more accurately --- Cargo.lock | 2 +- frame/beefy-mmr/primitives/Cargo.toml | 8 +- frame/beefy-mmr/primitives/src/lib.rs | 236 ++++++++++++-------------- frame/beefy-mmr/src/lib.rs | 27 +-- frame/beefy-mmr/src/mock.rs | 3 +- 5 files changed, 118 insertions(+), 158 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 723a09ee9a39f..2f0a2df0f101b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -525,7 +525,7 @@ dependencies = [ "env_logger", "log", "sp-api", - "tiny-keccak", + "sp-runtime", ] [[package]] diff --git a/frame/beefy-mmr/primitives/Cargo.toml b/frame/beefy-mmr/primitives/Cargo.toml index 1aa2573c7f680..a097da0fc30fd 100644 --- a/frame/beefy-mmr/primitives/Cargo.toml +++ b/frame/beefy-mmr/primitives/Cargo.toml @@ -11,10 +11,10 @@ homepage = "https://substrate.io" [dependencies] array-bytes = { version = "4.1", optional = true } log = { version = "0.4", default-features = false, optional = true } -tiny-keccak = { version = 
"2.0.2", features = ["keccak"], optional = true } beefy-primitives = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/beefy" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] array-bytes = "4.1" @@ -22,9 +22,9 @@ env_logger = "0.9" [features] debug = ["array-bytes", "log"] -default = ["debug", "keccak", "std"] -keccak = ["tiny-keccak"] +default = ["debug", "std"] std = [ "beefy-primitives/std", - "sp-api/std" + "sp-api/std", + "sp-runtime/std" ] diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs index 38831d7914715..f56be8bcafe5b 100644 --- a/frame/beefy-mmr/primitives/src/lib.rs +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -25,88 +25,49 @@ //! compilation targets. //! //! Merkle Tree is constructed from arbitrary-length leaves, that are initially hashed using the -//! same [Hasher] as the inner nodes. +//! same hasher as the inner nodes. //! Inner nodes are created by concatenating child hashes and hashing again. The implementation //! does not perform any sorting of the input data (leaves) nor when inner nodes are created. //! //! If the number of leaves is not even, last leave (hash of) is promoted to the upper layer. -#[cfg(not(feature = "std"))] -extern crate alloc; -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; +pub use sp_runtime::traits::Keccak256; +use sp_runtime::{app_crypto::sp_core, sp_std, traits::Hash as HashT}; +use sp_std::{vec, vec::Vec}; use beefy_primitives::mmr::{BeefyAuthoritySet, BeefyNextAuthoritySet}; -/// Supported hashing output size. -/// -/// The size is restricted to 32 bytes to allow for a more optimised implementation. -pub type Hash = [u8; 32]; - -/// Generic hasher trait. -/// -/// Implement the function to support custom way of hashing data. -/// The implementation must return a [Hash](type@Hash) type, so only 32-byte output hashes are -/// supported. -pub trait Hasher { - /// Hash given arbitrary-length piece of data. - fn hash(data: &[u8]) -> Hash; -} - -#[cfg(feature = "keccak")] -mod keccak256 { - use tiny_keccak::{Hasher as _, Keccak}; - - /// Keccak256 hasher implementation. - pub struct Keccak256; - impl Keccak256 { - /// Hash given data. - pub fn hash(data: &[u8]) -> super::Hash { - ::hash(data) - } - } - impl super::Hasher for Keccak256 { - fn hash(data: &[u8]) -> super::Hash { - let mut keccak = Keccak::v256(); - keccak.update(data); - let mut output = [0_u8; 32]; - keccak.finalize(&mut output); - output - } - } -} -#[cfg(feature = "keccak")] -pub use keccak256::Keccak256; - /// Construct a root hash of a Binary Merkle Tree created from given leaves. /// /// See crate-level docs for details about Merkle Tree construction. /// /// In case an empty list of leaves is passed the function returns a 0-filled hash. 
-pub fn merkle_root(leaves: I) -> Hash +pub fn merkle_root(leaves: I) -> H::Output where - H: Hasher, - I: IntoIterator, - T: AsRef<[u8]>, + H: HashT, + H::Output: Default + AsRef<[u8]>, + I: IntoIterator, + I::Item: AsRef<[u8]>, { - let iter = leaves.into_iter().map(|l| H::hash(l.as_ref())); - merkelize::(iter, &mut ()) + let iter = leaves.into_iter().map(|l| ::hash(l.as_ref())); + merkelize::(iter, &mut ()).into() } -fn merkelize(leaves: I, visitor: &mut V) -> Hash +fn merkelize(leaves: I, visitor: &mut V) -> H::Output where - H: Hasher, - V: Visitor, - I: Iterator, + H: HashT, + H::Output: Default + AsRef<[u8]>, + V: Visitor, + I: Iterator, { - let upper = Vec::with_capacity(leaves.size_hint().0); + let upper = Vec::with_capacity((leaves.size_hint().1.unwrap_or(0).saturating_add(1)) / 2); let mut next = match merkelize_row::(leaves, upper, visitor) { Ok(root) => return root, - Err(next) if next.is_empty() => return Hash::default(), + Err(next) if next.is_empty() => return H::Output::default(), Err(next) => next, }; - let mut upper = Vec::with_capacity((next.len() + 1) / 2); + let mut upper = Vec::with_capacity((next.len().saturating_add(1)) / 2); loop { visitor.move_up(); @@ -125,14 +86,14 @@ where /// /// The structure contains all necessary data to later on verify the proof and the leaf itself. #[derive(Debug, PartialEq, Eq)] -pub struct MerkleProof { +pub struct MerkleProof { /// Root hash of generated merkle tree. - pub root: Hash, + pub root: H, /// Proof items (does not contain the leaf hash, nor the root obviously). /// /// This vec contains all inner node hashes necessary to reconstruct the root hash given the /// leaf hash. - pub proof: Vec, + pub proof: Vec, /// Number of leaves in the original tree. /// /// This is needed to detect a case where we have an odd number of leaves that "get promoted" @@ -141,14 +102,14 @@ pub struct MerkleProof { /// Index of the leaf the proof is for (0-based). pub leaf_index: usize, /// Leaf content. - pub leaf: T, + pub leaf: L, } /// A trait of object inspecting merkle root creation. /// /// It can be passed to [`merkelize_row`] or [`merkelize`] functions and will be notified /// about tree traversal. -trait Visitor { +trait Visitor { /// We are moving one level up in the tree. fn move_up(&mut self); @@ -158,13 +119,13 @@ trait Visitor { /// The method will also visit the `root` hash (level 0). /// /// The `index` is an index of `left` item. - fn visit(&mut self, index: usize, left: &Option, right: &Option); + fn visit(&mut self, index: usize, left: &Option, right: &Option); } /// No-op implementation of the visitor. -impl Visitor for () { +impl Visitor for () { fn move_up(&mut self) {} - fn visit(&mut self, _index: usize, _left: &Option, _right: &Option) {} + fn visit(&mut self, _index: usize, _left: &Option, _right: &Option) {} } /// Construct a Merkle Proof for leaves given by indices. @@ -177,16 +138,17 @@ impl Visitor for () { /// # Panic /// /// The function will panic if given `leaf_index` is greater than the number of leaves. 
-pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof +pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof where - H: Hasher, + H: HashT, + H::Output: Default + Copy + AsRef<[u8]>, I: IntoIterator, I::IntoIter: ExactSizeIterator, T: AsRef<[u8]>, { let mut leaf = None; let iter = leaves.into_iter().enumerate().map(|(idx, l)| { - let hash = H::hash(l.as_ref()); + let hash = ::hash(l.as_ref()); if idx == leaf_index { leaf = Some(l); } @@ -194,23 +156,23 @@ where }); /// The struct collects a proof for single leaf. - struct ProofCollection { - proof: Vec, + struct ProofCollection { + proof: Vec, position: usize, } - impl ProofCollection { + impl ProofCollection { fn new(position: usize) -> Self { ProofCollection { proof: Default::default(), position } } } - impl Visitor for ProofCollection { + impl Visitor for ProofCollection { fn move_up(&mut self) { self.position /= 2; } - fn visit(&mut self, index: usize, left: &Option, right: &Option) { + fn visit(&mut self, index: usize, left: &Option, right: &Option) { // we are at left branch - right goes to the proof. if self.position == index { if let Some(right) = right { @@ -238,7 +200,7 @@ where collect_proof .proof .iter() - .map(|s| array_bytes::bytes2hex("", s)) + .map(|s| array_bytes::bytes2hex("", s.as_ref())) .collect::>() ); @@ -250,25 +212,19 @@ where /// Can be either a value that needs to be hashed first, /// or the hash itself. #[derive(Debug, PartialEq, Eq)] -pub enum Leaf<'a> { +pub enum Leaf<'a, H> { /// Leaf content. Value(&'a [u8]), /// Hash of the leaf content. - Hash(Hash), + Hash(H), } -impl<'a, T: AsRef<[u8]>> From<&'a T> for Leaf<'a> { +impl<'a, H, T: AsRef<[u8]>> From<&'a T> for Leaf<'a, H> { fn from(v: &'a T) -> Self { Leaf::Value(v.as_ref()) } } -impl<'a> From for Leaf<'a> { - fn from(v: Hash) -> Self { - Leaf::Hash(v) - } -} - /// Verify Merkle Proof correctness versus given root hash. /// /// The proof is NOT expected to contain leaf hash as the first @@ -277,45 +233,47 @@ impl<'a> From for Leaf<'a> { /// /// The proof must not contain the root hash. 
pub fn verify_proof<'a, H, P, L>( - root: &'a Hash, + root: &'a H::Output, proof: P, number_of_leaves: usize, leaf_index: usize, leaf: L, ) -> bool where - H: Hasher, - P: IntoIterator, - L: Into>, + H: HashT, + H::Output: PartialEq + AsRef<[u8]>, + P: IntoIterator, + L: Into>, { if leaf_index >= number_of_leaves { return false } let leaf_hash = match leaf.into() { - Leaf::Value(content) => H::hash(content), + Leaf::Value(content) => ::hash(content), Leaf::Hash(hash) => hash, }; - let mut combined = [0_u8; 64]; + let hash_len = ::LENGTH; + let mut combined = vec![0_u8; hash_len * 2]; let mut position = leaf_index; let mut width = number_of_leaves; let computed = proof.into_iter().fold(leaf_hash, |a, b| { if position % 2 == 1 || position + 1 == width { - combined[0..32].copy_from_slice(&b); - combined[32..64].copy_from_slice(&a); + combined[..hash_len].copy_from_slice(&b.as_ref()); + combined[hash_len..].copy_from_slice(&a.as_ref()); } else { - combined[0..32].copy_from_slice(&a); - combined[32..64].copy_from_slice(&b); + combined[..hash_len].copy_from_slice(&a.as_ref()); + combined[hash_len..].copy_from_slice(&b.as_ref()); } - let hash = H::hash(&combined); + let hash = ::hash(&combined); #[cfg(feature = "debug")] log::debug!( "[verify_proof]: (a, b) {:?}, {:?} => {:?} ({:?}) hash", - array_bytes::bytes2hex("", &a), - array_bytes::bytes2hex("", &b), - array_bytes::bytes2hex("", &hash), - array_bytes::bytes2hex("", &combined) + array_bytes::bytes2hex("", &a.as_ref()), + array_bytes::bytes2hex("", &b.as_ref()), + array_bytes::bytes2hex("", &hash.as_ref()), + array_bytes::bytes2hex("", &combined.as_ref()) ); position /= 2; width = ((width - 1) / 2) + 1; @@ -332,20 +290,22 @@ where /// empty iterator) an `Err` with the inner nodes of upper layer is returned. fn merkelize_row( mut iter: I, - mut next: Vec, + mut next: Vec, visitor: &mut V, -) -> Result> +) -> Result> where - H: Hasher, - V: Visitor, - I: Iterator, + H: HashT, + H::Output: AsRef<[u8]>, + V: Visitor, + I: Iterator, { #[cfg(feature = "debug")] log::debug!("[merkelize_row]"); next.clear(); + let hash_len = ::LENGTH; let mut index = 0; - let mut combined = [0_u8; 64]; + let mut combined = vec![0_u8; hash_len * 2]; loop { let a = iter.next(); let b = iter.next(); @@ -354,17 +314,17 @@ where #[cfg(feature = "debug")] log::debug!( " {:?}\n {:?}", - a.as_ref().map(|s| array_bytes::bytes2hex("", s)), - b.as_ref().map(|s| array_bytes::bytes2hex("", s)) + a.as_ref().map(|s| array_bytes::bytes2hex("", s.as_ref())), + b.as_ref().map(|s| array_bytes::bytes2hex("", s.as_ref())) ); index += 2; match (a, b) { (Some(a), Some(b)) => { - combined[0..32].copy_from_slice(&a); - combined[32..64].copy_from_slice(&b); + combined[..hash_len].copy_from_slice(a.as_ref()); + combined[hash_len..].copy_from_slice(b.as_ref()); - next.push(H::hash(&combined)); + next.push(::hash(&combined)); }, // Odd number of items. Promote the item to the upper layer. (Some(a), None) if !next.is_empty() => { @@ -377,7 +337,7 @@ where #[cfg(feature = "debug")] log::debug!( "[merkelize_row] Next: {:?}", - next.iter().map(|s| array_bytes::bytes2hex("", s)).collect::>() + next.iter().map(|s| array_bytes::bytes2hex("", s.as_ref())).collect::>() ); return Err(next) }, @@ -389,7 +349,6 @@ sp_api::decl_runtime_apis! { /// API useful for BEEFY light clients. pub trait BeefyMmrApi where - H: From + Into, BeefyAuthoritySet: sp_api::Decode, { /// Return the currently active BEEFY authority set proof. @@ -403,6 +362,7 @@ sp_api::decl_runtime_apis! 
{ #[cfg(test)] mod tests { use super::*; + use crate::sp_core::H256; #[test] fn should_generate_empty_root() { @@ -411,11 +371,11 @@ mod tests { let data: Vec<[u8; 1]> = Default::default(); // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", &out), + array_bytes::bytes2hex("", out.as_ref()), "0000000000000000000000000000000000000000000000000000000000000000" ); } @@ -429,11 +389,11 @@ mod tests { )]; // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", &out), + array_bytes::bytes2hex("", out.as_ref()), "aeb47a269393297f4b0a3c9c9cfd00c7a4195255274cf39d83dabc2fcc9ff3d7" ); } @@ -448,11 +408,11 @@ mod tests { ]; // when - let out = merkle_root::(data); + let out = merkle_root::(data); // then assert_eq!( - array_bytes::bytes2hex("", &out), + array_bytes::bytes2hex("", out.as_ref()), "697ea2a8fe5b03468548a7a413424a6292ab44a82a6f5cc594c3fa7dda7ce402" ); } @@ -461,7 +421,10 @@ mod tests { fn should_generate_root_complex() { let _ = env_logger::try_init(); let test = |root, data| { - assert_eq!(array_bytes::bytes2hex("", &merkle_root::(data)), root); + assert_eq!( + array_bytes::bytes2hex("", &merkle_root::(data).as_ref()), + root + ); }; test( @@ -521,18 +484,19 @@ mod tests { // then assert_eq!( - array_bytes::bytes2hex("", &proof0.root), - array_bytes::bytes2hex("", &proof1.root) + array_bytes::bytes2hex("", &proof0.root.as_ref()), + array_bytes::bytes2hex("", &proof1.root.as_ref()) ); assert_eq!( - array_bytes::bytes2hex("", &proof2.root), - array_bytes::bytes2hex("", &proof1.root) + array_bytes::bytes2hex("", &proof2.root.as_ref()), + array_bytes::bytes2hex("", &proof1.root.as_ref()) ); assert!(!verify_proof::( &array_bytes::hex2array_unchecked( "fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239" - ), + ) + .into(), proof0.proof, data.len(), proof0.leaf_index, @@ -540,7 +504,7 @@ mod tests { )); assert!(!verify_proof::( - &proof0.root, + &proof0.root.into(), vec![], data.len(), proof0.leaf_index, @@ -796,9 +760,10 @@ mod tests { "0xA4cDc98593CE52d01Fe5Ca47CB3dA5320e0D7592", "0xc26B34D375533fFc4c5276282Fa5D660F3d8cbcB", ]; - let root = array_bytes::hex2array_unchecked( + let root: H256 = array_bytes::hex2array_unchecked( "72b0acd7c302a84f1f6b6cefe0ba7194b7398afb440e1b44a9dbbe270394ca53", - ); + ) + .into(); let data = addresses .into_iter() @@ -808,7 +773,10 @@ mod tests { for l in 0..data.len() { // when let proof = merkle_proof::(data.clone(), l); - assert_eq!(array_bytes::bytes2hex("", &proof.root), array_bytes::bytes2hex("", &root)); + assert_eq!( + array_bytes::bytes2hex("", &proof.root.as_ref()), + array_bytes::bytes2hex("", &root.as_ref()) + ); assert_eq!(proof.leaf_index, l); assert_eq!(&proof.leaf, &data[l]); @@ -831,16 +799,20 @@ mod tests { proof: vec![ array_bytes::hex2array_unchecked( "340bcb1d49b2d82802ddbcf5b85043edb3427b65d09d7f758fbc76932ad2da2f" - ), + ) + .into(), array_bytes::hex2array_unchecked( "ba0580e5bd530bc93d61276df7969fb5b4ae8f1864b4a28c280249575198ff1f" - ), + ) + .into(), array_bytes::hex2array_unchecked( "d02609d2bbdb28aa25f58b85afec937d5a4c85d37925bce6d0cf802f9d76ba79" - ), + ) + .into(), array_bytes::hex2array_unchecked( "ae3f8991955ed884613b0a5f40295902eea0e0abe5858fc520b72959bc016d4e" - ), + ) + .into(), ], number_of_leaves: data.len(), leaf_index: data.len() - 1, diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs index 456d6e77aa8eb..5b82c89ce84b6 100644 --- a/frame/beefy-mmr/src/lib.rs 
+++ b/frame/beefy-mmr/src/lib.rs @@ -33,7 +33,7 @@ //! //! and thanks to versioning can be easily updated in the future. -use sp_runtime::traits::{Convert, Hash, Member}; +use sp_runtime::traits::{Convert, Member}; use sp_std::prelude::*; use beefy_primitives::{ @@ -142,10 +142,7 @@ pub mod pallet { StorageValue<_, BeefyNextAuthoritySet>, ValueQuery>; } -impl LeafDataProvider for Pallet -where - MerkleRootOf: From + Into, -{ +impl LeafDataProvider for Pallet { type LeafData = MmrLeaf< ::BlockNumber, ::Hash, @@ -163,19 +160,9 @@ where } } -impl beefy_merkle_tree::Hasher for Pallet -where - MerkleRootOf: Into, -{ - fn hash(data: &[u8]) -> beefy_merkle_tree::Hash { - ::Hashing::hash(data).into() - } -} - impl beefy_primitives::OnNewValidatorSet<::BeefyId> for Pallet where T: pallet::Config, - MerkleRootOf: From + Into, { /// Compute and cache BEEFY authority sets based on updated BEEFY validator sets. fn on_new_validator_set( @@ -190,10 +177,7 @@ where } } -impl Pallet -where - MerkleRootOf: From + Into, -{ +impl Pallet { /// Return the currently active BEEFY authority set proof. pub fn authority_set_proof() -> BeefyAuthoritySet> { Pallet::::beefy_authorities() @@ -220,7 +204,10 @@ where .map(T::BeefyAuthorityToMerkleLeaf::convert) .collect::>(); let len = beefy_addresses.len() as u32; - let root = beefy_merkle_tree::merkle_root::(beefy_addresses).into(); + let root = beefy_merkle_tree::merkle_root::<::Hashing, _>( + beefy_addresses, + ) + .into(); BeefyAuthoritySet { id, len, root } } } diff --git a/frame/beefy-mmr/src/mock.rs b/frame/beefy-mmr/src/mock.rs index 602d0aa5fe1a6..0a64ad3fc9976 100644 --- a/frame/beefy-mmr/src/mock.rs +++ b/frame/beefy-mmr/src/mock.rs @@ -147,9 +147,10 @@ impl BeefyDataProvider> for DummyDataProvider { fn extra_data() -> Vec { let mut col = vec![(15, vec![1, 2, 3]), (5, vec![4, 5, 6])]; col.sort(); - beefy_merkle_tree::merkle_root::, _, _>( + beefy_merkle_tree::merkle_root::<::Hashing, _>( col.into_iter().map(|pair| pair.encode()), ) + .as_ref() .to_vec() } } From 005195011f303bf5e2e468dbf379dd42971bf6ae Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 4 Oct 2022 14:34:54 +0300 Subject: [PATCH 203/348] client/beefy: small code improvements (#12414) * client/beefy: remove bounds on type definitions * client/beefy: remove gossip protocol legacy name * client/beefy: simplify justification request response engine Signed-off-by: Adrian Catangiu --- client/beefy/src/communication/mod.rs | 5 --- .../outgoing_requests_engine.rs | 34 ++++++++----------- client/beefy/src/lib.rs | 16 ++------- 3 files changed, 17 insertions(+), 38 deletions(-) diff --git a/client/beefy/src/communication/mod.rs b/client/beefy/src/communication/mod.rs index 93646677c0ecd..91798d4ae0d33 100644 --- a/client/beefy/src/communication/mod.rs +++ b/client/beefy/src/communication/mod.rs @@ -33,9 +33,6 @@ pub(crate) mod beefy_protocol_name { /// BEEFY justifications protocol name suffix. const JUSTIFICATIONS_NAME: &str = "/beefy/justifications/1"; - /// Old names for the gossip protocol, used for backward compatibility. - pub(super) const LEGACY_NAMES: [&str; 1] = ["/paritytech/beefy/1"]; - /// Name of the votes gossip protocol used by BEEFY. /// /// Must be registered towards the networking in order for BEEFY voter to properly function. 
@@ -73,9 +70,7 @@ pub fn beefy_peers_set_config( ) -> sc_network_common::config::NonDefaultSetConfig { let mut cfg = sc_network_common::config::NonDefaultSetConfig::new(gossip_protocol_name, 1024 * 1024); - cfg.allow_non_reserved(25, 25); - cfg.add_fallback_names(beefy_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect()); cfg } diff --git a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs index e22958e19cd2e..c4d3c926190e6 100644 --- a/client/beefy/src/communication/request_response/outgoing_requests_engine.rs +++ b/client/beefy/src/communication/request_response/outgoing_requests_engine.rs @@ -20,10 +20,7 @@ use beefy_primitives::{crypto::AuthorityId, BeefyApi, ValidatorSet}; use codec::Encode; -use futures::{ - channel::{oneshot, oneshot::Canceled}, - stream::{self, StreamExt}, -}; +use futures::channel::{oneshot, oneshot::Canceled}; use log::{debug, error, warn}; use parking_lot::Mutex; use sc_network::{PeerId, ProtocolName}; @@ -50,8 +47,8 @@ type Response = Result, RequestFailure>; type ResponseReceiver = oneshot::Receiver; enum State { - Idle(stream::Pending>), - AwaitingResponse(PeerId, NumberFor, stream::Once), + Idle, + AwaitingResponse(PeerId, NumberFor, ResponseReceiver), } pub struct OnDemandJustificationsEngine { @@ -83,7 +80,7 @@ where protocol_name, live_peers, peers_cache: VecDeque::new(), - state: State::Idle(stream::pending()), + state: State::Idle, } } @@ -118,15 +115,14 @@ where IfDisconnected::ImmediateError, ); - self.state = State::AwaitingResponse(peer, block, stream::once(rx)); + self.state = State::AwaitingResponse(peer, block, rx); } /// If no other request is in progress, start new justification request for `block`. pub fn request(&mut self, block: NumberFor) { // ignore new requests while there's already one pending - match &self.state { - State::AwaitingResponse(_, _, _) => return, - State::Idle(_) => (), + if matches!(self.state, State::AwaitingResponse(_, _, _)) { + return } self.reset_peers_cache_for_block(block); @@ -148,7 +144,7 @@ where "🥩 cancel pending request for justification #{:?}", number ); - self.state = State::Idle(stream::pending()); + self.state = State::Idle; }, _ => (), } @@ -194,19 +190,19 @@ where pub async fn next(&mut self) -> Option> { let (peer, block, resp) = match &mut self.state { - State::Idle(pending) => { - let _ = pending.next().await; - // This never happens since 'stream::pending' never generates any items. + State::Idle => { + futures::pending!(); + // Doesn't happen as 'futures::pending!()' is an 'await' barrier that never passes. return None }, State::AwaitingResponse(peer, block, receiver) => { - let resp = receiver.next().await?; + let resp = receiver.await; (*peer, *block, resp) }, }; - // We received the awaited response. Our 'stream::once()' receiver will never generate any - // other response, meaning we're done with current state. Move the engine to `State::Idle`. - self.state = State::Idle(stream::pending()); + // We received the awaited response. Our 'receiver' will never generate any other response, + // meaning we're done with current state. Move the engine to `State::Idle`. + self.state = State::Idle; let block_id = BlockId::number(block); let validator_set = self diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 7407f101e99a5..760fc753b18a3 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -153,11 +153,7 @@ where } /// BEEFY gadget network parameters. 
-pub struct BeefyNetworkParams -where - B: Block, - N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, -{ +pub struct BeefyNetworkParams { /// Network implementing gossip, requests and sync-oracle. pub network: Arc, /// Chain specific BEEFY gossip protocol name. See @@ -171,15 +167,7 @@ where } /// BEEFY gadget initialization parameters. -pub struct BeefyParams -where - B: Block, - BE: Backend, - C: Client, - R: ProvideRuntimeApi, - R::Api: BeefyApi + MmrApi, - N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, -{ +pub struct BeefyParams { /// BEEFY client pub client: Arc, /// Client Backend From 07e5ec5eb8a9c470bf7dc9afbdb3ac0579be4f4b Mon Sep 17 00:00:00 2001 From: Roman Useinov Date: Tue, 4 Oct 2022 14:26:14 +0200 Subject: [PATCH 204/348] [Fix] Rename VoterBagsList -> VoterList to match pdot (#12416) --- bin/node/runtime/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index f0c68b5b225cd..c2d29731ea2e6 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -569,7 +569,7 @@ impl pallet_staking::Config for Runtime { type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::UnboundedExecution; - type VoterList = VoterBagsList; + type VoterList = VoterList; // This a placeholder, to be introduced in the next PR as an instance of bags-list type TargetList = pallet_staking::UseValidatorsMap; type MaxUnlockingChunks = ConstU32<32>; @@ -1651,7 +1651,7 @@ construct_runtime!( Gilt: pallet_gilt, Uniques: pallet_uniques, TransactionStorage: pallet_transaction_storage, - VoterBagsList: pallet_bags_list::, + VoterList: pallet_bags_list::, StateTrieMigration: pallet_state_trie_migration, ChildBounties: pallet_child_bounties, Referenda: pallet_referenda, @@ -1739,7 +1739,7 @@ mod benches { [pallet_alliance, Alliance] [pallet_assets, Assets] [pallet_babe, Babe] - [pallet_bags_list, VoterBagsList] + [pallet_bags_list, VoterList] [pallet_balances, Balances] [pallet_bounties, Bounties] [pallet_child_bounties, ChildBounties] From d11dd02dda6a00800a13cd59a0c2f07ac75e082d Mon Sep 17 00:00:00 2001 From: Muharem Ismailov Date: Tue, 4 Oct 2022 15:15:57 +0200 Subject: [PATCH 205/348] Use saturating add for alliance::disband witness data (#12418) --- frame/alliance/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/alliance/src/lib.rs b/frame/alliance/src/lib.rs index 24111b44ced9e..fca17e69c7652 100644 --- a/frame/alliance/src/lib.rs +++ b/frame/alliance/src/lib.rs @@ -706,7 +706,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::disband( witness.voting_members, witness.ally_members, - witness.voting_members + witness.ally_members, + witness.voting_members.saturating_add(witness.ally_members), ))] pub fn disband( origin: OriginFor, From 91d072df4273bbc04f8152099b23a66a0c1d531b Mon Sep 17 00:00:00 2001 From: Chevdor Date: Tue, 4 Oct 2022 21:30:45 +0200 Subject: [PATCH 206/348] Bump prost to 0.11+ (#12419) --- Cargo.lock | 16 ++++++++-------- client/authority-discovery/Cargo.toml | 4 ++-- client/network/Cargo.toml | 2 +- client/network/common/Cargo.toml | 2 +- client/network/light/Cargo.toml | 4 ++-- client/network/sync/Cargo.toml | 4 ++-- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f0a2df0f101b..a35dbba7d089e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7830,8 +7830,8 @@ 
dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "quickcheck", "rand 0.7.3", "sc-client-api", @@ -8486,7 +8486,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", "pin-project", - "prost 0.10.3", + "prost 0.11.0", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -8553,7 +8553,7 @@ dependencies = [ "libp2p", "linked_hash_set", "parity-scale-codec", - "prost-build 0.10.4", + "prost-build 0.11.1", "sc-consensus", "sc-peerset", "serde", @@ -8595,8 +8595,8 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "sc-client-api", "sc-network-common", "sc-peerset", @@ -8617,8 +8617,8 @@ dependencies = [ "log", "lru", "parity-scale-codec", - "prost 0.10.3", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "quickcheck", "sc-block-builder", "sc-client-api", diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index d9e9df4f2a97c..37377cdc6dde3 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } @@ -23,7 +23,7 @@ futures-timer = "3.0.1" ip_network = "0.4.1" libp2p = { version = "0.46.1", default-features = false, features = ["kad"] } log = "0.4.17" -prost = "0.10" +prost = "0.11" rand = "0.7.2" thiserror = "1.0" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index e96749df40aa2..8e3d68851c423 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -33,7 +33,7 @@ log = "0.4.17" lru = "0.7.5" parking_lot = "0.12.1" pin-project = "1.0.10" -prost = "0.10" +prost = "0.11" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.85" diff --git a/client/network/common/Cargo.toml b/client/network/common/Cargo.toml index 1ee7b15538366..0e9801ec79e63 100644 --- a/client/network/common/Cargo.toml +++ b/client/network/common/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] async-trait = "0.1.57" diff --git a/client/network/light/Cargo.toml b/client/network/light/Cargo.toml index c2a77c3b577ba..a1a5dcf85eb5d 100644 --- a/client/network/light/Cargo.toml +++ b/client/network/light/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] array-bytes = "4.1" @@ -24,7 +24,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ futures = "0.3.21" libp2p = "0.46.1" log = "0.4.16" -prost = "0.10" +prost = "0.11" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-network-common = { version = "0.10.0-dev", path = "../common" } diff --git a/client/network/sync/Cargo.toml b/client/network/sync/Cargo.toml index 269214aeff3f7..24d418f7233d7 100644 --- a/client/network/sync/Cargo.toml +++ b/client/network/sync/Cargo.toml @@ -14,7 +14,7 @@ readme = "README.md" 
targets = ["x86_64-unknown-linux-gnu"] [build-dependencies] -prost-build = "0.10" +prost-build = "0.11" [dependencies] array-bytes = "4.1" @@ -25,7 +25,7 @@ futures = "0.3.21" libp2p = "0.46.1" log = "0.4.17" lru = "0.7.5" -prost = "0.10" +prost = "0.11" smallvec = "1.8.0" thiserror = "1.0" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } From 241b0d0455453499763d0db0b4ea4188012b372f Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Wed, 5 Oct 2022 00:16:07 +0200 Subject: [PATCH 207/348] Improved election pallet testing (#12327) * Improved election pallet testing * fmt * remove comment * more checks * fixes in logic * roll_to_signed * switch to roll_to_signed * Update frame/election-provider-multi-phase/src/mock.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * remove useless checks * remove warning * add checks to signed.rs * add some checks to unsigned.rs * fmt * use roll_to_signed and roll_to_unsigned * remove nonsense * remove even more nonsense * fix * fix * remove useless checks Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: parity-processbot <> --- .../election-provider-multi-phase/src/lib.rs | 161 +++++++++++++++--- .../election-provider-multi-phase/src/mock.rs | 11 ++ .../src/signed.rs | 130 +++++++++++--- .../src/unsigned.rs | 75 +++++--- 4 files changed, 305 insertions(+), 72 deletions(-) diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index fb17bd25ea541..3dc6161bb202a 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -1842,9 +1842,9 @@ mod tests { use super::*; use crate::{ mock::{ - multi_phase_events, raw_solution, roll_to, AccountId, ExtBuilder, MockWeightInfo, - MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, SignedMaxSubmissions, System, - TargetIndex, Targets, + multi_phase_events, raw_solution, roll_to, roll_to_signed, roll_to_unsigned, AccountId, + ExtBuilder, MockWeightInfo, MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, + SignedMaxSubmissions, System, TargetIndex, Targets, }, Phase, }; @@ -1868,7 +1868,7 @@ mod tests { assert!(MultiPhase::snapshot().is_none()); assert_eq!(MultiPhase::round(), 1); - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert!(MultiPhase::snapshot().is_some()); @@ -1879,7 +1879,7 @@ mod tests { assert!(MultiPhase::snapshot().is_some()); assert_eq!(MultiPhase::round(), 1); - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); assert_eq!( multi_phase_events(), @@ -1912,11 +1912,29 @@ mod tests { roll_to(44); assert!(MultiPhase::current_phase().is_off()); - roll_to(45); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); roll_to(55); assert!(MultiPhase::current_phase().is_unsigned_open_at(55)); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + }, + Event::SignedPhaseStarted { round: 2 }, + Event::UnsignedPhaseStarted { round: 2 } + ] + ); }) } @@ -1940,6 +1958,21 @@ mod tests { assert!(MultiPhase::current_phase().is_off()); 
assert!(MultiPhase::snapshot().is_none()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }); } @@ -1952,7 +1985,7 @@ mod tests { roll_to(19); assert!(MultiPhase::current_phase().is_off()); - roll_to(20); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); assert!(MultiPhase::snapshot().is_some()); @@ -1963,6 +1996,21 @@ mod tests { assert!(MultiPhase::current_phase().is_off()); assert!(MultiPhase::snapshot().is_none()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ) }); } @@ -1985,6 +2033,14 @@ mod tests { assert_ok!(MultiPhase::elect()); assert!(MultiPhase::current_phase().is_off()); + + assert_eq!( + multi_phase_events(), + vec![Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { minimal_stake: 0, sum_stake: 0, sum_stake_squared: 0 } + }] + ); }); } @@ -1993,16 +2049,13 @@ mod tests { // An early termination in the signed phase, with no queued solution. ExtBuilder::default().build_and_execute(|| { // Signed phase started at block 15 and will end at 25. - roll_to(14); - assert_eq!(MultiPhase::current_phase(), Phase::Off); - roll_to(15); + roll_to_signed(); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); // An unexpected call to elect. - roll_to(20); assert_ok!(MultiPhase::elect()); // We surely can't have any feasible solutions. This will cause an on-chain election. @@ -2031,10 +2084,8 @@ mod tests { // an early termination in the signed phase, with no queued solution. ExtBuilder::default().build_and_execute(|| { // signed phase started at block 15 and will end at 25. - roll_to(14); - assert_eq!(MultiPhase::current_phase(), Phase::Off); - roll_to(15); + roll_to_signed(); assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted { round: 1 }]); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!(MultiPhase::round(), 1); @@ -2052,7 +2103,6 @@ mod tests { } // an unexpected call to elect. - roll_to(20); assert_ok!(MultiPhase::elect()); // all storage items must be cleared. 
@@ -2062,16 +2112,38 @@ mod tests { assert!(MultiPhase::desired_targets().is_none()); assert!(MultiPhase::queued_solution().is_none()); assert!(MultiPhase::signed_submissions().is_empty()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::Slashed { account: 99, value: 5 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }) } #[test] fn check_events_with_compute_signed() { ExtBuilder::default().build_and_execute(|| { - roll_to(14); - assert_eq!(MultiPhase::current_phase(), Phase::Off); - - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -2106,7 +2178,7 @@ mod tests { #[test] fn check_events_with_compute_unsigned() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // ensure we have snapshots in place. @@ -2125,7 +2197,6 @@ mod tests { )); assert!(MultiPhase::queued_solution().is_some()); - roll_to(30); assert_ok!(MultiPhase::elect()); assert_eq!( @@ -2153,7 +2224,7 @@ mod tests { #[test] fn fallback_strategy_works() { ExtBuilder::default().onchain_fallback(true).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far, but we get a result. @@ -2166,11 +2237,27 @@ mod tests { (30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }), (40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }) ] - ) + ); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }); ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far. @@ -2178,13 +2265,22 @@ mod tests { assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback.")); // phase is now emergency. assert_eq!(MultiPhase::current_phase(), Phase::Emergency); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFailed + ] + ); }) } #[test] fn governance_fallback_works() { ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // Zilch solutions thus far. @@ -2243,9 +2339,16 @@ mod tests { assert_eq!(MultiPhase::current_phase(), Phase::Off); // On-chain backup works though. 
- roll_to(29); let supports = MultiPhase::elect().unwrap(); assert!(supports.len() > 0); + + assert_eq!( + multi_phase_events(), + vec![Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { minimal_stake: 0, sum_stake: 0, sum_stake_squared: 0 } + }] + ); }); } @@ -2269,6 +2372,8 @@ mod tests { let err = MultiPhase::elect().unwrap_err(); assert_eq!(err, ElectionError::Fallback("NoFallback.")); assert_eq!(MultiPhase::current_phase(), Phase::Emergency); + + assert_eq!(multi_phase_events(), vec![Event::ElectionFailed]); }); } @@ -2282,7 +2387,7 @@ mod tests { crate::mock::MaxElectingVoters::set(2); // Signed phase opens just fine. - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert_eq!( @@ -2295,7 +2400,7 @@ mod tests { #[test] fn untrusted_score_verification_is_respected() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); // set the solution balancing to get the desired score. diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index d3082be0cf750..2615d863c91e0 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -99,6 +99,17 @@ pub fn roll_to(n: BlockNumber) { } } +pub fn roll_to_unsigned() { + while !matches!(MultiPhase::current_phase(), Phase::Unsigned(_)) { + roll_to(System::block_number() + 1); + } +} +pub fn roll_to_signed() { + while !matches!(MultiPhase::current_phase(), Phase::Signed) { + roll_to(System::block_number() + 1); + } +} + pub fn roll_to_with_ocw(n: BlockNumber) { let now = System::block_number(); for i in now + 1..=n { diff --git a/frame/election-provider-multi-phase/src/signed.rs b/frame/election-provider-multi-phase/src/signed.rs index 2e01d99be0a42..175c92757f35e 100644 --- a/frame/election-provider-multi-phase/src/signed.rs +++ b/frame/election-provider-multi-phase/src/signed.rs @@ -528,10 +528,11 @@ mod tests { use super::*; use crate::{ mock::{ - balances, raw_solution, roll_to, Balances, ExtBuilder, MockedWeightInfo, MultiPhase, - Runtime, RuntimeOrigin, SignedMaxRefunds, SignedMaxSubmissions, SignedMaxWeight, + balances, multi_phase_events, raw_solution, roll_to, roll_to_signed, Balances, + ExtBuilder, MockedWeightInfo, MultiPhase, Runtime, RuntimeOrigin, SignedMaxRefunds, + SignedMaxSubmissions, SignedMaxWeight, }, - Error, Perbill, Phase, + Error, Event, Perbill, Phase, }; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; @@ -555,7 +556,7 @@ mod tests { #[test] fn should_pay_deposit() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -565,13 +566,21 @@ mod tests { assert_eq!(balances(&99), (95, 5)); assert_eq!(MultiPhase::signed_submissions().iter().next().unwrap().deposit, 5); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false } + ] + ); }) } #[test] fn good_solution_is_rewarded() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -582,13 +591,22 @@ mod tests { assert!(MultiPhase::finalize_signed_phase()); assert_eq!(balances(&99), (100 + 7 + 8, 0)); + + assert_eq!( + multi_phase_events(), + vec![ + 
Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } #[test] fn bad_solution_is_slashed() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let mut solution = raw_solution(); @@ -604,13 +622,22 @@ mod tests { assert!(!MultiPhase::finalize_signed_phase()); // and the bond is gone. assert_eq!(balances(&99), (95, 0)); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Slashed { account: 99, value: 5 } + ] + ); }) } #[test] fn suppressed_solution_gets_bond_back() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let mut solution = raw_solution(); @@ -633,13 +660,22 @@ mod tests { assert_eq!(balances(&99), (100 + 7 + 8, 0)); // 999 gets everything back, including the call fee. assert_eq!(balances(&999), (100 + 8, 0)); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } #[test] fn cannot_submit_worse_with_full_queue() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -667,7 +703,7 @@ mod tests { #[test] fn call_fee_refund_is_limited_by_signed_max_refunds() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); assert_eq!(SignedMaxRefunds::get(), 1); assert!(SignedMaxSubmissions::get() > 2); @@ -683,7 +719,7 @@ mod tests { assert_eq!(balances(&account), (95, 5)); } - assert!(MultiPhase::finalize_signed_phase()); + assert_ok!(MultiPhase::do_elect()); for s in 0..SignedMaxSubmissions::get() { let account = 99 + s as u64; @@ -699,6 +735,26 @@ mod tests { assert_eq!(balances(&account), (100, 0)); } } + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 }, + Event::ElectionFinalized { + compute: ElectionCompute::Signed, + score: ElectionScore { + minimal_stake: 40, + sum_stake: 100, + sum_stake_squared: 5200 + } + } + ] + ); }); } @@ -708,7 +764,7 @@ mod tests { .signed_max_submission(1) .better_signed_threshold(Perbill::from_percent(20)) .build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let mut solution = RawSolution { @@ -747,13 +803,27 @@ mod tests { }; assert_ok!(MultiPhase::submit(RuntimeOrigin::signed(99), Box::new(solution))); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { + compute: ElectionCompute::Signed, + 
prev_ejected: false + }, + Event::SolutionStored { + compute: ElectionCompute::Signed, + prev_ejected: true + } + ] + ); }) } #[test] fn weakest_is_removed_if_better_provided() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -800,7 +870,7 @@ mod tests { #[test] fn replace_weakest_works() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 1..SignedMaxSubmissions::get() { @@ -847,7 +917,7 @@ mod tests { #[test] fn early_ejected_solution_gets_bond_back() { ExtBuilder::default().signed_deposit(2, 0, 0).build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -878,7 +948,7 @@ mod tests { #[test] fn equally_good_solution_is_not_accepted() { ExtBuilder::default().signed_max_submission(3).build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for i in 0..SignedMaxSubmissions::get() { @@ -915,7 +985,7 @@ mod tests { // - bad_solution_is_slashed // - suppressed_solution_gets_bond_back ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); assert_eq!(balances(&99), (100, 0)); @@ -951,6 +1021,17 @@ mod tests { assert_eq!(balances(&999), (95, 0)); // 9999 gets everything back, including the call fee. assert_eq!(balances(&9999), (100 + 8, 0)); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Slashed { account: 999, value: 5 }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } @@ -960,7 +1041,7 @@ mod tests { .signed_weight(Weight::from_ref_time(40).set_proof_size(u64::MAX)) .mock_weight_info(MockedWeightInfo::Basic) .build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let (raw, witness) = MultiPhase::mine_solution().unwrap(); @@ -994,7 +1075,7 @@ mod tests { #[test] fn insufficient_deposit_does_not_store_submission() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -1014,7 +1095,7 @@ mod tests { #[test] fn insufficient_deposit_with_full_queue_works_properly() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); for s in 0..SignedMaxSubmissions::get() { @@ -1060,7 +1141,7 @@ mod tests { #[test] fn finalize_signed_phase_is_idempotent_given_submissions() { ExtBuilder::default().build_and_execute(|| { - roll_to(15); + roll_to_signed(); assert!(MultiPhase::current_phase().is_signed()); let solution = raw_solution(); @@ -1073,6 +1154,15 @@ mod tests { // calling it again doesn't change anything assert_storage_noop!(MultiPhase::finalize_signed_phase()); + + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::SolutionStored { compute: ElectionCompute::Signed, prev_ejected: false }, + Event::Rewarded { account: 99, value: 7 } + ] + ); }) } } diff --git 
a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index 025ff832bb08a..7340605dfe621 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -1050,11 +1050,12 @@ mod tests { use super::*; use crate::{ mock::{ - roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, - MinerMaxWeight, MultiPhase, Runtime, RuntimeCall, RuntimeOrigin, System, - TestNposSolution, TrimHelpers, UnsignedPhase, + multi_phase_events, roll_to, roll_to_signed, roll_to_unsigned, roll_to_with_ocw, + trim_helpers, witness, BlockNumber, ExtBuilder, Extrinsic, MinerMaxWeight, MultiPhase, + Runtime, RuntimeCall, RuntimeOrigin, System, TestNposSolution, TrimHelpers, + UnsignedPhase, }, - CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, + CurrentPhase, Event, InvalidTransaction, Phase, QueuedSolution, TransactionSource, TransactionValidityError, }; use codec::Decode; @@ -1100,7 +1101,7 @@ mod tests { )); // signed - roll_to(15); + roll_to_signed(); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert!(matches!( ::validate_unsigned( @@ -1116,7 +1117,7 @@ mod tests { )); // unsigned - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); assert!(::validate_unsigned( @@ -1147,7 +1148,7 @@ mod tests { #[test] fn validate_unsigned_retracts_low_score() { ExtBuilder::default().desired_targets(0).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let solution = RawSolution:: { @@ -1193,7 +1194,7 @@ mod tests { #[test] fn validate_unsigned_retracts_incorrect_winner_count() { ExtBuilder::default().desired_targets(1).build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let raw = RawSolution:: { @@ -1222,7 +1223,7 @@ mod tests { .miner_tx_priority(20) .desired_targets(0) .build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let solution = RawSolution:: { @@ -1253,7 +1254,7 @@ mod tests { Some(\"PreDispatchWrongWinnerCount\") })")] fn unfeasible_solution_panics() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // This is in itself an invalid BS solution. @@ -1275,7 +1276,7 @@ mod tests { deprive validator from their authoring reward.")] fn wrong_witness_panics() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // This solution is unfeasible as well, but we won't even get there. @@ -1299,7 +1300,7 @@ mod tests { #[test] fn miner_works() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // ensure we have snapshots in place. 
@@ -1317,6 +1318,17 @@ mod tests { witness )); assert!(MultiPhase::queued_solution().is_some()); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::SolutionStored { + compute: ElectionCompute::Unsigned, + prev_ejected: false + } + ] + ); }) } @@ -1326,7 +1338,7 @@ mod tests { .miner_weight(Weight::from_ref_time(100).set_proof_size(u64::MAX)) .mock_weight_info(crate::mock::MockedWeightInfo::Basic) .build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); let (raw, witness) = MultiPhase::mine_solution().unwrap(); @@ -1360,7 +1372,7 @@ mod tests { fn miner_will_not_submit_if_not_enough_winners() { let (mut ext, _) = ExtBuilder::default().desired_targets(8).build_offchainify(0); ext.execute_with(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // Force the number of winners to be bigger to fail @@ -1386,7 +1398,7 @@ mod tests { .add_voter(8, 5, bounded_vec![10]) .better_unsigned_threshold(Perbill::from_percent(50)) .build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); assert_eq!(MultiPhase::desired_targets().unwrap(), 1); @@ -1488,7 +1500,7 @@ mod tests { ext.execute_with(|| { let offchain_repeat = ::OffchainRepeat::get(); - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // first execution -- okay. @@ -1529,7 +1541,7 @@ mod tests { let guard = StorageValueRef::persistent(&OFFCHAIN_LOCK); let last_block = StorageValueRef::persistent(OFFCHAIN_LAST_BLOCK); - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // initially, the lock is not set. @@ -1550,7 +1562,7 @@ mod tests { // ensure that if the guard is in hold, a new execution is not allowed. let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { - roll_to(25); + roll_to_unsigned(); assert!(MultiPhase::current_phase().is_unsigned()); // artificially set the value, as if another thread is mid-way. @@ -1578,7 +1590,7 @@ mod tests { fn ocw_only_runs_when_unsigned_open_now() { let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { - roll_to(25); + roll_to_unsigned(); assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); // we must clear the offchain storage to ensure the offchain execution check doesn't get @@ -1658,6 +1670,21 @@ mod tests { // the submitted solution changes because the cache was cleared. assert_eq!(tx_cache_1, tx_cache_3); + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted { round: 1 }, + Event::UnsignedPhaseStarted { round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + } + ] + ); }) } @@ -1797,7 +1824,7 @@ mod tests { #[test] fn trim_assignments_length_does_not_modify_when_short_enough() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); // given let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); @@ -1822,7 +1849,7 @@ mod tests { #[test] fn trim_assignments_length_modifies_when_too_long() { ExtBuilder::default().build().execute_with(|| { - roll_to(25); + roll_to_unsigned(); // given let TrimHelpers { mut assignments, encoded_size_of, .. 
} = trim_helpers(); @@ -1848,7 +1875,7 @@ mod tests { #[test] fn trim_assignments_length_trims_lowest_stake() { ExtBuilder::default().build().execute_with(|| { - roll_to(25); + roll_to_unsigned(); // given let TrimHelpers { voters, mut assignments, encoded_size_of, voter_index } = @@ -1911,7 +1938,7 @@ mod tests { // or when we trim it to zero. ExtBuilder::default().build_and_execute(|| { // we need snapshot for `trim_helpers` to work. - roll_to(25); + roll_to_unsigned(); let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); assert!(assignments.len() > 0); @@ -1933,7 +1960,7 @@ mod tests { #[test] fn mine_solution_solutions_always_within_acceptable_length() { ExtBuilder::default().build_and_execute(|| { - roll_to(25); + roll_to_unsigned(); // how long would the default solution be? let solution = MultiPhase::mine_solution().unwrap(); From 7a8de4995715cc6cd11a79eb262bf41e5b190943 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Wed, 5 Oct 2022 13:10:20 +0200 Subject: [PATCH 208/348] Adapt `pallet-contracts` to WeightV2 (#12421) * Replace contract access weight by proper PoV component * Return the whole weight struct from dry-runs * Fixup `seal_call` and `seal_instantiate` * Fix duplicate extrinsics * Remove ContractAccessWeight from runtime * Fix doc link * Remove leftover debugging output --- Cargo.lock | 1 + bin/node/runtime/src/lib.rs | 1 - frame/contracts/primitives/Cargo.toml | 1 + frame/contracts/primitives/src/lib.rs | 11 +- frame/contracts/src/gas.rs | 61 +++++---- frame/contracts/src/lib.rs | 181 +++++-------------------- frame/contracts/src/tests.rs | 36 ++--- frame/contracts/src/wasm/code_cache.rs | 15 +- 8 files changed, 96 insertions(+), 211 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a35dbba7d089e..309742e5bf17e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5531,6 +5531,7 @@ dependencies = [ "parity-scale-codec", "sp-runtime", "sp-std", + "sp-weights", ] [[package]] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index c2d29731ea2e6..4898312f9608f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1175,7 +1175,6 @@ impl pallet_contracts::Config for Runtime { type DeletionWeightLimit = DeletionWeightLimit; type Schedule = Schedule; type AddressGenerator = pallet_contracts::DefaultAddressGenerator; - type ContractAccessWeight = pallet_contracts::DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; } diff --git a/frame/contracts/primitives/Cargo.toml b/frame/contracts/primitives/Cargo.toml index 64e332007350b..c8b7c4a2f7c37 100644 --- a/frame/contracts/primitives/Cargo.toml +++ b/frame/contracts/primitives/Cargo.toml @@ -19,6 +19,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = # Substrate Dependencies (This crate should not rely on frame) sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-weights = { version = "4.0.0", default-features = false, path = "../../../primitives/weights" } [features] default = ["std"] diff --git a/frame/contracts/primitives/src/lib.rs b/frame/contracts/primitives/src/lib.rs index 5daf875ac2651..4faea9eb3ee75 100644 --- a/frame/contracts/primitives/src/lib.rs +++ b/frame/contracts/primitives/src/lib.rs @@ -26,17 +26,18 @@ use sp_runtime::{ DispatchError, RuntimeDebug, }; use sp_std::prelude::*; +use 
sp_weights::Weight; /// Result type of a `bare_call` or `bare_instantiate` call. /// /// It contains the execution result together with some auxiliary information. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] pub struct ContractResult { - /// How much gas was consumed during execution. - pub gas_consumed: u64, - /// How much gas is required as gas limit in order to execute this call. + /// How much weight was consumed during execution. + pub gas_consumed: Weight, + /// How much weight is required as gas limit in order to execute this call. /// - /// This value should be used to determine the gas limit for on-chain execution. + /// This value should be used to determine the weight limit for on-chain execution. /// /// # Note /// @@ -44,7 +45,7 @@ pub struct ContractResult { /// is used. Currently, only `seal_call_runtime` makes use of pre charging. /// Additionally, any `seal_call` or `seal_instantiate` makes use of pre-charging /// when a non-zero `gas_limit` argument is supplied. - pub gas_required: u64, + pub gas_required: Weight, /// How much balance was deposited and reserved during execution in order to pay for storage. /// /// The storage deposit is never actually charged from the caller in case of [`Self::result`] diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 215b4d42daa06..d0076652dd6d4 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -107,32 +107,45 @@ where /// /// Passing `0` as amount is interpreted as "all remaining gas". pub fn nested(&mut self, amount: Weight) -> Result { - let amount = if amount == Weight::zero() { self.gas_left } else { amount }; - // NOTE that it is ok to allocate all available gas since it still ensured // by `charge` that it doesn't reach zero. - if self.gas_left.any_lt(amount) { - Err(>::OutOfGas.into()) - } else { - self.gas_left -= amount; - Ok(GasMeter::new(amount)) - } + let amount = Weight::from_components( + if amount.ref_time().is_zero() { + self.gas_left().ref_time() + } else { + amount.ref_time() + }, + if amount.proof_size().is_zero() { + self.gas_left().proof_size() + } else { + amount.proof_size() + }, + ); + self.gas_left = self.gas_left.checked_sub(&amount).ok_or_else(|| >::OutOfGas)?; + Ok(GasMeter::new(amount)) } /// Absorb the remaining gas of a nested meter after we are done using it. pub fn absorb_nested(&mut self, nested: Self) { - if self.gas_left == Weight::zero() { + if self.gas_left.ref_time().is_zero() { // All of the remaining gas was inherited by the nested gas meter. When absorbing // we can therefore safely inherit the lowest gas that the nested gas meter experienced // as long as it is lower than the lowest gas that was experienced by the parent. // We cannot call `self.gas_left_lowest()` here because in the state that this // code is run the parent gas meter has `0` gas left. - self.gas_left_lowest = nested.gas_left_lowest().min(self.gas_left_lowest); + *self.gas_left_lowest.ref_time_mut() = + nested.gas_left_lowest().ref_time().min(self.gas_left_lowest.ref_time()); } else { // The nested gas meter was created with a fixed amount that did not consume all of the // parents (self) gas. The lowest gas that self will experience is when the nested // gas was pre charged with the fixed amount. 
- self.gas_left_lowest = self.gas_left_lowest(); + *self.gas_left_lowest.ref_time_mut() = self.gas_left_lowest().ref_time(); + } + if self.gas_left.proof_size().is_zero() { + *self.gas_left_lowest.proof_size_mut() = + nested.gas_left_lowest().proof_size().min(self.gas_left_lowest.proof_size()); + } else { + *self.gas_left_lowest.proof_size_mut() = self.gas_left_lowest().proof_size(); } self.gas_left += nested.gas_left; } @@ -155,17 +168,11 @@ where ErasedToken { description: format!("{:?}", token), token: Box::new(token) }; self.tokens.push(erased_tok); } - let amount = token.weight(); - let new_value = self.gas_left.checked_sub(&amount); - - // We always consume the gas even if there is not enough gas. - self.gas_left = new_value.unwrap_or_else(Zero::zero); - - match new_value { - Some(_) => Ok(ChargedAmount(amount)), - None => Err(Error::::OutOfGas.into()), - } + // It is OK to not charge anything on failure because we always charge _before_ we perform + // any action + self.gas_left = self.gas_left.checked_sub(&amount).ok_or_else(|| Error::::OutOfGas)?; + Ok(ChargedAmount(amount)) } /// Adjust a previously charged amount down to its actual amount. @@ -298,20 +305,16 @@ mod tests { assert!(gas_meter.charge(SimpleToken(1)).is_err()); } - // Make sure that if the gas meter is charged by exceeding amount then not only an error - // returned for that charge, but also for all consequent charges. - // - // This is not strictly necessary, because the execution should be interrupted immediately - // if the gas meter runs out of gas. However, this is just a nice property to have. + // Make sure that the gas meter does not charge in case of overcharger #[test] - fn overcharge_is_unrecoverable() { + fn overcharge_does_not_charge() { let mut gas_meter = GasMeter::::new(Weight::from_ref_time(200)); // The first charge is should lead to OOG. assert!(gas_meter.charge(SimpleToken(300)).is_err()); - // The gas meter is emptied at this moment, so this should also fail. - assert!(gas_meter.charge(SimpleToken(1)).is_err()); + // The gas meter should still contain the full 200. + assert!(gas_meter.charge(SimpleToken(200)).is_ok()); } // Charging the exact amount that the user paid for should be diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 3aeb8742705c2..0c90c3ff433b4 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -107,7 +107,7 @@ use crate::{ }; use codec::{Encode, HasCompact}; use frame_support::{ - dispatch::{DispatchClass, Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo}, + dispatch::{Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo}, ensure, traits::{ tokens::fungible::Inspect, ConstU32, Contains, Currency, Get, Randomness, @@ -116,7 +116,7 @@ use frame_support::{ weights::{OldWeight, Weight}, BoundedVec, WeakBoundedVec, }; -use frame_system::{limits::BlockWeights, Pallet as System}; +use frame_system::Pallet as System; use pallet_contracts_primitives::{ Code, CodeUploadResult, CodeUploadReturnValue, ContractAccessError, ContractExecResult, ContractInstantiateResult, ExecReturnValue, GetStorageResult, InstantiateReturnValue, @@ -199,29 +199,6 @@ where } } -/// A conservative implementation to be used for [`pallet::Config::ContractAccessWeight`]. -/// -/// This derives the weight from the [`BlockWeights`] passed as `B` and the `maxPovSize` passed -/// as `P`. The default value for `P` is the `maxPovSize` used by Polkadot and Kusama. 
-/// -/// It simply charges from the weight meter pro rata: If loading the contract code would consume -/// 50% of the max storage proof then this charges 50% of the max block weight. -pub struct DefaultContractAccessWeight, const P: u32 = 5_242_880>( - PhantomData, -); - -impl, const P: u32> Get for DefaultContractAccessWeight { - fn get() -> Weight { - let block_weights = B::get(); - block_weights - .per_class - .get(DispatchClass::Normal) - .max_total - .unwrap_or(block_weights.max_block) / - u64::from(P) - } -} - #[frame_support::pallet] pub mod pallet { use super::*; @@ -334,27 +311,6 @@ pub mod pallet { #[pallet::constant] type DepositPerByte: Get>; - /// The weight per byte of code that is charged when loading a contract from storage. - /// - /// Currently, FRAME only charges fees for computation incurred but not for PoV - /// consumption caused for storage access. This is usually not exploitable because - /// accessing storage carries some substantial weight costs, too. However in case - /// of contract code very much PoV consumption can be caused while consuming very little - /// computation. This could be used to keep the chain busy without paying the - /// proper fee for it. Until this is resolved we charge from the weight meter for - /// contract access. - /// - /// For more information check out: - /// - /// [`DefaultContractAccessWeight`] is a safe default to be used for Polkadot or Kusama - /// parachains. - /// - /// # Note - /// - /// This is only relevant for parachains. Set to zero in case of a standalone chain. - #[pallet::constant] - type ContractAccessWeight: Get; - /// The amount of balance a caller has to pay for each storage item. /// /// # Note @@ -413,23 +369,8 @@ pub mod pallet { T::AccountId: AsRef<[u8]>, as HasCompact>::Type: Clone + Eq + PartialEq + Debug + TypeInfo + Encode, { - /// Makes a call to an account, optionally transferring some balance. - /// - /// # Parameters - /// - /// * `dest`: Address of the contract to call. - /// * `value`: The balance to transfer from the `origin` to `dest`. - /// * `gas_limit`: The gas limit enforced when executing the constructor. - /// * `storage_deposit_limit`: The maximum amount of balance that can be charged from the - /// caller to pay for the storage consumed. - /// * `data`: The input data to pass to the contract. - /// - /// * If the account is a smart-contract account, the associated code will be - /// executed and any value will be transferred. - /// * If the account is a regular account, any value will be transferred. - /// * If no account exists and the call value is not less than `existential_deposit`, - /// a regular account will be created and any value will be transferred. - #[pallet::weight(T::WeightInfo::call().saturating_add((*gas_limit).into()))] + /// Deprecated version if [`Self::call`] for use in an in-storage `Call`. 
+ #[pallet::weight(T::WeightInfo::call().saturating_add(>::compat_weight(*gas_limit)))] #[allow(deprecated)] #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `call`")] pub fn call_old_weight( @@ -440,55 +381,20 @@ pub mod pallet { storage_deposit_limit: Option< as codec::HasCompact>::Type>, data: Vec, ) -> DispatchResultWithPostInfo { - let gas_limit: Weight = gas_limit.into(); - let origin = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - let mut output = Self::internal_call( + Self::call( origin, dest, value, - gas_limit, - storage_deposit_limit.map(Into::into), + >::compat_weight(gas_limit), + storage_deposit_limit, data, - None, - ); - if let Ok(retval) = &output.result { - if retval.did_revert() { - output.result = Err(>::ContractReverted.into()); - } - } - output.gas_meter.into_dispatch_result(output.result, T::WeightInfo::call()) + ) } - /// Instantiates a new contract from the supplied `code` optionally transferring - /// some balance. - /// - /// This dispatchable has the same effect as calling [`Self::upload_code`] + - /// [`Self::instantiate`]. Bundling them together provides efficiency gains. Please - /// also check the documentation of [`Self::upload_code`]. - /// - /// # Parameters - /// - /// * `value`: The balance to transfer from the `origin` to the newly created contract. - /// * `gas_limit`: The gas limit enforced when executing the constructor. - /// * `storage_deposit_limit`: The maximum amount of balance that can be charged/reserved - /// from the caller to pay for the storage consumed. - /// * `code`: The contract code to deploy in raw bytes. - /// * `data`: The input data to pass to the contract constructor. - /// * `salt`: Used for the address derivation. See [`Pallet::contract_address`]. - /// - /// Instantiation is executed as follows: - /// - /// - The supplied `code` is instrumented, deployed, and a `code_hash` is created for that - /// code. - /// - If the `code_hash` already exists on the chain the underlying `code` will be shared. - /// - The destination address is computed based on the sender, code_hash and the salt. - /// - The smart-contract account is created at the computed address. - /// - The `value` is transferred to the new account. - /// - The `deploy` function is executed in the context of the newly-created account. + /// Deprecated version if [`Self::instantiate_with_code`] for use in an in-storage `Call`. #[pallet::weight( T::WeightInfo::instantiate_with_code(code.len() as u32, salt.len() as u32) - .saturating_add((*gas_limit).into()) + .saturating_add(>::compat_weight(*gas_limit)) )] #[allow(deprecated)] #[deprecated( @@ -503,38 +409,20 @@ pub mod pallet { data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { - let gas_limit: Weight = gas_limit.into(); - let origin = ensure_signed(origin)?; - let code_len = code.len() as u32; - let salt_len = salt.len() as u32; - let mut output = Self::internal_instantiate( + Self::instantiate_with_code( origin, value, - gas_limit, - storage_deposit_limit.map(Into::into), - Code::Upload(code), + >::compat_weight(gas_limit), + storage_deposit_limit, + code, data, salt, - None, - ); - if let Ok(retval) = &output.result { - if retval.1.did_revert() { - output.result = Err(>::ContractReverted.into()); - } - } - output.gas_meter.into_dispatch_result( - output.result.map(|(_address, result)| result), - T::WeightInfo::instantiate_with_code(code_len, salt_len), ) } - /// Instantiates a contract from a previously deployed wasm binary. 
- /// - /// This function is identical to [`Self::instantiate_with_code`] but without the - /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary - /// must be supplied. + /// Deprecated version if [`Self::instantiate`] for use in an in-storage `Call`. #[pallet::weight( - T::WeightInfo::instantiate(salt.len() as u32).saturating_add((*gas_limit).into()) + T::WeightInfo::instantiate(salt.len() as u32).saturating_add(>::compat_weight(*gas_limit)) )] #[allow(deprecated)] #[deprecated(note = "1D weight is used in this extrinsic, please migrate to `instantiate`")] @@ -547,27 +435,14 @@ pub mod pallet { data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { - let gas_limit: Weight = gas_limit.into(); - let origin = ensure_signed(origin)?; - let salt_len = salt.len() as u32; - let mut output = Self::internal_instantiate( + Self::instantiate( origin, value, - gas_limit, - storage_deposit_limit.map(Into::into), - Code::Existing(code_hash), + >::compat_weight(gas_limit), + storage_deposit_limit, + code_hash, data, salt, - None, - ); - if let Ok(retval) = &output.result { - if retval.1.did_revert() { - output.result = Err(>::ContractReverted.into()); - } - } - output.gas_meter.into_dispatch_result( - output.result.map(|(_address, output)| output), - T::WeightInfo::instantiate(salt_len), ) } @@ -1059,8 +934,8 @@ where ); ContractExecResult { result: output.result.map_err(|r| r.error), - gas_consumed: output.gas_meter.gas_consumed().ref_time(), - gas_required: output.gas_meter.gas_required().ref_time(), + gas_consumed: output.gas_meter.gas_consumed(), + gas_required: output.gas_meter.gas_required(), storage_deposit: output.storage_deposit, debug_message: debug_message.unwrap_or_default(), } @@ -1104,8 +979,8 @@ where .result .map(|(account_id, result)| InstantiateReturnValue { result, account_id }) .map_err(|e| e.error), - gas_consumed: output.gas_meter.gas_consumed().ref_time(), - gas_required: output.gas_meter.gas_required().ref_time(), + gas_consumed: output.gas_meter.gas_consumed(), + gas_required: output.gas_meter.gas_required(), storage_deposit: output.storage_deposit, debug_message: debug_message.unwrap_or_default(), } @@ -1287,4 +1162,12 @@ where fn min_balance() -> BalanceOf { >>::minimum_balance() } + + /// Convert a 1D Weight to a 2D weight. + /// + /// Used by backwards compatible extrinsics. We cannot just set the proof to zero + /// or an old `Call` will just fail. 
+ fn compat_weight(gas_limit: OldWeight) -> Weight { + Weight::from(gas_limit).set_proof_size(u64::from(T::MaxCodeLen::get()) * 2) + } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index b4a8f8f4c834f..6a2144840143a 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -26,8 +26,8 @@ use crate::{ tests::test_utils::{get_contract, get_contract_checked}, wasm::{PrefabWasmModule, ReturnCode as RuntimeReturnCode}, weights::WeightInfo, - BalanceOf, Code, CodeStorage, Config, ContractInfoOf, DefaultAddressGenerator, - DefaultContractAccessWeight, DeletionQueue, Error, Pallet, Schedule, + BalanceOf, Code, CodeStorage, Config, ContractInfoOf, DefaultAddressGenerator, DeletionQueue, + Error, Pallet, Schedule, }; use assert_matches::assert_matches; use codec::Encode; @@ -404,7 +404,6 @@ impl Config for Test { type DepositPerByte = DepositPerByte; type DepositPerItem = DepositPerItem; type AddressGenerator = DefaultAddressGenerator; - type ContractAccessWeight = DefaultContractAccessWeight; type MaxCodeLen = ConstU32<{ 128 * 1024 }>; type MaxStorageKeyLen = ConstU32<128>; } @@ -414,7 +413,7 @@ pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000).set_proof_size(u64::MAX); +pub const GAS_LIMIT: Weight = Weight::from_ref_time(100_000_000_000).set_proof_size(256 * 1024); pub struct ExtBuilder { existential_deposit: u64, @@ -674,7 +673,7 @@ fn run_out_of_gas() { RuntimeOrigin::signed(ALICE), addr, // newly created account 0, - Weight::from_ref_time(1_000_000_000_000), + Weight::from_ref_time(1_000_000_000_000).set_proof_size(u64::MAX), None, vec![], ), @@ -1760,7 +1759,7 @@ fn chain_extension_works() { false, ); assert_ok!(result.result); - assert_eq!(result.gas_consumed, gas_consumed + 42); + assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 42); let result = Contracts::bare_call( ALICE, addr.clone(), @@ -1771,7 +1770,7 @@ fn chain_extension_works() { false, ); assert_ok!(result.result); - assert_eq!(result.gas_consumed, gas_consumed + 95); + assert_eq!(result.gas_consumed.ref_time(), gas_consumed.ref_time() + 95); // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer let result = Contracts::bare_call( @@ -2409,10 +2408,11 @@ fn reinstrument_does_charge() { let result2 = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, None, zero.clone(), false); assert!(!result2.result.unwrap().did_revert()); - assert!(result2.gas_consumed > result1.gas_consumed); + assert!(result2.gas_consumed.ref_time() > result1.gas_consumed.ref_time()); assert_eq!( - result2.gas_consumed, - result1.gas_consumed + ::WeightInfo::reinstrument(code_len).ref_time(), + result2.gas_consumed.ref_time(), + result1.gas_consumed.ref_time() + + ::WeightInfo::reinstrument(code_len).ref_time(), ); }); } @@ -2536,7 +2536,7 @@ fn gas_estimation_nested_call_fixed_limit() { assert_ok!(&result.result); // We have a subcall with a fixed gas limit. This constitutes precharging. - assert!(result.gas_required > result.gas_consumed); + assert!(result.gas_required.ref_time() > result.gas_consumed.ref_time()); // Make the same call using the estimated gas. Should succeed. 
assert_ok!( @@ -2544,7 +2544,7 @@ fn gas_estimation_nested_call_fixed_limit() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), + result.gas_required, Some(result.storage_deposit.charge_or_zero()), input, false, @@ -2557,6 +2557,7 @@ fn gas_estimation_nested_call_fixed_limit() { #[test] #[cfg(feature = "unstable-interface")] fn gas_estimation_call_runtime() { + use codec::Decode; let (caller_code, caller_hash) = compile_module::("call_runtime").unwrap(); let (callee_code, callee_hash) = compile_module::("dummy").unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { @@ -2591,7 +2592,7 @@ fn gas_estimation_call_runtime() { let call = RuntimeCall::Contracts(crate::Call::call { dest: addr_callee, value: 0, - gas_limit: GAS_LIMIT / 3, + gas_limit: GAS_LIMIT.set_ref_time(GAS_LIMIT.ref_time() / 3), storage_deposit_limit: None, data: vec![], }); @@ -2604,9 +2605,10 @@ fn gas_estimation_call_runtime() { call.encode(), false, ); - assert_ok!(&result.result); - - assert!(result.gas_required > result.gas_consumed); + // contract encodes the result of the dispatch runtime + let outcome = u32::decode(&mut result.result.unwrap().data.as_ref()).unwrap(); + assert_eq!(outcome, 0); + assert!(result.gas_required.ref_time() > result.gas_consumed.ref_time()); // Make the same call using the required gas. Should succeed. assert_ok!( @@ -2614,7 +2616,7 @@ fn gas_estimation_call_runtime() { ALICE, addr_caller, 0, - Weight::from_ref_time(result.gas_required).set_proof_size(u64::MAX), + result.gas_required, None, call.encode(), false, diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 137eccf3db686..09e51d981360b 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -228,16 +228,11 @@ impl Token for CodeToken { // contract code. This is why we subtract `T::*::(0)`. We need to do this at this // point because when charging the general weight for calling the contract we not know the // size of the contract. 
- let ref_time_weight = match *self { + match *self { Reinstrument(len) => T::WeightInfo::reinstrument(len), - Load(len) => { - let computation = T::WeightInfo::call_with_code_per_byte(len) - .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)); - let bandwidth = T::ContractAccessWeight::get().saturating_mul(len as u64); - computation.max(bandwidth) - }, - }; - - ref_time_weight + Load(len) => T::WeightInfo::call_with_code_per_byte(len) + .saturating_sub(T::WeightInfo::call_with_code_per_byte(0)) + .set_proof_size(len.into()), + } } } From 2cd40882d27ef10437c691b2705e67fa97f7c074 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Wed, 5 Oct 2022 17:11:50 +0300 Subject: [PATCH 209/348] MMR: impl `TypeInfo` for some structures (#12423) * BEEFY client: avoid unnecessary clone * MMR: impl TypeInfo for some structures --- Cargo.lock | 1 + client/beefy/rpc/src/lib.rs | 2 +- client/beefy/src/worker.rs | 2 +- primitives/beefy/src/mmr.rs | 4 ++-- primitives/merkle-mountain-range/Cargo.toml | 1 + primitives/merkle-mountain-range/src/lib.rs | 5 +++-- 6 files changed, 9 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 309742e5bf17e..647511373cc31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9999,6 +9999,7 @@ dependencies = [ "array-bytes", "log", "parity-scale-codec", + "scale-info", "serde", "sp-api", "sp-core", diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 0af474116e6d0..6f21abc616db8 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -84,7 +84,7 @@ impl From for JsonRpseeError { // Provides RPC methods for interacting with BEEFY. #[rpc(client, server)] pub trait BeefyApi { - /// Returns the block most recently finalized by BEEFY, alongside side its justification. + /// Returns the block most recently finalized by BEEFY, alongside its justification. #[subscription( name = "beefy_subscribeJustifications" => "beefy_justifications", unsubscribe = "beefy_unsubscribeJustifications", diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 832b43315515f..5bdc72357c412 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -508,7 +508,7 @@ where if let Err(e) = self.backend.append_justification( BlockId::Number(block_num), - (BEEFY_ENGINE_ID, finality_proof.clone().encode()), + (BEEFY_ENGINE_ID, finality_proof.encode()), ) { error!(target: "beefy", "🥩 Error {:?} on appending justification: {:?}", e, finality_proof); } diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs index 761eee9f8ef85..471cb96841b8e 100644 --- a/primitives/beefy/src/mmr.rs +++ b/primitives/beefy/src/mmr.rs @@ -44,7 +44,7 @@ impl BeefyDataProvider> for () { } /// A standard leaf that gets added every block to the MMR constructed by Substrate's `pallet_mmr`. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct MmrLeaf { /// Version of the leaf format. /// @@ -73,7 +73,7 @@ pub struct MmrLeaf { /// Given that adding new struct elements in SCALE is backward compatible (i.e. old format can be /// still decoded, the new fields will simply be ignored). We expect the major version to be bumped /// very rarely (hopefuly never). -#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode)] +#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] pub struct MmrLeafVersion(u8); impl MmrLeafVersion { /// Create new version object from `major` and `minor` components. 
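// A minimal, self-contained sketch of what the `TypeInfo` derives added above provide,
// assuming only `parity-scale-codec` (as `codec`) and the `scale-info` crate with its
// "derive" feature, as enabled in the Cargo.toml hunk that follows; the struct and
// field names here are illustrative, not part of the patch.
use codec::{Decode, Encode};
use scale_info::TypeInfo;

#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)]
pub struct ExampleLeaf {
    /// Version byte, mirroring the compact major/minor scheme of `MmrLeafVersion`.
    pub version: u8,
    /// Opaque parent hash; fixed-size byte arrays already implement `TypeInfo`.
    pub parent_hash: [u8; 32],
}

fn describe_in_metadata() {
    // Deriving `TypeInfo` is what lets a type be described in the runtime metadata,
    // so RPC clients can decode it without hard-coded knowledge of its layout.
    let _meta = scale_info::meta_type::<ExampleLeaf>();
}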
diff --git a/primitives/merkle-mountain-range/Cargo.toml b/primitives/merkle-mountain-range/Cargo.toml index e7e203942e845..0be53132f3eec 100644 --- a/primitives/merkle-mountain-range/Cargo.toml +++ b/primitives/merkle-mountain-range/Cargo.toml @@ -13,6 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } diff --git a/primitives/merkle-mountain-range/src/lib.rs b/primitives/merkle-mountain-range/src/lib.rs index c40a594739ec1..7a26cae839ea9 100644 --- a/primitives/merkle-mountain-range/src/lib.rs +++ b/primitives/merkle-mountain-range/src/lib.rs @@ -20,6 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] +use scale_info::TypeInfo; use sp_debug_derive::RuntimeDebug; use sp_runtime::traits; #[cfg(not(feature = "std"))] @@ -69,7 +70,7 @@ impl OnNewRoot for () { } /// A MMR proof data for one of the leaves. -#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] pub struct Proof { /// The index of the leaf the proof is for. pub leaf_index: LeafIndex, @@ -352,7 +353,7 @@ impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3); impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); /// A MMR proof data for a group of leaves. -#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq)] +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] pub struct BatchProof { /// The indices of the leaves the proof is for. 
pub leaf_indices: Vec, From 4cd3248c6ac9e29a45e2b52b92859cbe12769500 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 5 Oct 2022 16:45:56 +0200 Subject: [PATCH 210/348] Don't send back empty proofs if light request fails (#12372) --- .../src/light_client_requests/handler.rs | 50 ++++++++----------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/client/network/light/src/light_client_requests/handler.rs b/client/network/light/src/light_client_requests/handler.rs index 727a9b0d7e820..5efdc3ff6a18b 100644 --- a/client/network/light/src/light_client_requests/handler.rs +++ b/client/network/light/src/light_client_requests/handler.rs @@ -28,7 +28,7 @@ use futures::{channel::mpsc, prelude::*}; use libp2p::PeerId; use log::{debug, trace}; use prost::Message; -use sc_client_api::{BlockBackend, ProofProvider, StorageProof}; +use sc_client_api::{BlockBackend, ProofProvider}; use sc_network_common::{ config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, @@ -176,12 +176,15 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = + let response = match self .client .execution_proof(&BlockId::Hash(block), &request.method, &request.data) { - Ok((_, proof)) => proof, + Ok((_, proof)) => { + let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; + Some(schema::v1::light::response::Response::RemoteCallResponse(r)) + }, Err(e) => { trace!( "remote call request from {} ({} at {:?}) failed with: {}", @@ -190,16 +193,11 @@ where request.block, e, ); - StorageProof::empty() + None }, }; - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteCallResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) + Ok(schema::v1::light::Response { response }) } fn on_remote_read_request( @@ -221,11 +219,14 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = match self + let response = match self .client .read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) { - Ok(proof) => proof, + Ok(proof) => { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + Some(schema::v1::light::response::Response::RemoteReadResponse(r)) + }, Err(error) => { trace!( "remote read request from {} ({} at {:?}) failed with: {}", @@ -234,16 +235,11 @@ where request.block, error, ); - StorageProof::empty() + None }, }; - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteReadResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) + Ok(schema::v1::light::Response { response }) } fn on_remote_read_child_request( @@ -271,14 +267,17 @@ where Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err(sp_blockchain::Error::InvalidChildStorageKey), }; - let proof = match child_info.and_then(|child_info| { + let response = match child_info.and_then(|child_info| { self.client.read_child_proof( &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), ) }) { - Ok(proof) => proof, + Ok(proof) => { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + Some(schema::v1::light::response::Response::RemoteReadResponse(r)) + }, Err(error) => { trace!( "remote read child request from {} ({} {} at {:?}) failed with: {}", @@ -288,16 +287,11 @@ where request.block, error, ); - 
StorageProof::empty() + None }, }; - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; - schema::v1::light::response::Response::RemoteReadResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) + Ok(schema::v1::light::Response { response }) } } From 87224cf2cdafbacd0acac33c43a1063c02e02147 Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Wed, 5 Oct 2022 17:11:05 +0200 Subject: [PATCH 211/348] Implement `Clone` and `Default` for `Config` (#12397) * Implement `Clone` and `Default` for `Config` * `cargo fmt` * Remove default config implementation --- client/executor/wasmtime/src/runtime.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 5925a1792aef2..e3509351022bc 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -544,6 +544,7 @@ pub struct Semantics { pub max_memory_size: Option, } +#[derive(Clone)] pub struct Config { /// The WebAssembly standard requires all imports of an instantiated module to be resolved, /// otherwise, the instantiation fails. If this option is set to `true`, then this behavior is From eefba93cf62ff80e1011dbe1cd3a543b711f7bb9 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Wed, 5 Oct 2022 19:21:37 +0100 Subject: [PATCH 212/348] Bound uses of `Call` (#11649) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Introduce preimages module in traits * Multisize Preimages * Len not actually necessary * Tweaks to the preimage API * Fixes * Get Scheduler building with new API * Scheduler tests pass * Bounded Scheduler 🎉 * Use Agenda holes and introduce IncompleteSince to avoid need to reschedule * Tests pass with new weight system * New benchmarks * Add missing file * Drop preimage when permenantly overeight * Drop preimage when permenantly overeight * Referenda uses latest preimage API * Testing ok * Adding tests Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Add preimage migration Signed-off-by: Oliver Tale-Yazdi * Docs * Remove dbg * Refactor Democracy * Refactor Democracy * Add final MEL * Remove silly maps * Fixes * Minor refactor * Formatting * Fixes * Fixes * Fixes * Update frame/preimage/src/lib.rs Co-authored-by: Shawn Tabrizi * Add migrations to Democracy * WIP Signed-off-by: Oliver Tale-Yazdi * Resolve conflicts Signed-off-by: Oliver Tale-Yazdi * Revert "Resolve conflicts" This reverts commit a89cd0a073a7aabbb8ce6bcb12d4687717aac60e. * Undo wrong resolves... Signed-off-by: Oliver Tale-Yazdi * WIP Signed-off-by: Oliver Tale-Yazdi * Make compile Signed-off-by: Oliver Tale-Yazdi * massage clippy Signed-off-by: Oliver Tale-Yazdi * More clippy Signed-off-by: Oliver Tale-Yazdi * clippy annoyance Signed-off-by: Oliver Tale-Yazdi * clippy annoyance Signed-off-by: Oliver Tale-Yazdi * Fix benchmarks Signed-off-by: Oliver Tale-Yazdi * add missing file * Test Signed-off-by: Oliver Tale-Yazdi * More tests Signed-off-by: Oliver Tale-Yazdi * Clippy harassment Signed-off-by: Oliver Tale-Yazdi * Add test Signed-off-by: Oliver Tale-Yazdi * clippy Signed-off-by: Oliver Tale-Yazdi * Fixup tests Signed-off-by: Oliver Tale-Yazdi * Remove old stuff Signed-off-by: Oliver Tale-Yazdi * fmt Signed-off-by: Oliver Tale-Yazdi * Test trait functions Signed-off-by: Oliver Tale-Yazdi * Update pallet-ui tests Why is this needed? 
Should not be the case unless master is broken... Signed-off-by: Oliver Tale-Yazdi * More scheduler trait test Signed-off-by: Oliver Tale-Yazdi * More tests Signed-off-by: Oliver Tale-Yazdi * Apply review suggestion Signed-off-by: Oliver Tale-Yazdi * Beauty fixes Signed-off-by: Oliver Tale-Yazdi * Add Scheduler test migration_v3_to_v4_works Signed-off-by: Oliver Tale-Yazdi * Merge fixup Signed-off-by: Oliver Tale-Yazdi * Keep referenda benchmarks instantiatable Signed-off-by: Oliver Tale-Yazdi * Update weights Signed-off-by: Oliver Tale-Yazdi * Use new scheduler weight functions Signed-off-by: Oliver Tale-Yazdi * Use new democracy weight functions Signed-off-by: Oliver Tale-Yazdi * Use weight compare functions Signed-off-by: Oliver Tale-Yazdi * Update pallet-ui tests Signed-off-by: Oliver Tale-Yazdi * More renaming… Signed-off-by: Oliver Tale-Yazdi * More renaming… Signed-off-by: Oliver Tale-Yazdi * Add comment Signed-off-by: Oliver Tale-Yazdi * Implement OnRuntimeUpgrade for scheduler::v3_to_v4 migration Put the migration into a proper `MigrateToV4` struct and implement the OnRuntimeUpgrade hooks for it. Also move the test to use that instead. This should make it easier for adding it to Polkadot. Signed-off-by: Oliver Tale-Yazdi * Clippy Signed-off-by: Oliver Tale-Yazdi * Handle undecodable Agendas Signed-off-by: Oliver Tale-Yazdi * Remove trash Signed-off-by: Oliver Tale-Yazdi * Fix test Signed-off-by: Oliver Tale-Yazdi * Use new OnRuntimeUpgrade functions Signed-off-by: Oliver Tale-Yazdi * fix test Signed-off-by: Oliver Tale-Yazdi * Fix BoundedSlice::truncate_from Co-authored-by: jakoblell Signed-off-by: Oliver Tale-Yazdi * Fix pre_upgrade hook return values Signed-off-by: Oliver Tale-Yazdi * Add more error logging Signed-off-by: Oliver Tale-Yazdi * Find too large preimages in the pre_upgrade hook Signed-off-by: Oliver Tale-Yazdi * Test that too large Calls in agendas are ignored Signed-off-by: Oliver Tale-Yazdi * Use new OnRuntimeUpgrade hooks Why did the CI not catch this?! Signed-off-by: Oliver Tale-Yazdi * works fine - just more logs Signed-off-by: Oliver Tale-Yazdi * Fix staking migration Causing issues on Kusama... Signed-off-by: Oliver Tale-Yazdi * Fix UI tests No idea why this is needed. This is actually undoing an earlier change. Maybe the CI has different rustc versions!? 
Signed-off-by: Oliver Tale-Yazdi * Remove multisig's Calls (#12072) * Remove multisig's Calls * Multisig: Fix tests and re-introduce reserve logic (#12241) * Fix tests and re-introduce reserve logic * fix benches * add todo * remove irrelevant bench * [Feature] Add a migration that drains and refunds stored calls (#12313) * [Feature] Add a migration that drains and refunds stored calls * migration fixes * fixes * address review comments * consume the whole block weight * fix assertions * license header * fix interface Co-authored-by: parity-processbot <> Co-authored-by: parity-processbot <> Co-authored-by: Roman Useinov * Fix test Signed-off-by: Oliver Tale-Yazdi * Fix multisig benchmarks Signed-off-by: Oliver Tale-Yazdi * ".git/.scripts/bench-bot.sh" pallet dev pallet_democracy * ".git/.scripts/bench-bot.sh" pallet dev pallet_scheduler * ".git/.scripts/bench-bot.sh" pallet dev pallet_preimage Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Shawn Tabrizi Co-authored-by: parity-processbot <> Co-authored-by: Roman Useinov --- Cargo.lock | 4 + bin/node/runtime/src/lib.rs | 16 +- frame/bounties/src/lib.rs | 2 +- frame/contracts/src/storage.rs | 2 +- frame/conviction-voting/src/lib.rs | 2 +- frame/democracy/Cargo.toml | 7 +- frame/democracy/src/benchmarking.rs | 394 ++--- frame/democracy/src/conviction.rs | 16 +- frame/democracy/src/lib.rs | 746 +++------ frame/democracy/src/migrations.rs | 236 +++ frame/democracy/src/tests.rs | 76 +- frame/democracy/src/tests/cancellation.rs | 30 +- frame/democracy/src/tests/decoders.rs | 40 +- frame/democracy/src/tests/delegation.rs | 8 +- .../democracy/src/tests/external_proposing.rs | 104 +- frame/democracy/src/tests/fast_tracking.rs | 27 +- frame/democracy/src/tests/lock_voting.rs | 36 +- frame/democracy/src/tests/preimage.rs | 237 --- frame/democracy/src/tests/public_proposals.rs | 61 +- frame/democracy/src/tests/scheduling.rs | 35 +- frame/democracy/src/tests/voting.rs | 10 +- frame/democracy/src/types.rs | 28 +- frame/democracy/src/vote.rs | 48 +- frame/democracy/src/vote_threshold.rs | 6 +- frame/democracy/src/weights.rs | 346 ++-- frame/identity/src/tests.rs | 2 +- frame/multisig/Cargo.toml | 3 + frame/multisig/src/benchmarking.rs | 124 +- frame/multisig/src/lib.rs | 148 +- frame/multisig/src/migrations.rs | 86 + frame/multisig/src/tests.rs | 293 +--- frame/nicks/src/lib.rs | 4 +- frame/preimage/Cargo.toml | 6 +- frame/preimage/src/benchmarking.rs | 26 +- frame/preimage/src/lib.rs | 237 ++- frame/preimage/src/migration.rs | 263 +++ frame/preimage/src/mock.rs | 1 - frame/preimage/src/tests.rs | 279 +++- frame/preimage/src/weights.rs | 124 +- frame/recovery/src/lib.rs | 4 +- frame/referenda/src/benchmarking.rs | 16 +- frame/referenda/src/lib.rs | 104 +- frame/referenda/src/mock.rs | 18 +- frame/referenda/src/tests.rs | 32 +- frame/referenda/src/types.rs | 22 +- frame/scheduler/src/benchmarking.rs | 323 ++-- frame/scheduler/src/lib.rs | 984 +++++++---- frame/scheduler/src/migration.rs | 402 +++++ frame/scheduler/src/mock.rs | 56 +- frame/scheduler/src/tests.rs | 1482 ++++++++++++----- frame/scheduler/src/weights.rs | 316 ++-- .../src/construct_runtime/expand/origin.rs | 27 +- frame/support/src/dispatch.rs | 21 +- frame/support/src/traits.rs | 15 +- frame/support/src/traits/dispatch.rs | 38 +- frame/support/src/traits/misc.rs | 2 +- frame/support/src/traits/preimages.rs | 317 ++++ frame/support/src/traits/schedule.rs | 99 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 6 +- 
...re_span_are_ok_on_wrong_gen_unnamed.stderr | 6 +- .../pallet_ui/storage_info_unsatisfied.stderr | 2 +- .../storage_info_unsatisfied_nmap.stderr | 2 +- frame/system/src/lib.rs | 5 +- frame/whitelist/src/mock.rs | 1 - primitives/core/src/bounded/bounded_vec.rs | 30 +- primitives/runtime/src/lib.rs | 55 +- test-utils/runtime/src/lib.rs | 35 +- 67 files changed, 5067 insertions(+), 3466 deletions(-) create mode 100644 frame/democracy/src/migrations.rs delete mode 100644 frame/democracy/src/tests/preimage.rs create mode 100644 frame/multisig/src/migrations.rs create mode 100644 frame/preimage/src/migration.rs create mode 100644 frame/scheduler/src/migration.rs create mode 100644 frame/support/src/traits/preimages.rs diff --git a/Cargo.lock b/Cargo.lock index 647511373cc31..53f370a930626 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5581,7 +5581,9 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-balances", + "pallet-preimage", "pallet-scheduler", "parity-scale-codec", "scale-info", @@ -5899,6 +5901,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-balances", "parity-scale-codec", "scale-info", @@ -6065,6 +6068,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-balances", "parity-scale-codec", "scale-info", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4898312f9608f..d10448cc2d183 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -340,8 +340,6 @@ impl pallet_proxy::Config for Runtime { parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; - // Retry a scheduled item every 10 blocks (1 minute) until the preimage exists. - pub const NoPreimagePostponement: Option = Some(10); } impl pallet_scheduler::Config for Runtime { @@ -351,11 +349,10 @@ impl pallet_scheduler::Config for Runtime { type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; - type MaxScheduledPerBlock = ConstU32<50>; + type MaxScheduledPerBlock = ConstU32<512>; type WeightInfo = pallet_scheduler::weights::SubstrateWeight; type OriginPrivilegeCmp = EqualPrivilegeOnly; - type PreimageProvider = Preimage; - type NoPreimagePostponement = NoPreimagePostponement; + type Preimages = Preimage; } parameter_types! { @@ -370,7 +367,6 @@ impl pallet_preimage::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type MaxSize = PreimageMaxSize; type BaseDeposit = PreimageBaseDeposit; type ByteDeposit = PreimageByteDeposit; } @@ -862,6 +858,7 @@ impl pallet_referenda::Config for Runtime { type UndecidingTimeout = UndecidingTimeout; type AlarmInterval = AlarmInterval; type Tracks = TracksInfo; + type Preimages = Preimage; } impl pallet_referenda::Config for Runtime { @@ -881,6 +878,7 @@ impl pallet_referenda::Config for Runtime { type UndecidingTimeout = UndecidingTimeout; type AlarmInterval = AlarmInterval; type Tracks = TracksInfo; + type Preimages = Preimage; } impl pallet_ranked_collective::Config for Runtime { @@ -909,7 +907,6 @@ parameter_types! { } impl pallet_democracy::Config for Runtime { - type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type Currency = Balances; type EnactmentPeriod = EnactmentPeriod; @@ -949,14 +946,15 @@ impl pallet_democracy::Config for Runtime { // only do it once and it lasts only for the cool-off period. 
type VetoOrigin = pallet_collective::EnsureMember; type CooloffPeriod = CooloffPeriod; - type PreimageByteDeposit = PreimageByteDeposit; - type OperationalPreimageOrigin = pallet_collective::EnsureMember; type Slash = Treasury; type Scheduler = Scheduler; type PalletsOrigin = OriginCaller; type MaxVotes = ConstU32<100>; type WeightInfo = pallet_democracy::weights::SubstrateWeight; type MaxProposals = MaxProposals; + type Preimages = Preimage; + type MaxDeposits = ConstU32<100>; + type MaxBlacklisted = ConstU32<100>; } parameter_types! { diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index b95940a2835ce..d947226f87fa0 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -819,7 +819,7 @@ impl, I: 'static> Pallet { value: BalanceOf, ) -> DispatchResult { let bounded_description: BoundedVec<_, _> = - description.try_into().map_err(|()| Error::::ReasonTooBig)?; + description.try_into().map_err(|_| Error::::ReasonTooBig)?; ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); let index = Self::bounty_count(); diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index c3fc0840d8649..cf10c3225c920 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -340,7 +340,7 @@ where let queue: Vec = (0..T::DeletionQueueDepth::get()) .map(|_| DeletedContract { trie_id: TrieId::default() }) .collect(); - let bounded: BoundedVec<_, _> = queue.try_into().unwrap(); + let bounded: BoundedVec<_, _> = queue.try_into().map_err(|_| ()).unwrap(); >::put(bounded); } } diff --git a/frame/conviction-voting/src/lib.rs b/frame/conviction-voting/src/lib.rs index 534941d6f7f66..b876a9354ee59 100644 --- a/frame/conviction-voting/src/lib.rs +++ b/frame/conviction-voting/src/lib.rs @@ -400,7 +400,7 @@ impl, I: 'static> Pallet { Err(i) => { votes .try_insert(i, (poll_index, vote)) - .map_err(|()| Error::::MaxVotesReached)?; + .map_err(|_| Error::::MaxVotesReached)?; }, } // Shouldn't be possible to fail, but we handle it gracefully. 
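// A minimal sketch of the bounded-conversion pattern behind the `map_err(|()| ..)` to
// `map_err(|_| ..)` changes above, assuming `frame_support`'s `BoundedVec`, whose
// fallible conversions now hand back the rejected value rather than `()`; the function
// and error message below are illustrative only.
use frame_support::{traits::ConstU32, BoundedVec};

fn bound_description(raw: Vec<u8>) -> Result<BoundedVec<u8, ConstU32<16>>, &'static str> {
    // `try_into` fails when `raw` exceeds the bound; since the error is no longer the
    // unit type, call sites match it with a wildcard pattern.
    raw.try_into().map_err(|_| "description too long")
}

fn main() {
    assert!(bound_description(vec![0u8; 8]).is_ok());
    assert!(bound_description(vec![0u8; 32]).is_err());
}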
diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index f0ab3162c892b..e50d39ff76902 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -24,11 +24,13 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../sys sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } +log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } pallet-scheduler = { version = "4.0.0-dev", path = "../scheduler" } -sp-core = { version = "6.0.0", path = "../../primitives/core" } +pallet-preimage = { version = "4.0.0-dev", path = "../preimage" } [features] default = ["std"] @@ -42,6 +44,7 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "sp-core/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", @@ -49,4 +52,4 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = ["frame-support/try-runtime",] diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index ab7ee3331e319..424192e2521da 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -22,24 +22,16 @@ use super::*; use frame_benchmarking::{account, benchmarks, whitelist_account}; use frame_support::{ assert_noop, assert_ok, - codec::Decode, - traits::{ - schedule::DispatchTime, Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable, - }, + traits::{Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable}, }; -use frame_system::{Pallet as System, RawOrigin}; -use sp_runtime::traits::{BadOrigin, Bounded, One}; +use frame_system::RawOrigin; +use sp_core::H256; +use sp_runtime::{traits::Bounded, BoundedVec}; use crate::Pallet as Democracy; +const REFERENDUM_COUNT_HINT: u32 = 10; const SEED: u32 = 0; -const MAX_REFERENDUMS: u32 = 99; -const MAX_SECONDERS: u32 = 100; -const MAX_BYTES: u32 = 16_384; - -fn assert_last_event(generic_event: ::RuntimeEvent) { - frame_system::Pallet::::assert_last_event(generic_event.into()); -} fn funded_account(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); @@ -49,37 +41,32 @@ fn funded_account(name: &'static str, index: u32) -> T::AccountId { caller } -fn add_proposal(n: u32) -> Result { +fn make_proposal(n: u32) -> BoundedCallOf { + let call: CallOf = frame_system::Call::remark { remark: n.encode() }.into(); + ::Preimages::bound(call).unwrap() +} + +fn add_proposal(n: u32) -> Result { let other = funded_account::("proposer", n); let value = T::MinimumDeposit::get(); - let proposal_hash: T::Hash = T::Hashing::hash_of(&n); - - Democracy::::propose(RawOrigin::Signed(other).into(), proposal_hash, value)?; - - Ok(proposal_hash) + let proposal = make_proposal::(n); + Democracy::::propose(RawOrigin::Signed(other).into(), proposal.clone(), value)?; + Ok(proposal.hash()) } -fn add_referendum(n: u32) -> Result { - let proposal_hash: T::Hash = T::Hashing::hash_of(&n); +fn add_referendum(n: u32) -> (ReferendumIndex, H256) { let vote_threshold = VoteThreshold::SimpleMajority; - - Democracy::::inject_referendum( - 
T::LaunchPeriod::get(), - proposal_hash, - vote_threshold, - 0u32.into(), - ); - let referendum_index: ReferendumIndex = ReferendumCount::::get() - 1; - T::Scheduler::schedule_named( - (DEMOCRACY_ID, referendum_index).encode(), - DispatchTime::At(2u32.into()), - None, - 63, - frame_system::RawOrigin::Root.into(), - Call::enact_proposal { proposal_hash, index: referendum_index }.into(), + let proposal = make_proposal::(n); + let hash = proposal.hash(); + ( + Democracy::::inject_referendum( + T::LaunchPeriod::get(), + proposal, + vote_threshold, + 0u32.into(), + ), + hash, ) - .map_err(|_| "failed to schedule named")?; - Ok(referendum_index) } fn account_vote(b: BalanceOf) -> AccountVote> { @@ -97,95 +84,90 @@ benchmarks! { } let caller = funded_account::("caller", 0); - let proposal_hash: T::Hash = T::Hashing::hash_of(&0); + let proposal = make_proposal::(0); let value = T::MinimumDeposit::get(); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), proposal_hash, value) + }: _(RawOrigin::Signed(caller), proposal, value) verify { assert_eq!(Democracy::::public_props().len(), p as usize, "Proposals not created."); } second { - let s in 0 .. MAX_SECONDERS; - let caller = funded_account::("caller", 0); - let proposal_hash = add_proposal::(s)?; + add_proposal::(0)?; // Create s existing "seconds" - for i in 0 .. s { + // we must reserve one deposit for the `proposal` and one for our benchmarked `second` call. + for i in 0 .. T::MaxDeposits::get() - 2 { let seconder = funded_account::("seconder", i); - Democracy::::second(RawOrigin::Signed(seconder).into(), 0, u32::MAX)?; + Democracy::::second(RawOrigin::Signed(seconder).into(), 0)?; } let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; - assert_eq!(deposits.0.len(), (s + 1) as usize, "Seconds not recorded"); + assert_eq!(deposits.0.len(), (T::MaxDeposits::get() - 1) as usize, "Seconds not recorded"); whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), 0, u32::MAX) + }: _(RawOrigin::Signed(caller), 0) verify { let deposits = Democracy::::deposit_of(0).ok_or("Proposal not created")?; - assert_eq!(deposits.0.len(), (s + 2) as usize, "`second` benchmark did not work"); + assert_eq!(deposits.0.len(), (T::MaxDeposits::get()) as usize, "`second` benchmark did not work"); } vote_new { - let r in 1 .. MAX_REFERENDUMS; - let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); // We need to create existing direct votes - for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; + for i in 0 .. T::MaxVotes::get() - 1 { + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), r as usize, "Votes were not recorded."); + assert_eq!(votes.len(), (T::MaxVotes::get() - 1) as usize, "Votes were not recorded."); - let referendum_index = add_referendum::(r)?; + let ref_index = add_referendum::(T::MaxVotes::get() - 1).0; whitelist_account!(caller); - }: vote(RawOrigin::Signed(caller.clone()), referendum_index, account_vote) + }: vote(RawOrigin::Signed(caller.clone()), ref_index, account_vote) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. 
} => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), (r + 1) as usize, "Vote was not recorded."); + assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was not recorded."); } vote_existing { - let r in 1 .. MAX_REFERENDUMS; - let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); // We need to create existing direct votes - for i in 0 ..=r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; + for i in 0..T::MaxVotes::get() { + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), (r + 1) as usize, "Votes were not recorded."); + assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Votes were not recorded."); // Change vote from aye to nay let nay = Vote { aye: false, conviction: Conviction::Locked1x }; let new_vote = AccountVote::Standard { vote: nay, balance: 1000u32.into() }; - let referendum_index = Democracy::::referendum_count() - 1; + let ref_index = Democracy::::referendum_count() - 1; // This tests when a user changes a vote whitelist_account!(caller); - }: vote(RawOrigin::Signed(caller.clone()), referendum_index, new_vote) + }: vote(RawOrigin::Signed(caller.clone()), ref_index, new_vote) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, _ => return Err("Votes are not direct".into()), }; - assert_eq!(votes.len(), (r + 1) as usize, "Vote was incorrectly added"); - let referendum_info = Democracy::::referendum_info(referendum_index) + assert_eq!(votes.len(), T::MaxVotes::get() as usize, "Vote was incorrectly added"); + let referendum_info = Democracy::::referendum_info(ref_index) .ok_or("referendum doesn't exist")?; let tally = match referendum_info { ReferendumInfo::Ongoing(r) => r.tally, @@ -196,61 +178,55 @@ benchmarks! { emergency_cancel { let origin = T::CancellationOrigin::successful_origin(); - let referendum_index = add_referendum::(0)?; - assert_ok!(Democracy::::referendum_status(referendum_index)); - }: _(origin, referendum_index) + let ref_index = add_referendum::(0).0; + assert_ok!(Democracy::::referendum_status(ref_index)); + }: _(origin, ref_index) verify { // Referendum has been canceled assert_noop!( - Democracy::::referendum_status(referendum_index), + Democracy::::referendum_status(ref_index), Error::::ReferendumInvalid, ); } blacklist { - let p in 1 .. T::MaxProposals::get(); - // Place our proposal at the end to make sure it's worst case. - for i in 0 .. p - 1 { + for i in 0 .. T::MaxProposals::get() - 1 { add_proposal::(i)?; } // We should really add a lot of seconds here, but we're not doing it elsewhere. + // Add a referendum of our proposal. + let (ref_index, hash) = add_referendum::(0); + assert_ok!(Democracy::::referendum_status(ref_index)); // Place our proposal in the external queue, too. - let hash = T::Hashing::hash_of(&0); assert_ok!( - Democracy::::external_propose(T::ExternalOrigin::successful_origin(), hash) + Democracy::::external_propose(T::ExternalOrigin::successful_origin(), make_proposal::(0)) ); let origin = T::BlacklistOrigin::successful_origin(); - // Add a referendum of our proposal. 
- let referendum_index = add_referendum::(0)?; - assert_ok!(Democracy::::referendum_status(referendum_index)); - }: _(origin, hash, Some(referendum_index)) + }: _(origin, hash, Some(ref_index)) verify { // Referendum has been canceled assert_noop!( - Democracy::::referendum_status(referendum_index), + Democracy::::referendum_status(ref_index), Error::::ReferendumInvalid ); } // Worst case scenario, we external propose a previously blacklisted proposal external_propose { - let v in 1 .. MAX_VETOERS as u32; - let origin = T::ExternalOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&0); + let proposal = make_proposal::(0); // Add proposal to blacklist with block number 0 - let addresses = (0..v) + let addresses: BoundedVec<_, _> = (0..(T::MaxBlacklisted::get() - 1)) .into_iter() .map(|i| account::("blacklist", i, SEED)) - .collect::>(); - Blacklist::::insert( - proposal_hash, - (T::BlockNumber::zero(), addresses), - ); - }: _(origin, proposal_hash) + .collect::>() + .try_into() + .unwrap(); + Blacklist::::insert(proposal.hash(), (T::BlockNumber::zero(), addresses)); + }: _(origin, proposal) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -258,8 +234,8 @@ benchmarks! { external_propose_majority { let origin = T::ExternalMajorityOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&0); - }: _(origin, proposal_hash) + let proposal = make_proposal::(0); + }: _(origin, proposal) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -267,8 +243,8 @@ benchmarks! { external_propose_default { let origin = T::ExternalDefaultOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&0); - }: _(origin, proposal_hash) + let proposal = make_proposal::(0); + }: _(origin, proposal) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -276,8 +252,9 @@ benchmarks! { fast_track { let origin_propose = T::ExternalDefaultOrigin::successful_origin(); - let proposal_hash: T::Hash = T::Hashing::hash_of(&0); - Democracy::::external_propose_default(origin_propose, proposal_hash)?; + let proposal = make_proposal::(0); + let proposal_hash = proposal.hash(); + Democracy::::external_propose_default(origin_propose, proposal)?; // NOTE: Instant origin may invoke a little bit more logic, but may not always succeed. let origin_fast_track = T::FastTrackOrigin::successful_origin(); @@ -289,17 +266,15 @@ benchmarks! { } veto_external { - // Existing veto-ers - let v in 0 .. MAX_VETOERS as u32; - - let proposal_hash: T::Hash = T::Hashing::hash_of(&v); + let proposal = make_proposal::(0); + let proposal_hash = proposal.hash(); let origin_propose = T::ExternalDefaultOrigin::successful_origin(); - Democracy::::external_propose_default(origin_propose, proposal_hash)?; + Democracy::::external_propose_default(origin_propose, proposal)?; - let mut vetoers: Vec = Vec::new(); - for i in 0 .. v { - vetoers.push(account::("vetoer", i, SEED)); + let mut vetoers: BoundedVec = Default::default(); + for i in 0 .. (T::MaxBlacklisted::get() - 1) { + vetoers.try_push(account::("vetoer", i, SEED)).unwrap(); } vetoers.sort(); Blacklist::::insert(proposal_hash, (T::BlockNumber::zero(), vetoers)); @@ -310,42 +285,27 @@ benchmarks! 
{ verify { assert!(NextExternal::::get().is_none()); let (_, new_vetoers) = >::get(&proposal_hash).ok_or("no blacklist")?; - assert_eq!(new_vetoers.len(), (v + 1) as usize, "vetoers not added"); + assert_eq!(new_vetoers.len(), T::MaxBlacklisted::get() as usize, "vetoers not added"); } cancel_proposal { - let p in 1 .. T::MaxProposals::get(); - // Place our proposal at the end to make sure it's worst case. - for i in 0 .. p { + for i in 0 .. T::MaxProposals::get() { add_proposal::(i)?; } - let cancel_origin = T::CancelProposalOrigin::successful_origin(); }: _(cancel_origin, 0) cancel_referendum { - let referendum_index = add_referendum::(0)?; - }: _(RawOrigin::Root, referendum_index) - - cancel_queued { - let r in 1 .. MAX_REFERENDUMS; - - for i in 0..r { - add_referendum::(i)?; // This add one element in the scheduler - } - - let referendum_index = add_referendum::(r)?; - }: _(RawOrigin::Root, referendum_index) + let ref_index = add_referendum::(0).0; + }: _(RawOrigin::Root, ref_index) - // This measures the path of `launch_next` external. Not currently used as we simply - // assume the weight is `MaxBlockWeight` when executing. #[extra] on_initialize_external { - let r in 0 .. MAX_REFERENDUMS; + let r in 0 .. REFERENDUM_COUNT_HINT; for i in 0..r { - add_referendum::(i)?; + add_referendum::(i); } assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); @@ -354,8 +314,8 @@ benchmarks! { LastTabledWasExternal::::put(false); let origin = T::ExternalMajorityOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&r); - let call = Call::::external_propose_majority { proposal_hash }; + let proposal = make_proposal::(r); + let call = Call::::external_propose_majority { proposal }; call.dispatch_bypass_filter(origin)?; // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -379,14 +339,12 @@ benchmarks! { } } - // This measures the path of `launch_next` public. Not currently used as we simply - // assume the weight is `MaxBlockWeight` when executing. #[extra] on_initialize_public { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); for i in 0..r { - add_referendum::(i)?; + add_referendum::(i); } assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); @@ -415,10 +373,10 @@ benchmarks! { // No launch no maturing referenda. on_initialize_base { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); for i in 0..r { - add_referendum::(i)?; + add_referendum::(i); } for (key, mut info) in ReferendumInfoOf::::iter() { @@ -445,10 +403,10 @@ benchmarks! { } on_initialize_base_with_launch_period { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); for i in 0..r { - add_referendum::(i)?; + add_referendum::(i); } for (key, mut info) in ReferendumInfoOf::::iter() { @@ -477,7 +435,7 @@ benchmarks! { } delegate { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); let initial_balance: BalanceOf = 100u32.into(); let delegated_balance: BalanceOf = 1000u32.into(); @@ -504,8 +462,8 @@ benchmarks! 
{ let account_vote = account_vote::(initial_balance); // We need to create existing direct votes for the `new_delegate` for i in 0..r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(new_delegate.clone()).into(), ref_idx, account_vote)?; + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(new_delegate.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&new_delegate) { Voting::Direct { votes, .. } => votes, @@ -529,7 +487,7 @@ benchmarks! { } undelegate { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); let initial_balance: BalanceOf = 100u32.into(); let delegated_balance: BalanceOf = 1000u32.into(); @@ -553,10 +511,10 @@ benchmarks! { // We need to create votes direct votes for the `delegate` let account_vote = account_vote::(initial_balance); for i in 0..r { - let ref_idx = add_referendum::(i)?; + let ref_index = add_referendum::(i).0; Democracy::::vote( RawOrigin::Signed(the_delegate.clone()).into(), - ref_idx, + ref_index, account_vote )?; } @@ -580,71 +538,9 @@ benchmarks! { }: _(RawOrigin::Root) - note_preimage { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; - - let caller = funded_account::("caller", 0); - let encoded_proposal = vec![1; b as usize]; - whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), encoded_proposal.clone()) - verify { - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - match Preimages::::get(proposal_hash) { - Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available".into()) - } - } - - note_imminent_preimage { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; - - // d + 1 to include the one we are testing - let encoded_proposal = vec![1; b as usize]; - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - let block_number = T::BlockNumber::one(); - Preimages::::insert(&proposal_hash, PreimageStatus::Missing(block_number)); - - let caller = funded_account::("caller", 0); - let encoded_proposal = vec![1; b as usize]; - whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), encoded_proposal.clone()) - verify { - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - match Preimages::::get(proposal_hash) { - Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available".into()) - } - } - - reap_preimage { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; - - let encoded_proposal = vec![1; b as usize]; - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - - let submitter = funded_account::("submitter", b); - Democracy::::note_preimage(RawOrigin::Signed(submitter).into(), encoded_proposal.clone())?; - - // We need to set this otherwise we get `Early` error. - let block_number = T::VotingPeriod::get() + T::EnactmentPeriod::get() + T::BlockNumber::one(); - System::::set_block_number(block_number); - - assert!(Preimages::::contains_key(proposal_hash)); - - let caller = funded_account::("caller", 0); - whitelist_account!(caller); - }: _(RawOrigin::Signed(caller), proposal_hash, u32::MAX) - verify { - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - assert!(!Preimages::::contains_key(proposal_hash)); - } - // Test when unlock will remove locks unlock_remove { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); let locker = funded_account::("locker", 0); let locker_lookup = T::Lookup::unlookup(locker.clone()); @@ -653,9 +549,9 @@ benchmarks! 
{ let small_vote = account_vote::(base_balance); // Vote and immediately unvote for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_idx, small_vote)?; - Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_idx)?; + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?; + Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_index)?; } let caller = funded_account::("caller", 0); @@ -669,7 +565,7 @@ benchmarks! { // Test when unlock will set a new value unlock_set { - let r in 1 .. MAX_REFERENDUMS; + let r in 0 .. (T::MaxVotes::get() - 1); let locker = funded_account::("locker", 0); let locker_lookup = T::Lookup::unlookup(locker.clone()); @@ -677,14 +573,14 @@ benchmarks! { let base_balance: BalanceOf = 100u32.into(); let small_vote = account_vote::(base_balance); for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_idx, small_vote)?; + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, small_vote)?; } // Create a big vote so lock increases let big_vote = account_vote::(base_balance * 10u32.into()); - let referendum_index = add_referendum::(r)?; - Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), referendum_index, big_vote)?; + let ref_index = add_referendum::(r).0; + Democracy::::vote(RawOrigin::Signed(locker.clone()).into(), ref_index, big_vote)?; let votes = match VotingOf::::get(&locker) { Voting::Direct { votes, .. } => votes, @@ -695,7 +591,7 @@ benchmarks! { let voting = VotingOf::::get(&locker); assert_eq!(voting.locked_balance(), base_balance * 10u32.into()); - Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), referendum_index)?; + Democracy::::remove_vote(RawOrigin::Signed(locker.clone()).into(), ref_index)?; let caller = funded_account::("caller", 0); whitelist_account!(caller); @@ -709,18 +605,18 @@ benchmarks! { let voting = VotingOf::::get(&locker); // Note that we may want to add a `get_lock` api to actually verify - assert_eq!(voting.locked_balance(), base_balance); + assert_eq!(voting.locked_balance(), if r > 0 { base_balance } else { 0u32.into() }); } remove_vote { - let r in 1 .. MAX_REFERENDUMS; + let r in 1 .. T::MaxVotes::get(); let caller = funded_account::("caller", 0); let account_vote = account_vote::(100u32.into()); for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { @@ -729,9 +625,9 @@ benchmarks! { }; assert_eq!(votes.len(), r as usize, "Votes not created"); - let referendum_index = r - 1; + let ref_index = r - 1; whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), referendum_index) + }: _(RawOrigin::Signed(caller.clone()), ref_index) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, @@ -742,15 +638,15 @@ benchmarks! { // Worst case is when target == caller and referendum is ongoing remove_other_vote { - let r in 1 .. MAX_REFERENDUMS; + let r in 1 .. 
T::MaxVotes::get(); let caller = funded_account::("caller", r); let caller_lookup = T::Lookup::unlookup(caller.clone()); let account_vote = account_vote::(100u32.into()); for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote)?; + let ref_index = add_referendum::(i).0; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_index, account_vote)?; } let votes = match VotingOf::::get(&caller) { @@ -759,9 +655,9 @@ benchmarks! { }; assert_eq!(votes.len(), r as usize, "Votes not created"); - let referendum_index = r - 1; + let ref_index = r - 1; whitelist_account!(caller); - }: _(RawOrigin::Signed(caller.clone()), caller_lookup, referendum_index) + }: _(RawOrigin::Signed(caller.clone()), caller_lookup, ref_index) verify { let votes = match VotingOf::::get(&caller) { Voting::Direct { votes, .. } => votes, @@ -770,54 +666,6 @@ benchmarks! { assert_eq!(votes.len(), (r - 1) as usize, "Vote was not removed"); } - #[extra] - enact_proposal_execute { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; - - let proposer = funded_account::("proposer", 0); - let raw_call = Call::note_preimage { encoded_proposal: vec![1; b as usize] }; - let generic_call: T::Proposal = raw_call.into(); - let encoded_proposal = generic_call.encode(); - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - Democracy::::note_preimage(RawOrigin::Signed(proposer).into(), encoded_proposal)?; - - match Preimages::::get(proposal_hash) { - Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available".into()) - } - }: enact_proposal(RawOrigin::Root, proposal_hash, 0) - verify { - // Fails due to mismatched origin - assert_last_event::(Event::::Executed { ref_index: 0, result: Err(BadOrigin.into()) }.into()); - } - - #[extra] - enact_proposal_slash { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; - - let proposer = funded_account::("proposer", 0); - // Random invalid bytes - let encoded_proposal = vec![200; b as usize]; - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - Democracy::::note_preimage(RawOrigin::Signed(proposer).into(), encoded_proposal)?; - - match Preimages::::get(proposal_hash) { - Some(PreimageStatus::Available { .. }) => (), - _ => return Err("preimage not available".into()) - } - let origin = RawOrigin::Root.into(); - let call = Call::::enact_proposal { proposal_hash, index: 0 }.encode(); - }: { - assert_eq!( - as Decode>::decode(&mut &*call) - .expect("call is encoded above, encoding must be correct") - .dispatch_bypass_filter(origin), - Err(Error::::PreimageInvalid.into()) - ); - } - impl_benchmark_test_suite!( Democracy, crate::tests::new_test_ext(), diff --git a/frame/democracy/src/conviction.rs b/frame/democracy/src/conviction.rs index 57d631e8c1f4c..a938d8a4e6852 100644 --- a/frame/democracy/src/conviction.rs +++ b/frame/democracy/src/conviction.rs @@ -18,7 +18,7 @@ //! The conviction datatype. use crate::types::Delegations; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{ traits::{Bounded, CheckedDiv, CheckedMul, Zero}, @@ -27,7 +27,19 @@ use sp_runtime::{ use sp_std::{prelude::*, result::Result}; /// A value denoting the strength of conviction of a vote. 
-#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo)] +#[derive( + Encode, + MaxEncodedLen, + Decode, + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + RuntimeDebug, + TypeInfo, +)] pub enum Conviction { /// 0.1x votes, unlocked. None, diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 3c1be19103998..cf954d4800eee 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -152,21 +152,20 @@ #![recursion_limit = "256"] #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode, Input}; +use codec::{Decode, Encode}; use frame_support::{ ensure, traits::{ defensive_prelude::*, - schedule::{DispatchTime, Named as ScheduleNamed}, - BalanceStatus, Currency, Get, LockIdentifier, LockableCurrency, OnUnbalanced, - ReservableCurrency, WithdrawReasons, + schedule::{v3::Named as ScheduleNamed, DispatchTime}, + Bounded, Currency, Get, LockIdentifier, LockableCurrency, OnUnbalanced, QueryPreimage, + ReservableCurrency, StorePreimage, WithdrawReasons, }, weights::Weight, }; -use scale_info::TypeInfo; use sp_runtime::{ - traits::{Bounded, Dispatchable, Hash, Saturating, StaticLookup, Zero}, - ArithmeticError, DispatchError, DispatchResult, RuntimeDebug, + traits::{Bounded as ArithBounded, One, Saturating, StaticLookup, Zero}, + ArithmeticError, DispatchError, DispatchResult, }; use sp_std::prelude::*; @@ -188,12 +187,9 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; -const DEMOCRACY_ID: LockIdentifier = *b"democrac"; +pub mod migrations; -/// The maximum number of vetoers on a single proposal used to compute Weight. -/// -/// NOTE: This is not enforced by any logic. -pub const MAX_VETOERS: u32 = 100; +const DEMOCRACY_ID: LockIdentifier = *b"democrac"; /// A proposal index. pub type PropIndex = u32; @@ -206,58 +202,36 @@ type BalanceOf = type NegativeImbalanceOf = <::Currency as Currency< ::AccountId, >>::NegativeImbalance; +pub type CallOf = ::RuntimeCall; +pub type BoundedCallOf = Bounded>; type AccountIdLookupOf = <::Lookup as StaticLookup>::Source; -#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] -pub enum PreimageStatus { - /// The preimage is imminently needed at the argument. - Missing(BlockNumber), - /// The preimage is available. - Available { - data: Vec, - provider: AccountId, - deposit: Balance, - since: BlockNumber, - /// None if it's not imminent. - expiry: Option, - }, -} - -impl PreimageStatus { - fn to_missing_expiry(self) -> Option { - match self { - PreimageStatus::Missing(expiry) => Some(expiry), - _ => None, - } - } -} - -// A value placed in storage that represents the current version of the Democracy storage. -// This value is used by the `on_runtime_upgrade` logic to determine whether we run -// storage migration logic. -#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo)] -enum Releases { - V1, -} - #[frame_support::pallet] pub mod pallet { use super::{DispatchResult, *}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use sp_core::H256; + + /// The current storage version. 
+ const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config + Sized { - type Proposal: Parameter - + Dispatchable - + From>; + type WeightInfo: WeightInfo; type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// The Scheduler. + type Scheduler: ScheduleNamed, Self::PalletsOrigin>; + + /// The Preimage provider. + type Preimages: QueryPreimage + StorePreimage; + /// Currency type for this pallet. type Currency: ReservableCurrency + LockableCurrency; @@ -289,6 +263,39 @@ pub mod pallet { #[pallet::constant] type MinimumDeposit: Get>; + /// Indicator for whether an emergency origin is even allowed to happen. Some chains may + /// want to set this permanently to `false`, others may want to condition it on things such + /// as an upgrade having happened recently. + #[pallet::constant] + type InstantAllowed: Get; + + /// Minimum voting period allowed for a fast-track referendum. + #[pallet::constant] + type FastTrackVotingPeriod: Get; + + /// Period in blocks where an external proposal may not be re-submitted after being vetoed. + #[pallet::constant] + type CooloffPeriod: Get; + + /// The maximum number of votes for an account. + /// + /// Also used to compute weight, an overly big value can + /// lead to extrinsic with very big weight: see `delegate` for instance. + #[pallet::constant] + type MaxVotes: Get; + + /// The maximum number of public proposals that can exist at any time. + #[pallet::constant] + type MaxProposals: Get; + + /// The maximum number of deposits a public proposal may have at any time. + #[pallet::constant] + type MaxDeposits: Get; + + /// The maximum number of items which can be blacklisted. + #[pallet::constant] + type MaxBlacklisted: Get; + /// Origin from which the next tabled referendum may be forced. This is a normal /// "super-majority-required" referendum. type ExternalOrigin: EnsureOrigin; @@ -311,16 +318,6 @@ pub mod pallet { /// origin. It retains its threshold method. type InstantOrigin: EnsureOrigin; - /// Indicator for whether an emergency origin is even allowed to happen. Some chains may - /// want to set this permanently to `false`, others may want to condition it on things such - /// as an upgrade having happened recently. - #[pallet::constant] - type InstantAllowed: Get; - - /// Minimum voting period allowed for a fast-track referendum. - #[pallet::constant] - type FastTrackVotingPeriod: Get; - /// Origin from which any referendum may be cancelled in an emergency. type CancellationOrigin: EnsureOrigin; @@ -331,79 +328,39 @@ pub mod pallet { type CancelProposalOrigin: EnsureOrigin; /// Origin for anyone able to veto proposals. - /// - /// # Warning - /// - /// The number of Vetoers for a proposal must be small, extrinsics are weighted according to - /// [MAX_VETOERS](./const.MAX_VETOERS.html) type VetoOrigin: EnsureOrigin; - /// Period in blocks where an external proposal may not be re-submitted after being vetoed. - #[pallet::constant] - type CooloffPeriod: Get; - - /// The amount of balance that must be deposited per byte of preimage stored. - #[pallet::constant] - type PreimageByteDeposit: Get>; - - /// An origin that can provide a preimage using operational extrinsics. - type OperationalPreimageOrigin: EnsureOrigin; - - /// Handler for the unbalanced reduction when slashing a preimage deposit. 
- type Slash: OnUnbalanced>; - - /// The Scheduler. - type Scheduler: ScheduleNamed; - /// Overarching type of all pallets origins. type PalletsOrigin: From>; - /// The maximum number of votes for an account. - /// - /// Also used to compute weight, an overly big value can - /// lead to extrinsic with very big weight: see `delegate` for instance. - #[pallet::constant] - type MaxVotes: Get; - - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; - - /// The maximum number of public proposals that can exist at any time. - #[pallet::constant] - type MaxProposals: Get; + /// Handler for the unbalanced reduction when slashing a preimage deposit. + type Slash: OnUnbalanced>; } - // TODO: Refactor public proposal queue into its own pallet. - // https://github.com/paritytech/substrate/issues/5322 /// The number of (public) proposals that have been made so far. #[pallet::storage] #[pallet::getter(fn public_prop_count)] pub type PublicPropCount = StorageValue<_, PropIndex, ValueQuery>; - /// The public proposals. Unsorted. The second item is the proposal's hash. + /// The public proposals. Unsorted. The second item is the proposal. #[pallet::storage] #[pallet::getter(fn public_props)] - pub type PublicProps = - StorageValue<_, Vec<(PropIndex, T::Hash, T::AccountId)>, ValueQuery>; + pub type PublicProps = StorageValue< + _, + BoundedVec<(PropIndex, BoundedCallOf, T::AccountId), T::MaxProposals>, + ValueQuery, + >; /// Those who have locked a deposit. /// /// TWOX-NOTE: Safe, as increasing integer keys are safe. #[pallet::storage] #[pallet::getter(fn deposit_of)] - pub type DepositOf = - StorageMap<_, Twox64Concat, PropIndex, (Vec, BalanceOf)>; - - /// Map of hashes to the proposal preimage, along with who registered it and their deposit. - /// The block number is the block at which it was deposited. - // TODO: Refactor Preimages into its own pallet. - // https://github.com/paritytech/substrate/issues/5322 - #[pallet::storage] - pub type Preimages = StorageMap< + pub type DepositOf = StorageMap< _, - Identity, - T::Hash, - PreimageStatus, T::BlockNumber>, + Twox64Concat, + PropIndex, + (BoundedVec, BalanceOf), >; /// The next free referendum index, aka the number of referenda started so far. @@ -426,7 +383,7 @@ pub mod pallet { _, Twox64Concat, ReferendumIndex, - ReferendumInfo>, + ReferendumInfo, BalanceOf>, >; /// All votes for a particular voter. We store the balance for the number of votes that we @@ -438,14 +395,12 @@ pub mod pallet { _, Twox64Concat, T::AccountId, - Voting, T::AccountId, T::BlockNumber>, + Voting, T::AccountId, T::BlockNumber, T::MaxVotes>, ValueQuery, >; /// True if the last referendum tabled was submitted externally. False if it was a public /// proposal. - // TODO: There should be any number of tabling origins, not just public and "external" - // (council). https://github.com/paritytech/substrate/issues/5322 #[pallet::storage] pub type LastTabledWasExternal = StorageValue<_, bool, ValueQuery>; @@ -454,23 +409,21 @@ pub mod pallet { /// - `LastTabledWasExternal` is `false`; or /// - `PublicProps` is empty. #[pallet::storage] - pub type NextExternal = StorageValue<_, (T::Hash, VoteThreshold)>; + pub type NextExternal = StorageValue<_, (BoundedCallOf, VoteThreshold)>; /// A record of who vetoed what. Maps proposal hash to a possible existent block number /// (until when it may not be resubmitted) and who vetoed it. 
#[pallet::storage] - pub type Blacklist = - StorageMap<_, Identity, T::Hash, (T::BlockNumber, Vec)>; + pub type Blacklist = StorageMap< + _, + Identity, + H256, + (T::BlockNumber, BoundedVec), + >; /// Record of all proposals that have been subject to emergency cancellation. #[pallet::storage] - pub type Cancellations = StorageMap<_, Identity, T::Hash, bool, ValueQuery>; - - /// Storage version of the pallet. - /// - /// New networks start with last version. - #[pallet::storage] - pub(crate) type StorageVersion = StorageValue<_, Releases>; + pub type Cancellations = StorageMap<_, Identity, H256, bool, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig { @@ -490,7 +443,6 @@ pub mod pallet { PublicPropCount::::put(0 as PropIndex); ReferendumCount::::put(0 as ReferendumIndex); LowestUnbaked::::put(0 as ReferendumIndex); - StorageVersion::::put(Releases::V1); } } @@ -500,7 +452,7 @@ pub mod pallet { /// A motion has been proposed by a public account. Proposed { proposal_index: PropIndex, deposit: BalanceOf }, /// A public proposal has been tabled for referendum vote. - Tabled { proposal_index: PropIndex, deposit: BalanceOf, depositors: Vec }, + Tabled { proposal_index: PropIndex, deposit: BalanceOf }, /// An external proposal has been tabled. ExternalTabled, /// A referendum has begun. @@ -511,31 +463,14 @@ pub mod pallet { NotPassed { ref_index: ReferendumIndex }, /// A referendum has been cancelled. Cancelled { ref_index: ReferendumIndex }, - /// A proposal has been enacted. - Executed { ref_index: ReferendumIndex, result: DispatchResult }, /// An account has delegated their vote to another account. Delegated { who: T::AccountId, target: T::AccountId }, /// An account has cancelled a previous delegation operation. Undelegated { account: T::AccountId }, /// An external proposal has been vetoed. - Vetoed { who: T::AccountId, proposal_hash: T::Hash, until: T::BlockNumber }, - /// A proposal's preimage was noted, and the deposit taken. - PreimageNoted { proposal_hash: T::Hash, who: T::AccountId, deposit: BalanceOf }, - /// A proposal preimage was removed and used (the deposit was returned). - PreimageUsed { proposal_hash: T::Hash, provider: T::AccountId, deposit: BalanceOf }, - /// A proposal could not be executed because its preimage was invalid. - PreimageInvalid { proposal_hash: T::Hash, ref_index: ReferendumIndex }, - /// A proposal could not be executed because its preimage was missing. - PreimageMissing { proposal_hash: T::Hash, ref_index: ReferendumIndex }, - /// A registered preimage was removed and the deposit collected by the reaper. - PreimageReaped { - proposal_hash: T::Hash, - provider: T::AccountId, - deposit: BalanceOf, - reaper: T::AccountId, - }, + Vetoed { who: T::AccountId, proposal_hash: H256, until: T::BlockNumber }, /// A proposal_hash has been blacklisted permanently. 
- Blacklisted { proposal_hash: T::Hash }, + Blacklisted { proposal_hash: H256 }, /// An account has voted in a referendum Voted { voter: T::AccountId, ref_index: ReferendumIndex, vote: AccountVote> }, /// An account has secconded a proposal @@ -564,20 +499,8 @@ pub mod pallet { NoProposal, /// Identity may not veto a proposal twice AlreadyVetoed, - /// Preimage already noted - DuplicatePreimage, - /// Not imminent - NotImminent, - /// Too early - TooEarly, - /// Imminent - Imminent, - /// Preimage not found - PreimageMissing, /// Vote given for invalid referendum ReferendumInvalid, - /// Invalid preimage - PreimageInvalid, /// No proposals waiting NoneWaiting, /// The given account did not vote on the referendum. @@ -601,8 +524,8 @@ pub mod pallet { WrongUpperBound, /// Maximum number of votes reached. MaxVotesReached, - /// Maximum number of proposals reached. - TooManyProposals, + /// Maximum number of items reached. + TooMany, /// Voting period too low VotingPeriodLow, } @@ -626,12 +549,10 @@ pub mod pallet { /// - `value`: The amount of deposit (must be at least `MinimumDeposit`). /// /// Emits `Proposed`. - /// - /// Weight: `O(p)` #[pallet::weight(T::WeightInfo::propose())] pub fn propose( origin: OriginFor, - proposal_hash: T::Hash, + proposal: BoundedCallOf, #[pallet::compact] value: BalanceOf, ) -> DispatchResult { let who = ensure_signed(origin)?; @@ -640,7 +561,8 @@ pub mod pallet { let index = Self::public_prop_count(); let real_prop_count = PublicProps::::decode_len().unwrap_or(0) as u32; let max_proposals = T::MaxProposals::get(); - ensure!(real_prop_count < max_proposals, Error::::TooManyProposals); + ensure!(real_prop_count < max_proposals, Error::::TooMany); + let proposal_hash = proposal.hash(); if let Some((until, _)) = >::get(proposal_hash) { ensure!( @@ -650,10 +572,14 @@ pub mod pallet { } T::Currency::reserve(&who, value)?; + + let depositors = BoundedVec::<_, T::MaxDeposits>::truncate_from(vec![who.clone()]); + DepositOf::::insert(index, (depositors, value)); + PublicPropCount::::put(index + 1); - >::insert(index, (&[&who][..], value)); - >::append((index, proposal_hash, who)); + PublicProps::::try_append((index, proposal, who)) + .map_err(|_| Error::::TooMany)?; Self::deposit_event(Event::::Proposed { proposal_index: index, deposit: value }); Ok(()) @@ -665,23 +591,19 @@ pub mod pallet { /// must have funds to cover the deposit, equal to the original deposit. /// /// - `proposal`: The index of the proposal to second. - /// - `seconds_upper_bound`: an upper bound on the current number of seconds on this - /// proposal. Extrinsic is weighted according to this value with no refund. - /// - /// Weight: `O(S)` where S is the number of seconds a proposal already has. 
- #[pallet::weight(T::WeightInfo::second(*seconds_upper_bound))] + #[pallet::weight(T::WeightInfo::second())] pub fn second( origin: OriginFor, #[pallet::compact] proposal: PropIndex, - #[pallet::compact] seconds_upper_bound: u32, ) -> DispatchResult { let who = ensure_signed(origin)?; let seconds = Self::len_of_deposit_of(proposal).ok_or(Error::::ProposalMissing)?; - ensure!(seconds <= seconds_upper_bound, Error::::WrongUpperBound); + ensure!(seconds < T::MaxDeposits::get(), Error::::TooMany); let mut deposit = Self::deposit_of(proposal).ok_or(Error::::ProposalMissing)?; T::Currency::reserve(&who, deposit.1)?; - deposit.0.push(who.clone()); + let ok = deposit.0.try_push(who.clone()).is_ok(); + debug_assert!(ok, "`seconds` is below static limit; `try_insert` should succeed; qed"); >::insert(proposal, deposit); Self::deposit_event(Event::::Seconded { seconder: who, prop_index: proposal }); Ok(()) @@ -694,12 +616,7 @@ pub mod pallet { /// /// - `ref_index`: The index of the referendum to vote for. /// - `vote`: The vote configuration. - /// - /// Weight: `O(R)` where R is the number of referendums the voter has voted on. - #[pallet::weight( - T::WeightInfo::vote_new(T::MaxVotes::get()) - .max(T::WeightInfo::vote_existing(T::MaxVotes::get())) - )] + #[pallet::weight(T::WeightInfo::vote_new().max(T::WeightInfo::vote_existing()))] pub fn vote( origin: OriginFor, #[pallet::compact] ref_index: ReferendumIndex, @@ -725,7 +642,7 @@ pub mod pallet { T::CancellationOrigin::ensure_origin(origin)?; let status = Self::referendum_status(ref_index)?; - let h = status.proposal_hash; + let h = status.proposal.hash(); ensure!(!>::contains_key(h), Error::::AlreadyCanceled); >::insert(h, true); @@ -739,20 +656,20 @@ pub mod pallet { /// The dispatch origin of this call must be `ExternalOrigin`. /// /// - `proposal_hash`: The preimage hash of the proposal. - /// - /// Weight: `O(V)` with V number of vetoers in the blacklist of proposal. - /// Decoding vec of length V. 
Charged as maximum - #[pallet::weight(T::WeightInfo::external_propose(MAX_VETOERS))] - pub fn external_propose(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { + #[pallet::weight(T::WeightInfo::external_propose())] + pub fn external_propose( + origin: OriginFor, + proposal: BoundedCallOf, + ) -> DispatchResult { T::ExternalOrigin::ensure_origin(origin)?; ensure!(!>::exists(), Error::::DuplicateProposal); - if let Some((until, _)) = >::get(proposal_hash) { + if let Some((until, _)) = >::get(proposal.hash()) { ensure!( >::block_number() >= until, Error::::ProposalBlacklisted, ); } - >::put((proposal_hash, VoteThreshold::SuperMajorityApprove)); + >::put((proposal, VoteThreshold::SuperMajorityApprove)); Ok(()) } @@ -770,10 +687,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::external_propose_majority())] pub fn external_propose_majority( origin: OriginFor, - proposal_hash: T::Hash, + proposal: BoundedCallOf, ) -> DispatchResult { T::ExternalMajorityOrigin::ensure_origin(origin)?; - >::put((proposal_hash, VoteThreshold::SimpleMajority)); + >::put((proposal, VoteThreshold::SimpleMajority)); Ok(()) } @@ -791,10 +708,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::external_propose_default())] pub fn external_propose_default( origin: OriginFor, - proposal_hash: T::Hash, + proposal: BoundedCallOf, ) -> DispatchResult { T::ExternalDefaultOrigin::ensure_origin(origin)?; - >::put((proposal_hash, VoteThreshold::SuperMajorityAgainst)); + >::put((proposal, VoteThreshold::SuperMajorityAgainst)); Ok(()) } @@ -805,7 +722,7 @@ pub mod pallet { /// The dispatch of this call must be `FastTrackOrigin`. /// /// - `proposal_hash`: The hash of the current external proposal. - /// - `voting_period`: The period that is allowed for voting on this proposal. + /// - `voting_period`: The period that is allowed for voting on this proposal. Increased to /// Must be always greater than zero. /// For `FastTrackOrigin` must be equal or greater than `FastTrackVotingPeriod`. /// - `delay`: The number of block after voting has ended in approval and this should be @@ -817,7 +734,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::fast_track())] pub fn fast_track( origin: OriginFor, - proposal_hash: T::Hash, + proposal_hash: H256, voting_period: T::BlockNumber, delay: T::BlockNumber, ) -> DispatchResult { @@ -836,20 +753,21 @@ pub mod pallet { T::InstantOrigin::ensure_origin(ensure_instant)?; ensure!(T::InstantAllowed::get(), Error::::InstantNotAllowed); } + ensure!(voting_period > T::BlockNumber::zero(), Error::::VotingPeriodLow); - let (e_proposal_hash, threshold) = + let (ext_proposal, threshold) = >::get().ok_or(Error::::ProposalMissing)?; ensure!( threshold != VoteThreshold::SuperMajorityApprove, Error::::NotSimpleMajority, ); - ensure!(proposal_hash == e_proposal_hash, Error::::InvalidHash); + ensure!(proposal_hash == ext_proposal.hash(), Error::::InvalidHash); >::kill(); let now = >::block_number(); Self::inject_referendum( now.saturating_add(voting_period), - proposal_hash, + ext_proposal, threshold, delay, ); @@ -865,22 +783,24 @@ pub mod pallet { /// Emits `Vetoed`. 
/// /// Weight: `O(V + log(V))` where V is number of `existing vetoers` - #[pallet::weight(T::WeightInfo::veto_external(MAX_VETOERS))] - pub fn veto_external(origin: OriginFor, proposal_hash: T::Hash) -> DispatchResult { + #[pallet::weight(T::WeightInfo::veto_external())] + pub fn veto_external(origin: OriginFor, proposal_hash: H256) -> DispatchResult { let who = T::VetoOrigin::ensure_origin(origin)?; - if let Some((e_proposal_hash, _)) = >::get() { - ensure!(proposal_hash == e_proposal_hash, Error::::ProposalMissing); + if let Some((ext_proposal, _)) = NextExternal::::get() { + ensure!(proposal_hash == ext_proposal.hash(), Error::::ProposalMissing); } else { return Err(Error::::NoProposal.into()) } let mut existing_vetoers = - >::get(&proposal_hash).map(|pair| pair.1).unwrap_or_else(Vec::new); + >::get(&proposal_hash).map(|pair| pair.1).unwrap_or_default(); let insert_position = existing_vetoers.binary_search(&who).err().ok_or(Error::::AlreadyVetoed)?; + existing_vetoers + .try_insert(insert_position, who.clone()) + .map_err(|_| Error::::TooMany)?; - existing_vetoers.insert(insert_position, who.clone()); let until = >::block_number().saturating_add(T::CooloffPeriod::get()); >::insert(&proposal_hash, (until, existing_vetoers)); @@ -907,21 +827,6 @@ pub mod pallet { Ok(()) } - /// Cancel a proposal queued for enactment. - /// - /// The dispatch origin of this call must be _Root_. - /// - /// - `which`: The index of the referendum to cancel. - /// - /// Weight: `O(D)` where `D` is the items in the dispatch queue. Weighted as `D = 10`. - #[pallet::weight((T::WeightInfo::cancel_queued(10), DispatchClass::Operational))] - pub fn cancel_queued(origin: OriginFor, which: ReferendumIndex) -> DispatchResult { - ensure_root(origin)?; - T::Scheduler::cancel_named((DEMOCRACY_ID, which).encode()) - .map_err(|_| Error::::ProposalMissing)?; - Ok(()) - } - /// Delegate the voting power (with some given conviction) of the sending account. /// /// The balance delegated is locked for as long as it's delegated, and thereafter for the @@ -991,135 +896,6 @@ pub mod pallet { Ok(()) } - /// Register the preimage for an upcoming proposal. This doesn't require the proposal to be - /// in the dispatch queue but does require a deposit, returned once enacted. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `encoded_proposal`: The preimage of a proposal. - /// - /// Emits `PreimageNoted`. - /// - /// Weight: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). - #[pallet::weight(T::WeightInfo::note_preimage(encoded_proposal.len() as u32))] - pub fn note_preimage(origin: OriginFor, encoded_proposal: Vec) -> DispatchResult { - Self::note_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; - Ok(()) - } - - /// Same as `note_preimage` but origin is `OperationalPreimageOrigin`. - #[pallet::weight(( - T::WeightInfo::note_preimage(encoded_proposal.len() as u32), - DispatchClass::Operational, - ))] - pub fn note_preimage_operational( - origin: OriginFor, - encoded_proposal: Vec, - ) -> DispatchResult { - let who = T::OperationalPreimageOrigin::ensure_origin(origin)?; - Self::note_preimage_inner(who, encoded_proposal)?; - Ok(()) - } - - /// Register the preimage for an upcoming proposal. This requires the proposal to be - /// in the dispatch queue. No deposit is needed. When this call is successful, i.e. - /// the preimage has not been uploaded before and matches some imminent proposal, - /// no fee is paid. - /// - /// The dispatch origin of this call must be _Signed_. 
- /// - /// - `encoded_proposal`: The preimage of a proposal. - /// - /// Emits `PreimageNoted`. - /// - /// Weight: `O(E)` with E size of `encoded_proposal` (protected by a required deposit). - #[pallet::weight(T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32))] - pub fn note_imminent_preimage( - origin: OriginFor, - encoded_proposal: Vec, - ) -> DispatchResultWithPostInfo { - Self::note_imminent_preimage_inner(ensure_signed(origin)?, encoded_proposal)?; - // We check that this preimage was not uploaded before in - // `note_imminent_preimage_inner`, thus this call can only be successful once. If - // successful, user does not pay a fee. - Ok(Pays::No.into()) - } - - /// Same as `note_imminent_preimage` but origin is `OperationalPreimageOrigin`. - #[pallet::weight(( - T::WeightInfo::note_imminent_preimage(encoded_proposal.len() as u32), - DispatchClass::Operational, - ))] - pub fn note_imminent_preimage_operational( - origin: OriginFor, - encoded_proposal: Vec, - ) -> DispatchResultWithPostInfo { - let who = T::OperationalPreimageOrigin::ensure_origin(origin)?; - Self::note_imminent_preimage_inner(who, encoded_proposal)?; - // We check that this preimage was not uploaded before in - // `note_imminent_preimage_inner`, thus this call can only be successful once. If - // successful, user does not pay a fee. - Ok(Pays::No.into()) - } - - /// Remove an expired proposal preimage and collect the deposit. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `proposal_hash`: The preimage hash of a proposal. - /// - `proposal_length_upper_bound`: an upper bound on length of the proposal. Extrinsic is - /// weighted according to this value with no refund. - /// - /// This will only work after `VotingPeriod` blocks from the time that the preimage was - /// noted, if it's the same account doing it. If it's a different account, then it'll only - /// work an additional `EnactmentPeriod` later. - /// - /// Emits `PreimageReaped`. - /// - /// Weight: `O(D)` where D is length of proposal. - #[pallet::weight(T::WeightInfo::reap_preimage(*proposal_len_upper_bound))] - pub fn reap_preimage( - origin: OriginFor, - proposal_hash: T::Hash, - #[pallet::compact] proposal_len_upper_bound: u32, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - - ensure!( - Self::pre_image_data_len(proposal_hash)? <= proposal_len_upper_bound, - Error::::WrongUpperBound, - ); - - let (provider, deposit, since, expiry) = >::get(&proposal_hash) - .and_then(|m| match m { - PreimageStatus::Available { provider, deposit, since, expiry, .. } => - Some((provider, deposit, since, expiry)), - _ => None, - }) - .ok_or(Error::::PreimageMissing)?; - - let now = >::block_number(); - let (voting, enactment) = (T::VotingPeriod::get(), T::EnactmentPeriod::get()); - let additional = if who == provider { Zero::zero() } else { enactment }; - ensure!( - now >= since.saturating_add(voting).saturating_add(additional), - Error::::TooEarly - ); - ensure!(expiry.map_or(true, |e| now > e), Error::::Imminent); - - let res = - T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); - debug_assert!(res.is_ok()); - >::remove(&proposal_hash); - Self::deposit_event(Event::::PreimageReaped { - proposal_hash, - provider, - deposit, - reaper: who, - }); - Ok(()) - } - /// Unlock tokens that have an expired lock. /// /// The dispatch origin of this call must be _Signed_. @@ -1127,10 +903,7 @@ pub mod pallet { /// - `target`: The account to remove the lock on. 
/// /// Weight: `O(R)` with R number of vote of target. - #[pallet::weight( - T::WeightInfo::unlock_set(T::MaxVotes::get()) - .max(T::WeightInfo::unlock_remove(T::MaxVotes::get())) - )] + #[pallet::weight(T::WeightInfo::unlock_set(T::MaxVotes::get()).max(T::WeightInfo::unlock_remove(T::MaxVotes::get())))] pub fn unlock(origin: OriginFor, target: AccountIdLookupOf) -> DispatchResult { ensure_signed(origin)?; let target = T::Lookup::lookup(target)?; @@ -1199,17 +972,6 @@ pub mod pallet { Ok(()) } - /// Enact a proposal from a referendum. For now we just make the weight be the maximum. - #[pallet::weight(T::BlockWeights::get().max_block)] - pub fn enact_proposal( - origin: OriginFor, - proposal_hash: T::Hash, - index: ReferendumIndex, - ) -> DispatchResult { - ensure_root(origin)?; - Self::do_enact_proposal(proposal_hash, index) - } - /// Permanently place a proposal into the blacklist. This prevents it from ever being /// proposed again. /// @@ -1225,21 +987,21 @@ pub mod pallet { /// /// Weight: `O(p)` (though as this is an high-privilege dispatch, we assume it has a /// reasonable value). - #[pallet::weight((T::WeightInfo::blacklist(T::MaxProposals::get()), DispatchClass::Operational))] + #[pallet::weight((T::WeightInfo::blacklist(), DispatchClass::Operational))] pub fn blacklist( origin: OriginFor, - proposal_hash: T::Hash, + proposal_hash: H256, maybe_ref_index: Option, ) -> DispatchResult { T::BlacklistOrigin::ensure_origin(origin)?; // Insert the proposal into the blacklist. - let permanent = (T::BlockNumber::max_value(), Vec::::new()); + let permanent = (T::BlockNumber::max_value(), BoundedVec::::default()); Blacklist::::insert(&proposal_hash, permanent); // Remove the queued proposal, if it's there. PublicProps::::mutate(|props| { - if let Some(index) = props.iter().position(|p| p.1 == proposal_hash) { + if let Some(index) = props.iter().position(|p| p.1.hash() == proposal_hash) { let (prop_index, ..) = props.remove(index); if let Some((whos, amount)) = DepositOf::::take(prop_index) { for who in whos.into_iter() { @@ -1250,14 +1012,14 @@ pub mod pallet { }); // Remove the external queued referendum, if it's there. - if matches!(NextExternal::::get(), Some((h, ..)) if h == proposal_hash) { + if matches!(NextExternal::::get(), Some((p, ..)) if p.hash() == proposal_hash) { NextExternal::::kill(); } // Remove the referendum, if it's there. if let Some(ref_index) = maybe_ref_index { if let Ok(status) = Self::referendum_status(ref_index) { - if status.proposal_hash == proposal_hash { + if status.proposal.hash() == proposal_hash { Self::internal_cancel_referendum(ref_index); } } @@ -1274,7 +1036,7 @@ pub mod pallet { /// - `prop_index`: The index of the proposal to cancel. /// /// Weight: `O(p)` where `p = PublicProps::::decode_len()` - #[pallet::weight(T::WeightInfo::cancel_proposal(T::MaxProposals::get()))] + #[pallet::weight(T::WeightInfo::cancel_proposal())] pub fn cancel_proposal( origin: OriginFor, #[pallet::compact] prop_index: PropIndex, @@ -1294,6 +1056,25 @@ pub mod pallet { } } +pub trait EncodeInto: Encode { + fn encode_into + Default>(&self) -> T { + let mut t = T::default(); + self.using_encoded(|data| { + if data.len() <= t.as_mut().len() { + t.as_mut()[0..data.len()].copy_from_slice(data); + } else { + // encoded self is too big to fit into a T. hash it and use the first bytes of that + // instead. 
+ let hash = sp_io::hashing::blake2_256(data); + let l = t.as_mut().len().min(hash.len()); + t.as_mut()[0..l].copy_from_slice(&hash[0..l]); + } + }); + t + } +} +impl EncodeInto for T {} + impl Pallet { // exposed immutables. @@ -1306,7 +1087,7 @@ impl Pallet { /// Get all referenda ready for tally at block `n`. pub fn maturing_referenda_at( n: T::BlockNumber, - ) -> Vec<(ReferendumIndex, ReferendumStatus>)> { + ) -> Vec<(ReferendumIndex, ReferendumStatus, BalanceOf>)> { let next = Self::lowest_unbaked(); let last = Self::referendum_count(); Self::maturing_referenda_at_inner(n, next..last) @@ -1315,7 +1096,7 @@ impl Pallet { fn maturing_referenda_at_inner( n: T::BlockNumber, range: core::ops::Range, - ) -> Vec<(ReferendumIndex, ReferendumStatus>)> { + ) -> Vec<(ReferendumIndex, ReferendumStatus, BalanceOf>)> { range .into_iter() .map(|i| (i, Self::referendum_info(i))) @@ -1331,13 +1112,13 @@ impl Pallet { /// Start a referendum. pub fn internal_start_referendum( - proposal_hash: T::Hash, + proposal: BoundedCallOf, threshold: VoteThreshold, delay: T::BlockNumber, ) -> ReferendumIndex { >::inject_referendum( >::block_number().saturating_add(T::VotingPeriod::get()), - proposal_hash, + proposal, threshold, delay, ) @@ -1353,8 +1134,8 @@ impl Pallet { /// Ok if the given referendum is active, Err otherwise fn ensure_ongoing( - r: ReferendumInfo>, - ) -> Result>, DispatchError> { + r: ReferendumInfo, BalanceOf>, + ) -> Result, BalanceOf>, DispatchError> { match r { ReferendumInfo::Ongoing(s) => Ok(s), _ => Err(Error::::ReferendumInvalid.into()), @@ -1363,7 +1144,7 @@ impl Pallet { fn referendum_status( ref_index: ReferendumIndex, - ) -> Result>, DispatchError> { + ) -> Result, BalanceOf>, DispatchError> { let info = ReferendumInfoOf::::get(ref_index).ok_or(Error::::ReferendumInvalid)?; Self::ensure_ongoing(info) } @@ -1388,11 +1169,9 @@ impl Pallet { votes[i].1 = vote; }, Err(i) => { - ensure!( - votes.len() as u32 <= T::MaxVotes::get(), - Error::::MaxVotesReached - ); - votes.insert(i, (ref_index, vote)); + votes + .try_insert(i, (ref_index, vote)) + .map_err(|_| Error::::MaxVotesReached)?; }, } Self::deposit_event(Event::::Voted { voter: who.clone(), ref_index, vote }); @@ -1606,14 +1385,14 @@ impl Pallet { /// Start a referendum fn inject_referendum( end: T::BlockNumber, - proposal_hash: T::Hash, + proposal: BoundedCallOf, threshold: VoteThreshold, delay: T::BlockNumber, ) -> ReferendumIndex { let ref_index = Self::referendum_count(); ReferendumCount::::put(ref_index + 1); let status = - ReferendumStatus { end, proposal_hash, threshold, delay, tally: Default::default() }; + ReferendumStatus { end, proposal, threshold, delay, tally: Default::default() }; let item = ReferendumInfo::Ongoing(status); >::insert(ref_index, item); Self::deposit_event(Event::::Started { ref_index, threshold }); @@ -1659,14 +1438,10 @@ impl Pallet { if let Some((depositors, deposit)) = >::take(prop_index) { // refund depositors - for d in &depositors { + for d in depositors.iter() { T::Currency::unreserve(d, deposit); } - Self::deposit_event(Event::::Tabled { - proposal_index: prop_index, - deposit, - depositors, - }); + Self::deposit_event(Event::::Tabled { proposal_index: prop_index, deposit }); Self::inject_referendum( now.saturating_add(T::VotingPeriod::get()), proposal, @@ -1680,71 +1455,35 @@ impl Pallet { } } - fn do_enact_proposal(proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { - let preimage = >::take(&proposal_hash); - if let Some(PreimageStatus::Available { data, provider, deposit, .. 
}) = preimage { - if let Ok(proposal) = T::Proposal::decode(&mut &data[..]) { - let err_amount = T::Currency::unreserve(&provider, deposit); - debug_assert!(err_amount.is_zero()); - Self::deposit_event(Event::::PreimageUsed { proposal_hash, provider, deposit }); - - let res = proposal - .dispatch(frame_system::RawOrigin::Root.into()) - .map(|_| ()) - .map_err(|e| e.error); - Self::deposit_event(Event::::Executed { ref_index: index, result: res }); - - Ok(()) - } else { - T::Slash::on_unbalanced(T::Currency::slash_reserved(&provider, deposit).0); - Self::deposit_event(Event::::PreimageInvalid { - proposal_hash, - ref_index: index, - }); - Err(Error::::PreimageInvalid.into()) - } - } else { - Self::deposit_event(Event::::PreimageMissing { proposal_hash, ref_index: index }); - Err(Error::::PreimageMissing.into()) - } - } - fn bake_referendum( now: T::BlockNumber, index: ReferendumIndex, - status: ReferendumStatus>, + status: ReferendumStatus, BalanceOf>, ) -> bool { let total_issuance = T::Currency::total_issuance(); let approved = status.threshold.approved(status.tally, total_issuance); if approved { Self::deposit_event(Event::::Passed { ref_index: index }); - if status.delay.is_zero() { - let _ = Self::do_enact_proposal(status.proposal_hash, index); - } else { - let when = now.saturating_add(status.delay); - // Note that we need the preimage now. - Preimages::::mutate_exists( - &status.proposal_hash, - |maybe_pre| match *maybe_pre { - Some(PreimageStatus::Available { ref mut expiry, .. }) => - *expiry = Some(when), - ref mut a => *a = Some(PreimageStatus::Missing(when)), - }, - ); - - if T::Scheduler::schedule_named( - (DEMOCRACY_ID, index).encode(), - DispatchTime::At(when), - None, - 63, - frame_system::RawOrigin::Root.into(), - Call::enact_proposal { proposal_hash: status.proposal_hash, index }.into(), - ) - .is_err() - { - frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); - } + // Actually `hold` the proposal now since we didn't hold it when it came in via the + // submit extrinsic and we now know that it will be needed. This will be reversed by + // Scheduler pallet once it is executed which assumes that we will already have placed + // a `hold` on it. + T::Preimages::hold(&status.proposal); + + // Earliest it can be scheduled for is next block. + let when = now.saturating_add(status.delay.max(One::one())); + if T::Scheduler::schedule_named( + (DEMOCRACY_ID, index).encode_into(), + DispatchTime::At(when), + None, + 63, + frame_system::RawOrigin::Root.into(), + status.proposal, + ) + .is_err() + { + frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); } } else { Self::deposit_event(Event::::NotPassed { ref_index: index }); @@ -1780,11 +1519,10 @@ impl Pallet { if Self::launch_next(now).is_ok() { weight = max_block_weight; } else { - weight = - weight.saturating_add(T::WeightInfo::on_initialize_base_with_launch_period(r)); + weight.saturating_accrue(T::WeightInfo::on_initialize_base_with_launch_period(r)); } } else { - weight = weight.saturating_add(T::WeightInfo::on_initialize_base(r)); + weight.saturating_accrue(T::WeightInfo::on_initialize_base(r)); } // tally up votes for any expiring referenda. @@ -1795,8 +1533,8 @@ impl Pallet { } // Notes: - // * We don't consider the lowest unbaked to be the last maturing in case some refendum have - // longer voting period than others. + // * We don't consider the lowest unbaked to be the last maturing in case some referenda + // have a longer voting period than others. 
// * The iteration here shouldn't trigger any storage read that are not in cache, due to // `maturing_referenda_at_inner` having already read them. // * We shouldn't iterate more than `LaunchPeriod/VotingPeriod + 1` times because the number @@ -1822,116 +1560,6 @@ impl Pallet { // `Compact`. decode_compact_u32_at(&>::hashed_key_for(proposal)) } - - /// Check that pre image exists and its value is variant `PreimageStatus::Missing`. - /// - /// This check is done without getting the complete value in the runtime to avoid copying a big - /// value in the runtime. - fn check_pre_image_is_missing(proposal_hash: T::Hash) -> DispatchResult { - // To decode the enum variant we only need the first byte. - let mut buf = [0u8; 1]; - let key = >::hashed_key_for(proposal_hash); - let bytes = sp_io::storage::read(&key, &mut buf, 0).ok_or(Error::::NotImminent)?; - // The value may be smaller that 1 byte. - let mut input = &buf[0..buf.len().min(bytes as usize)]; - - match input.read_byte() { - Ok(0) => Ok(()), // PreimageStatus::Missing is variant 0 - Ok(1) => Err(Error::::DuplicatePreimage.into()), - _ => { - sp_runtime::print("Failed to decode `PreimageStatus` variant"); - Err(Error::::NotImminent.into()) - }, - } - } - - /// Check that pre image exists, its value is variant `PreimageStatus::Available` and decode - /// the length of `data: Vec` fields. - /// - /// This check is done without getting the complete value in the runtime to avoid copying a big - /// value in the runtime. - /// - /// If the pre image is missing variant or doesn't exist then the error `PreimageMissing` is - /// returned. - fn pre_image_data_len(proposal_hash: T::Hash) -> Result { - // To decode the `data` field of Available variant we need: - // * one byte for the variant - // * at most 5 bytes to decode a `Compact` - let mut buf = [0u8; 6]; - let key = >::hashed_key_for(proposal_hash); - let bytes = sp_io::storage::read(&key, &mut buf, 0).ok_or(Error::::PreimageMissing)?; - // The value may be smaller that 6 bytes. - let mut input = &buf[0..buf.len().min(bytes as usize)]; - - match input.read_byte() { - Ok(1) => (), // Check that input exists and is second variant. - Ok(0) => return Err(Error::::PreimageMissing.into()), - _ => { - sp_runtime::print("Failed to decode `PreimageStatus` variant"); - return Err(Error::::PreimageMissing.into()) - }, - } - - // Decode the length of the vector. - let len = codec::Compact::::decode(&mut input) - .map_err(|_| { - sp_runtime::print("Failed to decode `PreimageStatus` variant"); - DispatchError::from(Error::::PreimageMissing) - })? 
- .0; - - Ok(len) - } - - // See `note_preimage` - fn note_preimage_inner(who: T::AccountId, encoded_proposal: Vec) -> DispatchResult { - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - ensure!(!>::contains_key(&proposal_hash), Error::::DuplicatePreimage); - - let deposit = >::from(encoded_proposal.len() as u32) - .saturating_mul(T::PreimageByteDeposit::get()); - T::Currency::reserve(&who, deposit)?; - - let now = >::block_number(); - let a = PreimageStatus::Available { - data: encoded_proposal, - provider: who.clone(), - deposit, - since: now, - expiry: None, - }; - >::insert(proposal_hash, a); - - Self::deposit_event(Event::::PreimageNoted { proposal_hash, who, deposit }); - - Ok(()) - } - - // See `note_imminent_preimage` - fn note_imminent_preimage_inner( - who: T::AccountId, - encoded_proposal: Vec, - ) -> DispatchResult { - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - Self::check_pre_image_is_missing(proposal_hash)?; - let status = Preimages::::get(&proposal_hash).ok_or(Error::::NotImminent)?; - let expiry = status.to_missing_expiry().ok_or(Error::::DuplicatePreimage)?; - - let now = >::block_number(); - let free = >::zero(); - let a = PreimageStatus::Available { - data: encoded_proposal, - provider: who.clone(), - deposit: Zero::zero(), - since: now, - expiry: Some(expiry), - }; - >::insert(proposal_hash, a); - - Self::deposit_event(Event::::PreimageNoted { proposal_hash, who, deposit: free }); - - Ok(()) - } } /// Decode `Compact` from the trie at given key. diff --git a/frame/democracy/src/migrations.rs b/frame/democracy/src/migrations.rs new file mode 100644 index 0000000000000..3ec249c1d981c --- /dev/null +++ b/frame/democracy/src/migrations.rs @@ -0,0 +1,236 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage migrations for the preimage pallet. + +use super::*; +use frame_support::{pallet_prelude::*, storage_alias, traits::OnRuntimeUpgrade, BoundedVec}; +use sp_core::H256; + +/// The log target. +const TARGET: &'static str = "runtime::democracy::migration::v1"; + +/// The original data layout of the democracy pallet without a specific version number. +mod v0 { + use super::*; + + #[storage_alias] + pub type PublicProps = StorageValue< + Pallet, + Vec<(PropIndex, ::Hash, ::AccountId)>, + ValueQuery, + >; + + #[storage_alias] + pub type NextExternal = + StorageValue, (::Hash, VoteThreshold)>; + + #[cfg(feature = "try-runtime")] + #[storage_alias] + pub type ReferendumInfoOf = StorageMap< + Pallet, + frame_support::Twox64Concat, + ReferendumIndex, + ReferendumInfo< + ::BlockNumber, + ::Hash, + BalanceOf, + >, + >; +} + +pub mod v1 { + use super::*; + + /// Migration for translating bare `Hash`es into `Bounded`s. 
+ pub struct Migration(sp_std::marker::PhantomData); + + impl> OnRuntimeUpgrade for Migration { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + assert_eq!(StorageVersion::get::>(), 0, "can only upgrade from version 0"); + + let props_count = v0::PublicProps::::get().len(); + log::info!(target: TARGET, "{} public proposals will be migrated.", props_count,); + ensure!(props_count <= T::MaxProposals::get() as usize, "too many proposals"); + + let referenda_count = v0::ReferendumInfoOf::::iter().count(); + log::info!(target: TARGET, "{} referenda will be migrated.", referenda_count); + + Ok((props_count as u32, referenda_count as u32).encode()) + } + + #[allow(deprecated)] + fn on_runtime_upgrade() -> Weight { + let mut weight = T::DbWeight::get().reads(1); + if StorageVersion::get::>() != 0 { + log::warn!( + target: TARGET, + "skipping on_runtime_upgrade: executed on wrong storage version.\ + Expected version 0" + ); + return weight + } + + ReferendumInfoOf::::translate( + |index, old: ReferendumInfo>| { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + log::info!(target: TARGET, "migrating referendum #{:?}", &index); + Some(match old { + ReferendumInfo::Ongoing(status) => + ReferendumInfo::Ongoing(ReferendumStatus { + end: status.end, + proposal: Bounded::from_legacy_hash(status.proposal), + threshold: status.threshold, + delay: status.delay, + tally: status.tally, + }), + ReferendumInfo::Finished { approved, end } => + ReferendumInfo::Finished { approved, end }, + }) + }, + ); + + let props = v0::PublicProps::::take() + .into_iter() + .map(|(i, hash, a)| (i, Bounded::from_legacy_hash(hash), a)) + .collect::>(); + let bounded = BoundedVec::<_, T::MaxProposals>::truncate_from(props.clone()); + PublicProps::::put(bounded); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + + if props.len() as u32 > T::MaxProposals::get() { + log::error!( + target: TARGET, + "truncated {} public proposals to {}; continuing", + props.len(), + T::MaxProposals::get() + ); + } + + if let Some((hash, threshold)) = v0::NextExternal::::take() { + log::info!(target: TARGET, "migrating next external proposal"); + NextExternal::::put((Bounded::from_legacy_hash(hash), threshold)); + } + + StorageVersion::new(1).put::>(); + + weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + assert_eq!(StorageVersion::get::>(), 1, "must upgrade"); + + let (old_props_count, old_ref_count): (u32, u32) = + Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); + let new_props_count = crate::PublicProps::::get().len() as u32; + assert_eq!(new_props_count, old_props_count, "must migrate all public proposals"); + let new_ref_count = crate::ReferendumInfoOf::::iter().count() as u32; + assert_eq!(new_ref_count, old_ref_count, "must migrate all referenda"); + + log::info!( + target: TARGET, + "{} public proposals migrated, {} referenda migrated", + new_props_count, + new_ref_count, + ); + Ok(()) + } + } +} + +#[cfg(test)] +#[cfg(feature = "try-runtime")] +mod test { + use super::*; + use crate::{ + tests::{Test as T, *}, + types::*, + }; + use frame_support::bounded_vec; + + #[allow(deprecated)] + #[test] + fn migration_works() { + new_test_ext().execute_with(|| { + assert_eq!(StorageVersion::get::>(), 0); + // Insert some values into the v0 storage: + + // Case 1: Ongoing referendum + let hash = H256::repeat_byte(1); + let status = 
ReferendumStatus { + end: 1u32.into(), + proposal: hash.clone(), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 1u32.into(), + tally: Tally { ayes: 1u32.into(), nays: 1u32.into(), turnout: 1u32.into() }, + }; + v0::ReferendumInfoOf::::insert(1u32, ReferendumInfo::Ongoing(status)); + + // Case 2: Finished referendum + v0::ReferendumInfoOf::::insert( + 2u32, + ReferendumInfo::Finished { approved: true, end: 123u32.into() }, + ); + + // Case 3: Public proposals + let hash2 = H256::repeat_byte(2); + v0::PublicProps::::put(vec![ + (3u32, hash.clone(), 123u64), + (4u32, hash2.clone(), 123u64), + ]); + + // Case 4: Next external + v0::NextExternal::::put((hash.clone(), VoteThreshold::SuperMajorityApprove)); + + // Migrate. + let state = v1::Migration::::pre_upgrade().unwrap(); + let _weight = v1::Migration::::on_runtime_upgrade(); + v1::Migration::::post_upgrade(state).unwrap(); + // Check that all values got migrated. + + // Case 1: Ongoing referendum + assert_eq!( + ReferendumInfoOf::::get(1u32), + Some(ReferendumInfo::Ongoing(ReferendumStatus { + end: 1u32.into(), + proposal: Bounded::from_legacy_hash(hash), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 1u32.into(), + tally: Tally { ayes: 1u32.into(), nays: 1u32.into(), turnout: 1u32.into() }, + })) + ); + // Case 2: Finished referendum + assert_eq!( + ReferendumInfoOf::::get(2u32), + Some(ReferendumInfo::Finished { approved: true, end: 123u32.into() }) + ); + // Case 3: Public proposals + let props: BoundedVec<_, ::MaxProposals> = bounded_vec![ + (3u32, Bounded::from_legacy_hash(hash), 123u64), + (4u32, Bounded::from_legacy_hash(hash2), 123u64) + ]; + assert_eq!(PublicProps::::get(), props); + // Case 4: Next external + assert_eq!( + NextExternal::::get(), + Some((Bounded::from_legacy_hash(hash), VoteThreshold::SuperMajorityApprove)) + ); + }); + } +} diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 03d7216fd5aaa..eceb1a3400bba 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -19,11 +19,11 @@ use super::*; use crate as pallet_democracy; -use codec::Encode; use frame_support::{ assert_noop, assert_ok, ord_parameter_types, parameter_types, traits::{ - ConstU32, ConstU64, Contains, EqualPrivilegeOnly, GenesisBuild, OnInitialize, SortedMembers, + ConstU32, ConstU64, Contains, EqualPrivilegeOnly, GenesisBuild, OnInitialize, + SortedMembers, StorePreimage, }, weights::Weight, }; @@ -35,14 +35,12 @@ use sp_runtime::{ traits::{BadOrigin, BlakeTwo256, IdentityLookup}, Perbill, }; - mod cancellation; mod decoders; mod delegation; mod external_proposing; mod fast_tracking; mod lock_voting; -mod preimage; mod public_proposals; mod scheduling; mod voting; @@ -63,6 +61,7 @@ frame_support::construct_runtime!( { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Preimage: pallet_preimage, Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event}, Democracy: pallet_democracy::{Pallet, Call, Storage, Config, Event}, } @@ -78,13 +77,11 @@ impl Contains for BaseFilter { parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max( - Weight::from_ref_time(1_000_000).set_proof_size(u64::MAX), - ); + frame_system::limits::BlockWeights::simple_max(frame_support::weights::constants::WEIGHT_PER_SECOND.set_proof_size(u64::MAX)); } impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; - type BlockWeights = (); + type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = (); type RuntimeOrigin = RuntimeOrigin; @@ -111,6 +108,16 @@ impl frame_system::Config for Test { parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; } + +impl pallet_preimage::Config for Test { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = (); + type Currency = Balances; + type ManagerOrigin = EnsureRoot; + type BaseDeposit = ConstU64<0>; + type ByteDeposit = ConstU64<0>; +} + impl pallet_scheduler::Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeOrigin = RuntimeOrigin; @@ -118,11 +125,10 @@ impl pallet_scheduler::Config for Test { type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; - type MaxScheduledPerBlock = (); + type MaxScheduledPerBlock = ConstU32<100>; type WeightInfo = (); type OriginPrivilegeCmp = EqualPrivilegeOnly; - type PreimageProvider = (); - type NoPreimagePostponement = (); + type Preimages = (); } impl pallet_balances::Config for Test { @@ -158,7 +164,6 @@ impl SortedMembers for OneToFive { } impl Config for Test { - type Proposal = RuntimeCall; type RuntimeEvent = RuntimeEvent; type Currency = pallet_balances::Pallet; type EnactmentPeriod = ConstU64<2>; @@ -167,6 +172,8 @@ impl Config for Test { type VoteLockingPeriod = ConstU64<3>; type FastTrackVotingPeriod = ConstU64<2>; type MinimumDeposit = ConstU64<1>; + type MaxDeposits = ConstU32<1000>; + type MaxBlacklisted = ConstU32<5>; type ExternalOrigin = EnsureSignedBy; type ExternalMajorityOrigin = EnsureSignedBy; type ExternalDefaultOrigin = EnsureSignedBy; @@ -176,16 +183,15 @@ impl Config for Test { type CancelProposalOrigin = EnsureRoot; type VetoOrigin = EnsureSignedBy; type CooloffPeriod = ConstU64<2>; - type PreimageByteDeposit = PreimageByteDeposit; type Slash = (); type InstantOrigin = EnsureSignedBy; type InstantAllowed = InstantAllowed; type Scheduler = Scheduler; type MaxVotes = ConstU32<100>; - type OperationalPreimageOrigin = EnsureSignedBy; type PalletsOrigin = OriginCaller; type WeightInfo = (); type MaxProposals = ConstU32<100>; + type Preimages = Preimage; } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -203,12 +209,6 @@ pub fn new_test_ext() -> sp_io::TestExternalities { ext } -/// Execute the function two times, with `true` and with `false`. 
-pub fn new_test_ext_execute_with_cond(execute: impl FnOnce(bool) -> () + Clone) { - new_test_ext().execute_with(|| (execute.clone())(false)); - new_test_ext().execute_with(|| execute(true)); -} - #[test] fn params_should_work() { new_test_ext().execute_with(|| { @@ -218,44 +218,22 @@ fn params_should_work() { }); } -fn set_balance_proposal(value: u64) -> Vec { - RuntimeCall::Balances(pallet_balances::Call::set_balance { - who: 42, - new_free: value, - new_reserved: 0, - }) - .encode() +fn set_balance_proposal(value: u64) -> BoundedCallOf { + let inner = pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0 }; + let outer = RuntimeCall::Balances(inner); + Preimage::bound(outer).unwrap() } #[test] fn set_balance_proposal_is_correctly_filtered_out() { for i in 0..10 { - let call = RuntimeCall::decode(&mut &set_balance_proposal(i)[..]).unwrap(); + let call = Preimage::realize(&set_balance_proposal(i)).unwrap().0; assert!(!::BaseCallFilter::contains(&call)); } } -fn set_balance_proposal_hash(value: u64) -> H256 { - BlakeTwo256::hash(&set_balance_proposal(value)[..]) -} - -fn set_balance_proposal_hash_and_note(value: u64) -> H256 { - let p = set_balance_proposal(value); - let h = BlakeTwo256::hash(&p[..]); - match Democracy::note_preimage(RuntimeOrigin::signed(6), p) { - Ok(_) => (), - Err(x) if x == Error::::DuplicatePreimage.into() => (), - Err(x) => panic!("{:?}", x), - } - h -} - fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose(RuntimeOrigin::signed(who), set_balance_proposal_hash(value), delay) -} - -fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose(RuntimeOrigin::signed(who), set_balance_proposal_hash_and_note(value), delay) + Democracy::propose(RuntimeOrigin::signed(who), set_balance_proposal(value), delay) } fn next_block() { @@ -272,7 +250,7 @@ fn fast_forward_to(n: u64) { fn begin_referendum() -> ReferendumIndex { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); fast_forward_to(2); 0 } diff --git a/frame/democracy/src/tests/cancellation.rs b/frame/democracy/src/tests/cancellation.rs index b98e51aa3d4d1..ff046d612c026 100644 --- a/frame/democracy/src/tests/cancellation.rs +++ b/frame/democracy/src/tests/cancellation.rs @@ -24,7 +24,7 @@ fn cancel_referendum_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -42,37 +42,13 @@ fn cancel_referendum_should_work() { }); } -#[test] -fn cancel_queued_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - // start of 2 => next referendum scheduled. 
- fast_forward_to(2); - - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 0, aye(1))); - - fast_forward_to(4); - - assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); - - assert_noop!( - Democracy::cancel_queued(RuntimeOrigin::root(), 1), - Error::::ProposalMissing - ); - assert_ok!(Democracy::cancel_queued(RuntimeOrigin::root(), 0)); - assert!(pallet_scheduler::Agenda::::get(6)[0].is_none()); - }); -} - #[test] fn emergency_cancel_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 2, ); @@ -86,7 +62,7 @@ fn emergency_cancel_should_work() { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 2, ); diff --git a/frame/democracy/src/tests/decoders.rs b/frame/democracy/src/tests/decoders.rs index 1fbb88060549b..1c8b9c3d980f9 100644 --- a/frame/democracy/src/tests/decoders.rs +++ b/frame/democracy/src/tests/decoders.rs @@ -18,7 +18,10 @@ //! The for various partial storage decoders use super::*; -use frame_support::storage::{migration, unhashed}; +use frame_support::{ + storage::{migration, unhashed}, + BoundedVec, +}; #[test] fn test_decode_compact_u32_at() { @@ -42,7 +45,8 @@ fn test_decode_compact_u32_at() { fn len_of_deposit_of() { new_test_ext().execute_with(|| { for l in vec![0, 1, 200, 1000] { - let value: (Vec, u64) = ((0..l).map(|_| Default::default()).collect(), 3u64); + let value: (BoundedVec, u64) = + ((0..l).map(|_| Default::default()).collect::>().try_into().unwrap(), 3u64); DepositOf::::insert(2, value); assert_eq!(Democracy::len_of_deposit_of(2), Some(l)); } @@ -51,35 +55,3 @@ fn len_of_deposit_of() { assert_eq!(Democracy::len_of_deposit_of(2), None); }) } - -#[test] -fn pre_image() { - new_test_ext().execute_with(|| { - let key = Default::default(); - let missing = PreimageStatus::Missing(0); - Preimages::::insert(key, missing); - assert_noop!(Democracy::pre_image_data_len(key), Error::::PreimageMissing); - assert_eq!(Democracy::check_pre_image_is_missing(key), Ok(())); - - Preimages::::remove(key); - assert_noop!(Democracy::pre_image_data_len(key), Error::::PreimageMissing); - assert_noop!(Democracy::check_pre_image_is_missing(key), Error::::NotImminent); - - for l in vec![0, 10, 100, 1000u32] { - let available = PreimageStatus::Available { - data: (0..l).map(|i| i as u8).collect(), - provider: 0, - deposit: 0, - since: 0, - expiry: None, - }; - - Preimages::::insert(key, available); - assert_eq!(Democracy::pre_image_data_len(key), Ok(l)); - assert_noop!( - Democracy::check_pre_image_is_missing(key), - Error::::DuplicatePreimage - ); - } - }) -} diff --git a/frame/democracy/src/tests/delegation.rs b/frame/democracy/src/tests/delegation.rs index 4c5ee79286055..bca7cb9524112 100644 --- a/frame/democracy/src/tests/delegation.rs +++ b/frame/democracy/src/tests/delegation.rs @@ -24,7 +24,7 @@ fn single_proposal_should_work_with_delegation() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); fast_forward_to(2); @@ -75,7 +75,7 @@ fn cyclic_delegation_should_unwind() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); fast_forward_to(2); @@ -100,7 +100,7 @@ fn 
single_proposal_should_work_with_vote_and_delegation() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); fast_forward_to(2); @@ -122,7 +122,7 @@ fn single_proposal_should_work_with_undelegation() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); // Delegate and undelegate vote. assert_ok!(Democracy::delegate(RuntimeOrigin::signed(2), 1, Conviction::None, 20)); diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs index fda555b9c3459..4cfdd2aa74a3d 100644 --- a/frame/democracy/src/tests/external_proposing.rs +++ b/frame/democracy/src/tests/external_proposing.rs @@ -23,35 +23,29 @@ use super::*; fn veto_external_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); assert!(>::exists()); - let h = set_balance_proposal_hash_and_note(2); + let h = set_balance_proposal(2).hash(); assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(3), h)); // cancelled. assert!(!>::exists()); // fails - same proposal can't be resubmitted. assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),), Error::::ProposalBlacklisted ); fast_forward_to(1); // fails as we're still in cooloff period. assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),), Error::::ProposalBlacklisted ); fast_forward_to(2); // works; as we're out of the cooloff period. - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); assert!(>::exists()); // 3 can't veto the same thing twice. @@ -68,14 +62,11 @@ fn veto_external_works() { fast_forward_to(3); // same proposal fails as we're still in cooloff assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2)), Error::::ProposalBlacklisted ); // different proposal works fine. 
- assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(3), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(3),)); }); } @@ -84,22 +75,16 @@ fn external_blacklisting_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); - let hash = set_balance_proposal_hash(2); + let hash = set_balance_proposal(2).hash(); assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, None)); fast_forward_to(2); assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); assert_noop!( - Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2), - ), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2)), Error::::ProposalBlacklisted, ); }); @@ -110,15 +95,12 @@ fn external_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(1), set_balance_proposal_hash(2),), + Democracy::external_propose(RuntimeOrigin::signed(1), set_balance_proposal(2),), BadOrigin, ); - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2),)); assert_noop!( - Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal_hash(1),), + Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(1),), Error::::DuplicateProposal ); fast_forward_to(2); @@ -126,7 +108,7 @@ fn external_referendum_works() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal_hash: set_balance_proposal_hash(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -140,22 +122,19 @@ fn external_majority_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_majority( - RuntimeOrigin::signed(1), - set_balance_proposal_hash(2) - ), + Democracy::external_propose_majority(RuntimeOrigin::signed(1), set_balance_proposal(2)), BadOrigin, ); assert_ok!(Democracy::external_propose_majority( RuntimeOrigin::signed(3), - set_balance_proposal_hash_and_note(2) + set_balance_proposal(2) )); fast_forward_to(2); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal_hash: set_balance_proposal_hash(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SimpleMajority, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -169,22 +148,19 @@ fn external_default_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_default( - RuntimeOrigin::signed(3), - set_balance_proposal_hash(2) - ), + Democracy::external_propose_default(RuntimeOrigin::signed(3), set_balance_proposal(2)), BadOrigin, ); assert_ok!(Democracy::external_propose_default( RuntimeOrigin::signed(1), - set_balance_proposal_hash_and_note(2) + set_balance_proposal(2) )); fast_forward_to(2); assert_eq!( Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal_hash: set_balance_proposal_hash(2), + proposal: set_balance_proposal(2), 
threshold: VoteThreshold::SuperMajorityAgainst, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -197,11 +173,8 @@ fn external_default_referendum_works() { fn external_and_public_interleaving_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(1), - )); - assert_ok!(propose_set_balance_and_note(6, 2, 2)); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(1),)); + assert_ok!(propose_set_balance(6, 2, 2)); fast_forward_to(2); @@ -210,17 +183,14 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal_hash: set_balance_proposal_hash_and_note(1), + proposal: set_balance_proposal(1), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish external - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(3), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(3),)); fast_forward_to(4); @@ -229,7 +199,7 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(1), Ok(ReferendumStatus { end: 6, - proposal_hash: set_balance_proposal_hash_and_note(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -244,17 +214,14 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(2), Ok(ReferendumStatus { end: 8, - proposal_hash: set_balance_proposal_hash_and_note(3), + proposal: set_balance_proposal(3), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish external - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(5), - )); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(5),)); fast_forward_to(8); @@ -263,18 +230,15 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(3), Ok(ReferendumStatus { end: 10, - proposal_hash: set_balance_proposal_hash_and_note(5), + proposal: set_balance_proposal(5), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish both - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(7), - )); - assert_ok!(propose_set_balance_and_note(6, 4, 2)); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(7),)); + assert_ok!(propose_set_balance(6, 4, 2)); fast_forward_to(10); @@ -283,16 +247,16 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(4), Ok(ReferendumStatus { end: 12, - proposal_hash: set_balance_proposal_hash_and_note(4), + proposal: set_balance_proposal(4), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, }) ); // replenish public again - assert_ok!(propose_set_balance_and_note(6, 6, 2)); + assert_ok!(propose_set_balance(6, 6, 2)); // cancel external - let h = set_balance_proposal_hash_and_note(7); + let h = set_balance_proposal(7).hash(); assert_ok!(Democracy::veto_external(RuntimeOrigin::signed(3), h)); fast_forward_to(12); @@ -302,7 +266,7 @@ fn external_and_public_interleaving_works() { Democracy::referendum_status(5), 
Ok(ReferendumStatus { end: 14, - proposal_hash: set_balance_proposal_hash_and_note(6), + proposal: set_balance_proposal(6), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, diff --git a/frame/democracy/src/tests/fast_tracking.rs b/frame/democracy/src/tests/fast_tracking.rs index 8fef985c8561c..97bb7a63908ab 100644 --- a/frame/democracy/src/tests/fast_tracking.rs +++ b/frame/democracy/src/tests/fast_tracking.rs @@ -23,14 +23,14 @@ use super::*; fn fast_track_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); + let h = set_balance_proposal(2).hash(); assert_noop!( Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), Error::::ProposalMissing ); assert_ok!(Democracy::external_propose_majority( RuntimeOrigin::signed(3), - set_balance_proposal_hash_and_note(2) + set_balance_proposal(2) )); assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(1), h, 3, 2), BadOrigin); assert_ok!(Democracy::fast_track(RuntimeOrigin::signed(5), h, 2, 0)); @@ -38,7 +38,7 @@ fn fast_track_referendum_works() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 2, - proposal_hash: set_balance_proposal_hash_and_note(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SimpleMajority, delay: 0, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -51,14 +51,14 @@ fn fast_track_referendum_works() { fn instant_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); + let h = set_balance_proposal(2).hash(); assert_noop!( Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), Error::::ProposalMissing ); assert_ok!(Democracy::external_propose_majority( RuntimeOrigin::signed(3), - set_balance_proposal_hash_and_note(2) + set_balance_proposal(2) )); assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(1), h, 3, 2), BadOrigin); assert_noop!(Democracy::fast_track(RuntimeOrigin::signed(5), h, 1, 0), BadOrigin); @@ -76,7 +76,7 @@ fn instant_referendum_works() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 1, - proposal_hash: set_balance_proposal_hash_and_note(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SimpleMajority, delay: 0, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -93,7 +93,7 @@ fn instant_next_block_referendum_backed() { let majority_origin_id = 3; let instant_origin_id = 6; let voting_period = 1; - let proposal_hash = set_balance_proposal_hash_and_note(2); + let proposal = set_balance_proposal(2); let delay = 2; // has no effect on test // init @@ -103,13 +103,13 @@ fn instant_next_block_referendum_backed() { // propose with majority origin assert_ok!(Democracy::external_propose_majority( RuntimeOrigin::signed(majority_origin_id), - proposal_hash + proposal.clone() )); // fast track with instant origin and voting period pointing to the next block assert_ok!(Democracy::fast_track( RuntimeOrigin::signed(instant_origin_id), - proposal_hash, + proposal.hash(), voting_period, delay )); @@ -119,7 +119,7 @@ fn instant_next_block_referendum_backed() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: start_block_number + voting_period, - proposal_hash, + proposal, threshold: VoteThreshold::SimpleMajority, delay, tally: Tally { ayes: 0, nays: 0, turnout: 0 }, @@ -143,11 +143,8 @@ fn instant_next_block_referendum_backed() { fn fast_track_referendum_fails_when_no_simple_majority() { new_test_ext().execute_with(|| { 
System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); - assert_ok!(Democracy::external_propose( - RuntimeOrigin::signed(2), - set_balance_proposal_hash_and_note(2) - )); + let h = set_balance_proposal(2).hash(); + assert_ok!(Democracy::external_propose(RuntimeOrigin::signed(2), set_balance_proposal(2))); assert_noop!( Democracy::fast_track(RuntimeOrigin::signed(5), h, 3, 2), Error::::NotSimpleMajority diff --git a/frame/democracy/src/tests/lock_voting.rs b/frame/democracy/src/tests/lock_voting.rs index de1137f03fd38..540198ecf33a1 100644 --- a/frame/democracy/src/tests/lock_voting.rs +++ b/frame/democracy/src/tests/lock_voting.rs @@ -43,7 +43,7 @@ fn lock_voting_should_work() { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -59,7 +59,7 @@ fn lock_voting_should_work() { assert_eq!(Balances::locks(i), vec![the_lock(i * 10)]); } - fast_forward_to(2); + fast_forward_to(3); // Referendum passed; 1 and 5 didn't get their way and can now reap and unlock. assert_ok!(Democracy::remove_vote(RuntimeOrigin::signed(1), r)); @@ -126,13 +126,13 @@ fn no_locks_without_conviction_should_work() { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(0, 10))); - fast_forward_to(2); + fast_forward_to(3); assert_eq!(Balances::free_balance(42), 2); assert_ok!(Democracy::remove_other_vote(RuntimeOrigin::signed(2), 1, r)); @@ -146,7 +146,7 @@ fn lock_voting_should_work_with_delegation() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -167,28 +167,16 @@ fn lock_voting_should_work_with_delegation() { fn setup_three_referenda() -> (u32, u32, u32) { System::set_block_number(0); - let r1 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SimpleMajority, - 0, - ); + let r1 = + Democracy::inject_referendum(2, set_balance_proposal(2), VoteThreshold::SimpleMajority, 0); assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r1, aye(4, 10))); - let r2 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SimpleMajority, - 0, - ); + let r2 = + Democracy::inject_referendum(2, set_balance_proposal(2), VoteThreshold::SimpleMajority, 0); assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r2, aye(3, 20))); - let r3 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SimpleMajority, - 0, - ); + let r3 = + Democracy::inject_referendum(2, set_balance_proposal(2), VoteThreshold::SimpleMajority, 0); assert_ok!(Democracy::vote(RuntimeOrigin::signed(5), r3, aye(2, 50))); fast_forward_to(2); @@ -306,7 +294,7 @@ fn locks_should_persist_from_voting_to_delegation() { System::set_block_number(0); let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SimpleMajority, 0, ); diff --git a/frame/democracy/src/tests/preimage.rs b/frame/democracy/src/tests/preimage.rs deleted file mode 100644 index 39536eab8009b..0000000000000 --- a/frame/democracy/src/tests/preimage.rs +++ /dev/null @@ -1,237 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The preimage tests. - -use super::*; - -#[test] -fn missing_preimage_should_fail() { - new_test_ext().execute_with(|| { - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash(2), - VoteThreshold::SuperMajorityApprove, - 0, - ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 0); - }); -} - -#[test] -fn preimage_deposit_should_be_required_and_returned() { - new_test_ext_execute_with_cond(|operational| { - // fee of 100 is too much. - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 100); - assert_noop!( - if operational { - Democracy::note_preimage_operational(RuntimeOrigin::signed(6), vec![0; 500]) - } else { - Democracy::note_preimage(RuntimeOrigin::signed(6), vec![0; 500]) - }, - BalancesError::::InsufficientBalance, - ); - // fee of 1 is reasonable. - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0, - ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - next_block(); - - assert_eq!(Balances::reserved_balance(6), 0); - assert_eq!(Balances::free_balance(6), 60); - assert_eq!(Balances::free_balance(42), 2); - }); -} - -#[test] -fn preimage_deposit_should_be_reapable_earlier_by_owner() { - new_test_ext_execute_with_cond(|operational| { - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!(if operational { - Democracy::note_preimage_operational(RuntimeOrigin::signed(6), set_balance_proposal(2)) - } else { - Democracy::note_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)) - }); - - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - assert_noop!( - Democracy::reap_preimage( - RuntimeOrigin::signed(6), - set_balance_proposal_hash(2), - u32::MAX - ), - Error::::TooEarly - ); - next_block(); - assert_ok!(Democracy::reap_preimage( - RuntimeOrigin::signed(6), - set_balance_proposal_hash(2), - u32::MAX - )); - - assert_eq!(Balances::free_balance(6), 60); - assert_eq!(Balances::reserved_balance(6), 0); - }); -} - -#[test] -fn preimage_deposit_should_be_reapable() { - new_test_ext_execute_with_cond(|operational| { - assert_noop!( - Democracy::reap_preimage( - RuntimeOrigin::signed(5), - set_balance_proposal_hash(2), - u32::MAX - ), - Error::::PreimageMissing - ); - - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!(if operational { - Democracy::note_preimage_operational(RuntimeOrigin::signed(6), set_balance_proposal(2)) - } else { - Democracy::note_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)) - }); - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - next_block(); - next_block(); - assert_noop!( - Democracy::reap_preimage( - 
RuntimeOrigin::signed(5), - set_balance_proposal_hash(2), - u32::MAX - ), - Error::::TooEarly - ); - - next_block(); - assert_ok!(Democracy::reap_preimage( - RuntimeOrigin::signed(5), - set_balance_proposal_hash(2), - u32::MAX - )); - assert_eq!(Balances::reserved_balance(6), 0); - assert_eq!(Balances::free_balance(6), 48); - assert_eq!(Balances::free_balance(5), 62); - }); -} - -#[test] -fn noting_imminent_preimage_for_free_should_work() { - new_test_ext_execute_with_cond(|operational| { - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash(2), - VoteThreshold::SuperMajorityApprove, - 1, - ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - - assert_noop!( - if operational { - Democracy::note_imminent_preimage_operational( - RuntimeOrigin::signed(6), - set_balance_proposal(2), - ) - } else { - Democracy::note_imminent_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)) - }, - Error::::NotImminent - ); - - next_block(); - - // Now we're in the dispatch queue it's all good. - assert_ok!(Democracy::note_imminent_preimage( - RuntimeOrigin::signed(6), - set_balance_proposal(2) - )); - - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); -} - -#[test] -fn reaping_imminent_preimage_should_fail() { - new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash_and_note(2); - let r = Democracy::inject_referendum(3, h, VoteThreshold::SuperMajorityApprove, 1); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - next_block(); - next_block(); - assert_noop!( - Democracy::reap_preimage(RuntimeOrigin::signed(6), h, u32::MAX), - Error::::Imminent - ); - }); -} - -#[test] -fn note_imminent_preimage_can_only_be_successful_once() { - new_test_ext().execute_with(|| { - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash(2), - VoteThreshold::SuperMajorityApprove, - 1, - ); - assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r, aye(1))); - next_block(); - - // First time works - assert_ok!(Democracy::note_imminent_preimage( - RuntimeOrigin::signed(6), - set_balance_proposal(2) - )); - - // Second time fails - assert_noop!( - Democracy::note_imminent_preimage(RuntimeOrigin::signed(6), set_balance_proposal(2)), - Error::::DuplicatePreimage - ); - - // Fails from any user - assert_noop!( - Democracy::note_imminent_preimage(RuntimeOrigin::signed(5), set_balance_proposal(2)), - Error::::DuplicatePreimage - ); - }); -} diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs index c52533e46ccc5..f48824dc95c5d 100644 --- a/frame/democracy/src/tests/public_proposals.rs +++ b/frame/democracy/src/tests/public_proposals.rs @@ -22,9 +22,9 @@ use super::*; #[test] fn backing_for_should_work() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_ok!(propose_set_balance_and_note(1, 3, 3)); + assert_ok!(propose_set_balance(1, 2, 2)); + assert_ok!(propose_set_balance(1, 4, 4)); + assert_ok!(propose_set_balance(1, 3, 3)); assert_eq!(Democracy::backing_for(0), Some(2)); assert_eq!(Democracy::backing_for(1), Some(4)); assert_eq!(Democracy::backing_for(2), Some(3)); @@ -34,11 +34,11 @@ fn backing_for_should_work() { #[test] fn deposit_for_proposals_should_be_taken() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 5)); - 
assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); + assert_ok!(propose_set_balance(1, 2, 5)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::free_balance(2), 15); assert_eq!(Balances::free_balance(5), 35); @@ -48,11 +48,11 @@ fn deposit_for_proposals_should_be_taken() { #[test] fn deposit_for_proposals_should_be_returned() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); - assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0, u32::MAX)); + assert_ok!(propose_set_balance(1, 2, 5)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(2), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); + assert_ok!(Democracy::second(RuntimeOrigin::signed(5), 0)); fast_forward_to(3); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 20); @@ -77,30 +77,19 @@ fn poor_proposer_should_not_work() { #[test] fn poor_seconder_should_not_work() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(2, 2, 11)); + assert_ok!(propose_set_balance(2, 2, 11)); assert_noop!( - Democracy::second(RuntimeOrigin::signed(1), 0, u32::MAX), + Democracy::second(RuntimeOrigin::signed(1), 0), BalancesError::::InsufficientBalance ); }); } -#[test] -fn invalid_seconds_upper_bound_should_not_work() { - new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_noop!( - Democracy::second(RuntimeOrigin::signed(2), 0, 0), - Error::::WrongUpperBound - ); - }); -} - #[test] fn cancel_proposal_should_work() { new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); + assert_ok!(propose_set_balance(1, 2, 2)); + assert_ok!(propose_set_balance(1, 4, 4)); assert_noop!(Democracy::cancel_proposal(RuntimeOrigin::signed(1), 0), BadOrigin); assert_ok!(Democracy::cancel_proposal(RuntimeOrigin::root(), 0)); System::assert_last_event(crate::Event::ProposalCanceled { prop_index: 0 }.into()); @@ -113,10 +102,10 @@ fn cancel_proposal_should_work() { fn blacklisting_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); - let hash = set_balance_proposal_hash(2); + let hash = set_balance_proposal(2).hash(); - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); + assert_ok!(propose_set_balance(1, 2, 2)); + assert_ok!(propose_set_balance(1, 4, 4)); assert_noop!(Democracy::blacklist(RuntimeOrigin::signed(1), hash, None), BadOrigin); assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, None)); @@ -124,11 +113,11 @@ fn blacklisting_should_work() { assert_eq!(Democracy::backing_for(0), None); assert_eq!(Democracy::backing_for(1), Some(4)); - assert_noop!(propose_set_balance_and_note(1, 2, 2), 
Error::::ProposalBlacklisted); + assert_noop!(propose_set_balance(1, 2, 2), Error::::ProposalBlacklisted); fast_forward_to(2); - let hash = set_balance_proposal_hash(4); + let hash = set_balance_proposal(4).hash(); assert_ok!(Democracy::referendum_status(0)); assert_ok!(Democracy::blacklist(RuntimeOrigin::root(), hash, Some(0))); assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); @@ -139,9 +128,9 @@ fn blacklisting_should_work() { fn runners_up_should_come_after() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_ok!(propose_set_balance_and_note(1, 3, 3)); + assert_ok!(propose_set_balance(1, 2, 2)); + assert_ok!(propose_set_balance(1, 4, 4)); + assert_ok!(propose_set_balance(1, 3, 3)); fast_forward_to(2); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), 0, aye(1))); fast_forward_to(4); diff --git a/frame/democracy/src/tests/scheduling.rs b/frame/democracy/src/tests/scheduling.rs index 4b5fe8dd9c1c3..5e133f38945d6 100644 --- a/frame/democracy/src/tests/scheduling.rs +++ b/frame/democracy/src/tests/scheduling.rs @@ -24,7 +24,7 @@ fn simple_passing_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -43,7 +43,7 @@ fn simple_failing_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -62,13 +62,13 @@ fn ooo_inject_referendums_should_work() { new_test_ext().execute_with(|| { let r1 = Democracy::inject_referendum( 3, - set_balance_proposal_hash_and_note(3), + set_balance_proposal(3), VoteThreshold::SuperMajorityApprove, 0, ); let r2 = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -77,11 +77,13 @@ fn ooo_inject_referendums_should_work() { assert_eq!(tally(r2), Tally { ayes: 1, nays: 0, turnout: 10 }); next_block(); - assert_eq!(Balances::free_balance(42), 2); assert_ok!(Democracy::vote(RuntimeOrigin::signed(1), r1, aye(1))); assert_eq!(tally(r1), Tally { ayes: 1, nays: 0, turnout: 10 }); + next_block(); + assert_eq!(Balances::free_balance(42), 2); + next_block(); assert_eq!(Balances::free_balance(42), 3); }); @@ -92,7 +94,7 @@ fn delayed_enactment_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 1, ); @@ -118,19 +120,19 @@ fn lowest_unbaked_should_be_sensible() { new_test_ext().execute_with(|| { let r1 = Democracy::inject_referendum( 3, - set_balance_proposal_hash_and_note(1), + set_balance_proposal(1), VoteThreshold::SuperMajorityApprove, 0, ); let r2 = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); let r3 = Democracy::inject_referendum( 10, - set_balance_proposal_hash_and_note(3), + set_balance_proposal(3), VoteThreshold::SuperMajorityApprove, 0, ); @@ -141,16 +143,19 @@ fn lowest_unbaked_should_be_sensible() { assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); - - // r2 is approved - assert_eq!(Balances::free_balance(42), 2); + // r2 ends with approval assert_eq!(Democracy::lowest_unbaked(), 0); 
next_block(); - - // r1 is approved - assert_eq!(Balances::free_balance(42), 1); + // r1 ends with approval assert_eq!(Democracy::lowest_unbaked(), 3); assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count()); + + // r2 is executed + assert_eq!(Balances::free_balance(42), 2); + + next_block(); + // r1 is executed + assert_eq!(Balances::free_balance(42), 1); }); } diff --git a/frame/democracy/src/tests/voting.rs b/frame/democracy/src/tests/voting.rs index 59d0cd6bc50ef..482cd430e0e7f 100644 --- a/frame/democracy/src/tests/voting.rs +++ b/frame/democracy/src/tests/voting.rs @@ -63,7 +63,7 @@ fn split_vote_cancellation_should_work() { fn single_proposal_should_work() { new_test_ext().execute_with(|| { System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + assert_ok!(propose_set_balance(1, 2, 1)); let r = 0; assert!(Democracy::referendum_info(r).is_none()); @@ -76,7 +76,7 @@ fn single_proposal_should_work() { Democracy::referendum_status(0), Ok(ReferendumStatus { end: 4, - proposal_hash: set_balance_proposal_hash_and_note(2), + proposal: set_balance_proposal(2), threshold: VoteThreshold::SuperMajorityApprove, delay: 2, tally: Tally { ayes: 1, nays: 0, turnout: 10 }, @@ -106,7 +106,7 @@ fn controversial_voting_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -132,7 +132,7 @@ fn controversial_low_turnout_voting_should_work() { new_test_ext().execute_with(|| { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); @@ -156,7 +156,7 @@ fn passing_low_turnout_voting_should_work() { let r = Democracy::inject_referendum( 2, - set_balance_proposal_hash_and_note(2), + set_balance_proposal(2), VoteThreshold::SuperMajorityApprove, 0, ); diff --git a/frame/democracy/src/types.rs b/frame/democracy/src/types.rs index 52ab8a40eb3e3..4b7f1a0fac45c 100644 --- a/frame/democracy/src/types.rs +++ b/frame/democracy/src/types.rs @@ -18,7 +18,7 @@ //! Miscellaneous additional datatypes. use crate::{AccountVote, Conviction, Vote, VoteThreshold}; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{ traits::{Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Saturating, Zero}, @@ -26,7 +26,7 @@ use sp_runtime::{ }; /// Info regarding an ongoing referendum. -#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive(Encode, MaxEncodedLen, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub struct Tally { /// The number of aye votes, expressed in terms of post-conviction lock-vote. pub ayes: Balance, @@ -37,7 +37,9 @@ pub struct Tally { } /// Amount of votes and capital placed in delegation for an account. -#[derive(Encode, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +#[derive( + Encode, MaxEncodedLen, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, +)] pub struct Delegations { /// The number of votes (this is post-conviction). pub votes: Balance, @@ -160,12 +162,12 @@ impl< } /// Info regarding an ongoing referendum. 
-#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub struct ReferendumStatus { +#[derive(Encode, MaxEncodedLen, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub struct ReferendumStatus { /// When voting on this referendum will end. pub end: BlockNumber, - /// The hash of the proposal being voted on. - pub proposal_hash: Hash, + /// The proposal being voted on. + pub proposal: Proposal, /// The thresholding mechanism to determine whether it passed. pub threshold: VoteThreshold, /// The delay (in blocks) to wait after a successful referendum before deploying. @@ -175,23 +177,23 @@ pub struct ReferendumStatus { } /// Info regarding a referendum, present or past. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub enum ReferendumInfo { +#[derive(Encode, MaxEncodedLen, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] +pub enum ReferendumInfo { /// Referendum is happening, the arg is the block number at which it will end. - Ongoing(ReferendumStatus), + Ongoing(ReferendumStatus), /// Referendum finished at `end`, and has been `approved` or rejected. Finished { approved: bool, end: BlockNumber }, } -impl ReferendumInfo { +impl ReferendumInfo { /// Create a new instance. pub fn new( end: BlockNumber, - proposal_hash: Hash, + proposal: Proposal, threshold: VoteThreshold, delay: BlockNumber, ) -> Self { - let s = ReferendumStatus { end, proposal_hash, threshold, delay, tally: Tally::default() }; + let s = ReferendumStatus { end, proposal, threshold, delay, tally: Tally::default() }; ReferendumInfo::Ongoing(s) } } diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs index c74623d4dfeb8..122f54febd8cf 100644 --- a/frame/democracy/src/vote.rs +++ b/frame/democracy/src/vote.rs @@ -18,11 +18,12 @@ //! The vote datatype. use crate::{Conviction, Delegations, ReferendumIndex}; -use codec::{Decode, Encode, EncodeLike, Input, Output}; +use codec::{Decode, Encode, EncodeLike, Input, MaxEncodedLen, Output}; +use frame_support::traits::Get; use scale_info::TypeInfo; use sp_runtime::{ traits::{Saturating, Zero}, - RuntimeDebug, + BoundedVec, RuntimeDebug, }; use sp_std::prelude::*; @@ -39,6 +40,12 @@ impl Encode for Vote { } } +impl MaxEncodedLen for Vote { + fn max_encoded_len() -> usize { + 1 + } +} + impl EncodeLike for Vote {} impl Decode for Vote { @@ -66,7 +73,7 @@ impl TypeInfo for Vote { } /// A vote for a referendum of a particular account. -#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] +#[derive(Encode, MaxEncodedLen, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug, TypeInfo)] pub enum AccountVote { /// A standard vote, one-way (approve or reject) with a given amount of conviction. Standard { vote: Vote, balance: Balance }, @@ -107,7 +114,18 @@ impl AccountVote { /// A "prior" lock, i.e. a lock for some now-forgotten reason. #[derive( - Encode, Decode, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, TypeInfo, + Encode, + MaxEncodedLen, + Decode, + Default, + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + RuntimeDebug, + TypeInfo, )] pub struct PriorLock(BlockNumber, Balance); @@ -131,13 +149,15 @@ impl PriorLock { +#[derive(Clone, Encode, Decode, Eq, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)] +#[codec(mel_bound(skip_type_params(MaxVotes)))] +#[scale_info(skip_type_params(MaxVotes))] +pub enum Voting> { /// The account is voting directly. 
`delegations` is the total amount of post-conviction voting /// weight that it controls from those that have delegated to it. Direct { /// The current votes of the account. - votes: Vec<(ReferendumIndex, AccountVote)>, + votes: BoundedVec<(ReferendumIndex, AccountVote), MaxVotes>, /// The total amount of delegations that this account has received. delegations: Delegations, /// Any pre-existing locks from past voting/delegating activity. @@ -155,20 +175,24 @@ pub enum Voting { }, } -impl Default - for Voting +impl> Default + for Voting { fn default() -> Self { Voting::Direct { - votes: Vec::new(), + votes: Default::default(), delegations: Default::default(), prior: PriorLock(Zero::zero(), Default::default()), } } } -impl - Voting +impl< + Balance: Saturating + Ord + Zero + Copy, + BlockNumber: Ord + Copy + Zero, + AccountId, + MaxVotes: Get, + > Voting { pub fn rejig(&mut self, now: BlockNumber) { match self { diff --git a/frame/democracy/src/vote_threshold.rs b/frame/democracy/src/vote_threshold.rs index 443d6b1166198..e8ef91def9820 100644 --- a/frame/democracy/src/vote_threshold.rs +++ b/frame/democracy/src/vote_threshold.rs @@ -18,7 +18,7 @@ //! Voting thresholds. use crate::Tally; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -26,7 +26,9 @@ use sp_runtime::traits::{IntegerSquareRoot, Zero}; use sp_std::ops::{Add, Div, Mul, Rem}; /// A means of determining if a vote is past pass threshold. -#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug, TypeInfo)] +#[derive( + Clone, Copy, PartialEq, Eq, Encode, MaxEncodedLen, Decode, sp_runtime::RuntimeDebug, TypeInfo, +)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum VoteThreshold { /// A supermajority of approvals is needed to pass this vote. diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index 272921ed3a15d..0a3b717938022 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -18,22 +18,24 @@ //! Autogenerated weights for pallet_democracy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_democracy // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --pallet=pallet_democracy +// --chain=dev // --output=./frame/democracy/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -45,27 +47,23 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_democracy. 
pub trait WeightInfo { fn propose() -> Weight; - fn second(s: u32, ) -> Weight; - fn vote_new(r: u32, ) -> Weight; - fn vote_existing(r: u32, ) -> Weight; + fn second() -> Weight; + fn vote_new() -> Weight; + fn vote_existing() -> Weight; fn emergency_cancel() -> Weight; - fn blacklist(p: u32, ) -> Weight; - fn external_propose(v: u32, ) -> Weight; + fn blacklist() -> Weight; + fn external_propose() -> Weight; fn external_propose_majority() -> Weight; fn external_propose_default() -> Weight; fn fast_track() -> Weight; - fn veto_external(v: u32, ) -> Weight; - fn cancel_proposal(p: u32, ) -> Weight; + fn veto_external() -> Weight; + fn cancel_proposal() -> Weight; fn cancel_referendum() -> Weight; - fn cancel_queued(r: u32, ) -> Weight; fn on_initialize_base(r: u32, ) -> Weight; fn on_initialize_base_with_launch_period(r: u32, ) -> Weight; fn delegate(r: u32, ) -> Weight; fn undelegate(r: u32, ) -> Weight; fn clear_public_proposals() -> Weight; - fn note_preimage(b: u32, ) -> Weight; - fn note_imminent_preimage(b: u32, ) -> Weight; - fn reap_preimage(b: u32, ) -> Weight; fn unlock_remove(r: u32, ) -> Weight; fn unlock_set(r: u32, ) -> Weight; fn remove_vote(r: u32, ) -> Weight; @@ -80,125 +78,103 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - Weight::from_ref_time(48_328_000 as u64) + Weight::from_ref_time(57_410_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy DepositOf (r:1 w:1) - fn second(s: u32, ) -> Weight { - Weight::from_ref_time(30_923_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(s as u64)) + fn second() -> Weight { + Weight::from_ref_time(49_224_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_new(r: u32, ) -> Weight { - Weight::from_ref_time(40_345_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(140_000 as u64).saturating_mul(r as u64)) + fn vote_new() -> Weight { + Weight::from_ref_time(60_933_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_existing(r: u32, ) -> Weight { - Weight::from_ref_time(39_853_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(150_000 as u64).saturating_mul(r as u64)) + fn vote_existing() -> Weight { + Weight::from_ref_time(60_393_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - Weight::from_ref_time(19_364_000 as u64) + Weight::from_ref_time(24_588_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) + // Storage: Democracy DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Blacklist (r:0 w:1) - // 
Storage: Democracy DepositOf (r:1 w:1) - // Storage: System Account (r:1 w:1) - fn blacklist(p: u32, ) -> Weight { - Weight::from_ref_time(57_708_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + fn blacklist() -> Weight { + Weight::from_ref_time(91_226_000 as u64) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().writes(6 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) - fn external_propose(v: u32, ) -> Weight { - Weight::from_ref_time(10_714_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as u64).saturating_mul(v as u64)) + fn external_propose() -> Weight { + Weight::from_ref_time(18_898_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - Weight::from_ref_time(3_697_000 as u64) + Weight::from_ref_time(5_136_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - Weight::from_ref_time(3_831_000 as u64) + Weight::from_ref_time(5_243_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - Weight::from_ref_time(20_271_000 as u64) + Weight::from_ref_time(24_275_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) - fn veto_external(v: u32, ) -> Weight { - Weight::from_ref_time(21_319_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(v as u64)) + fn veto_external() -> Weight { + Weight::from_ref_time(30_988_000 as u64) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) - fn cancel_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(43_960_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(184_000 as u64).saturating_mul(p as u64)) + fn cancel_proposal() -> Weight { + Weight::from_ref_time(78_515_000 as u64) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - Weight::from_ref_time(13_475_000 as u64) + Weight::from_ref_time(16_155_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } - // Storage: Scheduler Lookup (r:1 w:1) - // Storage: Scheduler Agenda (r:1 w:1) - fn cancel_queued(r: u32, ) -> Weight { - Weight::from_ref_time(24_320_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(560_000 as u64).saturating_mul(r as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:2 w:0) + /// The range of component `r` is `[0, 99]`. 
fn on_initialize_base(r: u32, ) -> Weight { - Weight::from_ref_time(3_428_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_171_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(7_007_000 as u64) + // Standard Error: 2_686 + .saturating_add(Weight::from_ref_time(2_288_781 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) @@ -208,33 +184,36 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy LastTabledWasExternal (r:1 w:0) // Storage: Democracy NextExternal (r:1 w:0) // Storage: Democracy PublicProps (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:2 w:0) + /// The range of component `r` is `[0, 99]`. fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { - Weight::from_ref_time(7_867_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(3_177_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(9_528_000 as u64) + // Standard Error: 2_521 + .saturating_add(Weight::from_ref_time(2_291_780 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(5 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:3 w:3) - // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) + // Storage: Democracy ReferendumInfoOf (r:2 w:2) + /// The range of component `r` is `[0, 99]`. fn delegate(r: u32, ) -> Weight { - Weight::from_ref_time(37_902_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(4_335_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(46_787_000 as u64) + // Standard Error: 2_943 + .saturating_add(Weight::from_ref_time(3_460_194 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(4 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(4 as u64)) .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(r as u64))) } // Storage: Democracy VotingOf (r:2 w:2) - // Storage: Democracy ReferendumInfoOf (r:1 w:1) + // Storage: Democracy ReferendumInfoOf (r:2 w:2) + /// The range of component `r` is `[0, 99]`. 
fn undelegate(r: u32, ) -> Weight { - Weight::from_ref_time(21_272_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(4_351_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(29_789_000 as u64) + // Standard Error: 2_324 + .saturating_add(Weight::from_ref_time(3_360_918 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(r as u64))) .saturating_add(T::DbWeight::get().writes(2 as u64)) @@ -242,69 +221,48 @@ impl WeightInfo for SubstrateWeight { } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - Weight::from_ref_time(4_913_000 as u64) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Democracy Preimages (r:1 w:1) - fn note_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(27_986_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Democracy Preimages (r:1 w:1) - fn note_imminent_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(20_058_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - } - // Storage: Democracy Preimages (r:1 w:1) - // Storage: System Account (r:1 w:0) - fn reap_preimage(b: u32, ) -> Weight { - Weight::from_ref_time(28_619_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + Weight::from_ref_time(6_519_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `r` is `[0, 99]`. fn unlock_remove(r: u32, ) -> Weight { - Weight::from_ref_time(26_619_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(28_884_000 as u64) + // Standard Error: 2_631 + .saturating_add(Weight::from_ref_time(163_516 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) + /// The range of component `r` is `[0, 99]`. fn unlock_set(r: u32, ) -> Weight { - Weight::from_ref_time(25_373_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(33_498_000 as u64) + // Standard Error: 622 + .saturating_add(Weight::from_ref_time(133_421 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(3 as u64)) .saturating_add(T::DbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) + /// The range of component `r` is `[1, 100]`. 
fn remove_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_961_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(18_201_000 as u64) + // Standard Error: 1_007 + .saturating_add(Weight::from_ref_time(152_699 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) + /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_992_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(113_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(18_455_000 as u64) + // Standard Error: 951 + .saturating_add(Weight::from_ref_time(150_907 as u64).saturating_mul(r as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -317,125 +275,103 @@ impl WeightInfo for () { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - Weight::from_ref_time(48_328_000 as u64) + Weight::from_ref_time(57_410_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy DepositOf (r:1 w:1) - fn second(s: u32, ) -> Weight { - Weight::from_ref_time(30_923_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(s as u64)) + fn second() -> Weight { + Weight::from_ref_time(49_224_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_new(r: u32, ) -> Weight { - Weight::from_ref_time(40_345_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(140_000 as u64).saturating_mul(r as u64)) + fn vote_new() -> Weight { + Weight::from_ref_time(60_933_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) - fn vote_existing(r: u32, ) -> Weight { - Weight::from_ref_time(39_853_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(150_000 as u64).saturating_mul(r as u64)) + fn vote_existing() -> Weight { + Weight::from_ref_time(60_393_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - Weight::from_ref_time(19_364_000 as u64) + Weight::from_ref_time(24_588_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) + // Storage: Democracy DepositOf (r:1 w:1) + // Storage: System Account (r:1 w:1) // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Blacklist (r:0 w:1) - // Storage: Democracy DepositOf (r:1 w:1) - // Storage: System Account (r:1 w:1) - fn blacklist(p: u32, ) -> Weight { - Weight::from_ref_time(57_708_000 as 
u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(192_000 as u64).saturating_mul(p as u64)) + fn blacklist() -> Weight { + Weight::from_ref_time(91_226_000 as u64) .saturating_add(RocksDbWeight::get().reads(5 as u64)) .saturating_add(RocksDbWeight::get().writes(6 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) - fn external_propose(v: u32, ) -> Weight { - Weight::from_ref_time(10_714_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(33_000 as u64).saturating_mul(v as u64)) + fn external_propose() -> Weight { + Weight::from_ref_time(18_898_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - Weight::from_ref_time(3_697_000 as u64) + Weight::from_ref_time(5_136_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - Weight::from_ref_time(3_831_000 as u64) + Weight::from_ref_time(5_243_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - Weight::from_ref_time(20_271_000 as u64) + Weight::from_ref_time(24_275_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) - fn veto_external(v: u32, ) -> Weight { - Weight::from_ref_time(21_319_000 as u64) - // Standard Error: 0 - .saturating_add(Weight::from_ref_time(52_000 as u64).saturating_mul(v as u64)) + fn veto_external() -> Weight { + Weight::from_ref_time(30_988_000 as u64) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Democracy PublicProps (r:1 w:1) // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) - fn cancel_proposal(p: u32, ) -> Weight { - Weight::from_ref_time(43_960_000 as u64) - // Standard Error: 2_000 - .saturating_add(Weight::from_ref_time(184_000 as u64).saturating_mul(p as u64)) + fn cancel_proposal() -> Weight { + Weight::from_ref_time(78_515_000 as u64) .saturating_add(RocksDbWeight::get().reads(3 as u64)) .saturating_add(RocksDbWeight::get().writes(3 as u64)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - Weight::from_ref_time(13_475_000 as u64) + Weight::from_ref_time(16_155_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } - // Storage: Scheduler Lookup (r:1 w:1) - // Storage: Scheduler Agenda (r:1 w:1) - fn cancel_queued(r: u32, ) -> Weight { - Weight::from_ref_time(24_320_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(560_000 as u64).saturating_mul(r as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - } // Storage: Democracy LowestUnbaked (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:0) - // Storage: Democracy ReferendumInfoOf (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:2 w:0) + /// The range of component `r` is `[0, 99]`. 
 	fn on_initialize_base(r: u32, ) -> Weight {
-		Weight::from_ref_time(3_428_000 as u64)
-		// Standard Error: 2_000
-		.saturating_add(Weight::from_ref_time(3_171_000 as u64).saturating_mul(r as u64))
+		Weight::from_ref_time(7_007_000 as u64)
+		// Standard Error: 2_686
+		.saturating_add(Weight::from_ref_time(2_288_781 as u64).saturating_mul(r as u64))
 		.saturating_add(RocksDbWeight::get().reads(2 as u64))
 		.saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64)))
 		.saturating_add(RocksDbWeight::get().writes(1 as u64))
@@ -445,33 +381,36 @@ impl WeightInfo for () {
 	// Storage: Democracy LastTabledWasExternal (r:1 w:0)
 	// Storage: Democracy NextExternal (r:1 w:0)
 	// Storage: Democracy PublicProps (r:1 w:0)
-	// Storage: Democracy ReferendumInfoOf (r:1 w:0)
+	// Storage: Democracy ReferendumInfoOf (r:2 w:0)
+	/// The range of component `r` is `[0, 99]`.
 	fn on_initialize_base_with_launch_period(r: u32, ) -> Weight {
-		Weight::from_ref_time(7_867_000 as u64)
-		// Standard Error: 2_000
-		.saturating_add(Weight::from_ref_time(3_177_000 as u64).saturating_mul(r as u64))
+		Weight::from_ref_time(9_528_000 as u64)
+		// Standard Error: 2_521
+		.saturating_add(Weight::from_ref_time(2_291_780 as u64).saturating_mul(r as u64))
 		.saturating_add(RocksDbWeight::get().reads(5 as u64))
 		.saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64)))
 		.saturating_add(RocksDbWeight::get().writes(1 as u64))
 	}
 	// Storage: Democracy VotingOf (r:3 w:3)
-	// Storage: Democracy ReferendumInfoOf (r:1 w:1)
 	// Storage: Balances Locks (r:1 w:1)
+	// Storage: Democracy ReferendumInfoOf (r:2 w:2)
+	/// The range of component `r` is `[0, 99]`.
 	fn delegate(r: u32, ) -> Weight {
-		Weight::from_ref_time(37_902_000 as u64)
-		// Standard Error: 4_000
-		.saturating_add(Weight::from_ref_time(4_335_000 as u64).saturating_mul(r as u64))
+		Weight::from_ref_time(46_787_000 as u64)
+		// Standard Error: 2_943
+		.saturating_add(Weight::from_ref_time(3_460_194 as u64).saturating_mul(r as u64))
 		.saturating_add(RocksDbWeight::get().reads(4 as u64))
 		.saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64)))
 		.saturating_add(RocksDbWeight::get().writes(4 as u64))
 		.saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(r as u64)))
 	}
 	// Storage: Democracy VotingOf (r:2 w:2)
-	// Storage: Democracy ReferendumInfoOf (r:1 w:1)
+	// Storage: Democracy ReferendumInfoOf (r:2 w:2)
+	/// The range of component `r` is `[0, 99]`.
 	fn undelegate(r: u32, ) -> Weight {
-		Weight::from_ref_time(21_272_000 as u64)
-		// Standard Error: 3_000
-		.saturating_add(Weight::from_ref_time(4_351_000 as u64).saturating_mul(r as u64))
+		Weight::from_ref_time(29_789_000 as u64)
+		// Standard Error: 2_324
+		.saturating_add(Weight::from_ref_time(3_360_918 as u64).saturating_mul(r as u64))
 		.saturating_add(RocksDbWeight::get().reads(2 as u64))
 		.saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(r as u64)))
 		.saturating_add(RocksDbWeight::get().writes(2 as u64))
@@ -479,69 +418,48 @@ impl WeightInfo for () {
 	}
 	// Storage: Democracy PublicProps (r:0 w:1)
 	fn clear_public_proposals() -> Weight {
-		Weight::from_ref_time(4_913_000 as u64)
-		.saturating_add(RocksDbWeight::get().writes(1 as u64))
-	}
-	// Storage: Democracy Preimages (r:1 w:1)
-	fn note_preimage(b: u32, ) -> Weight {
-		Weight::from_ref_time(27_986_000 as u64)
-		// Standard Error: 0
-		.saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64))
-		.saturating_add(RocksDbWeight::get().reads(1 as u64))
-		.saturating_add(RocksDbWeight::get().writes(1 as u64))
-	}
-	// Storage: Democracy Preimages (r:1 w:1)
-	fn note_imminent_preimage(b: u32, ) -> Weight {
-		Weight::from_ref_time(20_058_000 as u64)
-		// Standard Error: 0
-		.saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(b as u64))
-		.saturating_add(RocksDbWeight::get().reads(1 as u64))
-		.saturating_add(RocksDbWeight::get().writes(1 as u64))
-	}
-	// Storage: Democracy Preimages (r:1 w:1)
-	// Storage: System Account (r:1 w:0)
-	fn reap_preimage(b: u32, ) -> Weight {
-		Weight::from_ref_time(28_619_000 as u64)
-		// Standard Error: 0
-		.saturating_add(Weight::from_ref_time(1_000 as u64).saturating_mul(b as u64))
-		.saturating_add(RocksDbWeight::get().reads(2 as u64))
+		Weight::from_ref_time(6_519_000 as u64)
 		.saturating_add(RocksDbWeight::get().writes(1 as u64))
 	}
 	// Storage: Democracy VotingOf (r:1 w:1)
 	// Storage: Balances Locks (r:1 w:1)
 	// Storage: System Account (r:1 w:1)
+	/// The range of component `r` is `[0, 99]`.
 	fn unlock_remove(r: u32, ) -> Weight {
-		Weight::from_ref_time(26_619_000 as u64)
-		// Standard Error: 1_000
-		.saturating_add(Weight::from_ref_time(56_000 as u64).saturating_mul(r as u64))
+		Weight::from_ref_time(28_884_000 as u64)
+		// Standard Error: 2_631
+		.saturating_add(Weight::from_ref_time(163_516 as u64).saturating_mul(r as u64))
 		.saturating_add(RocksDbWeight::get().reads(3 as u64))
 		.saturating_add(RocksDbWeight::get().writes(3 as u64))
 	}
 	// Storage: Democracy VotingOf (r:1 w:1)
 	// Storage: Balances Locks (r:1 w:1)
 	// Storage: System Account (r:1 w:1)
+	/// The range of component `r` is `[0, 99]`.
 	fn unlock_set(r: u32, ) -> Weight {
-		Weight::from_ref_time(25_373_000 as u64)
-		// Standard Error: 1_000
-		.saturating_add(Weight::from_ref_time(142_000 as u64).saturating_mul(r as u64))
+		Weight::from_ref_time(33_498_000 as u64)
+		// Standard Error: 622
+		.saturating_add(Weight::from_ref_time(133_421 as u64).saturating_mul(r as u64))
		.saturating_add(RocksDbWeight::get().reads(3 as u64))
 		.saturating_add(RocksDbWeight::get().writes(3 as u64))
 	}
 	// Storage: Democracy ReferendumInfoOf (r:1 w:1)
 	// Storage: Democracy VotingOf (r:1 w:1)
+	/// The range of component `r` is `[1, 100]`.
fn remove_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_961_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(115_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(18_201_000 as u64) + // Standard Error: 1_007 + .saturating_add(Weight::from_ref_time(152_699 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) + /// The range of component `r` is `[1, 100]`. fn remove_other_vote(r: u32, ) -> Weight { - Weight::from_ref_time(15_992_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(113_000 as u64).saturating_mul(r as u64)) + Weight::from_ref_time(18_455_000 as u64) + // Standard Error: 951 + .saturating_add(Weight::from_ref_time(150_907 as u64).saturating_mul(r as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index 747d1e9f27106..20628da50937a 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -268,7 +268,7 @@ fn registration_should_work() { let mut three_fields = ten(); three_fields.additional.try_push(Default::default()).unwrap(); three_fields.additional.try_push(Default::default()).unwrap(); - assert_eq!(three_fields.additional.try_push(Default::default()), Err(())); + assert!(three_fields.additional.try_push(Default::default()).is_err()); assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten()))); assert_eq!(Identity::identity(10).unwrap().info, ten()); assert_eq!(Balances::free_balance(10), 90); diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index 6c8b5fbaa7362..bfd0870d30c22 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -22,6 +22,9 @@ sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/ sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } +# third party +log = { version = "0.4.17", default-features = false } + [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } sp-core = { version = "6.0.0", path = "../../primitives/core" } diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index c0b0097b07236..d949414e31cb3 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -31,7 +31,7 @@ const SEED: u32 = 0; fn setup_multi( s: u32, z: u32, -) -> Result<(Vec, OpaqueCall), &'static str> { +) -> Result<(Vec, Box<::RuntimeCall>), &'static str> { let mut signatories: Vec = Vec::new(); for i in 0..s { let signatory = account("signatory", i, SEED); @@ -44,8 +44,7 @@ fn setup_multi( // Must first convert to runtime call type. let call: ::RuntimeCall = frame_system::Call::::remark { remark: vec![0; z as usize] }.into(); - let call_data = OpaqueCall::::from_encoded(call.encode()); - Ok((signatories, call_data)) + Ok((signatories, Box::new(call))) } benchmarks! { @@ -74,35 +73,15 @@ benchmarks! { // Transaction Length let z in 0 .. 
10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(call.encoded()); - let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, false, Weight::zero()) - verify { - assert!(Multisigs::::contains_key(multi_account_id, call_hash)); - assert!(!Calls::::contains_key(call_hash)); - } - - as_multi_create_store { - // Signatories, need at least 2 total people - let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, true, Weight::zero()) + }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call, Weight::zero()) verify { assert!(Multisigs::::contains_key(multi_account_id, call_hash)); - assert!(Calls::::contains_key(call_hash)); } as_multi_approve { @@ -111,49 +90,22 @@ benchmarks! { // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(call.encoded()); - let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); - let mut signatories2 = signatories.clone(); - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - // before the call, get the timepoint - let timepoint = Multisig::::timepoint(); - // Create the multi, storing for worst case - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; - assert!(Calls::::contains_key(call_hash)); - let caller2 = signatories2.remove(0); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller2); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::zero()) - verify { - let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; - assert_eq!(multisig.approvals.len(), 2); - } - - as_multi_approve_store { - // Signatories, need at least 3 people (so we don't complete the multisig) - let s in 3 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 
10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // before the call, get the timepoint let timepoint = Multisig::::timepoint(); - // Create the multi, not storing - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), false, Weight::zero())?; - assert!(!Calls::::contains_key(call_hash)); + // Create the multi + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), Weight::zero())?; let caller2 = signatories2.remove(0); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, true, Weight::zero()) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, Weight::zero()) verify { let multisig = Multisigs::::get(multi_account_id, call_hash).ok_or("multisig not created")?; assert_eq!(multisig.approvals.len(), 2); - assert!(Calls::::contains_key(call_hash)); } as_multi_complete { @@ -162,27 +114,27 @@ benchmarks! { // Transaction Length let z in 0 .. 10_000; let (mut signatories, call) = setup_multi::(s, z)?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let mut signatories2 = signatories.clone(); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; // before the call, get the timepoint let timepoint = Multisig::::timepoint(); - // Create the multi, storing it for worst case - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; + // Create the multi + Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), Weight::zero())?; // Everyone except the first person approves for i in 1 .. s - 1 { let mut signatories_loop = signatories2.clone(); let caller_loop = signatories_loop.remove(i as usize); let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, Weight::zero())?; + Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), Weight::zero())?; } let caller2 = signatories2.remove(0); assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller2); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, false, Weight::MAX) + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call, Weight::MAX) verify { assert!(!Multisigs::::contains_key(&multi_account_id, call_hash)); } @@ -195,7 +147,7 @@ benchmarks! 
{ let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); @@ -214,7 +166,7 @@ benchmarks! { let mut signatories2 = signatories.clone(); let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); // before the call, get the timepoint let timepoint = Multisig::::timepoint(); // Create the multi @@ -224,7 +176,6 @@ benchmarks! { signatories, None, call, - false, Weight::zero() )?; let caller2 = signatories2.remove(0); @@ -237,45 +188,6 @@ benchmarks! { assert_eq!(multisig.approvals.len(), 2); } - approve_as_multi_complete { - // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length, not a component - let z = 10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); - let mut signatories2 = signatories.clone(); - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - let call_hash = blake2_256(call.encoded()); - // before the call, get the timepoint - let timepoint = Multisig::::timepoint(); - // Create the multi - Multisig::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone(), true, Weight::zero())?; - // Everyone except the first person approves - for i in 1 .. s - 1 { - let mut signatories_loop = signatories2.clone(); - let caller_loop = signatories_loop.remove(i as usize); - let o = RawOrigin::Signed(caller_loop).into(); - Multisig::::as_multi(o, s as u16, signatories_loop, Some(timepoint), call.clone(), false, Weight::zero())?; - } - let caller2 = signatories2.remove(0); - assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); - // Whitelist caller account from further DB operations. - let caller_key = frame_system::Account::::hashed_key_for(&caller2); - frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); - }: approve_as_multi( - RawOrigin::Signed(caller2), - s as u16, - signatories2, - Some(timepoint), - call_hash, - Weight::MAX - ) - verify { - assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); - } - cancel_as_multi { // Signatories, need at least 2 people let s in 2 .. T::MaxSignatories::get() as u32; @@ -284,20 +196,18 @@ benchmarks! 
{ let (mut signatories, call) = setup_multi::(s, z)?; let multi_account_id = Multisig::::multi_account_id(&signatories, s.try_into().unwrap()); let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = blake2_256(call.encoded()); + let call_hash = call.using_encoded(blake2_256); let timepoint = Multisig::::timepoint(); // Create the multi let o = RawOrigin::Signed(caller.clone()).into(); - Multisig::::as_multi(o, s as u16, signatories.clone(), None, call, true, Weight::zero())?; + Multisig::::as_multi(o, s as u16, signatories.clone(), None, call, Weight::zero())?; assert!(Multisigs::::contains_key(&multi_account_id, call_hash)); - assert!(Calls::::contains_key(call_hash)); // Whitelist caller account from further DB operations. let caller_key = frame_system::Account::::hashed_key_for(&caller); frame_benchmarking::benchmarking::add_to_whitelist(caller_key.into()); }: _(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) verify { assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); - assert!(!Calls::::contains_key(call_hash)); } impl_benchmark_test_suite!(Multisig, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index 3bdb47ffc4568..e3031cc830209 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -47,6 +47,7 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; +pub mod migrations; mod tests; pub mod weights; @@ -57,7 +58,7 @@ use frame_support::{ PostDispatchInfo, }, ensure, - traits::{Currency, Get, ReservableCurrency, WrapperKeepOpaque}, + traits::{Currency, Get, ReservableCurrency}, weights::Weight, RuntimeDebug, }; @@ -73,6 +74,20 @@ pub use weights::WeightInfo; pub use pallet::*; +/// The log target of this pallet. +pub const LOG_TARGET: &'static str = "runtime::multisig"; + +// syntactic sugar for logging. +#[macro_export] +macro_rules! log { + ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: crate::LOG_TARGET, + concat!("[{:?}] ✍️ ", $patter), >::block_number() $(, $values)* + ) + }; +} + type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -100,12 +115,10 @@ pub struct Multisig { approvals: Vec, } -type OpaqueCall = WrapperKeepOpaque<::RuntimeCall>; - type CallHash = [u8; 32]; enum CallOrHash { - Call(OpaqueCall, bool), + Call(::RuntimeCall), Hash([u8; 32]), } @@ -152,9 +165,13 @@ pub mod pallet { type WeightInfo: WeightInfo; } + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::without_storage_info] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(_); /// The set of open multisig operations. @@ -168,10 +185,6 @@ pub mod pallet { Multisig, T::AccountId>, >; - #[pallet::storage] - pub type Calls = - StorageMap<_, Identity, [u8; 32], (OpaqueCall, T::AccountId, BalanceOf)>; - #[pallet::error] pub enum Error { /// Threshold must be 2 or greater. @@ -343,13 +356,13 @@ pub mod pallet { /// taken for its lifetime of `DepositBase + threshold * DepositFactor`. 
/// ------------------------------- /// - DB Weight: - /// - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`) - /// - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`) + /// - Reads: Multisig Storage, [Caller Account] + /// - Writes: Multisig Storage, [Caller Account] /// - Plus Call Weight /// # #[pallet::weight({ let s = other_signatories.len() as u32; - let z = call.encoded_len() as u32; + let z = call.using_encoded(|d| d.len()) as u32; T::WeightInfo::as_multi_create(s, z) .max(T::WeightInfo::as_multi_create_store(s, z)) @@ -362,8 +375,7 @@ pub mod pallet { threshold: u16, other_signatories: Vec, maybe_timepoint: Option>, - call: OpaqueCall, - store_call: bool, + call: Box<::RuntimeCall>, max_weight: Weight, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -372,7 +384,7 @@ pub mod pallet { threshold, other_signatories, maybe_timepoint, - CallOrHash::Call(call, store_call), + CallOrHash::Call(*call), max_weight, ) } @@ -462,8 +474,8 @@ pub mod pallet { /// - Storage: removes one item. /// ---------------------------------- /// - DB Weight: - /// - Read: Multisig Storage, [Caller Account], Refund Account, Calls - /// - Write: Multisig Storage, [Caller Account], Refund Account, Calls + /// - Read: Multisig Storage, [Caller Account], Refund Account + /// - Write: Multisig Storage, [Caller Account], Refund Account /// # #[pallet::weight(T::WeightInfo::cancel_as_multi(other_signatories.len() as u32))] pub fn cancel_as_multi( @@ -489,7 +501,6 @@ pub mod pallet { let err_amount = T::Currency::unreserve(&m.depositor, m.deposit); debug_assert!(err_amount.is_zero()); >::remove(&id, &call_hash); - Self::clear_call(&call_hash); Self::deposit_event(Event::MultisigCancelled { cancelling: who, @@ -531,13 +542,12 @@ impl Pallet { let id = Self::multi_account_id(&signatories, threshold); // Threshold > 1; this means it's a multi-step operation. We extract the `call_hash`. - let (call_hash, call_len, maybe_call, store) = match call_or_hash { - CallOrHash::Call(call, should_store) => { - let call_hash = blake2_256(call.encoded()); - let call_len = call.encoded_len(); - (call_hash, call_len, Some(call), should_store) + let (call_hash, call_len, maybe_call) = match call_or_hash { + CallOrHash::Call(call) => { + let (call_hash, call_len) = call.using_encoded(|d| (blake2_256(d), d.len())); + (call_hash, call_len, Some(call)) }, - CallOrHash::Hash(h) => (h, 0, None, false), + CallOrHash::Hash(h) => (h, 0, None), }; // Branch on whether the operation has already started or not. @@ -556,13 +566,7 @@ impl Pallet { } // We only bother fetching/decoding call if we know that we're ready to execute. - let maybe_approved_call = if approvals >= threshold { - Self::get_call(&call_hash, maybe_call.as_ref()) - } else { - None - }; - - if let Some((call, call_len)) = maybe_approved_call { + if let Some(call) = maybe_call.filter(|_| approvals >= threshold) { // verify weight ensure!( call.get_dispatch_info().weight.all_lte(max_weight), @@ -572,7 +576,6 @@ impl Pallet { // Clean up storage before executing call to avoid an possibility of reentrancy // attack. >::remove(&id, call_hash); - Self::clear_call(&call_hash); T::Currency::unreserve(&m.depositor, m.deposit); let result = call.dispatch(RawOrigin::Signed(id.clone()).into()); @@ -596,19 +599,6 @@ impl Pallet { // We cannot dispatch the call now; either it isn't available, or it is, but we // don't have threshold approvals even with our signature. - // Store the call if desired. 
- let stored = if let Some(data) = maybe_call.filter(|_| store) { - Self::store_call_and_reserve( - who.clone(), - &call_hash, - data, - BalanceOf::::zero(), - )?; - true - } else { - false - }; - if let Some(pos) = maybe_pos { // Record approval. m.approvals.insert(pos, who.clone()); @@ -622,17 +612,11 @@ impl Pallet { } else { // If we already approved and didn't store the Call, then this was useless and // we report an error. - ensure!(stored, Error::::AlreadyApproved); + Err(Error::::AlreadyApproved)? } - let final_weight = if stored { - T::WeightInfo::as_multi_approve_store( - other_signatories_len as u32, - call_len as u32, - ) - } else { - T::WeightInfo::as_multi_approve(other_signatories_len as u32, call_len as u32) - }; + let final_weight = + T::WeightInfo::as_multi_approve(other_signatories_len as u32, call_len as u32); // Call is not made, so the actual weight does not include call Ok(Some(final_weight).into()) } @@ -643,14 +627,7 @@ impl Pallet { // Just start the operation by recording it in storage. let deposit = T::DepositBase::get() + T::DepositFactor::get() * threshold.into(); - // Store the call if desired. - let stored = if let Some(data) = maybe_call.filter(|_| store) { - Self::store_call_and_reserve(who.clone(), &call_hash, data, deposit)?; - true - } else { - T::Currency::reserve(&who, deposit)?; - false - }; + T::Currency::reserve(&who, deposit)?; >::insert( &id, @@ -664,58 +641,13 @@ impl Pallet { ); Self::deposit_event(Event::NewMultisig { approving: who, multisig: id, call_hash }); - let final_weight = if stored { - T::WeightInfo::as_multi_create_store(other_signatories_len as u32, call_len as u32) - } else { - T::WeightInfo::as_multi_create(other_signatories_len as u32, call_len as u32) - }; + let final_weight = + T::WeightInfo::as_multi_create(other_signatories_len as u32, call_len as u32); // Call is not made, so the actual weight does not include call Ok(Some(final_weight).into()) } } - /// Place a call's encoded data in storage, reserving funds as appropriate. - /// - /// We store `data` here because storing `call` would result in needing another `.encode`. - /// - /// Returns a `bool` indicating whether the data did end up being stored. - fn store_call_and_reserve( - who: T::AccountId, - hash: &[u8; 32], - data: OpaqueCall, - other_deposit: BalanceOf, - ) -> DispatchResult { - ensure!(!Calls::::contains_key(hash), Error::::AlreadyStored); - let deposit = other_deposit + - T::DepositBase::get() + - T::DepositFactor::get() * - BalanceOf::::from(((data.encoded_len() + 31) / 32) as u32); - T::Currency::reserve(&who, deposit)?; - Calls::::insert(&hash, (data, who, deposit)); - Ok(()) - } - - /// Attempt to decode and return the call, provided by the user or from storage. - fn get_call( - hash: &[u8; 32], - maybe_known: Option<&OpaqueCall>, - ) -> Option<(::RuntimeCall, usize)> { - maybe_known.map_or_else( - || { - Calls::::get(hash) - .and_then(|(data, ..)| Some((data.try_decode()?, data.encoded_len()))) - }, - |data| Some((data.try_decode()?, data.encoded_len())), - ) - } - - /// Attempt to remove a call from storage, returning any deposit on it to the owner. - fn clear_call(hash: &[u8; 32]) { - if let Some((_, who, deposit)) = Calls::::take(hash) { - T::Currency::unreserve(&who, deposit); - } - } - /// The current `Timepoint`. 
pub fn timepoint() -> Timepoint { Timepoint { diff --git a/frame/multisig/src/migrations.rs b/frame/multisig/src/migrations.rs new file mode 100644 index 0000000000000..5085297cde433 --- /dev/null +++ b/frame/multisig/src/migrations.rs @@ -0,0 +1,86 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Migrations for Multisig Pallet + +use super::*; +use frame_support::{ + dispatch::GetStorageVersion, + traits::{OnRuntimeUpgrade, WrapperKeepOpaque}, + Identity, +}; + +#[cfg(feature = "try-runtime")] +use frame_support::ensure; + +pub mod v1 { + use super::*; + + type OpaqueCall = WrapperKeepOpaque<::RuntimeCall>; + + #[frame_support::storage_alias] + type Calls = StorageMap< + Pallet, + Identity, + [u8; 32], + (OpaqueCall, ::AccountId, BalanceOf), + >; + + pub struct MigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV1 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + let onchain = Pallet::::on_chain_storage_version(); + + ensure!(onchain < 1, "this migration can be deleted"); + + log!(info, "Number of calls to refund and delete: {}", Calls::::iter().count()); + + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + let current = Pallet::::current_storage_version(); + let onchain = Pallet::::on_chain_storage_version(); + + if onchain > 0 { + log!(info, "MigrateToV1 should be removed"); + return T::DbWeight::get().reads(1) + } + + Calls::::drain().for_each(|(_call_hash, (_data, caller, deposit))| { + T::Currency::unreserve(&caller, deposit); + }); + + current.put::>(); + + ::BlockWeights::get().max_block + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), &'static str> { + let onchain = Pallet::::on_chain_storage_version(); + ensure!(onchain < 2, "this migration needs to be removed"); + ensure!(onchain == 1, "this migration needs to be run"); + ensure!( + Calls::::iter().count() == 0, + "there are some dangling calls that need to be destroyed and refunded" + ); + Ok(()) + } + } +} diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index b24a06f454368..f753b6f386c56 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -34,7 +34,6 @@ use sp_runtime::{ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; -type OpaqueCall = super::OpaqueCall; frame_support::construct_runtime!( pub enum Test where @@ -130,8 +129,8 @@ fn now() -> Timepoint { Multisig::timepoint() } -fn call_transfer(dest: u64, value: u64) -> RuntimeCall { - RuntimeCall::Balances(BalancesCall::transfer { dest, value }) +fn call_transfer(dest: u64, value: u64) -> Box { + Box::new(RuntimeCall::Balances(BalancesCall::transfer { dest, value })) } #[test] @@ -144,14 +143,12 @@ fn multisig_deposit_is_taken_and_returned() { let call = call_transfer(6, 15); let call_weight = 
call.get_dispatch_info().weight; - let data = call.encode(); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3], None, - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), Weight::zero() )); assert_eq!(Balances::free_balance(1), 2); @@ -162,8 +159,7 @@ fn multisig_deposit_is_taken_and_returned() { 2, vec![1, 3], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, call_weight )); assert_eq!(Balances::free_balance(1), 5); @@ -171,96 +167,6 @@ fn multisig_deposit_is_taken_and_returned() { }); } -#[test] -fn multisig_deposit_is_taken_and_returned_with_call_storage() { - new_test_ext().execute_with(|| { - let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); - - let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); - assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), - 2, - vec![2, 3], - None, - OpaqueCall::from_encoded(data), - true, - Weight::zero() - )); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(1), 5); - - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), - 2, - vec![1, 3], - Some(now()), - hash, - call_weight - )); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!(Balances::reserved_balance(1), 0); - }); -} - -#[test] -fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { - new_test_ext().execute_with(|| { - let multi = Multisig::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); - - let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); - - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), - 3, - vec![2, 3], - None, - hash, - Weight::zero() - )); - assert_eq!(Balances::free_balance(1), 1); - assert_eq!(Balances::reserved_balance(1), 4); - - assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(2), - 3, - vec![1, 3], - Some(now()), - OpaqueCall::from_encoded(data), - true, - Weight::zero() - )); - assert_eq!(Balances::free_balance(2), 3); - assert_eq!(Balances::reserved_balance(2), 2); - assert_eq!(Balances::free_balance(1), 1); - assert_eq!(Balances::reserved_balance(1), 4); - - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(3), - 3, - vec![1, 2], - Some(now()), - hash, - call_weight - )); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::free_balance(2), 5); - assert_eq!(Balances::reserved_balance(2), 0); - }); -} - #[test] fn cancel_multisig_returns_deposit() { new_test_ext().execute_with(|| { @@ -298,8 +204,8 @@ fn timepoint_checking_works() { assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); - let call = call_transfer(6, 15).encode(); - let hash = blake2_256(&call); + let call = call_transfer(6, 15); + let hash = blake2_256(&call.encode()); assert_noop!( Multisig::approve_as_multi( @@ -328,8 +234,7 @@ fn timepoint_checking_works() { 2, vec![1, 3], None, - 
OpaqueCall::from_encoded(call.clone()), - false, + call.clone(), Weight::zero() ), Error::::NoTimepoint, @@ -341,8 +246,7 @@ fn timepoint_checking_works() { 2, vec![1, 3], Some(later), - OpaqueCall::from_encoded(call), - false, + call, Weight::zero() ), Error::::WrongTimepoint, @@ -350,41 +254,6 @@ fn timepoint_checking_works() { }); } -#[test] -fn multisig_2_of_3_works_with_call_storing() { - new_test_ext().execute_with(|| { - let multi = Multisig::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); - - let call = call_transfer(6, 15); - let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); - assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), - 2, - vec![2, 3], - None, - OpaqueCall::from_encoded(data), - true, - Weight::zero() - )); - assert_eq!(Balances::free_balance(6), 0); - - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), - 2, - vec![1, 3], - Some(now()), - hash, - call_weight - )); - assert_eq!(Balances::free_balance(6), 15); - }); -} - #[test] fn multisig_2_of_3_works() { new_test_ext().execute_with(|| { @@ -395,8 +264,7 @@ fn multisig_2_of_3_works() { let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); + let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), 2, @@ -412,8 +280,7 @@ fn multisig_2_of_3_works() { 2, vec![1, 3], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, call_weight )); assert_eq!(Balances::free_balance(6), 15); @@ -430,8 +297,7 @@ fn multisig_3_of_3_works() { let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); + let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), 3, @@ -455,8 +321,7 @@ fn multisig_3_of_3_works() { 3, vec![1, 2], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, call_weight )); assert_eq!(Balances::free_balance(6), 15); @@ -492,68 +357,6 @@ fn cancel_multisig_works() { }); } -#[test] -fn cancel_multisig_with_call_storage_works() { - new_test_ext().execute_with(|| { - let call = call_transfer(6, 15).encode(); - let hash = blake2_256(&call); - assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(1), - 3, - vec![2, 3], - None, - OpaqueCall::from_encoded(call), - true, - Weight::zero() - )); - assert_eq!(Balances::free_balance(1), 4); - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(2), - 3, - vec![1, 3], - Some(now()), - hash, - Weight::zero() - )); - assert_noop!( - Multisig::cancel_as_multi(RuntimeOrigin::signed(2), 3, vec![1, 3], now(), hash), - Error::::NotOwner, - ); - assert_ok!(Multisig::cancel_as_multi(RuntimeOrigin::signed(1), 3, vec![2, 3], now(), hash),); - assert_eq!(Balances::free_balance(1), 10); - }); -} - -#[test] -fn cancel_multisig_with_alt_call_storage_works() { - new_test_ext().execute_with(|| { - let call = call_transfer(6, 15).encode(); - let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi( - RuntimeOrigin::signed(1), - 3, - vec![2, 3], - None, - hash, - Weight::zero() - )); - assert_eq!(Balances::free_balance(1), 6); - assert_ok!(Multisig::as_multi( - RuntimeOrigin::signed(2), - 3, - vec![1, 3], - Some(now()), 
- OpaqueCall::from_encoded(call), - true, - Weight::zero() - )); - assert_eq!(Balances::free_balance(2), 8); - assert_ok!(Multisig::cancel_as_multi(RuntimeOrigin::signed(1), 3, vec![2, 3], now(), hash)); - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(2), 10); - }); -} - #[test] fn multisig_2_of_3_as_multi_works() { new_test_ext().execute_with(|| { @@ -564,14 +367,12 @@ fn multisig_2_of_3_as_multi_works() { let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3], None, - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), Weight::zero() )); assert_eq!(Balances::free_balance(6), 0); @@ -581,8 +382,7 @@ fn multisig_2_of_3_as_multi_works() { 2, vec![1, 3], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, call_weight )); assert_eq!(Balances::free_balance(6), 15); @@ -599,18 +399,15 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { let call1 = call_transfer(6, 10); let call1_weight = call1.get_dispatch_info().weight; - let data1 = call1.encode(); let call2 = call_transfer(7, 5); let call2_weight = call2.get_dispatch_info().weight; - let data2 = call2.encode(); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3], None, - OpaqueCall::from_encoded(data1.clone()), - false, + call1.clone(), Weight::zero() )); assert_ok!(Multisig::as_multi( @@ -618,8 +415,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { 2, vec![1, 3], None, - OpaqueCall::from_encoded(data2.clone()), - false, + call2.clone(), Weight::zero() )); assert_ok!(Multisig::as_multi( @@ -627,8 +423,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { 2, vec![1, 2], Some(now()), - OpaqueCall::from_encoded(data1), - false, + call1, call1_weight )); assert_ok!(Multisig::as_multi( @@ -636,8 +431,7 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { 2, vec![1, 2], Some(now()), - OpaqueCall::from_encoded(data2), - false, + call2, call2_weight )); @@ -656,15 +450,13 @@ fn multisig_2_of_3_cannot_reissue_same_call() { let call = call_transfer(6, 10); let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); + let hash = blake2_256(&call.encode()); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3], None, - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), Weight::zero() )); assert_ok!(Multisig::as_multi( @@ -672,8 +464,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { 2, vec![1, 3], Some(now()), - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), call_weight )); assert_eq!(Balances::free_balance(multi), 5); @@ -683,8 +474,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { 2, vec![2, 3], None, - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), Weight::zero() )); assert_ok!(Multisig::as_multi( @@ -692,8 +482,7 @@ fn multisig_2_of_3_cannot_reissue_same_call() { 2, vec![1, 2], Some(now()), - OpaqueCall::from_encoded(data), - false, + call.clone(), call_weight )); @@ -714,15 +503,14 @@ fn multisig_2_of_3_cannot_reissue_same_call() { #[test] fn minimum_threshold_check_works() { new_test_ext().execute_with(|| { - let call = call_transfer(6, 15).encode(); + let call = call_transfer(6, 15); assert_noop!( Multisig::as_multi( RuntimeOrigin::signed(1), 0, vec![2], None, - OpaqueCall::from_encoded(call.clone()), - false, + call.clone(), Weight::zero() ), Error::::MinimumThreshold, @@ -733,8 +521,7 @@ fn 
minimum_threshold_check_works() { 1, vec![2], None, - OpaqueCall::from_encoded(call.clone()), - false, + call.clone(), Weight::zero() ), Error::::MinimumThreshold, @@ -745,15 +532,14 @@ fn minimum_threshold_check_works() { #[test] fn too_many_signatories_fails() { new_test_ext().execute_with(|| { - let call = call_transfer(6, 15).encode(); + let call = call_transfer(6, 15); assert_noop!( Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3, 4], None, - OpaqueCall::from_encoded(call), - false, + call.clone(), Weight::zero() ), Error::::TooManySignatories, @@ -815,8 +601,8 @@ fn multisig_1_of_3_works() { assert_ok!(Balances::transfer(RuntimeOrigin::signed(2), multi, 5)); assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); - let call = call_transfer(6, 15).encode(); - let hash = blake2_256(&call); + let call = call_transfer(6, 15); + let hash = blake2_256(&call.encode()); assert_noop!( Multisig::approve_as_multi( RuntimeOrigin::signed(1), @@ -834,17 +620,15 @@ fn multisig_1_of_3_works() { 1, vec![2, 3], None, - OpaqueCall::from_encoded(call), - false, + call.clone(), Weight::zero() ), Error::::MinimumThreshold, ); - let boxed_call = Box::new(call_transfer(6, 15)); assert_ok!(Multisig::as_multi_threshold_1( RuntimeOrigin::signed(1), vec![2, 3], - boxed_call + call_transfer(6, 15) )); assert_eq!(Balances::free_balance(6), 15); @@ -871,14 +655,12 @@ fn weight_check_works() { assert_ok!(Balances::transfer(RuntimeOrigin::signed(3), multi, 5)); let call = call_transfer(6, 15); - let data = call.encode(); assert_ok!(Multisig::as_multi( RuntimeOrigin::signed(1), 2, vec![2, 3], None, - OpaqueCall::from_encoded(data.clone()), - false, + call.clone(), Weight::zero() )); assert_eq!(Balances::free_balance(6), 0); @@ -889,8 +671,7 @@ fn weight_check_works() { 2, vec![1, 3], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, Weight::zero() ), Error::::MaxWeightTooLow, @@ -911,8 +692,7 @@ fn multisig_handles_no_preimage_after_all_approve() { let call = call_transfer(6, 15); let call_weight = call.get_dispatch_info().weight; - let data = call.encode(); - let hash = blake2_256(&data); + let hash = blake2_256(&call.encode()); assert_ok!(Multisig::approve_as_multi( RuntimeOrigin::signed(1), 3, @@ -944,8 +724,7 @@ fn multisig_handles_no_preimage_after_all_approve() { 3, vec![1, 2], Some(now()), - OpaqueCall::from_encoded(data), - false, + call, call_weight )); assert_eq!(Balances::free_balance(6), 15); diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 953cf39cd12db..b3238630d3174 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -140,7 +140,7 @@ pub mod pallet { let sender = ensure_signed(origin)?; let bounded_name: BoundedVec<_, _> = - name.try_into().map_err(|()| Error::::TooLong)?; + name.try_into().map_err(|_| Error::::TooLong)?; ensure!(bounded_name.len() >= T::MinLength::get() as usize, Error::::TooShort); let deposit = if let Some((_, deposit)) = >::get(&sender) { @@ -229,7 +229,7 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; let bounded_name: BoundedVec<_, _> = - name.try_into().map_err(|()| Error::::TooLong)?; + name.try_into().map_err(|_| Error::::TooLong)?; let target = T::Lookup::lookup(target)?; let deposit = >::get(&target).map(|x| x.1).unwrap_or_else(Zero::zero); >::insert(&target, (bounded_name, deposit)); diff --git a/frame/preimage/Cargo.toml b/frame/preimage/Cargo.toml index 9a5cc186cca64..77046f4fb58b6 100644 --- a/frame/preimage/Cargo.toml +++ b/frame/preimage/Cargo.toml @@ -19,6 +19,7 @@ sp-core = { version = 
"6.0.0", default-features = false, optional = true, path = sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } +log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { version = "4.0.0-dev", path = "../balances" } @@ -36,10 +37,13 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "scale-info/std", "sp-core/std", "sp-io/std", "sp-runtime/std", "sp-std/std", ] -try-runtime = ["frame-support/try-runtime"] +try-runtime = [ + "frame-support/try-runtime", +] diff --git a/frame/preimage/src/benchmarking.rs b/frame/preimage/src/benchmarking.rs index 3c7be9db573f4..8a61d7d780bfd 100644 --- a/frame/preimage/src/benchmarking.rs +++ b/frame/preimage/src/benchmarking.rs @@ -35,7 +35,7 @@ fn funded_account(name: &'static str, index: u32) -> T::AccountId { } fn preimage_and_hash() -> (Vec, T::Hash) { - sized_preimage_and_hash::(T::MaxSize::get()) + sized_preimage_and_hash::(MAX_SIZE) } fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { @@ -48,7 +48,7 @@ fn sized_preimage_and_hash(size: u32) -> (Vec, T::Hash) { benchmarks! { // Expensive note - will reserve. note_preimage { - let s in 0 .. T::MaxSize::get(); + let s in 0 .. MAX_SIZE; let caller = funded_account::("caller", 0); whitelist_account!(caller); let (preimage, hash) = sized_preimage_and_hash::(s); @@ -58,7 +58,7 @@ benchmarks! { } // Cheap note - will not reserve since it was requested. note_requested_preimage { - let s in 0 .. T::MaxSize::get(); + let s in 0 .. MAX_SIZE; let caller = funded_account::("caller", 0); whitelist_account!(caller); let (preimage, hash) = sized_preimage_and_hash::(s); @@ -69,7 +69,7 @@ benchmarks! { } // Cheap note - will not reserve since it's the manager. note_no_deposit_preimage { - let s in 0 .. T::MaxSize::get(); + let s in 0 .. MAX_SIZE; let (preimage, hash) = sized_preimage_and_hash::(s); assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); }: note_preimage(T::ManagerOrigin::successful_origin(), preimage) @@ -101,10 +101,12 @@ benchmarks! { let (preimage, hash) = preimage_and_hash::(); let noter = funded_account::("noter", 0); whitelist_account!(noter); - assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter).into(), preimage)); + assert_ok!(Preimage::::note_preimage(RawOrigin::Signed(noter.clone()).into(), preimage)); }: _(T::ManagerOrigin::successful_origin(), hash) verify { - assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); + let deposit = T::BaseDeposit::get() + T::ByteDeposit::get() * MAX_SIZE.into(); + let s = RequestStatus::Requested { deposit: Some((noter, deposit)), count: 1, len: Some(MAX_SIZE) }; + assert_eq!(StatusFor::::get(&hash), Some(s)); } // Cheap request - would unreserve the deposit but none was held. request_no_deposit_preimage { @@ -112,14 +114,16 @@ benchmarks! { assert_ok!(Preimage::::note_preimage(T::ManagerOrigin::successful_origin(), preimage)); }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); + let s = RequestStatus::Requested { deposit: None, count: 2, len: Some(MAX_SIZE) }; + assert_eq!(StatusFor::::get(&hash), Some(s)); } // Cheap request - the preimage is not yet noted, so deposit to unreserve. 
request_unnoted_preimage { let (_, hash) = preimage_and_hash::(); }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); + let s = RequestStatus::Requested { deposit: None, count: 1, len: None }; + assert_eq!(StatusFor::::get(&hash), Some(s)); } // Cheap request - the preimage is already requested, so just a counter bump. request_requested_preimage { @@ -127,7 +131,8 @@ benchmarks! { assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); }: request_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(2))); + let s = RequestStatus::Requested { deposit: None, count: 2, len: None }; + assert_eq!(StatusFor::::get(&hash), Some(s)); } // Expensive unrequest - last reference and it's noted, so will destroy the preimage. @@ -154,7 +159,8 @@ benchmarks! { assert_ok!(Preimage::::request_preimage(T::ManagerOrigin::successful_origin(), hash)); }: unrequest_preimage(T::ManagerOrigin::successful_origin(), hash) verify { - assert_eq!(StatusFor::::get(&hash), Some(RequestStatus::Requested(1))); + let s = RequestStatus::Requested { deposit: None, count: 1, len: None }; + assert_eq!(StatusFor::::get(&hash), Some(s)); } impl_benchmark_test_suite!(Preimage, crate::mock::new_test_ext(), crate::mock::Test); diff --git a/frame/preimage/src/lib.rs b/frame/preimage/src/lib.rs index 90f5ac175f540..e899d3643dbbf 100644 --- a/frame/preimage/src/lib.rs +++ b/frame/preimage/src/lib.rs @@ -30,6 +30,7 @@ #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; #[cfg(test)] mod mock; #[cfg(test)] @@ -37,15 +38,18 @@ mod tests; pub mod weights; use sp_runtime::traits::{BadOrigin, Hash, Saturating}; -use sp_std::prelude::*; +use sp_std::{borrow::Cow, prelude::*}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::Pays, ensure, pallet_prelude::Get, - traits::{Currency, PreimageProvider, PreimageRecipient, ReservableCurrency}, - BoundedVec, + traits::{ + Currency, Defensive, FetchResult, Hash as PreimageHash, PreimageProvider, + PreimageRecipient, QueryPreimage, ReservableCurrency, StorePreimage, + }, + BoundedSlice, BoundedVec, }; use scale_info::TypeInfo; pub use weights::WeightInfo; @@ -59,20 +63,27 @@ pub use pallet::*; #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] pub enum RequestStatus { /// The associated preimage has not yet been requested by the system. The given deposit (if - /// some) is being held until either it becomes requested or the user retracts the primage. - Unrequested(Option<(AccountId, Balance)>), + /// some) is being held until either it becomes requested or the user retracts the preimage. + Unrequested { deposit: (AccountId, Balance), len: u32 }, /// There are a non-zero number of outstanding requests for this hash by this chain. If there - /// is a preimage registered, then it may be removed iff this counter becomes zero. - Requested(u32), + /// is a preimage registered, then `len` is `Some` and it may be removed iff this counter + /// becomes zero. + Requested { deposit: Option<(AccountId, Balance)>, count: u32, len: Option }, } type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +/// Maximum size of preimage we can store is 4mb. +const MAX_SIZE: u32 = 4 * 1024 * 1024; + #[frame_support::pallet] pub mod pallet { use super::*; + /// The current storage version. 
+ const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. @@ -88,9 +99,6 @@ pub mod pallet { /// manage existing preimages. type ManagerOrigin: EnsureOrigin; - /// Max size allowed for a preimage. - type MaxSize: Get; - /// The base deposit for placing a preimage on chain. type BaseDeposit: Get>; @@ -100,6 +108,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] + #[pallet::storage_version(STORAGE_VERSION)] pub struct Pallet(PhantomData); #[pallet::event] @@ -116,7 +125,7 @@ pub mod pallet { #[pallet::error] pub enum Error { /// Preimage is too large to store on-chain. - TooLarge, + TooBig, /// Preimage has already been noted on-chain. AlreadyNoted, /// The user is not authorized to perform this action. @@ -134,10 +143,9 @@ pub mod pallet { pub(super) type StatusFor = StorageMap<_, Identity, T::Hash, RequestStatus>>; - /// The preimages stored by this pallet. #[pallet::storage] pub(super) type PreimageFor = - StorageMap<_, Identity, T::Hash, BoundedVec>; + StorageMap<_, Identity, (T::Hash, u32), BoundedVec>>; #[pallet::call] impl Pallet { @@ -150,9 +158,7 @@ pub mod pallet { // We accept a signed origin which will pay a deposit, or a root origin where a deposit // is not taken. let maybe_sender = Self::ensure_signed_or_manager(origin)?; - let bounded_vec = - BoundedVec::::try_from(bytes).map_err(|()| Error::::TooLarge)?; - let system_requested = Self::note_bytes(bounded_vec, maybe_sender.as_ref())?; + let (system_requested, _) = Self::note_bytes(bytes.into(), maybe_sender.as_ref())?; if system_requested || maybe_sender.is_none() { Ok(Pays::No.into()) } else { @@ -161,6 +167,11 @@ pub mod pallet { } /// Clear an unrequested preimage from the runtime storage. + /// + /// If `len` is provided, then it will be a much cheaper operation. + /// + /// - `hash`: The hash of the preimage to be removed from the store. + /// - `len`: The length of the preimage of `hash`. #[pallet::weight(T::WeightInfo::unnote_preimage())] pub fn unnote_preimage(origin: OriginFor, hash: T::Hash) -> DispatchResult { let maybe_sender = Self::ensure_signed_or_manager(origin)?; @@ -203,41 +214,46 @@ impl Pallet { /// Store some preimage on chain. /// + /// If `maybe_depositor` is `None` then it is also requested. If `Some`, then it is not. + /// /// We verify that the preimage is within the bounds of what the pallet supports. /// /// If the preimage was requested to be uploaded, then the user pays no deposits or tx fees. fn note_bytes( - preimage: BoundedVec, + preimage: Cow<[u8]>, maybe_depositor: Option<&T::AccountId>, - ) -> Result { + ) -> Result<(bool, T::Hash), DispatchError> { let hash = T::Hashing::hash(&preimage); - ensure!(!PreimageFor::::contains_key(hash), Error::::AlreadyNoted); + let len = preimage.len() as u32; + ensure!(len <= MAX_SIZE, Error::::TooBig); - // We take a deposit only if there is a provided depositor, and the preimage was not + // We take a deposit only if there is a provided depositor and the preimage was not // previously requested. This also allows the tx to pay no fee. - let was_requested = match (StatusFor::::get(hash), maybe_depositor) { - (Some(RequestStatus::Requested(..)), _) => true, - (Some(RequestStatus::Unrequested(..)), _) => + let status = match (StatusFor::::get(hash), maybe_depositor) { + (Some(RequestStatus::Requested { count, deposit, .. 
}), _) => + RequestStatus::Requested { count, deposit, len: Some(len) }, + (Some(RequestStatus::Unrequested { .. }), Some(_)) => return Err(Error::::AlreadyNoted.into()), - (None, None) => { - StatusFor::::insert(hash, RequestStatus::Unrequested(None)); - false - }, + (Some(RequestStatus::Unrequested { len, deposit }), None) => + RequestStatus::Requested { deposit: Some(deposit), count: 1, len: Some(len) }, + (None, None) => RequestStatus::Requested { count: 1, len: Some(len), deposit: None }, (None, Some(depositor)) => { let length = preimage.len() as u32; let deposit = T::BaseDeposit::get() .saturating_add(T::ByteDeposit::get().saturating_mul(length.into())); T::Currency::reserve(depositor, deposit)?; - let status = RequestStatus::Unrequested(Some((depositor.clone(), deposit))); - StatusFor::::insert(hash, status); - false + RequestStatus::Unrequested { deposit: (depositor.clone(), deposit), len } }, }; + let was_requested = matches!(status, RequestStatus::Requested { .. }); + StatusFor::::insert(hash, status); + + let _ = Self::insert(&hash, preimage) + .defensive_proof("Unable to insert. Logic error in `note_bytes`?"); - PreimageFor::::insert(hash, preimage); Self::deposit_event(Event::Noted { hash }); - Ok(was_requested) + Ok((was_requested, hash)) } // This function will add a hash to the list of requested preimages. @@ -245,19 +261,15 @@ impl Pallet { // If the preimage already exists before the request is made, the deposit for the preimage is // returned to the user, and removed from their management. fn do_request_preimage(hash: &T::Hash) { - let count = StatusFor::::get(hash).map_or(1, |x| match x { - RequestStatus::Requested(mut count) => { - count.saturating_inc(); - count - }, - RequestStatus::Unrequested(None) => 1, - RequestStatus::Unrequested(Some((owner, deposit))) => { - // Return the deposit - the preimage now has outstanding requests. - T::Currency::unreserve(&owner, deposit); - 1 - }, - }); - StatusFor::::insert(hash, RequestStatus::Requested(count)); + let (count, len, deposit) = + StatusFor::::get(hash).map_or((1, None, None), |x| match x { + RequestStatus::Requested { mut count, len, deposit } => { + count.saturating_inc(); + (count, len, deposit) + }, + RequestStatus::Unrequested { deposit, len } => (1, Some(len), Some(deposit)), + }); + StatusFor::::insert(hash, RequestStatus::Requested { count, len, deposit }); if count == 1 { Self::deposit_event(Event::Requested { hash: *hash }); } @@ -265,6 +277,8 @@ impl Pallet { // Clear a preimage from the storage of the chain, returning any deposit that may be reserved. // + // If `len` is provided, it will be a much cheaper operation. + // // If `maybe_owner` is provided, we verify that it is the correct owner before clearing the // data. fn do_unnote_preimage( @@ -272,51 +286,101 @@ impl Pallet { maybe_check_owner: Option, ) -> DispatchResult { match StatusFor::::get(hash).ok_or(Error::::NotNoted)? { - RequestStatus::Unrequested(Some((owner, deposit))) => { + RequestStatus::Requested { deposit: Some((owner, deposit)), count, len } => { ensure!(maybe_check_owner.map_or(true, |c| c == owner), Error::::NotAuthorized); T::Currency::unreserve(&owner, deposit); + StatusFor::::insert( + hash, + RequestStatus::Requested { deposit: None, count, len }, + ); + Ok(()) }, - RequestStatus::Unrequested(None) => { + RequestStatus::Requested { deposit: None, .. 
} => { ensure!(maybe_check_owner.is_none(), Error::::NotAuthorized); + Self::do_unrequest_preimage(hash) + }, + RequestStatus::Unrequested { deposit: (owner, deposit), len } => { + ensure!(maybe_check_owner.map_or(true, |c| c == owner), Error::::NotAuthorized); + T::Currency::unreserve(&owner, deposit); + StatusFor::::remove(hash); + + Self::remove(hash, len); + Self::deposit_event(Event::Cleared { hash: *hash }); + Ok(()) }, - RequestStatus::Requested(_) => return Err(Error::::Requested.into()), } - StatusFor::::remove(hash); - PreimageFor::::remove(hash); - Self::deposit_event(Event::Cleared { hash: *hash }); - Ok(()) } /// Clear a preimage request. fn do_unrequest_preimage(hash: &T::Hash) -> DispatchResult { match StatusFor::::get(hash).ok_or(Error::::NotRequested)? { - RequestStatus::Requested(mut count) if count > 1 => { + RequestStatus::Requested { mut count, len, deposit } if count > 1 => { count.saturating_dec(); - StatusFor::::insert(hash, RequestStatus::Requested(count)); + StatusFor::::insert(hash, RequestStatus::Requested { count, len, deposit }); }, - RequestStatus::Requested(count) => { + RequestStatus::Requested { count, len, deposit } => { debug_assert!(count == 1, "preimage request counter at zero?"); - PreimageFor::::remove(hash); - StatusFor::::remove(hash); - Self::deposit_event(Event::Cleared { hash: *hash }); + match (len, deposit) { + // Preimage was never noted. + (None, _) => StatusFor::::remove(hash), + // Preimage was noted without owner - just remove it. + (Some(len), None) => { + Self::remove(hash, len); + StatusFor::::remove(hash); + Self::deposit_event(Event::Cleared { hash: *hash }); + }, + // Preimage was noted with owner - move to unrequested so they can get refund. + (Some(len), Some(deposit)) => { + StatusFor::::insert(hash, RequestStatus::Unrequested { deposit, len }); + }, + } }, - RequestStatus::Unrequested(_) => return Err(Error::::NotRequested.into()), + RequestStatus::Unrequested { .. } => return Err(Error::::NotRequested.into()), } Ok(()) } + + fn insert(hash: &T::Hash, preimage: Cow<[u8]>) -> Result<(), ()> { + BoundedSlice::>::try_from(preimage.as_ref()) + .map(|s| PreimageFor::::insert((hash, s.len() as u32), s)) + } + + fn remove(hash: &T::Hash, len: u32) { + PreimageFor::::remove((hash, len)) + } + + fn have(hash: &T::Hash) -> bool { + Self::len(hash).is_some() + } + + fn len(hash: &T::Hash) -> Option { + use RequestStatus::*; + match StatusFor::::get(hash) { + Some(Requested { len: Some(len), .. }) | Some(Unrequested { len, .. }) => Some(len), + _ => None, + } + } + + fn fetch(hash: &T::Hash, len: Option) -> FetchResult { + let len = len.or_else(|| Self::len(hash)).ok_or(DispatchError::Unavailable)?; + PreimageFor::::get((hash, len)) + .map(|p| p.into_inner()) + .map(Into::into) + .ok_or(DispatchError::Unavailable) + } } impl PreimageProvider for Pallet { fn have_preimage(hash: &T::Hash) -> bool { - PreimageFor::::contains_key(hash) + Self::have(hash) } fn preimage_requested(hash: &T::Hash) -> bool { - matches!(StatusFor::::get(hash), Some(RequestStatus::Requested(..))) + matches!(StatusFor::::get(hash), Some(RequestStatus::Requested { .. 
})) } fn get_preimage(hash: &T::Hash) -> Option> { - PreimageFor::::get(hash).map(|preimage| preimage.to_vec()) + Self::fetch(hash, None).ok().map(Cow::into_owned) } fn request_preimage(hash: &T::Hash) { @@ -330,15 +394,60 @@ impl PreimageProvider for Pallet { } impl PreimageRecipient for Pallet { - type MaxSize = T::MaxSize; + type MaxSize = ConstU32; // 2**22 fn note_preimage(bytes: BoundedVec) { // We don't really care if this fails, since that's only the case if someone else has // already noted it. - let _ = Self::note_bytes(bytes, None); + let _ = Self::note_bytes(bytes.into_inner().into(), None); } fn unnote_preimage(hash: &T::Hash) { + // Should never fail if authorization check is skipped. + let res = Self::do_unrequest_preimage(hash); + debug_assert!(res.is_ok(), "unnote_preimage failed - request outstanding?"); + } +} + +impl> QueryPreimage for Pallet { + fn len(hash: &T::Hash) -> Option { + Pallet::::len(hash) + } + + fn fetch(hash: &T::Hash, len: Option) -> FetchResult { + Pallet::::fetch(hash, len) + } + + fn is_requested(hash: &T::Hash) -> bool { + matches!(StatusFor::::get(hash), Some(RequestStatus::Requested { .. })) + } + + fn request(hash: &T::Hash) { + Self::do_request_preimage(hash) + } + + fn unrequest(hash: &T::Hash) { + let res = Self::do_unrequest_preimage(hash); + debug_assert!(res.is_ok(), "do_unrequest_preimage failed - counter underflow?"); + } +} + +impl> StorePreimage for Pallet { + const MAX_LENGTH: usize = MAX_SIZE as usize; + + fn note(bytes: Cow<[u8]>) -> Result { + // We don't really care if this fails, since that's only the case if someone else has + // already noted it. + let maybe_hash = Self::note_bytes(bytes, None).map(|(_, h)| h); + // Map to the correct trait error. + if maybe_hash == Err(DispatchError::from(Error::::TooBig)) { + Err(DispatchError::Exhausted) + } else { + maybe_hash + } + } + + fn unnote(hash: &T::Hash) { // Should never fail if authorization check is skipped. let res = Self::do_unnote_preimage(hash, None); debug_assert!(res.is_ok(), "unnote_preimage failed - request outstanding?"); diff --git a/frame/preimage/src/migration.rs b/frame/preimage/src/migration.rs new file mode 100644 index 0000000000000..a5d15c23c758a --- /dev/null +++ b/frame/preimage/src/migration.rs @@ -0,0 +1,263 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Storage migrations for the preimage pallet. + +use super::*; +use frame_support::{ + storage_alias, + traits::{ConstU32, OnRuntimeUpgrade}, +}; +use sp_std::collections::btree_map::BTreeMap; + +/// The log target. +const TARGET: &'static str = "runtime::preimage::migration::v1"; + +/// The original data layout of the preimage pallet without a specific version number. 
+mod v0 { + use super::*; + + #[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, RuntimeDebug)] + pub enum RequestStatus { + Unrequested(Option<(AccountId, Balance)>), + Requested(u32), + } + + #[storage_alias] + pub type PreimageFor = StorageMap< + Pallet, + Identity, + ::Hash, + BoundedVec>, + >; + + #[storage_alias] + pub type StatusFor = StorageMap< + Pallet, + Identity, + ::Hash, + RequestStatus<::AccountId, BalanceOf>, + >; + + /// Returns the number of images or `None` if the storage is corrupted. + #[cfg(feature = "try-runtime")] + pub fn image_count() -> Option { + let images = v0::PreimageFor::::iter_values().count() as u32; + let status = v0::StatusFor::::iter_values().count() as u32; + + if images == status { + Some(images) + } else { + None + } + } +} + +pub mod v1 { + use super::*; + + /// Migration for moving preimage from V0 to V1 storage. + /// + /// Note: This needs to be run with the same hashing algorithm as before + /// since it is not re-hashing the preimages. + pub struct Migration(sp_std::marker::PhantomData); + + impl OnRuntimeUpgrade for Migration { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + assert_eq!(StorageVersion::get::>(), 0, "can only upgrade from version 0"); + + let images = v0::image_count::().expect("v0 storage corrupted"); + log::info!(target: TARGET, "Migrating {} images", &images); + Ok((images as u32).encode()) + } + + fn on_runtime_upgrade() -> Weight { + let mut weight = T::DbWeight::get().reads(1); + if StorageVersion::get::>() != 0 { + log::warn!( + target: TARGET, + "skipping MovePreimagesIntoBuckets: executed on wrong storage version.\ + Expected version 0" + ); + return weight + } + + let status = v0::StatusFor::::drain().collect::>(); + weight.saturating_accrue(T::DbWeight::get().reads(status.len() as u64)); + + let preimages = v0::PreimageFor::::drain().collect::>(); + weight.saturating_accrue(T::DbWeight::get().reads(preimages.len() as u64)); + + for (hash, status) in status.into_iter() { + let preimage = if let Some(preimage) = preimages.get(&hash) { + preimage + } else { + log::error!(target: TARGET, "preimage not found for hash {:?}", &hash); + continue + }; + let len = preimage.len() as u32; + if len > MAX_SIZE { + log::error!( + target: TARGET, + "preimage too large for hash {:?}, len: {}", + &hash, + len + ); + continue + } + + let status = match status { + v0::RequestStatus::Unrequested(deposit) => match deposit { + Some(deposit) => RequestStatus::Unrequested { deposit, len }, + // `None` depositor becomes system-requested. 
+ None => + RequestStatus::Requested { deposit: None, count: 1, len: Some(len) }, + }, + v0::RequestStatus::Requested(count) if count == 0 => { + log::error!(target: TARGET, "preimage has counter of zero: {:?}", hash); + continue + }, + v0::RequestStatus::Requested(count) => + RequestStatus::Requested { deposit: None, count, len: Some(len) }, + }; + log::trace!(target: TARGET, "Moving preimage {:?} with len {}", hash, len); + + crate::StatusFor::::insert(hash, status); + crate::PreimageFor::::insert(&(hash, len), preimage); + + weight.saturating_accrue(T::DbWeight::get().writes(2)); + } + StorageVersion::new(1).put::>(); + + weight.saturating_add(T::DbWeight::get().writes(1)) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + let old_images: u32 = + Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); + let new_images = image_count::().expect("V1 storage corrupted"); + + if new_images != old_images { + log::error!( + target: TARGET, + "migrated {} images, expected {}", + new_images, + old_images + ); + } + assert_eq!(StorageVersion::get::>(), 1, "must upgrade"); + Ok(()) + } + } + + /// Returns the number of images or `None` if the storage is corrupted. + #[cfg(feature = "try-runtime")] + pub fn image_count() -> Option { + // Use iter_values() to ensure that the values are decodable. + let images = crate::PreimageFor::::iter_values().count() as u32; + let status = crate::StatusFor::::iter_values().count() as u32; + + if images == status { + Some(images) + } else { + None + } + } +} + +#[cfg(test)] +#[cfg(feature = "try-runtime")] +mod test { + use super::*; + use crate::mock::{Test as T, *}; + + use frame_support::bounded_vec; + + #[test] + fn migration_works() { + new_test_ext().execute_with(|| { + assert_eq!(StorageVersion::get::>(), 0); + // Insert some preimages into the v0 storage: + + // Case 1: Unrequested without deposit + let (p, h) = preimage::(128); + v0::PreimageFor::::insert(h, p); + v0::StatusFor::::insert(h, v0::RequestStatus::Unrequested(None)); + // Case 2: Unrequested with deposit + let (p, h) = preimage::(1024); + v0::PreimageFor::::insert(h, p); + v0::StatusFor::::insert(h, v0::RequestStatus::Unrequested(Some((1, 1)))); + // Case 3: Requested by 0 (invalid) + let (p, h) = preimage::(8192); + v0::PreimageFor::::insert(h, p); + v0::StatusFor::::insert(h, v0::RequestStatus::Requested(0)); + // Case 4: Requested by 10 + let (p, h) = preimage::(65536); + v0::PreimageFor::::insert(h, p); + v0::StatusFor::::insert(h, v0::RequestStatus::Requested(10)); + + assert_eq!(v0::image_count::(), Some(4)); + assert_eq!(v1::image_count::(), None, "V1 storage should be corrupted"); + + let state = v1::Migration::::pre_upgrade().unwrap(); + let _w = v1::Migration::::on_runtime_upgrade(); + v1::Migration::::post_upgrade(state).unwrap(); + + // V0 and V1 share the same prefix, so `iter_values` still counts the same. + assert_eq!(v0::image_count::(), Some(3)); + assert_eq!(v1::image_count::(), Some(3)); // One gets skipped therefore 3. 
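+
+			// Summary of the v0 -> v1 status mapping exercised by the cases below
+			// (as implemented in `v1::Migration::on_runtime_upgrade`):
+			//   v0::Unrequested(None)          -> Requested { deposit: None, count: 1, len: Some(len) }
+			//   v0::Unrequested(Some(deposit)) -> Unrequested { deposit, len }
+			//   v0::Requested(0)               -> skipped (invalid; logged as an error)
+			//   v0::Requested(count)           -> Requested { deposit: None, count, len: Some(len) }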
+ assert_eq!(StorageVersion::get::>(), 1); + + // Case 1: Unrequested without deposit becomes system-requested + let (p, h) = preimage::(128); + assert_eq!(crate::PreimageFor::::get(&(h, 128)), Some(p)); + assert_eq!( + crate::StatusFor::::get(h), + Some(RequestStatus::Requested { deposit: None, count: 1, len: Some(128) }) + ); + // Case 2: Unrequested with deposit becomes unrequested + let (p, h) = preimage::(1024); + assert_eq!(crate::PreimageFor::::get(&(h, 1024)), Some(p)); + assert_eq!( + crate::StatusFor::::get(h), + Some(RequestStatus::Unrequested { deposit: (1, 1), len: 1024 }) + ); + // Case 3: Requested by 0 should be skipped + let (_, h) = preimage::(8192); + assert_eq!(crate::PreimageFor::::get(&(h, 8192)), None); + assert_eq!(crate::StatusFor::::get(h), None); + // Case 4: Requested by 10 becomes requested by 10 + let (p, h) = preimage::(65536); + assert_eq!(crate::PreimageFor::::get(&(h, 65536)), Some(p)); + assert_eq!( + crate::StatusFor::::get(h), + Some(RequestStatus::Requested { deposit: None, count: 10, len: Some(65536) }) + ); + }); + } + + /// Returns a preimage with a given size and its hash. + fn preimage( + len: usize, + ) -> (BoundedVec>, ::Hash) { + let p = bounded_vec![1; len]; + let h = ::Hashing::hash_of(&p); + (p, h) + } +} diff --git a/frame/preimage/src/mock.rs b/frame/preimage/src/mock.rs index e12598a35b4bb..ce74ea65bd8aa 100644 --- a/frame/preimage/src/mock.rs +++ b/frame/preimage/src/mock.rs @@ -105,7 +105,6 @@ impl Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureSignedBy; - type MaxSize = ConstU32<1024>; type BaseDeposit = ConstU64<2>; type ByteDeposit = ConstU64<1>; } diff --git a/frame/preimage/src/tests.rs b/frame/preimage/src/tests.rs index e6b64ae16dd8c..f480b9c36b670 100644 --- a/frame/preimage/src/tests.rs +++ b/frame/preimage/src/tests.rs @@ -17,11 +17,35 @@ //! # Scheduler tests. +#![cfg(test)] + use super::*; use crate::mock::*; -use frame_support::{assert_noop, assert_ok}; +use frame_support::{ + assert_err, assert_noop, assert_ok, assert_storage_noop, bounded_vec, + traits::{Bounded, BoundedInline, Hash as PreimageHash}, + StorageNoopGuard, +}; use pallet_balances::Error as BalancesError; +use sp_core::{blake2_256, H256}; + +/// Returns one `Inline`, `Lookup` and `Legacy` item each with different data and hash. 
+pub fn make_bounded_values() -> (Bounded>, Bounded>, Bounded>) { + let data: BoundedInline = bounded_vec![1]; + let inline = Bounded::>::Inline(data); + + let data = vec![1, 2]; + let hash: H256 = blake2_256(&data[..]).into(); + let len = data.len() as u32; + let lookup = Bounded::>::unrequested(hash, len); + + let data = vec![1, 2, 3]; + let hash: H256 = blake2_256(&data[..]).into(); + let legacy = Bounded::>::Legacy { hash, dummy: Default::default() }; + + (inline, lookup, legacy) +} #[test] fn user_note_preimage_works() { @@ -56,10 +80,7 @@ fn manager_note_preimage_works() { assert!(Preimage::have_preimage(&h)); assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); - assert_noop!( - Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1]), - Error::::AlreadyNoted - ); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); }); } @@ -130,14 +151,16 @@ fn requested_then_noted_preimage_cannot_be_unnoted() { new_test_ext().execute_with(|| { assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_noop!( - Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1])), - Error::::Requested - ); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(1), hashed([1]))); + // it's still here. let h = hashed([1]); assert!(Preimage::have_preimage(&h)); assert_eq!(Preimage::get_preimage(&h), Some(vec![1])); + + // now it's gone + assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert!(!Preimage::have_preimage(&hashed([1]))); }); } @@ -145,15 +168,16 @@ fn requested_then_noted_preimage_cannot_be_unnoted() { fn request_note_order_makes_no_difference() { let one_way = new_test_ext().execute_with(|| { assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), ) }); new_test_ext().execute_with(|| { - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![1])); + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); let other_way = ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), @@ -189,6 +213,7 @@ fn request_user_note_order_makes_no_difference() { new_test_ext().execute_with(|| { assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); let other_way = ( StatusFor::::iter().collect::>(), PreimageFor::::iter().collect::>(), @@ -226,8 +251,240 @@ fn user_noted_then_requested_preimage_is_refunded_once_only() { assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(2), vec![1])); assert_ok!(Preimage::request_preimage(RuntimeOrigin::signed(1), hashed([1]))); assert_ok!(Preimage::unrequest_preimage(RuntimeOrigin::signed(1), hashed([1]))); + assert_ok!(Preimage::unnote_preimage(RuntimeOrigin::signed(2), hashed([1]))); // Still have reserve from `vec[1; 3]`. 
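+		// (In the mock runtime this is BaseDeposit 2 + 3 bytes * ByteDeposit 1 = 5.)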
assert_eq!(Balances::reserved_balance(2), 5); assert_eq!(Balances::free_balance(2), 95); }); } + +#[test] +fn noted_preimage_use_correct_map() { + new_test_ext().execute_with(|| { + // Add one preimage per bucket... + for i in 0..7 { + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![0; 128 << (i * 2)])); + } + assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(1), vec![0; MAX_SIZE as usize])); + assert_eq!(PreimageFor::::iter().count(), 8); + + // All are present + assert_eq!(StatusFor::::iter().count(), 8); + + // Now start removing them again... + for i in 0..7 { + assert_ok!(Preimage::unnote_preimage( + RuntimeOrigin::signed(1), + hashed(vec![0; 128 << (i * 2)]) + )); + } + assert_eq!(PreimageFor::::iter().count(), 1); + assert_ok!(Preimage::unnote_preimage( + RuntimeOrigin::signed(1), + hashed(vec![0; MAX_SIZE as usize]) + )); + assert_eq!(PreimageFor::::iter().count(), 0); + + // All are gone + assert_eq!(StatusFor::::iter().count(), 0); + }); +} + +/// The `StorePreimage` and `QueryPreimage` traits work together. +#[test] +fn query_and_store_preimage_workflow() { + new_test_ext().execute_with(|| { + let _guard = StorageNoopGuard::default(); + let data: Vec = vec![1; 512]; + let encoded = data.encode(); + + // Bound an unbound value. + let bound = Preimage::bound(data.clone()).unwrap(); + let (len, hash) = (bound.len().unwrap(), bound.hash()); + + assert_eq!(hash, blake2_256(&encoded).into()); + assert_eq!(bound.len(), Some(len)); + assert!(bound.lookup_needed(), "Should not be Inlined"); + assert_eq!(bound.lookup_len(), Some(len)); + + // The value is requested and available. + assert!(Preimage::is_requested(&hash)); + assert!(::have(&bound)); + assert_eq!(Preimage::len(&hash), Some(len)); + + // It can be fetched with length. + assert_eq!(Preimage::fetch(&hash, Some(len)).unwrap(), encoded); + // ... and without length. + assert_eq!(Preimage::fetch(&hash, None).unwrap(), encoded); + // ... but not with wrong length. + assert_err!(Preimage::fetch(&hash, Some(0)), DispatchError::Unavailable); + + // It can be peeked and decoded correctly. + assert_eq!(Preimage::peek::>(&bound).unwrap(), (data.clone(), Some(len))); + // Request it two more times. + assert_eq!(Preimage::pick::>(hash, len), bound); + Preimage::request(&hash); + // It is requested thrice. + assert!(matches!( + StatusFor::::get(&hash).unwrap(), + RequestStatus::Requested { count: 3, .. } + )); + + // It can be realized and decoded correctly. + assert_eq!(Preimage::realize::>(&bound).unwrap(), (data.clone(), Some(len))); + assert!(matches!( + StatusFor::::get(&hash).unwrap(), + RequestStatus::Requested { count: 2, .. } + )); + // Dropping should unrequest. + Preimage::drop(&bound); + assert!(matches!( + StatusFor::::get(&hash).unwrap(), + RequestStatus::Requested { count: 1, .. } + )); + + // Is still available. + assert!(::have(&bound)); + // Manually unnote it. + Preimage::unnote(&hash); + // Is not available anymore. + assert!(!::have(&bound)); + assert_err!(Preimage::fetch(&hash, Some(len)), DispatchError::Unavailable); + // And not requested since the traits assume permissioned origin. + assert!(!Preimage::is_requested(&hash)); + + // No storage changes remain. Checked by `StorageNoopGuard`. + }); +} + +/// The request function behaves as expected. +#[test] +fn query_preimage_request_works() { + new_test_ext().execute_with(|| { + let _guard = StorageNoopGuard::default(); + let data: Vec = vec![1; 10]; + let hash: PreimageHash = blake2_256(&data[..]).into(); + + // Request the preimage. 
+ ::request(&hash); + + // The preimage is requested with unknown length and cannot be fetched. + assert!(::is_requested(&hash)); + assert!(::len(&hash).is_none()); + assert_noop!(::fetch(&hash, None), DispatchError::Unavailable); + + // Request again. + ::request(&hash); + // The preimage is still requested. + assert!(::is_requested(&hash)); + assert!(::len(&hash).is_none()); + assert_noop!(::fetch(&hash, None), DispatchError::Unavailable); + // But there is only one entry in the map. + assert_eq!(StatusFor::::iter().count(), 1); + + // Un-request the preimage. + ::unrequest(&hash); + // It is still requested. + assert!(::is_requested(&hash)); + // Un-request twice. + ::unrequest(&hash); + // It is not requested anymore. + assert!(!::is_requested(&hash)); + // And there is no entry in the map. + assert_eq!(StatusFor::::iter().count(), 0); + }); +} + +/// The `QueryPreimage` functions can be used together with `Bounded` values. +#[test] +fn query_preimage_hold_and_drop_work() { + new_test_ext().execute_with(|| { + let _guard = StorageNoopGuard::default(); + let (inline, lookup, legacy) = make_bounded_values(); + + // `hold` does nothing for `Inline` values. + assert_storage_noop!(::hold(&inline)); + // `hold` requests `Lookup` values. + ::hold(&lookup); + assert!(::is_requested(&lookup.hash())); + // `hold` requests `Legacy` values. + ::hold(&legacy); + assert!(::is_requested(&legacy.hash())); + + // There are two values requested in total. + assert_eq!(StatusFor::::iter().count(), 2); + + // Cleanup by dropping both. + ::drop(&lookup); + assert!(!::is_requested(&lookup.hash())); + ::drop(&legacy); + assert!(!::is_requested(&legacy.hash())); + + // There are no values requested anymore. + assert_eq!(StatusFor::::iter().count(), 0); + }); +} + +/// The `StorePreimage` trait works as expected. +#[test] +fn store_preimage_basic_works() { + new_test_ext().execute_with(|| { + let _guard = StorageNoopGuard::default(); + let data: Vec = vec![1; 512]; // Too large to inline. + let encoded = Cow::from(data.encode()); + + // Bound the data. + let bound = ::bound(data.clone()).unwrap(); + // The preimage can be peeked. + assert_ok!(::peek(&bound)); + // Un-note the preimage. + ::unnote(&bound.hash()); + // The preimage cannot be peeked anymore. + assert_err!(::peek(&bound), DispatchError::Unavailable); + // Noting the wrong pre-image does not make it peek-able. + assert_ok!(::note(Cow::Borrowed(&data))); + assert_err!(::peek(&bound), DispatchError::Unavailable); + + // Manually note the preimage makes it peek-able again. + assert_ok!(::note(encoded.clone())); + // Noting again works. + assert_ok!(::note(encoded)); + assert_ok!(::peek(&bound)); + + // Cleanup. + ::unnote(&bound.hash()); + let data_hash = blake2_256(&data); + ::unnote(&data_hash.into()); + + // No storage changes remain. Checked by `StorageNoopGuard`. + }); +} + +#[test] +fn store_preimage_note_too_large_errors() { + new_test_ext().execute_with(|| { + // Works with `MAX_LENGTH`. + let len = ::MAX_LENGTH; + let data = vec![0u8; len]; + assert_ok!(::note(data.into())); + + // Errors with `MAX_LENGTH+1`. + let data = vec![0u8; len + 1]; + assert_err!(::note(data.into()), DispatchError::Exhausted); + }); +} + +#[test] +fn store_preimage_bound_too_large_errors() { + new_test_ext().execute_with(|| { + // Using `MAX_LENGTH` number of bytes in a vector does not work + // since SCALE prepends the length. 
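+		// (A `MAX_LENGTH`-byte vector SCALE-encodes with a 4-byte compact length prefix,
+		// so its encoding is `MAX_LENGTH + 4` bytes and exceeds the limit, while
+		// `MAX_LENGTH - 4` bytes encode to exactly `MAX_LENGTH` and fit.)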
+ let len = ::MAX_LENGTH; + let data: Vec = vec![0; len]; + assert_err!(::bound(data.clone()), DispatchError::Exhausted); + + // Works with `MAX_LENGTH-4`. + let data: Vec = vec![0; len - 4]; + assert_ok!(::bound(data.clone())); + }); +} diff --git a/frame/preimage/src/weights.rs b/frame/preimage/src/weights.rs index ad9e3e569e733..186c41b798c6b 100644 --- a/frame/preimage/src/weights.rs +++ b/frame/preimage/src/weights.rs @@ -18,22 +18,24 @@ //! Autogenerated weights for pallet_preimage //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_preimage // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --pallet=pallet_preimage +// --chain=dev // --output=./frame/preimage/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -61,88 +63,90 @@ pub trait WeightInfo { /// Weights for pallet_preimage using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. fn note_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(32_591_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) + .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:0) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(23_350_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(1_681 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:0) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. 
fn note_no_deposit_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(21_436_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(T::DbWeight::get().reads(1 as u64)) + .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - Weight::from_ref_time(44_380_000 as u64) + Weight::from_ref_time(44_567_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - Weight::from_ref_time(30_280_000 as u64) + Weight::from_ref_time(30_065_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - Weight::from_ref_time(42_809_000 as u64) + Weight::from_ref_time(28_470_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - Weight::from_ref_time(28_964_000 as u64) + Weight::from_ref_time(14_601_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - Weight::from_ref_time(17_555_000 as u64) + Weight::from_ref_time(20_121_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - Weight::from_ref_time(7_745_000 as u64) + Weight::from_ref_time(9_440_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - Weight::from_ref_time(29_758_000 as u64) + Weight::from_ref_time(29_013_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - Weight::from_ref_time(18_360_000 as u64) + Weight::from_ref_time(9_223_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(2 as u64)) + .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - Weight::from_ref_time(7_439_000 as u64) + Weight::from_ref_time(9_252_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } @@ -150,88 +154,90 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. 
fn note_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(32_591_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) + .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:0) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. fn note_requested_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(23_350_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(1_681 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:0) + // Storage: Preimage StatusFor (r:1 w:1) + // Storage: Preimage PreimageFor (r:0 w:1) + /// The range of component `s` is `[0, 4194304]`. fn note_no_deposit_preimage(s: u32, ) -> Weight { - Weight::from_ref_time(0 as u64) + Weight::from_ref_time(21_436_000 as u64) // Standard Error: 0 - .saturating_add(Weight::from_ref_time(2_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + .saturating_add(Weight::from_ref_time(1_680 as u64).saturating_mul(s as u64)) + .saturating_add(RocksDbWeight::get().reads(1 as u64)) + .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_preimage() -> Weight { - Weight::from_ref_time(44_380_000 as u64) + Weight::from_ref_time(44_567_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unnote_no_deposit_preimage() -> Weight { - Weight::from_ref_time(30_280_000 as u64) + Weight::from_ref_time(30_065_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_preimage() -> Weight { - Weight::from_ref_time(42_809_000 as u64) + Weight::from_ref_time(28_470_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_no_deposit_preimage() -> Weight { - Weight::from_ref_time(28_964_000 as u64) + Weight::from_ref_time(14_601_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_unnoted_preimage() -> Weight { - Weight::from_ref_time(17_555_000 as u64) + Weight::from_ref_time(20_121_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn request_requested_preimage() -> Weight { - 
Weight::from_ref_time(7_745_000 as u64) + Weight::from_ref_time(9_440_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_preimage() -> Weight { - Weight::from_ref_time(29_758_000 as u64) + Weight::from_ref_time(29_013_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Preimage PreimageFor (r:0 w:1) fn unrequest_unnoted_preimage() -> Weight { - Weight::from_ref_time(18_360_000 as u64) + Weight::from_ref_time(9_223_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) + .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Preimage StatusFor (r:1 w:1) fn unrequest_multi_referenced_preimage() -> Weight { - Weight::from_ref_time(7_439_000 as u64) + Weight::from_ref_time(9_252_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 0b6b7b0f51d9a..18d3d48dc024c 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -452,7 +452,7 @@ pub mod pallet { ensure!(!friends.is_empty(), Error::::NotEnoughFriends); ensure!(threshold as usize <= friends.len(), Error::::NotEnoughFriends); let bounded_friends: FriendsOf = - friends.try_into().map_err(|()| Error::::MaxFriends)?; + friends.try_into().map_err(|_| Error::::MaxFriends)?; ensure!(Self::is_sorted_and_unique(&bounded_friends), Error::::NotSorted); // Total deposit is base fee + number of friends * factor fee let friend_deposit = T::FriendDepositFactor::get() @@ -554,7 +554,7 @@ pub mod pallet { Err(pos) => active_recovery .friends .try_insert(pos, who.clone()) - .map_err(|()| Error::::MaxFriends)?, + .map_err(|_| Error::::MaxFriends)?, } // Update storage with the latest details >::insert(&lost, &rescuer, active_recovery); diff --git a/frame/referenda/src/benchmarking.rs b/frame/referenda/src/benchmarking.rs index 45ec894e2c2c0..bc6fb31bf1127 100644 --- a/frame/referenda/src/benchmarking.rs +++ b/frame/referenda/src/benchmarking.rs @@ -24,10 +24,10 @@ use frame_benchmarking::{account, benchmarks_instance_pallet, whitelist_account} use frame_support::{ assert_ok, dispatch::UnfilteredDispatchable, - traits::{Currency, EnsureOrigin}, + traits::{Bounded, Currency, EnsureOrigin}, }; use frame_system::RawOrigin; -use sp_runtime::traits::{Bounded, Hash}; +use sp_runtime::traits::Bounded as ArithBounded; const SEED: u32 = 0; @@ -42,6 +42,12 @@ fn funded_account, I: 'static>(name: &'static str, index: u32) -> T caller } +fn dummy_call, I: 'static>() -> Bounded<>::RuntimeCall> { + let inner = frame_system::Call::remark { remark: vec![] }; + let call = >::RuntimeCall::from(inner); + T::Preimages::bound(call).unwrap() +} + fn create_referendum, I: 'static>() -> (T::RuntimeOrigin, ReferendumIndex) { let origin: T::RuntimeOrigin = T::SubmitOrigin::successful_origin(); if let Ok(caller) = frame_system::ensure_signed(origin.clone()) { @@ -50,9 +56,9 @@ fn create_referendum, I: 'static>() -> (T::RuntimeOrigin, Referendu } let proposal_origin = Box::new(RawOrigin::Root.into()); - let proposal_hash = T::Hashing::hash_of(&0); + let proposal = dummy_call::(); let enactment_moment = DispatchTime::After(0u32.into()); - let call = Call::::submit 
{ proposal_origin, proposal_hash, enactment_moment }; + let call = crate::Call::::submit { proposal_origin, proposal, enactment_moment }; assert_ok!(call.dispatch_bypass_filter(origin.clone())); let index = ReferendumCount::::get() - 1; (origin, index) @@ -196,7 +202,7 @@ benchmarks_instance_pallet! { }: _( origin, Box::new(RawOrigin::Root.into()), - T::Hashing::hash_of(&0), + dummy_call::(), DispatchTime::After(0u32.into()) ) verify { let index = ReferendumCount::::get().checked_sub(1).unwrap(); diff --git a/frame/referenda/src/lib.rs b/frame/referenda/src/lib.rs index 739f0dbc30ed4..1bdc19d49c414 100644 --- a/frame/referenda/src/lib.rs +++ b/frame/referenda/src/lib.rs @@ -69,11 +69,11 @@ use frame_support::{ ensure, traits::{ schedule::{ - v2::{Anon as ScheduleAnon, Named as ScheduleNamed}, - DispatchTime, MaybeHashed, + v3::{Anon as ScheduleAnon, Named as ScheduleNamed}, + DispatchTime, }, - Currency, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, - ReservableCurrency, VoteTally, + Currency, LockIdentifier, OnUnbalanced, OriginTrait, PollStatus, Polling, QueryPreimage, + ReservableCurrency, StorePreimage, VoteTally, }, BoundedVec, }; @@ -92,10 +92,10 @@ use self::branch::{BeginDecidingBranch, OneFewerDecidingBranch, ServiceBranch}; pub use self::{ pallet::*, types::{ - BalanceOf, CallOf, Curve, DecidingStatus, DecidingStatusOf, Deposit, InsertSorted, - NegativeImbalanceOf, PalletsOriginOf, ReferendumIndex, ReferendumInfo, ReferendumInfoOf, - ReferendumStatus, ReferendumStatusOf, ScheduleAddressOf, TallyOf, TrackIdOf, TrackInfo, - TrackInfoOf, TracksInfo, VotesOf, + BalanceOf, BoundedCallOf, CallOf, Curve, DecidingStatus, DecidingStatusOf, Deposit, + InsertSorted, NegativeImbalanceOf, PalletsOriginOf, ReferendumIndex, ReferendumInfo, + ReferendumInfoOf, ReferendumStatus, ReferendumStatusOf, ScheduleAddressOf, TallyOf, + TrackIdOf, TrackInfo, TrackInfoOf, TracksInfo, VotesOf, }, weights::WeightInfo, }; @@ -149,23 +149,16 @@ pub mod pallet { // System level stuff. type RuntimeCall: Parameter + Dispatchable - + From>; + + From> + + IsType<::RuntimeCall> + + From>; type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; /// The Scheduler. - type Scheduler: ScheduleAnon< - Self::BlockNumber, - CallOf, - PalletsOriginOf, - Hash = Self::Hash, - > + ScheduleNamed< - Self::BlockNumber, - CallOf, - PalletsOriginOf, - Hash = Self::Hash, - >; + type Scheduler: ScheduleAnon, PalletsOriginOf> + + ScheduleNamed, PalletsOriginOf>; /// Currency type for this pallet. type Currency: ReservableCurrency; // Origins and unbalances. @@ -221,6 +214,9 @@ pub mod pallet { Self::BlockNumber, RuntimeOrigin = ::PalletsOrigin, >; + + /// The preimage provider. + type Preimages: QueryPreimage + StorePreimage; } /// The next free referendum index, aka the number of referenda started so far. @@ -259,8 +255,8 @@ pub mod pallet { index: ReferendumIndex, /// The track (and by extension proposal dispatch origin) of this referendum. track: TrackIdOf, - /// The hash of the proposal up for referendum. - proposal_hash: T::Hash, + /// The proposal for the referendum. + proposal: BoundedCallOf, }, /// The decision deposit has been placed. DecisionDepositPlaced { @@ -293,8 +289,8 @@ pub mod pallet { index: ReferendumIndex, /// The track (and by extension proposal dispatch origin) of this referendum. track: TrackIdOf, - /// The hash of the proposal up for referendum. - proposal_hash: T::Hash, + /// The proposal for the referendum. 
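+		/// This is the bounded call itself (inline, or a lookup by hash and length) rather
+		/// than just the proposal hash as before.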
+ proposal: BoundedCallOf, /// The current tally of votes in this referendum. tally: T::Tally, }, @@ -381,7 +377,7 @@ pub mod pallet { /// - `origin`: must be `SubmitOrigin` and the account must have `SubmissionDeposit` funds /// available. /// - `proposal_origin`: The origin from which the proposal should be executed. - /// - `proposal_hash`: The hash of the proposal preimage. + /// - `proposal`: The proposal. /// - `enactment_moment`: The moment that the proposal should be enacted. /// /// Emits `Submitted`. @@ -389,7 +385,7 @@ pub mod pallet { pub fn submit( origin: OriginFor, proposal_origin: Box>, - proposal_hash: T::Hash, + proposal: BoundedCallOf, enactment_moment: DispatchTime, ) -> DispatchResult { let who = T::SubmitOrigin::ensure_origin(origin)?; @@ -403,11 +399,12 @@ pub mod pallet { r }); let now = frame_system::Pallet::::block_number(); - let nudge_call = Call::nudge_referendum { index }; + let nudge_call = + T::Preimages::bound(CallOf::::from(Call::nudge_referendum { index }))?; let status = ReferendumStatus { track, origin: *proposal_origin, - proposal_hash, + proposal: proposal.clone(), enactment: enactment_moment, submitted: now, submission_deposit, @@ -419,7 +416,7 @@ pub mod pallet { }; ReferendumInfoFor::::insert(index, ReferendumInfo::Ongoing(status)); - Self::deposit_event(Event::::Submitted { index, track, proposal_hash }); + Self::deposit_event(Event::::Submitted { index, track, proposal }); Ok(()) } @@ -651,7 +648,8 @@ impl, I: 'static> Polling for Pallet { let mut status = ReferendumStatusOf:: { track: class, origin: frame_support::dispatch::RawOrigin::Root.into(), - proposal_hash: ::hash_of(&index), + proposal: T::Preimages::bound(CallOf::::from(Call::nudge_referendum { index })) + .map_err(|_| ())?, enactment: DispatchTime::After(Zero::zero()), submitted: now, submission_deposit: Deposit { who: dummy_account_id, amount: Zero::zero() }, @@ -709,18 +707,18 @@ impl, I: 'static> Pallet { track: &TrackInfoOf, desired: DispatchTime, origin: PalletsOriginOf, - call_hash: T::Hash, + call: BoundedCallOf, ) { let now = frame_system::Pallet::::block_number(); let earliest_allowed = now.saturating_add(track.min_enactment_period); let desired = desired.evaluate(now); let ok = T::Scheduler::schedule_named( - (ASSEMBLY_ID, "enactment", index).encode(), + (ASSEMBLY_ID, "enactment", index).using_encoded(sp_io::hashing::blake2_256), DispatchTime::At(desired.max(earliest_allowed)), None, 63, origin, - MaybeHashed::Hash(call_hash), + call, ) .is_ok(); debug_assert!(ok, "LOGIC ERROR: bake_referendum/schedule_named failed"); @@ -728,7 +726,7 @@ impl, I: 'static> Pallet { /// Set an alarm to dispatch `call` at block number `when`. 
fn set_alarm( - call: impl Into>, + call: BoundedCallOf, when: T::BlockNumber, ) -> Option<(T::BlockNumber, ScheduleAddressOf)> { let alarm_interval = T::AlarmInterval::get().max(One::one()); @@ -739,7 +737,7 @@ impl, I: 'static> Pallet { None, 128u8, frame_system::RawOrigin::Root.into(), - MaybeHashed::Value(call.into()), + call, ) .ok() .map(|x| (when, x)); @@ -776,7 +774,7 @@ impl, I: 'static> Pallet { Self::deposit_event(Event::::DecisionStarted { index, tally: status.tally.clone(), - proposal_hash: status.proposal_hash, + proposal: status.proposal.clone(), track: status.track, }); let confirming = if is_passing { @@ -843,12 +841,21 @@ impl, I: 'static> Pallet { let alarm_interval = T::AlarmInterval::get().max(One::one()); let when = (next_block + alarm_interval - One::one()) / alarm_interval * alarm_interval; + let call = match T::Preimages::bound(CallOf::::from(Call::one_fewer_deciding { + track, + })) { + Ok(c) => c, + Err(_) => { + debug_assert!(false, "Unable to create a bounded call from `one_fewer_deciding`??",); + return + }, + }; let maybe_result = T::Scheduler::schedule( DispatchTime::At(when), None, 128u8, frame_system::RawOrigin::Root.into(), - MaybeHashed::Value(Call::one_fewer_deciding { track }.into()), + call, ); debug_assert!( maybe_result.is_ok(), @@ -871,7 +878,18 @@ impl, I: 'static> Pallet { if status.alarm.as_ref().map_or(true, |&(when, _)| when != alarm) { // Either no alarm or one that was different Self::ensure_no_alarm(status); - status.alarm = Self::set_alarm(Call::nudge_referendum { index }, alarm); + let call = + match T::Preimages::bound(CallOf::::from(Call::nudge_referendum { index })) { + Ok(c) => c, + Err(_) => { + debug_assert!( + false, + "Unable to create a bounded call from `nudge_referendum`??", + ); + return false + }, + }; + status.alarm = Self::set_alarm(call, alarm); true } else { false @@ -987,14 +1005,8 @@ impl, I: 'static> Pallet { // Passed! 
Self::ensure_no_alarm(&mut status); Self::note_one_fewer_deciding(status.track); - let (desired, call_hash) = (status.enactment, status.proposal_hash); - Self::schedule_enactment( - index, - track, - desired, - status.origin, - call_hash, - ); + let (desired, call) = (status.enactment, status.proposal); + Self::schedule_enactment(index, track, desired, status.origin, call); Self::deposit_event(Event::::Confirmed { index, tally: status.tally, diff --git a/frame/referenda/src/mock.rs b/frame/referenda/src/mock.rs index 920e529bf05ca..c98fbf9a676b1 100644 --- a/frame/referenda/src/mock.rs +++ b/frame/referenda/src/mock.rs @@ -24,7 +24,7 @@ use frame_support::{ assert_ok, ord_parameter_types, parameter_types, traits::{ ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, OriginTrait, Polling, - PreimageRecipient, SortedMembers, + SortedMembers, }, weights::Weight, }; @@ -32,7 +32,7 @@ use frame_system::{EnsureRoot, EnsureSignedBy}; use sp_core::H256; use sp_runtime::{ testing::Header, - traits::{BlakeTwo256, Hash, IdentityLookup}, + traits::{BlakeTwo256, IdentityLookup}, DispatchResult, Perbill, }; @@ -97,7 +97,6 @@ impl pallet_preimage::Config for Test { type WeightInfo = (); type Currency = Balances; type ManagerOrigin = EnsureRoot; - type MaxSize = ConstU32<4096>; type BaseDeposit = (); type ByteDeposit = (); } @@ -111,8 +110,7 @@ impl pallet_scheduler::Config for Test { type MaxScheduledPerBlock = ConstU32<100>; type WeightInfo = (); type OriginPrivilegeCmp = EqualPrivilegeOnly; - type PreimageProvider = Preimage; - type NoPreimagePostponement = ConstU64<10>; + type Preimages = Preimage; } impl pallet_balances::Config for Test { type MaxReserves = (); @@ -229,6 +227,7 @@ impl Config for Test { type UndecidingTimeout = ConstU64<20>; type AlarmInterval = AlarmInterval; type Tracks = TestTracksInfo; + type Preimages = Preimage; } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -306,14 +305,13 @@ pub fn set_balance_proposal(value: u64) -> Vec { .encode() } -pub fn set_balance_proposal_hash(value: u64) -> H256 { +pub fn set_balance_proposal_bounded(value: u64) -> BoundedCallOf { let c = RuntimeCall::Balances(pallet_balances::Call::set_balance { who: 42, new_free: value, new_reserved: 0, }); - >::note_preimage(c.encode().try_into().unwrap()); - BlakeTwo256::hash_of(&c) + ::bound(c).unwrap() } #[allow(dead_code)] @@ -321,7 +319,7 @@ pub fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { Referenda::submit( RuntimeOrigin::signed(who), Box::new(frame_system::RawOrigin::Root.into()), - set_balance_proposal_hash(value), + set_balance_proposal_bounded(value), DispatchTime::After(delay), ) } @@ -449,7 +447,7 @@ impl RefState { assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(frame_support::dispatch::RawOrigin::Root.into()), - set_balance_proposal_hash(1), + set_balance_proposal_bounded(1), DispatchTime::At(10), )); assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0)); diff --git a/frame/referenda/src/tests.rs b/frame/referenda/src/tests.rs index 778d00e516693..355ce3021b87f 100644 --- a/frame/referenda/src/tests.rs +++ b/frame/referenda/src/tests.rs @@ -44,7 +44,7 @@ fn basic_happy_path_works() { assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), - set_balance_proposal_hash(1), + set_balance_proposal_bounded(1), DispatchTime::At(10), )); assert_eq!(Balances::reserved_balance(&1), 2); @@ -175,7 +175,7 @@ fn queueing_works() { assert_ok!(Referenda::submit( RuntimeOrigin::signed(5), 
Box::new(RawOrigin::Root.into()), - set_balance_proposal_hash(0), + set_balance_proposal_bounded(0), DispatchTime::After(0), )); assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(5), 0)); @@ -187,7 +187,7 @@ fn queueing_works() { assert_ok!(Referenda::submit( RuntimeOrigin::signed(i), Box::new(RawOrigin::Root.into()), - set_balance_proposal_hash(i), + set_balance_proposal_bounded(i), DispatchTime::After(0), )); assert_ok!(Referenda::place_decision_deposit(RuntimeOrigin::signed(i), i as u32)); @@ -272,7 +272,7 @@ fn auto_timeout_should_happen_with_nothing_but_submit() { assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), - set_balance_proposal_hash(1), + set_balance_proposal_bounded(1), DispatchTime::At(20), )); run_to(20); @@ -292,13 +292,13 @@ fn tracks_are_distinguished() { assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), - set_balance_proposal_hash(1), + set_balance_proposal_bounded(1), DispatchTime::At(10), )); assert_ok!(Referenda::submit( RuntimeOrigin::signed(2), Box::new(RawOrigin::None.into()), - set_balance_proposal_hash(2), + set_balance_proposal_bounded(2), DispatchTime::At(20), )); @@ -315,7 +315,7 @@ fn tracks_are_distinguished() { ReferendumInfo::Ongoing(ReferendumStatus { track: 0, origin: OriginCaller::system(RawOrigin::Root), - proposal_hash: set_balance_proposal_hash(1), + proposal: set_balance_proposal_bounded(1), enactment: DispatchTime::At(10), submitted: 1, submission_deposit: Deposit { who: 1, amount: 2 }, @@ -331,7 +331,7 @@ fn tracks_are_distinguished() { ReferendumInfo::Ongoing(ReferendumStatus { track: 1, origin: OriginCaller::system(RawOrigin::None), - proposal_hash: set_balance_proposal_hash(2), + proposal: set_balance_proposal_bounded(2), enactment: DispatchTime::At(20), submitted: 1, submission_deposit: Deposit { who: 2, amount: 2 }, @@ -350,13 +350,13 @@ fn tracks_are_distinguished() { #[test] fn submit_errors_work() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); // No track for Signed origins. 
assert_noop!( Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Signed(2).into()), - h, + h.clone(), DispatchTime::At(10), ), Error::::NoTrack @@ -381,7 +381,7 @@ fn decision_deposit_errors_work() { let e = Error::::NotOngoing; assert_noop!(Referenda::place_decision_deposit(RuntimeOrigin::signed(2), 0), e); - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -403,7 +403,7 @@ fn refund_deposit_works() { let e = Error::::BadReferendum; assert_noop!(Referenda::refund_decision_deposit(RuntimeOrigin::signed(1), 0), e); - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -425,7 +425,7 @@ fn refund_deposit_works() { #[test] fn cancel_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -444,7 +444,7 @@ fn cancel_works() { #[test] fn cancel_errors_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -462,7 +462,7 @@ fn cancel_errors_works() { #[test] fn kill_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), @@ -482,7 +482,7 @@ fn kill_works() { #[test] fn kill_errors_works() { new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash(1); + let h = set_balance_proposal_bounded(1); assert_ok!(Referenda::submit( RuntimeOrigin::signed(1), Box::new(RawOrigin::Root.into()), diff --git a/frame/referenda/src/types.rs b/frame/referenda/src/types.rs index a6311e5f925be..2ce93cb6adc3c 100644 --- a/frame/referenda/src/types.rs +++ b/frame/referenda/src/types.rs @@ -19,7 +19,10 @@ use super::*; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::{traits::schedule::Anon, Parameter}; +use frame_support::{ + traits::{schedule::v3::Anon, Bounded}, + Parameter, +}; use scale_info::TypeInfo; use sp_arithmetic::{Rounding::*, SignedRounding::*}; use sp_runtime::{FixedI64, PerThing, RuntimeDebug}; @@ -31,6 +34,7 @@ pub type NegativeImbalanceOf = <>::Currency as Currency< ::AccountId, >>::NegativeImbalance; pub type CallOf = >::RuntimeCall; +pub type BoundedCallOf = Bounded<>::RuntimeCall>; pub type VotesOf = >::Votes; pub type TallyOf = >::Tally; pub type PalletsOriginOf = @@ -39,7 +43,7 @@ pub type ReferendumInfoOf = ReferendumInfo< TrackIdOf, PalletsOriginOf, ::BlockNumber, - ::Hash, + BoundedCallOf, BalanceOf, TallyOf, ::AccountId, @@ -49,7 +53,7 @@ pub type ReferendumStatusOf = ReferendumStatus< TrackIdOf, PalletsOriginOf, ::BlockNumber, - ::Hash, + BoundedCallOf, BalanceOf, TallyOf, ::AccountId, @@ -160,7 +164,7 @@ pub struct ReferendumStatus< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Parameter + Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, - Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Call: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + 
PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, @@ -171,7 +175,7 @@ pub struct ReferendumStatus< /// The origin for this referendum. pub(crate) origin: RuntimeOrigin, /// The hash of the proposal up for referendum. - pub(crate) proposal_hash: Hash, + pub(crate) proposal: Call, /// The time the proposal should be scheduled for enactment. pub(crate) enactment: DispatchTime, /// The time of submission. Once `UndecidingTimeout` passes, it may be closed by anyone if it @@ -197,7 +201,7 @@ pub enum ReferendumInfo< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, - Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Call: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, @@ -209,7 +213,7 @@ pub enum ReferendumInfo< TrackId, RuntimeOrigin, Moment, - Hash, + Call, Balance, Tally, AccountId, @@ -232,12 +236,12 @@ impl< TrackId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, RuntimeOrigin: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Moment: Parameter + Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone + EncodeLike, - Hash: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, + Call: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Balance: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, Tally: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, AccountId: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, ScheduleAddress: Eq + PartialEq + Debug + Encode + Decode + TypeInfo + Clone, - > ReferendumInfo + > ReferendumInfo { /// Take the Decision Deposit from `self`, if there is one. Returns an `Err` if `self` is not /// in a valid state for the Decision Deposit to be refunded. diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 9397c66170425..aaa30fd88ffda 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -18,201 +18,219 @@ //! Scheduler pallet benchmarking. use super::*; -use frame_benchmarking::benchmarks; +use frame_benchmarking::{account, benchmarks}; use frame_support::{ ensure, - traits::{OnInitialize, PreimageProvider, PreimageRecipient}, + traits::{schedule::Priority, BoundedInline}, }; -use sp_runtime::traits::Hash; +use frame_system::RawOrigin; use sp_std::{prelude::*, vec}; use crate::Pallet as Scheduler; -use frame_system::Pallet as System; +use frame_system::Call as SystemCall; + +const SEED: u32 = 0; const BLOCK_NUMBER: u32 = 2; type SystemOrigin = ::RuntimeOrigin; -/// Add `n` named items to the schedule. +/// Add `n` items to the schedule. 
/// /// For `resolved`: +/// - ` /// - `None`: aborted (hash without preimage) /// - `Some(true)`: hash resolves into call if possible, plain call otherwise /// - `Some(false)`: plain call -fn fill_schedule( - when: T::BlockNumber, - n: u32, +fn fill_schedule(when: T::BlockNumber, n: u32) -> Result<(), &'static str> { + let t = DispatchTime::At(when); + let origin: ::PalletsOrigin = frame_system::RawOrigin::Root.into(); + for i in 0..n { + let call = make_call::(None); + let period = Some(((i + 100).into(), 100)); + let name = u32_to_name(i); + Scheduler::::do_schedule_named(name, t, period, 0, origin.clone(), call)?; + } + ensure!(Agenda::::get(when).len() == n as usize, "didn't fill schedule"); + Ok(()) +} + +fn u32_to_name(i: u32) -> TaskName { + i.using_encoded(blake2_256) +} + +fn make_task( periodic: bool, named: bool, - resolved: Option, -) -> Result<(), &'static str> { - for i in 0..n { - // Named schedule is strictly heavier than anonymous - let (call, hash) = call_and_hash::(i); - let call_or_hash = match resolved { - Some(true) => { - T::PreimageProvider::note_preimage(call.encode().try_into().unwrap()); - if T::PreimageProvider::have_preimage(&hash) { - CallOrHashOf::::Hash(hash) - } else { - call.into() - } + signed: bool, + maybe_lookup_len: Option, + priority: Priority, +) -> ScheduledOf { + let call = make_call::(maybe_lookup_len); + let maybe_periodic = match periodic { + true => Some((100u32.into(), 100)), + false => None, + }; + let maybe_id = match named { + true => Some(u32_to_name(0)), + false => None, + }; + let origin = make_origin::(signed); + Scheduled { maybe_id, priority, call, maybe_periodic, origin, _phantom: PhantomData } +} + +fn bounded(len: u32) -> Option::RuntimeCall>> { + let call = + <::RuntimeCall>::from(SystemCall::remark { remark: vec![0; len as usize] }); + T::Preimages::bound(call).ok() +} + +fn make_call(maybe_lookup_len: Option) -> Bounded<::RuntimeCall> { + let bound = BoundedInline::bound() as u32; + let mut len = match maybe_lookup_len { + Some(len) => len.min(T::Preimages::MAX_LENGTH as u32 - 2).max(bound) - 3, + None => bound.saturating_sub(4), + }; + + loop { + let c = match bounded::(len) { + Some(x) => x, + None => { + len -= 1; + continue }, - Some(false) => call.into(), - None => CallOrHashOf::::Hash(hash), }; - let period = match periodic { - true => Some(((i + 100).into(), 100)), - false => None, - }; - let t = DispatchTime::At(when); - let origin = frame_system::RawOrigin::Root.into(); - if named { - Scheduler::::do_schedule_named(i.encode(), t, period, 0, origin, call_or_hash)?; + if c.lookup_needed() == maybe_lookup_len.is_some() { + break c + } + if maybe_lookup_len.is_some() { + len += 1; } else { - Scheduler::::do_schedule(t, period, 0, origin, call_or_hash)?; + if len > 0 { + len -= 1; + } else { + break c + } } } - ensure!(Agenda::::get(when).len() == n as usize, "didn't fill schedule"); - Ok(()) } -fn call_and_hash(i: u32) -> (::RuntimeCall, T::Hash) { - // Essentially a no-op call. - let call: ::RuntimeCall = frame_system::Call::remark { remark: i.encode() }.into(); - let hash = T::Hashing::hash_of(&call); - (call, hash) +fn make_origin(signed: bool) -> ::PalletsOrigin { + match signed { + true => frame_system::RawOrigin::Signed(account("origin", 0, SEED)).into(), + false => frame_system::RawOrigin::Root.into(), + } +} + +fn dummy_counter() -> WeightCounter { + WeightCounter { used: Weight::zero(), limit: Weight::MAX } } benchmarks! { - on_initialize_periodic_named_resolved { - let s in 1 .. 
T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, true, Some(true))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s * 2); - for i in 0..s { - assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); - } + // `service_agendas` when no work is done. + service_agendas_base { + let now = T::BlockNumber::from(BLOCK_NUMBER); + IncompleteSince::::put(now - One::one()); + }: { + Scheduler::::service_agendas(&mut dummy_counter(), now, 0); + } verify { + assert_eq!(IncompleteSince::::get(), Some(now - One::one())); } - on_initialize_named_resolved { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, true, Some(true))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s * 2); - assert!(Agenda::::iter().count() == 0); + // `service_agenda` when no work is done. + service_agenda_base { + let now = BLOCK_NUMBER.into(); + let s in 0 .. T::MaxScheduledPerBlock::get(); + fill_schedule::(now, s)?; + let mut executed = 0; + }: { + Scheduler::::service_agenda(&mut dummy_counter(), &mut executed, now, now, 0); + } verify { + assert_eq!(executed, 0); } - on_initialize_periodic_resolved { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, false, Some(true))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s * 2); - for i in 0..s { - assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); - } + // `service_task` when the task is a non-periodic, non-named, non-fetched call which is not + // dispatched (e.g. due to being overweight). + service_task_base { + let now = BLOCK_NUMBER.into(); + let task = make_task::(false, false, false, None, 0); + // prevent any tasks from actually being executed as we only want the surrounding weight. + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { + //assert_eq!(result, Ok(())); } - on_initialize_resolved { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, false, Some(true))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s * 2); - assert!(Agenda::::iter().count() == 0); + // `service_task` when the task is a non-periodic, non-named, fetched call (with a known + // preimage length) and which is not dispatched (e.g. due to being overweight). + service_task_fetched { + let s in (BoundedInline::bound() as u32) .. (T::Preimages::MAX_LENGTH as u32); + let now = BLOCK_NUMBER.into(); + let task = make_task::(false, false, false, Some(s), 0); + // prevent any tasks from actually being executed as we only want the surrounding weight. + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { } - on_initialize_named_aborted { - let s in 1 .. 
T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, true, None)?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), 0); - if let Some(delay) = T::NoPreimagePostponement::get() { - assert_eq!(Agenda::::get(when + delay).len(), s as usize); - } else { - assert!(Agenda::::iter().count() == 0); - } + // `service_task` when the task is a non-periodic, named, non-fetched call which is not + // dispatched (e.g. due to being overweight). + service_task_named { + let now = BLOCK_NUMBER.into(); + let task = make_task::(false, true, false, None, 0); + // prevent any tasks from actually being executed as we only want the surrounding weight. + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { } - on_initialize_aborted { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, false, None)?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), 0); - if let Some(delay) = T::NoPreimagePostponement::get() { - assert_eq!(Agenda::::get(when + delay).len(), s as usize); - } else { - assert!(Agenda::::iter().count() == 0); - } + // `service_task` when the task is a periodic, non-named, non-fetched call which is not + // dispatched (e.g. due to being overweight). + service_task_periodic { + let now = BLOCK_NUMBER.into(); + let task = make_task::(true, false, false, None, 0); + // prevent any tasks from actually being executed as we only want the surrounding weight. + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::zero() }; + }: { + let result = Scheduler::::service_task(&mut counter, now, now, 0, true, task); + } verify { } - on_initialize_periodic_named { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, true, Some(false))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s); - for i in 0..s { - assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); - } + // `execute_dispatch` when the origin is `Signed`, not counting the dispatable's weight. + execute_dispatch_signed { + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::MAX }; + let origin = make_origin::(true); + let call = T::Preimages::realize(&make_call::(None)).unwrap().0; + }: { + assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); } - - on_initialize_periodic { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, false, Some(false))?; - }: { Scheduler::::on_initialize(when); } verify { - assert_eq!(System::::event_count(), s); - for i in 0..s { - assert_eq!(Agenda::::get(when + (i + 100).into()).len(), 1 as usize); - } } - on_initialize_named { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, true, Some(false))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } - verify { - assert_eq!(System::::event_count(), s); - assert!(Agenda::::iter().count() == 0); + // `execute_dispatch` when the origin is not `Signed`, not counting the dispatable's weight. 
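The `make_call` benchmark helper above relies on the inline-versus-lookup behaviour of `StorePreimage::bound`. A rough mock-runtime sketch of that behaviour (`Preimage` and `RuntimeCall` are assumed mock types; the lengths are illustrative, with the inline bound being `BoundedInline`'s capacity):

use frame_support::traits::StorePreimage;

// A short encoded call fits into `BoundedInline` and needs no preimage lookup.
let small = RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 8] });
let bounded_small = Preimage::bound(small).unwrap();
assert!(!bounded_small.lookup_needed());

// A large call is noted with the preimage pallet; only its hash and length are kept inline.
let large = RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1024] });
let bounded_large = Preimage::bound(large).unwrap();
assert!(bounded_large.lookup_needed());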
+ execute_dispatch_unsigned { + let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::MAX }; + let origin = make_origin::(false); + let call = T::Preimages::realize(&make_call::(None)).unwrap().0; + }: { + assert!(Scheduler::::execute_dispatch(&mut counter, origin, call).is_ok()); } - - on_initialize { - let s in 1 .. T::MaxScheduledPerBlock::get(); - let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, false, false, Some(false))?; - }: { Scheduler::::on_initialize(BLOCK_NUMBER.into()); } verify { - assert_eq!(System::::event_count(), s); - assert!(Agenda::::iter().count() == 0); } schedule { - let s in 0 .. T::MaxScheduledPerBlock::get(); + let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; // Essentially a no-op call. - let inner_call = frame_system::Call::set_storage { items: vec![] }.into(); - let call = Box::new(CallOrHashOf::::Value(inner_call)); + let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); - fill_schedule::(when, s, true, true, Some(false))?; - let schedule_origin = T::ScheduleOrigin::successful_origin(); - }: _>(schedule_origin, when, periodic, priority, call) + fill_schedule::(when, s)?; + }: _(RawOrigin::Root, when, periodic, priority, call) verify { ensure!( Agenda::::get(when).len() == (s + 1) as usize, @@ -224,13 +242,13 @@ benchmarks! { let s in 1 .. T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, true, Some(false))?; + fill_schedule::(when, s)?; assert_eq!(Agenda::::get(when).len(), s as usize); let schedule_origin = T::ScheduleOrigin::successful_origin(); }: _>(schedule_origin, when, 0) verify { ensure!( - Lookup::::get(0.encode()).is_none(), + Lookup::::get(u32_to_name(0)).is_none(), "didn't remove from lookup" ); // Removed schedule is NONE @@ -241,18 +259,16 @@ benchmarks! { } schedule_named { - let s in 0 .. T::MaxScheduledPerBlock::get(); - let id = s.encode(); + let s in 0 .. (T::MaxScheduledPerBlock::get() - 1); + let id = u32_to_name(s); let when = BLOCK_NUMBER.into(); let periodic = Some((T::BlockNumber::one(), 100)); let priority = 0; // Essentially a no-op call. - let inner_call = frame_system::Call::set_storage { items: vec![] }.into(); - let call = Box::new(CallOrHashOf::::Value(inner_call)); + let call = Box::new(SystemCall::set_storage { items: vec![] }.into()); - fill_schedule::(when, s, true, true, Some(false))?; - let schedule_origin = T::ScheduleOrigin::successful_origin(); - }: _>(schedule_origin, id, when, periodic, priority, call) + fill_schedule::(when, s)?; + }: _(RawOrigin::Root, id, when, periodic, priority, call) verify { ensure!( Agenda::::get(when).len() == (s + 1) as usize, @@ -264,12 +280,11 @@ benchmarks! { let s in 1 .. 
T::MaxScheduledPerBlock::get(); let when = BLOCK_NUMBER.into(); - fill_schedule::(when, s, true, true, Some(false))?; - let schedule_origin = T::ScheduleOrigin::successful_origin(); - }: _>(schedule_origin, 0.encode()) + fill_schedule::(when, s)?; + }: _(RawOrigin::Root, u32_to_name(0)) verify { ensure!( - Lookup::::get(0.encode()).is_none(), + Lookup::::get(u32_to_name(0)).is_none(), "didn't remove from lookup" ); // Removed schedule is NONE diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 143fa37a9261d..b5ea0deeba9a3 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -52,27 +52,33 @@ #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub mod migration; #[cfg(test)] mod mock; #[cfg(test)] mod tests; pub mod weights; -use codec::{Codec, Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - dispatch::{DispatchError, DispatchResult, Dispatchable, GetDispatchInfo, Parameter}, + dispatch::{ + DispatchError, DispatchResult, Dispatchable, GetDispatchInfo, Parameter, RawOrigin, + }, + ensure, traits::{ schedule::{self, DispatchTime, MaybeHashed}, - EnsureOrigin, Get, IsType, OriginTrait, PalletInfoAccess, PrivilegeCmp, StorageVersion, + Bounded, CallerTrait, EnsureOrigin, Get, Hash as PreimageHash, IsType, OriginTrait, + PalletInfoAccess, PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage, }, weights::Weight, }; -use frame_system::{self as system, ensure_signed}; +use frame_system::{self as system}; pub use pallet::*; use scale_info::TypeInfo; +use sp_io::hashing::blake2_256; use sp_runtime::{ traits::{BadOrigin, One, Saturating, Zero}, - RuntimeDebug, + BoundedVec, RuntimeDebug, }; use sp_std::{borrow::Borrow, cmp::Ordering, marker::PhantomData, prelude::*}; pub use weights::WeightInfo; @@ -96,24 +102,25 @@ struct ScheduledV1 { /// Information regarding an item to be executed in the future. #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq))] -#[derive(Clone, RuntimeDebug, Encode, Decode, TypeInfo)] -pub struct ScheduledV3 { +#[derive(Clone, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)] +pub struct Scheduled { /// The unique identity for this task, if there is one. - maybe_id: Option>, + maybe_id: Option, /// This task's priority. priority: schedule::Priority, /// The call to be dispatched. call: Call, /// If the call is periodic, then this points to the information concerning that. maybe_periodic: Option>, - /// The origin to dispatch the call. + /// The origin with which to dispatch the call. origin: PalletsOrigin, _phantom: PhantomData, } -use crate::ScheduledV3 as ScheduledV2; +use crate::{Scheduled as ScheduledV3, Scheduled as ScheduledV2}; -pub type ScheduledV2Of = ScheduledV3< +pub type ScheduledV2Of = ScheduledV2< + Vec, ::RuntimeCall, ::BlockNumber, ::PalletsOrigin, @@ -121,57 +128,54 @@ pub type ScheduledV2Of = ScheduledV3< >; pub type ScheduledV3Of = ScheduledV3< + Vec, CallOrHashOf, ::BlockNumber, ::PalletsOrigin, ::AccountId, >; -pub type ScheduledOf = ScheduledV3Of; - -/// The current version of Scheduled struct. 
-pub type Scheduled = - ScheduledV2; +pub type ScheduledOf = Scheduled< + TaskName, + Bounded<::RuntimeCall>, + ::BlockNumber, + ::PalletsOrigin, + ::AccountId, +>; -#[cfg(feature = "runtime-benchmarks")] -mod preimage_provider { - use frame_support::traits::PreimageRecipient; - pub trait PreimageProviderAndMaybeRecipient: PreimageRecipient {} - impl> PreimageProviderAndMaybeRecipient for T {} +struct WeightCounter { + used: Weight, + limit: Weight, } - -#[cfg(not(feature = "runtime-benchmarks"))] -mod preimage_provider { - use frame_support::traits::PreimageProvider; - pub trait PreimageProviderAndMaybeRecipient: PreimageProvider {} - impl> PreimageProviderAndMaybeRecipient for T {} +impl WeightCounter { + fn check_accrue(&mut self, w: Weight) -> bool { + let test = self.used.saturating_add(w); + if test.any_gt(self.limit) { + false + } else { + self.used = test; + true + } + } + fn can_accrue(&mut self, w: Weight) -> bool { + self.used.saturating_add(w).all_lte(self.limit) + } } -pub use preimage_provider::PreimageProviderAndMaybeRecipient; - pub(crate) trait MarginalWeightInfo: WeightInfo { - fn item(periodic: bool, named: bool, resolved: Option) -> Weight { - match (periodic, named, resolved) { - (_, false, None) => Self::on_initialize_aborted(2) - Self::on_initialize_aborted(1), - (_, true, None) => - Self::on_initialize_named_aborted(2) - Self::on_initialize_named_aborted(1), - (false, false, Some(false)) => Self::on_initialize(2) - Self::on_initialize(1), - (false, true, Some(false)) => - Self::on_initialize_named(2) - Self::on_initialize_named(1), - (true, false, Some(false)) => - Self::on_initialize_periodic(2) - Self::on_initialize_periodic(1), - (true, true, Some(false)) => - Self::on_initialize_periodic_named(2) - Self::on_initialize_periodic_named(1), - (false, false, Some(true)) => - Self::on_initialize_resolved(2) - Self::on_initialize_resolved(1), - (false, true, Some(true)) => - Self::on_initialize_named_resolved(2) - Self::on_initialize_named_resolved(1), - (true, false, Some(true)) => - Self::on_initialize_periodic_resolved(2) - Self::on_initialize_periodic_resolved(1), - (true, true, Some(true)) => - Self::on_initialize_periodic_named_resolved(2) - - Self::on_initialize_periodic_named_resolved(1), + fn service_task(maybe_lookup_len: Option, named: bool, periodic: bool) -> Weight { + let base = Self::service_task_base(); + let mut total = match maybe_lookup_len { + None => base, + Some(l) => Self::service_task_fetched(l as u32), + }; + if named { + total.saturating_accrue(Self::service_task_named().saturating_sub(base)); } + if periodic { + total.saturating_accrue(Self::service_task_periodic().saturating_sub(base)); + } + total } } impl MarginalWeightInfo for T {} @@ -179,11 +183,7 @@ impl MarginalWeightInfo for T {} #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{ - dispatch::PostDispatchInfo, - pallet_prelude::*, - traits::{schedule::LookupError, PreimageProvider}, - }; + use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*}; use frame_system::pallet_prelude::*; /// The current storage version. @@ -192,7 +192,6 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::storage_version(STORAGE_VERSION)] - #[pallet::without_storage_info] pub struct Pallet(_); /// `system::Config` should always be included in our implied traits. @@ -207,7 +206,9 @@ pub mod pallet { + IsType<::RuntimeOrigin>; /// The caller origin, overarching type of all pallets origins. 
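The `WeightCounter` introduced above is the bookkeeping primitive used by all of the agenda-servicing code below. A minimal illustration of its two methods, assuming the two-dimensional `Weight` of this branch (the numbers are arbitrary and `from_ref_time` is used purely for illustration):

let mut counter = WeightCounter { used: Weight::zero(), limit: Weight::from_ref_time(10) };
assert!(counter.can_accrue(Weight::from_ref_time(4)));    // would fit, nothing is recorded
assert!(counter.check_accrue(Weight::from_ref_time(4)));  // fits, so it is recorded: used = 4
assert!(!counter.check_accrue(Weight::from_ref_time(7))); // 4 + 7 > 10: rejected, used stays 4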
- type PalletsOrigin: From> + Codec + Clone + Eq + TypeInfo; + type PalletsOrigin: From> + + CallerTrait + + MaxEncodedLen; /// The aggregated call type. type RuntimeCall: Parameter @@ -217,8 +218,7 @@ pub mod pallet { > + GetDispatchInfo + From>; - /// The maximum weight that may be scheduled per block for any dispatchables of less - /// priority than `schedule::HARD_DEADLINE`. + /// The maximum weight that may be scheduled per block for any dispatchables. #[pallet::constant] type MaximumWeight: Get; @@ -235,7 +235,6 @@ pub mod pallet { type OriginPrivilegeCmp: PrivilegeCmp; /// The maximum number of scheduled calls in the queue for a single block. - /// Not strictly enforced, but used for weight estimation. #[pallet::constant] type MaxScheduledPerBlock: Get; @@ -243,21 +242,29 @@ pub mod pallet { type WeightInfo: WeightInfo; /// The preimage provider with which we look up call hashes to get the call. - type PreimageProvider: PreimageProviderAndMaybeRecipient; - - /// If `Some` then the number of blocks to postpone execution for when the item is delayed. - type NoPreimagePostponement: Get>; + type Preimages: QueryPreimage + StorePreimage; } - /// Items to be executed, indexed by the block number that they should be executed on. #[pallet::storage] - pub type Agenda = - StorageMap<_, Twox64Concat, T::BlockNumber, Vec>>, ValueQuery>; + pub type IncompleteSince = StorageValue<_, T::BlockNumber>; - /// Lookup from identity to the block number and index of the task. + /// Items to be executed, indexed by the block number that they should be executed on. + #[pallet::storage] + pub type Agenda = StorageMap< + _, + Twox64Concat, + T::BlockNumber, + BoundedVec>, T::MaxScheduledPerBlock>, + ValueQuery, + >; + + /// Lookup from a name to the block number and index of the task. + /// + /// For v3 -> v4 the previously unbounded identities are Blake2-256 hashed to form the v4 + /// identities. #[pallet::storage] pub(crate) type Lookup = - StorageMap<_, Twox64Concat, Vec, TaskAddress>; + StorageMap<_, Twox64Concat, TaskName, TaskAddress>; /// Events type. #[pallet::event] @@ -270,15 +277,15 @@ pub mod pallet { /// Dispatched some task. Dispatched { task: TaskAddress, - id: Option>, + id: Option<[u8; 32]>, result: DispatchResult, }, /// The call for the provided hash was not found so the task has been aborted. - CallLookupFailed { - task: TaskAddress, - id: Option>, - error: LookupError, - }, + CallUnavailable { task: TaskAddress, id: Option<[u8; 32]> }, + /// The given task was unable to be renewed since the agenda is full at that block. + PeriodicFailed { task: TaskAddress, id: Option<[u8; 32]> }, + /// The given task can never be executed since it is overweight. + PermanentlyOverweight { task: TaskAddress, id: Option<[u8; 32]> }, } #[pallet::error] @@ -291,134 +298,18 @@ pub mod pallet { TargetBlockNumberInPast, /// Reschedule failed because it does not change scheduled time. RescheduleNoChange, + /// Attempt to use a non-named function on a named task. + Named, } #[pallet::hooks] impl Hooks> for Pallet { /// Execute the scheduled calls fn on_initialize(now: T::BlockNumber) -> Weight { - let limit = T::MaximumWeight::get(); - - let mut queued = Agenda::::take(now) - .into_iter() - .enumerate() - .filter_map(|(index, s)| Some((index as u32, s?))) - .collect::>(); - - if queued.len() as u32 > T::MaxScheduledPerBlock::get() { - log::warn!( - target: "runtime::scheduler", - "Warning: This block has more items queued in Scheduler than \ - expected from the runtime configuration. An update might be needed." 
- ); - } - - queued.sort_by_key(|(_, s)| s.priority); - - let next = now + One::one(); - - let mut total_weight: Weight = T::WeightInfo::on_initialize(0); - for (order, (index, mut s)) in queued.into_iter().enumerate() { - let named = if let Some(ref id) = s.maybe_id { - Lookup::::remove(id); - true - } else { - false - }; - - let (call, maybe_completed) = s.call.resolved::(); - s.call = call; - - let resolved = if let Some(completed) = maybe_completed { - T::PreimageProvider::unrequest_preimage(&completed); - true - } else { - false - }; - - let call = match s.call.as_value().cloned() { - Some(c) => c, - None => { - // Preimage not available - postpone until some block. - total_weight.saturating_accrue(T::WeightInfo::item(false, named, None)); - if let Some(delay) = T::NoPreimagePostponement::get() { - let until = now.saturating_add(delay); - if let Some(ref id) = s.maybe_id { - let index = Agenda::::decode_len(until).unwrap_or(0); - Lookup::::insert(id, (until, index as u32)); - } - Agenda::::append(until, Some(s)); - } - continue - }, - }; - - let periodic = s.maybe_periodic.is_some(); - let call_weight = call.get_dispatch_info().weight; - let mut item_weight = T::WeightInfo::item(periodic, named, Some(resolved)); - let origin = <::RuntimeOrigin as From>::from( - s.origin.clone(), - ) - .into(); - if ensure_signed(origin).is_ok() { - // Weights of Signed dispatches expect their signing account to be whitelisted. - item_weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - } - - // We allow a scheduled call if any is true: - // - It's priority is `HARD_DEADLINE` - // - It does not push the weight past the limit. - // - It is the first item in the schedule - let hard_deadline = s.priority <= schedule::HARD_DEADLINE; - let test_weight = - total_weight.saturating_add(call_weight).saturating_add(item_weight); - if !hard_deadline && order > 0 && test_weight.any_gt(limit) { - // Cannot be scheduled this block - postpone until next. - total_weight.saturating_accrue(T::WeightInfo::item(false, named, None)); - if let Some(ref id) = s.maybe_id { - // NOTE: We could reasonably not do this (in which case there would be one - // block where the named and delayed item could not be referenced by name), - // but we will do it anyway since it should be mostly free in terms of - // weight and it is slightly cleaner. 
- let index = Agenda::::decode_len(next).unwrap_or(0); - Lookup::::insert(id, (next, index as u32)); - } - Agenda::::append(next, Some(s)); - continue - } - - let dispatch_origin = s.origin.clone().into(); - let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) { - Ok(post_info) => (post_info.actual_weight, Ok(())), - Err(error_and_info) => - (error_and_info.post_info.actual_weight, Err(error_and_info.error)), - }; - let actual_call_weight = maybe_actual_call_weight.unwrap_or(call_weight); - total_weight.saturating_accrue(item_weight); - total_weight.saturating_accrue(actual_call_weight); - - Self::deposit_event(Event::Dispatched { - task: (now, index), - id: s.maybe_id.clone(), - result, - }); - - if let &Some((period, count)) = &s.maybe_periodic { - if count > 1 { - s.maybe_periodic = Some((period, count - 1)); - } else { - s.maybe_periodic = None; - } - let wake = now + period; - // If scheduled is named, place its information in `Lookup` - if let Some(ref id) = s.maybe_id { - let wake_index = Agenda::::decode_len(wake).unwrap_or(0); - Lookup::::insert(id, (wake, wake_index as u32)); - } - Agenda::::append(wake, Some(s)); - } - } - total_weight + let mut weight_counter = + WeightCounter { used: Weight::zero(), limit: T::MaximumWeight::get() }; + Self::service_agendas(&mut weight_counter, now, u32::max_value()); + weight_counter.used } } @@ -431,7 +322,7 @@ pub mod pallet { when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box>, + call: Box<::RuntimeCall>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); @@ -440,7 +331,7 @@ pub mod pallet { maybe_periodic, priority, origin.caller().clone(), - *call, + T::Preimages::bound(*call)?, )?; Ok(()) } @@ -458,11 +349,11 @@ pub mod pallet { #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] pub fn schedule_named( origin: OriginFor, - id: Vec, + id: TaskName, when: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box>, + call: Box<::RuntimeCall>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); @@ -472,14 +363,14 @@ pub mod pallet { maybe_periodic, priority, origin.caller().clone(), - *call, + T::Preimages::bound(*call)?, )?; Ok(()) } /// Cancel a named scheduled task. 
#[pallet::weight(::WeightInfo::cancel_named(T::MaxScheduledPerBlock::get()))] - pub fn cancel_named(origin: OriginFor, id: Vec) -> DispatchResult { + pub fn cancel_named(origin: OriginFor, id: TaskName) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); Self::do_cancel_named(Some(origin.caller().clone()), id)?; @@ -497,7 +388,7 @@ pub mod pallet { after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box>, + call: Box<::RuntimeCall>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); @@ -506,7 +397,7 @@ pub mod pallet { maybe_periodic, priority, origin.caller().clone(), - *call, + T::Preimages::bound(*call)?, )?; Ok(()) } @@ -519,11 +410,11 @@ pub mod pallet { #[pallet::weight(::WeightInfo::schedule_named(T::MaxScheduledPerBlock::get()))] pub fn schedule_named_after( origin: OriginFor, - id: Vec, + id: TaskName, after: T::BlockNumber, maybe_periodic: Option>, priority: schedule::Priority, - call: Box>, + call: Box<::RuntimeCall>, ) -> DispatchResult { T::ScheduleOrigin::ensure_origin(origin.clone())?; let origin = ::RuntimeOrigin::from(origin); @@ -533,41 +424,70 @@ pub mod pallet { maybe_periodic, priority, origin.caller().clone(), - *call, + T::Preimages::bound(*call)?, )?; Ok(()) } } } -impl Pallet { - /// Migrate storage format from V1 to V3. +impl> Pallet { + /// Migrate storage format from V1 to V4. /// /// Returns the weight consumed by this migration. - pub fn migrate_v1_to_v3() -> Weight { + pub fn migrate_v1_to_v4() -> Weight { + use migration::v1 as old; let mut weight = T::DbWeight::get().reads_writes(1, 1); + // Delete all undecodable values. + // `StorageMap::translate` is not enough since it just skips them and leaves the keys in. + let keys = old::Agenda::::iter_keys().collect::>(); + for key in keys { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if let Err(_) = old::Agenda::::try_get(&key) { + weight.saturating_accrue(T::DbWeight::get().writes(1)); + old::Agenda::::remove(&key); + log::warn!("Deleted undecodable agenda"); + } + } + Agenda::::translate::< Vec::RuntimeCall, T::BlockNumber>>>, _, >(|_, agenda| { - Some( + Some(BoundedVec::truncate_from( agenda .into_iter() .map(|schedule| { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - schedule.map(|schedule| ScheduledV3 { - maybe_id: schedule.maybe_id, - priority: schedule.priority, - call: schedule.call.into(), - maybe_periodic: schedule.maybe_periodic, - origin: system::RawOrigin::Root.into(), - _phantom: Default::default(), + schedule.and_then(|schedule| { + if let Some(id) = schedule.maybe_id.as_ref() { + let name = blake2_256(id); + if let Some(item) = old::Lookup::::take(id) { + Lookup::::insert(name, item); + } + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } + + let call = T::Preimages::bound(schedule.call).ok()?; + + if call.lookup_needed() { + weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); + } + + Some(Scheduled { + maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])), + priority: schedule.priority, + call, + maybe_periodic: schedule.maybe_periodic, + origin: system::RawOrigin::Root.into(), + _phantom: Default::default(), + }) }) }) .collect::>(), - ) + )) }); #[allow(deprecated)] @@ -577,34 +497,62 @@ impl Pallet { &[], ); - StorageVersion::new(3).put::(); + StorageVersion::new(4).put::(); weight + T::DbWeight::get().writes(2) } - /// Migrate storage format from V2 to V3. 
+ /// Migrate storage format from V2 to V4. /// /// Returns the weight consumed by this migration. - pub fn migrate_v2_to_v3() -> Weight { + pub fn migrate_v2_to_v4() -> Weight { + use migration::v2 as old; let mut weight = T::DbWeight::get().reads_writes(1, 1); + // Delete all undecodable values. + // `StorageMap::translate` is not enough since it just skips them and leaves the keys in. + let keys = old::Agenda::::iter_keys().collect::>(); + for key in keys { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if let Err(_) = old::Agenda::::try_get(&key) { + weight.saturating_accrue(T::DbWeight::get().writes(1)); + old::Agenda::::remove(&key); + log::warn!("Deleted undecodable agenda"); + } + } + Agenda::::translate::>>, _>(|_, agenda| { - Some( + Some(BoundedVec::truncate_from( agenda .into_iter() .map(|schedule| { weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); - schedule.map(|schedule| ScheduledV3 { - maybe_id: schedule.maybe_id, - priority: schedule.priority, - call: schedule.call.into(), - maybe_periodic: schedule.maybe_periodic, - origin: schedule.origin, - _phantom: Default::default(), + schedule.and_then(|schedule| { + if let Some(id) = schedule.maybe_id.as_ref() { + let name = blake2_256(id); + if let Some(item) = old::Lookup::::take(id) { + Lookup::::insert(name, item); + } + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } + + let call = T::Preimages::bound(schedule.call).ok()?; + if call.lookup_needed() { + weight.saturating_accrue(T::DbWeight::get().reads_writes(0, 1)); + } + + Some(Scheduled { + maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])), + priority: schedule.priority, + call, + maybe_periodic: schedule.maybe_periodic, + origin: schedule.origin, + _phantom: Default::default(), + }) }) }) .collect::>(), - ) + )) }); #[allow(deprecated)] @@ -614,34 +562,140 @@ impl Pallet { &[], ); - StorageVersion::new(3).put::(); + StorageVersion::new(4).put::(); weight + T::DbWeight::get().writes(2) } - #[cfg(feature = "try-runtime")] - pub fn pre_migrate_to_v3() -> Result<(), &'static str> { - Ok(()) - } + /// Migrate storage format from V3 to V4. + /// + /// Returns the weight consumed by this migration. + #[allow(deprecated)] + pub fn migrate_v3_to_v4() -> Weight { + use migration::v3 as old; + let mut weight = T::DbWeight::get().reads_writes(2, 1); + + // Delete all undecodable values. + // `StorageMap::translate` is not enough since it just skips them and leaves the keys in. 
+ let blocks = old::Agenda::::iter_keys().collect::>(); + for block in blocks { + weight.saturating_accrue(T::DbWeight::get().reads(1)); + if let Err(_) = old::Agenda::::try_get(&block) { + weight.saturating_accrue(T::DbWeight::get().writes(1)); + old::Agenda::::remove(&block); + log::warn!("Deleted undecodable agenda of block: {:?}", block); + } + } - #[cfg(feature = "try-runtime")] - pub fn post_migrate_to_v3() -> Result<(), &'static str> { - use frame_support::dispatch::GetStorageVersion; + Agenda::::translate::>>, _>(|block, agenda| { + log::info!("Migrating agenda of block: {:?}", &block); + Some(BoundedVec::truncate_from( + agenda + .into_iter() + .map(|schedule| { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + schedule + .and_then(|schedule| { + if let Some(id) = schedule.maybe_id.as_ref() { + let name = blake2_256(id); + if let Some(item) = old::Lookup::::take(id) { + Lookup::::insert(name, item); + log::info!("Migrated name for id: {:?}", id); + } else { + log::error!("No name in Lookup for id: {:?}", &id); + } + weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); + } else { + log::info!("Schedule is unnamed"); + } + + let call = match schedule.call { + MaybeHashed::Hash(h) => { + let bounded = Bounded::from_legacy_hash(h); + // Check that the call can be decoded in the new runtime. + if let Err(err) = T::Preimages::peek::< + ::RuntimeCall, + >(&bounded) + { + log::error!( + "Dropping undecodable call {}: {:?}", + &h, + &err + ); + return None + } + weight.saturating_accrue(T::DbWeight::get().reads(1)); + log::info!("Migrated call by hash, hash: {:?}", h); + bounded + }, + MaybeHashed::Value(v) => { + let call = T::Preimages::bound(v) + .map_err(|e| { + log::error!("Could not bound Call: {:?}", e) + }) + .ok()?; + if call.lookup_needed() { + weight.saturating_accrue( + T::DbWeight::get().reads_writes(0, 1), + ); + } + log::info!( + "Migrated call by value, hash: {:?}", + call.hash() + ); + call + }, + }; + + Some(Scheduled { + maybe_id: schedule.maybe_id.map(|x| blake2_256(&x[..])), + priority: schedule.priority, + call, + maybe_periodic: schedule.maybe_periodic, + origin: schedule.origin, + _phantom: Default::default(), + }) + }) + .or_else(|| { + log::info!("Schedule in agenda for block {:?} is empty - nothing to do here.", &block); + None + }) + }) + .collect::>(), + )) + }); - assert!(Self::current_storage_version() == 3); - for k in Agenda::::iter_keys() { - let _ = Agenda::::try_get(k).map_err(|()| "Invalid item in Agenda")?; - } - Ok(()) + #[allow(deprecated)] + frame_support::storage::migration::remove_storage_prefix( + Self::name().as_bytes(), + b"StorageVersion", + &[], + ); + + StorageVersion::new(4).put::(); + + weight + T::DbWeight::get().writes(2) } +} +impl Pallet { /// Helper to migrate scheduler when the pallet origin type has changed. 
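One consequence of the migrations above worth spelling out: named tasks are now keyed by a fixed 32-byte `TaskName`, and every pre-existing `Vec<u8>` identity is Blake2-256 hashed during the upgrade. A small sketch (the id bytes are just an example):

use sp_io::hashing::blake2_256;

let old_id: Vec<u8> = b"my-task".to_vec();
let new_id: TaskName = blake2_256(&old_id); // `[u8; 32]`
// After the migration, `Lookup` is keyed by `new_id`, not by the old byte-string id.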
pub fn migrate_origin + codec::Decode>() { Agenda::::translate::< - Vec, T::BlockNumber, OldOrigin, T::AccountId>>>, + Vec< + Option< + Scheduled< + TaskName, + Bounded<::RuntimeCall>, + T::BlockNumber, + OldOrigin, + T::AccountId, + >, + >, + >, _, >(|_, agenda| { - Some( + Some(BoundedVec::truncate_from( agenda .into_iter() .map(|schedule| { @@ -655,7 +709,7 @@ impl Pallet { }) }) .collect::>(), - ) + )) }); } @@ -676,34 +730,64 @@ impl Pallet { Ok(when) } + fn place_task( + when: T::BlockNumber, + what: ScheduledOf, + ) -> Result, (DispatchError, ScheduledOf)> { + let maybe_name = what.maybe_id; + let index = Self::push_to_agenda(when, what)?; + let address = (when, index); + if let Some(name) = maybe_name { + Lookup::::insert(name, address) + } + Self::deposit_event(Event::Scheduled { when: address.0, index: address.1 }); + Ok(address) + } + + fn push_to_agenda( + when: T::BlockNumber, + what: ScheduledOf, + ) -> Result)> { + let mut agenda = Agenda::::get(when); + let index = if (agenda.len() as u32) < T::MaxScheduledPerBlock::get() { + // will always succeed due to the above check. + let _ = agenda.try_push(Some(what)); + agenda.len() as u32 - 1 + } else { + if let Some(hole_index) = agenda.iter().position(|i| i.is_none()) { + agenda[hole_index] = Some(what); + hole_index as u32 + } else { + return Err((DispatchError::Exhausted, what)) + } + }; + Agenda::::insert(when, agenda); + Ok(index) + } + fn do_schedule( when: DispatchTime, maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: CallOrHashOf, + call: Bounded<::RuntimeCall>, ) -> Result, DispatchError> { let when = Self::resolve_time(when)?; - call.ensure_requested::(); // sanitize maybe_periodic let maybe_periodic = maybe_periodic .filter(|p| p.1 > 1 && !p.0.is_zero()) // Remove one from the number of repetitions since we will schedule one now. .map(|(p, c)| (p, c - 1)); - let s = Some(Scheduled { + let task = Scheduled { maybe_id: None, priority, call, maybe_periodic, origin, - _phantom: PhantomData::::default(), - }); - Agenda::::append(when, s); - let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; - Self::deposit_event(Event::Scheduled { when, index }); - - Ok((when, index)) + _phantom: PhantomData, + }; + Self::place_task(when, task).map_err(|x| x.0) } fn do_cancel( @@ -713,7 +797,7 @@ impl Pallet { let scheduled = Agenda::::try_mutate(when, |agenda| { agenda.get_mut(index as usize).map_or( Ok(None), - |s| -> Result>, DispatchError> { + |s| -> Result>, DispatchError> { if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { if matches!( T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), @@ -727,7 +811,7 @@ impl Pallet { ) })?; if let Some(s) = scheduled { - s.call.ensure_unrequested::(); + T::Preimages::drop(&s.call); if let Some(id) = s.maybe_id { Lookup::::remove(id); } @@ -748,27 +832,23 @@ impl Pallet { return Err(Error::::RescheduleNoChange.into()) } - Agenda::::try_mutate(when, |agenda| -> DispatchResult { + let task = Agenda::::try_mutate(when, |agenda| { let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; - let task = task.take().ok_or(Error::::NotFound)?; - Agenda::::append(new_time, Some(task)); - Ok(()) + ensure!(!matches!(task, Some(Scheduled { maybe_id: Some(_), .. 
})), Error::::Named); + task.take().ok_or(Error::::NotFound) })?; - - let new_index = Agenda::::decode_len(new_time).unwrap_or(1) as u32 - 1; Self::deposit_event(Event::Canceled { when, index }); - Self::deposit_event(Event::Scheduled { when: new_time, index: new_index }); - Ok((new_time, new_index)) + Self::place_task(new_time, task).map_err(|x| x.0) } fn do_schedule_named( - id: Vec, + id: TaskName, when: DispatchTime, maybe_periodic: Option>, priority: schedule::Priority, origin: T::PalletsOrigin, - call: CallOrHashOf, + call: Bounded<::RuntimeCall>, ) -> Result, DispatchError> { // ensure id it is unique if Lookup::::contains_key(&id) { @@ -777,32 +857,24 @@ impl Pallet { let when = Self::resolve_time(when)?; - call.ensure_requested::(); - // sanitize maybe_periodic let maybe_periodic = maybe_periodic .filter(|p| p.1 > 1 && !p.0.is_zero()) // Remove one from the number of repetitions since we will schedule one now. .map(|(p, c)| (p, c - 1)); - let s = Scheduled { - maybe_id: Some(id.clone()), + let task = Scheduled { + maybe_id: Some(id), priority, call, maybe_periodic, origin, _phantom: Default::default(), }; - Agenda::::append(when, Some(s)); - let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; - let address = (when, index); - Lookup::::insert(&id, &address); - Self::deposit_event(Event::Scheduled { when, index }); - - Ok(address) + Self::place_task(when, task).map_err(|x| x.0) } - fn do_cancel_named(origin: Option, id: Vec) -> DispatchResult { + fn do_cancel_named(origin: Option, id: TaskName) -> DispatchResult { Lookup::::try_mutate_exists(id, |lookup| -> DispatchResult { if let Some((when, index)) = lookup.take() { let i = index as usize; @@ -815,7 +887,7 @@ impl Pallet { ) { return Err(BadOrigin.into()) } - s.call.ensure_unrequested::(); + T::Preimages::drop(&s.call); } *s = None; } @@ -830,42 +902,245 @@ impl Pallet { } fn do_reschedule_named( - id: Vec, + id: TaskName, new_time: DispatchTime, ) -> Result, DispatchError> { let new_time = Self::resolve_time(new_time)?; - Lookup::::try_mutate_exists( - id, - |lookup| -> Result, DispatchError> { - let (when, index) = lookup.ok_or(Error::::NotFound)?; + let lookup = Lookup::::get(id); + let (when, index) = lookup.ok_or(Error::::NotFound)?; - if new_time == when { - return Err(Error::::RescheduleNoChange.into()) - } + if new_time == when { + return Err(Error::::RescheduleNoChange.into()) + } - Agenda::::try_mutate(when, |agenda| -> DispatchResult { - let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; - let task = task.take().ok_or(Error::::NotFound)?; - Agenda::::append(new_time, Some(task)); + let task = Agenda::::try_mutate(when, |agenda| { + let task = agenda.get_mut(index as usize).ok_or(Error::::NotFound)?; + task.take().ok_or(Error::::NotFound) + })?; + Self::deposit_event(Event::Canceled { when, index }); + Self::place_task(new_time, task).map_err(|x| x.0) + } +} - Ok(()) - })?; +enum ServiceTaskError { + /// Could not be executed due to missing preimage. + Unavailable, + /// Could not be executed due to weight limitations. + Overweight, +} +use ServiceTaskError::*; - let new_index = Agenda::::decode_len(new_time).unwrap_or(1) as u32 - 1; - Self::deposit_event(Event::Canceled { when, index }); - Self::deposit_event(Event::Scheduled { when: new_time, index: new_index }); +impl Pallet { + /// Service up to `max` agendas queue starting from earliest incompletely executed agenda. 
+ fn service_agendas(weight: &mut WeightCounter, now: T::BlockNumber, max: u32) { + if !weight.check_accrue(T::WeightInfo::service_agendas_base()) { + return + } + + let mut incomplete_since = now + One::one(); + let mut when = IncompleteSince::::take().unwrap_or(now); + let mut executed = 0; + + let max_items = T::MaxScheduledPerBlock::get(); + let mut count_down = max; + let service_agenda_base_weight = T::WeightInfo::service_agenda_base(max_items); + while count_down > 0 && when <= now && weight.can_accrue(service_agenda_base_weight) { + if !Self::service_agenda(weight, &mut executed, now, when, u32::max_value()) { + incomplete_since = incomplete_since.min(when); + } + when.saturating_inc(); + count_down.saturating_dec(); + } + incomplete_since = incomplete_since.min(when); + if incomplete_since <= now { + IncompleteSince::::put(incomplete_since); + } + } + + /// Returns `true` if the agenda was fully completed, `false` if it should be revisited at a + /// later block. + fn service_agenda( + weight: &mut WeightCounter, + executed: &mut u32, + now: T::BlockNumber, + when: T::BlockNumber, + max: u32, + ) -> bool { + let mut agenda = Agenda::::get(when); + let mut ordered = agenda + .iter() + .enumerate() + .filter_map(|(index, maybe_item)| { + maybe_item.as_ref().map(|item| (index as u32, item.priority)) + }) + .collect::>(); + ordered.sort_by_key(|k| k.1); + let within_limit = + weight.check_accrue(T::WeightInfo::service_agenda_base(ordered.len() as u32)); + debug_assert!(within_limit, "weight limit should have been checked in advance"); + + // Items which we know can be executed and have postponed for execution in a later block. + let mut postponed = (ordered.len() as u32).saturating_sub(max); + // Items which we don't know can ever be executed. + let mut dropped = 0; + + for (agenda_index, _) in ordered.into_iter().take(max as usize) { + let task = match agenda[agenda_index as usize].take() { + None => continue, + Some(t) => t, + }; + let base_weight = T::WeightInfo::service_task( + task.call.lookup_len().map(|x| x as usize), + task.maybe_id.is_some(), + task.maybe_periodic.is_some(), + ); + if !weight.can_accrue(base_weight) { + postponed += 1; + break + } + let result = Self::service_task(weight, now, when, agenda_index, *executed == 0, task); + agenda[agenda_index as usize] = match result { + Err((Unavailable, slot)) => { + dropped += 1; + slot + }, + Err((Overweight, slot)) => { + postponed += 1; + slot + }, + Ok(()) => { + *executed += 1; + None + }, + }; + } + if postponed > 0 || dropped > 0 { + Agenda::::insert(when, agenda); + } else { + Agenda::::remove(when); + } + postponed == 0 + } - *lookup = Some((new_time, new_index)); + /// Service (i.e. execute) the given task, being careful not to overflow the `weight` counter. + /// + /// This involves: + /// - removing and potentially replacing the `Lookup` entry for the task. + /// - realizing the task's call which can include a preimage lookup. + /// - Rescheduling the task for execution in a later agenda if periodic. 
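Before the body of `service_task`, a quick worked example of the periodic bookkeeping it performs: `maybe_periodic = Some((period, count))` means "run again every `period` blocks, `count` more times", so one execution decrements the count (the values here are arbitrary):

let maybe_periodic: Option<(u64, u32)> = Some((10, 3));
let after_one_run = match maybe_periodic {
	Some((period, count)) if count > 1 => Some((period, count - 1)),
	_ => None,
};
assert_eq!(after_one_run, Some((10, 2)));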
+ fn service_task( + weight: &mut WeightCounter, + now: T::BlockNumber, + when: T::BlockNumber, + agenda_index: u32, + is_first: bool, + mut task: ScheduledOf, + ) -> Result<(), (ServiceTaskError, Option>)> { + if let Some(ref id) = task.maybe_id { + Lookup::::remove(id); + } - Ok((new_time, new_index)) + let (call, lookup_len) = match T::Preimages::peek(&task.call) { + Ok(c) => c, + Err(_) => return Err((Unavailable, Some(task))), + }; + + weight.check_accrue(T::WeightInfo::service_task( + lookup_len.map(|x| x as usize), + task.maybe_id.is_some(), + task.maybe_periodic.is_some(), + )); + + match Self::execute_dispatch(weight, task.origin.clone(), call) { + Err(Unavailable) => { + debug_assert!(false, "Checked to exist with `peek`"); + Self::deposit_event(Event::CallUnavailable { + task: (when, agenda_index), + id: task.maybe_id, + }); + Err((Unavailable, Some(task))) + }, + Err(Overweight) if is_first => { + T::Preimages::drop(&task.call); + Self::deposit_event(Event::PermanentlyOverweight { + task: (when, agenda_index), + id: task.maybe_id, + }); + Err((Unavailable, Some(task))) + }, + Err(Overweight) => Err((Overweight, Some(task))), + Ok(result) => { + Self::deposit_event(Event::Dispatched { + task: (when, agenda_index), + id: task.maybe_id, + result, + }); + if let &Some((period, count)) = &task.maybe_periodic { + if count > 1 { + task.maybe_periodic = Some((period, count - 1)); + } else { + task.maybe_periodic = None; + } + let wake = now.saturating_add(period); + match Self::place_task(wake, task) { + Ok(_) => {}, + Err((_, task)) => { + // TODO: Leave task in storage somewhere for it to be rescheduled + // manually. + T::Preimages::drop(&task.call); + Self::deposit_event(Event::PeriodicFailed { + task: (when, agenda_index), + id: task.maybe_id, + }); + }, + } + } else { + T::Preimages::drop(&task.call); + } + Ok(()) }, - ) + } + } + + /// Make a dispatch to the given `call` from the given `origin`, ensuring that the `weight` + /// counter does not exceed its limit and that it is counted accurately (e.g. accounted using + /// post info if available). + /// + /// NOTE: Only the weight for this function will be counted (origin lookup, dispatch and the + /// call itself). + fn execute_dispatch( + weight: &mut WeightCounter, + origin: T::PalletsOrigin, + call: ::RuntimeCall, + ) -> Result { + let base_weight = match origin.as_system_ref() { + Some(&RawOrigin::Signed(_)) => T::WeightInfo::execute_dispatch_signed(), + _ => T::WeightInfo::execute_dispatch_unsigned(), + }; + let call_weight = call.get_dispatch_info().weight; + // We only allow a scheduled call if it cannot push the weight past the limit. 
+ let max_weight = base_weight.saturating_add(call_weight); + + if !weight.can_accrue(max_weight) { + return Err(Overweight) + } + + let dispatch_origin = origin.into(); + let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) { + Ok(post_info) => (post_info.actual_weight, Ok(())), + Err(error_and_info) => + (error_and_info.post_info.actual_weight, Err(error_and_info.error)), + }; + let call_weight = maybe_actual_call_weight.unwrap_or(call_weight); + weight.check_accrue(base_weight); + weight.check_accrue(call_weight); + Ok(result) } } -impl schedule::v2::Anon::RuntimeCall, T::PalletsOrigin> - for Pallet +impl> + schedule::v2::Anon::RuntimeCall, T::PalletsOrigin> for Pallet { type Address = TaskAddress; type Hash = T::Hash; @@ -877,6 +1152,8 @@ impl schedule::v2::Anon::RuntimeCall, T origin: T::PalletsOrigin, call: CallOrHashOf, ) -> Result { + let call = call.as_value().ok_or(DispatchError::CannotLookup)?; + let call = T::Preimages::bound(call)?.transmute(); Self::do_schedule(when, maybe_periodic, priority, origin, call) } @@ -896,8 +1173,8 @@ impl schedule::v2::Anon::RuntimeCall, T } } -impl schedule::v2::Named::RuntimeCall, T::PalletsOrigin> - for Pallet +impl> + schedule::v2::Named::RuntimeCall, T::PalletsOrigin> for Pallet { type Address = TaskAddress; type Hash = T::Hash; @@ -910,23 +1187,108 @@ impl schedule::v2::Named::RuntimeCall, origin: T::PalletsOrigin, call: CallOrHashOf, ) -> Result { - Self::do_schedule_named(id, when, maybe_periodic, priority, origin, call).map_err(|_| ()) + let call = call.as_value().ok_or(())?; + let call = T::Preimages::bound(call).map_err(|_| ())?.transmute(); + let name = blake2_256(&id[..]); + Self::do_schedule_named(name, when, maybe_periodic, priority, origin, call).map_err(|_| ()) } fn cancel_named(id: Vec) -> Result<(), ()> { - Self::do_cancel_named(None, id).map_err(|_| ()) + let name = blake2_256(&id[..]); + Self::do_cancel_named(None, name).map_err(|_| ()) } fn reschedule_named( id: Vec, when: DispatchTime, ) -> Result { - Self::do_reschedule_named(id, when) + let name = blake2_256(&id[..]); + Self::do_reschedule_named(name, when) } fn next_dispatch_time(id: Vec) -> Result { - Lookup::::get(id) + let name = blake2_256(&id[..]); + Lookup::::get(name) .and_then(|(when, index)| Agenda::::get(when).get(index as usize).map(|_| when)) .ok_or(()) } } + +impl schedule::v3::Anon::RuntimeCall, T::PalletsOrigin> + for Pallet +{ + type Address = TaskAddress; + + fn schedule( + when: DispatchTime, + maybe_periodic: Option>, + priority: schedule::Priority, + origin: T::PalletsOrigin, + call: Bounded<::RuntimeCall>, + ) -> Result { + Self::do_schedule(when, maybe_periodic, priority, origin, call) + } + + fn cancel((when, index): Self::Address) -> Result<(), DispatchError> { + Self::do_cancel(None, (when, index)).map_err(map_err_to_v3_err::) + } + + fn reschedule( + address: Self::Address, + when: DispatchTime, + ) -> Result { + Self::do_reschedule(address, when).map_err(map_err_to_v3_err::) + } + + fn next_dispatch_time((when, index): Self::Address) -> Result { + Agenda::::get(when) + .get(index as usize) + .ok_or(DispatchError::Unavailable) + .map(|_| when) + } +} + +use schedule::v3::TaskName; + +impl schedule::v3::Named::RuntimeCall, T::PalletsOrigin> + for Pallet +{ + type Address = TaskAddress; + + fn schedule_named( + id: TaskName, + when: DispatchTime, + maybe_periodic: Option>, + priority: schedule::Priority, + origin: T::PalletsOrigin, + call: Bounded<::RuntimeCall>, + ) -> Result { + Self::do_schedule_named(id, when, 
maybe_periodic, priority, origin, call) + } + + fn cancel_named(id: TaskName) -> Result<(), DispatchError> { + Self::do_cancel_named(None, id).map_err(map_err_to_v3_err::) + } + + fn reschedule_named( + id: TaskName, + when: DispatchTime, + ) -> Result { + Self::do_reschedule_named(id, when).map_err(map_err_to_v3_err::) + } + + fn next_dispatch_time(id: TaskName) -> Result { + Lookup::::get(id) + .and_then(|(when, index)| Agenda::::get(when).get(index as usize).map(|_| when)) + .ok_or(DispatchError::Unavailable) + } +} + +/// Maps a pallet error to an `schedule::v3` error. +fn map_err_to_v3_err(err: DispatchError) -> DispatchError { + if err == DispatchError::from(Error::::NotFound) { + DispatchError::Unavailable + } else { + err + } +} diff --git a/frame/scheduler/src/migration.rs b/frame/scheduler/src/migration.rs new file mode 100644 index 0000000000000..6769d20023196 --- /dev/null +++ b/frame/scheduler/src/migration.rs @@ -0,0 +1,402 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Migrations for the scheduler pallet. + +use super::*; +use frame_support::traits::OnRuntimeUpgrade; + +/// The log target. +const TARGET: &'static str = "runtime::scheduler::migration"; + +pub mod v1 { + use super::*; + use frame_support::pallet_prelude::*; + + #[frame_support::storage_alias] + pub(crate) type Agenda = StorageMap< + Pallet, + Twox64Concat, + ::BlockNumber, + Vec< + Option< + ScheduledV1<::RuntimeCall, ::BlockNumber>, + >, + >, + ValueQuery, + >; + + #[frame_support::storage_alias] + pub(crate) type Lookup = StorageMap< + Pallet, + Twox64Concat, + Vec, + TaskAddress<::BlockNumber>, + >; +} + +pub mod v2 { + use super::*; + use frame_support::pallet_prelude::*; + + #[frame_support::storage_alias] + pub(crate) type Agenda = StorageMap< + Pallet, + Twox64Concat, + ::BlockNumber, + Vec>>, + ValueQuery, + >; + + #[frame_support::storage_alias] + pub(crate) type Lookup = StorageMap< + Pallet, + Twox64Concat, + Vec, + TaskAddress<::BlockNumber>, + >; +} + +pub mod v3 { + use super::*; + use frame_support::pallet_prelude::*; + + #[frame_support::storage_alias] + pub(crate) type Agenda = StorageMap< + Pallet, + Twox64Concat, + ::BlockNumber, + Vec>>, + ValueQuery, + >; + + #[frame_support::storage_alias] + pub(crate) type Lookup = StorageMap< + Pallet, + Twox64Concat, + Vec, + TaskAddress<::BlockNumber>, + >; + + /// Migrate the scheduler pallet from V3 to V4. 
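How a runtime enables this storage upgrade is not part of the patch itself; presumably the new `migration::v3::MigrateToV4` is wired into the runtime's `Executive` migrations tuple along the following lines, where `Runtime`, `Block` and `AllPalletsWithSystem` are the usual runtime aliases:

pub type Migrations = (pallet_scheduler::migration::v3::MigrateToV4<Runtime>,);

pub type Executive = frame_executive::Executive<
	Runtime,
	Block,
	frame_system::ChainContext<Runtime>,
	Runtime,
	AllPalletsWithSystem,
	Migrations, // custom `OnRuntimeUpgrade` migrations
>;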
+ pub struct MigrateToV4(sp_std::marker::PhantomData); + + impl> OnRuntimeUpgrade for MigrateToV4 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + assert_eq!(StorageVersion::get::>(), 3, "Can only upgrade from version 3"); + + let agendas = Agenda::::iter_keys().count() as u32; + let decodable_agendas = Agenda::::iter_values().count() as u32; + if agendas != decodable_agendas { + // This is not necessarily an error, but can happen when there are Calls + // in an Agenda that are not valid anymore with the new runtime. + log::error!( + target: TARGET, + "Can only decode {} of {} agendas - others will be dropped", + decodable_agendas, + agendas + ); + } + log::info!(target: TARGET, "Trying to migrate {} agendas...", decodable_agendas); + + // Check that no agenda overflows `MaxScheduledPerBlock`. + let max_scheduled_per_block = T::MaxScheduledPerBlock::get() as usize; + for (block_number, agenda) in Agenda::::iter() { + if agenda.iter().cloned().filter_map(|s| s).count() > max_scheduled_per_block { + log::error!( + target: TARGET, + "Would truncate agenda of block {:?} from {} items to {} items.", + block_number, + agenda.len(), + max_scheduled_per_block, + ); + return Err("Agenda would overflow `MaxScheduledPerBlock`.") + } + } + // Check that bounding the calls will not overflow `MAX_LENGTH`. + let max_length = T::Preimages::MAX_LENGTH as usize; + for (block_number, agenda) in Agenda::::iter() { + for schedule in agenda.iter().cloned().filter_map(|s| s) { + match schedule.call { + frame_support::traits::schedule::MaybeHashed::Value(call) => { + let l = call.using_encoded(|c| c.len()); + if l > max_length { + log::error!( + target: TARGET, + "Call in agenda of block {:?} is too large: {} byte", + block_number, + l, + ); + return Err("Call is too large.") + } + }, + _ => (), + } + } + } + + Ok((decodable_agendas as u32).encode()) + } + + fn on_runtime_upgrade() -> Weight { + let version = StorageVersion::get::>(); + if version != 3 { + log::warn!( + target: TARGET, + "skipping v3 to v4 migration: executed on wrong storage version.\ + Expected version 3, found {:?}", + version, + ); + return T::DbWeight::get().reads(1) + } + + crate::Pallet::::migrate_v3_to_v4() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + assert_eq!(StorageVersion::get::>(), 4, "Must upgrade"); + + // Check that everything decoded fine. + for k in crate::Agenda::::iter_keys() { + assert!(crate::Agenda::::try_get(k).is_ok(), "Cannot decode V4 Agenda"); + } + + let old_agendas: u32 = + Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed"); + let new_agendas = crate::Agenda::::iter_keys().count() as u32; + if old_agendas != new_agendas { + // This is not necessarily an error, but can happen when there are Calls + // in an Agenda that are not valid anymore in the new runtime. + log::error!( + target: TARGET, + "Did not migrate all Agendas. Previous {}, Now {}", + old_agendas, + new_agendas, + ); + } else { + log::info!(target: TARGET, "Migrated {} agendas.", new_agendas); + } + + Ok(()) + } + } +} + +#[cfg(test)] +#[cfg(feature = "try-runtime")] +mod test { + use super::*; + use crate::mock::*; + use frame_support::Hashable; + use sp_std::borrow::Cow; + use substrate_test_utils::assert_eq_uvec; + + #[test] + #[allow(deprecated)] + fn migration_v3_to_v4_works() { + new_test_ext().execute_with(|| { + // Assume that we are at V3. + StorageVersion::new(3).put::(); + + // Call that will be bounded to a `Lookup`. 
+ let large_call = + RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 1024] }); + // Call that can be inlined. + let small_call = + RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 10] }); + // Call that is already hashed and can will be converted to `Legacy`. + let hashed_call = + RuntimeCall::System(frame_system::Call::remark { remark: vec![0; 2048] }); + let bound_hashed_call = Preimage::bound(hashed_call.clone()).unwrap(); + assert!(bound_hashed_call.lookup_needed()); + // A Call by hash that will fail to decode becomes `None`. + let trash_data = vec![255u8; 1024]; + let undecodable_hash = Preimage::note(Cow::Borrowed(&trash_data)).unwrap(); + + for i in 0..2u64 { + let k = i.twox_64_concat(); + let old = vec![ + Some(ScheduledV3Of:: { + maybe_id: None, + priority: i as u8 + 10, + call: small_call.clone().into(), + maybe_periodic: None, // 1 + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledV3Of:: { + maybe_id: Some(vec![i as u8; 32]), + priority: 123, + call: large_call.clone().into(), + maybe_periodic: Some((4u64, 20)), + origin: signed(i), + _phantom: PhantomData::::default(), + }), + Some(ScheduledV3Of:: { + maybe_id: Some(vec![255 - i as u8; 320]), + priority: 123, + call: MaybeHashed::Hash(bound_hashed_call.hash()), + maybe_periodic: Some((8u64, 10)), + origin: signed(i), + _phantom: PhantomData::::default(), + }), + Some(ScheduledV3Of:: { + maybe_id: Some(vec![i as u8; 320]), + priority: 123, + call: MaybeHashed::Hash(undecodable_hash.clone()), + maybe_periodic: Some((4u64, 20)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ]; + frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); + } + + let state = v3::MigrateToV4::::pre_upgrade().unwrap(); + let _w = v3::MigrateToV4::::on_runtime_upgrade(); + v3::MigrateToV4::::post_upgrade(state).unwrap(); + + let mut x = Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(); + x.sort_by_key(|x| x.0); + + let bound_large_call = Preimage::bound(large_call).unwrap(); + assert!(bound_large_call.lookup_needed()); + let bound_small_call = Preimage::bound(small_call).unwrap(); + assert!(!bound_small_call.lookup_needed()); + + let expected = vec![ + ( + 0, + vec![ + Some(ScheduledOf:: { + maybe_id: None, + priority: 10, + call: bound_small_call.clone(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&[0u8; 32])), + priority: 123, + call: bound_large_call.clone(), + maybe_periodic: Some((4u64, 20)), + origin: signed(0), + _phantom: PhantomData::::default(), + }), + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&[255u8; 320])), + priority: 123, + call: Bounded::from_legacy_hash(bound_hashed_call.hash()), + maybe_periodic: Some((8u64, 10)), + origin: signed(0), + _phantom: PhantomData::::default(), + }), + None, + ], + ), + ( + 1, + vec![ + Some(ScheduledOf:: { + maybe_id: None, + priority: 11, + call: bound_small_call.clone(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&[1u8; 32])), + priority: 123, + call: bound_large_call.clone(), + maybe_periodic: Some((4u64, 20)), + origin: signed(1), + _phantom: PhantomData::::default(), + }), + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&[254u8; 320])), + priority: 123, + call: Bounded::from_legacy_hash(bound_hashed_call.hash()), + maybe_periodic: Some((8u64, 10)), + origin: signed(1), 
+ _phantom: PhantomData::::default(), + }), + None, + ], + ), + ]; + for (outer, (i, j)) in x.iter().zip(expected.iter()).enumerate() { + assert_eq!(i.0, j.0); + for (inner, (x, y)) in i.1.iter().zip(j.1.iter()).enumerate() { + assert_eq!(x, y, "at index: outer {} inner {}", outer, inner); + } + } + assert_eq_uvec!(x, expected); + + assert_eq!(StorageVersion::get::(), 4); + }); + } + + #[test] + #[allow(deprecated)] + fn migration_v3_to_v4_too_large_calls_are_ignored() { + new_test_ext().execute_with(|| { + // Assume that we are at V3. + StorageVersion::new(3).put::(); + + let too_large_call = RuntimeCall::System(frame_system::Call::remark { + remark: vec![0; ::Preimages::MAX_LENGTH + 1], + }); + + let i = 0u64; + let k = i.twox_64_concat(); + let old = vec![Some(ScheduledV3Of:: { + maybe_id: None, + priority: 1, + call: too_large_call.clone().into(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + })]; + frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); + + // The pre_upgrade hook fails: + let err = v3::MigrateToV4::::pre_upgrade().unwrap_err(); + assert!(err.contains("Call is too large")); + // But the migration itself works: + let _w = v3::MigrateToV4::::on_runtime_upgrade(); + + let mut x = Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(); + x.sort_by_key(|x| x.0); + // The call becomes `None`. + let expected = vec![(0, vec![None])]; + assert_eq_uvec!(x, expected); + + assert_eq!(StorageVersion::get::(), 4); + }); + } + + fn signed(i: u64) -> OriginCaller { + system::RawOrigin::Signed(i).into() + } +} diff --git a/frame/scheduler/src/mock.rs b/frame/scheduler/src/mock.rs index 6aaad13e48183..61efdfb67b73e 100644 --- a/frame/scheduler/src/mock.rs +++ b/frame/scheduler/src/mock.rs @@ -124,7 +124,7 @@ parameter_types! { } impl system::Config for Test { type BaseCallFilter = BaseFilter; - type BlockWeights = (); + type BlockWeights = BlockWeights; type BlockLength = (); type DbWeight = RocksDbWeight; type RuntimeOrigin = RuntimeOrigin; @@ -151,10 +151,6 @@ impl system::Config for Test { impl logger::Config for Test { type RuntimeEvent = RuntimeEvent; } -parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; - pub const NoPreimagePostponement: Option = Some(2); -} ord_parameter_types! 
{ pub const One: u64 = 1; } @@ -164,11 +160,54 @@ impl pallet_preimage::Config for Test { type WeightInfo = (); type Currency = (); type ManagerOrigin = EnsureRoot; - type MaxSize = ConstU32<1024>; type BaseDeposit = (); type ByteDeposit = (); } +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn service_agendas_base() -> Weight { + Weight::from_ref_time(0b0000_0001) + } + fn service_agenda_base(i: u32) -> Weight { + Weight::from_ref_time((i << 8) as u64 + 0b0000_0010) + } + fn service_task_base() -> Weight { + Weight::from_ref_time(0b0000_0100) + } + fn service_task_periodic() -> Weight { + Weight::from_ref_time(0b0000_1100) + } + fn service_task_named() -> Weight { + Weight::from_ref_time(0b0001_0100) + } + fn service_task_fetched(s: u32) -> Weight { + Weight::from_ref_time((s << 8) as u64 + 0b0010_0100) + } + fn execute_dispatch_signed() -> Weight { + Weight::from_ref_time(0b0100_0000) + } + fn execute_dispatch_unsigned() -> Weight { + Weight::from_ref_time(0b1000_0000) + } + fn schedule(_s: u32) -> Weight { + Weight::from_ref_time(50) + } + fn cancel(_s: u32) -> Weight { + Weight::from_ref_time(50) + } + fn schedule_named(_s: u32) -> Weight { + Weight::from_ref_time(50) + } + fn cancel_named(_s: u32) -> Weight { + Weight::from_ref_time(50) + } +} +parameter_types! { + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + BlockWeights::get().max_block; +} + impl Config for Test { type RuntimeEvent = RuntimeEvent; type RuntimeOrigin = RuntimeOrigin; @@ -177,10 +216,9 @@ impl Config for Test { type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EitherOfDiverse, EnsureSignedBy>; type MaxScheduledPerBlock = ConstU32<10>; - type WeightInfo = (); + type WeightInfo = TestWeightInfo; type OriginPrivilegeCmp = EqualPrivilegeOnly; - type PreimageProvider = Preimage; - type NoPreimagePostponement = NoPreimagePostponement; + type Preimages = Preimage; } pub type LoggerCall = logger::Call; diff --git a/frame/scheduler/src/tests.rs b/frame/scheduler/src/tests.rs index f6db70ae42d49..033d787946709 100644 --- a/frame/scheduler/src/tests.rs +++ b/frame/scheduler/src/tests.rs @@ -23,7 +23,7 @@ use crate::mock::{ }; use frame_support::{ assert_err, assert_noop, assert_ok, - traits::{Contains, GetStorageVersion, OnInitialize, PreimageProvider}, + traits::{Contains, GetStorageVersion, OnInitialize, QueryPreimage, StorePreimage}, Hashable, }; use sp_runtime::traits::Hash; @@ -33,9 +33,15 @@ use substrate_test_utils::assert_eq_uvec; fn basic_scheduling_works() { new_test_ext().execute_with(|| { let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); - assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.into())); + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap() + )); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); @@ -49,51 +55,19 @@ fn basic_scheduling_works() { fn scheduling_with_preimages_works() { new_test_ext().execute_with(|| { let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); let hash = ::Hashing::hash_of(&call); - let hashed = MaybeHashed::Hash(hash); + let len = call.using_encoded(|x| x.len()) as u32; + let hashed = Preimage::pick(hash, 
len); assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); - assert!(Preimage::preimage_requested(&hash)); + assert!(Preimage::is_requested(&hash)); run_to_block(3); assert!(logger::log().is_empty()); run_to_block(4); - assert!(!Preimage::have_preimage(&hash)); - assert!(!Preimage::preimage_requested(&hash)); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - run_to_block(100); - assert_eq!(logger::log(), vec![(root(), 42u32)]); - }); -} - -#[test] -fn scheduling_with_preimage_postpones_correctly() { - new_test_ext().execute_with(|| { - let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); - let hash = ::Hashing::hash_of(&call); - let hashed = MaybeHashed::Hash(hash); - - assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), hashed)); - assert!(Preimage::preimage_requested(&hash)); - - run_to_block(4); - // #4 empty due to no preimage - assert!(logger::log().is_empty()); - - // Register preimage. - assert_ok!(Preimage::note_preimage(RuntimeOrigin::signed(0), call.encode())); - - run_to_block(5); - // #5 empty since postponement is 2 blocks. - assert!(logger::log().is_empty()); - - run_to_block(6); - // #6 is good. + assert!(!Preimage::len(&hash).is_some()); + assert!(!Preimage::is_requested(&hash)); assert_eq!(logger::log(), vec![(root(), 42u32)]); - assert!(!Preimage::have_preimage(&hash)); - assert!(!Preimage::preimage_requested(&hash)); - run_to_block(100); assert_eq!(logger::log(), vec![(root(), 42u32)]); }); @@ -104,10 +78,16 @@ fn schedule_after_works() { new_test_ext().execute_with(|| { run_to_block(2); let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); // This will schedule the call 3 blocks after the next block... so block 3 + 3 = 6 - assert_ok!(Scheduler::do_schedule(DispatchTime::After(3), None, 127, root(), call.into())); + assert_ok!(Scheduler::do_schedule( + DispatchTime::After(3), + None, + 127, + root(), + Preimage::bound(call).unwrap() + )); run_to_block(5); assert!(logger::log().is_empty()); run_to_block(6); @@ -122,9 +102,15 @@ fn schedule_after_zero_works() { new_test_ext().execute_with(|| { run_to_block(2); let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); - assert_ok!(Scheduler::do_schedule(DispatchTime::After(0), None, 127, root(), call.into())); + assert_ok!(Scheduler::do_schedule( + DispatchTime::After(0), + None, + 127, + root(), + Preimage::bound(call).unwrap() + )); // Will trigger on the next block. 
run_to_block(3); assert_eq!(logger::log(), vec![(root(), 42u32)]); @@ -142,8 +128,11 @@ fn periodic_scheduling_works() { Some((3, 3)), 127, root(), - RuntimeCall::Logger(logger::Call::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into() + Preimage::bound(RuntimeCall::Logger(logger::Call::log { + i: 42, + weight: Weight::from_ref_time(10) + })) + .unwrap() )); run_to_block(3); assert!(logger::log().is_empty()); @@ -166,10 +155,17 @@ fn periodic_scheduling_works() { fn reschedule_works() { new_test_ext().execute_with(|| { let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( - Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call.into()).unwrap(), + Scheduler::do_schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap() + ) + .unwrap(), (4, 0) ); @@ -198,16 +194,16 @@ fn reschedule_works() { fn reschedule_named_works() { new_test_ext().execute_with(|| { let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(4), None, 127, root(), - call.into(), + Preimage::bound(call).unwrap(), ) .unwrap(), (4, 0) @@ -216,13 +212,10 @@ fn reschedule_named_works() { run_to_block(3); assert!(logger::log().is_empty()); - assert_eq!( - Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), - (6, 0) - ); + assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)).unwrap(), (6, 0)); assert_noop!( - Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)), + Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)), Error::::RescheduleNoChange ); @@ -241,16 +234,16 @@ fn reschedule_named_works() { fn reschedule_named_perodic_works() { new_test_ext().execute_with(|| { let call = - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); assert!(!::BaseCallFilter::contains(&call)); assert_eq!( Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(4), Some((3, 3)), 127, root(), - call.into(), + Preimage::bound(call).unwrap(), ) .unwrap(), (4, 0) @@ -259,14 +252,8 @@ fn reschedule_named_perodic_works() { run_to_block(3); assert!(logger::log().is_empty()); - assert_eq!( - Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(5)).unwrap(), - (5, 0) - ); - assert_eq!( - Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), - (6, 0) - ); + assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(5)).unwrap(), (5, 0)); + assert_eq!(Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(6)).unwrap(), (6, 0)); run_to_block(5); assert!(logger::log().is_empty()); @@ -275,7 +262,7 @@ fn reschedule_named_perodic_works() { assert_eq!(logger::log(), vec![(root(), 42u32)]); assert_eq!( - Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(10)).unwrap(), + Scheduler::do_reschedule_named([1u8; 32], DispatchTime::At(10)).unwrap(), (10, 0) ); @@ -298,13 +285,16 @@ fn cancel_named_scheduling_works_with_normal_cancel() { new_test_ext().execute_with(|| { // at #4. 
Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(4), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); let i = Scheduler::do_schedule( @@ -312,13 +302,16 @@ fn cancel_named_scheduling_works_with_normal_cancel() { None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); run_to_block(3); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::do_cancel_named(None, 1u32.encode())); + assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32])); assert_ok!(Scheduler::do_cancel(None, i)); run_to_block(100); assert!(logger::log().is_empty()); @@ -330,35 +323,44 @@ fn cancel_named_periodic_scheduling_works() { new_test_ext().execute_with(|| { // at #4, every 3 blocks, 3 times. Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(4), Some((3, 3)), 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); // same id results in error. assert!(Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(4), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10) + })) + .unwrap(), ) .is_err()); // different id is ok. Scheduler::do_schedule_named( - 2u32.encode(), + [2u8; 32], DispatchTime::At(8), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), ) .unwrap(); run_to_block(3); @@ -366,7 +368,7 @@ fn cancel_named_periodic_scheduling_works() { run_to_block(4); assert_eq!(logger::log(), vec![(root(), 42u32)]); run_to_block(6); - assert_ok!(Scheduler::do_cancel_named(None, 1u32.encode())); + assert_ok!(Scheduler::do_cancel_named(None, [1u8; 32])); run_to_block(100); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); }); @@ -374,28 +376,23 @@ fn cancel_named_periodic_scheduling_works() { #[test] fn scheduler_respects_weight_limits() { + let max_weight: Weight = ::MaximumWeight::get(); new_test_ext().execute_with(|| { + let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 3 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: MaximumSchedulerWeight::get() / 2 - }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 3 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: MaximumSchedulerWeight::get() / 2 - }) - .into(), + Preimage::bound(call).unwrap(), )); // 69 and 42 do not fit together run_to_block(4); @@ -405,62 +402,128 @@ fn scheduler_respects_weight_limits() { }); } +/// Permanently overweight calls are not deleted but also not executed. 
#[test] -fn scheduler_respects_hard_deadlines_more() { +fn scheduler_does_not_delete_permanently_overweight_call() { + let max_weight: Weight = ::MaximumWeight::get(); new_test_ext().execute_with(|| { + let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, - 0, + 127, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: MaximumSchedulerWeight::get() / 2 - }) - .into(), + Preimage::bound(call).unwrap(), )); + // Never executes. + run_to_block(100); + assert_eq!(logger::log(), vec![]); + + // Assert the `PermanentlyOverweight` event. + assert_eq!( + System::events().last().unwrap().event, + crate::Event::PermanentlyOverweight { task: (4, 0), id: None }.into(), + ); + // The call is still in the agenda. + assert!(Agenda::::get(4)[0].is_some()); + }); +} + +#[test] +fn scheduler_handles_periodic_failure() { + let max_weight: Weight = ::MaximumWeight::get(); + let max_per_block = ::MaxScheduledPerBlock::get(); + + new_test_ext().execute_with(|| { + let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: (max_weight / 3) * 2 }); + let bound = Preimage::bound(call).unwrap(); + assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), - None, - 0, + Some((4, u32::MAX)), + 127, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: MaximumSchedulerWeight::get() / 2 - }) - .into(), + bound.clone(), + )); + // Executes 5 times till block 20. + run_to_block(20); + assert_eq!(logger::log().len(), 5); + + // Block 28 will already be full. + for _ in 0..max_per_block { + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(28), + None, + 120, + root(), + bound.clone(), + )); + } + + // Going to block 24 will emit a `PeriodicFailed` event. + run_to_block(24); + assert_eq!(logger::log().len(), 6); + + assert_eq!( + System::events().last().unwrap().event, + crate::Event::PeriodicFailed { task: (24, 0), id: None }.into(), + ); + }); +} + +#[test] +fn scheduler_handles_periodic_unavailable_preimage() { + let max_weight: Weight = ::MaximumWeight::get(); + + new_test_ext().execute_with(|| { + let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: (max_weight / 3) * 2 }); + let hash = ::Hashing::hash_of(&call); + let len = call.using_encoded(|x| x.len()) as u32; + let bound = Preimage::pick(hash, len); + assert_ok!(Preimage::note(call.encode().into())); + + assert_ok!(Scheduler::do_schedule( + DispatchTime::At(4), + Some((4, u32::MAX)), + 127, + root(), + bound.clone(), )); - // With base weights, 69 and 42 should not fit together, but do because of hard - // deadlines + // Executes 1 times till block 4. run_to_block(4); - assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 69u32)]); + assert_eq!(logger::log().len(), 1); + + // Unnote the preimage. + Preimage::unnote(&hash); + + // Does not ever execute again. + run_to_block(100); + assert_eq!(logger::log().len(), 1); + + // The preimage is not requested anymore. 
+ assert!(!Preimage::is_requested(&hash)); }); } #[test] fn scheduler_respects_priority_ordering() { + let max_weight: Weight = ::MaximumWeight::get(); new_test_ext().execute_with(|| { + let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 3 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 1, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: MaximumSchedulerWeight::get() / 2 - }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 3 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 0, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: MaximumSchedulerWeight::get() / 2 - }) - .into(), + Preimage::bound(call).unwrap(), )); run_to_block(4); assert_eq!(logger::log(), vec![(root(), 69u32), (root(), 42u32)]); @@ -470,35 +533,30 @@ fn scheduler_respects_priority_ordering() { #[test] fn scheduler_respects_priority_ordering_with_soft_deadlines() { new_test_ext().execute_with(|| { - let max_weight = MaximumSchedulerWeight::get() - <() as WeightInfo>::on_initialize(0); - let item_weight = - <() as WeightInfo>::on_initialize(1) - <() as WeightInfo>::on_initialize(0); + let max_weight: Weight = ::MaximumWeight::get(); + let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 5 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 255, root(), - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: max_weight / 2 - item_weight }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 5 * 2 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: max_weight / 2 - item_weight }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { i: 2600, weight: max_weight / 5 * 4 }); assert_ok!(Scheduler::do_schedule( DispatchTime::At(4), None, 126, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 2600, - weight: max_weight / 2 - item_weight + Weight::from_ref_time(1) - }) - .into(), + Preimage::bound(call).unwrap(), )); // 2600 does not fit with 69 or 42, but has higher priority, so will go through @@ -513,90 +571,96 @@ fn scheduler_respects_priority_ordering_with_soft_deadlines() { #[test] fn on_initialize_weight_is_correct() { new_test_ext().execute_with(|| { - let base_weight = <() as WeightInfo>::on_initialize(0); - let call_weight = MaximumSchedulerWeight::get() / 4; + let call_weight = Weight::from_ref_time(25); // Named + let call = RuntimeCall::Logger(LoggerCall::log { + i: 3, + weight: call_weight + Weight::from_ref_time(1), + }); assert_ok!(Scheduler::do_schedule_named( - 1u32.encode(), + [1u8; 32], DispatchTime::At(3), None, 255, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 3, - weight: call_weight + Weight::from_ref_time(1) - }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: call_weight + Weight::from_ref_time(2), + }); // Anon Periodic assert_ok!(Scheduler::do_schedule( DispatchTime::At(2), Some((1000, 3)), 128, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 42, - weight: call_weight + Weight::from_ref_time(2) - }) - .into(), + Preimage::bound(call).unwrap(), )); + let call = RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: call_weight + Weight::from_ref_time(3), + }); // Anon assert_ok!(Scheduler::do_schedule( 
DispatchTime::At(2), None, 127, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: call_weight + Weight::from_ref_time(3) - }) - .into(), + Preimage::bound(call).unwrap(), )); // Named Periodic + let call = RuntimeCall::Logger(LoggerCall::log { + i: 2600, + weight: call_weight + Weight::from_ref_time(4), + }); assert_ok!(Scheduler::do_schedule_named( - 2u32.encode(), + [2u8; 32], DispatchTime::At(1), Some((1000, 3)), 126, root(), - RuntimeCall::Logger(LoggerCall::log { - i: 2600, - weight: call_weight + Weight::from_ref_time(4) - }) - .into(), + Preimage::bound(call).unwrap(), )); // Will include the named periodic only - let actual_weight = Scheduler::on_initialize(1); assert_eq!( - actual_weight, - base_weight + - call_weight + Weight::from_ref_time(4) + - <() as MarginalWeightInfo>::item(true, true, Some(false)) + Scheduler::on_initialize(1), + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(1) + + ::service_task(None, true, true) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(4) ); + assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32)]); // Will include anon and anon periodic - let actual_weight = Scheduler::on_initialize(2); assert_eq!( - actual_weight, - base_weight + - call_weight + Weight::from_ref_time(2) + - <() as MarginalWeightInfo>::item(false, false, Some(false)) + + Scheduler::on_initialize(2), + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(2) + + ::service_task(None, false, true) + + TestWeightInfo::execute_dispatch_unsigned() + call_weight + Weight::from_ref_time(3) + - <() as MarginalWeightInfo>::item(true, false, Some(false)) + ::service_task(None, false, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(2) ); + assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); // Will include named only - let actual_weight = Scheduler::on_initialize(3); assert_eq!( - actual_weight, - base_weight + - call_weight + Weight::from_ref_time(1) + - <() as MarginalWeightInfo>::item(false, true, Some(false)) + Scheduler::on_initialize(3), + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(1) + + ::service_task(None, true, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_ref_time(1) ); + assert_eq!(IncompleteSince::::get(), None); assert_eq!( logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32), (root(), 3u32)] @@ -604,35 +668,33 @@ fn on_initialize_weight_is_correct() { // Will contain none let actual_weight = Scheduler::on_initialize(4); - assert_eq!(actual_weight, base_weight); + assert_eq!( + actual_weight, + TestWeightInfo::service_agendas_base() + TestWeightInfo::service_agenda_base(0) + ); }); } #[test] fn root_calls_works() { new_test_ext().execute_with(|| { - let call = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), - ); - let call2 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), + let call = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })); + assert_ok!( + Scheduler::schedule_named(RuntimeOrigin::root(), [1u8; 32], 4, None, 127, call,) ); - 
assert_ok!(Scheduler::schedule_named( - RuntimeOrigin::root(), - 1u32.encode(), - 4, - None, - 127, - call, - )); assert_ok!(Scheduler::schedule(RuntimeOrigin::root(), 4, None, 127, call2)); run_to_block(3); // Scheduled calls are in the agenda. assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named(RuntimeOrigin::root(), 1u32.encode())); + assert_ok!(Scheduler::cancel_named(RuntimeOrigin::root(), [1u8; 32])); assert_ok!(Scheduler::cancel(RuntimeOrigin::root(), 4, 1)); // Scheduled calls are made NONE, so should not effect state run_to_block(100); @@ -645,29 +707,30 @@ fn fails_to_schedule_task_in_the_past() { new_test_ext().execute_with(|| { run_to_block(3); - let call1 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), - ); - let call2 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), - ); - let call3 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), - ); - assert_err!( - Scheduler::schedule_named(RuntimeOrigin::root(), 1u32.encode(), 2, None, 127, call1), + let call1 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })); + let call3 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })); + + assert_noop!( + Scheduler::schedule_named(RuntimeOrigin::root(), [1u8; 32], 2, None, 127, call1), Error::::TargetBlockNumberInPast, ); - assert_err!( + assert_noop!( Scheduler::schedule(RuntimeOrigin::root(), 2, None, 127, call2), Error::::TargetBlockNumberInPast, ); - assert_err!( + assert_noop!( Scheduler::schedule(RuntimeOrigin::root(), 3, None, 127, call3), Error::::TargetBlockNumberInPast, ); @@ -675,19 +738,19 @@ fn fails_to_schedule_task_in_the_past() { } #[test] -fn should_use_orign() { +fn should_use_origin() { new_test_ext().execute_with(|| { - let call = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), - ); - let call2 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), - ); + let call = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), - 1u32.encode(), + [1u8; 32], 4, None, 127, @@ -698,7 +761,7 @@ fn should_use_orign() { // Scheduled calls are in the agenda. 
assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named(system::RawOrigin::Signed(1).into(), 1u32.encode())); + assert_ok!(Scheduler::cancel_named(system::RawOrigin::Signed(1).into(), [1u8; 32])); assert_ok!(Scheduler::cancel(system::RawOrigin::Signed(1).into(), 4, 1)); // Scheduled calls are made NONE, so should not effect state run_to_block(100); @@ -707,20 +770,20 @@ fn should_use_orign() { } #[test] -fn should_check_orign() { +fn should_check_origin() { new_test_ext().execute_with(|| { - let call = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 69, weight: Weight::from_ref_time(1000) }) - .into(), - ); - let call2 = Box::new( - RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }) - .into(), - ); + let call = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log { + i: 42, + weight: Weight::from_ref_time(10), + })); assert_noop!( Scheduler::schedule_named( system::RawOrigin::Signed(2).into(), - 1u32.encode(), + [1u8; 32], 4, None, 127, @@ -736,25 +799,19 @@ fn should_check_orign() { } #[test] -fn should_check_orign_for_cancel() { +fn should_check_origin_for_cancel() { new_test_ext().execute_with(|| { - let call = Box::new( - RuntimeCall::Logger(LoggerCall::log_without_filter { - i: 69, - weight: Weight::from_ref_time(1000), - }) - .into(), - ); - let call2 = Box::new( - RuntimeCall::Logger(LoggerCall::log_without_filter { - i: 42, - weight: Weight::from_ref_time(1000), - }) - .into(), - ); + let call = Box::new(RuntimeCall::Logger(LoggerCall::log_without_filter { + i: 69, + weight: Weight::from_ref_time(10), + })); + let call2 = Box::new(RuntimeCall::Logger(LoggerCall::log_without_filter { + i: 42, + weight: Weight::from_ref_time(10), + })); assert_ok!(Scheduler::schedule_named( system::RawOrigin::Signed(1).into(), - 1u32.encode(), + [1u8; 32], 4, None, 127, @@ -766,14 +823,11 @@ fn should_check_orign_for_cancel() { assert_eq!(Agenda::::get(4).len(), 2); assert!(logger::log().is_empty()); assert_noop!( - Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), 1u32.encode()), + Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), [1u8; 32]), BadOrigin ); assert_noop!(Scheduler::cancel(system::RawOrigin::Signed(2).into(), 4, 1), BadOrigin); - assert_noop!( - Scheduler::cancel_named(system::RawOrigin::Root.into(), 1u32.encode()), - BadOrigin - ); + assert_noop!(Scheduler::cancel_named(system::RawOrigin::Root.into(), [1u8; 32]), BadOrigin); assert_noop!(Scheduler::cancel(system::RawOrigin::Root.into(), 4, 1), BadOrigin); run_to_block(5); assert_eq!( @@ -787,7 +841,7 @@ fn should_check_orign_for_cancel() { } #[test] -fn migration_to_v3_works() { +fn migration_to_v4_works() { new_test_ext().execute_with(|| { for i in 0..3u64 { let k = i.twox_64_concat(); @@ -807,7 +861,7 @@ fn migration_to_v3_works() { priority: 123, call: RuntimeCall::Logger(LoggerCall::log { i: 69, - weight: Weight::from_ref_time(1000), + weight: Weight::from_ref_time(10), }), maybe_periodic: Some((456u64, 10)), }), @@ -815,103 +869,109 @@ fn migration_to_v3_works() { frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); } - Scheduler::migrate_v1_to_v3(); - - assert_eq_uvec!( - Agenda::::iter().collect::>(), - vec![ - ( - 0, - vec![ - Some(ScheduledV3Of:: { - maybe_id: None, - priority: 10, - call: RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100) - }) - 
.into(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV3Of:: { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ] - ), - ( - 1, - vec![ - Some(ScheduledV3Of:: { - maybe_id: None, - priority: 11, - call: RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100) - }) - .into(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV3Of:: { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ] - ), - ( - 2, - vec![ - Some(ScheduledV3Of:: { - maybe_id: None, - priority: 12, - call: RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_ref_time(100) - }) - .into(), - maybe_periodic: None, - origin: root(), - _phantom: PhantomData::::default(), - }), - None, - Some(ScheduledV3Of:: { - maybe_id: Some(b"test".to_vec()), - priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), - maybe_periodic: Some((456u64, 10)), - origin: root(), - _phantom: PhantomData::::default(), - }), - ] - ) - ] - ); + Scheduler::migrate_v1_to_v4(); + + let mut x = Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(); + x.sort_by_key(|x| x.0); + let expected = vec![ + ( + 0, + vec![ + Some(ScheduledOf:: { + maybe_id: None, + priority: 10, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + })) + .unwrap(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&b"test"[..])), + priority: 123, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ], + ), + ( + 1, + vec![ + Some(ScheduledOf:: { + maybe_id: None, + priority: 11, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + })) + .unwrap(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&b"test"[..])), + priority: 123, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + }), + ], + ), + ( + 2, + vec![ + Some(ScheduledOf:: { + maybe_id: None, + priority: 12, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_ref_time(100), + })) + .unwrap(), + maybe_periodic: None, + origin: root(), + _phantom: PhantomData::::default(), + }), + None, + Some(ScheduledOf:: { + maybe_id: Some(blake2_256(&b"test"[..])), + priority: 123, + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_ref_time(10), + })) + .unwrap(), + maybe_periodic: Some((456u64, 10)), + origin: root(), + _phantom: PhantomData::::default(), + 
}), + ], + ), + ]; + for (i, j) in x.iter().zip(expected.iter()) { + assert_eq!(i.0, j.0); + for (x, y) in i.1.iter().zip(j.1.iter()) { + assert_eq!(x, y); + } + } + assert_eq_uvec!(x, expected); assert_eq!(Scheduler::current_storage_version(), 3); }); @@ -922,29 +982,29 @@ fn test_migrate_origin() { new_test_ext().execute_with(|| { for i in 0..3u64 { let k = i.twox_64_concat(); - let old: Vec, u64, u32, u64>>> = vec![ + let old: Vec, u64, u32, u64>>> = vec![ Some(Scheduled { maybe_id: None, priority: i as u8 + 10, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100), - }) - .into(), + })) + .unwrap(), origin: 3u32, maybe_periodic: None, _phantom: Default::default(), }), None, Some(Scheduled { - maybe_id: Some(b"test".to_vec()), + maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, origin: 2u32, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 69, - weight: Weight::from_ref_time(1000), - }) - .into(), + weight: Weight::from_ref_time(10), + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), _phantom: Default::default(), }), @@ -965,32 +1025,32 @@ fn test_migrate_origin() { Scheduler::migrate_origin::(); assert_eq_uvec!( - Agenda::::iter().collect::>(), + Agenda::::iter().map(|x| (x.0, x.1.into_inner())).collect::>(), vec![ ( 0, vec![ - Some(ScheduledV2::, u64, OriginCaller, u64> { + Some(ScheduledOf:: { maybe_id: None, priority: 10, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) - }) - .into(), + })) + .unwrap(), maybe_periodic: None, origin: system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), }), None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), + Some(Scheduled { + maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), + weight: Weight::from_ref_time(10) + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -1000,27 +1060,27 @@ fn test_migrate_origin() { ( 1, vec![ - Some(ScheduledV2 { + Some(Scheduled { maybe_id: None, priority: 11, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) - }) - .into(), + })) + .unwrap(), maybe_periodic: None, origin: system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), }), None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), + Some(Scheduled { + maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), + weight: Weight::from_ref_time(10) + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -1030,27 +1090,27 @@ fn test_migrate_origin() { ( 2, vec![ - Some(ScheduledV2 { + Some(Scheduled { maybe_id: None, priority: 12, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 96, weight: Weight::from_ref_time(100) - }) - .into(), + })) + .unwrap(), maybe_periodic: None, origin: 
system::RawOrigin::Root.into(), _phantom: PhantomData::::default(), }), None, - Some(ScheduledV2 { - maybe_id: Some(b"test".to_vec()), + Some(Scheduled { + maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - call: RuntimeCall::Logger(LoggerCall::log { + call: Preimage::bound(RuntimeCall::Logger(LoggerCall::log { i: 69, - weight: Weight::from_ref_time(1000) - }) - .into(), + weight: Weight::from_ref_time(10) + })) + .unwrap(), maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), _phantom: PhantomData::::default(), @@ -1061,3 +1121,649 @@ fn test_migrate_origin() { ); }); } + +#[test] +fn postponed_named_task_cannot_be_rescheduled() { + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(1000) }); + let hash = ::Hashing::hash_of(&call); + let len = call.using_encoded(|x| x.len()) as u32; + let hashed = Preimage::pick(hash, len); + let name: [u8; 32] = hash.as_ref().try_into().unwrap(); + + let address = Scheduler::do_schedule_named( + name, + DispatchTime::At(4), + None, + 127, + root(), + hashed.clone(), + ) + .unwrap(); + assert!(Preimage::is_requested(&hash)); + assert!(Lookup::::contains_key(name)); + + // Run to a very large block. + run_to_block(10); + // It was not executed. + assert!(logger::log().is_empty()); + assert!(Preimage::is_requested(&hash)); + // Postponing removes the lookup. + assert!(!Lookup::::contains_key(name)); + + // The agenda still contains the call. + let agenda = Agenda::::iter().collect::>(); + assert_eq!(agenda.len(), 1); + assert_eq!( + agenda[0].1, + vec![Some(Scheduled { + maybe_id: Some(name), + priority: 127, + call: hashed, + maybe_periodic: None, + origin: root().into(), + _phantom: Default::default(), + })] + ); + + // Finally add the preimage. + assert_ok!(Preimage::note(call.encode().into())); + run_to_block(1000); + // It did not execute. + assert!(logger::log().is_empty()); + assert!(Preimage::is_requested(&hash)); + + // Manually re-schedule the call by name does not work. + assert_err!( + Scheduler::do_reschedule_named(name, DispatchTime::At(1001)), + Error::::NotFound + ); + // Manually re-scheduling the call by address errors. + assert_err!( + Scheduler::do_reschedule(address, DispatchTime::At(1001)), + Error::::Named + ); + }); +} + +/// Using the scheduler as `v3::Anon` works. +#[test] +fn scheduler_v3_anon_basic_works() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + + // Schedule a call. + let _address = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + // Executes in block 4. + run_to_block(4); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // ... but not again. + run_to_block(100); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + }); +} + +#[test] +fn scheduler_v3_anon_cancel_works() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + + // Schedule a call. + let address = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + // Cancel the call. + assert_ok!(>::cancel(address)); + // It did not get executed. 
+ run_to_block(100); + assert!(logger::log().is_empty()); + // Cannot cancel again. + assert_err!(>::cancel(address), DispatchError::Unavailable); + }); +} + +#[test] +fn scheduler_v3_anon_reschedule_works() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + + // Schedule a call. + let address = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + + // Cannot re-schedule into the same block. + assert_noop!( + >::reschedule(address, DispatchTime::At(4)), + Error::::RescheduleNoChange + ); + // Cannot re-schedule into the past. + assert_noop!( + >::reschedule(address, DispatchTime::At(3)), + Error::::TargetBlockNumberInPast + ); + // Re-schedule to block 5. + assert_ok!(>::reschedule(address, DispatchTime::At(5))); + // Scheduled for block 5. + run_to_block(4); + assert!(logger::log().is_empty()); + run_to_block(5); + // Does execute in block 5. + assert_eq!(logger::log(), vec![(root(), 42)]); + // Cannot re-schedule executed task. + assert_noop!( + >::reschedule(address, DispatchTime::At(10)), + DispatchError::Unavailable + ); + }); +} + +/// Cancelling a call and then scheduling a second call for the same +/// block results in different addresses. +#[test] +fn scheduler_v3_anon_schedule_does_not_resuse_addr() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + + // Schedule both calls. + let addr_1 = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call.clone()).unwrap(), + ) + .unwrap(); + // Cancel the call. + assert_ok!(>::cancel(addr_1)); + let addr_2 = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(); + + // Should not re-use the address. + assert!(addr_1 != addr_2); + }); +} + +#[test] +fn scheduler_v3_anon_next_schedule_time_works() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + + // Schedule a call. + let address = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + + // Scheduled for block 4. + assert_eq!(>::next_dispatch_time(address), Ok(4)); + // Block 4 executes it. + run_to_block(4); + assert_eq!(logger::log(), vec![(root(), 42)]); + + // It has no dispatch time anymore. + assert_noop!( + >::next_dispatch_time(address), + DispatchError::Unavailable + ); + }); +} + +/// Re-scheduling a task changes its next dispatch time. +#[test] +fn scheduler_v3_anon_reschedule_and_next_schedule_time_work() { + use frame_support::traits::schedule::v3::Anon; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + + // Schedule a call. + let old_address = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. 
+ assert!(logger::log().is_empty()); + + // Scheduled for block 4. + assert_eq!(>::next_dispatch_time(old_address), Ok(4)); + // Re-schedule to block 5. + let address = + >::reschedule(old_address, DispatchTime::At(5)).unwrap(); + assert!(address != old_address); + // Scheduled for block 5. + assert_eq!(>::next_dispatch_time(address), Ok(5)); + + // Block 4 does nothing. + run_to_block(4); + assert!(logger::log().is_empty()); + // Block 5 executes it. + run_to_block(5); + assert_eq!(logger::log(), vec![(root(), 42)]); + }); +} + +#[test] +fn scheduler_v3_anon_schedule_agenda_overflows() { + use frame_support::traits::schedule::v3::Anon; + let max: u32 = ::MaxScheduledPerBlock::get(); + + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + + // Schedule the maximal number allowed per block. + for _ in 0..max { + >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + } + + // One more time and it errors. + assert_noop!( + >::schedule(DispatchTime::At(4), None, 127, root(), bound,), + DispatchError::Exhausted + ); + + run_to_block(4); + // All scheduled calls are executed. + assert_eq!(logger::log().len() as u32, max); + }); +} + +/// Cancelling and scheduling does not overflow the agenda but fills holes. +#[test] +fn scheduler_v3_anon_cancel_and_schedule_fills_holes() { + use frame_support::traits::schedule::v3::Anon; + let max: u32 = ::MaxScheduledPerBlock::get(); + assert!(max > 3, "This test only makes sense for MaxScheduledPerBlock > 3"); + + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + let mut addrs = Vec::<_>::default(); + + // Schedule the maximal number allowed per block. + for _ in 0..max { + addrs.push( + >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(), + ); + } + // Cancel three of them. + for addr in addrs.into_iter().take(3) { + >::cancel(addr).unwrap(); + } + // Schedule three new ones. + for i in 0..3 { + let (_block, index) = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + assert_eq!(i, index); + } + + run_to_block(4); + // Maximum number of calls are executed. + assert_eq!(logger::log().len() as u32, max); + }); +} + +/// Re-scheduling does not overflow the agenda but fills holes. +#[test] +fn scheduler_v3_anon_reschedule_fills_holes() { + use frame_support::traits::schedule::v3::Anon; + let max: u32 = ::MaxScheduledPerBlock::get(); + assert!(max > 3, "pre-condition: This test only makes sense for MaxScheduledPerBlock > 3"); + + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + let mut addrs = Vec::<_>::default(); + + // Schedule the maximal number allowed per block. + for _ in 0..max { + addrs.push( + >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(), + ); + } + let mut new_addrs = Vec::<_>::default(); + // Reversed last three elements of block 4. + let last_three = addrs.into_iter().rev().take(3).collect::>(); + // Re-schedule three of them to block 5. 
+ for addr in last_three.iter().cloned() { + new_addrs + .push(>::reschedule(addr, DispatchTime::At(5)).unwrap()); + } + // Re-scheduling them back into block 3 should result in the same addrs. + for (old, want) in new_addrs.into_iter().zip(last_three.into_iter().rev()) { + let new = >::reschedule(old, DispatchTime::At(4)).unwrap(); + assert_eq!(new, want); + } + + run_to_block(4); + // Maximum number of calls are executed. + assert_eq!(logger::log().len() as u32, max); + }); +} + +/// Re-scheduling into the same block produces a different address +/// if there is still space in the agenda. +#[test] +fn scheduler_v3_anon_reschedule_does_not_resuse_addr_if_agenda_not_full() { + use frame_support::traits::schedule::v3::Anon; + let max: u32 = ::MaxScheduledPerBlock::get(); + assert!(max > 1, "This test only makes sense for MaxScheduledPerBlock > 1"); + + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + + // Schedule both calls. + let addr_1 = >::schedule( + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call.clone()).unwrap(), + ) + .unwrap(); + // Cancel the call. + assert_ok!(>::cancel(addr_1)); + let addr_2 = >::schedule( + DispatchTime::At(5), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(); + // Re-schedule `call` to block 4. + let addr_3 = >::reschedule(addr_2, DispatchTime::At(4)).unwrap(); + + // Should not re-use the address. + assert!(addr_1 != addr_3); + }); +} + +/// The scheduler can be used as `v3::Named` trait. +#[test] +fn scheduler_v3_named_basic_works() { + use frame_support::traits::schedule::v3::Named; + + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let name = [1u8; 32]; + + // Schedule a call. + let _address = >::schedule_named( + name, + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + // Executes in block 4. + run_to_block(4); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + // ... but not again. + run_to_block(100); + assert_eq!(logger::log(), vec![(root(), 42u32)]); + }); +} + +/// A named task can be cancelled by its name. +#[test] +fn scheduler_v3_named_cancel_named_works() { + use frame_support::traits::schedule::v3::Named; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + let name = [1u8; 32]; + + // Schedule a call. + >::schedule_named( + name, + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + // Cancel the call by name. + assert_ok!(>::cancel_named(name)); + // It did not get executed. + run_to_block(100); + assert!(logger::log().is_empty()); + // Cannot cancel again. + assert_noop!(>::cancel_named(name), DispatchError::Unavailable); + }); +} + +/// A named task can also be cancelled by its address. +#[test] +fn scheduler_v3_named_cancel_without_name_works() { + use frame_support::traits::schedule::v3::{Anon, Named}; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + let name = [1u8; 32]; + + // Schedule a call. 
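// Editorial sketch, not part of the patch: `v3::Named` (exercised in the surrounding tests)
// identifies tasks by a 32-byte `TaskName`. A common way to derive a stable,
// collision-resistant name is to hash a pallet-specific prefix together with a domain
// identifier; the helper below is illustrative only.
use codec::Encode;
use frame_support::traits::schedule::v3::TaskName;
use sp_io::hashing::blake2_256;

/// Derive a deterministic task name from a prefix and an id.
fn task_name(prefix: &[u8], id: u64) -> TaskName {
	blake2_256(&(prefix, id).encode())
}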
+ let address = >::schedule_named( + name, + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + // Cancel the call by address. + assert_ok!(>::cancel(address)); + // It did not get executed. + run_to_block(100); + assert!(logger::log().is_empty()); + // Cannot cancel again. + assert_err!(>::cancel(address), DispatchError::Unavailable); + }); +} + +/// A named task can be re-scheduled by its name but not by its address. +#[test] +fn scheduler_v3_named_reschedule_named_works() { + use frame_support::traits::schedule::v3::{Anon, Named}; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let name = [1u8; 32]; + + // Schedule a call. + let address = >::schedule_named( + name, + DispatchTime::At(4), + None, + 127, + root(), + Preimage::bound(call).unwrap(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + + // Cannot re-schedule by address. + assert_noop!( + >::reschedule(address, DispatchTime::At(10)), + Error::::Named, + ); + // Cannot re-schedule into the same block. + assert_noop!( + >::reschedule_named(name, DispatchTime::At(4)), + Error::::RescheduleNoChange + ); + // Cannot re-schedule into the past. + assert_noop!( + >::reschedule_named(name, DispatchTime::At(3)), + Error::::TargetBlockNumberInPast + ); + // Re-schedule to block 5. + assert_ok!(>::reschedule_named(name, DispatchTime::At(5))); + // Scheduled for block 5. + run_to_block(4); + assert!(logger::log().is_empty()); + run_to_block(5); + // Does execute in block 5. + assert_eq!(logger::log(), vec![(root(), 42)]); + // Cannot re-schedule executed task. + assert_noop!( + >::reschedule_named(name, DispatchTime::At(10)), + DispatchError::Unavailable + ); + // Also not by address. + assert_noop!( + >::reschedule(address, DispatchTime::At(10)), + DispatchError::Unavailable + ); + }); +} + +#[test] +fn scheduler_v3_named_next_schedule_time_works() { + use frame_support::traits::schedule::v3::{Anon, Named}; + new_test_ext().execute_with(|| { + let call = + RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_ref_time(10) }); + let bound = Preimage::bound(call).unwrap(); + let name = [1u8; 32]; + + // Schedule a call. + let address = >::schedule_named( + name, + DispatchTime::At(4), + None, + 127, + root(), + bound.clone(), + ) + .unwrap(); + + run_to_block(3); + // Did not execute till block 3. + assert!(logger::log().is_empty()); + + // Scheduled for block 4. + assert_eq!(>::next_dispatch_time(name), Ok(4)); + // Also works by address. + assert_eq!(>::next_dispatch_time(address), Ok(4)); + // Block 4 executes it. + run_to_block(4); + assert_eq!(logger::log(), vec![(root(), 42)]); + + // It has no dispatch time anymore. + assert_noop!( + >::next_dispatch_time(name), + DispatchError::Unavailable + ); + // Also not by address. + assert_noop!( + >::next_dispatch_time(address), + DispatchError::Unavailable + ); + }); +} diff --git a/frame/scheduler/src/weights.rs b/frame/scheduler/src/weights.rs index afbcf9373b2de..cb72fe3e2fdda 100644 --- a/frame/scheduler/src/weights.rs +++ b/frame/scheduler/src/weights.rs @@ -18,22 +18,24 @@ //! Autogenerated weights for pallet_scheduler //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2022-05-23, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2022-10-03, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/substrate +// /home/benchbot/cargo_target_dir/production/substrate // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_scheduler // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --template=./.maintain/frame-weight-template.hbs +// --heap-pages=4096 +// --pallet=pallet_scheduler +// --chain=dev // --output=./frame/scheduler/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -44,16 +46,14 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_scheduler. pub trait WeightInfo { - fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight; - fn on_initialize_named_resolved(s: u32, ) -> Weight; - fn on_initialize_periodic_resolved(s: u32, ) -> Weight; - fn on_initialize_resolved(s: u32, ) -> Weight; - fn on_initialize_named_aborted(s: u32, ) -> Weight; - fn on_initialize_aborted(s: u32, ) -> Weight; - fn on_initialize_periodic_named(s: u32, ) -> Weight; - fn on_initialize_periodic(s: u32, ) -> Weight; - fn on_initialize_named(s: u32, ) -> Weight; - fn on_initialize(s: u32, ) -> Weight; + fn service_agendas_base() -> Weight; + fn service_agenda_base(s: u32, ) -> Weight; + fn service_task_base() -> Weight; + fn service_task_fetched(s: u32, ) -> Weight; + fn service_task_named() -> Weight; + fn service_task_periodic() -> Weight; + fn execute_dispatch_signed() -> Weight; + fn execute_dispatch_unsigned() -> Weight; fn schedule(s: u32, ) -> Weight; fn cancel(s: u32, ) -> Weight; fn schedule_named(s: u32, ) -> Weight; @@ -63,149 +63,84 @@ pub trait WeightInfo { /// Weights for pallet_scheduler using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(9_994_000 as u64) - // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(19_843_000 as u64).saturating_mul(s as u64)) + // Storage: Scheduler IncompleteSince (r:1 w:1) + fn service_agendas_base() -> Weight { + Weight::from_ref_time(4_992_000 as u64) .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(s as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((4 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(10_318_000 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(15_451_000 as u64).saturating_mul(s as u64)) + /// The range of component `s` is `[0, 512]`. 
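// Editorial sketch, not part of the patch: the regenerated weights replace the coarse
// `on_initialize_*` functions with finer-grained pieces that the pallet composes at run time.
// A hedged sketch of that kind of composition (the pallet's actual accounting may differ in
// detail); assumes the `Weight` and `WeightInfo` items already in scope in this file.
fn example_service_weight<W: WeightInfo>(agenda_len: u32) -> Weight {
	W::service_agendas_base()
		.saturating_add(W::service_agenda_base(agenda_len))
		.saturating_add(W::service_task_base().saturating_mul(agenda_len as u64))
}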
+ fn service_agenda_base(s: u32, ) -> Weight { + Weight::from_ref_time(4_320_000 as u64) + // Standard Error: 619 + .saturating_add(Weight::from_ref_time(336_713 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(s as u64))) .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(s as u64))) } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - fn on_initialize_periodic_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_675_000 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(17_019_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((3 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((3 as u64).saturating_mul(s as u64))) + fn service_task_base() -> Weight { + Weight::from_ref_time(10_864_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - fn on_initialize_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_934_000 as u64) - // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(14_134_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((2 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(s as u64))) - } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:0) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(7_279_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(5_388_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().writes(2 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) - } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:0) - fn on_initialize_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(8_619_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_969_000 as u64).saturating_mul(s as u64)) + /// The range of component `s` is `[128, 4194304]`. 
+ fn service_task_fetched(s: u32, ) -> Weight { + Weight::from_ref_time(24_586_000 as u64) + // Standard Error: 1 + .saturating_add(Weight::from_ref_time(1_138 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) .saturating_add(T::DbWeight::get().writes(2 as u64)) } - // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_periodic_named(s: u32, ) -> Weight { - Weight::from_ref_time(16_129_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(9_772_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + fn service_task_named() -> Weight { + Weight::from_ref_time(13_127_000 as u64) .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((2 as u64).saturating_mul(s as u64))) } - // Storage: Scheduler Agenda (r:2 w:2) - fn on_initialize_periodic(s: u32, ) -> Weight { - Weight::from_ref_time(15_785_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(7_208_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().reads((1 as u64).saturating_mul(s as u64))) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + fn service_task_periodic() -> Weight { + Weight::from_ref_time(11_053_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named(s: u32, ) -> Weight { - Weight::from_ref_time(15_778_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(5_597_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) - .saturating_add(T::DbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + fn execute_dispatch_signed() -> Weight { + Weight::from_ref_time(4_158_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) - fn on_initialize(s: u32, ) -> Weight { - Weight::from_ref_time(15_912_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(4_530_000 as u64).saturating_mul(s as u64)) - .saturating_add(T::DbWeight::get().reads(1 as u64)) - .saturating_add(T::DbWeight::get().writes(1 as u64)) + fn execute_dispatch_unsigned() -> Weight { + Weight::from_ref_time(4_104_000 as u64) } // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { - Weight::from_ref_time(18_013_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(20_074_000 as u64) + // Standard Error: 765 + .saturating_add(Weight::from_ref_time(343_285 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(1 as u64)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) + /// The range of component `s` is `[1, 512]`. 
fn cancel(s: u32, ) -> Weight { - Weight::from_ref_time(18_131_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(21_509_000 as u64) + // Standard Error: 708 + .saturating_add(Weight::from_ref_time(323_013 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(1 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { - Weight::from_ref_time(21_230_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(98_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(22_427_000 as u64) + // Standard Error: 850 + .saturating_add(Weight::from_ref_time(357_265 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { - Weight::from_ref_time(20_139_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(22_875_000 as u64) + // Standard Error: 693 + .saturating_add(Weight::from_ref_time(336_643 as u64).saturating_mul(s as u64)) .saturating_add(T::DbWeight::get().reads(2 as u64)) .saturating_add(T::DbWeight::get().writes(2 as u64)) } @@ -213,149 +148,84 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_periodic_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(9_994_000 as u64) - // Standard Error: 20_000 - .saturating_add(Weight::from_ref_time(19_843_000 as u64).saturating_mul(s as u64)) + // Storage: Scheduler IncompleteSince (r:1 w:1) + fn service_agendas_base() -> Weight { + Weight::from_ref_time(4_992_000 as u64) .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(s as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((4 as u64).saturating_mul(s as u64))) } // Storage: Scheduler Agenda (r:1 w:1) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(10_318_000 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(15_451_000 as u64).saturating_mul(s as u64)) + /// The range of component `s` is `[0, 512]`. 
+ fn service_agenda_base(s: u32, ) -> Weight { + Weight::from_ref_time(4_320_000 as u64) + // Standard Error: 619 + .saturating_add(Weight::from_ref_time(336_713 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(s as u64))) .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(s as u64))) } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:1) - // Storage: Preimage StatusFor (r:1 w:1) - fn on_initialize_periodic_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_675_000 as u64) - // Standard Error: 17_000 - .saturating_add(Weight::from_ref_time(17_019_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((3 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((3 as u64).saturating_mul(s as u64))) + fn service_task_base() -> Weight { + Weight::from_ref_time(10_864_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) // Storage: Preimage PreimageFor (r:1 w:1) // Storage: Preimage StatusFor (r:1 w:1) - fn on_initialize_resolved(s: u32, ) -> Weight { - Weight::from_ref_time(11_934_000 as u64) - // Standard Error: 11_000 - .saturating_add(Weight::from_ref_time(14_134_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((2 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(s as u64))) - } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:0) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(7_279_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(5_388_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().writes(2 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) - } - // Storage: Scheduler Agenda (r:2 w:2) - // Storage: Preimage PreimageFor (r:1 w:0) - fn on_initialize_aborted(s: u32, ) -> Weight { - Weight::from_ref_time(8_619_000 as u64) - // Standard Error: 4_000 - .saturating_add(Weight::from_ref_time(2_969_000 as u64).saturating_mul(s as u64)) + /// The range of component `s` is `[128, 4194304]`. 
+ fn service_task_fetched(s: u32, ) -> Weight { + Weight::from_ref_time(24_586_000 as u64) + // Standard Error: 1 + .saturating_add(Weight::from_ref_time(1_138 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } - // Storage: Scheduler Agenda (r:2 w:2) // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_periodic_named(s: u32, ) -> Weight { - Weight::from_ref_time(16_129_000 as u64) - // Standard Error: 7_000 - .saturating_add(Weight::from_ref_time(9_772_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) + fn service_task_named() -> Weight { + Weight::from_ref_time(13_127_000 as u64) .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((2 as u64).saturating_mul(s as u64))) } - // Storage: Scheduler Agenda (r:2 w:2) - fn on_initialize_periodic(s: u32, ) -> Weight { - Weight::from_ref_time(15_785_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(7_208_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().reads((1 as u64).saturating_mul(s as u64))) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + fn service_task_periodic() -> Weight { + Weight::from_ref_time(11_053_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) - // Storage: Scheduler Lookup (r:0 w:1) - fn on_initialize_named(s: u32, ) -> Weight { - Weight::from_ref_time(15_778_000 as u64) - // Standard Error: 3_000 - .saturating_add(Weight::from_ref_time(5_597_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) - .saturating_add(RocksDbWeight::get().writes((1 as u64).saturating_mul(s as u64))) + fn execute_dispatch_signed() -> Weight { + Weight::from_ref_time(4_158_000 as u64) } - // Storage: Scheduler Agenda (r:1 w:1) - fn on_initialize(s: u32, ) -> Weight { - Weight::from_ref_time(15_912_000 as u64) - // Standard Error: 5_000 - .saturating_add(Weight::from_ref_time(4_530_000 as u64).saturating_mul(s as u64)) - .saturating_add(RocksDbWeight::get().reads(1 as u64)) - .saturating_add(RocksDbWeight::get().writes(1 as u64)) + fn execute_dispatch_unsigned() -> Weight { + Weight::from_ref_time(4_104_000 as u64) } // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[0, 511]`. fn schedule(s: u32, ) -> Weight { - Weight::from_ref_time(18_013_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(87_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(20_074_000 as u64) + // Standard Error: 765 + .saturating_add(Weight::from_ref_time(343_285 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(1 as u64)) } // Storage: Scheduler Agenda (r:1 w:1) // Storage: Scheduler Lookup (r:0 w:1) + /// The range of component `s` is `[1, 512]`. 
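// Editorial note, not part of the patch: the `// Storage: ... (r:x w:y)` annotations map
// one-to-one onto the `reads`/`writes` terms added below each base time. For example, one read
// and two writes under the default RocksDB constants contribute the following weight (a hedged
// illustration only, assuming the imports already used by this file):
fn example_db_overhead() -> Weight {
	RocksDbWeight::get().reads(1 as u64).saturating_add(RocksDbWeight::get().writes(2 as u64))
}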
fn cancel(s: u32, ) -> Weight { - Weight::from_ref_time(18_131_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(21_509_000 as u64) + // Standard Error: 708 + .saturating_add(Weight::from_ref_time(323_013 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(1 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[0, 511]`. fn schedule_named(s: u32, ) -> Weight { - Weight::from_ref_time(21_230_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(98_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(22_427_000 as u64) + // Standard Error: 850 + .saturating_add(Weight::from_ref_time(357_265 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) + /// The range of component `s` is `[1, 512]`. fn cancel_named(s: u32, ) -> Weight { - Weight::from_ref_time(20_139_000 as u64) - // Standard Error: 1_000 - .saturating_add(Weight::from_ref_time(595_000 as u64).saturating_mul(s as u64)) + Weight::from_ref_time(22_875_000 as u64) + // Standard Error: 693 + .saturating_add(Weight::from_ref_time(336_643 as u64).saturating_mul(s as u64)) .saturating_add(RocksDbWeight::get().reads(2 as u64)) .saturating_add(RocksDbWeight::get().writes(2 as u64)) } diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index acbcb65a3e986..1551d85ea4c96 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -169,6 +169,10 @@ pub fn expand_outer_origin( &self.caller } + fn into_caller(self) -> Self::PalletsOrigin { + self.caller + } + fn try_with_caller( mut self, f: impl FnOnce(Self::PalletsOrigin) -> Result, @@ -190,13 +194,6 @@ pub fn expand_outer_origin( fn signed(by: Self::AccountId) -> Self { #system_path::RawOrigin::Signed(by).into() } - - fn as_signed(self) -> Option { - match self.caller { - OriginCaller::system(#system_path::RawOrigin::Signed(by)) => Some(by), - _ => None, - } - } } #[derive( @@ -215,7 +212,6 @@ pub fn expand_outer_origin( // For backwards compatibility and ease of accessing these functions. 
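// Editorial sketch, not part of the patch: the macro no longer emits `as_signed` itself;
// signer extraction now goes through the new `into_caller` accessor and
// `CallerTrait::into_system` (an equivalent default `as_signed` is added to `OriginTrait`
// later in this patch). The free function below is an illustrative re-statement of that path,
// generic over any `OriginTrait` implementation.
use frame_support::traits::{CallerTrait, OriginTrait};
use frame_system::RawOrigin;

fn signer_of<O: OriginTrait>(origin: O) -> Option<O::AccountId> {
	match origin.into_caller().into_system() {
		Some(RawOrigin::Signed(who)) => Some(who),
		_ => None,
	}
}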
#[allow(dead_code)] impl RuntimeOrigin { - #[doc = #doc_string_none_origin] pub fn none() -> Self { ::none() @@ -238,6 +234,21 @@ pub fn expand_outer_origin( } } + impl #scrate::traits::CallerTrait<<#runtime as #system_path::Config>::AccountId> for OriginCaller { + fn into_system(self) -> Option<#system_path::RawOrigin<<#runtime as #system_path::Config>::AccountId>> { + match self { + OriginCaller::system(x) => Some(x), + _ => None, + } + } + fn as_system_ref(&self) -> Option<&#system_path::RawOrigin<<#runtime as #system_path::Config>::AccountId>> { + match &self { + OriginCaller::system(o) => Some(o), + _ => None, + } + } + } + impl TryFrom for #system_path::Origin<#runtime> { type Error = OriginCaller; fn try_from(x: OriginCaller) diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index db2bc90658ee2..d497a672e2970 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -3181,8 +3181,8 @@ mod tests { dispatch::{DispatchClass, DispatchInfo, Pays}, metadata::*, traits::{ - CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, - OnRuntimeUpgrade, PalletInfo, + CallerTrait, CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, + OnInitialize, OnRuntimeUpgrade, PalletInfo, }, }; use sp_weights::RuntimeDbWeight; @@ -3300,6 +3300,16 @@ mod tests { } } + impl CallerTrait<::AccountId> for OuterOrigin { + fn into_system(self) -> Option::AccountId>> { + unimplemented!("Not required in tests!") + } + + fn as_system_ref(&self) -> Option<&RawOrigin<::AccountId>> { + unimplemented!("Not required in tests!") + } + } + impl crate::traits::OriginTrait for OuterOrigin { type Call = ::RuntimeCall; type PalletsOrigin = OuterOrigin; @@ -3325,6 +3335,10 @@ mod tests { unimplemented!("Not required in tests!") } + fn into_caller(self) -> Self::PalletsOrigin { + unimplemented!("Not required in tests!") + } + fn try_with_caller( self, _f: impl FnOnce(Self::PalletsOrigin) -> Result, @@ -3344,6 +3358,9 @@ mod tests { fn as_signed(self) -> Option { unimplemented!("Not required in tests!") } + fn as_system_ref(&self) -> Option<&RawOrigin> { + unimplemented!("Not required in tests!") + } } impl system::Config for TraitImpl { diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index d51c32649a797..302d3354dae5e 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -58,10 +58,11 @@ pub use misc::{ Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, DefensiveSaturating, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, - IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PreimageProvider, - PreimageRecipient, PrivilegeCmp, SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, - WrapperKeepOpaque, WrapperOpaque, + IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, + SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, WrapperKeepOpaque, WrapperOpaque, }; +#[allow(deprecated)] +pub use misc::{PreimageProvider, PreimageRecipient}; #[doc(hidden)] pub use misc::{DEFENSIVE_OP_INTERNAL_ERROR, DEFENSIVE_OP_PUBLIC_ERROR}; @@ -96,8 +97,9 @@ mod dispatch; #[allow(deprecated)] pub use dispatch::EnsureOneOf; pub use dispatch::{ - AsEnsureOriginWithArg, EitherOf, EitherOfDiverse, EnsureOrigin, EnsureOriginWithArg, - MapSuccess, NeverEnsureOrigin, OriginTrait, TryMapSuccess, 
UnfilteredDispatchable, + AsEnsureOriginWithArg, CallerTrait, EitherOf, EitherOfDiverse, EnsureOrigin, + EnsureOriginWithArg, MapSuccess, NeverEnsureOrigin, OriginTrait, TryMapSuccess, + UnfilteredDispatchable, }; mod voting; @@ -106,6 +108,9 @@ pub use voting::{ U128CurrencyToVote, VoteTally, }; +mod preimages; +pub use preimages::{Bounded, BoundedInline, FetchResult, Hash, QueryPreimage, StorePreimage}; + #[cfg(feature = "try-runtime")] mod try_runtime; #[cfg(feature = "try-runtime")] diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs index c0e7e32a5529e..b96cfae4500e2 100644 --- a/frame/support/src/traits/dispatch.rs +++ b/frame/support/src/traits/dispatch.rs @@ -236,17 +236,25 @@ pub trait UnfilteredDispatchable { fn dispatch_bypass_filter(self, origin: Self::RuntimeOrigin) -> DispatchResultWithPostInfo; } +/// The trait implemented by the overarching enumeration of the different pallets' origins. +/// Unlike `OriginTrait` impls, this does not include any kind of dispatch/call filter. Also, this +/// trait is more flexible in terms of how it can be used: it is a `Parameter` and `Member`, so it +/// can be used as dispatchable parameters as well as in storage items. +pub trait CallerTrait: Parameter + Member + From> { + /// Extract the signer from the message if it is a `Signed` origin. + fn into_system(self) -> Option>; + + /// Extract a reference to the system-level `RawOrigin` if it is that. + fn as_system_ref(&self) -> Option<&RawOrigin>; +} + /// Methods available on `frame_system::Config::RuntimeOrigin`. pub trait OriginTrait: Sized { /// Runtime call type, as in `frame_system::Config::Call` type Call; /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin: Parameter - + Member - + Into - + From> - + MaxEncodedLen; + type PalletsOrigin: Into + CallerTrait + MaxEncodedLen; /// The AccountId used across the system. type AccountId; @@ -266,9 +274,12 @@ pub trait OriginTrait: Sized { /// For root origin caller, the filters are bypassed and true is returned. fn filter_call(&self, call: &Self::Call) -> bool; - /// Get the caller. + /// Get a reference to the caller (`CallerTrait` impl). fn caller(&self) -> &Self::PalletsOrigin; + /// Consume `self` and return the caller. + fn into_caller(self) -> Self::PalletsOrigin; + /// Do something with the caller, consuming self but returning it if the caller was unused. fn try_with_caller( self, @@ -285,7 +296,20 @@ pub trait OriginTrait: Sized { fn signed(by: Self::AccountId) -> Self; /// Extract the signer from the message if it is a `Signed` origin. - fn as_signed(self) -> Option; + fn as_signed(self) -> Option { + self.into_caller().into_system().and_then(|s| { + if let RawOrigin::Signed(who) = s { + Some(who) + } else { + None + } + }) + } + + /// Extract a reference to the sytsem origin, if that's what the caller is. + fn as_system_ref(&self) -> Option<&RawOrigin> { + self.caller().as_system_ref() + } } #[cfg(test)] diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 7fc4a6fb08a5a..5a976478fa7c4 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -932,7 +932,7 @@ pub trait PreimageRecipient: PreimageProvider { /// Maximum size of a preimage. type MaxSize: Get; - /// Store the bytes of a preimage on chain. + /// Store the bytes of a preimage on chain infallible due to the bounded type. fn note_preimage(bytes: crate::BoundedVec); /// Clear a previously noted preimage. 
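// Editorial sketch, not part of the patch: `as_system_ref` (added to both `CallerTrait` and
// `OriginTrait` above) allows inspecting an origin without consuming it. A minimal, hedged
// illustration:
use frame_support::traits::OriginTrait;
use frame_system::RawOrigin;

fn is_root_origin<O: OriginTrait>(origin: &O) -> bool {
	matches!(origin.as_system_ref(), Some(RawOrigin::Root))
}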
This is infallible and should be treated more like a diff --git a/frame/support/src/traits/preimages.rs b/frame/support/src/traits/preimages.rs new file mode 100644 index 0000000000000..594532ba96903 --- /dev/null +++ b/frame/support/src/traits/preimages.rs @@ -0,0 +1,317 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Stuff for dealing with 32-byte hashed preimages. + +use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; +use sp_core::{RuntimeDebug, H256}; +use sp_io::hashing::blake2_256; +use sp_runtime::{traits::ConstU32, DispatchError}; +use sp_std::borrow::Cow; + +pub type Hash = H256; +pub type BoundedInline = crate::BoundedVec>; + +#[derive( + Encode, Decode, MaxEncodedLen, Clone, Eq, PartialEq, scale_info::TypeInfo, RuntimeDebug, +)] +#[codec(mel_bound())] +pub enum Bounded { + /// A Blake2 256 hash with no preimage length. We + /// do not support creation of this except for transitioning from legacy state. + /// In the future we will make this a pure `Dummy` item storing only the final `dummy` field. + Legacy { hash: Hash, dummy: sp_std::marker::PhantomData }, + /// A an bounded `Call`. Its encoding must be at most 128 bytes. + Inline(BoundedInline), + /// A Blake2-256 hash of the call together with an upper limit for its size. + Lookup { hash: Hash, len: u32 }, +} + +impl Bounded { + /// Casts the wrapped type into something that encodes alike. + /// + /// # Examples + /// ``` + /// use frame_support::traits::Bounded; + /// + /// // Transmute from `String` to `&str`. + /// let x: Bounded = Bounded::Inline(Default::default()); + /// let _: Bounded<&str> = x.transmute(); + /// ``` + pub fn transmute(self) -> Bounded + where + T: Encode + EncodeLike, + { + use Bounded::*; + match self { + Legacy { hash, .. } => Legacy { hash, dummy: sp_std::marker::PhantomData }, + Inline(x) => Inline(x), + Lookup { hash, len } => Lookup { hash, len }, + } + } + + /// Returns the hash of the preimage. + /// + /// The hash is re-calculated every time if the preimage is inlined. + pub fn hash(&self) -> H256 { + use Bounded::*; + match self { + Legacy { hash, .. } => *hash, + Inline(x) => blake2_256(x.as_ref()).into(), + Lookup { hash, .. } => *hash, + } + } +} + +// The maximum we expect a single legacy hash lookup to be. +const MAX_LEGACY_LEN: u32 = 1_000_000; + +impl Bounded { + /// Returns the length of the preimage or `None` if the length is unknown. + pub fn len(&self) -> Option { + match self { + Self::Legacy { .. } => None, + Self::Inline(i) => Some(i.len() as u32), + Self::Lookup { len, .. } => Some(*len), + } + } + + /// Returns whether the image will require a lookup to be peeked. + pub fn lookup_needed(&self) -> bool { + match self { + Self::Inline(..) => false, + Self::Legacy { .. } | Self::Lookup { .. } => true, + } + } + + /// The maximum length of the lookup that is needed to peek `Self`. 
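// Editorial sketch, not part of the patch: `BoundedInline` caps inline preimages at 128 bytes,
// so values whose SCALE encoding fits that bound can be carried as `Bounded::Inline` without
// any preimage storage or lookup. A hedged sketch (the helper name is illustrative):
use codec::Encode;
use frame_support::traits::{Bounded, BoundedInline};

fn inline_only<T: Encode>(value: &T) -> Option<Bounded<T>> {
	BoundedInline::try_from(value.encode()).ok().map(Bounded::Inline)
}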
+ pub fn lookup_len(&self) -> Option { + match self { + Self::Inline(..) => None, + Self::Legacy { .. } => Some(MAX_LEGACY_LEN), + Self::Lookup { len, .. } => Some(*len), + } + } + + /// Constructs a `Lookup` bounded item. + pub fn unrequested(hash: Hash, len: u32) -> Self { + Self::Lookup { hash, len } + } + + /// Constructs a `Legacy` bounded item. + #[deprecated = "This API is only for transitioning to Scheduler v3 API"] + pub fn from_legacy_hash(hash: impl Into) -> Self { + Self::Legacy { hash: hash.into(), dummy: sp_std::marker::PhantomData } + } +} + +pub type FetchResult = Result, DispatchError>; + +/// A interface for looking up preimages from their hash on chain. +pub trait QueryPreimage { + /// Returns whether a preimage exists for a given hash and if so its length. + fn len(hash: &Hash) -> Option; + + /// Returns the preimage for a given hash. If given, `len` must be the size of the preimage. + fn fetch(hash: &Hash, len: Option) -> FetchResult; + + /// Returns whether a preimage request exists for a given hash. + fn is_requested(hash: &Hash) -> bool; + + /// Request that someone report a preimage. Providers use this to optimise the economics for + /// preimage reporting. + fn request(hash: &Hash); + + /// Cancel a previous preimage request. + fn unrequest(hash: &Hash); + + /// Request that the data required for decoding the given `bounded` value is made available. + fn hold(bounded: &Bounded) { + use Bounded::*; + match bounded { + Inline(..) => {}, + Legacy { hash, .. } | Lookup { hash, .. } => Self::request(hash), + } + } + + /// No longer request that the data required for decoding the given `bounded` value is made + /// available. + fn drop(bounded: &Bounded) { + use Bounded::*; + match bounded { + Inline(..) => {}, + Legacy { hash, .. } | Lookup { hash, .. } => Self::unrequest(hash), + } + } + + /// Check to see if all data required for the given `bounded` value is available for its + /// decoding. + fn have(bounded: &Bounded) -> bool { + use Bounded::*; + match bounded { + Inline(..) => true, + Legacy { hash, .. } | Lookup { hash, .. } => Self::len(hash).is_some(), + } + } + + /// Create a `Bounded` instance based on the `hash` and `len` of the encoded value. This may not + /// be `peek`-able or `realize`-able. + fn pick(hash: Hash, len: u32) -> Bounded { + Self::request(&hash); + Bounded::Lookup { hash, len } + } + + /// Convert the given `bounded` instance back into its original instance, also returning the + /// exact size of its encoded form if it needed to be looked-up from a stored preimage). + /// + /// NOTE: This does not remove any data needed for realization. If you will no longer use the + /// `bounded`, call `realize` instead or call `drop` afterwards. + fn peek(bounded: &Bounded) -> Result<(T, Option), DispatchError> { + use Bounded::*; + match bounded { + Inline(data) => T::decode(&mut &data[..]).ok().map(|x| (x, None)), + Lookup { hash, len } => { + let data = Self::fetch(hash, Some(*len))?; + T::decode(&mut &data[..]).ok().map(|x| (x, Some(data.len() as u32))) + }, + Legacy { hash, .. } => { + let data = Self::fetch(hash, None)?; + T::decode(&mut &data[..]).ok().map(|x| (x, Some(data.len() as u32))) + }, + } + .ok_or(DispatchError::Corruption) + } + + /// Convert the given `bounded` value back into its original instance. If successful, + /// `drop` any data backing it. This will not break the realisability of independently + /// created instances of `Bounded` which happen to have identical data. 
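// Editorial sketch, not part of the patch: the intended consumer-side flow is roughly "bound
// when enqueuing, peek/realize when executing". A hedged sketch over any provider implementing
// the two traits (function names are illustrative):
use codec::{Decode, Encode};
use frame_support::traits::{Bounded, QueryPreimage, StorePreimage};
use sp_runtime::DispatchError;

/// Turn a call into storable form: small encodings stay inline, larger ones are noted with the
/// preimage provider and referenced by hash and length.
fn enqueue<P: StorePreimage, C: Encode>(call: C) -> Result<Bounded<C>, DispatchError> {
	P::bound(call)
}

/// Decode a previously bounded call and release any backing preimage request.
fn execute<P: QueryPreimage, C: Decode>(bounded: &Bounded<C>) -> Result<C, DispatchError> {
	P::realize(bounded).map(|(call, _len)| call)
}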
+ fn realize(bounded: &Bounded) -> Result<(T, Option), DispatchError> { + let r = Self::peek(bounded)?; + Self::drop(bounded); + Ok(r) + } +} + +/// A interface for managing preimages to hashes on chain. +/// +/// Note that this API does not assume any underlying user is calling, and thus +/// does not handle any preimage ownership or fees. Other system level logic that +/// uses this API should implement that on their own side. +pub trait StorePreimage: QueryPreimage { + /// The maximum length of preimage we can store. + /// + /// This is the maximum length of the *encoded* value that can be passed to `bound`. + const MAX_LENGTH: usize; + + /// Request and attempt to store the bytes of a preimage on chain. + /// + /// May return `DispatchError::Exhausted` if the preimage is just too big. + fn note(bytes: Cow<[u8]>) -> Result; + + /// Attempt to clear a previously noted preimage. Exactly the same as `unrequest` but is + /// provided for symmetry. + fn unnote(hash: &Hash) { + Self::unrequest(hash) + } + + /// Convert an otherwise unbounded or large value into a type ready for placing in storage. The + /// result is a type whose `MaxEncodedLen` is 131 bytes. + /// + /// NOTE: Once this API is used, you should use either `drop` or `realize`. + fn bound(t: T) -> Result, DispatchError> { + let data = t.encode(); + let len = data.len() as u32; + Ok(match BoundedInline::try_from(data) { + Ok(bounded) => Bounded::Inline(bounded), + Err(unbounded) => Bounded::Lookup { hash: Self::note(unbounded.into())?, len }, + }) + } +} + +impl QueryPreimage for () { + fn len(_: &Hash) -> Option { + None + } + fn fetch(_: &Hash, _: Option) -> FetchResult { + Err(DispatchError::Unavailable) + } + fn is_requested(_: &Hash) -> bool { + false + } + fn request(_: &Hash) {} + fn unrequest(_: &Hash) {} +} + +impl StorePreimage for () { + const MAX_LENGTH: usize = 0; + fn note(_: Cow<[u8]>) -> Result { + Err(DispatchError::Exhausted) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{bounded_vec, BoundedVec}; + + #[test] + fn bounded_size_is_correct() { + assert_eq!(> as MaxEncodedLen>::max_encoded_len(), 131); + } + + #[test] + fn bounded_basic_works() { + let data: BoundedVec = bounded_vec![b'a', b'b', b'c']; + let len = data.len() as u32; + let hash = blake2_256(&data).into(); + + // Inline works + { + let bound: Bounded> = Bounded::Inline(data.clone()); + assert_eq!(bound.hash(), hash); + assert_eq!(bound.len(), Some(len)); + assert!(!bound.lookup_needed()); + assert_eq!(bound.lookup_len(), None); + } + // Legacy works + { + let bound: Bounded> = Bounded::Legacy { hash, dummy: Default::default() }; + assert_eq!(bound.hash(), hash); + assert_eq!(bound.len(), None); + assert!(bound.lookup_needed()); + assert_eq!(bound.lookup_len(), Some(1_000_000)); + } + // Lookup works + { + let bound: Bounded> = Bounded::Lookup { hash, len: data.len() as u32 }; + assert_eq!(bound.hash(), hash); + assert_eq!(bound.len(), Some(len)); + assert!(bound.lookup_needed()); + assert_eq!(bound.lookup_len(), Some(len)); + } + } + + #[test] + fn bounded_transmuting_works() { + let data: BoundedVec = bounded_vec![b'a', b'b', b'c']; + + // Transmute a `String` into a `&str`. 
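// Editorial note, not part of the patch: the `131` asserted in `bounded_size_is_correct` below
// follows from the largest variant, `Inline(BoundedVec<u8, ConstU32<128>>)`: a 2-byte SCALE
// compact length prefix (128 exceeds the 1-byte range of 0..=63), up to 128 data bytes, plus
// 1 byte for the enum discriminant.
const _: () = assert!(1 + 2 + 128 == 131);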
+ let x: Bounded = Bounded::Inline(data.clone()); + let y: Bounded<&str> = x.transmute(); + assert_eq!(y, Bounded::Inline(data)); + } +} diff --git a/frame/support/src/traits/schedule.rs b/frame/support/src/traits/schedule.rs index 0dbbbd9e2a553..b8e6a7f807904 100644 --- a/frame/support/src/traits/schedule.rs +++ b/frame/support/src/traits/schedule.rs @@ -17,6 +17,8 @@ //! Traits and associated utilities for scheduling dispatchables in FRAME. +#[allow(deprecated)] +use super::PreimageProvider; use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime::{traits::Saturating, DispatchError, RuntimeDebug}; @@ -128,6 +130,7 @@ impl MaybeHashed { } } +// TODO: deprecate pub mod v1 { use super::*; @@ -283,6 +286,7 @@ pub mod v1 { } } +// TODO: deprecate pub mod v2 { use super::*; @@ -375,6 +379,97 @@ pub mod v2 { } } -pub use v1::*; +pub mod v3 { + use super::*; + use crate::traits::Bounded; -use super::PreimageProvider; + /// A type that can be used as a scheduler. + pub trait Anon { + /// An address which can be used for removing a scheduled task. + type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + Debug + TypeInfo; + + /// Schedule a dispatch to happen at the beginning of some block in the future. + /// + /// This is not named. + fn schedule( + when: DispatchTime, + maybe_periodic: Option>, + priority: Priority, + origin: Origin, + call: Bounded, + ) -> Result; + + /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, + /// also. + /// + /// Will return an `Unavailable` error if the `address` is invalid. + /// + /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. + /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. + /// + /// NOTE2: This will not work to cancel periodic tasks after their initial execution. For + /// that, you must name the task explicitly using the `Named` trait. + fn cancel(address: Self::Address) -> Result<(), DispatchError>; + + /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed + /// only if it is executed *before* the currently scheduled block. For periodic tasks, + /// this dispatch is guaranteed to succeed only before the *initial* execution; for + /// others, use `reschedule_named`. + /// + /// Will return an `Unavailable` error if the `address` is invalid. + fn reschedule( + address: Self::Address, + when: DispatchTime, + ) -> Result; + + /// Return the next dispatch time for a given task. + /// + /// Will return an `Unavailable` error if the `address` is invalid. + fn next_dispatch_time(address: Self::Address) -> Result; + } + + pub type TaskName = [u8; 32]; + + /// A type that can be used as a scheduler. + pub trait Named { + /// An address which can be used for removing a scheduled task. + type Address: Codec + MaxEncodedLen + Clone + Eq + EncodeLike + sp_std::fmt::Debug; + + /// Schedule a dispatch to happen at the beginning of some block in the future. + /// + /// - `id`: The identity of the task. This must be unique and will return an error if not. + fn schedule_named( + id: TaskName, + when: DispatchTime, + maybe_periodic: Option>, + priority: Priority, + origin: Origin, + call: Bounded, + ) -> Result; + + /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances + /// of that, also. + /// + /// Will return an `Unavailable` error if the `id` is invalid. 
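// Editorial sketch, not part of the patch: unlike v1/v2, the v3 scheduler traits take a
// `Bounded<Call>` rather than a `MaybeHashed` call backed by `PreimageProvider`. Where only a
// bare hash survives from state written by the older schedulers, the transitional
// `Bounded::from_legacy_hash` shown earlier is the intended bridge. A hedged, migration-style
// sketch:
use frame_support::traits::Bounded;
use sp_core::H256;

#[allow(deprecated)]
fn migrate_hashed_call<C>(hash: H256) -> Bounded<C> {
	Bounded::from_legacy_hash(hash)
}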
+ /// + /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. + /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. + fn cancel_named(id: TaskName) -> Result<(), DispatchError>; + + /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed + /// only if it is executed *before* the currently scheduled block. + /// + /// Will return an `Unavailable` error if the `id` is invalid. + fn reschedule_named( + id: TaskName, + when: DispatchTime, + ) -> Result; + + /// Return the next dispatch time for a given task. + /// + /// Will return an `Unavailable` error if the `id` is invalid. + fn next_dispatch_time(id: TaskName) -> Result; + } +} + +pub use v1::*; diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index b0716d569409c..b8a9a1128d669 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 280 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 160 others + and 161 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 280 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 926dc92530659..5032f63bc1b1b 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 280 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -69,7 +69,7 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied (A, B, C, D) (A, B, C, D, E) (A, B, C, D, E, F) - and 160 others + and 161 others = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 279 others + and 280 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index 563190a06f76f..8d3d7a71a313e 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -13,5 +13,5 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 77 others + and 78 others = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index c10005223b674..ebf24a1232e3c 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -13,6 +13,6 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6) (TupleElement0, TupleElement1, TupleElement2, TupleElement3, TupleElement4, TupleElement5, TupleElement6, TupleElement7) - and 77 others + and 78 others = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index dc74157da79de..7577d0dc6b158 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -222,7 +222,10 @@ pub mod pallet { + OriginTrait; /// The aggregated `RuntimeCall` type. - type RuntimeCall: Dispatchable + Debug; + type RuntimeCall: Parameter + + Dispatchable + + Debug + + From>; /// Account index (aka nonce) type. This stores the number of previous transactions /// associated with a sender account. 
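// Editorial sketch, not part of the patch: the new `From<frame_system::Call<Self>>` bound on
// `RuntimeCall` lets generic code construct a runtime call from a system call without naming
// the concrete runtime. A hedged illustration:
use sp_std::vec::Vec;

fn remark_call<T: frame_system::Config>(data: Vec<u8>) -> T::RuntimeCall {
	frame_system::Call::<T>::remark { remark: data }.into()
}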
diff --git a/frame/whitelist/src/mock.rs b/frame/whitelist/src/mock.rs index 44aea86be6f19..d4446cb8031ab 100644 --- a/frame/whitelist/src/mock.rs +++ b/frame/whitelist/src/mock.rs @@ -96,7 +96,6 @@ impl pallet_preimage::Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; type ManagerOrigin = EnsureRoot; - type MaxSize = ConstU32<{ 4096 * 1024 }>; // PreimageMaxSize Taken from Polkadot as reference. type BaseDeposit = ConstU64<1>; type ByteDeposit = ConstU64<1>; type WeightInfo = (); diff --git a/primitives/core/src/bounded/bounded_vec.rs b/primitives/core/src/bounded/bounded_vec.rs index 85f2bed316793..1832e43e8646c 100644 --- a/primitives/core/src/bounded/bounded_vec.rs +++ b/primitives/core/src/bounded/bounded_vec.rs @@ -276,6 +276,14 @@ impl<'a, T, S> sp_std::iter::IntoIterator for BoundedSlice<'a, T, S> { } } +impl<'a, T, S: Get> BoundedSlice<'a, T, S> { + /// Create an instance from the first elements of the given slice (or all of it if it is smaller + /// than the length bound). + pub fn truncate_from(s: &'a [T]) -> Self { + Self(&s[0..(s.len().min(S::get() as usize))], PhantomData) + } +} + impl> Decode for BoundedVec { fn decode(input: &mut I) -> Result { let inner = Vec::::decode(input)?; @@ -620,12 +628,12 @@ impl> BoundedVec { /// # Panics /// /// Panics if `index > len`. - pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), ()> { + pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), T> { if self.len() < Self::bound() { self.0.insert(index, element); Ok(()) } else { - Err(()) + Err(element) } } @@ -635,12 +643,12 @@ impl> BoundedVec { /// # Panics /// /// Panics if the new capacity exceeds isize::MAX bytes. - pub fn try_push(&mut self, element: T) -> Result<(), ()> { + pub fn try_push(&mut self, element: T) -> Result<(), T> { if self.len() < Self::bound() { self.0.push(element); Ok(()) } else { - Err(()) + Err(element) } } } @@ -673,13 +681,13 @@ where } impl> TryFrom> for BoundedVec { - type Error = (); + type Error = Vec; fn try_from(t: Vec) -> Result { if t.len() <= Self::bound() { // explicit check just above Ok(Self::unchecked_from(t)) } else { - Err(()) + Err(t) } } } @@ -886,6 +894,16 @@ pub mod test { use super::*; use crate::{bounded_vec, ConstU32}; + #[test] + fn slice_truncate_from_works() { + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3, 4, 5]); + assert_eq!(bounded.deref(), &[1, 2, 3, 4]); + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3, 4]); + assert_eq!(bounded.deref(), &[1, 2, 3, 4]); + let bounded = BoundedSlice::>::truncate_from(&[1, 2, 3]); + assert_eq!(bounded.deref(), &[1, 2, 3]); + } + #[test] fn slide_works() { let mut b: BoundedVec> = bounded_vec![0, 1, 2, 3, 4, 5]; diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 8017a6ac529a2..96706dd919650 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -547,6 +547,12 @@ pub enum DispatchError { /// The number of transactional layers has been reached, or we are not in a transactional /// layer. Transactional(TransactionalError), + /// Resources exhausted, e.g. attempt to read/write data which is too large to manipulate. + Exhausted, + /// The state is corrupt; this is generally not going to fix itself. + Corruption, + /// Some resource (e.g. a preimage) is unavailable right now. This might fix itself later. 
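// Editorial sketch, not part of the patch: the three new `DispatchError` variants carry
// different retry semantics. `Unavailable` may resolve by itself (e.g. once a preimage is
// noted) and `Exhausted` once resources free up, whereas `Corruption` will not. A hedged
// sketch of a caller acting on that distinction:
use sp_runtime::DispatchError;

fn is_retriable(err: &DispatchError) -> bool {
	matches!(err, DispatchError::Unavailable | DispatchError::Exhausted)
}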
+ Unavailable, } /// Result of a `Dispatchable` which contains the `DispatchResult` and additional information about @@ -671,18 +677,21 @@ impl From<&'static str> for DispatchError { impl From for &'static str { fn from(err: DispatchError) -> &'static str { + use DispatchError::*; match err { - DispatchError::Other(msg) => msg, - DispatchError::CannotLookup => "Cannot lookup", - DispatchError::BadOrigin => "Bad origin", - DispatchError::Module(ModuleError { message, .. }) => - message.unwrap_or("Unknown module error"), - DispatchError::ConsumerRemaining => "Consumer remaining", - DispatchError::NoProviders => "No providers", - DispatchError::TooManyConsumers => "Too many consumers", - DispatchError::Token(e) => e.into(), - DispatchError::Arithmetic(e) => e.into(), - DispatchError::Transactional(e) => e.into(), + Other(msg) => msg, + CannotLookup => "Cannot lookup", + BadOrigin => "Bad origin", + Module(ModuleError { message, .. }) => message.unwrap_or("Unknown module error"), + ConsumerRemaining => "Consumer remaining", + NoProviders => "No providers", + TooManyConsumers => "Too many consumers", + Token(e) => e.into(), + Arithmetic(e) => e.into(), + Transactional(e) => e.into(), + Exhausted => "Resources exhausted", + Corruption => "State corrupt", + Unavailable => "Resource unavailable", } } } @@ -698,33 +707,37 @@ where impl traits::Printable for DispatchError { fn print(&self) { + use DispatchError::*; "DispatchError".print(); match self { - Self::Other(err) => err.print(), - Self::CannotLookup => "Cannot lookup".print(), - Self::BadOrigin => "Bad origin".print(), - Self::Module(ModuleError { index, error, message }) => { + Other(err) => err.print(), + CannotLookup => "Cannot lookup".print(), + BadOrigin => "Bad origin".print(), + Module(ModuleError { index, error, message }) => { index.print(); error.print(); if let Some(msg) = message { msg.print(); } }, - Self::ConsumerRemaining => "Consumer remaining".print(), - Self::NoProviders => "No providers".print(), - Self::TooManyConsumers => "Too many consumers".print(), - Self::Token(e) => { + ConsumerRemaining => "Consumer remaining".print(), + NoProviders => "No providers".print(), + TooManyConsumers => "Too many consumers".print(), + Token(e) => { "Token error: ".print(); <&'static str>::from(*e).print(); }, - Self::Arithmetic(e) => { + Arithmetic(e) => { "Arithmetic error: ".print(); <&'static str>::from(*e).print(); }, - Self::Transactional(e) => { + Transactional(e) => { "Transactional error: ".print(); <&'static str>::from(*e).print(); }, + Exhausted => "Resources exhausted".print(), + Corruption => "State corrupt".print(), + Unavailable => "Resource unavailable".print(), } } } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index a64e3f25ef041..3db0e5510057b 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -37,8 +37,9 @@ use trie_db::{Trie, TrieMut}; use cfg_if::cfg_if; use frame_support::{ + dispatch::RawOrigin, parameter_types, - traits::{ConstU32, ConstU64, CrateVersion, KeyOwnerProofSystem}, + traits::{CallerTrait, ConstU32, ConstU64, CrateVersion, KeyOwnerProofSystem}, weights::{RuntimeDbWeight, Weight}, }; use frame_system::limits::{BlockLength, BlockWeights}; @@ -119,7 +120,7 @@ pub fn native_version() -> NativeVersion { } /// Calls in transactions. 
-#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct Transfer { pub from: AccountId, pub to: AccountId, @@ -150,7 +151,7 @@ impl Transfer { } /// Extrinsic for test-runtime. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum Extrinsic { AuthoritiesChange(Vec), Transfer { @@ -446,11 +447,22 @@ impl GetRuntimeBlockType for Runtime { #[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq, TypeInfo, MaxEncodedLen)] pub struct RuntimeOrigin; -impl From> for RuntimeOrigin { - fn from(_o: frame_system::Origin) -> Self { +impl From::AccountId>> for RuntimeOrigin { + fn from(_: RawOrigin<::AccountId>) -> Self { unimplemented!("Not required in tests!") } } + +impl CallerTrait<::AccountId> for RuntimeOrigin { + fn into_system(self) -> Option::AccountId>> { + unimplemented!("Not required in tests!") + } + + fn as_system_ref(&self) -> Option<&RawOrigin<::AccountId>> { + unimplemented!("Not required in tests!") + } +} + impl From for Result, RuntimeOrigin> { fn from(_origin: RuntimeOrigin) -> Result, RuntimeOrigin> { unimplemented!("Not required in tests!") @@ -482,6 +494,10 @@ impl frame_support::traits::OriginTrait for RuntimeOrigin { unimplemented!("Not required in tests!") } + fn into_caller(self) -> Self::PalletsOrigin { + unimplemented!("Not required in tests!") + } + fn try_with_caller( self, _f: impl FnOnce(Self::PalletsOrigin) -> Result, @@ -501,6 +517,9 @@ impl frame_support::traits::OriginTrait for RuntimeOrigin { fn as_signed(self) -> Option { unimplemented!("Not required in tests!") } + fn as_system_ref(&self) -> Option<&RawOrigin> { + unimplemented!("Not required in tests!") + } } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)] @@ -583,6 +602,12 @@ parameter_types! 
{ BlockWeights::with_sensible_defaults(Weight::from_ref_time(4 * 1024 * 1024), Perbill::from_percent(75)); } +impl From> for Extrinsic { + fn from(_: frame_system::Call) -> Self { + unimplemented!("Not required in tests!") + } +} + impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; From fc67cbb66d8c484bc7b7506fc1300344d12ecbad Mon Sep 17 00:00:00 2001 From: Andronik Date: Wed, 5 Oct 2022 23:07:15 +0200 Subject: [PATCH 213/348] update kvdb & co (#12312) * upgrade kvdb & co * remove patch * update Cargo.lock * upgrade impl-serde * fix parsing test * actually fix it * FFS --- Cargo.lock | 89 +++++++++---------------- bin/node/bench/Cargo.toml | 6 +- bin/node/bench/src/tempdb.rs | 13 ++-- bin/node/inspect/src/lib.rs | 4 +- client/db/Cargo.toml | 8 +-- client/db/src/upgrade.rs | 9 ++- client/informant/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/state-db/Cargo.toml | 2 +- client/transaction-pool/Cargo.toml | 2 +- frame/support/Cargo.toml | 2 +- primitives/arithmetic/Cargo.toml | 2 +- primitives/arithmetic/fuzzer/Cargo.toml | 2 +- primitives/core/Cargo.toml | 6 +- primitives/core/src/uint.rs | 3 - primitives/database/Cargo.toml | 2 +- primitives/runtime-interface/Cargo.toml | 2 +- primitives/runtime/Cargo.toml | 2 +- primitives/storage/Cargo.toml | 2 +- primitives/test-primitives/Cargo.toml | 2 +- primitives/trie/Cargo.toml | 4 +- primitives/version/Cargo.toml | 2 +- test-utils/runtime/Cargo.toml | 4 +- utils/frame/benchmarking-cli/Cargo.toml | 4 +- 24 files changed, 73 insertions(+), 103 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 53f370a930626..52b74628f07d3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -551,19 +551,18 @@ checksum = "50ae17cabbc8a38a1e3e4c1a6a664e9a09672dc14d0896fa8d865d3a5a446b07" [[package]] name = "bincode" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d175dfa69e619905c4c3cdb7c3c203fa3bdd5d51184e3afdb2742c0280493772" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" dependencies = [ - "byteorder", "serde", ] [[package]] name = "bindgen" -version = "0.59.2" +version = "0.60.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" dependencies = [ "bitflags", "cexpr", @@ -778,9 +777,9 @@ dependencies = [ [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" @@ -981,7 +980,7 @@ checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" dependencies = [ "glob", "libc", - "libloading 0.7.0", + "libloading", ] [[package]] @@ -2063,9 +2062,9 @@ dependencies = [ [[package]] name = "fixed-hash" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", "rand 0.8.5", @@ -2442,18 +2441,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "fs-swap" -version = "0.2.6" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d47dad3685eceed8488986cad3d5027165ea5edb164331770e2059555f10a5" -dependencies = [ - "lazy_static", - "libc", - "libloading 0.5.2", - "winapi", -] - [[package]] name = "fs2" version = "0.4.3" @@ -3055,9 +3042,9 @@ dependencies = [ [[package]] name = "impl-serde" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" dependencies = [ "serde", ] @@ -3449,9 +3436,9 @@ dependencies = [ [[package]] name = "kvdb" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a301d8ecb7989d4a6e2c57a49baca77d353bdbf879909debe3f375fe25d61f86" +checksum = "585089ceadba0197ffe9af6740ab350b325e3c1f5fccfbc3522e0250c750409b" dependencies = [ "parity-util-mem", "smallvec", @@ -3459,9 +3446,9 @@ dependencies = [ [[package]] name = "kvdb-memorydb" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece7e668abd21387aeb6628130a6f4c802787f014fa46bc83221448322250357" +checksum = "40d109c87bfb7759edd2a49b2649c1afe25af785d930ad6a38479b4dc70dd873" dependencies = [ "kvdb", "parity-util-mem", @@ -3470,15 +3457,13 @@ dependencies = [ [[package]] name = "kvdb-rocksdb" -version = "0.15.2" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca7fbdfd71cd663dceb0faf3367a99f8cf724514933e9867cec4995b6027cbc1" +checksum = "c076cc2cdbac89b9910c853a36c957d3862a779f31c2661174222cefb49ee597" dependencies = [ - "fs-swap", "kvdb", "log", "num_cpus", - "owning_ref", "parity-util-mem", "parking_lot 0.12.1", "regex", @@ -3522,16 +3507,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "libloading" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" -dependencies = [ - "cc", - "winapi", -] - [[package]] name = "libloading" version = "0.7.0" @@ -4065,9 +4040,9 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "0.6.1+6.28.2" +version = "0.8.0+7.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc587013734dadb7cf23468e531aa120788b87243648be42e2d3a072186291" +checksum = "611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d" dependencies = [ "bindgen", "bzip2-sys", @@ -4364,9 +4339,9 @@ dependencies = [ [[package]] name = "memory-db" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6566c70c1016f525ced45d7b7f97730a2bafb037c788211d0c186ef5b2189f0a" +checksum = "34ac11bb793c28fa095b7554466f53b3a60a2cd002afdac01bcf135cbd73a269" dependencies = [ "hash-db", "hashbrown 0.12.3", @@ -6637,9 +6612,9 @@ checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" [[package]] name = "parity-util-mem" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c32561d248d352148124f036cac253a644685a21dc9fea383eb4907d7bd35a8f" +checksum = "0d32c34f4f5ca7f9196001c0aba5a1f9a5a12382c8944b8b0f90233282d1e8f8" dependencies = [ "cfg-if 1.0.0", "hashbrown 0.12.3", @@ -7011,9 +6986,9 @@ dependencies = [ [[package]] name = 
"primitive-types" -version = "0.11.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" +checksum = "5cfd65aea0c5fa0bfcc7c9e7ca828c921ef778f43d325325ec84bda371bfa75a" dependencies = [ "fixed-hash", "impl-codec", @@ -7637,9 +7612,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620f4129485ff1a7128d184bc687470c21c7951b64779ebc9cfdad3dcd920290" +checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" dependencies = [ "libc", "librocksdb-sys", @@ -10938,9 +10913,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.4.2+5.2.1-patched.2" +version = "0.5.1+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5844e429d797c62945a566f8da4e24c7fe3fbd5d6617fd8bf7a0b7dc1ee0f22e" +checksum = "931e876f91fed0827f863a2d153897790da0b24d882c721a79cb3beb0b903261" dependencies = [ "cc", "fs_extra", @@ -11203,9 +11178,9 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5704f0d6130bd83608e4370c19e20c8a6ec03e80363e493d0234efca005265a" +checksum = "f0dae77b1daad50cd3ed94c506d2dab27e2e47f7b5153a6d4b1992bb3f6028cb" dependencies = [ "criterion", "hash-db", @@ -11792,7 +11767,7 @@ dependencies = [ "enum-iterator", "enumset", "leb128", - "libloading 0.7.0", + "libloading", "loupe", "object 0.28.3", "rkyv", diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml index a9c367ae8aa3d..42953da837100 100644 --- a/bin/node/bench/Cargo.toml +++ b/bin/node/bench/Cargo.toml @@ -23,8 +23,8 @@ sp-state-machine = { version = "0.12.0", path = "../../../primitives/state-machi serde = "1.0.136" serde_json = "1.0.85" derive_more = { version = "0.99.17", default-features = false, features = ["display"] } -kvdb = "0.11.0" -kvdb-rocksdb = "0.15.1" +kvdb = "0.12.0" +kvdb-rocksdb = "0.16.0" sp-trie = { version = "6.0.0", path = "../../../primitives/trie" } sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } @@ -37,7 +37,7 @@ tempfile = "3.1.0" fs_extra = "1" rand = { version = "0.7.2", features = ["small_rng"] } lazy_static = "1.4.0" -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } parity-db = { version = "0.3" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } diff --git a/bin/node/bench/src/tempdb.rs b/bin/node/bench/src/tempdb.rs index 22c5980fd6524..eb3bb1d3fccd7 100644 --- a/bin/node/bench/src/tempdb.rs +++ b/bin/node/bench/src/tempdb.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use kvdb::{DBTransaction, KeyValueDB}; +use kvdb::{DBKeyValue, DBTransaction, KeyValueDB}; use kvdb_rocksdb::{Database, DatabaseConfig}; use std::{io, path::PathBuf, sync::Arc}; @@ -38,7 +38,7 @@ impl KeyValueDB for ParityDbWrapper { } /// Get a value by partial key. Only works for flushed data. - fn get_by_prefix(&self, _col: u32, _prefix: &[u8]) -> Option> { + fn get_by_prefix(&self, _col: u32, _prefix: &[u8]) -> io::Result>> { unimplemented!() } @@ -56,7 +56,7 @@ impl KeyValueDB for ParityDbWrapper { } /// Iterate over flushed data for a given column. - fn iter<'a>(&'a self, _col: u32) -> Box, Box<[u8]>)> + 'a> { + fn iter<'a>(&'a self, _col: u32) -> Box> + 'a> { unimplemented!() } @@ -65,12 +65,7 @@ impl KeyValueDB for ParityDbWrapper { &'a self, _col: u32, _prefix: &'a [u8], - ) -> Box, Box<[u8]>)> + 'a> { - unimplemented!() - } - - /// Attempt to replace this database with a new one located at the given path. - fn restore(&self, _new_db: &str) -> io::Result<()> { + ) -> Box> + 'a> { unimplemented!() } } diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index b37c5aa7ca2e8..aacae0ff7a0d9 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -296,7 +296,7 @@ mod tests { let b2 = ExtrinsicAddress::from_str("0 0"); let b3 = ExtrinsicAddress::from_str("0x0012345f"); - assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); + assert_eq!(e0, Ok(ExtrinsicAddress::Bytes(vec![0x12, 0x34]))); assert_eq!( b0, Ok(ExtrinsicAddress::Block( @@ -305,7 +305,7 @@ mod tests { )) ); assert_eq!(b1, Ok(ExtrinsicAddress::Block(BlockAddress::Number(1234), 0))); - assert_eq!(b2, Ok(ExtrinsicAddress::Block(BlockAddress::Number(0), 0))); + assert_eq!(b2, Ok(ExtrinsicAddress::Bytes(vec![0, 0]))); assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); } } diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 7f564ae642433..b21038b77564f 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -17,9 +17,9 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive", ] } hash-db = "0.15.2" -kvdb = "0.11.0" -kvdb-memorydb = "0.11.0" -kvdb-rocksdb = { version = "0.15.2", optional = true } +kvdb = "0.12.0" +kvdb-memorydb = "0.12.0" +kvdb-rocksdb = { version = "0.16.0", optional = true } linked-hash-map = "0.5.4" log = "0.4.17" parity-db = "0.3.16" @@ -36,7 +36,7 @@ sp-trie = { version = "6.0.0", path = "../../primitives/trie" } [dev-dependencies] criterion = "0.3.3" -kvdb-rocksdb = "0.15.1" +kvdb-rocksdb = "0.16.0" rand = "0.8.4" tempfile = "3.1.0" quickcheck = { version = "1.0.3", default-features = false } diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 292905663a20b..51750bf689759 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -115,7 +115,7 @@ pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> Upgra /// 2) transactions column is added; fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { let db_cfg = DatabaseConfig::with_columns(V1_NUM_COLUMNS); - let db = Database::open(&db_cfg, db_path)?; + let mut db = Database::open(&db_cfg, db_path)?; db.add_column().map_err(Into::into) } @@ -126,7 +126,10 @@ fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> Upgr let db = Database::open(&db_cfg, db_path)?; // Get all the keys we need to update - let keys: Vec<_> = db.iter(columns::JUSTIFICATIONS).map(|entry| entry.0).collect(); + let keys: Vec<_> = db + .iter(columns::JUSTIFICATIONS) + .map(|r| r.map(|e| e.0)) + 
.collect::>()?; // Read and update each entry let mut transaction = db.transaction(); @@ -152,7 +155,7 @@ fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> Upgr /// 2) BODY_INDEX column is added; fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> UpgradeResult<()> { let db_cfg = DatabaseConfig::with_columns(V3_NUM_COLUMNS); - let db = Database::open(&db_cfg, db_path)?; + let mut db = Database::open(&db_cfg, db_path)?; db.add_column().map_err(Into::into) } diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index b3ac5d892fd58..073199d005fd1 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -17,7 +17,7 @@ ansi_term = "0.12.1" futures = "0.3.21" futures-timer = "3.0.1" log = "0.4.17" -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index e46c65cf018f5..42bd2ae7276e2 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -80,7 +80,7 @@ sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } sc-sysinfo = { version = "6.0.0-dev", path = "../sysinfo" } tracing = "0.1.29" tracing-futures = { version = "0.2.4" } -parity-util-mem = { version = "0.11.0", default-features = false, features = [ +parity-util-mem = { version = "0.12.0", default-features = false, features = [ "primitive-types", ] } async-trait = "0.1.57" diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 4243968ec79b4..7f9a502aef8e9 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } log = "0.4.17" -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } parity-util-mem-derive = "0.1.0" parking_lot = "0.12.1" sc-client-api = { version = "4.0.0-dev", path = "../api" } diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index cd7cb297e8c4a..5e005f5523ae8 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -18,7 +18,7 @@ futures = "0.3.21" futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = "0.4.17" -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } parking_lot = "0.12.1" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0.30" diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 98db3aa6aa49d..be9cb1f1bf316 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -44,7 +44,7 @@ serde_json = "1.0.85" assert_matches = "1.3.0" pretty_assertions = "1.2.1" frame-system = { version = "4.0.0-dev", path = "../system" } -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = 
["primitive-types"] } [features] default = ["std"] diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index d7046b3254699..60eac2247e830 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -28,7 +28,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] criterion = "0.3" -primitive-types = "0.11.1" +primitive-types = "0.12.0" sp-core = { version = "6.0.0", features = ["full_crypto"], path = "../core" } rand = "0.7.2" diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index 990106f990323..b1ffa746b6b63 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] honggfuzz = "0.5.49" num-bigint = "0.2" -primitive-types = "0.11.1" +primitive-types = "0.12.0" sp-arithmetic = { version = "5.0.0", path = ".." } [[bin]] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 65d989a74c030..b7bc6dfdce496 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -21,8 +21,8 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" log = { version = "0.4.17", default-features = false } serde = { version = "1.0.136", optional = true, features = ["derive"] } byteorder = { version = "1.3.2", default-features = false } -primitive-types = { version = "0.11.1", default-features = false, features = ["codec", "scale-info"] } -impl-serde = { version = "0.3.0", optional = true } +primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] } +impl-serde = { version = "0.4.0", optional = true } wasmi = { version = "0.13", optional = true } hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } @@ -40,7 +40,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } sp-storage = { version = "6.0.0", default-features = false, path = "../storage" } sp-externalities = { version = "0.12.0", optional = true, path = "../externalities" } -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } futures = { version = "0.3.21", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { version = "1.0.30", optional = true } diff --git a/primitives/core/src/uint.rs b/primitives/core/src/uint.rs index f4eb3a19ac36c..4bf914bde2ee1 100644 --- a/primitives/core/src/uint.rs +++ b/primitives/core/src/uint.rs @@ -35,7 +35,6 @@ mod tests { ($name::from(2), "0x2"), ($name::from(10), "0xa"), ($name::from(15), "0xf"), - ($name::from(15), "0xf"), ($name::from(16), "0x10"), ($name::from(1_000), "0x3e8"), ($name::from(100_000), "0x186a0"), @@ -52,8 +51,6 @@ mod tests { assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } }; } diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index a3f09536f4f5c..f19a647fed032 100644 --- 
a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -11,5 +11,5 @@ documentation = "https://docs.rs/sp-database" readme = "README.md" [dependencies] -kvdb = "0.11.0" +kvdb = "0.12.0" parking_lot = "0.12.1" diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index 51367d40d0cd0..e7f0cee3f140f 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -22,7 +22,7 @@ sp-runtime-interface-proc-macro = { version = "5.0.0", path = "proc-macro" } sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" } codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["bytes"] } static_assertions = "1.0.0" -primitive-types = { version = "0.11.1", default-features = false } +primitive-types = { version = "0.12.0", default-features = false } sp-storage = { version = "6.0.0", default-features = false, path = "../storage" } impl-trait-for-tuples = "0.2.2" diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 1d1a7a2c38b1d..01594ed69e312 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -19,7 +19,7 @@ either = { version = "1.5", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } paste = "1.0" rand = { version = "0.7.2", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index b37a4eb4b331d..d04a88d129d34 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -impl-serde = { version = "0.3.1", optional = true } +impl-serde = { version = "0.4.0", optional = true } ref-cast = "1.0.0" serde = { version = "1.0.136", features = ["derive"], optional = true } sp-debug-derive = { version = "4.0.0", default-features = false, path = "../debug-derive" } diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 2a20addf66b2b..28fa6e6213daf 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } serde = { version = "1.0.136", features = ["derive"], optional = true } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../application-crypto" } sp-core = { version = "6.0.0", default-features = false, path = "../core" } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 6d2d57b590e6a..2636648f40387 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -24,7 +24,7 @@ hashbrown = { version = "0.12.3", optional = 
true } hash-db = { version = "0.15.2", default-features = false } lazy_static = { version = "1.4.0", optional = true } lru = { version = "0.7.5", optional = true } -memory-db = { version = "0.29.0", default-features = false } +memory-db = { version = "0.30.0", default-features = false } nohash-hasher = { version = "0.2.0", optional = true } parking_lot = { version = "0.12.1", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } @@ -38,7 +38,7 @@ sp-std = { version = "4.0.0", default-features = false, path = "../std" } [dev-dependencies] array-bytes = "4.1" criterion = "0.3.3" -trie-bench = "0.31.0" +trie-bench = "0.32.0" trie-standardmap = "0.15.2" sp-runtime = { version = "6.0.0", path = "../runtime" } diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 1750ebb8cd90b..0dcbbd81fd93f 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -impl-serde = { version = "0.3.1", optional = true } +impl-serde = { version = "0.4.0", optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } serde = { version = "1.0.136", features = ["derive"], optional = true } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 744cc527e6012..698351cd69f64 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -23,7 +23,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } sp-inherents = { version = "4.0.0-dev", default-features = false, path = "../../primitives/inherents" } sp-keyring = { version = "6.0.0", optional = true, path = "../../primitives/keyring" } -memory-db = { version = "0.29.0", default-features = false } +memory-db = { version = "0.30.0", default-features = false } sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../primitives/offchain" } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "4.0.0", default-features = false, path = "../../primitives/std" } @@ -42,7 +42,7 @@ sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = sp-trie = { version = "6.0.0", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" } trie-db = { version = "0.24.0", default-features = false } -parity-util-mem = { version = "0.11.0", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.12.0", default-features = false, features = ["primitive-types"] } sc-service = { version = "0.10.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } sp-state-machine = { version = "0.12.0", default-features = false, path = "../../primitives/state-machine" } sp-externalities = { version = "0.12.0", default-features = false, path = "../../primitives/externalities" } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 108d0d338c2b3..8eedcb870a3d0 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ 
b/utils/frame/benchmarking-cli/Cargo.toml @@ -22,11 +22,11 @@ handlebars = "4.2.2" hash-db = "0.15.2" Inflector = "0.11.4" itertools = "0.10.3" -kvdb = "0.11.0" +kvdb = "0.12.0" lazy_static = "1.4.0" linked-hash-map = "0.5.4" log = "0.4.17" -memory-db = "0.29.0" +memory-db = "0.30.0" rand = { version = "0.8.4", features = ["small_rng"] } rand_pcg = "0.3.1" serde = "1.0.136" From f447beec6eefbf452520b90cb0d199eaaf114342 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?= Date: Thu, 6 Oct 2022 10:11:53 +0200 Subject: [PATCH 214/348] Use `Option` for contract dry-runs (#12429) --- Cargo.lock | 14 +---- Cargo.toml | 1 - bin/node/runtime/Cargo.toml | 2 - bin/node/runtime/src/lib.rs | 15 +++-- frame/contracts/Cargo.toml | 1 + frame/contracts/runtime-api/Cargo.toml | 34 ----------- frame/contracts/runtime-api/README.md | 7 --- frame/contracts/runtime-api/src/lib.rs | 85 -------------------------- frame/contracts/src/lib.rs | 57 ++++++++++++++++- 9 files changed, 65 insertions(+), 151 deletions(-) delete mode 100644 frame/contracts/runtime-api/Cargo.toml delete mode 100644 frame/contracts/runtime-api/README.md delete mode 100644 frame/contracts/runtime-api/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 52b74628f07d3..9136df1cd9299 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3358,7 +3358,6 @@ dependencies = [ "pallet-collective", "pallet-contracts", "pallet-contracts-primitives", - "pallet-contracts-runtime-api", "pallet-conviction-voting", "pallet-democracy", "pallet-election-provider-multi-phase", @@ -5487,6 +5486,7 @@ dependencies = [ "scale-info", "serde", "smallvec", + "sp-api", "sp-core", "sp-io", "sp-keystore", @@ -5518,18 +5518,6 @@ dependencies = [ "syn", ] -[[package]] -name = "pallet-contracts-runtime-api" -version = "4.0.0-dev" -dependencies = [ - "pallet-contracts-primitives", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" diff --git a/Cargo.toml b/Cargo.toml index 02bc6aede8669..e203cbbee7e0d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,7 +87,6 @@ members = [ "frame/collective", "frame/contracts", "frame/contracts/primitives", - "frame/contracts/runtime-api", "frame/conviction-voting", "frame/democracy", "frame/fast-unstake", diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index ac3afc19da50f..6940e968e28e7 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -62,7 +62,6 @@ pallet-child-bounties = { version = "4.0.0-dev", default-features = false, path pallet-collective = { version = "4.0.0-dev", default-features = false, path = "../../../frame/collective" } pallet-contracts = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts" } pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../../../frame/contracts/primitives/" } -pallet-contracts-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/contracts/runtime-api/" } pallet-conviction-voting = { version = "4.0.0-dev", default-features = false, path = "../../../frame/conviction-voting" } pallet-democracy = { version = "4.0.0-dev", default-features = false, path = "../../../frame/democracy" } pallet-election-provider-multi-phase = { version = "4.0.0-dev", default-features = false, path = "../../../frame/election-provider-multi-phase" } @@ -139,7 +138,6 @@ std = [ "pallet-collective/std", "pallet-contracts/std", "pallet-contracts-primitives/std", - 
"pallet-contracts-runtime-api/std", "pallet-conviction-voting/std", "pallet-democracy/std", "pallet-elections-phragmen/std", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d10448cc2d183..4a35b972ff7de 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1939,33 +1939,32 @@ impl_runtime_apis! { } } - impl pallet_contracts_runtime_api::ContractsApi< - Block, AccountId, Balance, BlockNumber, Hash, - > - for Runtime + impl pallet_contracts::ContractsApi for Runtime { fn call( origin: AccountId, dest: AccountId, value: Balance, - gas_limit: u64, + gas_limit: Option, storage_deposit_limit: Option, input_data: Vec, ) -> pallet_contracts_primitives::ContractExecResult { - Contracts::bare_call(origin, dest, value, Weight::from_ref_time(gas_limit), storage_deposit_limit, input_data, true) + let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); + Contracts::bare_call(origin, dest, value, gas_limit, storage_deposit_limit, input_data, true) } fn instantiate( origin: AccountId, value: Balance, - gas_limit: u64, + gas_limit: Option, storage_deposit_limit: Option, code: pallet_contracts_primitives::Code, data: Vec, salt: Vec, ) -> pallet_contracts_primitives::ContractInstantiateResult { - Contracts::bare_instantiate(origin, value, Weight::from_ref_time(gas_limit), storage_deposit_limit, code, data, salt, true) + let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); + Contracts::bare_instantiate(origin, value, gas_limit, storage_deposit_limit, code, data, salt, true) } fn upload_code( diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 7c3b677e06436..7483ec8935890 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -38,6 +38,7 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../su frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "primitives" } pallet-contracts-proc-macro = { version = "4.0.0-dev", path = "proc-macro" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-core = { version = "6.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "6.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../primitives/runtime" } diff --git a/frame/contracts/runtime-api/Cargo.toml b/frame/contracts/runtime-api/Cargo.toml deleted file mode 100644 index 05b0e05d4c568..0000000000000 --- a/frame/contracts/runtime-api/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "pallet-contracts-runtime-api" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -edition = "2021" -license = "Apache-2.0" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -description = "Runtime API definition used to provide dry-run capabilities" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } - -# Substrate Dependencies -pallet-contracts-primitives = { version = "6.0.0", default-features = false, path = "../primitives" } -sp-api = { version = "4.0.0-dev", default-features = false, 
path = "../../../primitives/api" } -sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-std = { version = "4.0.0", default-features = false, path = "../../../primitives/std" } - -[features] -default = ["std"] -std = [ - "sp-api/std", - "codec/std", - "scale-info/std", - "sp-std/std", - "sp-runtime/std", - "pallet-contracts-primitives/std", -] diff --git a/frame/contracts/runtime-api/README.md b/frame/contracts/runtime-api/README.md deleted file mode 100644 index fed285b23b2ac..0000000000000 --- a/frame/contracts/runtime-api/README.md +++ /dev/null @@ -1,7 +0,0 @@ -Runtime API definition used to provide dry-run capabilities - -This API should be imported and implemented by the runtime, -of a node that wants to provide clients with dry-run -capabilities. - -License: Apache-2.0 \ No newline at end of file diff --git a/frame/contracts/runtime-api/src/lib.rs b/frame/contracts/runtime-api/src/lib.rs deleted file mode 100644 index 79fd20c8c0163..0000000000000 --- a/frame/contracts/runtime-api/src/lib.rs +++ /dev/null @@ -1,85 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Runtime API definition used to provide dry-run capabilities. -//! -//! This API should be imported and implemented by the runtime, -//! of a node that wants to provide clients with dry-run -//! capabilities. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::Codec; -use pallet_contracts_primitives::{ - Code, CodeUploadResult, ContractExecResult, ContractInstantiateResult, GetStorageResult, -}; -use sp_std::vec::Vec; - -sp_api::decl_runtime_apis! { - /// The API to interact with contracts without using executive. - pub trait ContractsApi where - AccountId: Codec, - Balance: Codec, - BlockNumber: Codec, - Hash: Codec, - { - /// Perform a call from a specified account to a given contract. - /// - /// See `pallet_contracts::Pallet::call`. - fn call( - origin: AccountId, - dest: AccountId, - value: Balance, - gas_limit: u64, - storage_deposit_limit: Option, - input_data: Vec, - ) -> ContractExecResult; - - /// Instantiate a new contract. - /// - /// See `pallet_contracts::Pallet::instantiate`. - fn instantiate( - origin: AccountId, - value: Balance, - gas_limit: u64, - storage_deposit_limit: Option, - code: Code, - data: Vec, - salt: Vec, - ) -> ContractInstantiateResult; - - - /// Upload new code without instantiating a contract from it. - /// - /// See `pallet_contracts::Pallet::upload_code`. - fn upload_code( - origin: AccountId, - code: Vec, - storage_deposit_limit: Option, - ) -> CodeUploadResult; - - /// Query a given storage key in a given contract. - /// - /// Returns `Ok(Some(Vec))` if the storage value exists under the given key in the - /// specified account and `Ok(None)` if it doesn't. If the account specified by the address - /// doesn't exist, or doesn't have a contract then `Err` is returned. 
- fn get_storage( - address: AccountId, - key: Vec, - ) -> GetStorageResult; - } -} diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 0c90c3ff433b4..794b172cc6282 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -105,7 +105,7 @@ use crate::{ wasm::{OwnerInfo, PrefabWasmModule}, weights::WeightInfo, }; -use codec::{Encode, HasCompact}; +use codec::{Codec, Encode, HasCompact}; use frame_support::{ dispatch::{Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo}, ensure, @@ -1171,3 +1171,58 @@ where Weight::from(gas_limit).set_proof_size(u64::from(T::MaxCodeLen::get()) * 2) } } + +sp_api::decl_runtime_apis! { + /// The API used to dry-run contract interactions. + pub trait ContractsApi where + AccountId: Codec, + Balance: Codec, + BlockNumber: Codec, + Hash: Codec, + { + /// Perform a call from a specified account to a given contract. + /// + /// See [`crate::Pallet::bare_call`]. + fn call( + origin: AccountId, + dest: AccountId, + value: Balance, + gas_limit: Option, + storage_deposit_limit: Option, + input_data: Vec, + ) -> ContractExecResult; + + /// Instantiate a new contract. + /// + /// See `[crate::Pallet::bare_instantiate]`. + fn instantiate( + origin: AccountId, + value: Balance, + gas_limit: Option, + storage_deposit_limit: Option, + code: Code, + data: Vec, + salt: Vec, + ) -> ContractInstantiateResult; + + + /// Upload new code without instantiating a contract from it. + /// + /// See [`crate::Pallet::bare_upload_code`]. + fn upload_code( + origin: AccountId, + code: Vec, + storage_deposit_limit: Option, + ) -> CodeUploadResult; + + /// Query a given storage key in a given contract. + /// + /// Returns `Ok(Some(Vec))` if the storage value exists under the given key in the + /// specified account and `Ok(None)` if it doesn't. If the account specified by the address + /// doesn't exist, or doesn't have a contract then `Err` is returned. 
+ fn get_storage( + address: AccountId, + key: Vec, + ) -> GetStorageResult; + } +} From 3cb5a4069974cdfae3f6aad18626e1dde19ce3fb Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Thu, 6 Oct 2022 12:20:27 +0300 Subject: [PATCH 215/348] Add pluggable BEEFY payload constructors (#12428) * primitives/beefy: move Payload to its own file * primitives/beefy: add Payload tests * primitives/beefy: add MmrRootProvider as custom BEEFY payload provider * client/beefy: use generic BEEFY 'PayloadProvider' * primitives/beefy: rename Payload::new to Payload::from_single_entry for clarity * fix visibility * fix cargo doc --- Cargo.lock | 1 + client/beefy/rpc/src/lib.rs | 5 +- client/beefy/src/communication/gossip.rs | 8 +- client/beefy/src/justification.rs | 4 +- client/beefy/src/lib.rs | 13 ++- client/beefy/src/tests.rs | 5 +- client/beefy/src/worker.rs | 90 ++++++------------- primitives/beefy/Cargo.toml | 2 + primitives/beefy/src/commitment.rs | 82 ++++-------------- primitives/beefy/src/lib.rs | 7 +- primitives/beefy/src/mmr.rs | 105 ++++++++++++++++++++++- primitives/beefy/src/payload.rs | 105 +++++++++++++++++++++++ primitives/beefy/src/witness.rs | 8 +- 13 files changed, 284 insertions(+), 151 deletions(-) create mode 100644 primitives/beefy/src/payload.rs diff --git a/Cargo.lock b/Cargo.lock index 9136df1cd9299..a6a7c09514325 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -539,6 +539,7 @@ dependencies = [ "sp-application-crypto", "sp-core", "sp-keystore", + "sp-mmr-primitives", "sp-runtime", "sp-std", ] diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index 6f21abc616db8..d29ed433c38db 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -170,7 +170,7 @@ mod tests { communication::notification::BeefyVersionedFinalityProofSender, justification::BeefyVersionedFinalityProof, }; - use beefy_primitives::{known_payload_ids, Payload, SignedCommitment}; + use beefy_primitives::{known_payloads, Payload, SignedCommitment}; use codec::{Decode, Encode}; use jsonrpsee::{types::EmptyParams, RpcModule}; use sp_runtime::traits::{BlakeTwo256, Hash}; @@ -266,7 +266,8 @@ mod tests { } fn create_finality_proof() -> BeefyVersionedFinalityProof { - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); BeefyVersionedFinalityProof::::V1(SignedCommitment { commitment: beefy_primitives::Commitment { payload, diff --git a/client/beefy/src/communication/gossip.rs b/client/beefy/src/communication/gossip.rs index 6c41a2e48932a..520548b943f96 100644 --- a/client/beefy/src/communication/gossip.rs +++ b/client/beefy/src/communication/gossip.rs @@ -237,8 +237,7 @@ mod tests { use crate::keystore::{tests::Keyring, BeefyKeystore}; use beefy_primitives::{ - crypto::Signature, known_payload_ids, Commitment, MmrRootHash, Payload, VoteMessage, - KEY_TYPE, + crypto::Signature, known_payloads, Commitment, MmrRootHash, Payload, VoteMessage, KEY_TYPE, }; use super::*; @@ -348,7 +347,10 @@ mod tests { } fn dummy_vote(block_number: u64) -> VoteMessage { - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, MmrRootHash::default().encode()); + let payload = Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + MmrRootHash::default().encode(), + ); let commitment = Commitment { payload, block_number, validator_set_id: 0 }; let signature = sign_commitment(&Keyring::Alice, &commitment); diff --git a/client/beefy/src/justification.rs 
b/client/beefy/src/justification.rs index d9be18593dac7..7243c692727f0 100644 --- a/client/beefy/src/justification.rs +++ b/client/beefy/src/justification.rs @@ -81,7 +81,7 @@ fn verify_with_validator_set( #[cfg(test)] pub(crate) mod tests { use beefy_primitives::{ - known_payload_ids, Commitment, Payload, SignedCommitment, VersionedFinalityProof, + known_payloads, Commitment, Payload, SignedCommitment, VersionedFinalityProof, }; use substrate_test_runtime_client::runtime::Block; @@ -94,7 +94,7 @@ pub(crate) mod tests { keys: &[Keyring], ) -> BeefyVersionedFinalityProof { let commitment = Commitment { - payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), block_number: block_num, validator_set_id: validator_set.id(), }; diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs index 760fc753b18a3..1c61cac072207 100644 --- a/client/beefy/src/lib.rs +++ b/client/beefy/src/lib.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use beefy_primitives::{BeefyApi, MmrRootHash}; +use beefy_primitives::{BeefyApi, MmrRootHash, PayloadProvider}; use parking_lot::Mutex; use prometheus::Registry; use sc_client_api::{Backend, BlockBackend, BlockchainEvents, Finalizer}; @@ -167,11 +167,13 @@ pub struct BeefyNetworkParams { } /// BEEFY gadget initialization parameters. -pub struct BeefyParams { +pub struct BeefyParams { /// BEEFY client pub client: Arc, /// Client Backend pub backend: Arc, + /// BEEFY Payload provider + pub payload_provider: P, /// Runtime Api Provider pub runtime: Arc, /// Local key store @@ -191,11 +193,12 @@ pub struct BeefyParams { /// Start the BEEFY gadget. /// /// This is a thin shim around running and awaiting a BEEFY worker. 
-pub async fn start_beefy_gadget(beefy_params: BeefyParams) +pub async fn start_beefy_gadget(beefy_params: BeefyParams) where B: Block, BE: Backend, C: Client + BlockBackend, + P: PayloadProvider, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, N: GossipNetwork + NetworkRequest + SyncOracle + Send + Sync + 'static, @@ -203,6 +206,7 @@ where let BeefyParams { client, backend, + payload_provider, runtime, key_store, network_params, @@ -249,6 +253,7 @@ where let worker_params = worker::WorkerParams { client, backend, + payload_provider, runtime, network, key_store: key_store.into(), @@ -261,7 +266,7 @@ where min_block_delta, }; - let worker = worker::BeefyWorker::<_, _, _, _, _>::new(worker_params); + let worker = worker::BeefyWorker::<_, _, _, _, _, _>::new(worker_params); futures::future::join(worker.run(), on_demand_justifications_handler.run()).await; } diff --git a/client/beefy/src/tests.rs b/client/beefy/src/tests.rs index 8057bd7cab7a5..24cf89acd5734 100644 --- a/client/beefy/src/tests.rs +++ b/client/beefy/src/tests.rs @@ -38,6 +38,7 @@ use sc_utils::notification::NotificationReceiver; use beefy_primitives::{ crypto::{AuthorityId, Signature}, + mmr::MmrRootProvider, BeefyApi, ConsensusLog, MmrRootHash, ValidatorSet, VersionedFinalityProof, BEEFY_ENGINE_ID, KEY_TYPE as BeefyKeyType, }; @@ -372,10 +373,12 @@ where justifications_protocol_name: on_demand_justif_handler.protocol_name(), _phantom: PhantomData, }; + let payload_provider = MmrRootProvider::new(api.clone()); let beefy_params = crate::BeefyParams { client: peer.client().as_client(), backend: peer.client().as_backend(), + payload_provider, runtime: api.clone(), key_store: Some(keystore), network_params, @@ -384,7 +387,7 @@ where prometheus_registry: None, on_demand_justifications_handler: on_demand_justif_handler, }; - let task = crate::start_beefy_gadget::<_, _, _, _, _>(beefy_params); + let task = crate::start_beefy_gadget::<_, _, _, _, _, _>(beefy_params); fn assert_send(_: &T) {} assert_send(&task); diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs index 5bdc72357c412..a21807c8ee875 100644 --- a/client/beefy/src/worker.rs +++ b/client/beefy/src/worker.rs @@ -48,7 +48,7 @@ use sp_runtime::{ use beefy_primitives::{ crypto::{AuthorityId, Signature}, - known_payload_ids, BeefyApi, Commitment, ConsensusLog, MmrRootHash, Payload, SignedCommitment, + BeefyApi, Commitment, ConsensusLog, MmrRootHash, Payload, PayloadProvider, SignedCommitment, ValidatorSet, VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, }; @@ -194,9 +194,10 @@ impl VoterOracle { } } -pub(crate) struct WorkerParams { +pub(crate) struct WorkerParams { pub client: Arc, pub backend: Arc, + pub payload_provider: P, pub runtime: Arc, pub network: N, pub key_store: BeefyKeystore, @@ -210,10 +211,11 @@ pub(crate) struct WorkerParams { } /// A BEEFY worker plays the BEEFY protocol -pub(crate) struct BeefyWorker { +pub(crate) struct BeefyWorker { // utilities client: Arc, backend: Arc, + payload_provider: P, runtime: Arc, network: N, key_store: BeefyKeystore, @@ -243,11 +245,12 @@ pub(crate) struct BeefyWorker { voting_oracle: VoterOracle, } -impl BeefyWorker +impl BeefyWorker where B: Block + Codec, BE: Backend, C: Client, + P: PayloadProvider, R: ProvideRuntimeApi, R::Api: BeefyApi + MmrApi, N: NetworkEventStream + NetworkRequest + SyncOracle + Send + Sync + Clone + 'static, @@ -258,10 +261,11 @@ where /// BEEFY pallet has been deployed on-chain. 
/// /// The BEEFY pallet is needed in order to keep track of the BEEFY authority set. - pub(crate) fn new(worker_params: WorkerParams) -> Self { + pub(crate) fn new(worker_params: WorkerParams) -> Self { let WorkerParams { client, backend, + payload_provider, runtime, key_store, network, @@ -282,6 +286,7 @@ where BeefyWorker { client: client.clone(), backend, + payload_provider, runtime, network, known_peers, @@ -299,17 +304,6 @@ where } } - /// Simple wrapper that gets MMR root from header digests or from client state. - fn get_mmr_root_digest(&self, header: &B::Header) -> Option { - find_mmr_root_digest::(header).or_else(|| { - self.runtime - .runtime_api() - .mmr_root(&BlockId::hash(header.hash())) - .ok() - .and_then(|r| r.ok()) - }) - } - /// Verify `active` validator set for `block` against the key store /// /// We want to make sure that we have _at least one_ key in our keystore that @@ -621,13 +615,12 @@ where }; let target_hash = target_header.hash(); - let mmr_root = if let Some(hash) = self.get_mmr_root_digest(&target_header) { + let payload = if let Some(hash) = self.payload_provider.payload(&target_header) { hash } else { warn!(target: "beefy", "🥩 No MMR root digest found for: {:?}", target_hash); return Ok(()) }; - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, mmr_root.encode()); let rounds = self.voting_oracle.rounds_mut().ok_or(Error::UninitSession)?; if !rounds.should_self_vote(&(payload.clone(), target_number)) { @@ -917,20 +910,6 @@ where } } -/// Extract the MMR root hash from a digest in the given header, if it exists. -fn find_mmr_root_digest(header: &B::Header) -> Option -where - B: Block, -{ - let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); - - let filter = |log: ConsensusLog| match log { - ConsensusLog::MmrRoot(root) => Some(root), - _ => None, - }; - header.digest().convert_first(|l| l.try_to(id).and_then(filter)) -} - /// Scan the `header` digest log for a BEEFY validator set change. Return either the new /// validator set or `None` in case no validator set change has been signaled. 
fn find_authorities_change(header: &B::Header) -> Option> @@ -1016,8 +995,8 @@ pub(crate) mod tests { BeefyRPCLinks, }; + use beefy_primitives::{known_payloads, mmr::MmrRootProvider}; use futures::{executor::block_on, future::poll_fn, task::Poll}; - use sc_client_api::{Backend as BackendT, HeaderBackend}; use sc_network::NetworkService; use sc_network_test::{PeersFullClient, TestNetFactory}; @@ -1032,7 +1011,14 @@ pub(crate) mod tests { peer: &BeefyPeer, key: &Keyring, min_block_delta: u32, - ) -> BeefyWorker>> { + ) -> BeefyWorker< + Block, + Backend, + PeersFullClient, + MmrRootProvider, + TestApi, + Arc>, + > { let keystore = create_beefy_keystore(*key); let (to_rpc_justif_sender, from_voter_justif_stream) = @@ -1064,9 +1050,11 @@ pub(crate) mod tests { "/beefy/justifs/1".into(), known_peers.clone(), ); + let payload_provider = MmrRootProvider::new(api.clone()); let worker_params = crate::worker::WorkerParams { client: peer.client().as_client(), backend: peer.client().as_backend(), + payload_provider, runtime: api, key_store: Some(keystore).into(), known_peers, @@ -1078,7 +1066,7 @@ pub(crate) mod tests { network, on_demand_justifications, }; - BeefyWorker::<_, _, _, _, _>::new(worker_params) + BeefyWorker::<_, _, _, _, _, _>::new(worker_params) } #[test] @@ -1300,30 +1288,6 @@ pub(crate) mod tests { assert_eq!(extracted, Some(validator_set)); } - #[test] - fn extract_mmr_root_digest() { - let mut header = Header::new( - 1u32.into(), - Default::default(), - Default::default(), - Default::default(), - Digest::default(), - ); - - // verify empty digest shows nothing - assert!(find_mmr_root_digest::(&header).is_none()); - - let mmr_root_hash = H256::random(); - header.digest_mut().push(DigestItem::Consensus( - BEEFY_ENGINE_ID, - ConsensusLog::::MmrRoot(mmr_root_hash).encode(), - )); - - // verify validator set is correctly extracted from digest - let extracted = find_mmr_root_digest::(&header); - assert_eq!(extracted, Some(mmr_root_hash)); - } - #[test] fn keystore_vs_validator_set() { let keys = &[Keyring::Alice]; @@ -1363,7 +1327,7 @@ pub(crate) mod tests { let create_finality_proof = |block_num: NumberFor| { let commitment = Commitment { - payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), block_number: block_num, validator_set_id: validator_set.id(), }; @@ -1482,7 +1446,7 @@ pub(crate) mod tests { block_number: NumberFor, ) -> VoteMessage, AuthorityId, Signature> { let commitment = Commitment { - payload: Payload::new(*b"BF", vec![]), + payload: Payload::from_single_entry(*b"BF", vec![]), block_number, validator_set_id: 0, }; @@ -1574,7 +1538,7 @@ pub(crate) mod tests { // import/append BEEFY justification for session boundary block 10 let commitment = Commitment { - payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), block_number: 10, validator_set_id: validator_set.id(), }; @@ -1608,7 +1572,7 @@ pub(crate) mod tests { // import/append BEEFY justification for block 12 let commitment = Commitment { - payload: Payload::new(known_payload_ids::MMR_ROOT_ID, vec![]), + payload: Payload::from_single_entry(known_payloads::MMR_ROOT_ID, vec![]), block_number: 12, validator_set_id: validator_set.id(), }; diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml index 1c1afde36ce5c..fe6ce23337c86 100644 --- a/primitives/beefy/Cargo.toml +++ b/primitives/beefy/Cargo.toml @@ -18,6 +18,7 @@ scale-info = { version = 
"2.1.1", default-features = false, features = ["derive" sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../application-crypto" } sp-core = { version = "6.0.0", default-features = false, path = "../core" } +sp-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "../merkle-mountain-range" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } @@ -33,6 +34,7 @@ std = [ "sp-api/std", "sp-application-crypto/std", "sp-core/std", + "sp-mmr-primitives/std", "sp-runtime/std", "sp-std/std", ] diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs index 0e22c8d56d937..5765ff3609dbb 100644 --- a/primitives/beefy/src/commitment.rs +++ b/primitives/beefy/src/commitment.rs @@ -19,61 +19,7 @@ use codec::{Decode, Encode, Error, Input}; use scale_info::TypeInfo; use sp_std::{cmp, prelude::*}; -use crate::ValidatorSetId; - -/// Id of different payloads in the [`Commitment`] data -pub type BeefyPayloadId = [u8; 2]; - -/// Registry of all known [`BeefyPayloadId`]. -pub mod known_payload_ids { - use crate::BeefyPayloadId; - - /// A [`Payload`](super::Payload) identifier for Merkle Mountain Range root hash. - /// - /// Encoded value should contain a [`crate::MmrRootHash`] type (i.e. 32-bytes hash). - pub const MMR_ROOT_ID: BeefyPayloadId = *b"mh"; -} - -/// A BEEFY payload type allowing for future extensibility of adding additional kinds of payloads. -/// -/// The idea is to store a vector of SCALE-encoded values with an extra identifier. -/// Identifiers MUST be sorted by the [`BeefyPayloadId`] to allow efficient lookup of expected -/// value. Duplicated identifiers are disallowed. It's okay for different implementations to only -/// support a subset of possible values. -#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash, TypeInfo)] -pub struct Payload(Vec<(BeefyPayloadId, Vec)>); - -impl Payload { - /// Construct a new payload given an initial vallue - pub fn new(id: BeefyPayloadId, value: Vec) -> Self { - Self(vec![(id, value)]) - } - - /// Returns a raw payload under given `id`. - /// - /// If the [`BeefyPayloadId`] is not found in the payload `None` is returned. - pub fn get_raw(&self, id: &BeefyPayloadId) -> Option<&Vec> { - let index = self.0.binary_search_by(|probe| probe.0.cmp(id)).ok()?; - Some(&self.0[index].1) - } - - /// Returns a decoded payload value under given `id`. - /// - /// In case the value is not there or it cannot be decoded does not match `None` is returned. - pub fn get_decoded(&self, id: &BeefyPayloadId) -> Option { - self.get_raw(id).and_then(|raw| T::decode(&mut &raw[..]).ok()) - } - - /// Push a `Vec` with a given id into the payload vec. - /// This method will internally sort the payload vec after every push. - /// - /// Returns self to allow for daisy chaining. - pub fn push_raw(mut self, id: BeefyPayloadId, value: Vec) -> Self { - self.0.push((id, value)); - self.0.sort_by_key(|(id, _)| *id); - self - } -} +use crate::{Payload, ValidatorSetId}; /// A commitment signed by GRANDPA validators as part of BEEFY protocol. 
/// @@ -302,13 +248,11 @@ impl From> for VersionedFinalityProof { #[cfg(test)] mod tests { - use sp_core::{keccak_256, Pair}; - use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; - use super::*; + use crate::{crypto, known_payloads, KEY_TYPE}; use codec::Decode; - - use crate::{crypto, KEY_TYPE}; + use sp_core::{keccak_256, Pair}; + use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; type TestCommitment = Commitment; type TestSignedCommitment = SignedCommitment; @@ -341,7 +285,8 @@ mod tests { #[test] fn commitment_encode_decode() { // given - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -362,7 +307,8 @@ mod tests { #[test] fn signed_commitment_encode_decode() { // given - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -396,7 +342,8 @@ mod tests { #[test] fn signed_commitment_count_signatures() { // given - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -421,7 +368,8 @@ mod tests { block_number: u128, validator_set_id: crate::ValidatorSetId, ) -> TestCommitment { - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); Commitment { payload, block_number, validator_set_id } } @@ -441,7 +389,8 @@ mod tests { #[test] fn versioned_commitment_encode_decode() { - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; @@ -467,7 +416,8 @@ mod tests { #[test] fn large_signed_commitment_encode_decode() { // given - let payload = Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".encode()); + let payload = + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, "Hello World!".encode()); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; diff --git a/primitives/beefy/src/lib.rs b/primitives/beefy/src/lib.rs index 87f1b8756af65..705366e1b4778 100644 --- a/primitives/beefy/src/lib.rs +++ b/primitives/beefy/src/lib.rs @@ -33,12 +33,11 @@ mod commitment; pub mod mmr; +mod payload; pub mod witness; -pub use commitment::{ - known_payload_ids, BeefyPayloadId, Commitment, Payload, SignedCommitment, - VersionedFinalityProof, -}; +pub use commitment::{Commitment, SignedCommitment, VersionedFinalityProof}; +pub use payload::{known_payloads, BeefyPayloadId, Payload, PayloadProvider}; use codec::{Codec, Decode, Encode}; use scale_info::TypeInfo; diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs index 471cb96841b8e..b479d979f13f3 100644 --- a/primitives/beefy/src/mmr.rs +++ b/primitives/beefy/src/mmr.rs @@ -17,8 +17,8 @@ //! BEEFY + MMR utilties. //! 
-//! While BEEFY can be used completely indepentently as an additional consensus gadget, -//! it is designed around a main use case of making bridging standalone networks together. +//! While BEEFY can be used completely independently as an additional consensus gadget, +//! it is designed around a main use case of bridging standalone networks together. //! For that use case it's common to use some aggregated data structure (like MMR) to be //! used in conjunction with BEEFY, to be able to efficiently prove any past blockchain data. //! @@ -26,9 +26,13 @@ //! but we imagine they will be useful for other chains that either want to bridge with Polkadot //! or are completely standalone, but heavily inspired by Polkadot. -use crate::Vec; +use crate::{crypto::AuthorityId, ConsensusLog, MmrRootHash, Vec, BEEFY_ENGINE_ID}; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; +use sp_runtime::{ + generic::OpaqueDigestItemId, + traits::{Block, Header}, +}; /// A provider for extra data that gets added to the Mmr leaf pub trait BeefyDataProvider { @@ -121,9 +125,78 @@ pub struct BeefyAuthoritySet { /// Details of the next BEEFY authority set. pub type BeefyNextAuthoritySet = BeefyAuthoritySet; +/// Extract the MMR root hash from a digest in the given header, if it exists. +pub fn find_mmr_root_digest(header: &B::Header) -> Option { + let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); + + let filter = |log: ConsensusLog| match log { + ConsensusLog::MmrRoot(root) => Some(root), + _ => None, + }; + header.digest().convert_first(|l| l.try_to(id).and_then(filter)) +} + +#[cfg(feature = "std")] +pub use mmr_root_provider::MmrRootProvider; +#[cfg(feature = "std")] +mod mmr_root_provider { + use super::*; + use crate::{known_payloads, payload::PayloadProvider, Payload}; + use sp_api::ProvideRuntimeApi; + use sp_mmr_primitives::MmrApi; + use sp_runtime::generic::BlockId; + use sp_std::{marker::PhantomData, sync::Arc}; + + /// A [`crate::Payload`] provider where payload is Merkle Mountain Range root hash. + /// + /// Encoded payload contains a [`crate::MmrRootHash`] type (i.e. 32-bytes hash). + pub struct MmrRootProvider { + runtime: Arc, + _phantom: PhantomData, + } + + impl MmrRootProvider + where + B: Block, + R: ProvideRuntimeApi, + R::Api: MmrApi, + { + /// Create new BEEFY Payload provider with MMR Root as payload. + pub fn new(runtime: Arc) -> Self { + Self { runtime, _phantom: PhantomData } + } + + /// Simple wrapper that gets MMR root from header digests or from client state. 
+ fn mmr_root_from_digest_or_runtime(&self, header: &B::Header) -> Option { + find_mmr_root_digest::(header).or_else(|| { + self.runtime + .runtime_api() + .mmr_root(&BlockId::hash(header.hash())) + .ok() + .and_then(|r| r.ok()) + }) + } + } + + impl PayloadProvider for MmrRootProvider + where + B: Block, + R: ProvideRuntimeApi, + R::Api: MmrApi, + { + fn payload(&self, header: &B::Header) -> Option { + self.mmr_root_from_digest_or_runtime(header).map(|mmr_root| { + Payload::from_single_entry(known_payloads::MMR_ROOT_ID, mmr_root.encode()) + }) + } + } +} + #[cfg(test)] mod tests { use super::*; + use crate::H256; + use sp_runtime::{traits::BlakeTwo256, Digest, DigestItem, OpaqueExtrinsic}; #[test] fn should_construct_version_correctly() { @@ -147,4 +220,30 @@ mod tests { fn should_panic_if_minor_too_large() { MmrLeafVersion::new(0, 32); } + + #[test] + fn extract_mmr_root_digest() { + type Header = sp_runtime::generic::Header; + type Block = sp_runtime::generic::Block; + let mut header = Header::new( + 1u64, + Default::default(), + Default::default(), + Default::default(), + Digest::default(), + ); + + // verify empty digest shows nothing + assert!(find_mmr_root_digest::(&header).is_none()); + + let mmr_root_hash = H256::random(); + header.digest_mut().push(DigestItem::Consensus( + BEEFY_ENGINE_ID, + ConsensusLog::::MmrRoot(mmr_root_hash).encode(), + )); + + // verify validator set is correctly extracted from digest + let extracted = find_mmr_root_digest::(&header); + assert_eq!(extracted, Some(mmr_root_hash)); + } } diff --git a/primitives/beefy/src/payload.rs b/primitives/beefy/src/payload.rs new file mode 100644 index 0000000000000..0f23c3f381f19 --- /dev/null +++ b/primitives/beefy/src/payload.rs @@ -0,0 +1,105 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_runtime::traits::Block; +use sp_std::prelude::*; + +/// Id of different payloads in the [`crate::Commitment`] data. +pub type BeefyPayloadId = [u8; 2]; + +/// Registry of all known [`BeefyPayloadId`]. +pub mod known_payloads { + use crate::BeefyPayloadId; + + /// A [`Payload`](super::Payload) identifier for Merkle Mountain Range root hash. + /// + /// Encoded value should contain a [`crate::MmrRootHash`] type (i.e. 32-bytes hash). + pub const MMR_ROOT_ID: BeefyPayloadId = *b"mh"; +} + +/// A BEEFY payload type allowing for future extensibility of adding additional kinds of payloads. +/// +/// The idea is to store a vector of SCALE-encoded values with an extra identifier. +/// Identifiers MUST be sorted by the [`BeefyPayloadId`] to allow efficient lookup of expected +/// value. Duplicated identifiers are disallowed. It's okay for different implementations to only +/// support a subset of possible values. 
+#[derive(Decode, Encode, Debug, PartialEq, Eq, Clone, Ord, PartialOrd, Hash, TypeInfo)] +pub struct Payload(Vec<(BeefyPayloadId, Vec)>); + +impl Payload { + /// Construct a new payload given an initial vallue + pub fn from_single_entry(id: BeefyPayloadId, value: Vec) -> Self { + Self(vec![(id, value)]) + } + + /// Returns a raw payload under given `id`. + /// + /// If the [`BeefyPayloadId`] is not found in the payload `None` is returned. + pub fn get_raw(&self, id: &BeefyPayloadId) -> Option<&Vec> { + let index = self.0.binary_search_by(|probe| probe.0.cmp(id)).ok()?; + Some(&self.0[index].1) + } + + /// Returns a decoded payload value under given `id`. + /// + /// In case the value is not there or it cannot be decoded does not match `None` is returned. + pub fn get_decoded(&self, id: &BeefyPayloadId) -> Option { + self.get_raw(id).and_then(|raw| T::decode(&mut &raw[..]).ok()) + } + + /// Push a `Vec` with a given id into the payload vec. + /// This method will internally sort the payload vec after every push. + /// + /// Returns self to allow for daisy chaining. + pub fn push_raw(mut self, id: BeefyPayloadId, value: Vec) -> Self { + self.0.push((id, value)); + self.0.sort_by_key(|(id, _)| *id); + self + } +} + +/// Trait for custom BEEFY payload providers. +pub trait PayloadProvider { + /// Provide BEEFY payload if available for `header`. + fn payload(&self, header: &B::Header) -> Option; +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn payload_methods_work_as_expected() { + let id1: BeefyPayloadId = *b"hw"; + let msg1: String = "1. Hello World!".to_string(); + let id2: BeefyPayloadId = *b"yb"; + let msg2: String = "2. Yellow Board!".to_string(); + let id3: BeefyPayloadId = *b"cs"; + let msg3: String = "3. Cello Cord!".to_string(); + + let payload = Payload::from_single_entry(id1, msg1.encode()) + .push_raw(id2, msg2.encode()) + .push_raw(id3, msg3.encode()); + + assert_eq!(payload.get_decoded(&id1), Some(msg1)); + assert_eq!(payload.get_decoded(&id2), Some(msg2)); + assert_eq!(payload.get_raw(&id3), Some(&msg3.encode())); + assert_eq!(payload.get_raw(&known_payloads::MMR_ROOT_ID), None); + } +} diff --git a/primitives/beefy/src/witness.rs b/primitives/beefy/src/witness.rs index cebfc3de85049..2c45e0ade90c4 100644 --- a/primitives/beefy/src/witness.rs +++ b/primitives/beefy/src/witness.rs @@ -81,7 +81,7 @@ mod tests { use super::*; use codec::Decode; - use crate::{crypto, known_payload_ids, Payload, KEY_TYPE}; + use crate::{crypto, known_payloads, Payload, KEY_TYPE}; type TestCommitment = Commitment; type TestSignedCommitment = SignedCommitment; @@ -111,8 +111,10 @@ mod tests { } fn signed_commitment() -> TestSignedCommitment { - let payload = - Payload::new(known_payload_ids::MMR_ROOT_ID, "Hello World!".as_bytes().to_vec()); + let payload = Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + "Hello World!".as_bytes().to_vec(), + ); let commitment: TestCommitment = Commitment { payload, block_number: 5, validator_set_id: 0 }; From a84def97102166643bc3c807ae69892551c47536 Mon Sep 17 00:00:00 2001 From: Sergej Sakac <73715684+Szegoo@users.noreply.github.com> Date: Thu, 6 Oct 2022 14:12:51 +0200 Subject: [PATCH 216/348] Maximum value for `MultiplierUpdate` (#12282) * Maximum value for MultiplierUpdate * Update frame/transaction-payment/src/lib.rs Co-authored-by: Stephen Shelton * Update lib.rs * return constant * fix in runtime * Update frame/transaction-payment/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update 
frame/transaction-payment/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * fixes * remove unused import * Update lib.rs * more readable * fix * fix nits Co-authored-by: Stephen Shelton Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> --- bin/node/runtime/src/impls.rs | 5 +++-- bin/node/runtime/src/lib.rs | 12 +++++++++--- frame/transaction-payment/src/lib.rs | 26 ++++++++++++++++++++------ 3 files changed, 32 insertions(+), 11 deletions(-) diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index 0f9ed6e275196..0a5c797ba729f 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -126,8 +126,8 @@ mod multiplier_tests { use crate::{ constants::{currency::*, time::*}, - AdjustmentVariable, MinimumMultiplier, Runtime, RuntimeBlockWeights as BlockWeights, - System, TargetBlockFullness, TransactionPayment, + AdjustmentVariable, MaximumMultiplier, MinimumMultiplier, Runtime, + RuntimeBlockWeights as BlockWeights, System, TargetBlockFullness, TransactionPayment, }; use frame_support::{ dispatch::DispatchClass, @@ -156,6 +156,7 @@ mod multiplier_tests { TargetBlockFullness, AdjustmentVariable, MinimumMultiplier, + MaximumMultiplier, >::convert(fm) } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4a35b972ff7de..34f6988c31643 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -65,7 +65,7 @@ use sp_runtime::{ curve::PiecewiseLinear, generic, impl_opaque_keys, traits::{ - self, BlakeTwo256, Block as BlockT, ConvertInto, NumberFor, OpaqueKeys, + self, BlakeTwo256, Block as BlockT, Bounded, ConvertInto, NumberFor, OpaqueKeys, SaturatedConversion, StaticLookup, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, @@ -443,6 +443,7 @@ parameter_types! { pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(1, 100_000); pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000_000u128); + pub MaximumMultiplier: Multiplier = Bounded::max_value(); } impl pallet_transaction_payment::Config for Runtime { @@ -451,8 +452,13 @@ impl pallet_transaction_payment::Config for Runtime { type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee; type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = - TargetedFeeAdjustment; + type FeeMultiplierUpdate = TargetedFeeAdjustment< + Self, + TargetBlockFullness, + AdjustmentVariable, + MinimumMultiplier, + MaximumMultiplier, + >; } impl pallet_asset_tx_payment::Config for Runtime { diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 80297d1a0d362..ce85bf93a90a7 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -127,12 +127,14 @@ type BalanceOf = <::OnChargeTransaction as OnChargeTransaction -pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData<(T, S, V, M)>); +pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData<(T, S, V, M, X)>); /// Something that can convert the current multiplier to the next one. pub trait MultiplierUpdate: Convert { - /// Minimum multiplier + /// Minimum multiplier. Any outcome of the `convert` function should be at least this. fn min() -> Multiplier; + /// Maximum multiplier. Any outcome of the `convert` function should be less or equal this. 
+ fn max() -> Multiplier; /// Target block saturation level fn target() -> Perquintill; /// Variability factor @@ -143,6 +145,9 @@ impl MultiplierUpdate for () { fn min() -> Multiplier { Default::default() } + fn max() -> Multiplier { + ::max_value() + } fn target() -> Perquintill { Default::default() } @@ -151,16 +156,20 @@ impl MultiplierUpdate for () { } } -impl MultiplierUpdate for TargetedFeeAdjustment +impl MultiplierUpdate for TargetedFeeAdjustment where T: frame_system::Config, S: Get, V: Get, M: Get, + X: Get, { fn min() -> Multiplier { M::get() } + fn max() -> Multiplier { + X::get() + } fn target() -> Perquintill { S::get() } @@ -169,18 +178,20 @@ where } } -impl Convert for TargetedFeeAdjustment +impl Convert for TargetedFeeAdjustment where T: frame_system::Config, S: Get, V: Get, M: Get, + X: Get, { fn convert(previous: Multiplier) -> Multiplier { // Defensive only. The multiplier in storage should always be at most positive. Nonetheless // we recover here in case of errors, because any value below this would be stale and can // never change. let min_multiplier = M::get(); + let max_multiplier = X::get(); let previous = previous.max(min_multiplier); let weights = T::BlockWeights::get(); @@ -217,11 +228,11 @@ where if positive { let excess = first_term.saturating_add(second_term).saturating_mul(previous); - previous.saturating_add(excess).max(min_multiplier) + previous.saturating_add(excess).max(min_multiplier).min(max_multiplier) } else { // Defensive-only: first_term > second_term. Safe subtraction. let negative = first_term.saturating_sub(second_term).saturating_mul(previous); - previous.saturating_sub(negative).max(min_multiplier) + previous.saturating_sub(negative).max(min_multiplier).min(max_multiplier) } } } @@ -233,6 +244,9 @@ impl> MultiplierUpdate for ConstFeeMultiplier { fn min() -> Multiplier { M::get() } + fn max() -> Multiplier { + M::get() + } fn target() -> Perquintill { Default::default() } From f77f788c8cd8692f716a54f13c434a8774246d7a Mon Sep 17 00:00:00 2001 From: Sam Johnson Date: Thu, 6 Oct 2022 16:31:56 -0400 Subject: [PATCH 217/348] macro stubs for all pallet:: macros to improve documentation visibility and discovery + revamp of pallet macro documentation (#12334) * proof of concept working for pallet::whitelist_storage * fix comments * pallet macros docs rewrite WIP * fix issue with cargo fmt cobbling links * tweak capitalization * fix docs for storage_version * fix docs for pallet::hooks * fix several comments * fix invalid link * fix wrapping and add missing links for pallet::hooks docs * run rewrap on all text blocks in frame_support::pallet docs * cargo fmt * fix up pallet::call_index docs * fix docs for pallet::extra_constants * fix docs for pallet::error * fix docs for pallet::event * fix docs for pallet::event * * fix docs for pallet::storage * fix docs for pallet::getter * fix docs for pallet::storage_prefix * fix docs for pallet::unbounded * fix docs for pallet::whitelist_storage * fix docs for #[cfg(..)] (for storage items and attributes) * fix docs for pallet::storage macro expansion * fix docs for pallet::type_value * fix docs for pallet::genesis_config * fix docs for pallet::genesis_build * fix docs for pallet::inherent * fix docs for pallet::validate_unsigned * fix docs for pallet::origin * fix docs for general notes on instantiable pallets * fix docs for example of a non-instantiable pallet * fix docs for example of an instantiable pallet * fix docs for upgrade guidelines * fix docs for upgrade guidelines * fix docs for upgrade checking 
and final notes * fix some examples near the beginning * extract docs for `pallet::whitelist_storage` * add docs for pallet_macro_stub * fix order of pallet::config and pallet::constant * set up stub for pallet::config * set up stub for pallet::constant * fix * set up stub for pallet::disable_frame_system_supertrait_check * set up stub for pallet::generate_storage_info * set up stub for pallet::storage_version * set up stub for pallet::hooks * set up stub for pallet::weight * set up stub for pallet::compact * set up stub for pallet::call_index * set up stub for pallet::extra_constants * set up stub for pallet::error * set up stub for pallet::event * set up stub for pallet::generate_deposit * set up stub for pallet::storage * set up stub for pallet::getter * set up stub for pallet::storage_prefix * set up stub for pallet::unbounded * set up stub for pallet::type_value * set up stub for pallet::genesis_config * set up stub for pallet::genesis_build * set up stub for pallet::inherent * set up stub for pallet::validate_unsigned * set up stub for pallet::origin * fix comment * cargo fmt * tweak error message * Update frame/support/procedural/src/lib.rs Co-authored-by: Keith Yeung * Update frame/support/procedural/src/lib.rs Co-authored-by: Keith Yeung * switch order of derives Co-authored-by: Squirrel * tweak wording Co-authored-by: Squirrel * add more context info about `MAX_MODULE_ERROR_ENCODED_SIZE` Co-authored-by: Squirrel * tweak wording about where clause Co-authored-by: Squirrel * clarify wording about system/events key Co-authored-by: Squirrel * use "The Event enum" instead of "item" Co-authored-by: Squirrel * fix bad wording Co-authored-by: Squirrel * use enum instead of type Co-authored-by: Squirrel * expect => expects Co-authored-by: Squirrel * add additional note about storage prefix Co-authored-by: Squirrel * clearer note about GenesisConfig Co-authored-by: Squirrel * Use "The impl" instead of "The item" Co-authored-by: Squirrel * add note and link to tight-coupling docs Co-authored-by: Squirrel * cargo fmt * remove spaces around parenthesis * fix missing text for pallet::config * fix issue with pallet::constant intro * fix wording about codec index * fix pallet::error wording * fix comment about 1 byte => 256 errors * fix where clause comment * fix comment about where pallet events are stored * rewrap some text * fix pallet::storage docs * fix pallet::storage_prefix docs * tweak docs for pallet::genesis_build * tweak docs for pallet::config * specify that pallet::event must be present if pallet::config is present * add note about why we would want to bypass the supertrait check * mention that pallet::generate_store attribute is only valid on pallet struct * add note about adding new calls to the end to maintain existing order * add note about pallet::type_value and pallet::storage Co-authored-by: Squirrel * add note about using pallet::type_value alongside pallet::storage * include warning about modifying disaptchables on other pallet::call_index docs page * fix incorrect comment * add much more information for pallet::inherent * move pallet::pallet macro expansion notes back to their rightful place * re-run CI * fix macro expansion appearing in wrong place for pallet::pallet * replicate pallet::pallet docs on the pallet::pallet macro stub * force CI re-run Co-authored-by: Keith Yeung Co-authored-by: Squirrel Co-authored-by: parity-processbot <> --- frame/support/procedural/src/lib.rs | 680 +++++++++++++++++++- frame/support/src/lib.rs | 964 ++++++++++++++++------------ 2 files 
changed, 1223 insertions(+), 421 deletions(-) diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 0f72b28748cee..ccff5488c93be 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -36,6 +36,7 @@ mod transactional; mod tt_macro; use proc_macro::TokenStream; +use quote::quote; use std::{cell::RefCell, str::FromStr}; pub(crate) use storage::INHERENT_INSTANCE_NAME; @@ -402,7 +403,49 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { construct_runtime::construct_runtime(input) } -/// Macro to define a pallet. Docs are at `frame_support::pallet`. +/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify +/// pallet information. +/// +/// The struct must be defined as follows: +/// ```ignore +/// #[pallet::pallet] +/// pub struct Pallet(_); +/// ``` +/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. +/// +/// ## Macro expansion: +/// +/// The macro adds this attribute to the struct definition: +/// ```ignore +/// #[derive( +/// frame_support::CloneNoBound, +/// frame_support::EqNoBound, +/// frame_support::PartialEqNoBound, +/// frame_support::RuntimeDebugNoBound, +/// )] +/// ``` +/// and replaces the type `_` with `PhantomData`. It also implements on the pallet: +/// * `GetStorageVersion` +/// * `OnGenesis`: contains some logic to write the pallet version into storage. +/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. +/// +/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`. +/// +/// It implements `PalletInfoAccess` on `Pallet` to ease access to pallet information given by +/// `frame_support::traits::PalletInfo`. (The implementation uses the associated type +/// `frame_system::Config::PalletInfo`). +/// +/// It implements `StorageInfoTrait` on `Pallet` which give information about all storages. +/// +/// If the attribute `generate_store` is set then the macro creates the trait `Store` and +/// implements it on `Pallet`. +/// +/// If the attribute `set_storage_max_encoded_len` is set then the macro calls +/// `StorageInfoTrait` for each storage in the implementation of `StorageInfoTrait` for the +/// pallet. Otherwise it implements `StorageInfoTrait` for the pallet using the +/// `PartialStorageInfoTrait` implementation of storages. +/// +/// See `frame_support::pallet` docs for more info. #[proc_macro_attribute] pub fn pallet(attr: TokenStream, item: TokenStream) -> TokenStream { pallet::pallet(attr, item) @@ -583,3 +626,638 @@ pub fn storage_alias(_: TokenStream, input: TokenStream) -> TokenStream { .unwrap_or_else(|r| r.into_compile_error()) .into() } + +/// Used internally to decorate pallet attribute macro stubs when they are erroneously used +/// outside of a pallet module +fn pallet_macro_stub() -> TokenStream { + quote!(compile_error!( + "This attribute can only be used from within a pallet module marked with `#[frame_support::pallet]`" + )) + .into() +} + +/// The mandatory attribute `#[pallet::config]` defines the configurable options for the pallet. +/// +/// Item must be defined as: +/// +/// ```ignore +/// #[pallet::config] +/// pub trait Config: frame_system::Config + $optionally_some_other_supertraits +/// $optional_where_clause +/// { +/// ... +/// } +/// ``` +/// +/// I.e. a regular trait definition named `Config`, with the supertrait +/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause. 
+/// (Specifying other supertraits here is known as [tight +/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) +/// +/// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds +/// `From` and `IsType<::RuntimeEvent>`. +/// +/// [`pallet::event`](`macro@event`) must be present if `RuntimeEvent` exists as a config item +/// in your `#[pallet::config]`. +#[proc_macro_attribute] +pub fn config(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by `Get` +/// from [`pallet::config`](`macro@config`) into metadata, e.g.: +/// +/// ```ignore +/// #[pallet::config] +/// pub trait Config: frame_system::Config { +/// #[pallet::constant] +/// type Foo: Get; +/// } +/// ``` +#[proc_macro_attribute] +pub fn constant(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// To bypass the `frame_system::Config` supertrait check, use the attribute +/// `pallet::disable_frame_system_supertrait_check`, e.g.: +/// +/// ```ignore +/// #[pallet::config] +/// #[pallet::disable_frame_system_supertrait_check] +/// pub trait Config: pallet_timestamp::Config {} +/// ``` +/// +/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you +/// want to write an alternative to the `frame_system` pallet. +#[proc_macro_attribute] +pub fn disable_frame_system_supertrait_check(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with +/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: +/// +/// ```ignore +/// #[pallet::pallet] +/// #[pallet::generate_store(pub(super) trait Store)] +/// pub struct Pallet(_); +/// ``` +/// More precisely, the `Store` trait contains an associated type for each storage. It is +/// implemented for `Pallet` allowing access to the storage from pallet struct. +/// +/// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using +/// `::Foo`. +/// +/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct +/// definition. +#[proc_macro_attribute] +pub fn generate_store(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// To generate the full storage info (used for PoV calculation) use the attribute +/// `#[pallet::generate_storage_info]`, e.g.: +/// +/// ```ignore +/// #[pallet::pallet] +/// #[pallet::generate_storage_info] +/// pub struct Pallet(_); +/// ``` +/// +/// This requires all storage items to implement the trait `StorageInfoTrait`, thus all keys +/// and value types must be bound by `MaxEncodedLen`. Individual storages can opt-out from this +/// constraint by using [`#[pallet::unbounded]`](`macro@unbounded`) (see +/// [`#[pallet::storage]`](`macro@storage`) for more info). +#[proc_macro_attribute] +pub fn generate_storage_info(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Because the `pallet::pallet` macro implements `GetStorageVersion`, the current storage +/// version needs to be communicated to the macro. 
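For illustration only (a hypothetical pallet sketch, not part of this patch), a concrete `Hooks` implementation following the shape above might override just the hooks it needs and leave the rest to their defaults:

```ignore
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
	// Runs at the start of every block; must return the weight it consumed.
	fn on_initialize(_n: BlockNumberFor<T>) -> Weight {
		// e.g. prune expired entries here and account for the cost of doing so
		Weight::zero()
	}

	// Runs at the end of every block; its weight must be accounted for up front.
	fn on_finalize(_n: BlockNumberFor<T>) {}
}
```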
This can be done by using the +/// `pallet::storage_version` attribute: +/// +/// ```ignore +/// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); +/// +/// #[pallet::pallet] +/// #[pallet::storage_version(STORAGE_VERSION)] +/// pub struct Pallet(_); +/// ``` +/// +/// If not present, the current storage version is set to the default value. +#[proc_macro_attribute] +pub fn storage_version(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::hooks]` attribute allows you to specify a `Hooks` implementation for +/// `Pallet` that specifies pallet-specific logic. +/// +/// The item the attribute attaches to must be defined as follows: +/// ```ignore +/// #[pallet::hooks] +/// impl Hooks> for Pallet $optional_where_clause { +/// ... +/// } +/// ``` +/// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait +/// `Hooks>` (they are defined in preludes), for the type `Pallet` and +/// with an optional where clause. +/// +/// If no `#[pallet::hooks]` exists, then the following default implementation is +/// automatically generated: +/// ```ignore +/// #[pallet::hooks] +/// impl Hooks> for Pallet {} +/// ``` +/// +/// ## Macro expansion +/// +/// The macro implements the traits `OnInitialize`, `OnIdle`, `OnFinalize`, `OnRuntimeUpgrade`, +/// `OffchainWorker`, and `IntegrityTest` using the provided `Hooks` implementation. +/// +/// NOTE: `OnRuntimeUpgrade` is implemented with `Hooks::on_runtime_upgrade` and some +/// additional logic. E.g. logic to write the pallet version into storage. +/// +/// NOTE: The macro also adds some tracing logic when implementing the above traits. The +/// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. +#[proc_macro_attribute] +pub fn hooks(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the +/// first argument must be `origin: OriginFor`. +#[proc_macro_attribute] +pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must +/// return a `DispatchResultWithPostInfo` or `DispatchResult`. +#[proc_macro_attribute] +pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, +/// which explicitly defines the codec index for the dispatchable function in the `Call` enum. +/// +/// All call indexes start from 0, until it encounters a dispatchable function with a defined +/// call index. The dispatchable function that lexically follows the function with a defined +/// call index will have that call index, but incremented by 1, e.g. if there are 3 +/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` +/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1. +/// +/// All arguments must implement [`Debug`], [`PartialEq`], [`Eq`], `Decode`, `Encode`, and +/// [`Clone`]. For ease of use, bound by the trait `frame_support::pallet_prelude::Member`. 
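To make the index rule above concrete, here is a hypothetical call block (function names and weights are placeholders) in which only `bar` pins its index, so `qux` implicitly gets index 11:

```ignore
#[pallet::call]
impl<T: Config> Pallet<T> {
	#[pallet::weight(10_000)]
	pub fn foo(origin: OriginFor<T>) -> DispatchResult {
		// implicit call index 0
		ensure_signed(origin)?;
		Ok(())
	}

	#[pallet::weight(10_000)]
	#[pallet::call_index(10)]
	pub fn bar(origin: OriginFor<T>) -> DispatchResult {
		// explicit call index 10
		ensure_signed(origin)?;
		Ok(())
	}

	#[pallet::weight(10_000)]
	pub fn qux(origin: OriginFor<T>) -> DispatchResult {
		// implicit call index 11, since it lexically follows `bar`
		ensure_signed(origin)?;
		Ok(())
	}
}
```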
+/// +/// If no `#[pallet::call]` exists, then a default implementation corresponding to the +/// following code is automatically generated: +/// +/// ```ignore +/// #[pallet::call] +/// impl Pallet {} +/// ``` +/// +/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be +/// done with care. Indeed this will change the outer runtime call type (which is an enum with +/// one variant per pallet), this outer runtime call can be stored on-chain (e.g. in +/// `pallet-scheduler`). Thus migration might be needed. To mitigate against some of this, the +/// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so +/// that the `Call` enum encoding does not change after modification. As a general rule of +/// thumb, it is therefore adventageous to always add new calls to the end so you can maintain +/// the existing order of calls. +/// +/// ### Macro expansion +/// +/// The macro creates an enum `Call` with one variant per dispatchable. This enum implements: +/// [`Clone`], [`Eq`], [`PartialEq`], [`Debug`] (with stripped implementation in `not("std")`), +/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, and `UnfilteredDispatchable`. +/// +/// The macro implements the `Callable` trait on `Pallet` and a function `call_functions` +/// which returns the dispatchable metadata. +#[proc_macro_attribute] +pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// Allows you to define some extra constants to be added into constant metadata. +/// +/// Item must be defined as: +/// +/// ```ignore +/// #[pallet::extra_constants] +/// impl Pallet where $optional_where_clause { +/// /// $some_doc +/// $vis fn $fn_name() -> $some_return_type { +/// ... +/// } +/// ... +/// } +/// ``` +/// I.e. a regular rust `impl` block with some optional where clause and functions with 0 args, +/// 0 generics, and some return type. +/// +/// ## Macro expansion +/// +/// The macro add some extra constants to pallet constant metadata. +#[proc_macro_attribute] +pub fn extra_constants(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned +/// from the dispatchable when an error occurs. The information for this error type is then +/// stored in metadata. +/// +/// Item must be defined as: +/// +/// ```ignore +/// #[pallet::error] +/// pub enum Error { +/// /// $some_optional_doc +/// $SomeFieldLessVariant, +/// /// $some_more_optional_doc +/// $SomeVariantWithOneField(FieldType), +/// ... +/// } +/// ``` +/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field +/// variants. +/// +/// Any field type in the enum variants must implement `TypeInfo` in order to be properly used +/// in the metadata, and its encoded size should be as small as possible, preferably 1 byte in +/// size in order to reduce storage size. The error enum itself has an absolute maximum encoded +/// size specified by `MAX_MODULE_ERROR_ENCODED_SIZE`. +/// +/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to +/// diagnose problems and give a better experience to the user. Don't skimp on having lots of +/// individual error conditions.) +/// +/// Field types in enum variants must also implement `PalletError`, otherwise the pallet will +/// fail to compile. 
Rust primitive types have already implemented the `PalletError` trait +/// along with some commonly used stdlib types such as [`Option`] and `PhantomData`, and hence +/// in most use cases, a manual implementation is not necessary and is discouraged. +/// +/// The generic `T` must not bound anything and a `where` clause is not allowed. That said, +/// bounds and/or a where clause should not needed for any use-case. +/// +/// ## Macro expansion +/// +/// The macro implements the [`Debug`] trait and functions `as_u8` using variant position, and +/// `as_str` using variant doc. +/// +/// The macro also implements `From>` for `&'static str` and `From>` for +/// `DispatchError`. +#[proc_macro_attribute] +pub fn error(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::event]` attribute allows you to define pallet events. Pallet events are +/// stored under the `system` / `events` key when the block is applied (and then replaced when +/// the next block writes it's events). +/// +/// The Event enum must be defined as follows: +/// +/// ```ignore +/// #[pallet::event] +/// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional +/// pub enum Event<$some_generic> $optional_where_clause { +/// /// Some doc +/// $SomeName($SomeType, $YetanotherType, ...), +/// ... +/// } +/// ``` +/// +/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or +/// `T` or `T: Config`, and optional w here clause. +/// +/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], `Encode`, `Decode`, and +/// [`Debug`] (on std only). For ease of use, bound by the trait `Member`, available in +/// `frame_support::pallet_prelude`. +#[proc_macro_attribute] +pub fn event(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a +/// helper function on `Pallet` that handles deposit events. +/// +/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. +/// +/// ## Macro expansion +/// +/// The macro will add on enum `Event` the attributes: +/// * `#[derive(frame_support::CloneNoBound)]` +/// * `#[derive(frame_support::EqNoBound)]` +/// * `#[derive(frame_support::PartialEqNoBound)]` +/// * `#[derive(frame_support::RuntimeDebugNoBound)]` +/// * `#[derive(codec::Encode)]` +/// * `#[derive(codec::Decode)]` +/// +/// The macro implements `From>` for (). +/// +/// The macro implements a metadata function on `Event` returning the `EventMetadata`. +/// +/// If `#[pallet::generate_deposit]` is present then the macro implements `fn deposit_event` on +/// `Pallet`. +#[proc_macro_attribute] +pub fn generate_deposit(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime +/// storage and also set its metadata. This attribute can be used multiple times. 
+/// +/// Item should be defined as: +/// +/// ```ignore +/// #[pallet::storage] +/// #[pallet::getter(fn $getter_name)] // optional +/// $vis type $StorageName<$some_generic> $optional_where_clause +/// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>; +/// ``` +/// +/// or with unnamed generic: +/// +/// ```ignore +/// #[pallet::storage] +/// #[pallet::getter(fn $getter_name)] // optional +/// $vis type $StorageName<$some_generic> $optional_where_clause +/// = $StorageType<_, $some_generics, ...>; +/// ``` +/// +/// I.e. it must be a type alias, with generics: `T` or `T: Config`. The aliased type must be +/// one of `StorageValue`, `StorageMap` or `StorageDoubleMap`. The generic arguments of the +/// storage type can be given in two manners: named and unnamed. For named generic arguments, +/// the name for each argument should match the name defined for it on the storage struct: +/// * `StorageValue` expects `Value` and optionally `QueryKind` and `OnEmpty`, +/// * `StorageMap` expects `Hasher`, `Key`, `Value` and optionally `QueryKind` and `OnEmpty`, +/// * `CountedStorageMap` expects `Hasher`, `Key`, `Value` and optionally `QueryKind` and `OnEmpty`, +/// * `StorageDoubleMap` expects `Hasher1`, `Key1`, `Hasher2`, `Key2`, `Value` and optionally +/// `QueryKind` and `OnEmpty`. +/// +/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the +/// macro and other generic must declared as a normal generic type declaration. +/// +/// The `Prefix` generic written by the macro is generated using +/// `PalletInfo::name::>()` and the name of the storage type. E.g. if runtime names +/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix: +/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`. +/// +/// For the `CountedStorageMap` variant, the `Prefix` also implements +/// `CountedStorageMapInstance`. It also associates a `CounterPrefix`, which is implemented the +/// same as above, but the storage prefix is prepend with `"CounterFor"`. E.g. if runtime names +/// the pallet "MyExample" then the storage `type Foo = CountedStorageaMap<...>` will store +/// its counter at the prefix: `Twox128(b"MyExample") ++ Twox128(b"CounterForFoo")`. +/// +/// E.g: +/// +/// ```ignore +/// #[pallet::storage] +/// pub(super) type MyStorage = StorageMap; +/// ``` +/// +/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++ +/// Twox128(b"OtherName")`. +#[proc_macro_attribute] +pub fn storage(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a +/// getter function on `Pallet`. +/// +/// Also see [`pallet::storage`](`macro@storage`) +#[proc_macro_attribute] +pub fn getter(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the +/// storage prefix to use. This is helpful if you wish to rename the storage field but don't +/// want to perform a migration. 
+/// +/// E.g: +/// +/// ```ignore +/// #[pallet::storage] +/// #[pallet::storage_prefix = "foo"] +/// #[pallet::getter(fn my_storage)] +/// pub(super) type MyStorage = StorageMap; +/// ``` +/// +/// or +/// +/// ```ignore +/// #[pallet::storage] +/// #[pallet::getter(fn my_storage)] +/// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; +/// ``` +#[proc_macro_attribute] +pub fn storage_prefix(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When +/// implementating the storage info (when `#[pallet::generate_storage_info]` is specified on +/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This +/// can be useful for storage which can never go into PoV (Proof of Validity). +#[proc_macro_attribute] +pub fn unbounded(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The optional attribute `#[pallet::whitelist_storage]` will declare the +/// storage as whitelisted from benchmarking. Doing so will exclude reads of +/// that value's storage key from counting towards weight calculations during +/// benchmarking. +/// +/// This attribute should only be attached to storages that are known to be +/// read/used in every block. This will result in a more accurate benchmarking weight. +/// +/// ### Example +/// ```ignore +/// #[pallet::storage] +/// #[pallet::whitelist_storage] +/// pub(super) type Number = StorageValue<_, T::BlockNumber, ValueQuery>; +/// ``` +/// +/// NOTE: As with all `pallet::*` attributes, this one _must_ be written as +/// `#[pallet::whitelist_storage]` and can only be placed inside a `pallet` module in order for +/// it to work properly. +#[proc_macro_attribute] +pub fn whitelist_storage(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::type_value]` attribute lets you define a struct implementing the `Get` trait +/// to ease the use of storage types. This attribute is meant to be used alongside +/// [`#[pallet::storage]`](`macro@storage`) to define a storage's default value. This attribute +/// can be used multiple times. +/// +/// Item must be defined as: +/// +/// ```ignore +/// #[pallet::type_value] +/// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } +/// ``` +/// +/// I.e.: a function definition with generics none or `T: Config` and a returned type. +/// +/// E.g.: +/// +/// ```ignore +/// #[pallet::type_value] +/// fn MyDefault() -> T::Balance { 3.into() } +/// ``` +/// +/// ## Macro expansion +/// +/// The macro renames the function to some internal name, generates a struct with the original +/// name of the function and its generic, and implements `Get<$ReturnType>` by calling the user +/// defined function. +#[proc_macro_attribute] +pub fn type_value(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration +/// for the pallet. +/// +/// Item is defined as either an enum or a struct. It needs to be public and implement the +/// trait `GenesisBuild` with [`#[pallet::genesis_build]`](`macro@genesis_build`). The type +/// generics are constrained to be either none, or `T` or `T: Config`. 
+/// +/// E.g: +/// +/// ```ignore +/// #[pallet::genesis_config] +/// pub struct GenesisConfig { +/// _myfield: BalanceOf, +/// } +/// ``` +#[proc_macro_attribute] +pub fn genesis_config(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration` +/// is built. This takes as input the `GenesisConfig` type (as `self`) and constructs the pallet's +/// initial state. +/// +/// The impl must be defined as: +/// +/// ```ignore +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig<$maybe_generics> { +/// fn build(&self) { $expr } +/// } +/// ``` +/// +/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild` on +/// type `GenesisConfig` with generics none or `T`. +/// +/// E.g.: +/// +/// ```ignore +/// #[pallet::genesis_build] +/// impl GenesisBuild for GenesisConfig { +/// fn build(&self) {} +/// } +/// ``` +/// +/// ## Macro expansion +/// +/// The macro will add the following attribute: +/// * `#[cfg(feature = "std")]` +/// +/// The macro will implement `sp_runtime::BuildModuleGenesisStorage` using `()` as a second +/// generic for non-instantiable pallets. +#[proc_macro_attribute] +pub fn genesis_build(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::inherent]` attribute allows the pallet to provide some +/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). +/// An inherent is some piece of data that is inserted by a block authoring node at block +/// creation time and can either be accepted or rejected by validators based on whether the +/// data falls within an acceptable range. +/// +/// The most common inherent is the `timestamp` that is inserted into every block. Since there +/// is no way to validate timestamps, validators simply check that the timestamp reported by +/// the block authoring node falls within an acceptable range. +/// +/// Item must be defined as: +/// +/// ```ignore +/// #[pallet::inherent] +/// impl ProvideInherent for Pallet { +/// // ... regular trait implementation +/// } +/// ``` +/// +/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type +/// `Pallet`, and some optional where clause. +/// +/// ## Macro expansion +/// +/// The macro currently makes no use of this information, but it might use this information in +/// the future to give information directly to `construct_runtime`. +#[proc_macro_attribute] +pub fn inherent(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned +/// transaction: +/// +/// Item must be defined as: +/// +/// ```ignore +/// #[pallet::validate_unsigned] +/// impl ValidateUnsigned for Pallet { +/// // ... regular trait implementation +/// } +/// ``` +/// +/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type +/// `Pallet`, and some optional where clause. +/// +/// NOTE: There is also the `sp_runtime::traits::SignedExtension` trait that can be used to add +/// some specific logic for transaction validation. +/// +/// ## Macro expansion +/// +/// The macro currently makes no use of this information, but it might use this information in +/// the future to give information directly to `construct_runtime`. 
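As a purely illustrative sketch (the call name `submit_something` is a placeholder, not from this patch), such an implementation usually matches on the pallet's own unsigned calls and builds a `ValidTransaction`:

```ignore
#[pallet::validate_unsigned]
impl<T: Config> ValidateUnsigned for Pallet<T> {
	type Call = Call<T>;

	fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
		match call {
			// Accept only this pallet's unsigned call, with a chosen priority and longevity.
			Call::submit_something { .. } => ValidTransaction::with_tag_prefix("ExamplePallet")
				.priority(TransactionPriority::max_value())
				.longevity(64)
				.propagate(true)
				.build(),
			// Reject every other call as an unsigned transaction.
			_ => InvalidTransaction::Call.into(),
		}
	}
}
```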
+#[proc_macro_attribute] +pub fn validate_unsigned(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + +/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. +/// +/// Item must be either a type alias, an enum, or a struct. It needs to be public. +/// +/// E.g.: +/// +/// ```ignore +/// #[pallet::origin] +/// pub struct Origin(PhantomData<(T)>); +/// ``` +/// +/// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin +/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care +/// as it might require some migration. +/// +/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. +#[proc_macro_attribute] +pub fn origin(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 51aa05261dac3..9b0ee84c34d8a 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -744,7 +744,7 @@ macro_rules! assert_err { /// Assert an expression returns an error specified. /// -/// This can be used on`DispatchResultWithPostInfo` when the post info should +/// This can be used on `DispatchResultWithPostInfo` when the post info should /// be ignored. #[macro_export] macro_rules! assert_err_ignore_postinfo { @@ -1399,6 +1399,7 @@ pub mod pallet_prelude { PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, }; pub use codec::{Decode, Encode, MaxEncodedLen}; + pub use frame_support::pallet_macros::*; pub use scale_info::TypeInfo; pub use sp_runtime::{ traits::{MaybeSerializeDeserialize, Member, ValidateUnsigned}, @@ -1413,38 +1414,133 @@ pub mod pallet_prelude { pub use sp_weights::Weight; } -/// `pallet` attribute macro allows to define a pallet to be used in `construct_runtime!`. +/// The `pallet` attribute macro defines a pallet that can be used with +/// [`construct_runtime!`]. It must be attached to a module named `pallet` as follows: /// -/// It is define by a module item: /// ```ignore /// #[pallet] /// pub mod pallet { -/// ... +/// ... /// } /// ``` /// -/// Inside the module the macro will parse item with the attribute: `#[pallet::*]`, some -/// attributes are mandatory, some other optional. -/// -/// The attribute are explained with the syntax of non instantiable pallets, to see how pallet -/// with instance work see below example. +/// Note that various types can be automatically imported using +/// [`frame_support::pallet_prelude`] and `frame_system::pallet_prelude`: /// -/// Note various type can be automatically imported using pallet_prelude in frame_support and -/// frame_system: /// ```ignore /// #[pallet] /// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// ... +/// use frame_support::pallet_prelude::*; +/// use frame_system::pallet_prelude::*; +/// ... /// } /// ``` /// -/// # Config trait: `#[pallet::config]` mandatory +/// # pallet::* Attributes +/// +/// The `pallet` macro will parse any items within your `pallet` module that are annotated with +/// `#[pallet::*]` attributes. Some of these attributes are mandatory and some are optional, +/// and they can attach to different types of items within your pallet depending on the +/// attribute in question. 
The full list of `#[pallet::*]` attributes is shown below in the +/// order in which they are mentioned in this document: +/// +/// * [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) +/// * [`pallet::config`](#config-trait-palletconfig-mandatory) +/// * [`pallet::constant`](#palletconstant) +/// * [`pallet::disable_frame_system_supertrait_check`](#disable_supertrait_check) +/// * [`pallet::generate_store($vis trait Store)`](#palletgenerate_storevis-trait-store) +/// * [`pallet::generate_storage_info`](#palletgenerate_storage_info) +/// * [`pallet::storage_version`](#palletstorage_version) +/// * [`pallet::hooks`](#hooks-pallethooks-optional) +/// * [`pallet::call`](#call-palletcall-optional) +/// * [`pallet::weight($expr)`](#palletweightexpr) +/// * [`pallet::compact`](#palletcompact-some_arg-some_type) +/// * [`pallet::call_index($idx)`](#palletcall_indexidx) +/// * [`pallet::extra_constants`](#extra-constants-palletextra_constants-optional) +/// * [`pallet::error`](#error-palleterror-optional) +/// * [`pallet::event`](#event-palletevent-optional) +/// * [`pallet::generate_deposit($visibility fn +/// deposit_event)`](#palletgenerate_depositvisibility-fn-deposit_event) +/// * [`pallet::storage`](#storage-palletstorage-optional) +/// * [`pallet::getter(fn $my_getter_fn_name)`](#palletgetterfn-my_getter_fn_name-optional) +/// * [`pallet::storage_prefix = "SomeName"`](#palletstorage_prefix--somename-optional) +/// * [`pallet::unbounded`](#palletunbounded-optional) +/// * [`pallet::whitelist_storage`](#palletwhitelist_storage-optional) +/// * [`cfg(..)`](#cfg-for-storage) (on storage items) +/// * [`pallet::type_value`](#type-value-pallettype_value-optional) +/// * [`pallet::genesis_config`](#genesis-config-palletgenesis_config-optional) +/// * [`pallet::genesis_build`](#genesis-build-palletgenesis_build-optional) +/// * [`pallet::inherent`](#inherent-palletinherent-optional) +/// * [`pallet::validate_unsigned`](#validate-unsigned-palletvalidate_unsigned-optional) +/// * [`pallet::origin`](#origin-palletorigin-optional) +/// +/// Note that at compile-time, the `#[pallet]` macro will analyze and expand all of these +/// attributes, ultimately removing their AST nodes before they can be parsed as real +/// attribute macro calls. This means that technically we do not need attribute macro +/// definitions for any of these attributes, however, for consistency and discoverability +/// reasons, we still maintain stub attribute macro definitions for all of these attributes in +/// the [`pallet_macros`] module which is automatically included in all pallets as part of the +/// pallet prelude. The actual "work" for all of these attribute macros can be found in the +/// macro expansion for `#[pallet]`. +/// +/// Also note that in this document, pallet attributes are explained using the syntax of +/// non-instantiable pallets. For an example of an instantiable pallet, see [this +/// example](#example-of-an-instantiable-pallet). +/// +/// # Pallet struct placeholder: `#[pallet::pallet]` (mandatory) +/// +/// The pallet struct placeholder `#[pallet::pallet]` is mandatory and allows you to specify +/// pallet information. +/// +/// The struct must be defined as follows: +/// ```ignore +/// #[pallet::pallet] +/// pub struct Pallet(_); +/// ``` +/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. /// -/// The trait defining generics of the pallet. 
+/// ## Macro expansion: +/// +/// The macro adds this attribute to the struct definition: +/// ```ignore +/// #[derive( +/// frame_support::CloneNoBound, +/// frame_support::EqNoBound, +/// frame_support::PartialEqNoBound, +/// frame_support::RuntimeDebugNoBound, +/// )] +/// ``` +/// and replaces the type `_` with `PhantomData`. It also implements on the pallet: +/// * [`GetStorageVersion`](`traits::GetStorageVersion`) +/// * [`OnGenesis`](`traits::OnGenesis`): contains some logic to write the pallet version into +/// storage. +/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. +/// +/// It declares `type Module` type alias for `Pallet`, used by `construct_runtime`. +/// +/// It implements [`PalletInfoAccess`](`traits::PalletInfoAccess') on `Pallet` to ease access +/// to pallet information given by [`frame_support::traits::PalletInfo`]. (The implementation +/// uses the associated type `frame_system::Config::PalletInfo`). +/// +/// It implements [`StorageInfoTrait`](`traits::StorageInfoTrait`) on `Pallet` which give +/// information about all storages. +/// +/// If the attribute `generate_store` is set then the macro creates the trait `Store` and +/// implements it on `Pallet`. +/// +/// If the attribute `set_storage_max_encoded_len` is set then the macro calls +/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for each storage in the implementation of +/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet. Otherwise it implements +/// [`StorageInfoTrait`](`traits::StorageInfoTrait`) for the pallet using the +/// [`PartialStorageInfoTrait`](`traits::PartialStorageInfoTrait`) implementation of storages. +/// +/// # Config trait: `#[pallet::config]` (mandatory) +/// +/// The mandatory attribute `#[pallet::config]` defines the configurable options for the +/// pallet. +/// +/// Item must be defined as: /// -/// Item must be defined as /// ```ignore /// #[pallet::config] /// pub trait Config: frame_system::Config + $optionally_some_other_supertraits @@ -1453,74 +1549,103 @@ pub mod pallet_prelude { /// ... /// } /// ``` -/// I.e. a regular trait definition named `Config`, with supertrait `frame_system::Config`, -/// optionally other supertrait and where clause. /// -/// The associated type `RuntimeEvent` is reserved, if defined it must bounds -/// `From` and `IsType<::RuntimeEvent>`, see -/// `#[pallet::event]` for more information. +/// I.e. a regular trait definition named `Config`, with the supertrait +/// `frame_system::pallet::Config`, and optionally other supertraits and a where clause. +/// (Specifying other supertraits here is known as [tight +/// coupling](https://docs.substrate.io/reference/how-to-guides/pallet-design/use-tight-coupling/)) +/// +/// The associated type `RuntimeEvent` is reserved. If defined, it must have the bounds +/// `From` and `IsType<::RuntimeEvent>`. +/// +/// [`pallet::event`](`frame_support::pallet_macros::event`) must be present if `RuntimeEvent` +/// exists as a config item in your `#[pallet::config]`. 
+/// +/// Also see [`pallet::config`](`frame_support::pallet_macros::config`) +/// +/// ## `pallet::constant` +/// +/// The `#[pallet::constant]` attribute can be used to add an associated type trait bounded by +/// [`Get`](crate::traits::Get) from [`pallet::config`](#palletconfig) into metadata, e.g.: /// -/// To put `Get` associated type into metadatas, use the attribute `#[pallet::constant]`, e.g.: /// ```ignore /// #[pallet::config] /// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type Foo: Get; +/// #[pallet::constant] +/// type Foo: Get; /// } /// ``` /// +/// Also see [`pallet::constant`](`frame_support::pallet_macros::constant`) +/// +/// ## `pallet::disable_frame_system_supertrait_check` +/// +/// /// To bypass the `frame_system::Config` supertrait check, use the attribute -/// `#[pallet::disable_frame_system_supertrait_check]`, e.g.: +/// `pallet::disable_frame_system_supertrait_check`, e.g.: +/// /// ```ignore /// #[pallet::config] /// #[pallet::disable_frame_system_supertrait_check] /// pub trait Config: pallet_timestamp::Config {} /// ``` /// -/// ### Macro expansion: +/// NOTE: Bypassing the `frame_system::Config` supertrait check is typically desirable when you +/// want to write an alternative to the `frame_system` pallet. /// -/// The macro expand pallet constant metadata with the information given by -/// `#[pallet::constant]`. +/// Also see +/// [`pallet::disable_frame_system_supertrait_check`](`frame_support::pallet_macros::disable_frame_system_supertrait_check`) /// -/// # Pallet struct placeholder: `#[pallet::pallet]` mandatory +/// ## Macro expansion: /// -/// The placeholder struct, on which is implemented pallet informations. +/// The macro expands pallet constant metadata with the information given by +/// `#[pallet::constant]`. /// -/// Item must be defined as followed: -/// ```ignore -/// #[pallet::pallet] -/// pub struct Pallet(_); -/// ``` -/// I.e. a regular struct definition named `Pallet`, with generic T and no where clause. +/// # `pallet::generate_store($vis trait Store)` +/// +/// To generate a `Store` trait associating all storages, annotate your `Pallet` struct with +/// the attribute `#[pallet::generate_store($vis trait Store)]`, e.g.: /// -/// To generate a `Store` trait associating all storages, use the attribute -/// `#[pallet::generate_store($vis trait Store)]`, e.g.: /// ```ignore /// #[pallet::pallet] /// #[pallet::generate_store(pub(super) trait Store)] /// pub struct Pallet(_); /// ``` -/// More precisely the store trait contains an associated type for each storage. It is -/// implemented for `Pallet` allowing to access the storage from pallet struct. +/// More precisely, the `Store` trait contains an associated type for each storage. It is +/// implemented for `Pallet` allowing access to the storage from pallet struct. /// /// Thus when defining a storage named `Foo`, it can later be accessed from `Pallet` using /// `::Foo`. /// +/// NOTE: this attribute is only valid when applied _directly_ to your `Pallet` struct +/// definition. +/// +/// Also see [`pallet::generate_store`](`frame_support::pallet_macros::generate_store`). 
+/// +/// # `pallet::generate_storage_info` +/// /// To generate the full storage info (used for PoV calculation) use the attribute /// `#[pallet::generate_storage_info]`, e.g.: +/// /// ```ignore /// #[pallet::pallet] /// #[pallet::generate_storage_info] /// pub struct Pallet(_); /// ``` /// -/// This require all storage to implement the trait [`traits::StorageInfoTrait`], thus all keys -/// and value types must bound [`pallet_prelude::MaxEncodedLen`]. -/// Some individual storage can opt-out from this constraint by using `#[pallet::unbounded]`, -/// see `#[pallet::storage]` documentation. +/// This requires all storage items to implement the trait [`traits::StorageInfoTrait`], thus +/// all keys and value types must be bound by [`pallet_prelude::MaxEncodedLen`]. Individual +/// storages can opt-out from this constraint by using `#[pallet::unbounded]` (see +/// `#[pallet::storage]` for more info). /// -/// As the macro implements [`traits::GetStorageVersion`], the current storage version needs to -/// be communicated to the macro. This can be done by using the `storage_version` attribute: +/// Also see [`pallet::generate_storage_info`](`frame_support::pallet_macros::generate_storage_info`) +/// +/// # `pallet::storage_version` +/// +/// Because the [`pallet::pallet`](#pallet-struct-placeholder-palletpallet-mandatory) macro +/// implements [`traits::GetStorageVersion`], the current storage version needs to be +/// communicated to the macro. This can be done by using the `pallet::storage_version` +/// attribute: /// /// ```ignore /// const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); @@ -1532,75 +1657,34 @@ pub mod pallet_prelude { /// /// If not present, the current storage version is set to the default value. /// -/// ### Macro expansion: -/// -/// The macro add this attribute to the struct definition: -/// ```ignore -/// #[derive( -/// frame_support::CloneNoBound, -/// frame_support::EqNoBound, -/// frame_support::PartialEqNoBound, -/// frame_support::RuntimeDebugNoBound, -/// )] -/// ``` -/// and replace the type `_` by `PhantomData`. -/// -/// It implements on pallet: -/// * [`traits::GetStorageVersion`] -/// * [`traits::OnGenesis`]: contains some logic to write pallet version into storage. -/// * `PalletErrorTypeInfo`: provides the type information for the pallet error, if defined. -/// -/// It declares `type Module` type alias for `Pallet`, used by [`construct_runtime`]. -/// -/// It implements [`traits::PalletInfoAccess`] on `Pallet` to ease access to pallet -/// informations given by [`frame_support::traits::PalletInfo`]. -/// (The implementation uses the associated type `frame_system::Config::PalletInfo`). -/// -/// It implements [`traits::StorageInfoTrait`] on `Pallet` which give information about all -/// storages. -/// -/// If the attribute generate_store is set then the macro creates the trait `Store` and -/// implements it on `Pallet`. -/// -/// If the attribute set_storage_max_encoded_len is set then the macro call -/// [`traits::StorageInfoTrait`] for each storage in the implementation of -/// [`traits::StorageInfoTrait`] for the pallet. -/// Otherwise it implements [`traits::StorageInfoTrait`] for the pallet using the -/// [`traits::PartialStorageInfoTrait`] implementation of storages. +/// Also see [`pallet::storage_version`](`frame_support::pallet_macros::storage_version`) /// -/// # Hooks: `#[pallet::hooks]` optional +/// # Hooks: `#[pallet::hooks]` (optional) /// -/// Implementation of `Hooks` on `Pallet` allowing to define some specific pallet logic. 
+/// The `pallet::hooks` attribute allows you to specify a `Hooks` implementation for `Pallet` +/// that specifies pallet-specific logic. /// -/// Item must be defined as +/// The item the attribute attaches to must be defined as follows: /// ```ignore /// #[pallet::hooks] /// impl Hooks> for Pallet $optional_where_clause { +/// ... /// } /// ``` /// I.e. a regular trait implementation with generic bound: `T: Config`, for the trait -/// `Hooks>` (they are defined in preludes), for the type `Pallet` -/// and with an optional where clause. +/// `Hooks>` (they are defined in preludes), for the type `Pallet` and +/// with an optional where clause. /// -/// If no `#[pallet::hooks]` exists, then a default implementation corresponding to the -/// following code is automatically generated: +/// If no `#[pallet::hooks]` exists, then the following default implementation is +/// automatically generated: /// ```ignore /// #[pallet::hooks] /// impl Hooks> for Pallet {} /// ``` /// -/// ### Macro expansion: -/// -/// The macro implements the traits `OnInitialize`, `OnIdle`, `OnFinalize`, `OnRuntimeUpgrade`, -/// `OffchainWorker`, `IntegrityTest` using `Hooks` implementation. -/// -/// NOTE: OnRuntimeUpgrade is implemented with `Hooks::on_runtime_upgrade` and some additional -/// logic. E.g. logic to write pallet version into storage. +/// Also see [`pallet::hooks`](`frame_support::pallet_macros::hooks`) /// -/// NOTE: The macro also adds some tracing logic when implementing the above traits. The -/// following hooks emit traces: `on_initialize`, `on_finalize` and `on_runtime_upgrade`. -/// -/// # Call: `#[pallet::call]` optional +/// # Call: `#[pallet::call]` (optional) /// /// Implementation of pallet dispatchables. /// @@ -1622,53 +1706,50 @@ pub mod pallet_prelude { /// } /// ``` /// I.e. a regular type implementation, with generic `T: Config`, on type `Pallet`, with -/// optional where clause. +/// an optional where clause. +/// +/// ## `#[pallet::weight($expr)]` +/// +/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, the +/// first argument must be `origin: OriginFor`. +/// +/// Also see [`pallet::weight`](`frame_support::pallet_macros::weight`) +/// +/// ### `#[pallet::compact] $some_arg: $some_type` +/// +/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must +/// return a `DispatchResultWithPostInfo` or `DispatchResult`. /// -/// Each dispatchable needs to define a weight with `#[pallet::weight($expr)]` attribute, -/// the first argument must be `origin: OriginFor`, compact encoding for argument can be -/// used using `#[pallet::compact]`, function must return `DispatchResultWithPostInfo` or -/// `DispatchResult`. +/// Also see [`pallet::compact`](`frame_support::pallet_macros::compact`) +/// +/// ## `#[pallet::call_index($idx)]` /// /// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, -/// which defines and sets the codec index for the dispatchable function in the `Call` enum. +/// which explicitly defines the codec index for the dispatchable function in the `Call` enum. /// /// All call indexes start from 0, until it encounters a dispatchable function with a defined /// call index. The dispatchable function that lexically follows the function with a defined /// call index will have that call index, but incremented by 1, e.g. 
if there are 3 -/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` has -/// a call index of 10, then `fn qux` will have an index of 11, instead of 1. -/// -/// All arguments must implement `Debug`, `PartialEq`, `Eq`, `Decode`, `Encode`, `Clone`. For -/// ease of use, bound the trait `Member` available in frame_support::pallet_prelude. +/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` +/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1. /// -/// If no `#[pallet::call]` exists, then a default implementation corresponding to the -/// following code is automatically generated: -/// ```ignore -/// #[pallet::call] -/// impl Pallet {} -/// ``` -/// -/// **WARNING**: modifying dispatchables, changing their order, removing some must be done with -/// care. Indeed this will change the outer runtime call type (which is an enum with one -/// variant per pallet), this outer runtime call can be stored on-chain (e.g. in -/// pallet-scheduler). Thus migration might be needed. To mitigate against some of this, the +/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be +/// done with care. Indeed this will change the outer runtime call type (which is an enum with +/// one variant per pallet), this outer runtime call can be stored on-chain (e.g. in +/// `pallet-scheduler`). Thus migration might be needed. To mitigate against some of this, the /// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so -/// that the `Call` enum encoding does not change after modification. -/// -/// ### Macro expansion +/// that the `Call` enum encoding does not change after modification. As a general rule of +/// thumb, it is therefore adventageous to always add new calls to the end so you can maintain +/// the existing order of calls. /// -/// The macro creates an enum `Call` with one variant per dispatchable. This enum implements: -/// `Clone`, `Eq`, `PartialEq`, `Debug` (with stripped implementation in `not("std")`), -/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, `UnfilteredDispatchable`. +/// Also see [`pallet::call_index`](`frame_support::pallet_macros::call_index`) /// -/// The macro implement the `Callable` trait on `Pallet` and a function `call_functions` which -/// returns the dispatchable metadata. +/// # Extra constants: `#[pallet::extra_constants]` (optional) /// -/// # Extra constants: `#[pallet::extra_constants]` optional -/// -/// Allow to define some extra constants to put into constant metadata. +/// Allows you to define some extra constants to be added into constant metadata. /// /// Item must be defined as: +/// /// ```ignore /// #[pallet::extra_constants] /// impl Pallet where $optional_where_clause { @@ -1679,19 +1760,23 @@ pub mod pallet_prelude { /// ... /// } /// ``` -/// I.e. a regular rust implement block with some optional where clause and functions with 0 -/// args, 0 generics, and some return type. +/// I.e. a regular rust `impl` block with some optional where clause and functions with 0 args, +/// 0 generics, and some return type. /// -/// ### Macro expansion +/// ## Macro expansion /// -/// The macro add some extra constant to pallet constant metadata. +/// The macro add some extra constants to pallet constant metadata. 
/// -/// # Error: `#[pallet::error]` optional +/// Also see: [`pallet::extra_constants`](`frame_support::pallet_macros::extra_constants`) /// -/// Allow to define an error type to be return from dispatchable on error. -/// This error type informations are put into metadata. +/// # Error: `#[pallet::error]` (optional) +/// +/// The `#[pallet::error]` attribute allows you to define an error enum that will be returned +/// from the dispatchable when an error occurs. The information for this error type is then +/// stored in metadata. /// /// Item must be defined as: +/// /// ```ignore /// #[pallet::error] /// pub enum Error { @@ -1702,7 +1787,7 @@ pub mod pallet_prelude { /// ... /// } /// ``` -/// I.e. a regular rust enum named `Error`, with generic `T` and fieldless or multiple-field +/// I.e. a regular enum named `Error`, with generic `T` and fieldless or multiple-field /// variants. /// /// Any field type in the enum variants must implement [`scale_info::TypeInfo`] in order to be @@ -1710,29 +1795,28 @@ pub mod pallet_prelude { /// preferably 1 byte in size in order to reduce storage size. The error enum itself has an /// absolute maximum encoded size specified by [`MAX_MODULE_ERROR_ENCODED_SIZE`]. /// +/// (1 byte can still be 256 different errors. The more specific the error, the easier it is to +/// diagnose problems and give a better experience to the user. Don't skimp on having lots of +/// individual error conditions.) +/// /// Field types in enum variants must also implement [`PalletError`](traits::PalletError), /// otherwise the pallet will fail to compile. Rust primitive types have already implemented /// the [`PalletError`](traits::PalletError) trait along with some commonly used stdlib types -/// such as `Option` and `PhantomData`, and hence in most use cases, a manual implementation is -/// not necessary and is discouraged. +/// such as [`Option`] and [`PhantomData`](`frame_support::dispatch::marker::PhantomData`), and +/// hence in most use cases, a manual implementation is not necessary and is discouraged. /// -/// The generic `T` mustn't bound anything and where clause is not allowed. But bounds and -/// where clause shouldn't be needed for any usecase. +/// The generic `T` must not bound anything and a `where` clause is not allowed. That said, +/// bounds and/or a where clause should not needed for any use-case. /// -/// ### Macro expansion +/// Also see: [`pallet::error`](`frame_support::pallet_macros::error`) /// -/// The macro implements `Debug` trait and functions `as_u8` using variant position, and -/// `as_str` using variant doc. +/// # Event: `#[pallet::event]` (optional) /// -/// The macro implements `From>` for `&'static str`. -/// The macro implements `From>` for `DispatchError`. +/// Allows you to define pallet events. Pallet events are stored under the `system` / `events` +/// key when the block is applied (and then replaced when the next block writes it's events). /// -/// # Event: `#[pallet::event]` optional +/// The Event enum must be defined as follows: /// -/// Allow to define pallet events, pallet events are stored in the block when they deposited -/// (and removed in next block). -/// -/// Item is defined as: /// ```ignore /// #[pallet::event] /// #[pallet::generate_deposit($visibility fn deposit_event)] // Optional @@ -1742,157 +1826,174 @@ pub mod pallet_prelude { /// ... /// } /// ``` -/// I.e. an enum (with named or unnamed fields variant), named Event, with generic: none or `T` -/// or `T: Config`, and optional where clause. 
/// -/// Each field must implement `Clone`, `Eq`, `PartialEq`, `Encode`, `Decode`, and `Debug` (on -/// std only). -/// For ease of use, bound the trait `Member` available in frame_support::pallet_prelude. +/// I.e. an enum (with named or unnamed fields variant), named `Event`, with generic: none or +/// `T` or `T: Config`, and optional w here clause. /// -/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generate a helper -/// function on `Pallet` to deposit event. +/// Each field must implement [`Clone`], [`Eq`], [`PartialEq`], [`Encode`], [`Decode`], and +/// [`Debug`] (on std only). For ease of use, bound by the trait +/// [`Member`](`frame_support::pallet_prelude::Member`), available in +/// frame_support::pallet_prelude. /// -/// NOTE: For instantiable pallet, event must be generic over T and I. +/// Also see [`pallet::event`](`frame_support::pallet_macros::event`) /// -/// ### Macro expansion: +/// ## `#[pallet::generate_deposit($visibility fn deposit_event)]` /// -/// Macro will add on enum `Event` the attributes: -/// * `#[derive(frame_support::CloneNoBound)]`, -/// * `#[derive(frame_support::EqNoBound)]`, -/// * `#[derive(frame_support::PartialEqNoBound)]`, -/// * `#[derive(codec::Encode)]`, -/// * `#[derive(codec::Decode)]`, -/// * `#[derive(frame_support::RuntimeDebugNoBound)]` +/// The attribute `#[pallet::generate_deposit($visibility fn deposit_event)]` generates a +/// helper function on `Pallet` that handles deposit events. /// -/// Macro implements `From>` for (). +/// NOTE: For instantiable pallets, the event must be generic over `T` and `I`. /// -/// Macro implements metadata function on `Event` returning the `EventMetadata`. +/// Also see [`pallet::generate_deposit`](`frame_support::pallet_macros::generate_deposit`) /// -/// If `#[pallet::generate_deposit]` then macro implement `fn deposit_event` on `Pallet`. +/// # Storage: `#[pallet::storage]` (optional) /// -/// # Storage: `#[pallet::storage]` optional +/// The `#[pallet::storage]` attribute lets you define some abstract storage inside of runtime +/// storage and also set its metadata. This attribute can be used multiple times. /// -/// Allow to define some abstract storage inside runtime storage and also set its metadata. -/// This attribute can be used multiple times. +/// Item should be defined as: /// -/// Item is defined as: /// ```ignore /// #[pallet::storage] /// #[pallet::getter(fn $getter_name)] // optional /// $vis type $StorageName<$some_generic> $optional_where_clause /// = $StorageType<$generic_name = $some_generics, $other_name = $some_other, ...>; /// ``` -/// or with unnamed generic +/// +/// or with unnamed generic: +/// /// ```ignore /// #[pallet::storage] /// #[pallet::getter(fn $getter_name)] // optional /// $vis type $StorageName<$some_generic> $optional_where_clause /// = $StorageType<_, $some_generics, ...>; /// ``` -/// I.e. it must be a type alias, with generics: `T` or `T: Config`, aliased type must be one -/// of `StorageValue`, `StorageMap` or `StorageDoubleMap` (defined in frame_support). -/// The generic arguments of the storage type can be given in two manner: named and unnamed. 
-/// For named generic argument: the name for each argument is the one as define on the storage -/// struct: -/// * [`pallet_prelude::StorageValue`] expect `Value` and optionally `QueryKind` and `OnEmpty`, -/// * [`pallet_prelude::StorageMap`] expect `Hasher`, `Key`, `Value` and optionally `QueryKind` -/// and `OnEmpty`, -/// * [`pallet_prelude::CountedStorageMap`] expect `Hasher`, `Key`, `Value` and optionally -/// `QueryKind` and `OnEmpty`, -/// * [`pallet_prelude::StorageDoubleMap`] expect `Hasher1`, `Key1`, `Hasher2`, `Key2`, `Value` -/// and optionally `QueryKind` and `OnEmpty`. -/// -/// For unnamed generic argument: Their first generic must be `_` as it is replaced by the -/// macro and other generic must declared as a normal declaration of type generic in rust. -/// -/// The Prefix generic written by the macro is generated using -/// `PalletInfo::name::>()` and the name of the storage type. -/// E.g. if runtime names the pallet "MyExample" then the storage `type Foo = ...` use the -/// prefix: `Twox128(b"MyExample") ++ Twox128(b"Foo")`. /// -/// For the `CountedStorageMap` variant, the Prefix also implements -/// `CountedStorageMapInstance`. It associate a `CounterPrefix`, which is implemented same as -/// above, but the storage prefix is prepend with `"CounterFor"`. -/// E.g. if runtime names the pallet "MyExample" then the storage -/// `type Foo = CountedStorageaMap<...>` will store its counter at the prefix: -/// `Twox128(b"MyExample") ++ Twox128(b"CounterForFoo")`. +/// I.e. it must be a type alias, with generics: `T` or `T: Config`. The aliased type must be +/// one of [`StorageValue`](`pallet_prelude::StorageValue`), +/// [`StorageMap`](`pallet_prelude::StorageMap`) or +/// [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`). The generic arguments of the +/// storage type can be given in two manners: named and unnamed. For named generic arguments, +/// the name for each argument should match the name defined for it on the storage struct: +/// * [`StorageValue`](`pallet_prelude::StorageValue`) expects `Value` and optionally +/// `QueryKind` and `OnEmpty`, +/// * [`StorageMap`](`pallet_prelude::StorageMap`) expects `Hasher`, `Key`, `Value` and +/// optionally `QueryKind` and `OnEmpty`, +/// * [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) expects `Hasher`, `Key`, +/// `Value` and optionally `QueryKind` and `OnEmpty`, +/// * [`StorageDoubleMap`](`pallet_prelude::StorageDoubleMap`) expects `Hasher1`, `Key1`, +/// `Hasher2`, `Key2`, `Value` and optionally `QueryKind` and `OnEmpty`. +/// +/// For unnamed generic arguments: Their first generic must be `_` as it is replaced by the +/// macro and other generic must declared as a normal generic type declaration. +/// +/// The `Prefix` generic written by the macro is generated using +/// `PalletInfo::name::>()` and the name of the storage type. E.g. if runtime names +/// the pallet "MyExample" then the storage `type Foo = ...` should use the prefix: +/// `Twox128(b"MyExample") ++ Twox128(b"Foo")`. +/// +/// For the [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`) variant, the `Prefix` +/// also implements +/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`). +/// It also associates a [`CounterPrefix`](`pallet_prelude::CounterPrefix'), which is +/// implemented the same as above, but the storage prefix is prepend with `"CounterFor"`. E.g. 
+/// if runtime names the pallet "MyExample" then the storage `type Foo = +/// CountedStorageaMap<...>` will store its counter at the prefix: `Twox128(b"MyExample") ++ +/// Twox128(b"CounterForFoo")`. /// /// E.g: +/// /// ```ignore /// #[pallet::storage] /// pub(super) type MyStorage = StorageMap; /// ``` -/// In this case the final prefix used by the map is -/// `Twox128(b"MyExample") ++ Twox128(b"OtherName")`. /// -/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows to define a +/// In this case the final prefix used by the map is `Twox128(b"MyExample") ++ +/// Twox128(b"OtherName")`. +/// +/// Also see [`pallet::storage`](`frame_support::pallet_macros::storage`) +/// +/// ## `#[pallet::getter(fn $my_getter_fn_name)]` (optional) +/// +/// The optional attribute `#[pallet::getter(fn $my_getter_fn_name)]` allows you to define a /// getter function on `Pallet`. /// -/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allow to define the storage -/// prefix to use, see how `Prefix` generic is implemented above. +/// Also see [`pallet::getter`](`frame_support::pallet_macros::getter`) +/// +/// ## `#[pallet::storage_prefix = "SomeName"]` (optional) +/// +/// The optional attribute `#[pallet::storage_prefix = "SomeName"]` allows you to define the +/// storage prefix to use, see how `Prefix` generic is implemented above. This is helpful if +/// you wish to rename the storage field but don't want to perform a migration. /// /// E.g: +/// /// ```ignore /// #[pallet::storage] /// #[pallet::storage_prefix = "foo"] /// #[pallet::getter(fn my_storage)] /// pub(super) type MyStorage = StorageMap; /// ``` +/// /// or +/// /// ```ignore /// #[pallet::storage] /// #[pallet::getter(fn my_storage)] /// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; /// ``` /// -/// The optional attribute `#[pallet::unbounded]` allows to declare the storage as unbounded. -/// When implementating the storage info (when `#[pallet::generate_storage_info]` is specified -/// on the pallet struct placeholder), the size of the storage will be declared as unbounded. -/// This can be useful for storage which can never go into PoV (Proof of Validity). +/// Also see [`pallet::storage_prefix`](`frame_support::pallet_macros::storage_prefix`) +/// +/// ## `#[pallet::unbounded]` (optional) +/// +/// The optional attribute `#[pallet::unbounded]` declares the storage as unbounded. When +/// implementating the storage info (when `#[pallet::generate_storage_info]` is specified on +/// the pallet struct placeholder), the size of the storage will be declared as unbounded. This +/// can be useful for storage which can never go into PoV (Proof of Validity). +/// +/// Also see [`pallet::unbounded`](`frame_support::pallet_macros::unbounded`) +/// +/// ## `#[pallet::whitelist_storage]` (optional) /// +/// The optional attribute `#[pallet::whitelist_storage]` will declare the storage as +/// whitelisted from benchmarking. +/// +/// See +/// [`pallet::whitelist_storage`](frame_support::pallet_macros::whitelist_storage) +/// for more info. +/// +/// ## `#[cfg(..)]` (for storage) /// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. /// /// E.g: +/// /// ```ignore /// #[cfg(feature = "my-feature")] /// #[pallet::storage] /// pub(super) type MyStorage = StorageValue; /// ``` /// -/// The optional attribute `#[pallet::whitelist_storage]` will declare the -/// storage as whitelisted from benchmarking. 
Doing so will exclude reads of -/// that value's storage key from counting towards weight calculations during -/// benchmarking. -/// -/// This attribute should only be attached to storages that are known to be -/// read/used in every block. This will result in a more accurate benchmarking weight. -/// -/// ### Example -/// ```ignore -/// #[pallet::storage] -/// #[pallet::whitelist_storage] -/// pub(super) type Number = StorageValue<_, T::BlockNumber, ValueQuery>; -/// ``` -/// /// All the `cfg` attributes are automatically copied to the items generated for the storage, /// i.e. the getter, storage prefix, and the metadata element etc. /// /// Any type placed as the `QueryKind` parameter must implement /// [`frame_support::storage::types::QueryKindTrait`]. There are 3 implementations of this /// trait by default: -/// 1. [`frame_support::storage::types::OptionQuery`], the default `QueryKind` used when this -/// type parameter is omitted. Specifying this as the `QueryKind` would cause storage map -/// APIs that return a `QueryKind` to instead return an `Option`, returning `Some` when a -/// value does exist under a specified storage key, and `None` otherwise. -/// 2. [`frame_support::storage::types::ValueQuery`] causes storage map APIs that return a -/// `QueryKind` to instead return the value type. In cases where a value does not exist -/// under a specified storage key, the `OnEmpty` type parameter on `QueryKindTrait` is used -/// to return an appropriate value. -/// 3. [`frame_support::storage::types::ResultQuery`] causes storage map APIs that return a -/// `QueryKind` to instead return a `Result`, with `T` being the value type and `E` -/// being the pallet error type specified by the `#[pallet::error]` attribute. In cases -/// where a value does not exist under a specified storage key, an `Err` with the specified -/// pallet error variant is returned. +/// +/// 1. [`OptionQuery`](`frame_support::storage::types::OptionQuery`), the default `QueryKind` +/// used when this type parameter is omitted. Specifying this as the `QueryKind` would cause +/// storage map APIs that return a `QueryKind` to instead return an [`Option`], returning +/// `Some` when a value does exist under a specified storage key, and `None` otherwise. +/// 2. [`ValueQuery`](`frame_support::storage::types::ValueQuery`) causes storage map APIs that +/// return a `QueryKind` to instead return the value type. In cases where a value does not +/// exist under a specified storage key, the `OnEmpty` type parameter on `QueryKindTrait` is +/// used to return an appropriate value. +/// 3. [`ResultQuery`](`frame_support::storage::types::ResultQuery`) causes storage map APIs +/// that return a `QueryKind` to instead return a `Result`, with `T` being the value +/// type and `E` being the pallet error type specified by the `#[pallet::error]` attribute. +/// In cases where a value does not exist under a specified storage key, an `Err` with the +/// specified pallet error variant is returned. /// /// NOTE: If the `QueryKind` generic parameter is still generic at this stage or is using some /// type alias then the generation of the getter might fail. In this case the getter can be @@ -1902,60 +2003,62 @@ pub mod pallet_prelude { /// usable at all). We use [`StorageHasher::METADATA`] for the metadata of the hasher of the /// storage item. Thus generic hasher is supported. 
/// -/// ### Macro expansion +/// ## Macro expansion /// /// For each storage item the macro generates a struct named /// `_GeneratedPrefixForStorage$NameOfStorage`, and implements /// [`StorageInstance`](traits::StorageInstance) on it using the pallet and storage name. It -/// then uses it as the first generic of the aliased type. -/// For `CountedStorageMap`, `CountedStorageMapInstance` is implemented, and another similar -/// struct is generated. +/// then uses it as the first generic of the aliased type. For +/// [`CountedStorageMap`](`pallet_prelude::CountedStorageMap`), +/// [`CountedStorageMapInstance`](`frame_support::storage::types::CountedStorageMapInstance`) +/// is implemented, and another similar struct is generated. /// -/// For named generic, the macro will reorder the generics, and remove the names. +/// For a named generic, the macro will reorder the generics, and remove the names. /// -/// The macro implements the function `storage_metadata` on `Pallet` implementing the metadata -/// for all storage items based on their kind: +/// The macro implements the function `storage_metadata` on the `Pallet` implementing the +/// metadata for all storage items based on their kind: /// * for a storage value, the type of the value is copied into the metadata /// * for a storage map, the type of the values and the key's type is copied into the metadata -/// * for a storage double map, the type of the values, and the types of key1 and key2 are +/// * for a storage double map, the type of the values, and the types of `key1` and `key2` are /// copied into the metadata. /// -/// # Type value: `#[pallet::type_value]` optional +/// # Type value: `#[pallet::type_value]` (optional) +/// +/// The `#[pallet::type_value]` attribute lets you define a struct implementing the +/// [`Get`](crate::traits::Get) trait to ease use of storage types. This attribute is meant to +/// be used alongside [`#[pallet::storage]`](#storage-palletstorage-optional) to define a +/// storage's default value. This attribute can be used multiple times. /// -/// Helper to define a struct implementing `Get` trait. To ease use of storage types. -/// This attribute can be used multiple time. +/// Item must be defined as: /// -/// Item is defined as /// ```ignore /// #[pallet::type_value] /// fn $MyDefaultName<$some_generic>() -> $default_type $optional_where_clause { $expr } /// ``` +/// /// I.e.: a function definition with generics none or `T: Config` and a returned type. /// /// E.g.: +/// /// ```ignore /// #[pallet::type_value] /// fn MyDefault() -> T::Balance { 3.into() } /// ``` /// -/// NOTE: This attribute is meant to be used alongside `#[pallet::storage]` to defined some -/// specific default value in storage. -/// -/// ### Macro expansion -/// -/// Macro renames the function to some internal name, generate a struct with the original name -/// of the function and its generic, and implement `Get<$ReturnType>` by calling the user -/// defined function. +/// Also see [`pallet::type_value`](`frame_support::pallet_macros::type_value`) /// -/// # Genesis config: `#[pallet::genesis_config]` optional +/// # Genesis config: `#[pallet::genesis_config]` (optional) /// -/// Allow to define the genesis configuration of the pallet. +/// The `#[pallet::genesis_config]` attribute allows you to define the genesis configuration +/// for the pallet. /// -/// Item is defined as either an enum or a struct. -/// It needs to be public and implement trait GenesisBuild with `#[pallet::genesis_build]`. 
-/// The type generics is constrained to be either none, or `T` or `T: Config`. +/// Item is defined as either an enum or a struct. It needs to be public and implement the +/// trait [`GenesisBuild`](`traits::GenesisBuild`) with +/// [`#[pallet::genesis_build]`](#genesis-build-palletgenesis_build-optional). The type +/// generics are constrained to be either none, or `T` or `T: Config`. /// /// E.g: +/// /// ```ignore /// #[pallet::genesis_config] /// pub struct GenesisConfig { @@ -1963,31 +2066,28 @@ pub mod pallet_prelude { /// } /// ``` /// -/// ### Macro expansion +/// Also see [`pallet::genesis_config`](`frame_support::pallet_macros::genesis_config`) /// -/// Macro will add the following attribute on it: -/// * `#[cfg(feature = "std")]` -/// * `#[derive(Serialize, Deserialize)]` -/// * `#[serde(rename_all = "camelCase")]` -/// * `#[serde(deny_unknown_fields)]` -/// * `#[serde(bound(serialize = ""))]` -/// * `#[serde(bound(deserialize = ""))]` +/// # Genesis build: `#[pallet::genesis_build]` (optional) /// -/// # Genesis build: `#[pallet::genesis_build]` optional +/// The `#[pallet::genesis_build]` attribute allows you to define how `genesis_configuration` +/// is built. This takes as input the `GenesisConfig` type (as `self`) and constructs the +/// pallet's initial state. /// -/// Allow to define how genesis_configuration is built. +/// The impl must be defined as: /// -/// Item is defined as /// ```ignore /// #[pallet::genesis_build] /// impl GenesisBuild for GenesisConfig<$maybe_generics> { /// fn build(&self) { $expr } /// } /// ``` -/// I.e. a rust trait implementation with generic `T: Config`, of trait `GenesisBuild` on +/// +/// I.e. a trait implementation with generic `T: Config`, of trait `GenesisBuild` on /// type `GenesisConfig` with generics none or `T`. /// /// E.g.: +/// /// ```ignore /// #[pallet::genesis_build] /// impl GenesisBuild for GenesisConfig { @@ -1995,87 +2095,93 @@ pub mod pallet_prelude { /// } /// ``` /// -/// ### Macro expansion +/// Also see [`pallet::genesis_build`](`frame_support::pallet_macros::genesis_build`) /// -/// Macro will add the following attribute on it: -/// * `#[cfg(feature = "std")]` +/// # Inherent: `#[pallet::inherent]` (optional) /// -/// Macro will implement `sp_runtime::BuildModuleGenesisStorage` using `()` as second generic -/// for non-instantiable pallets. +/// The `#[pallet::inherent]` attribute allows the pallet to provide some +/// [inherent](https://docs.substrate.io/fundamentals/transaction-types/#inherent-transactions). +/// An inherent is some piece of data that is inserted by a block authoring node at block +/// creation time and can either be accepted or rejected by validators based on whether the +/// data falls within an acceptable range. /// -/// # Inherent: `#[pallet::inherent]` optional +/// The most common inherent is the `timestamp` that is inserted into every block. Since there +/// is no way to validate timestamps, validators simply check that the timestamp reported by +/// the block authoring node falls within an acceptable range. /// -/// Allow the pallet to provide some inherent: +/// Item must be defined as: /// -/// Item is defined as: /// ```ignore /// #[pallet::inherent] /// impl ProvideInherent for Pallet { /// // ... regular trait implementation /// } /// ``` -/// I.e. a trait implementation with bound `T: Config`, of trait `ProvideInherent` for type -/// `Pallet`, and some optional where clause. /// -/// ### Macro expansion +/// I.e. 
a trait implementation with bound `T: Config`, of trait +/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) for type `Pallet`, and some +/// optional where clause. +/// +/// Also see [`pallet::inherent`](`frame_support::pallet_macros::inherent`) /// -/// Macro make currently no use of this information, but it might use this information in the -/// future to give information directly to construct_runtime. +/// # Validate unsigned: `#[pallet::validate_unsigned]` (optional) /// -/// # Validate unsigned: `#[pallet::validate_unsigned]` optional +/// The `#[pallet::validate_unsigned]` attribute allows the pallet to validate some unsigned +/// transaction: /// -/// Allow the pallet to validate some unsigned transaction: +/// Item must be defined as: /// -/// Item is defined as: /// ```ignore /// #[pallet::validate_unsigned] /// impl ValidateUnsigned for Pallet { /// // ... regular trait implementation /// } /// ``` -/// I.e. a trait implementation with bound `T: Config`, of trait `ValidateUnsigned` for type -/// `Pallet`, and some optional where clause. /// -/// NOTE: There is also `sp_runtime::traits::SignedExtension` that can be used to add some -/// specific logic for transaction validation. +/// I.e. a trait implementation with bound `T: Config`, of trait +/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) for type `Pallet`, and some +/// optional where clause. /// -/// ### Macro expansion +/// NOTE: There is also the [`sp_runtime::traits::SignedExtension`] trait that can be used to +/// add some specific logic for transaction validation. /// -/// Macro make currently no use of this information, but it might use this information in the -/// future to give information directly to construct_runtime. +/// Also see [`pallet::validate_unsigned`](`frame_support::pallet_macros::validate_unsigned`) /// -/// # Origin: `#[pallet::origin]` optional +/// # Origin: `#[pallet::origin]` (optional) /// -/// Allow to define some origin for the pallet. +/// The `#[pallet::origin]` attribute allows you to define some origin for the pallet. /// -/// Item must be either a type alias or an enum or a struct. It needs to be public. +/// Item must be either a type alias, an enum, or a struct. It needs to be public. /// /// E.g.: +/// /// ```ignore /// #[pallet::origin] /// pub struct Origin(PhantomData<(T)>); /// ``` /// /// **WARNING**: modifying origin changes the outer runtime origin. This outer runtime origin -/// can be stored on-chain (e.g. in pallet-scheduler), thus any change must be done with care +/// can be stored on-chain (e.g. in `pallet-scheduler`), thus any change must be done with care /// as it might require some migration. /// -/// NOTE: for instantiable pallet, origin must be generic over T and I. +/// NOTE: for instantiable pallets, the origin must be generic over `T` and `I`. /// -/// # General notes on instantiable pallet +/// Also see [`pallet::origin`](`frame_support::pallet_macros::origin`) /// -/// An instantiable pallet is one where Config is generic, i.e. `Config`. This allow runtime -/// to implement multiple instance of the pallet, by using different type for the generic. -/// This is the sole purpose of the generic `I`. -/// But because `PalletInfo` requires `Pallet` placeholder to be static it is important to -/// bound `'static` whenever `PalletInfo` can be used. -/// And in order to have instantiable pallet usable as a regular pallet without instance, it is -/// important to bound `= ()` on every types. 
+/// # General notes on instantiable pallets /// -/// Thus impl bound look like `impl, I: 'static>`, and types look like +/// An instantiable pallet is one where Config is generic, i.e. `Config`. This allows +/// runtime to implement multiple instances of the pallet, by using different types for the +/// generic. This is the sole purpose of the generic `I`, but because +/// [`PalletInfo`](`traits::PalletInfo`) requires the `Pallet` placeholder to be static, it is +/// important to bound by `'static` whenever [`PalletInfo`](`traits::PalletInfo`) can be used. +/// Additionally, in order to make an instantiable pallet usable as a regular pallet without an +/// instance, it is important to bound by `= ()` on every type. +/// +/// Thus impl bound looks like `impl, I: 'static>`, and types look like /// `SomeType` or `SomeType, I: 'static = ()>`. /// -/// # Example for pallet without instance. +/// # Example of a non-instantiable pallet /// /// ``` /// pub use pallet::*; // reexport in crate namespace for `construct_runtime!` @@ -2269,7 +2375,7 @@ pub mod pallet_prelude { /// } /// ``` /// -/// # Example for pallet with instance. +/// # Example of an instantiable pallet /// /// ``` /// pub use pallet::*; @@ -2400,28 +2506,28 @@ pub mod pallet_prelude { /// } /// ``` /// -/// ## Upgrade guidelines: +/// # Upgrade guidelines /// /// 1. Export the metadata of the pallet for later checks /// - run your node with the pallet active /// - query the metadata using the `state_getMetadata` RPC and curl, or use `subsee -p /// > meta.json` -/// 2. generate the template upgrade for the pallet provided by decl_storage -/// with environment variable `PRINT_PALLET_UPGRADE`: -/// `PRINT_PALLET_UPGRADE=1 cargo check -p my_pallet` This template can be -/// used as information it contains all information for storages, genesis -/// config and genesis build. -/// 3. reorganize pallet to have trait `Config`, `decl_*` macros, `ValidateUnsigned`, -/// `ProvideInherent`, `Origin` all together in one file. Suggested order: -/// * Config, -/// * decl_module, -/// * decl_event, -/// * decl_error, -/// * decl_storage, -/// * origin, -/// * validate_unsigned, -/// * provide_inherent, -/// so far it should compile and all be correct. +/// 2. Generate the template upgrade for the pallet provided by `decl_storage` with the +/// environment variable `PRINT_PALLET_UPGRADE`: `PRINT_PALLET_UPGRADE=1 cargo check -p +/// my_pallet`. This template can be used as it contains all information for storages, +/// genesis config and genesis build. +/// 3. Reorganize the pallet to have the trait `Config`, `decl_*` macros, +/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`), +/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`), and Origin` all together in one +/// file. Suggested order: +/// * `Config`, +/// * `decl_module`, +/// * `decl_event`, +/// * `decl_error`, +/// * `decl_storage`, +/// * `origin`, +/// * `validate_unsigned`, +/// * `provide_inherent`, so far it should compile and all be correct. /// 4. start writing the new pallet module /// ```ignore /// pub use pallet::*; @@ -2441,16 +2547,17 @@ pub mod pallet_prelude { /// } /// ``` /// 5. **migrate Config**: move trait into the module with -/// * all const in decl_module to `#[pallet::constant]` -/// * add bound `IsType<::RuntimeEvent>` to `type RuntimeEvent` +/// * all const in `decl_module` to [`#[pallet::constant]`](#palletconstant) +/// * add the bound `IsType<::RuntimeEvent>` to `type +/// RuntimeEvent` /// 7. 
**migrate decl_module**: write: /// ```ignore /// #[pallet::hooks] /// impl Hooks for Pallet { /// } /// ``` -/// and write inside -/// `on_initialize`, `on_finalize`, `on_runtime_upgrade`, `offchain_worker`, `integrity_test`. +/// and write inside `on_initialize`, `on_finalize`, `on_runtime_upgrade`, +/// `offchain_worker`, and `integrity_test`. /// /// then write: /// ```ignore @@ -2458,25 +2565,26 @@ pub mod pallet_prelude { /// impl Pallet { /// } /// ``` -/// and write inside all the calls in decl_module with a few changes in the signature: -/// - origin must now be written completely, e.g. `origin: OriginFor` -/// - result type must be `DispatchResultWithPostInfo`, you need to write it and also you -/// might -/// need to put `Ok(().into())` at the end or the function. -/// - `#[compact]` must now be written `#[pallet::compact]` -/// - `#[weight = ..]` must now be written `#[pallet::weight(..)]` -/// -/// 7. **migrate event**: -/// rewrite as a simple enum under with the attribute `#[pallet::event]`, -/// use `#[pallet::generate_deposit($vis fn deposit_event)]` to generate deposit_event, -/// 8. **migrate error**: rewrite it with attribute `#[pallet::error]`. -/// 9. **migrate storage**: -/// decl_storage provide an upgrade template (see 3.). All storages, genesis config, genesis -/// build and default implementation of genesis config can be taken from it directly. -/// -/// Otherwise here is the manual process: -/// -/// first migrate the genesis logic. write: +/// and write inside all the calls in `decl_module` with a few changes in the signature: +/// - origin must now be written completely, e.g. `origin: OriginFor` +/// - result type must be `DispatchResultWithPostInfo`, you need to write it and also you +/// might need to put `Ok(().into())` at the end or the function. +/// - `#[compact]` must now be written +/// [`#[pallet::compact]`](#palletcompact-some_arg-some_type) +/// - `#[weight = ..]` must now be written [`#[pallet::weight(..)]`](#palletweightexpr) +/// +/// 7. **migrate event**: rewrite as a simple enum with the attribute +/// [`#[pallet::event]`](#event-palletevent-optional), use [`#[pallet::generate_deposit($vis +/// fn deposit_event)]`](#event-palletevent-optional) to generate `deposit_event`, +/// 8. **migrate error**: rewrite it with attribute +/// [`#[pallet::error]`](#error-palleterror-optional). +/// 9. **migrate storage**: `decl_storage` provide an upgrade template (see 3.). All storages, +/// genesis config, genesis build and default implementation of genesis config can be +/// taken from it directly. +/// +/// Otherwise here is the manual process: +/// +/// first migrate the genesis logic. write: /// ```ignore /// #[pallet::genesis_config] /// struct GenesisConfig { @@ -2494,79 +2602,85 @@ pub mod pallet_prelude { /// } /// } /// ``` -/// for each storages, if it contains config(..) then add a fields, and make its default to the -/// value in `= ..;` or the type default if none, if it contains no build then also add the -/// logic to build the value. -/// for each storages if it contains build(..) then add the logic to genesis_build. 
-/// -/// NOTE: in decl_storage: is executed first the individual config and build and at the end the -/// add_extra_genesis build -/// -/// Once this is done you can migrate storage individually, a few notes: -/// - for private storage use `pub(crate) type ` or `pub(super) type` or nothing, -/// - for storage with `get(fn ..)` use `#[pallet::getter(fn ...)]` -/// - for storage with value being `Option<$something>` make generic `Value` being -/// `$something` -/// and generic `QueryKind` being `OptionQuery` (note: this is default). Otherwise make -/// `Value` the complete value type and `QueryKind` being `ValueQuery`. -/// - for storage with default value: `= $expr;` provide some specific OnEmpty generic. To do -/// so -/// use of `#[pallet::type_value]` to generate the wanted struct to put. -/// example: `MyStorage: u32 = 3u32` would be written: -/// ```ignore +/// for each storage, if it contains `config(..)` then add fields, and make it default to +/// the value in `= ..;` or the type default if none, if it contains no build then also add +/// the logic to build the value. for each storage if it contains `build(..)` then add the +/// logic to `genesis_build`. +/// +/// NOTE: within `decl_storage`: the individual config is executed first, followed by the +/// build and finally the `add_extra_genesis` build. +/// +/// Once this is done you can migrate storages individually, a few notes: +/// - for private storage use `pub(crate) type ` or `pub(super) type` or nothing, +/// - for storages with `get(fn ..)` use [`#[pallet::getter(fn +/// ...)]`](#palletgetterfn-my_getter_fn_name-optional) +/// - for storages with value being `Option<$something>` make generic `Value` being +/// `$something` and generic `QueryKind` being `OptionQuery` (note: this is default). +/// Otherwise make `Value` the complete value type and `QueryKind` being `ValueQuery`. +/// - for storages with default value: `= $expr;` provide some specific `OnEmpty` generic. +/// To do so use of `#[pallet::type_value]` to generate the wanted struct to put. +/// example: `MyStorage: u32 = 3u32` would be written: +/// +/// ```ignore /// #[pallet::type_value] fn MyStorageOnEmpty() -> u32 { 3u32 } /// #[pallet::storage] /// pub(super) type MyStorage = StorageValue<_, u32, ValueQuery, MyStorageOnEmpty>; /// ``` /// -/// NOTE: `decl_storage` also generates functions `assimilate_storage` and `build_storage` -/// directly on GenesisConfig, those are sometimes used in tests. In order not to break they -/// can be implemented manually, one can implement those functions by calling `GenesisBuild` -/// implementation. -/// -/// 10. **migrate origin**: move the origin to the pallet module under `#[pallet::origin]` -/// 11. **migrate validate_unsigned**: move the `ValidateUnsigned` implementation to the pallet -/// module under `#[pallet::validate_unsigned]` -/// 12. **migrate provide_inherent**: move the `ProvideInherent` implementation to the pallet -/// module under `#[pallet::inherent]` +/// NOTE: `decl_storage` also generates the functions `assimilate_storage` and +/// `build_storage` directly on `GenesisConfig`, and these are sometimes used in tests. +/// In order not to break they can be implemented manually, one can implement those +/// functions by calling the `GenesisBuild` implementation. +/// 10. **migrate origin**: move the origin to the pallet module to be under a +/// [`#[pallet::origin]`](#origin-palletorigin-optional) attribute +/// 11. 
**migrate validate_unsigned**: move the +/// [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) implementation to the pallet +/// module under a +/// [`#[pallet::validate_unsigned]`](#validate-unsigned-palletvalidate_unsigned-optional) +/// attribute +/// 12. **migrate provide_inherent**: move the +/// [`ProvideInherent`](`pallet_prelude::ProvideInherent`) implementation to the pallet +/// module under a [`#[pallet::inherent]`](#inherent-palletinherent-optional) attribute /// 13. rename the usage of `Module` to `Pallet` inside the crate. -/// 14. migration is done, now double check migration with the checking migration guidelines. +/// 14. migration is done, now double check the migration with the checking migration +/// guidelines shown below. /// -/// ## Checking upgrade guidelines: +/// # Checking upgrade guidelines: /// /// * compare metadata. Use [subsee](https://github.com/ascjones/subsee) to fetch the metadata -/// and do a diff of the resulting json before and after migration. This checks for: -/// * call, names, signature, docs -/// * event names, docs -/// * error names, docs -/// * storage names, hasher, prefixes, default value -/// * error , error, constant, +/// and do a diff of the resulting json before and after migration. This checks for: +/// * call, names, signature, docs +/// * event names, docs +/// * error names, docs +/// * storage names, hasher, prefixes, default value +/// * error, error, constant /// * manually check that: -/// * `Origin` is moved inside the macro under `#[pallet::origin]` if it exists -/// * `ValidateUnsigned` is moved inside the macro under `#[pallet::validate_unsigned)]` if it -/// exists -/// * `ProvideInherent` is moved inside macro under `#[pallet::inherent)]` if it exists -/// * `on_initialize`/`on_finalize`/`on_runtime_upgrade`/`offchain_worker` are moved to -/// `Hooks` -/// implementation -/// * storages with `config(..)` are converted to `GenesisConfig` field, and their default is -/// `= $expr;` if the storage have default value -/// * storages with `build($expr)` or `config(..)` are built in `GenesisBuild::build` -/// * `add_extra_genesis` fields are converted to `GenesisConfig` field with their correct -/// default if specified -/// * `add_extra_genesis` build is written into `GenesisBuild::build` +/// * `Origin` was moved inside the macro under +/// [`#[pallet::origin]`](#origin-palletorigin-optional) if it exists +/// * [`ValidateUnsigned`](`pallet_prelude::ValidateUnsigned`) was moved inside the macro +/// under +/// [`#[pallet::validate_unsigned)]`](#validate-unsigned-palletvalidate_unsigned-optional) +/// if it exists +/// * [`ProvideInherent`](`pallet_prelude::ProvideInherent`) was moved inside the macro +/// under [`#[pallet::inherent)]`](#inherent-palletinherent-optional) if it exists +/// * `on_initialize` / `on_finalize` / `on_runtime_upgrade` / `offchain_worker` were moved +/// to the `Hooks` implementation +/// * storages with `config(..)` were converted to `GenesisConfig` field, and their default +/// is `= $expr;` if the storage has a default value +/// * storages with `build($expr)` or `config(..)` were built in `GenesisBuild::build` +/// * `add_extra_genesis` fields were converted to `GenesisConfig` field with their correct +/// default if specified +/// * `add_extra_genesis` build was written into `GenesisBuild::build` /// * storage items defined with [`pallet`] use the name of the pallet provided by -/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used the -/// 
`pallet_prefix` given as input of `decl_storage` with the syntax `as Example`). -/// Thus a runtime using the pallet must be careful with this change. -/// To handle this change: -/// * either ensure that the name of the pallet given to `construct_runtime!` is the same -/// as the name the pallet was giving to `decl_storage`, -/// * or do a storage migration from the old prefix used to the new prefix used. -/// -/// NOTE: The prefixes used by storage items are in the metadata. Thus, ensuring the metadata -/// hasn't changed does ensure that the `pallet_prefix`s used by the storage items haven't -/// changed. +/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used +/// the `pallet_prefix` given as input of `decl_storage` with the syntax `as Example`). Thus +/// a runtime using the pallet must be careful with this change. To handle this change: +/// * either ensure that the name of the pallet given to `construct_runtime!` is the same +/// as the name the pallet was giving to `decl_storage`, +/// * or do a storage migration from the old prefix used to the new prefix used. +/// +/// NOTE: The prefixes used by storage items are in metadata. Thus, ensuring the metadata +/// hasn't changed ensures that the `pallet_prefix`s used by the storage items haven't changed. /// /// # Notes when macro fails to show proper error message spans: /// @@ -2581,3 +2695,13 @@ pub mod pallet_prelude { /// ``` /// * use the newest nightly possible. pub use frame_support_procedural::pallet; + +/// Contains macro stubs for all of the pallet:: macros +pub mod pallet_macros { + pub use frame_support_procedural::{ + call_index, compact, config, constant, disable_frame_system_supertrait_check, error, event, + extra_constants, generate_deposit, generate_storage_info, generate_store, genesis_build, + genesis_config, getter, hooks, inherent, origin, storage, storage_prefix, storage_version, + type_value, unbounded, validate_unsigned, weight, whitelist_storage, + }; +} From 4a5a9dea00c9b4e4d34ff56368451aa4dac09d77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 7 Oct 2022 12:46:57 +0200 Subject: [PATCH 218/348] Upgrade pin-project (#12426) This fixes some warnings on latest nightly. 
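For context, this is purely a dependency bump: only the version requirements change (see the diffstat below), and no call sites in these crates are touched. A minimal sketch of the `pin_project` pattern these crates rely on — the `Wrapped` type and `inner` field are illustrative names only, not taken from this patch, and the snippet assumes `pin-project` is declared as a dependency:

```rust
use pin_project::pin_project;
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

// Illustrative wrapper future; `#[pin_project]` generates a safe projection type.
#[pin_project]
struct Wrapped<F> {
    #[pin]
    inner: F,
}

impl<F: Future> Future for Wrapped<F> {
    type Output = F::Output;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // `project()` gives pinned access to `inner` without any unsafe code.
        self.project().inner.poll(cx)
    }
}
```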
--- Cargo.lock | 8 ++++---- client/network/Cargo.toml | 2 +- client/network/transactions/Cargo.toml | 2 +- client/service/Cargo.toml | 2 +- client/telemetry/Cargo.toml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6a7c09514325..6cb440433f204 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6805,18 +6805,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 8e3d68851c423..08d0f28394af0 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -32,7 +32,7 @@ linked-hash-map = "0.5.4" log = "0.4.17" lru = "0.7.5" parking_lot = "0.12.1" -pin-project = "1.0.10" +pin-project = "1.0.12" prost = "0.11" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } diff --git a/client/network/transactions/Cargo.toml b/client/network/transactions/Cargo.toml index 5578bb2c7191e..3b60497d42b9b 100644 --- a/client/network/transactions/Cargo.toml +++ b/client/network/transactions/Cargo.toml @@ -20,7 +20,7 @@ futures = "0.3.21" hex = "0.4.0" libp2p = "0.46.1" log = "0.4.17" -pin-project = "1.0.10" +pin-project = "1.0.12" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-network-common = { version = "0.10.0-dev", path = "../common" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 42bd2ae7276e2..308da96fbbe77 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -31,7 +31,7 @@ parking_lot = "0.12.1" log = "0.4.17" futures-timer = "3.0.1" exit-future = "0.2.0" -pin-project = "1.0.10" +pin-project = "1.0.12" hash-db = "0.15.2" serde = "1.0.136" serde_json = "1.0.85" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 4be7c186720fc..0be1268e13d43 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -19,7 +19,7 @@ futures = "0.3.21" libp2p = { version = "0.46.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.17" parking_lot = "0.12.1" -pin-project = "1.0.10" +pin-project = "1.0.12" rand = "0.7.2" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.85" From a8e951d094273322e79f4ac8b66dd3715fd5c076 Mon Sep 17 00:00:00 2001 From: Koute Date: Fri, 7 Oct 2022 23:16:41 +0900 Subject: [PATCH 219/348] Extend the lower bounds of some of the benchmarks to also include `0` (#12386) * Extend the lower bounds of some of the benchmarks to also include `0` * Fix verify snippet for `pallet_bounties/spend_funds` --- frame/alliance/src/benchmarking.rs | 8 +- frame/benchmarking/src/baseline.rs | 2 +- frame/bounties/src/benchmarking.rs | 10 ++- frame/collective/src/benchmarking.rs | 77 ++++++++++---------- 
frame/elections-phragmen/src/benchmarking.rs | 2 +- frame/examples/basic/src/benchmarking.rs | 8 +- frame/gilt/src/benchmarking.rs | 4 +- frame/identity/src/benchmarking.rs | 24 +++--- frame/ranked-collective/src/benchmarking.rs | 2 +- frame/staking/src/benchmarking.rs | 11 +-- frame/system/benchmarking/src/lib.rs | 51 ++++++++----- 11 files changed, 109 insertions(+), 90 deletions(-) diff --git a/frame/alliance/src/benchmarking.rs b/frame/alliance/src/benchmarking.rs index e07d7c44a97ff..e2e1579fcc9b4 100644 --- a/frame/alliance/src/benchmarking.rs +++ b/frame/alliance/src/benchmarking.rs @@ -832,8 +832,8 @@ benchmarks_instance_pallet! { } add_unscrupulous_items { - let n in 1 .. T::MaxUnscrupulousItems::get(); - let l in 1 .. T::MaxWebsiteUrlLength::get(); + let n in 0 .. T::MaxUnscrupulousItems::get(); + let l in 0 .. T::MaxWebsiteUrlLength::get(); set_members::(); @@ -856,8 +856,8 @@ benchmarks_instance_pallet! { } remove_unscrupulous_items { - let n in 1 .. T::MaxUnscrupulousItems::get(); - let l in 1 .. T::MaxWebsiteUrlLength::get(); + let n in 0 .. T::MaxUnscrupulousItems::get(); + let l in 0 .. T::MaxWebsiteUrlLength::get(); set_members::(); diff --git a/frame/benchmarking/src/baseline.rs b/frame/benchmarking/src/baseline.rs index 6a310330cafb1..5fd845551daca 100644 --- a/frame/benchmarking/src/baseline.rs +++ b/frame/benchmarking/src/baseline.rs @@ -90,7 +90,7 @@ benchmarks! { } sr25519_verification { - let i in 1 .. 100; + let i in 0 .. 100; let public = SignerId::generate_pair(None); diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 6ccd587cebc10..07dd781c29af3 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -197,7 +197,7 @@ benchmarks_instance_pallet! { } spend_funds { - let b in 1 .. 100; + let b in 0 .. 100; setup_pot_account::(); create_approved_bounties::(b)?; @@ -214,9 +214,13 @@ benchmarks_instance_pallet! { ); } verify { - ensure!(budget_remaining < BalanceOf::::max_value(), "Budget not used"); ensure!(missed_any == false, "Missed some"); - assert_last_event::(Event::BountyBecameActive { index: b - 1 }.into()) + if b > 0 { + ensure!(budget_remaining < BalanceOf::::max_value(), "Budget not used"); + assert_last_event::(Event::BountyBecameActive { index: b - 1 }.into()) + } else { + ensure!(budget_remaining == BalanceOf::::max_value(), "Budget used"); + } } impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test) diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index 7c444dbaa7b6a..fcebacf5762e7 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -36,68 +36,69 @@ fn assert_last_event, I: 'static>(generic_event: >:: benchmarks_instance_pallet! { set_members { - let m in 1 .. T::MaxMembers::get(); - let n in 1 .. T::MaxMembers::get(); - let p in 1 .. T::MaxProposals::get(); + let m in 0 .. T::MaxMembers::get(); + let n in 0 .. T::MaxMembers::get(); + let p in 0 .. T::MaxProposals::get(); // Set old members. // We compute the difference of old and new members, so it should influence timing. let mut old_members = vec![]; - let mut last_old_member = account::("old member", 0, SEED); for i in 0 .. 
m { - last_old_member = account::("old member", i, SEED); - old_members.push(last_old_member.clone()); + let old_member = account::("old member", i, SEED); + old_members.push(old_member); } let old_members_count = old_members.len() as u32; Collective::::set_members( SystemOrigin::Root.into(), old_members.clone(), - Some(last_old_member.clone()), + old_members.last().cloned(), T::MaxMembers::get(), )?; - // Set a high threshold for proposals passing so that they stay around. - let threshold = m.max(2); - // Length of the proposals should be irrelevant to `set_members`. - let length = 100; - for i in 0 .. p { - // Proposals should be different so that different proposal hashes are generated - let proposal: T::Proposal = SystemCall::::remark { - remark: vec![i as u8; length] - }.into(); - Collective::::propose( - SystemOrigin::Signed(last_old_member.clone()).into(), - threshold, - Box::new(proposal.clone()), - MAX_BYTES, - )?; - let hash = T::Hashing::hash_of(&proposal); - // Vote on the proposal to increase state relevant for `set_members`. - // Not voting for `last_old_member` because they proposed and not voting for the first member - // to keep the proposal from passing. - for j in 2 .. m - 1 { - let voter = &old_members[j as usize]; - let approve = true; - Collective::::vote( - SystemOrigin::Signed(voter.clone()).into(), - hash, - i, - approve, + // If there were any old members generate a bunch of proposals. + if m > 0 { + // Set a high threshold for proposals passing so that they stay around. + let threshold = m.max(2); + // Length of the proposals should be irrelevant to `set_members`. + let length = 100; + for i in 0 .. p { + // Proposals should be different so that different proposal hashes are generated + let proposal: T::Proposal = SystemCall::::remark { + remark: vec![i as u8; length] + }.into(); + Collective::::propose( + SystemOrigin::Signed(old_members.last().unwrap().clone()).into(), + threshold, + Box::new(proposal.clone()), + MAX_BYTES, )?; + let hash = T::Hashing::hash_of(&proposal); + // Vote on the proposal to increase state relevant for `set_members`. + // Not voting for last old member because they proposed and not voting for the first member + // to keep the proposal from passing. + for j in 2 .. m - 1 { + let voter = &old_members[j as usize]; + let approve = true; + Collective::::vote( + SystemOrigin::Signed(voter.clone()).into(), + hash, + i, + approve, + )?; + } } } // Construct `new_members`. // It should influence timing since it will sort this vector. let mut new_members = vec![]; - let mut last_member = account::("member", 0, SEED); for i in 0 .. n { - last_member = account::("member", i, SEED); - new_members.push(last_member.clone()); + let member = account::("member", i, SEED); + new_members.push(member); } - }: _(SystemOrigin::Root, new_members.clone(), Some(last_member), T::MaxMembers::get()) + }: _(SystemOrigin::Root, new_members.clone(), new_members.last().cloned(), T::MaxMembers::get()) verify { new_members.sort(); assert_eq!(Collective::::members(), new_members); diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 22d00a912a4f7..06ac8d7c60162 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -362,7 +362,7 @@ benchmarks! { // total number of voters. let v in (T::MaxVoters::get() / 2) .. T::MaxVoters::get(); // those that are defunct and need removal. - let d in 1 .. (T::MaxVoters::get() / 2); + let d in 0 .. 
(T::MaxVoters::get() / 2); // remove any previous stuff. clean::(); diff --git a/frame/examples/basic/src/benchmarking.rs b/frame/examples/basic/src/benchmarking.rs index 93e14f358208e..4d1659af46460 100644 --- a/frame/examples/basic/src/benchmarking.rs +++ b/frame/examples/basic/src/benchmarking.rs @@ -34,22 +34,22 @@ use frame_system::RawOrigin; // Details on using the benchmarks macro can be seen at: // https://paritytech.github.io/substrate/master/frame_benchmarking/trait.Benchmarking.html#tymethod.benchmarks benchmarks! { - // This will measure the execution time of `set_dummy` for b in [1..1000] range. + // This will measure the execution time of `set_dummy` for b in [0..1000] range. set_dummy_benchmark { // This is the benchmark setup phase - let b in 1 .. 1000; + let b in 0 .. 1000; }: set_dummy(RawOrigin::Root, b.into()) // The execution phase is just running `set_dummy` extrinsic call verify { // This is the optional benchmark verification phase, asserting certain states. assert_eq!(Pallet::::dummy(), Some(b.into())) } - // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. + // This will measure the execution time of `accumulate_dummy` for b in [0..1000] range. // The benchmark execution phase is shorthanded. When the name of the benchmark case is the same // as the extrinsic call. `_(...)` is used to represent the extrinsic name. // The benchmark verification phase is omitted. accumulate_dummy { - let b in 1 .. 1000; + let b in 0 .. 1000; // The caller account is whitelisted for DB reads/write by the benchmarking macro. let caller: T::AccountId = whitelisted_caller(); }: _(RawOrigin::Signed(caller), b.into()) diff --git a/frame/gilt/src/benchmarking.rs b/frame/gilt/src/benchmarking.rs index 1767a8da4def0..92ebf81854f23 100644 --- a/frame/gilt/src/benchmarking.rs +++ b/frame/gilt/src/benchmarking.rs @@ -97,7 +97,7 @@ benchmarks! { pursue_target_per_item { // bids taken - let b in 1..T::MaxQueueLen::get(); + let b in 0..T::MaxQueueLen::get(); let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::::from(b + 1)); @@ -113,7 +113,7 @@ benchmarks! { pursue_target_per_queue { // total queues hit - let q in 1..T::QueueCount::get(); + let q in 0..T::QueueCount::get(); let caller: T::AccountId = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::::from(q + 1)); diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index c66658b92b0d2..c628387a4d22e 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -130,7 +130,7 @@ benchmarks! { set_identity { let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 1 .. T::MaxAdditionalFields::get(); + let x in 0 .. T::MaxAdditionalFields::get(); let caller = { // The target user let caller: T::AccountId = whitelisted_caller(); @@ -166,7 +166,7 @@ benchmarks! { set_subs_new { let caller: T::AccountId = whitelisted_caller(); // Create a new subs vec with s sub accounts - let s in 1 .. T::MaxSubAccounts::get() => (); + let s in 0 .. T::MaxSubAccounts::get() => (); let subs = create_sub_accounts::(&caller, s)?; ensure!(SubsOf::::get(&caller).1.len() == 0, "Caller already has subs"); }: set_subs(RawOrigin::Signed(caller.clone()), subs) @@ -177,7 +177,7 @@ benchmarks! { set_subs_old { let caller: T::AccountId = whitelisted_caller(); // Give them p many previous sub accounts. - let p in 1 .. 
T::MaxSubAccounts::get() => { + let p in 0 .. T::MaxSubAccounts::get() => { let _ = add_sub_accounts::(&caller, p)?; }; // Remove all subs. @@ -198,12 +198,12 @@ benchmarks! { let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let s in 1 .. T::MaxSubAccounts::get() => { + let s in 0 .. T::MaxSubAccounts::get() => { // Give them s many sub accounts let caller: T::AccountId = whitelisted_caller(); let _ = add_sub_accounts::(&caller, s)?; }; - let x in 1 .. T::MaxAdditionalFields::get(); + let x in 0 .. T::MaxAdditionalFields::get(); // Create their main identity with x additional fields let info = create_identity_info::(x); @@ -233,7 +233,7 @@ benchmarks! { let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 1 .. T::MaxAdditionalFields::get() => { + let x in 0 .. T::MaxAdditionalFields::get() => { // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); @@ -251,7 +251,7 @@ benchmarks! { let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let x in 1 .. T::MaxAdditionalFields::get() => { + let x in 0 .. T::MaxAdditionalFields::get() => { // Create their main identity with x additional fields let info = create_identity_info::(x); let caller: T::AccountId = whitelisted_caller(); @@ -332,7 +332,7 @@ benchmarks! { let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); let r in 1 .. T::MaxRegistrars::get() - 1 => add_registrars::(r)?; - let x in 1 .. T::MaxAdditionalFields::get(); + let x in 0 .. T::MaxAdditionalFields::get(); let info = create_identity_info::(x); let info_hash = T::Hashing::hash_of(&info); @@ -348,8 +348,8 @@ benchmarks! { kill_identity { let r in 1 .. T::MaxRegistrars::get() => add_registrars::(r)?; - let s in 1 .. T::MaxSubAccounts::get(); - let x in 1 .. T::MaxAdditionalFields::get(); + let s in 0 .. T::MaxSubAccounts::get(); + let x in 0 .. T::MaxAdditionalFields::get(); let target: T::AccountId = account("target", 0, SEED); let target_origin: ::RuntimeOrigin = RawOrigin::Signed(target.clone()).into(); @@ -379,7 +379,7 @@ benchmarks! { } add_sub { - let s in 1 .. T::MaxSubAccounts::get() - 1; + let s in 0 .. T::MaxSubAccounts::get() - 1; let caller: T::AccountId = whitelisted_caller(); let _ = add_sub_accounts::(&caller, s)?; @@ -415,7 +415,7 @@ benchmarks! { } quit_sub { - let s in 1 .. T::MaxSubAccounts::get() - 1; + let s in 0 .. T::MaxSubAccounts::get() - 1; let caller: T::AccountId = whitelisted_caller(); let sup = account("super", 0, SEED); diff --git a/frame/ranked-collective/src/benchmarking.rs b/frame/ranked-collective/src/benchmarking.rs index a4d074450e836..eb629b330abb2 100644 --- a/frame/ranked-collective/src/benchmarking.rs +++ b/frame/ranked-collective/src/benchmarking.rs @@ -138,7 +138,7 @@ benchmarks_instance_pallet! { } cleanup_poll { - let n in 1 .. 100; + let n in 0 .. 
100; // Create a poll let class = T::Polls::classes().into_iter().next().unwrap(); diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index c7e6936ac75d8..dcb861e2ce419 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -88,6 +88,7 @@ pub fn create_validator_with_nominators( points_total += 10; points_individual.push((v_stash.clone(), 10)); + let original_nominator_count = Nominators::::count(); let mut nominators = Vec::new(); // Give the validator n nominators, but keep total users in the system the same. @@ -114,7 +115,7 @@ pub fn create_validator_with_nominators( assert_eq!(new_validators.len(), 1); assert_eq!(new_validators[0], v_stash, "Our validator was not selected!"); assert_ne!(Validators::::count(), 0); - assert_ne!(Nominators::::count(), 0); + assert_eq!(Nominators::::count(), original_nominator_count + nominators.len() as u32); // Give Era Points let reward = EraRewardPoints:: { @@ -544,7 +545,7 @@ benchmarks! { } payout_stakers_dead_controller { - let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; + let n in 0 .. T::MaxNominatorRewardedPerValidator::get() as u32; let (validator, nominators) = create_validator_with_nominators::( n, T::MaxNominatorRewardedPerValidator::get() as u32, @@ -577,7 +578,7 @@ benchmarks! { } payout_stakers_alive_staked { - let n in 1 .. T::MaxNominatorRewardedPerValidator::get() as u32; + let n in 0 .. T::MaxNominatorRewardedPerValidator::get() as u32; let (validator, nominators) = create_validator_with_nominators::( n, T::MaxNominatorRewardedPerValidator::get() as u32, @@ -695,7 +696,7 @@ benchmarks! { new_era { let v in 1 .. 10; - let n in 1 .. 100; + let n in 0 .. 100; create_validators_with_nominators_for_era::( v, @@ -714,7 +715,7 @@ benchmarks! { #[extra] payout_all { let v in 1 .. 10; - let n in 1 .. 100; + let n in 0 .. 100; create_validators_with_nominators_for_era::( v, n, diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index dbe38da5775a7..0f7603fe1dd9f 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -64,7 +64,7 @@ benchmarks! { #[skip_meta] set_storage { - let i in 1 .. 1000; + let i in 0 .. 1000; // Set up i items to add let mut items = Vec::new(); @@ -72,56 +72,69 @@ benchmarks! { let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); items.push((hash.clone(), hash.clone())); } + + let items_to_verify = items.clone(); }: _(RawOrigin::Root, items) verify { - let last_hash = (i, i - 1).using_encoded(T::Hashing::hash); - let value = storage::unhashed::get_raw(last_hash.as_ref()).ok_or("No value stored")?; - assert_eq!(value, last_hash.as_ref().to_vec()); + // Verify that they're actually in the storage. + for (item, _) in items_to_verify { + let value = storage::unhashed::get_raw(&item).ok_or("No value stored")?; + assert_eq!(value, *item); + } } #[skip_meta] kill_storage { - let i in 1 .. 1000; + let i in 0 .. 1000; // Add i items to storage - let mut items = Vec::new(); + let mut items = Vec::with_capacity(i as usize); for j in 0 .. i { let hash = (i, j).using_encoded(T::Hashing::hash).as_ref().to_vec(); storage::unhashed::put_raw(&hash, &hash); items.push(hash); } - // We will verify this value is removed - let last_hash = (i, i - 1).using_encoded(T::Hashing::hash); - let value = storage::unhashed::get_raw(last_hash.as_ref()).ok_or("No value stored")?; - assert_eq!(value, last_hash.as_ref().to_vec()); + // Verify that they're actually in the storage. 
+ for item in &items { + let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; + assert_eq!(value, *item); + } + let items_to_verify = items.clone(); }: _(RawOrigin::Root, items) verify { - assert_eq!(storage::unhashed::get_raw(last_hash.as_ref()), None); + // Verify that they're not in the storage anymore. + for item in items_to_verify { + assert!(storage::unhashed::get_raw(&item).is_none()); + } } #[skip_meta] kill_prefix { - let p in 1 .. 1000; + let p in 0 .. 1000; let prefix = p.using_encoded(T::Hashing::hash).as_ref().to_vec(); + let mut items = Vec::with_capacity(p as usize); // add p items that share a prefix for i in 0 .. p { let hash = (p, i).using_encoded(T::Hashing::hash).as_ref().to_vec(); let key = [&prefix[..], &hash[..]].concat(); storage::unhashed::put_raw(&key, &key); + items.push(key); } - // We will verify this value is removed - let last_hash = (p, p - 1).using_encoded(T::Hashing::hash).as_ref().to_vec(); - let last_key = [&prefix[..], &last_hash[..]].concat(); - let value = storage::unhashed::get_raw(&last_key).ok_or("No value stored")?; - assert_eq!(value, last_key); - + // Verify that they're actually in the storage. + for item in &items { + let value = storage::unhashed::get_raw(item).ok_or("No value stored")?; + assert_eq!(value, *item); + } }: _(RawOrigin::Root, prefix, p) verify { - assert_eq!(storage::unhashed::get_raw(&last_key), None); + // Verify that they're not in the storage anymore. + for item in items { + assert!(storage::unhashed::get_raw(&item).is_none()); + } } impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); From 1a5cdc81c711bc3a7464f24ccd2e1ba20a5ea594 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 7 Oct 2022 17:19:10 +0300 Subject: [PATCH 220/348] BEEFY: Define a `BeefyVerify` trait for signatures (#12299) * Define CustomVerify trait Signed-off-by: Serban Iorga * Use ECDSA CustomVerify for MultiSignature Signed-off-by: Serban Iorga * beefy: small simplifications Signed-off-by: Serban Iorga * Revert "Use ECDSA CustomVerify for MultiSignature" This reverts commit 136cff82505662dd92c864491814629d2bc349f0. * Revert "Define CustomVerify trait" This reverts commit adf91e9e6d1bdea6f00831f6067b74c3d945f9a2. 
* Define BeefyAuthorityId and BeefyVerify traits * Improve BeefyVerify unit tests Co-authored-by: Robert Hambrock * fmt & import sp_core::blake2_256 * Renamings * remove SignerToAccountId * fix Signed-off-by: Serban Iorga Co-authored-by: Robert Hambrock --- Cargo.lock | 1 + client/beefy/src/keystore.rs | 9 ++--- frame/beefy-mmr/src/lib.rs | 8 +--- primitives/beefy/Cargo.toml | 2 + primitives/beefy/src/lib.rs | 75 ++++++++++++++++++++++++++++++++++- primitives/core/src/crypto.rs | 5 +++ 6 files changed, 87 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6cb440433f204..49b3dd3cf957b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -538,6 +538,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-core", + "sp-io", "sp-keystore", "sp-mmr-primitives", "sp-runtime", diff --git a/client/beefy/src/keystore.rs b/client/beefy/src/keystore.rs index b0259a42075ea..886c00fc5d817 100644 --- a/client/beefy/src/keystore.rs +++ b/client/beefy/src/keystore.rs @@ -19,12 +19,13 @@ use sp_application_crypto::RuntimeAppPublic; use sp_core::keccak_256; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::traits::Keccak256; use log::warn; use beefy_primitives::{ crypto::{Public, Signature}, - KEY_TYPE, + BeefyVerify, KEY_TYPE, }; use crate::error; @@ -98,11 +99,7 @@ impl BeefyKeystore { /// /// Return `true` if the signature is authentic, `false` otherwise. pub fn verify(public: &Public, sig: &Signature, message: &[u8]) -> bool { - let msg = keccak_256(message); - let sig = sig.as_ref(); - let public = public.as_ref(); - - sp_core::ecdsa::Pair::verify_prehashed(sig, &msg, public) + BeefyVerify::::verify(sig, message, public) } } diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs index 5b82c89ce84b6..0b7fc22cd279b 100644 --- a/frame/beefy-mmr/src/lib.rs +++ b/frame/beefy-mmr/src/lib.rs @@ -73,12 +73,8 @@ where /// Convert BEEFY secp256k1 public keys into Ethereum addresses pub struct BeefyEcdsaToEthereum; impl Convert> for BeefyEcdsaToEthereum { - fn convert(a: beefy_primitives::crypto::AuthorityId) -> Vec { - sp_core::ecdsa::Public::try_from(a.as_ref()) - .map_err(|_| { - log::error!(target: "runtime::beefy", "Invalid BEEFY PublicKey format!"); - }) - .unwrap_or(sp_core::ecdsa::Public::from_raw([0u8; 33])) + fn convert(beefy_id: beefy_primitives::crypto::AuthorityId) -> Vec { + sp_core::ecdsa::Public::from(beefy_id) .to_eth_address() .map(|v| v.to_vec()) .map_err(|_| { diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml index fe6ce23337c86..f85e2edf4f442 100644 --- a/primitives/beefy/Cargo.toml +++ b/primitives/beefy/Cargo.toml @@ -18,6 +18,7 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-application-crypto = { version = "6.0.0", default-features = false, path = "../application-crypto" } sp-core = { version = "6.0.0", default-features = false, path = "../core" } +sp-io = { version = "6.0.0", default-features = false, path = "../io" } sp-mmr-primitives = { version = "4.0.0-dev", default-features = false, path = "../merkle-mountain-range" } sp-runtime = { version = "6.0.0", default-features = false, path = "../runtime" } sp-std = { version = "4.0.0", default-features = false, path = "../std" } @@ -34,6 +35,7 @@ std = [ "sp-api/std", "sp-application-crypto/std", "sp-core/std", + "sp-io/std", "sp-mmr-primitives/std", "sp-runtime/std", "sp-std/std", diff --git a/primitives/beefy/src/lib.rs b/primitives/beefy/src/lib.rs 
index 705366e1b4778..453eb67315d4e 100644 --- a/primitives/beefy/src/lib.rs +++ b/primitives/beefy/src/lib.rs @@ -41,12 +41,30 @@ pub use payload::{known_payloads, BeefyPayloadId, Payload, PayloadProvider}; use codec::{Codec, Decode, Encode}; use scale_info::TypeInfo; +use sp_application_crypto::RuntimeAppPublic; use sp_core::H256; +use sp_runtime::traits::Hash; use sp_std::prelude::*; /// Key type for BEEFY module. pub const KEY_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::KeyTypeId(*b"beef"); +/// Trait representing BEEFY authority id. +pub trait BeefyAuthorityId: RuntimeAppPublic {} + +/// Means of verification for a BEEFY authority signature. +/// +/// Accepts custom hashing fn for the message and custom convertor fn for the signer. +pub trait BeefyVerify { + /// Type of the signer. + type Signer: BeefyAuthorityId; + + /// Verify a signature. + /// + /// Return `true` if signature is valid for the value. + fn verify(&self, msg: &[u8], signer: &Self::Signer) -> bool; +} + /// BEEFY cryptographic types /// /// This module basically introduces three crypto types: @@ -60,7 +78,9 @@ pub const KEY_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::Ke /// The current underlying crypto scheme used is ECDSA. This can be changed, /// without affecting code restricted against the above listed crypto types. pub mod crypto { + use super::{BeefyAuthorityId, BeefyVerify, Hash}; use sp_application_crypto::{app_crypto, ecdsa}; + use sp_core::crypto::Wraps; app_crypto!(ecdsa, crate::KEY_TYPE); /// Identity of a BEEFY authority using ECDSA as its crypto. @@ -68,6 +88,26 @@ pub mod crypto { /// Signature for a BEEFY authority using ECDSA as its crypto. pub type AuthoritySignature = Signature; + + impl BeefyAuthorityId for AuthorityId {} + + impl BeefyVerify for AuthoritySignature + where + ::Output: Into<[u8; 32]>, + { + type Signer = AuthorityId; + + fn verify(&self, msg: &[u8], signer: &Self::Signer) -> bool { + let msg_hash = ::hash(msg).into(); + match sp_io::crypto::secp256k1_ecdsa_recover_compressed( + self.as_inner_ref().as_ref(), + &msg_hash, + ) { + Ok(raw_pubkey) => raw_pubkey.as_ref() == AsRef::<[u8]>::as_ref(signer), + _ => false, + } + } + } } /// The `ConsensusEngineId` of BEEFY. @@ -180,7 +220,8 @@ sp_api::decl_runtime_apis! { mod tests { use super::*; use sp_application_crypto::ecdsa::{self, Public}; - use sp_core::Pair; + use sp_core::{blake2_256, crypto::Wraps, keccak_256, Pair}; + use sp_runtime::traits::{BlakeTwo256, Keccak256}; #[test] fn validator_set() { @@ -194,4 +235,36 @@ mod tests { assert_eq!(validators.id(), set_id); assert_eq!(validators.validators(), &vec![alice.public()]); } + + #[test] + fn beefy_verify_works() { + let msg = &b"test-message"[..]; + let (pair, _) = crypto::Pair::generate(); + + let keccak_256_signature: crypto::Signature = + pair.as_inner_ref().sign_prehashed(&keccak_256(msg)).into(); + + let blake2_256_signature: crypto::Signature = + pair.as_inner_ref().sign_prehashed(&blake2_256(msg)).into(); + + // Verification works if same hashing function is used when signing and verifying. + assert!(BeefyVerify::::verify(&keccak_256_signature, msg, &pair.public())); + assert!(BeefyVerify::::verify(&blake2_256_signature, msg, &pair.public())); + // Verification fails if distinct hashing functions are used when signing and verifying. 
+ assert!(!BeefyVerify::::verify(&blake2_256_signature, msg, &pair.public())); + assert!(!BeefyVerify::::verify(&keccak_256_signature, msg, &pair.public())); + + // Other public key doesn't work + let (other_pair, _) = crypto::Pair::generate(); + assert!(!BeefyVerify::::verify( + &keccak_256_signature, + msg, + &other_pair.public() + )); + assert!(!BeefyVerify::::verify( + &blake2_256_signature, + msg, + &other_pair.public() + )); + } } diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index db855620a8f0d..06703acea7202 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -984,6 +984,11 @@ pub trait IsWrappedBy: From + Into { pub trait Wraps: Sized { /// The inner type it is wrapping. type Inner: IsWrappedBy; + + /// Get a reference to the inner type that is wrapped. + fn as_inner_ref(&self) -> &Self::Inner { + Self::Inner::from_ref(self) + } } impl IsWrappedBy for T From b7c05622d0912816ac055fe086769f1e2fce575d Mon Sep 17 00:00:00 2001 From: Dan Shields <35669742+NukeManDan@users.noreply.github.com> Date: Sat, 8 Oct 2022 13:26:31 -0600 Subject: [PATCH 221/348] fix comment math (#12452) --- frame/ranked-collective/src/lib.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/frame/ranked-collective/src/lib.rs b/frame/ranked-collective/src/lib.rs index fa3a473fe7d73..111c5f70efdfa 100644 --- a/frame/ranked-collective/src/lib.rs +++ b/frame/ranked-collective/src/lib.rs @@ -199,11 +199,11 @@ impl Convert for Unit { /// Vote-weight scheme where all voters get one vote plus an additional vote for every excess rank /// they have. I.e.: /// -/// - Each member with no excess rank gets 1 vote; +/// - Each member with an excess rank of 0 gets 1 vote; /// - ...with an excess rank of 1 gets 2 votes; -/// - ...with an excess rank of 2 gets 2 votes; -/// - ...with an excess rank of 3 gets 3 votes; -/// - ...with an excess rank of 4 gets 4 votes. +/// - ...with an excess rank of 2 gets 3 votes; +/// - ...with an excess rank of 3 gets 4 votes; +/// - ...with an excess rank of 4 gets 5 votes. pub struct Linear; impl Convert for Linear { fn convert(r: Rank) -> Votes { @@ -214,11 +214,11 @@ impl Convert for Linear { /// Vote-weight scheme where all voters get one vote plus additional votes for every excess rank /// they have incrementing by one vote for each excess rank. I.e.: /// -/// - Each member with no excess rank gets 1 vote; -/// - ...with an excess rank of 1 gets 2 votes; -/// - ...with an excess rank of 2 gets 3 votes; -/// - ...with an excess rank of 3 gets 6 votes; -/// - ...with an excess rank of 4 gets 10 votes. +/// - Each member with an excess rank of 0 gets 1 vote; +/// - ...with an excess rank of 1 gets 3 votes; +/// - ...with an excess rank of 2 gets 6 votes; +/// - ...with an excess rank of 3 gets 10 votes; +/// - ...with an excess rank of 4 gets 15 votes. 
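The corrected `Geometric` values above are the triangular numbers: in closed form, a member with an excess rank of `r` gets `(r + 1) * (r + 2) / 2` votes. A minimal sketch reproducing the listed values — the `geometric_votes` helper is a hypothetical standalone function, not part of the pallet:

```rust
// Hypothetical helper mirroring the numbers in the doc comment above:
// excess ranks 0, 1, 2, 3, 4 map to 1, 3, 6, 10, 15 votes.
fn geometric_votes(excess_rank: u32) -> u32 {
    (excess_rank + 1) * (excess_rank + 2) / 2
}

fn main() {
    assert_eq!((0..5).map(geometric_votes).collect::<Vec<_>>(), vec![1, 3, 6, 10, 15]);
}
```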
pub struct Geometric; impl Convert for Geometric { fn convert(r: Rank) -> Votes { From 460fff965a2fd25d3a75ef436ec265b4c2af47a0 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Sat, 8 Oct 2022 23:15:25 +0200 Subject: [PATCH 222/348] Remove "to_block" field from BlockRequests (#12447) * Remove "to_block" field from BlockRequests * Maybe fix the tests --- client/network/common/src/sync/message.rs | 2 -- client/network/sync/src/lib.rs | 9 +-------- client/network/sync/src/schema/api.v1.proto | 2 -- client/network/sync/src/warp.rs | 1 - 4 files changed, 1 insertion(+), 13 deletions(-) diff --git a/client/network/common/src/sync/message.rs b/client/network/common/src/sync/message.rs index 27ab2704e6471..db9f747108c9f 100644 --- a/client/network/common/src/sync/message.rs +++ b/client/network/common/src/sync/message.rs @@ -158,8 +158,6 @@ pub mod generic { pub fields: BlockAttributes, /// Start from this block. pub from: FromBlock, - /// End at this block. An implementation defined maximum is used when unspecified. - pub to: Option, /// Sequence direction. pub direction: Direction, /// Maximum number of blocks to return. An implementation defined maximum is used when diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 6ad4a8fbbdcc5..6348490351140 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -650,7 +650,6 @@ where id: 0, fields: BlockAttributes::JUSTIFICATION, from: FromBlock::Hash(request.0), - to: None, direction: Direction::Ascending, max: Some(1), }; @@ -1608,7 +1607,6 @@ where FromBlock::Number(n) => Some(schema::v1::block_request::FromBlock::Number(n.encode())), }, - to_block: request.to.map(|h| h.encode()).unwrap_or_default(), direction: request.direction as i32, max_blocks: request.max.unwrap_or(0), support_multiple_justifications: true, @@ -2252,7 +2250,6 @@ fn ancestry_request(block: NumberFor) -> BlockRequest { id: 0, fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, from: FromBlock::Number(block), - to: None, direction: Direction::Ascending, max: Some(1), } @@ -2368,7 +2365,6 @@ fn peer_block_request( id: 0, fields: attrs, from, - to: None, direction: Direction::Descending, max: Some((range.end - range.start).saturated_into::()), }; @@ -2402,7 +2398,6 @@ fn peer_gap_block_request( id: 0, fields: attrs, from, - to: None, direction: Direction::Descending, max: Some((range.end - range.start).saturated_into::()), }; @@ -2452,7 +2447,6 @@ fn fork_sync_request( id: 0, fields: attributes, from: FromBlock::Hash(*hash), - to: None, direction: Direction::Descending, max: Some(count), }, @@ -2702,8 +2696,7 @@ mod test { assert!(sync.justification_requests().any(|(p, r)| { p == peer_id3 && r.fields == BlockAttributes::JUSTIFICATION && - r.from == FromBlock::Hash(b1_hash) && - r.to == None + r.from == FromBlock::Hash(b1_hash) })); assert_eq!( diff --git a/client/network/sync/src/schema/api.v1.proto b/client/network/sync/src/schema/api.v1.proto index b51137d1d51d4..203b157470a58 100644 --- a/client/network/sync/src/schema/api.v1.proto +++ b/client/network/sync/src/schema/api.v1.proto @@ -23,8 +23,6 @@ message BlockRequest { // Start with given block number. bytes number = 3; } - // End at this block. An implementation defined maximum is used when unspecified. - bytes to_block = 4; // optional // Sequence direction. Direction direction = 5; // Maximum number of blocks to return. An implementation defined maximum is used when unspecified. 
diff --git a/client/network/sync/src/warp.rs b/client/network/sync/src/warp.rs index 4f2a71d98613e..ab8a7c66b9856 100644 --- a/client/network/sync/src/warp.rs +++ b/client/network/sync/src/warp.rs @@ -193,7 +193,6 @@ where fields: BlockAttributes::HEADER | BlockAttributes::BODY | BlockAttributes::JUSTIFICATION, from: FromBlock::Hash(header.hash()), - to: Some(header.hash()), direction: Direction::Ascending, max: Some(1), }; From 73c4f94ce0cb41e35bd7fbc7095590b98a351dbb Mon Sep 17 00:00:00 2001 From: Leszek Wiesner Date: Sun, 9 Oct 2022 11:22:43 +0200 Subject: [PATCH 223/348] Vesting pallet - make WithdrawReasons configurable (#12109) * Vesting pallet - make WithdrawReasons configurable * Update `pallet-vesting` README Co-authored-by: parity-processbot <> --- bin/node/runtime/src/lib.rs | 5 ++++- frame/vesting/README.md | 3 ++- frame/vesting/src/lib.rs | 13 ++++++++++--- frame/vesting/src/mock.rs | 5 ++++- 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 34f6988c31643..142173621036d 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -34,7 +34,7 @@ use frame_support::{ traits::{ AsEnsureOriginWithArg, ConstU128, ConstU16, ConstU32, Currency, EitherOfDiverse, EqualPrivilegeOnly, Everything, Imbalance, InstanceFilter, KeyOwnerProofSystem, - LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, + LockIdentifier, Nothing, OnUnbalanced, U128CurrencyToVote, WithdrawReasons, }, weights::{ constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, @@ -1374,6 +1374,8 @@ impl pallet_society::Config for Runtime { parameter_types! { pub const MinVestedTransfer: Balance = 100 * DOLLARS; + pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = + WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); } impl pallet_vesting::Config for Runtime { @@ -1382,6 +1384,7 @@ impl pallet_vesting::Config for Runtime { type BlockNumberToBalance = ConvertInto; type MinVestedTransfer = MinVestedTransfer; type WeightInfo = pallet_vesting::weights::SubstrateWeight; + type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; // `VestingInfo` encode length is 36bytes. 28 schedules gets encoded as 1009 bytes, which is the // highest number of schedules that encodes less than 2^10. const MAX_VESTING_SCHEDULES: u32 = 28; diff --git a/frame/vesting/README.md b/frame/vesting/README.md index c3800eb994d4d..b19a60c5b6824 100644 --- a/frame/vesting/README.md +++ b/frame/vesting/README.md @@ -7,7 +7,8 @@ A simple module providing a means of placing a linear curve on an account's locked balance. This module ensures that there is a lock in place preventing the balance to drop below the *unvested* -amount for any reason other than transaction fee payment. +amount for reason other than the ones specified in `UnvestedFundsAllowedWithdrawReasons` +configuration value. As the amount vested increases over time, the amount unvested reduces. However, locks remain in place and explicit action is needed on behalf of the user to ensure that the amount locked is diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 1ca8d41f9a41c..a92f94baf6cf9 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -24,7 +24,8 @@ //! //! A simple pallet providing a means of placing a linear curve on an account's locked balance. This //! pallet ensures that there is a lock in place preventing the balance to drop below the *unvested* -//! 
amount for any reason other than transaction fee payment. +//! amount for any reason other than the ones specified in `UnvestedFundsAllowedWithdrawReasons` +//! configuration value. //! //! As the amount vested increases over time, the amount unvested reduces. However, locks remain in //! place and explicit action is needed on behalf of the user to ensure that the amount locked is @@ -170,6 +171,10 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + /// Reasons that determine under which conditions the balance may drop below + /// the unvested amount. + type UnvestedFundsAllowedWithdrawReasons: Get; + /// Maximum number of vesting schedules an account may have at a given moment. const MAX_VESTING_SCHEDULES: u32; } @@ -249,7 +254,9 @@ pub mod pallet { Vesting::::try_append(who, vesting_info) .expect("Too many vesting schedules at genesis."); - let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; + let reasons = + WithdrawReasons::except(T::UnvestedFundsAllowedWithdrawReasons::get()); + T::Currency::set_lock(VESTING_ID, who, locked, reasons); } } @@ -569,7 +576,7 @@ impl Pallet { T::Currency::remove_lock(VESTING_ID, who); Self::deposit_event(Event::::VestingCompleted { account: who.clone() }); } else { - let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; + let reasons = WithdrawReasons::except(T::UnvestedFundsAllowedWithdrawReasons::get()); T::Currency::set_lock(VESTING_ID, who, total_locked_now, reasons); Self::deposit_event(Event::::VestingUpdated { account: who.clone(), diff --git a/frame/vesting/src/mock.rs b/frame/vesting/src/mock.rs index c4e520b37c8c8..0bd371a0353f1 100644 --- a/frame/vesting/src/mock.rs +++ b/frame/vesting/src/mock.rs @@ -17,7 +17,7 @@ use frame_support::{ parameter_types, - traits::{ConstU32, ConstU64, GenesisBuild}, + traits::{ConstU32, ConstU64, GenesisBuild, WithdrawReasons}, }; use sp_core::H256; use sp_runtime::{ @@ -87,6 +87,8 @@ impl pallet_balances::Config for Test { } parameter_types! 
{ pub const MinVestedTransfer: u64 = 256 * 2; + pub UnvestedFundsAllowedWithdrawReasons: WithdrawReasons = + WithdrawReasons::except(WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE); pub static ExistentialDeposit: u64 = 0; } impl Config for Test { @@ -96,6 +98,7 @@ impl Config for Test { const MAX_VESTING_SCHEDULES: u32 = 3; type MinVestedTransfer = MinVestedTransfer; type WeightInfo = (); + type UnvestedFundsAllowedWithdrawReasons = UnvestedFundsAllowedWithdrawReasons; } pub struct ExtBuilder { From 0c1ccdaa53556a106aa69c23f19527e435970237 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Mon, 10 Oct 2022 10:10:53 +0300 Subject: [PATCH 224/348] Move block announcement protocol config out of `Protocol` (#12441) * Move Role(s) to `sc-network-common` * Introduce `NotificationHandshake` type * Move block announce protocol config creation to `ChainSync` * Include block announcement into `notification_protocols` * Apply review comments * Remove unneeded include * Add missing include * Apply review comments --- .../src/communication/gossip.rs | 2 +- .../src/communication/tests.rs | 5 +- client/finality-grandpa/src/lib.rs | 1 + client/network-gossip/src/bridge.rs | 2 +- client/network-gossip/src/state_machine.rs | 2 +- client/network-gossip/src/validator.rs | 2 +- client/network/Cargo.toml | 2 +- client/network/common/src/config.rs | 28 ++++ client/network/common/src/protocol.rs | 1 + client/network/common/src/protocol/event.rs | 24 +-- client/network/common/src/protocol/role.rs | 121 ++++++++++++++ client/network/common/src/sync/message.rs | 28 +++- client/network/src/behaviour.rs | 5 +- client/network/src/config.rs | 29 +--- client/network/src/lib.rs | 3 +- client/network/src/protocol.rs | 151 ++++-------------- client/network/src/protocol/message.rs | 60 +------ client/network/src/service.rs | 14 +- client/network/src/service/tests.rs | 110 ++++++++++++- client/network/sync/src/lib.rs | 73 ++++++++- client/network/test/src/lib.rs | 19 ++- client/network/transactions/src/lib.rs | 6 +- client/service/src/builder.rs | 14 ++ 23 files changed, 439 insertions(+), 263 deletions(-) create mode 100644 client/network/common/src/protocol/role.rs diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 95efedf7b23b7..1ba5e0da33c96 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -90,7 +90,7 @@ use parity_scale_codec::{Decode, Encode}; use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; use rand::seq::SliceRandom; use sc_network::{PeerId, ReputationChange}; -use sc_network_common::protocol::event::ObservedRole; +use sc_network_common::protocol::role::ObservedRole; use sc_network_gossip::{MessageIntent, ValidatorContext}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index b73f6cdecdd4f..eab7bb2df50cf 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -28,10 +28,7 @@ use parity_scale_codec::Encode; use sc_network::{config::Role, Multiaddr, PeerId, ReputationChange}; use sc_network_common::{ config::MultiaddrWithPeerId, - protocol::{ - event::{Event as NetworkEvent, ObservedRole}, - 
ProtocolName, - }, + protocol::{event::Event as NetworkEvent, role::ObservedRole, ProtocolName}, service::{ NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSyncForkRequest, NotificationSender, NotificationSenderError, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index d5c05fea78aa2..a7326d57c2bf0 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -695,6 +695,7 @@ pub fn grandpa_peers_set_config( fallback_names: grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. max_notification_size: 1024 * 1024, + handshake: None, set_config: sc_network_common::config::SetConfig { in_peers: 0, out_peers: 0, diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 121fa6dc9a50d..5563b3be35e8d 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -317,7 +317,7 @@ mod tests { use quickcheck::{Arbitrary, Gen, QuickCheck}; use sc_network_common::{ config::MultiaddrWithPeerId, - protocol::event::ObservedRole, + protocol::role::ObservedRole, service::{ NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NotificationSender, NotificationSenderError, diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 30a2e9d1494be..600383cf5f70d 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -22,7 +22,7 @@ use ahash::AHashSet; use libp2p::PeerId; use lru::LruCache; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network_common::protocol::{event::ObservedRole, ProtocolName}; +use sc_network_common::protocol::{role::ObservedRole, ProtocolName}; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; use std::{collections::HashMap, iter, sync::Arc, time, time::Instant}; diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index 185c2cfeed2c7..77dcc3bdc8791 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use libp2p::PeerId; -use sc_network_common::protocol::event::ObservedRole; +use sc_network_common::protocol::role::ObservedRole; use sp_runtime::traits::Block as BlockT; /// Validates consensus messages. 
diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 08d0f28394af0..99e6c9e708e7f 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -57,7 +57,7 @@ sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } [dev-dependencies] assert_matches = "1.3" -async-std = "1.11.0" +async-std = { version = "1.11.0", features = ["attributes"] } rand = "0.7.2" tempfile = "3.1.0" sc-network-light = { version = "0.10.0-dev", path = "./light" } diff --git a/client/network/common/src/config.rs b/client/network/common/src/config.rs index fb23cd0174922..e4a7f04c8d6e8 100644 --- a/client/network/common/src/config.rs +++ b/client/network/common/src/config.rs @@ -20,6 +20,7 @@ use crate::protocol; +use codec::Encode; use libp2p::{multiaddr, Multiaddr, PeerId}; use std::{fmt, str, str::FromStr}; @@ -199,6 +200,30 @@ impl Default for SetConfig { } } +/// Custom handshake for the notification protocol +#[derive(Debug, Clone)] +pub struct NotificationHandshake(Vec); + +impl NotificationHandshake { + /// Create new `NotificationHandshake` from an object that implements `Encode` + pub fn new(handshake: H) -> Self { + Self(handshake.encode()) + } + + /// Create new `NotificationHandshake` from raw bytes + pub fn from_bytes(bytes: Vec) -> Self { + Self(bytes) + } +} + +impl std::ops::Deref for NotificationHandshake { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + /// Extension to [`SetConfig`] for sets that aren't the default set. /// /// > **Note**: As new fields might be added in the future, please consider using the `new` method @@ -218,6 +243,8 @@ pub struct NonDefaultSetConfig { /// If a fallback is used, it will be reported in /// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback` pub fallback_names: Vec, + /// Handshake of the protocol + pub handshake: Option, /// Maximum allowed size of single notifications. pub max_notification_size: u64, /// Base configuration. @@ -231,6 +258,7 @@ impl NonDefaultSetConfig { notifications_protocol, max_notification_size, fallback_names: Vec::new(), + handshake: None, set_config: SetConfig { in_peers: 0, out_peers: 0, diff --git a/client/network/common/src/protocol.rs b/client/network/common/src/protocol.rs index 11edc373a2620..04bfaedbcac71 100644 --- a/client/network/common/src/protocol.rs +++ b/client/network/common/src/protocol.rs @@ -27,6 +27,7 @@ use std::{ use libp2p::core::upgrade; pub mod event; +pub mod role; /// The protocol name transmitted on the wire. #[derive(Debug, Clone)] diff --git a/client/network/common/src/protocol/event.rs b/client/network/common/src/protocol/event.rs index 3d8c183da495c..236913df1b120 100644 --- a/client/network/common/src/protocol/event.rs +++ b/client/network/common/src/protocol/event.rs @@ -20,6 +20,7 @@ //! events that happen on the network like DHT get/put results received. use super::ProtocolName; +use crate::protocol::role::ObservedRole; use bytes::Bytes; use libp2p::{core::PeerId, kad::record::Key}; @@ -97,26 +98,3 @@ pub enum Event { messages: Vec<(ProtocolName, Bytes)>, }, } - -/// Role that the peer sent to us during the handshake, with the addition of what our local node -/// knows about that peer. -/// -/// > **Note**: This enum is different from the `Role` enum. The `Role` enum indicates what a -/// > node says about itself, while `ObservedRole` is a `Role` merged with the -/// > information known locally about that node. -#[derive(Debug, Clone)] -pub enum ObservedRole { - /// Full node. 
- Full, - /// Light node. - Light, - /// Third-party authority. - Authority, -} - -impl ObservedRole { - /// Returns `true` for `ObservedRole::Light`. - pub fn is_light(&self) -> bool { - matches!(self, Self::Light) - } -} diff --git a/client/network/common/src/protocol/role.rs b/client/network/common/src/protocol/role.rs new file mode 100644 index 0000000000000..ed22830fd7170 --- /dev/null +++ b/client/network/common/src/protocol/role.rs @@ -0,0 +1,121 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use codec::{self, Encode, EncodeLike, Input, Output}; + +/// Role that the peer sent to us during the handshake, with the addition of what our local node +/// knows about that peer. +/// +/// > **Note**: This enum is different from the `Role` enum. The `Role` enum indicates what a +/// > node says about itself, while `ObservedRole` is a `Role` merged with the +/// > information known locally about that node. +#[derive(Debug, Clone)] +pub enum ObservedRole { + /// Full node. + Full, + /// Light node. + Light, + /// Third-party authority. + Authority, +} + +impl ObservedRole { + /// Returns `true` for `ObservedRole::Light`. + pub fn is_light(&self) -> bool { + matches!(self, Self::Light) + } +} + +/// Role of the local node. +#[derive(Debug, Clone)] +pub enum Role { + /// Regular full node. + Full, + /// Actual authority. + Authority, +} + +impl Role { + /// True for [`Role::Authority`]. + pub fn is_authority(&self) -> bool { + matches!(self, Self::Authority) + } +} + +impl std::fmt::Display for Role { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Full => write!(f, "FULL"), + Self::Authority => write!(f, "AUTHORITY"), + } + } +} + +bitflags::bitflags! { + /// Bitmask of the roles that a node fulfills. + pub struct Roles: u8 { + /// No network. + const NONE = 0b00000000; + /// Full node, does not participate in consensus. + const FULL = 0b00000001; + /// Light client node. + const LIGHT = 0b00000010; + /// Act as an authority + const AUTHORITY = 0b00000100; + } +} + +impl Roles { + /// Does this role represents a client that holds full chain data locally? + pub fn is_full(&self) -> bool { + self.intersects(Self::FULL | Self::AUTHORITY) + } + + /// Does this role represents a client that does not participates in the consensus? + pub fn is_authority(&self) -> bool { + *self == Self::AUTHORITY + } + + /// Does this role represents a client that does not hold full chain data locally? 
+ pub fn is_light(&self) -> bool { + !self.is_full() + } +} + +impl<'a> From<&'a Role> for Roles { + fn from(roles: &'a Role) -> Self { + match roles { + Role::Full => Self::FULL, + Role::Authority => Self::AUTHORITY, + } + } +} + +impl Encode for Roles { + fn encode_to(&self, dest: &mut T) { + dest.push_byte(self.bits()) + } +} + +impl EncodeLike for Roles {} + +impl codec::Decode for Roles { + fn decode(input: &mut I) -> Result { + Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) + } +} diff --git a/client/network/common/src/sync/message.rs b/client/network/common/src/sync/message.rs index db9f747108c9f..346f1dbce9bcc 100644 --- a/client/network/common/src/sync/message.rs +++ b/client/network/common/src/sync/message.rs @@ -19,10 +19,12 @@ //! Network packet message types. These get serialized and put into the lower level protocol //! payload. +use crate::protocol::role::Roles; + use bitflags::bitflags; use codec::{Decode, Encode, Error, Input, Output}; pub use generic::{BlockAnnounce, FromBlock}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; /// Type alias for using the block request type using block type parameters. pub type BlockRequest = @@ -218,3 +220,27 @@ pub mod generic { } } } + +/// Handshake sent when we open a block announces substream. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct BlockAnnouncesHandshake { + /// Roles of the node. + pub roles: Roles, + /// Best block number. + pub best_number: NumberFor, + /// Best block hash. + pub best_hash: B::Hash, + /// Genesis block hash. + pub genesis_hash: B::Hash, +} + +impl BlockAnnouncesHandshake { + pub fn build( + roles: Roles, + best_number: NumberFor, + best_hash: B::Hash, + genesis_hash: B::Hash, + ) -> Self { + Self { genesis_hash, roles, best_number, best_hash } + } +} diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 14962c837aa10..b31f36eb46692 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -19,7 +19,7 @@ use crate::{ discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, peer_info, - protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, + protocol::{CustomMessageOutcome, NotificationsSink, Protocol}, request_responses, }; @@ -41,7 +41,8 @@ use sc_consensus::import_queue::{IncomingBlock, RuntimeOrigin}; use sc_network_common::{ config::ProtocolId, protocol::{ - event::{DhtEvent, ObservedRole}, + event::DhtEvent, + role::{ObservedRole, Roles}, ProtocolName, }, request_responses::{IfDisconnected, ProtocolConfig, RequestFailure}, diff --git a/client/network/src/config.rs b/client/network/src/config.rs index b2adfa81d065b..db3e8f0b98a33 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -23,6 +23,7 @@ pub use sc_network_common::{ config::ProtocolId, + protocol::role::Role, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, @@ -93,6 +94,9 @@ where /// Registry for recording prometheus metrics to. pub metrics_registry: Option, + /// Block announce protocol configuration + pub block_announce_config: NonDefaultSetConfig, + /// Request response configuration for the block request protocol. /// /// [`RequestResponseConfig::name`] is used to tag outgoing block requests with the correct @@ -130,31 +134,6 @@ where pub request_response_protocol_configs: Vec, } -/// Role of the local node. 
-#[derive(Debug, Clone)] -pub enum Role { - /// Regular full node. - Full, - /// Actual authority. - Authority, -} - -impl Role { - /// True for [`Role::Authority`]. - pub fn is_authority(&self) -> bool { - matches!(self, Self::Authority { .. }) - } -} - -impl fmt::Display for Role { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Full => write!(f, "FULL"), - Self::Authority { .. } => write!(f, "AUTHORITY"), - } - } -} - /// Sync operation mode. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum SyncMode { diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index d17f47328b804..27f2a938154fe 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -260,7 +260,8 @@ pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use protocol::PeerInfo; pub use sc_network_common::{ protocol::{ - event::{DhtEvent, Event, ObservedRole}, + event::{DhtEvent, Event}, + role::ObservedRole, ProtocolName, }, request_responses::{IfDisconnected, RequestFailure}, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index fbf651de9d49a..325e044527efa 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -31,10 +31,7 @@ use libp2p::{ Multiaddr, PeerId, }; use log::{debug, error, info, log, trace, warn, Level}; -use message::{ - generic::{Message as GenericMessage, Roles}, - Message, -}; +use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use sc_client_api::HeaderBackend; @@ -42,13 +39,14 @@ use sc_consensus::import_queue::{ BlockImportError, BlockImportStatus, IncomingBlock, RuntimeOrigin, }; use sc_network_common::{ - config::{NonReservedPeerMode, ProtocolId}, + config::NonReservedPeerMode, error, - protocol::ProtocolName, + protocol::{role::Roles, ProtocolName}, request_responses::RequestFailure, sync::{ message::{ - BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, BlockState, + BlockAnnounce, BlockAnnouncesHandshake, BlockAttributes, BlockData, BlockRequest, + BlockResponse, BlockState, }, warp::{EncodedProof, WarpProofRequest}, BadPeer, ChainSync, OnBlockData, OnBlockJustification, OnStateData, OpaqueBlockRequest, @@ -85,8 +83,6 @@ const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); /// Maximum number of known block hashes to keep for a peer. const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead -/// Maximum allowed size for a block announce. -const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; /// Maximum size used for notifications in the block announce and transaction protocols. // Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`. @@ -235,30 +231,6 @@ pub struct PeerInfo { pub best_number: ::Number, } -/// Handshake sent when we open a block announces substream. -#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] -struct BlockAnnouncesHandshake { - /// Roles of the node. - roles: Roles, - /// Best block number. - best_number: NumberFor, - /// Best block hash. - best_hash: B::Hash, - /// Genesis block hash. 
- genesis_hash: B::Hash, -} - -impl BlockAnnouncesHandshake { - fn build( - roles: Roles, - best_number: NumberFor, - best_hash: B::Hash, - genesis_hash: B::Hash, - ) -> Self { - Self { genesis_hash, roles, best_number, best_hash } - } -} - impl Protocol where B: BlockT, @@ -268,12 +240,10 @@ where pub fn new( roles: Roles, chain: Arc, - protocol_id: ProtocolId, - fork_id: &Option, network_config: &config::NetworkConfiguration, - notifications_protocols_handshakes: Vec>, metrics_registry: Option<&Registry>, chain_sync: Box>, + block_announces_protocol: sc_network_common::config::NonDefaultSetConfig, ) -> error::Result<(Self, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); @@ -365,51 +335,24 @@ where sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets }) }; - let block_announces_protocol = { - let genesis_hash = - chain.hash(0u32.into()).ok().flatten().expect("Genesis block exists; qed"); - let genesis_hash = genesis_hash.as_ref(); - if let Some(fork_id) = fork_id { - format!( - "/{}/{}/block-announces/1", - array_bytes::bytes2hex("", genesis_hash), - fork_id - ) - } else { - format!("/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash)) - } - }; - - let legacy_ba_protocol_name = format!("/{}/block-announces/1", protocol_id.as_ref()); - let behaviour = { - let best_number = info.best_number; - let best_hash = info.best_hash; - let genesis_hash = info.genesis_hash; - - let block_announces_handshake = - BlockAnnouncesHandshake::::build(roles, best_number, best_hash, genesis_hash) - .encode(); - - let sync_protocol_config = notifications::ProtocolConfig { - name: block_announces_protocol.into(), - fallback_names: iter::once(legacy_ba_protocol_name.into()).collect(), - handshake: block_announces_handshake, - max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, - }; - Notifications::new( peerset, - iter::once(sync_protocol_config).chain( - network_config.extra_sets.iter().zip(notifications_protocols_handshakes).map( - |(s, hs)| notifications::ProtocolConfig { - name: s.notifications_protocol.clone(), - fallback_names: s.fallback_names.clone(), - handshake: hs, - max_notification_size: s.max_notification_size, - }, - ), - ), + // NOTE: Block announcement protocol is still very much hardcoded into `Protocol`. 
+ // This protocol must be the first notification protocol given to + // `Notifications` + iter::once(notifications::ProtocolConfig { + name: block_announces_protocol.notifications_protocol.clone(), + fallback_names: block_announces_protocol.fallback_names.clone(), + handshake: block_announces_protocol.handshake.as_ref().unwrap().to_vec(), + max_notification_size: block_announces_protocol.max_notification_size, + }) + .chain(network_config.extra_sets.iter().map(|s| notifications::ProtocolConfig { + name: s.notifications_protocol.clone(), + fallback_names: s.fallback_names.clone(), + handshake: s.handshake.as_ref().map_or(roles.encode(), |h| (*h).to_vec()), + max_notification_size: s.max_notification_size, + })), ) }; @@ -437,10 +380,8 @@ where }, peerset_handle: peerset_handle.clone(), behaviour, - notification_protocols: network_config - .extra_sets - .iter() - .map(|s| s.notifications_protocol.clone()) + notification_protocols: iter::once(block_announces_protocol.notifications_protocol) + .chain(network_config.extra_sets.iter().map(|s| s.notifications_protocol.clone())) .collect(), bad_handshake_substreams: Default::default(), metrics: if let Some(r) = metrics_registry { @@ -469,10 +410,7 @@ where pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: ProtocolName) { if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) { - self.behaviour.disconnect_peer( - peer_id, - sc_peerset::SetId::from(position + NUM_HARDCODED_PEERSETS), - ); + self.behaviour.disconnect_peer(peer_id, sc_peerset::SetId::from(position)); } else { warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") } @@ -1095,8 +1033,7 @@ where /// Sets the list of reserved peers for the given protocol/peerset. pub fn set_reserved_peerset_peers(&self, protocol: ProtocolName, peers: HashSet) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle - .set_reserved_peers(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peers); + self.peerset_handle.set_reserved_peers(sc_peerset::SetId::from(index), peers); } else { error!( target: "sub-libp2p", @@ -1109,10 +1046,7 @@ where /// Removes a `PeerId` from the list of reserved peers. pub fn remove_set_reserved_peer(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.remove_reserved_peer( - sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), - peer, - ); + self.peerset_handle.remove_reserved_peer(sc_peerset::SetId::from(index), peer); } else { error!( target: "sub-libp2p", @@ -1125,8 +1059,7 @@ where /// Adds a `PeerId` to the list of reserved peers. pub fn add_set_reserved_peer(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle - .add_reserved_peer(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + self.peerset_handle.add_reserved_peer(sc_peerset::SetId::from(index), peer); } else { error!( target: "sub-libp2p", @@ -1148,8 +1081,7 @@ where /// Add a peer to a peers set. 
pub fn add_to_peers_set(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle - .add_to_peers_set(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + self.peerset_handle.add_to_peers_set(sc_peerset::SetId::from(index), peer); } else { error!( target: "sub-libp2p", @@ -1162,10 +1094,7 @@ where /// Remove a peer from a peers set. pub fn remove_from_peers_set(&self, protocol: ProtocolName, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.remove_from_peers_set( - sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), - peer, - ); + self.peerset_handle.remove_from_peers_set(sc_peerset::SetId::from(index), peer); } else { error!( target: "sub-libp2p", @@ -1627,14 +1556,12 @@ where } } else { match ( - message::Roles::decode_all(&mut &received_handshake[..]), + Roles::decode_all(&mut &received_handshake[..]), self.peers.get(&peer_id), ) { (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, - protocol: self.notification_protocols - [usize::from(set_id) - NUM_HARDCODED_PEERSETS] - .clone(), + protocol: self.notification_protocols[usize::from(set_id)].clone(), negotiated_fallback, roles, notifications_sink, @@ -1646,9 +1573,7 @@ where // TODO: remove this after https://github.com/paritytech/substrate/issues/5685 CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, - protocol: self.notification_protocols - [usize::from(set_id) - NUM_HARDCODED_PEERSETS] - .clone(), + protocol: self.notification_protocols[usize::from(set_id)].clone(), negotiated_fallback, roles: peer.info.roles, notifications_sink, @@ -1672,9 +1597,7 @@ where } else { CustomMessageOutcome::NotificationStreamReplaced { remote: peer_id, - protocol: self.notification_protocols - [usize::from(set_id) - NUM_HARDCODED_PEERSETS] - .clone(), + protocol: self.notification_protocols[usize::from(set_id)].clone(), notifications_sink, } }, @@ -1699,9 +1622,7 @@ where } else { CustomMessageOutcome::NotificationStreamClosed { remote: peer_id, - protocol: self.notification_protocols - [usize::from(set_id) - NUM_HARDCODED_PEERSETS] - .clone(), + protocol: self.notification_protocols[usize::from(set_id)].clone(), } } }, @@ -1734,9 +1655,7 @@ where _ if self.bad_handshake_substreams.contains(&(peer_id, set_id)) => CustomMessageOutcome::None, _ => { - let protocol_name = self.notification_protocols - [usize::from(set_id) - NUM_HARDCODED_PEERSETS] - .clone(); + let protocol_name = self.notification_protocols[usize::from(set_id)].clone(); CustomMessageOutcome::NotificationsReceived { remote: peer_id, messages: vec![(protocol_name, message.freeze())], diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 3e1281753b82c..ef652387d2c7d 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -21,7 +21,7 @@ pub use self::generic::{ RemoteCallRequest, RemoteChangesRequest, RemoteChangesResponse, RemoteHeaderRequest, - RemoteHeaderResponse, RemoteReadChildRequest, RemoteReadRequest, Roles, + RemoteHeaderResponse, RemoteReadChildRequest, RemoteReadRequest, }; use codec::{Decode, Encode}; use sc_client_api::StorageProof; @@ -57,11 +57,11 @@ pub struct RemoteReadResponse { /// Generic types. 
pub mod generic { use super::{RemoteCallResponse, RemoteReadResponse}; - use bitflags::bitflags; - use codec::{Decode, Encode, Input, Output}; + use codec::{Decode, Encode, Input}; use sc_client_api::StorageProof; use sc_network_common::{ message::RequestId, + protocol::role::Roles, sync::message::{ generic::{BlockRequest, BlockResponse}, BlockAnnounce, @@ -69,60 +69,6 @@ pub mod generic { }; use sp_runtime::ConsensusEngineId; - bitflags! { - /// Bitmask of the roles that a node fulfills. - pub struct Roles: u8 { - /// No network. - const NONE = 0b00000000; - /// Full node, does not participate in consensus. - const FULL = 0b00000001; - /// Light client node. - const LIGHT = 0b00000010; - /// Act as an authority - const AUTHORITY = 0b00000100; - } - } - - impl Roles { - /// Does this role represents a client that holds full chain data locally? - pub fn is_full(&self) -> bool { - self.intersects(Self::FULL | Self::AUTHORITY) - } - - /// Does this role represents a client that does not participates in the consensus? - pub fn is_authority(&self) -> bool { - *self == Self::AUTHORITY - } - - /// Does this role represents a client that does not hold full chain data locally? - pub fn is_light(&self) -> bool { - !self.is_full() - } - } - - impl<'a> From<&'a crate::config::Role> for Roles { - fn from(roles: &'a crate::config::Role) -> Self { - match roles { - crate::config::Role::Full => Self::FULL, - crate::config::Role::Authority { .. } => Self::AUTHORITY, - } - } - } - - impl codec::Encode for Roles { - fn encode_to(&self, dest: &mut T) { - dest.push_byte(self.bits()) - } - } - - impl codec::EncodeLike for Roles {} - - impl codec::Decode for Roles { - fn decode(input: &mut I) -> Result { - Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) - } - } - /// Consensus is mostly opaque to us #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct ConsensusMessage { diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 180482e75ece2..28e479b702779 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -34,14 +34,10 @@ use crate::{ network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, - protocol::{ - self, message::generic::Roles, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, - Ready, - }, + protocol::{self, NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready}, transport, ReputationChange, }; -use codec::Encode as _; use futures::{channel::oneshot, prelude::*}; use libp2p::{ core::{either::EitherError, upgrade, ConnectedPoint, Executor}, @@ -222,19 +218,13 @@ where local_peer_id.to_base58(), ); - let default_notif_handshake_message = Roles::from(¶ms.role).encode(); - let (protocol, peerset_handle, mut known_addresses) = Protocol::new( From::from(¶ms.role), params.chain.clone(), - params.protocol_id.clone(), - ¶ms.fork_id, ¶ms.network_config, - (0..params.network_config.extra_sets.len()) - .map(|_| default_notif_handshake_message.clone()) - .collect(), params.metrics_registry.as_ref(), params.chain_sync, + params.block_announce_config, )?; // List of multiaddresses that we know in the network. 
diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index c8f137f79c6dc..7c651c675b83e 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -20,10 +20,15 @@ use crate::{config, NetworkService, NetworkWorker}; use futures::prelude::*; use libp2p::PeerId; +use sc_client_api::{BlockBackend, HeaderBackend}; use sc_network_common::{ - config::{MultiaddrWithPeerId, NonDefaultSetConfig, ProtocolId, SetConfig, TransportConfig}, - protocol::event::Event, + config::{ + MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, + ProtocolId, SetConfig, TransportConfig, + }, + protocol::{event::Event, role::Roles}, service::{NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo}, + sync::message::BlockAnnouncesHandshake, }; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ @@ -31,7 +36,7 @@ use sc_network_sync::{ ChainSync, }; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; -use sp_runtime::traits::{Block as BlockT, Header as _}; +use sp_runtime::traits::{Block as BlockT, Header as _, Zero}; use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; @@ -132,7 +137,33 @@ fn build_test_full_node( None, ) .unwrap(); + + let block_announce_config = NonDefaultSetConfig { + notifications_protocol: BLOCK_ANNOUNCE_PROTO_NAME.into(), + fallback_names: vec![], + max_notification_size: 1024 * 1024, + handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::< + substrate_test_runtime_client::runtime::Block, + >::build( + Roles::from(&config::Role::Full), + client.info().best_number, + client.info().best_hash, + client + .block_hash(Zero::zero()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + ))), + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + }; + let worker = NetworkWorker::new(config::Params { + block_announce_config, role: config::Role::Full, executor: None, network_config, @@ -161,6 +192,7 @@ fn build_test_full_node( (service, event_stream) } +const BLOCK_ANNOUNCE_PROTO_NAME: &str = "/block-announces"; const PROTOCOL_NAME: &str = "/foo"; /// Builds two nodes and their associated events stream. 
@@ -178,6 +210,7 @@ fn build_nodes_one_proto() -> ( notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: Default::default(), }], listen_addresses: vec![listen_addr.clone()], @@ -190,6 +223,7 @@ fn build_nodes_one_proto() -> ( notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, @@ -368,6 +402,7 @@ fn lots_of_incoming_peers_works() { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: SetConfig { in_peers: u32::MAX, ..Default::default() }, }], transport: TransportConfig::MemoryOnly, @@ -387,6 +422,7 @@ fn lots_of_incoming_peers_works() { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr.clone(), @@ -504,6 +540,7 @@ fn fallback_name_working() { notifications_protocol: NEW_PROTOCOL_NAME.into(), fallback_names: vec![PROTOCOL_NAME.into()], max_notification_size: 1024 * 1024, + handshake: None, set_config: Default::default(), }], listen_addresses: vec![listen_addr.clone()], @@ -516,6 +553,7 @@ fn fallback_name_working() { notifications_protocol: PROTOCOL_NAME.into(), fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, @@ -561,6 +599,72 @@ fn fallback_name_working() { }); } +// Disconnect peer by calling `Protocol::disconnect_peer()` with the supplied block announcement +// protocol name and verify that `SyncDisconnected` event is emitted +#[async_std::test] +async fn disconnect_sync_peer_using_block_announcement_protocol_name() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { + extra_sets: vec![NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME.into(), + fallback_names: vec![], + max_notification_size: 1024 * 1024, + handshake: None, + set_config: Default::default(), + }], + listen_addresses: vec![listen_addr.clone()], + transport: TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); + + let (node2, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { + extra_sets: vec![NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME.into(), + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + handshake: None, + set_config: SetConfig { + reserved_nodes: vec![MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id(), + }], + ..Default::default() + }, + }], + listen_addresses: vec![], + transport: TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); + + loop { + match events_stream1.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => break, + _ => {}, + }; + } + + loop { + match events_stream2.next().await.unwrap() { + Event::NotificationStreamOpened { .. 
} => break, + _ => {}, + }; + } + + // disconnect peer using `PROTOCOL_NAME`, verify `NotificationStreamClosed` event is emitted + node2.disconnect_peer(node1.local_peer_id(), PROTOCOL_NAME.into()); + assert!(std::matches!( + events_stream2.next().await, + Some(Event::NotificationStreamClosed { .. }) + )); + let _ = events_stream2.next().await; // ignore the reopen event + + // now disconnect using the block announcement protocol, verify that `SyncDisconnected` is + // emitted + node2.disconnect_peer(node1.local_peer_id(), BLOCK_ANNOUNCE_PROTO_NAME.into()); + assert!(std::matches!(events_stream2.next().await, Some(Event::SyncDisconnected { .. }))); +} + #[test] #[should_panic(expected = "don't match the transport")] fn ensure_listen_addresses_consistent_with_transport_memory() { diff --git a/client/network/sync/src/lib.rs b/client/network/sync/src/lib.rs index 6348490351140..280e530eca9a9 100644 --- a/client/network/sync/src/lib.rs +++ b/client/network/sync/src/lib.rs @@ -50,15 +50,21 @@ use log::{debug, error, info, trace, warn}; use prost::Message; use sc_client_api::{BlockBackend, ProofProvider}; use sc_consensus::{BlockImportError, BlockImportStatus, IncomingBlock}; -use sc_network_common::sync::{ - message::{ - BlockAnnounce, BlockAttributes, BlockData, BlockRequest, BlockResponse, Direction, - FromBlock, +use sc_network_common::{ + config::{ + NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, + }, + protocol::role::Roles, + sync::{ + message::{ + BlockAnnounce, BlockAnnouncesHandshake, BlockAttributes, BlockData, BlockRequest, + BlockResponse, Direction, FromBlock, + }, + warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, WarpSyncProvider}, + BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, + OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, + PollBlockAnnounceValidation, SyncMode, SyncState, SyncStatus, }, - warp::{EncodedProof, WarpProofRequest, WarpSyncPhase, WarpSyncProgress, WarpSyncProvider}, - BadPeer, ChainSync as ChainSyncT, Metrics, OnBlockData, OnBlockJustification, OnStateData, - OpaqueBlockRequest, OpaqueBlockResponse, OpaqueStateRequest, OpaqueStateResponse, PeerInfo, - PollBlockAnnounceValidation, SyncMode, SyncState, SyncStatus, }; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; @@ -76,6 +82,7 @@ use sp_runtime::{ }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, + iter, ops::Range, pin::Pin, sync::Arc, @@ -121,6 +128,9 @@ const MAJOR_SYNC_BLOCKS: u8 = 5; /// Number of peers that need to be connected before warp sync is started. const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; +/// Maximum allowed size for a block announce. 
+const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; + mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer sent us a message that led to a @@ -2231,6 +2241,53 @@ where } None } + + /// Get config for the block announcement protocol + pub fn get_block_announce_proto_config( + &self, + protocol_id: ProtocolId, + fork_id: &Option, + roles: Roles, + best_number: NumberFor, + best_hash: B::Hash, + genesis_hash: B::Hash, + ) -> NonDefaultSetConfig { + let block_announces_protocol = { + let genesis_hash = genesis_hash.as_ref(); + if let Some(ref fork_id) = fork_id { + format!( + "/{}/{}/block-announces/1", + array_bytes::bytes2hex("", genesis_hash), + fork_id + ) + } else { + format!("/{}/block-announces/1", array_bytes::bytes2hex("", genesis_hash)) + } + }; + + NonDefaultSetConfig { + notifications_protocol: block_announces_protocol.into(), + fallback_names: iter::once( + format!("/{}/block-announces/1", protocol_id.as_ref()).into(), + ) + .collect(), + max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, + handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( + roles, + best_number, + best_hash, + genesis_hash, + ))), + // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement + // protocol is still hardcoded into the peerset. + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + } + } } // This is purely during a backwards compatible transitionary period and should be removed diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 9d5abf98ceff0..a7c58631dc0f7 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -55,7 +55,7 @@ use sc_network_common::{ config::{ MultiaddrWithPeerId, NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, TransportConfig, }, - protocol::ProtocolName, + protocol::{role::Roles, ProtocolName}, service::{NetworkBlock, NetworkStateInfo, NetworkSyncForkRequest}, sync::warp::{AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncProvider}, }; @@ -77,7 +77,7 @@ use sp_core::H256; use sp_runtime::{ codec::{Decode, Encode}, generic::{BlockId, OpaqueDigestItemId}, - traits::{Block as BlockT, Header as HeaderT, NumberFor}, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justification, Justifications, }; use substrate_test_runtime_client::AccountKeyring; @@ -802,6 +802,7 @@ where notifications_protocol: p, fallback_names: Vec::new(), max_notification_size: 1024 * 1024, + handshake: None, set_config: Default::default(), }) .collect(); @@ -879,6 +880,19 @@ where Some(warp_sync), ) .unwrap(); + let block_announce_config = chain_sync.get_block_announce_proto_config( + protocol_id.clone(), + &fork_id, + Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), + client.info().best_number, + client.info().best_hash, + client + .block_hash(Zero::zero()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + ); + let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, @@ -889,6 +903,7 @@ where import_queue, chain_sync: Box::new(chain_sync), metrics_registry: None, + block_announce_config, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, diff --git a/client/network/transactions/src/lib.rs b/client/network/transactions/src/lib.rs index b75bd411b39c4..5239a94ef23f3 100644 --- 
a/client/network/transactions/src/lib.rs +++ b/client/network/transactions/src/lib.rs @@ -35,10 +35,7 @@ use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network_common::{ config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, error, - protocol::{ - event::{Event, ObservedRole}, - ProtocolName, - }, + protocol::{event::Event, role::ObservedRole, ProtocolName}, service::{NetworkEventStream, NetworkNotification, NetworkPeers}, utils::{interval, LruHashSet}, ExHashT, @@ -145,6 +142,7 @@ impl TransactionsHandlerPrototype { notifications_protocol: self.protocol_name.clone(), fallback_names: self.fallback_protocol_names.clone(), max_notification_size: MAX_TRANSACTIONS_SIZE, + handshake: None, set_config: SetConfig { in_peers: 0, out_peers: 0, diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index dfd532a14c172..4301e17a8c31e 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -40,6 +40,7 @@ use sc_keystore::LocalKeystore; use sc_network::{config::SyncMode, NetworkService}; use sc_network_bitswap::BitswapRequestHandler; use sc_network_common::{ + protocol::role::Roles, service::{NetworkStateInfo, NetworkStatusProvider}, sync::warp::WarpSyncProvider, }; @@ -843,6 +844,18 @@ where config.network.max_parallel_downloads, warp_sync_provider, )?; + let block_announce_config = chain_sync.get_block_announce_proto_config( + protocol_id.clone(), + &config.chain_spec.fork_id().map(ToOwned::to_owned), + Roles::from(&config.role.clone()), + client.info().best_number, + client.info().best_hash, + client + .block_hash(Zero::zero()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + ); request_response_protocol_configs.push(config.network.ipfs_server.then(|| { let (handler, protocol_config) = BitswapRequestHandler::new(client.clone()); @@ -865,6 +878,7 @@ where import_queue: Box::new(import_queue), chain_sync: Box::new(chain_sync), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), + block_announce_config, block_request_protocol_config, state_request_protocol_config, warp_sync_protocol_config, From df81976c40e2e0573d59c51e12eb3c15c3ff3057 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Mon, 10 Oct 2022 16:25:11 +0200 Subject: [PATCH 225/348] Dont ignore errors in pallet benchmarking (#12449) Signed-off-by: Oliver Tale-Yazdi Signed-off-by: Oliver Tale-Yazdi --- .../benchmarking-cli/src/pallet/command.rs | 21 +++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/utils/frame/benchmarking-cli/src/pallet/command.rs b/utils/frame/benchmarking-cli/src/pallet/command.rs index 72592617c52ac..daf4aa74e1394 100644 --- a/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -297,16 +297,15 @@ impl PalletCmd { for (s, selected_components) in all_components.iter().enumerate() { // First we run a verification if !self.no_verify { - // Dont use these results since verification code will add overhead let state = &state_without_tracking; - let _results = StateMachine::new( + let result = StateMachine::new( state, &mut changes, &executor, "Benchmark_dispatch_benchmark", &( - &pallet.clone(), - &extrinsic.clone(), + &pallet, + &extrinsic, &selected_components.clone(), true, // run verification code 1, // no need to do internal repeats @@ -321,6 +320,20 @@ impl PalletCmd { .map_err(|e| { format!("Error executing and verifying runtime benchmark: {}", e) })?; + // Dont use these 
results since verification code will add overhead. + let _batch = + , String> as Decode>::decode( + &mut &result[..], + ) + .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))? + .map_err(|e| { + format!( + "Benchmark {}::{} failed: {}", + String::from_utf8_lossy(&pallet), + String::from_utf8_lossy(&extrinsic), + e + ) + })?; } // Do one loop of DB tracking. { From 9672c362240d77d11f3bd56c701743f0ed86f84b Mon Sep 17 00:00:00 2001 From: Vlad Date: Mon, 10 Oct 2022 21:04:51 +0300 Subject: [PATCH 226/348] Update UI tests for Rust 1.64 (#12440) * Update UI tests for Rust 1.64 * Test with the staging image * Switch back to production --- .../no_std_genesis_config.stderr | 2 +- .../pallet_error_too_large.stderr | 2 +- .../undefined_call_part.stderr | 2 +- .../undefined_event_part.stderr | 2 +- .../undefined_genesis_config_part.stderr | 2 +- .../undefined_inherent_part.stderr | 12 +++++------ .../undefined_origin_part.stderr | 8 +++++-- .../undefined_validate_unsigned_part.stderr | 21 ++++++++++++------- ...ed_keyword_two_times_integrity_test.stderr | 2 +- ...ved_keyword_two_times_on_initialize.stderr | 4 ++-- .../tests/derive_no_bound_ui/debug.stderr | 4 ++-- .../call_argument_invalid_bound.stderr | 2 +- .../call_argument_invalid_bound_2.stderr | 16 +++++++------- .../call_argument_invalid_bound_3.stderr | 2 +- .../pallet_ui/event_field_not_member.stderr | 2 +- ...age_ensure_span_are_ok_on_wrong_gen.stderr | 4 ++-- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 4 ++-- .../ui/impl_incorrect_method_signature.stderr | 17 +++++++++++++-- ...reference_in_impl_runtime_apis_call.stderr | 16 ++++++++++++-- 19 files changed, 80 insertions(+), 44 deletions(-) diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr index 1f08ab87c1f79..26c0717c0ad37 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr @@ -10,7 +10,7 @@ error: `Pallet` does not have the std feature enabled, this will cause the `test 49 | | } | |_^ | - = note: this error originates in the macro `test_pallet::__substrate_genesis_config_check::is_std_enabled_for_genesis` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `test_pallet::__substrate_genesis_config_check::is_std_enabled_for_genesis` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `GenesisConfig` in crate `test_pallet` --> tests/construct_runtime_ui/no_std_genesis_config.rs:40:1 diff --git a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr index 161873866b6f3..99a543eef7a8a 100644 --- a/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr +++ b/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.stderr @@ -10,4 +10,4 @@ error[E0080]: evaluation of constant value failed 83 | | } | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:74:1 | - = note: this error originates in the macro `$crate::panic::panic_2021` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this 
error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr index c162a22bb87b0..6baf01e866f64 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -13,4 +13,4 @@ error: `Pallet` does not have #[pallet::call] defined, perhaps you should remove 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index 920e627d43c31..8f2bf7be15749 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::event] defined, perhaps you should remov 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Event` in module `pallet` --> tests/construct_runtime_ui/undefined_event_part.rs:49:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index 1bc109a45ac57..aae3aaa80c865 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::genesis_config] defined, perhaps you sho 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `GenesisConfig` in module `pallet` --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:49:1 diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index 9f646469d86a8..74af0c264cd5e 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ 
b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -13,13 +13,13 @@ error: `Pallet` does not have #[pallet::inherent] defined, perhaps you should re 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `create_inherent` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `create_inherent` not found for this + | -------------------- function or associated item `create_inherent` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -39,7 +39,7 @@ error[E0599]: no function or associated item named `is_inherent` found for struc --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `is_inherent` not found for this + | -------------------- function or associated item `is_inherent` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -59,7 +59,7 @@ error[E0599]: no function or associated item named `check_inherent` found for st --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `check_inherent` not found for this + | -------------------- function or associated item `check_inherent` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -79,7 +79,7 @@ error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `p --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ associated item `INHERENT_IDENTIFIER` not found for this + | -------------------- associated item `INHERENT_IDENTIFIER` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -99,7 +99,7 @@ error[E0599]: no function or associated item named `is_inherent_required` found --> tests/construct_runtime_ui/undefined_inherent_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `is_inherent_required` not found for this + | -------------------- function or associated item `is_inherent_required` not found for this struct ... 49 | / construct_runtime! 
{ 50 | | pub enum Runtime where diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index b93a2a92911ea..1a8fe64da1758 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::origin] defined, perhaps you should remo 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Origin` in module `pallet` --> tests/construct_runtime_ui/undefined_origin_part.rs:49:1 @@ -56,6 +56,10 @@ error[E0282]: type annotations needed ... | 57 | | } 58 | | } - | |_^ cannot infer type for type parameter `AccountId` declared on the enum `RawOrigin` + | |_^ cannot infer type of the type parameter `AccountId` declared on the enum `RawOrigin` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) +help: consider specifying the generic argument + | +58 | }:: + | +++++++++++++ diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index a5e4fe3c1cd5a..6f0b13c58933e 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -13,22 +13,27 @@ error: `Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you 58 | | } | |_- in this macro invocation | - = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no variant or associated item named `Pallet` found for enum `RuntimeCall` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:56:3 | -49 | construct_runtime! { - | ------------------ variant or associated item `Pallet` not found here -... -56 | Pallet: pallet::{Pallet, ValidateUnsigned}, - | ^^^^^^ variant or associated item not found in `RuntimeCall` +49 | / construct_runtime! { +50 | | pub enum Runtime where +51 | | Block = Block, +52 | | NodeBlock = Block, +... 
| +56 | | Pallet: pallet::{Pallet, ValidateUnsigned}, + | | ^^^^^^ variant or associated item not found in `RuntimeCall` +57 | | } +58 | | } + | |_- variant or associated item `Pallet` not found for this enum error[E0599]: no function or associated item named `pre_dispatch` found for struct `pallet::Pallet` in the current scope --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `pre_dispatch` not found for this + | -------------------- function or associated item `pre_dispatch` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where @@ -49,7 +54,7 @@ error[E0599]: no function or associated item named `validate_unsigned` found for --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:49:1 | 11 | pub struct Pallet(_); - | ------------------------ function or associated item `validate_unsigned` not found for this + | -------------------- function or associated item `validate_unsigned` not found for this struct ... 49 | / construct_runtime! { 50 | | pub enum Runtime where diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 47ff1c8af8cd2..4212707599d41 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -10,7 +10,7 @@ error: `integrity_test` can only be passed once as input. 7 | | } | |_^ | - = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::decl_module` which comes from the expansion of the macro `frame_support::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0601]: `main` function not found in crate `$CRATE` --> tests/decl_module_ui/reserved_keyword_two_times_integrity_test.rs:7:2 diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr index bfefadee99403..94bde853e4cc8 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -1,5 +1,5 @@ error: `on_initialize` can only be passed once as input. - --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 + --> tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs:1:1 | 1 | / frame_support::decl_module! { 2 | | pub struct Module for enum Call where origin: T::RuntimeOrigin, system=self { @@ -10,4 +10,4 @@ error: `on_initialize` can only be passed once as input. 
11 | | } | |_^ | - = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::decl_module` which comes from the expansion of the macro `frame_support::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/derive_no_bound_ui/debug.stderr b/frame/support/test/tests/derive_no_bound_ui/debug.stderr index 7580cab2ea0b3..acc7f80b37663 100644 --- a/frame/support/test/tests/derive_no_bound_ui/debug.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/debug.stderr @@ -1,8 +1,8 @@ error[E0277]: `::C` doesn't implement `std::fmt::Debug` - --> $DIR/debug.rs:7:2 + --> tests/derive_no_bound_ui/debug.rs:7:2 | 7 | c: T::C, | ^ `::C` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::C` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required for the cast from `::C` to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index 1d581ea7ed572..86e8d33c8dad1 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -6,7 +6,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound.rs:20:36 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index b1487776eac50..c6acccaaba7d4 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -6,7 +6,7 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:20:36 @@ -21,21 +21,23 @@ error[E0369]: binary operation `==` cannot be applied to type `&::Bar: WrapperTypeEncode` is not satisfied - --> tests/pallet_ui/call_argument_invalid_bound_2.rs:20:36 + --> tests/pallet_ui/call_argument_invalid_bound_2.rs:1:1 | -1 | / #[frame_support::pallet] +1 | #[frame_support::pallet] + | ^----------------------- + | | + | _in this procedural macro expansion + | | 2 | | mod pallet { 3 | | use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; 4 | | use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; ... | 16 | | 17 | | #[pallet::call] - | |__________________- required by a bound introduced by this call -... 
-20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar` + | |__________________^ the trait `WrapperTypeEncode` is not implemented for `::Bar` | = note: required because of the requirements on the impl of `Encode` for `::Bar` + = note: this error originates in the derive macro `frame_support::codec::Encode` which comes from the expansion of the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied --> tests/pallet_ui/call_argument_invalid_bound_2.rs:17:12 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index a0418760ba7e2..4a0b2ea67c7d6 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -7,7 +7,7 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` = help: the trait `std::fmt::Debug` is not implemented for `Bar` = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&Bar` to the object type `dyn std::fmt::Debug` help: consider annotating `Bar` with `#[derive(Debug)]` | 17 | #[derive(Debug)] diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index 92623e0329fe3..f95da9deef90a 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -18,4 +18,4 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` | = help: the trait `std::fmt::Debug` is not implemented for `::Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&::Bar` - = note: required for the cast to the object type `dyn std::fmt::Debug` + = note: required for the cast from `&::Bar` to the object type `dyn std::fmt::Debug` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index b8a9a1128d669..cced83f207c41 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the 
impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 5032f63bc1b1b..ab377e05d3901 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -28,7 +28,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` @@ -103,7 +103,7 @@ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied <&[(T,)] as EncodeLike>> <&[(T,)] as EncodeLike>> <&[T] as EncodeLike>> - and 280 others + and 278 others = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 2c47c2f480add..f5b6ac1da4576 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -18,5 +18,18 @@ note: type in trait error[E0308]: mismatched types --> tests/ui/impl_incorrect_method_signature.rs:19:11 | -19 | fn test(data: String) {} - | ^^^^ expected `u64`, found struct `std::string::String` +17 | / sp_api::impl_runtime_apis! { +18 | | impl self::Api for Runtime { +19 | | fn test(data: String) {} + | | ^^^^ expected `u64`, found struct `std::string::String` +20 | | } +... | +32 | | } +33 | | } + | |_- arguments to this function are incorrect + | +note: associated function defined here + --> tests/ui/impl_incorrect_method_signature.rs:13:6 + | +13 | fn test(data: u64); + | ^^^^ diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index 479e1cf05a9d1..6a99dcd3a1aed 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -18,9 +18,21 @@ note: type in trait error[E0308]: mismatched types --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:19:11 | -19 | fn test(data: &u64) { - | ^^^^^^^ expected `u64`, found `&u64` +17 | / sp_api::impl_runtime_apis! { +18 | | impl self::Api for Runtime { +19 | | fn test(data: &u64) { + | | ^^^^^^^ expected `u64`, found `&u64` +20 | | unimplemented!() +... 
| +34 | | } +35 | | } + | |_- arguments to this function are incorrect + | +note: associated function defined here + --> tests/ui/type_reference_in_impl_runtime_apis_call.rs:13:6 | +13 | fn test(data: u64); + | ^^^^ help: consider removing the borrow | 19 - fn test(data: &u64) { From 488fc24d98cfe643402b86990ae0aff27ba927b3 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 11 Oct 2022 11:49:12 +0300 Subject: [PATCH 227/348] rpc: Implement `transaction` RPC API (#12328) * rpc/tx: Add transaction structures for serialization Signed-off-by: Alexandru Vasile * rpc/tx: Add public facing `TransactionEvent` To circumvent the fact that serde does not allow mixing `#[serde(tag = "event")]` with `#[serde(tag = "event", content = "block")]` the public facing subscription structure is serialized and deserialized to an intermmediate representation. Signed-off-by: Alexandru Vasile * rpc/tx: Add trait for the `transaction` API Signed-off-by: Alexandru Vasile * rpc/tx: Convert RPC errors to transaction events Signed-off-by: Alexandru Vasile * rpc/tx: Implement `transaction` RPC methods Signed-off-by: Alexandru Vasile * tx-pool: Propagate tx index to events Signed-off-by: Alexandru Vasile * tx-pool: Adjust testing to reflect tx index in events Signed-off-by: Alexandru Vasile * rpc/tx: Convert tx-pool events for the new RPC spec Signed-off-by: Alexandru Vasile * rpc/tx: Convert tx-pool `FinalityTimeout` event to `Dropped` Signed-off-by: Alexandru Vasile * service: Enable the `transaction` API Signed-off-by: Alexandru Vasile * rpc/tx: Add tests for tx event encoding and decoding Signed-off-by: Alexandru Vasile * tx: Add indentation for subscriptions Signed-off-by: Alexandru Vasile * rpc/tx: Fix documentation Signed-off-by: Alexandru Vasile * rpc/tx: Serialize usize to hex Signed-off-by: Alexandru Vasile * tx-pool: Rename closure parameters Signed-off-by: Alexandru Vasile * service: Separate RPC spec versions Signed-off-by: Alexandru Vasile * rpc/tx: Use `H256` for testing block's hash Signed-off-by: Alexandru Vasile * rpc/tx: Serialize numbers as string Signed-off-by: Alexandru Vasile * tx-pool: Backward compatibility with RPC v1 Signed-off-by: Alexandru Vasile * Update client/rpc-spec-v2/src/transaction/transaction.rs Co-authored-by: Niklas Adolfsson * rpc/tx: Remove comment about serde clone Signed-off-by: Alexandru Vasile * rpc/tx: Use RPC custom error code for invalid tx format Signed-off-by: Alexandru Vasile * Update client/rpc-spec-v2/src/transaction/event.rs Co-authored-by: James Wilson * rpc/tx: Adjust internal structures for serialization/deserialization Signed-off-by: Alexandru Vasile Signed-off-by: Alexandru Vasile Co-authored-by: Niklas Adolfsson Co-authored-by: James Wilson --- Cargo.lock | 11 + client/rpc-spec-v2/Cargo.toml | 10 + client/rpc-spec-v2/src/lib.rs | 4 + client/rpc-spec-v2/src/transaction/api.rs | 37 ++ client/rpc-spec-v2/src/transaction/error.rs | 100 +++++ client/rpc-spec-v2/src/transaction/event.rs | 353 ++++++++++++++++++ client/rpc-spec-v2/src/transaction/mod.rs | 38 ++ .../src/transaction/transaction.rs | 208 +++++++++++ client/service/Cargo.toml | 1 + client/service/src/builder.rs | 12 + client/transaction-pool/api/Cargo.toml | 3 + client/transaction-pool/api/src/lib.rs | 62 ++- client/transaction-pool/src/graph/listener.rs | 17 +- client/transaction-pool/src/graph/pool.rs | 4 +- client/transaction-pool/src/graph/watcher.rs | 8 +- client/transaction-pool/tests/pool.rs | 41 +- 16 files changed, 873 insertions(+), 36 
deletions(-) create mode 100644 client/rpc-spec-v2/src/transaction/api.rs create mode 100644 client/rpc-spec-v2/src/transaction/error.rs create mode 100644 client/rpc-spec-v2/src/transaction/event.rs create mode 100644 client/rpc-spec-v2/src/transaction/mod.rs create mode 100644 client/rpc-spec-v2/src/transaction/transaction.rs diff --git a/Cargo.lock b/Cargo.lock index 49b3dd3cf957b..04b90dfffba1e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8792,10 +8792,19 @@ dependencies = [ name = "sc-rpc-spec-v2" version = "0.10.0-dev" dependencies = [ + "futures", "hex", "jsonrpsee", + "parity-scale-codec", "sc-chain-spec", + "sc-transaction-pool-api", + "serde", "serde_json", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-runtime", + "thiserror", "tokio", ] @@ -8848,6 +8857,7 @@ dependencies = [ "sc-offchain", "sc-rpc", "sc-rpc-server", + "sc-rpc-spec-v2", "sc-sysinfo", "sc-telemetry", "sc-tracing", @@ -9068,6 +9078,7 @@ dependencies = [ "futures", "log", "serde", + "serde_json", "sp-blockchain", "sp-runtime", "thiserror", diff --git a/client/rpc-spec-v2/Cargo.toml b/client/rpc-spec-v2/Cargo.toml index 12dec7464e6d0..885d415eb50d2 100644 --- a/client/rpc-spec-v2/Cargo.toml +++ b/client/rpc-spec-v2/Cargo.toml @@ -16,7 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.15.1", features = ["server", "macros"] } # Internal chain structures for "chain_spec". sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } +# Pool for submitting extrinsics required by "transaction" +sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } +sp-core = { version = "6.0.0", path = "../../primitives/core" } +sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +codec = { package = "parity-scale-codec", version = "3.0.0" } +thiserror = "1.0" +serde = "1.0" hex = "0.4" +futures = "0.3.21" [dev-dependencies] serde_json = "1.0" diff --git a/client/rpc-spec-v2/src/lib.rs b/client/rpc-spec-v2/src/lib.rs index 297fda13172d6..f4b9d2f95bf97 100644 --- a/client/rpc-spec-v2/src/lib.rs +++ b/client/rpc-spec-v2/src/lib.rs @@ -24,3 +24,7 @@ #![deny(unused_crate_dependencies)] pub mod chain_spec; +pub mod transaction; + +/// Task executor that is being used by RPC subscriptions. +pub type SubscriptionTaskExecutor = std::sync::Arc; diff --git a/client/rpc-spec-v2/src/transaction/api.rs b/client/rpc-spec-v2/src/transaction/api.rs new file mode 100644 index 0000000000000..2f0c799f1cc19 --- /dev/null +++ b/client/rpc-spec-v2/src/transaction/api.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! API trait for transactions. 
+ +use crate::transaction::event::TransactionEvent; +use jsonrpsee::proc_macros::rpc; +use sp_core::Bytes; + +#[rpc(client, server)] +pub trait TransactionApi { + /// Submit an extrinsic to watch. + /// + /// See [`TransactionEvent`](crate::transaction::event::TransactionEvent) for details on + /// transaction life cycle. + #[subscription( + name = "transaction_unstable_submitAndWatch" => "transaction_unstable_submitExtrinsic", + unsubscribe = "transaction_unstable_unwatch", + item = TransactionEvent, + )] + fn submit_and_watch(&self, bytes: Bytes); +} diff --git a/client/rpc-spec-v2/src/transaction/error.rs b/client/rpc-spec-v2/src/transaction/error.rs new file mode 100644 index 0000000000000..72a5959992f9e --- /dev/null +++ b/client/rpc-spec-v2/src/transaction/error.rs @@ -0,0 +1,100 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Transaction RPC errors. +//! +//! Errors are interpreted as transaction events for subscriptions. + +use crate::transaction::event::{TransactionError, TransactionEvent}; +use sc_transaction_pool_api::error::Error as PoolError; +use sp_runtime::transaction_validity::InvalidTransaction; + +/// Transaction RPC errors. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Transaction pool error. + #[error("Transaction pool error: {}", .0)] + Pool(#[from] PoolError), + /// Verification error. 
+ #[error("Extrinsic verification error: {}", .0)] + Verification(Box), +} + +impl From for TransactionEvent { + fn from(e: Error) -> Self { + match e { + Error::Verification(e) => TransactionEvent::Invalid(TransactionError { + error: format!("Verification error: {}", e), + }), + Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => + TransactionEvent::Invalid(TransactionError { + error: format!("Invalid transaction with custom error: {}", e), + }), + Error::Pool(PoolError::InvalidTransaction(e)) => { + let msg: &str = e.into(); + TransactionEvent::Invalid(TransactionError { + error: format!("Invalid transaction: {}", msg), + }) + }, + Error::Pool(PoolError::UnknownTransaction(e)) => { + let msg: &str = e.into(); + TransactionEvent::Invalid(TransactionError { + error: format!("Unknown transaction validity: {}", msg), + }) + }, + Error::Pool(PoolError::TemporarilyBanned) => + TransactionEvent::Invalid(TransactionError { + error: "Transaction is temporarily banned".into(), + }), + Error::Pool(PoolError::AlreadyImported(_)) => + TransactionEvent::Invalid(TransactionError { + error: "Transaction is already imported".into(), + }), + Error::Pool(PoolError::TooLowPriority { old, new }) => + TransactionEvent::Invalid(TransactionError { + error: format!( + "The priority of the transactin is too low (pool {} > current {})", + old, new + ), + }), + Error::Pool(PoolError::CycleDetected) => TransactionEvent::Invalid(TransactionError { + error: "The transaction contains a cyclic dependency".into(), + }), + Error::Pool(PoolError::ImmediatelyDropped) => + TransactionEvent::Invalid(TransactionError { + error: "The transaction could not enter the pool because of the limit".into(), + }), + Error::Pool(PoolError::Unactionable) => TransactionEvent::Invalid(TransactionError { + error: "Transaction cannot be propagated and the local node does not author blocks" + .into(), + }), + Error::Pool(PoolError::NoTagsProvided) => TransactionEvent::Invalid(TransactionError { + error: "Transaction does not provide any tags, so the pool cannot identify it" + .into(), + }), + Error::Pool(PoolError::InvalidBlockId(_)) => + TransactionEvent::Invalid(TransactionError { + error: "The provided block ID is not valid".into(), + }), + Error::Pool(PoolError::RejectedFutureTransaction) => + TransactionEvent::Invalid(TransactionError { + error: "The pool is not accepting future transactions".into(), + }), + } + } +} diff --git a/client/rpc-spec-v2/src/transaction/event.rs b/client/rpc-spec-v2/src/transaction/event.rs new file mode 100644 index 0000000000000..3c75eaff10fd4 --- /dev/null +++ b/client/rpc-spec-v2/src/transaction/event.rs @@ -0,0 +1,353 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! The transaction's event returned as json compatible object. 
+ +use serde::{Deserialize, Serialize}; + +/// The transaction was broadcasted to a number of peers. +/// +/// # Note +/// +/// The RPC does not guarantee that the peers have received the +/// transaction. +/// +/// When the number of peers is zero, the event guarantees that +/// shutting down the local node will lead to the transaction +/// not being included in the chain. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionBroadcasted { + /// The number of peers the transaction was broadcasted to. + #[serde(with = "as_string")] + pub num_peers: usize, +} + +/// The transaction was included in a block of the chain. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionBlock { + /// The hash of the block the transaction was included into. + pub hash: Hash, + /// The index (zero-based) of the transaction within the body of the block. + #[serde(with = "as_string")] + pub index: usize, +} + +/// The transaction could not be processed due to an error. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionError { + /// Reason of the error. + pub error: String, +} + +/// The transaction was dropped because of exceeding limits. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionDropped { + /// True if the transaction was broadcasted to other peers and + /// may still be included in the block. + pub broadcasted: bool, + /// Reason of the event. + pub error: String, +} + +/// Possible transaction status events. +/// +/// The status events can be grouped based on their kinds as: +/// +/// 1. Runtime validated the transaction: +/// - `Validated` +/// +/// 2. Inside the `Ready` queue: +/// - `Broadcast` +/// +/// 3. Leaving the pool: +/// - `BestChainBlockIncluded` +/// - `Invalid` +/// +/// 4. Block finalized: +/// - `Finalized` +/// +/// 5. At any time: +/// - `Dropped` +/// - `Error` +/// +/// The subscription's stream is considered finished whenever the following events are +/// received: `Finalized`, `Error`, `Invalid` or `Dropped`. However, the user is allowed +/// to unsubscribe at any moment. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +// We need to manually specify the trait bounds for the `Hash` trait to ensure `into` and +// `from` still work. +#[serde(bound( + serialize = "Hash: Serialize + Clone", + deserialize = "Hash: Deserialize<'de> + Clone" +))] +#[serde(into = "TransactionEventIR", from = "TransactionEventIR")] +pub enum TransactionEvent { + /// The transaction was validated by the runtime. + Validated, + /// The transaction was broadcasted to a number of peers. + Broadcasted(TransactionBroadcasted), + /// The transaction was included in a best block of the chain. + /// + /// # Note + /// + /// This may contain `None` if the block is no longer a best + /// block of the chain. + BestChainBlockIncluded(Option>), + /// The transaction was included in a finalized block. + Finalized(TransactionBlock), + /// The transaction could not be processed due to an error. + Error(TransactionError), + /// The transaction is marked as invalid. + Invalid(TransactionError), + /// The client was not capable of keeping track of this transaction. + Dropped(TransactionDropped), +} + +/// Intermediate representation (IR) for the transaction events +/// that handles block events only. 
+/// +/// The block events require a JSON compatible interpretation similar to: +/// +/// ```json +/// { event: "EVENT", block: { hash: "0xFF", index: 0 } } +/// ``` +/// +/// This IR is introduced because the block events need to be +/// serialized/deserialized with both "tag" and "content", while other +/// events require only "tag". +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "event", content = "block")] +enum TransactionEventBlockIR { + /// The transaction was included in the best block of the chain. + BestChainBlockIncluded(Option>), + /// The transaction was included in a finalized block of the chain. + Finalized(TransactionBlock), +} + +/// Intermediate representation (IR) for the transaction events +/// that handles non-block events only. +/// +/// The non-block events require a JSON compatible interpretation similar to: +/// +/// ```json +/// { event: "EVENT", num_peers: 0 } +/// ``` +/// +/// This IR is introduced because the block events need to be +/// serialized/deserialized with both "tag" and "content", while other +/// events require only "tag". +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[serde(tag = "event")] +enum TransactionEventNonBlockIR { + Validated, + Broadcasted(TransactionBroadcasted), + Error(TransactionError), + Invalid(TransactionError), + Dropped(TransactionDropped), +} + +/// Intermediate representation (IR) used for serialization/deserialization of the +/// [`TransactionEvent`] in a JSON compatible format. +/// +/// Serde cannot mix `#[serde(tag = "event")]` with `#[serde(tag = "event", content = "block")]` +/// for specific enum variants. Therefore, this IR is introduced to circumvent this +/// restriction, while exposing a simplified [`TransactionEvent`] for users of the +/// Rust ecosystem.
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(bound(serialize = "Hash: Serialize", deserialize = "Hash: Deserialize<'de>"))] +#[serde(rename_all = "camelCase")] +#[serde(untagged)] +enum TransactionEventIR { + Block(TransactionEventBlockIR), + NonBlock(TransactionEventNonBlockIR), +} + +impl From> for TransactionEventIR { + fn from(value: TransactionEvent) -> Self { + match value { + TransactionEvent::Validated => + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Validated), + TransactionEvent::Broadcasted(event) => + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Broadcasted(event)), + TransactionEvent::BestChainBlockIncluded(event) => + TransactionEventIR::Block(TransactionEventBlockIR::BestChainBlockIncluded(event)), + TransactionEvent::Finalized(event) => + TransactionEventIR::Block(TransactionEventBlockIR::Finalized(event)), + TransactionEvent::Error(event) => + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Error(event)), + TransactionEvent::Invalid(event) => + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Invalid(event)), + TransactionEvent::Dropped(event) => + TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Dropped(event)), + } + } +} + +impl From> for TransactionEvent { + fn from(value: TransactionEventIR) -> Self { + match value { + TransactionEventIR::NonBlock(status) => match status { + TransactionEventNonBlockIR::Validated => TransactionEvent::Validated, + TransactionEventNonBlockIR::Broadcasted(event) => + TransactionEvent::Broadcasted(event), + TransactionEventNonBlockIR::Error(event) => TransactionEvent::Error(event), + TransactionEventNonBlockIR::Invalid(event) => TransactionEvent::Invalid(event), + TransactionEventNonBlockIR::Dropped(event) => TransactionEvent::Dropped(event), + }, + TransactionEventIR::Block(block) => match block { + TransactionEventBlockIR::Finalized(event) => TransactionEvent::Finalized(event), + TransactionEventBlockIR::BestChainBlockIncluded(event) => + TransactionEvent::BestChainBlockIncluded(event), + }, + } + } +} + +/// Serialize and deserialize helper as string. +mod as_string { + use super::*; + use serde::{Deserializer, Serializer}; + + pub fn serialize(data: &usize, serializer: S) -> Result { + data.to_string().serialize(serializer) + } + + pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result { + String::deserialize(deserializer)? 
+ .parse() + .map_err(|e| serde::de::Error::custom(format!("Parsing failed: {}", e))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::H256; + + #[test] + fn validated_event() { + let event: TransactionEvent<()> = TransactionEvent::Validated; + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"validated"}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn broadcasted_event() { + let event: TransactionEvent<()> = + TransactionEvent::Broadcasted(TransactionBroadcasted { num_peers: 2 }); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"broadcasted","numPeers":"2"}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn best_chain_event() { + let event: TransactionEvent<()> = TransactionEvent::BestChainBlockIncluded(None); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"bestChainBlockIncluded","block":null}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + + let event: TransactionEvent = + TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { + hash: H256::from_low_u64_be(1), + index: 2, + })); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"bestChainBlockIncluded","block":{"hash":"0x0000000000000000000000000000000000000000000000000000000000000001","index":"2"}}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn finalized_event() { + let event: TransactionEvent = TransactionEvent::Finalized(TransactionBlock { + hash: H256::from_low_u64_be(1), + index: 10, + }); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"finalized","block":{"hash":"0x0000000000000000000000000000000000000000000000000000000000000001","index":"10"}}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn error_event() { + let event: TransactionEvent<()> = + TransactionEvent::Error(TransactionError { error: "abc".to_string() }); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"error","error":"abc"}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn invalid_event() { + let event: TransactionEvent<()> = + TransactionEvent::Invalid(TransactionError { error: "abc".to_string() }); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"invalid","error":"abc"}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } + + #[test] + fn dropped_event() { + let event: TransactionEvent<()> = TransactionEvent::Dropped(TransactionDropped { + broadcasted: true, + error: "abc".to_string(), + }); + let ser = serde_json::to_string(&event).unwrap(); + + let exp = r#"{"event":"dropped","broadcasted":true,"error":"abc"}"#; + assert_eq!(ser, exp); + + let event_dec: TransactionEvent<()> = serde_json::from_str(exp).unwrap(); + assert_eq!(event_dec, event); + } +} diff --git a/client/rpc-spec-v2/src/transaction/mod.rs 
b/client/rpc-spec-v2/src/transaction/mod.rs new file mode 100644 index 0000000000000..bb983894a428c --- /dev/null +++ b/client/rpc-spec-v2/src/transaction/mod.rs @@ -0,0 +1,38 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate transaction API. +//! +//! The transaction methods allow submitting a transaction and subscribing to +//! its status updates generated by the chain. +//! +//! # Note +//! +//! Methods are prefixed by `transaction`. + +pub mod api; +pub mod error; +pub mod event; +pub mod transaction; + +pub use api::TransactionApiServer; +pub use event::{ + TransactionBlock, TransactionBroadcasted, TransactionDropped, TransactionError, + TransactionEvent, +}; +pub use transaction::Transaction; diff --git a/client/rpc-spec-v2/src/transaction/transaction.rs b/client/rpc-spec-v2/src/transaction/transaction.rs new file mode 100644 index 0000000000000..e2cf736dff17a --- /dev/null +++ b/client/rpc-spec-v2/src/transaction/transaction.rs @@ -0,0 +1,208 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! API implementation for submitting transactions. + +use crate::{ + transaction::{ + api::TransactionApiServer, + error::Error, + event::{ + TransactionBlock, TransactionBroadcasted, TransactionDropped, TransactionError, + TransactionEvent, + }, + }, + SubscriptionTaskExecutor, +}; +use jsonrpsee::{ + core::async_trait, + types::{ + error::{CallError, ErrorObject}, + SubscriptionResult, + }, + SubscriptionSink, +}; +use sc_transaction_pool_api::{ + error::IntoPoolError, BlockHash, TransactionFor, TransactionPool, TransactionSource, + TransactionStatus, +}; +use std::sync::Arc; + +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_core::Bytes; +use sp_runtime::{generic, traits::Block as BlockT}; + +use codec::Decode; +use futures::{FutureExt, StreamExt, TryFutureExt}; + +/// An API for transaction RPC calls. +pub struct Transaction { + /// Substrate client. + client: Arc, + /// Transactions pool. + pool: Arc, + /// Executor to spawn subscriptions. 
+ executor: SubscriptionTaskExecutor, +} + +impl Transaction { + /// Creates a new [`Transaction`]. + pub fn new(client: Arc, pool: Arc, executor: SubscriptionTaskExecutor) -> Self { + Transaction { client, pool, executor } + } +} + +/// Currently we treat all RPC transactions as externals. +/// +/// Possibly in the future we could allow opt-in for special treatment +/// of such transactions, so that the block authors can inject +/// some unique transactions via RPC and have them included in the pool. +const TX_SOURCE: TransactionSource = TransactionSource::External; + +/// Extrinsic has an invalid format. +/// +/// # Note +/// +/// This is similar to the old `author` API error code. +const BAD_FORMAT: i32 = 1001; + +#[async_trait] +impl TransactionApiServer> for Transaction +where + Pool: TransactionPool + Sync + Send + 'static, + Pool::Hash: Unpin, + ::Hash: Unpin, + Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, +{ + fn submit_and_watch(&self, mut sink: SubscriptionSink, xt: Bytes) -> SubscriptionResult { + // This is the only place where the RPC server can return an error for this + // subscription. Other defects must be signaled as events to the sink. + let decoded_extrinsic = match TransactionFor::::decode(&mut &xt[..]) { + Ok(decoded_extrinsic) => decoded_extrinsic, + Err(e) => { + let err = CallError::Custom(ErrorObject::owned( + BAD_FORMAT, + format!("Extrinsic has invalid format: {}", e), + None::<()>, + )); + let _ = sink.reject(err); + return Ok(()) + }, + }; + + let best_block_hash = self.client.info().best_hash; + + let submit = self + .pool + .submit_and_watch( + &generic::BlockId::hash(best_block_hash), + TX_SOURCE, + decoded_extrinsic, + ) + .map_err(|e| { + e.into_pool_error() + .map(Error::from) + .unwrap_or_else(|e| Error::Verification(Box::new(e))) + }); + + let fut = async move { + match submit.await { + Ok(stream) => { + let mut state = TransactionState::new(); + let stream = + stream.filter_map(|event| async move { state.handle_event(event) }); + sink.pipe_from_stream(stream.boxed()).await; + }, + Err(err) => { + // We have not created a `Watcher` for the tx. Make sure the + // error is still propagated as an event. + let event: TransactionEvent<::Hash> = err.into(); + sink.pipe_from_stream(futures::stream::once(async { event }).boxed()).await; + }, + }; + }; + + self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); + Ok(()) + } +} + +/// The transaction's state that needs to be preserved between +/// multiple events generated by the transaction-pool. +/// +/// # Note +/// +/// In the future, the RPC server can submit only the last event when multiple +/// identical events happen in a row. +#[derive(Clone, Copy)] +struct TransactionState { + /// True if the transaction was previously broadcasted. + broadcasted: bool, +} + +impl TransactionState { + /// Construct a new [`TransactionState`]. + pub fn new() -> Self { + TransactionState { broadcasted: false } + } + + /// Handle events generated by the transaction-pool and convert them + /// to the new API expected state. + #[inline] + pub fn handle_event( + &mut self, + event: TransactionStatus, + ) -> Option> { + match event { + TransactionStatus::Ready | TransactionStatus::Future => + Some(TransactionEvent::::Validated), + TransactionStatus::Broadcast(peers) => { + // Set the broadcasted flag once if we submitted the transaction to + // at least one peer.
+ self.broadcasted = self.broadcasted || !peers.is_empty(); + + Some(TransactionEvent::Broadcasted(TransactionBroadcasted { + num_peers: peers.len(), + })) + }, + TransactionStatus::InBlock((hash, index)) => + Some(TransactionEvent::BestChainBlockIncluded(Some(TransactionBlock { + hash, + index, + }))), + TransactionStatus::Retracted(_) => Some(TransactionEvent::BestChainBlockIncluded(None)), + TransactionStatus::FinalityTimeout(_) => + Some(TransactionEvent::Dropped(TransactionDropped { + broadcasted: self.broadcasted, + error: "Maximum number of finality watchers has been reached".into(), + })), + TransactionStatus::Finalized((hash, index)) => + Some(TransactionEvent::Finalized(TransactionBlock { hash, index })), + TransactionStatus::Usurped(_) => Some(TransactionEvent::Invalid(TransactionError { + error: "Extrinsic was rendered invalid by another extrinsic".into(), + })), + TransactionStatus::Dropped => Some(TransactionEvent::Invalid(TransactionError { + error: "Extrinsic dropped from the pool due to exceeding limits".into(), + })), + TransactionStatus::Invalid => Some(TransactionEvent::Invalid(TransactionError { + error: "Extrinsic marked as invalid".into(), + })), + } + } +} diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 308da96fbbe77..a0c8f21effec1 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -69,6 +69,7 @@ sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/a sp-transaction-storage-proof = { version = "4.0.0-dev", path = "../../primitives/transaction-storage-proof" } sc-rpc-server = { version = "4.0.0-dev", path = "../rpc-servers" } sc-rpc = { version = "4.0.0-dev", path = "../rpc" } +sc-rpc-spec-v2 = { version = "0.10.0-dev", path = "../rpc-spec-v2" } sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sp-block-builder = { version = "4.0.0-dev", path = "../../primitives/block-builder" } sc-informant = { version = "0.10.0-dev", path = "../informant" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 4301e17a8c31e..987198d4b7f48 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -57,6 +57,7 @@ use sc_rpc::{ system::SystemApiServer, DenyUnsafe, SubscriptionTaskExecutor, }; +use sc_rpc_spec_v2::transaction::TransactionApiServer; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::MaintainedTransactionPool; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; @@ -673,6 +674,13 @@ where (chain, state, child_state) }; + let transaction_v2 = sc_rpc_spec_v2::transaction::Transaction::new( + client.clone(), + transaction_pool.clone(), + task_executor.clone(), + ) + .into_rpc(); + let author = sc_rpc::author::Author::new( client.clone(), transaction_pool, @@ -690,6 +698,10 @@ where rpc_api.merge(offchain).map_err(|e| Error::Application(e.into()))?; } + // Part of the RPC v2 spec. + rpc_api.merge(transaction_v2).map_err(|e| Error::Application(e.into()))?; + + // Part of the old RPC spec. 
rpc_api.merge(chain).map_err(|e| Error::Application(e.into()))?; rpc_api.merge(author).map_err(|e| Error::Application(e.into()))?; rpc_api.merge(system).map_err(|e| Error::Application(e.into()))?; diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml index d34ffe512b023..1ab0f32bc8bad 100644 --- a/client/transaction-pool/api/Cargo.toml +++ b/client/transaction-pool/api/Cargo.toml @@ -15,3 +15,6 @@ serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0.30" sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-runtime = { version = "6.0.0", default-features = false, path = "../../../primitives/runtime" } + +[dev-dependencies] +serde_json = "1.0" diff --git a/client/transaction-pool/api/src/lib.rs b/client/transaction-pool/api/src/lib.rs index 0ebb8f9d4cd9c..c0a94516ffc97 100644 --- a/client/transaction-pool/api/src/lib.rs +++ b/client/transaction-pool/api/src/lib.rs @@ -108,15 +108,18 @@ pub enum TransactionStatus { Ready, /// The transaction has been broadcast to the given peers. Broadcast(Vec), - /// Transaction has been included in block with given hash. - InBlock(BlockHash), + /// Transaction has been included in block with given hash + /// at the given position. + #[serde(with = "v1_compatible")] + InBlock((BlockHash, TxIndex)), /// The block this transaction was included in has been retracted. Retracted(BlockHash), /// Maximum number of finality watchers has been reached, /// old watchers are being removed. FinalityTimeout(BlockHash), - /// Transaction has been finalized by a finality-gadget, e.g GRANDPA - Finalized(BlockHash), + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA. + #[serde(with = "v1_compatible")] + Finalized((BlockHash, TxIndex)), /// Transaction has been replaced in the pool, by another transaction /// that provides the same tags. (e.g. same (sender, nonce)). Usurped(Hash), @@ -143,6 +146,8 @@ pub type TransactionFor